text stringlengths 1 1.05M |
|---|
head -n 2 *.state | tail -n 1 | awk '{print $2}'
tail -n 1 *traj | awk '{print $1}'
tail -n 1 *xsc | awk '{print $1}'
#find -type f -name '*.count' ! -iname '*hist.count' -path '*noMin*'
#find -type f -name '*.count' ! -iname '*hist.count' -path '*noMin*' -exec tail -f "$file" {} +
|
<reponame>kokoropie/shopee-auto-login
import datetime
import os
import signal
import time
from settings import *
from crontab import CronTab
time_format = "%Y-%m-%d %H:%M:%S"
def stop_me(_signo, _stack):
    """Signal handler: log that the container is stopping and terminate.

    Args:
        _signo: delivered signal number (unused).
        _stack: current stack frame (unused).
    """
    # Fixed typo in the log message ("stoped" -> "stopped").
    log.info("Docker container has stopped....")
    exit(-1)
def main():
    """Run the sign-in task once, then on every cron trigger forever.

    Reads the cron expression from the CRON_SIGNIN environment variable.
    Raises KeyError if CRON_SIGNIN is unset.
    """
    signal.signal(signal.SIGINT, stop_me)
    log.info("Use Docker to run HoYoLAB sign-in")
    cron_signin = os.environ["CRON_SIGNIN"]
    # random_seconds adds jitter, so each cron.next() call returns a
    # different delay; compute the delay ONCE per cycle and reuse it, so the
    # logged "next run" time matches the time we actually sleep until.
    # (The original sampled cron.next() separately for logging and sleeping.)
    cron = CronTab(cron_signin, loop=True, random_seconds=True)

    def log_next_run(delay):
        """Log the current time and the next-run time implied by `delay`."""
        now = datetime.datetime.now()
        log.info(f"Current running datetime: {now.strftime(time_format)}")
        next_run = now + datetime.timedelta(seconds=delay)
        log.info(f"Next run datetime: {next_run.strftime(time_format)}")

    def sign():
        """Invoke the actual sign-in script in a child process."""
        log.info("Starting signing")
        os.system("python3 ./genshin.py")

    delay = cron.next(default_utc=False)
    sign()
    log_next_run(delay)
    while True:
        time.sleep(delay)
        delay = cron.next(default_utc=False)
        sign()
        log_next_run(delay)


if __name__ == '__main__':
    main()
|
<reponame>incessantmeraki/labmanagement
'use strict';
const Subject = require('../models/subject.js');
const Question = require('../models/question.js');
const Batch = require('../models/batch.js');
const subjects = module.exports = {};
/**
 * GET /subjects - render the list-subjects page.
 *
 * Results can be filtered with URL query strings eg /subjects?firstname=alice.
 */
subjects.list = function*() {
    const sql = 'Select * From Subject';
    try {
        // query() resolves to [rows, fields]; we only need the rows.
        const [rows] = yield this.db.query({ sql: sql, namedPlaceholders: true }, this.query);
        yield this.render('templates/admin-subjects-list', { subjects: rows });
    } catch (e) {
        switch (e.code) {
            case 'ER_BAD_FIELD_ERROR': this.throw(403, 'Unrecognised Member field'); break;
            default: this.throw(e.status||500, e.message); break;
        }
    }
};
/**
 * GET /subjects/add - render the add-subject page.
 */
subjects.add = function*() {
    // After a failed validation, replay the previously submitted values.
    yield this.render('templates/admin-subjects-add', this.flash.formdata || {});
};
/**
 * POST /subjects - process add-subject; admins only.
 */
subjects.processAdd = function*() {
    if (this.passport.user.Role !== 'admin') return this.redirect('/');
    try {
        // Insert return value (new id) is not needed here.
        yield Subject.insert(this.request.body);
        this.redirect('/admin/subjects');
    } catch (e) {
        // stay on same page to report error (with current filled fields)
        this.flash = { formdata: this.request.body, _error: e.message };
        this.redirect(this.url);
    }
};
/**
 * GET /subjects/:id - render the view-subject page with its questions and
 * batches. Throws 404 when the subject does not exist.
 */
subjects.view = function*() {
    const subject = yield Subject.get(this.params.id);
    if (!subject) this.throw(404, 'Subject not found');
    // Questions belonging to this subject
    const sqlQuestions = `Select * From Question Where SubjectId = ? `;
    const resultQ = yield this.db.query(sqlQuestions, this.params.id);
    // Batches belonging to this subject
    const sqlBatches = `Select * From Batch Where SubjectId = ? `;
    const resultB = yield this.db.query(sqlBatches, this.params.id);
    const context = {
        questions: resultQ[0],
        batches: resultB[0],
        SubjectId: this.params.id,
    };
    // Removed leftover debug line `this.body=result[0];` which set the
    // response body before the template render.
    yield this.render('templates/admin-subjects-view.html', context);
};
/**
 * GET /questions/:id/delete - delete the subject, then return to the subject
 * list; admins only.
 */
subjects.delete = function*() {
    if (this.passport.user.Role !== 'admin') return this.redirect('/');
    try {
        yield Subject.delete(this.params.id);
        this.redirect('/admin/subjects/');
    } catch (e) {
        // stay on same page to report error
        this.flash = { _error: e.message };
        this.redirect(this.url);
    }
};
/**
 * GET /subjects/:id/edit - render the edit-subject page.
 */
subjects.edit = function*() {
    const subject = yield Subject.get(this.params.id);
    // Fixed copy-pasted message: this handler deals with subjects, not members.
    if (!subject) this.throw(404, 'Subject not found');
    yield this.render('templates/admin-subjects-edit', subject);
};
/**
 * POST /subjects/:id/edit - process edit-subject; admins only.
 */
subjects.processEdit = function*() {
    if (this.passport.user.Role !== 'admin') return this.redirect('/');
    if (!('Subjectname' in this.request.body)) {
        // Nothing to update. Previously no response was sent in this case,
        // leaving the request to hang; redirect back to the list instead.
        return this.redirect('/admin/subjects');
    }
    try {
        yield Subject.update(this.params.id, this.request.body);
        // return to list of subjects
        this.redirect('/admin/subjects');
    } catch (e) {
        // stay on same page to report error (with current filled fields)
        this.flash = { formdata: this.request.body, _error: e.message };
        this.redirect(this.url);
    }
};
/**
 * GET /subjects/:id/questions/add - render the add-question page.
 */
subjects.addQuestions = function*() {
    // After a failed validation, replay the previously submitted values.
    yield this.render('templates/admin-subjects-questions-add', this.flash.formdata || {});
};
/**
 * POST /subjects/:id/questions/add - add a question to the subject; admins only.
 */
subjects.processAddQuestions = function*() {
    if (this.passport.user.Role !== 'admin') return this.redirect('/');
    try {
        // Insert return value (new id) is not needed here.
        yield Question.insert({
            Question: this.request.body.Question,
            SubjectId: this.params.id,
        });
        this.redirect('/admin/subjects/'+this.params.id);
    } catch (e) {
        // stay on same page to report error (with current filled fields)
        this.flash = { formdata: this.request.body, _error: e.message };
        this.redirect(this.url);
    }
};
/**
 * GET /subjects/:id/batches/add - render the add-batches page, listing all
 * teachers and students so one teacher and several students can be selected.
 */
subjects.addBatches = function*() {
    const sqlTeachers = 'Select * From Teacher';
    const sqlStudents = 'Select * From Student';
    try {
        // query() resolves to [rows, fields]; keep only the rows.
        const [teachers] = yield this.db.query({ sql: sqlTeachers, namedPlaceholders: true }, this.query);
        const [students] = yield this.db.query({ sql: sqlStudents, namedPlaceholders: true });
        yield this.render('templates/admin-subjects-batches-add', { teachers: teachers, students: students });
    } catch (e) {
        switch (e.code) {
            case 'ER_BAD_FIELD_ERROR': this.throw(403, 'Unrecognised Member field'); break;
            default: this.throw(e.status||500, e.message); break;
        }
    }
};
/**
 * POST /subjects/:id/batches/add - create a batch for the subject and link
 * the selected teacher and students to it; admins only.
 */
subjects.processAddBatches = function*() {
    if (this.passport.user.Role !== 'admin') return this.redirect('/');
    try {
        // Removed leftover debug assignments to this.body.
        const newid = yield Batch.insert({ SubjectId: this.params.id });
        // Link the chosen teacher to the new batch.
        // NOTE(review): switched GLOBAL.db to this.db for consistency with
        // the rest of this module — confirm both reference the same pool.
        yield this.db.query('Insert Into TeacherBatch Set ?', {
            TeacherId: this.request.body.TeacherId,
            BatchId: newid,
        });
        // A single selected student is posted as a scalar (string), not an
        // array; the original index loop would then iterate its CHARACTERS.
        // Normalise to an array first.
        const students = [].concat(this.request.body.StudentId || []);
        for (const studentId of students) {
            yield this.db.query('Insert into StudentBatch Set ?', { StudentId: studentId, BatchId: newid });
        }
        this.redirect('/admin/subjects/'+this.params.id);
    } catch (e) {
        // stay on same page to report error (with current filled fields)
        this.flash = { formdata: this.request.body, _error: e.message };
        this.redirect(this.url);
    }
};
#!/bin/bash
# Install the RetroFlag Pi Case safe-shutdown script and enable it at boot.
filewebsite="https://raw.githubusercontent.com/RetroFlag/retroflag-picase/master"
sleep 2s
#Step 1) Check if root--------------------------------------
if [[ $EUID -ne 0 ]]; then
  echo "Please execute script as root."
  exit 1
fi
#-----------------------------------------------------------
#Step 3) Update repository----------------------------------
sudo apt-get update -y
sleep 2s
#-----------------------------------------------------------
#Step 4) Install gpiozero module----------------------------
sudo apt-get install -y python3-gpiozero
sleep 2s
#-----------------------------------------------------------
#Step 5) Download Python script-----------------------------
# Abort if we cannot create or enter the target directory, otherwise the
# download would silently land in the wrong place.
cd /opt/ || exit 1
sudo mkdir -p RetroFlag
cd /opt/RetroFlag || exit 1
script=SafeShutdown.py
# Quote expansions so the test and download behave even with odd IFS settings.
if [ -e "$script" ]; then
  echo "Script SafeShutdown.py already exists. Doing nothing."
else
  wget --no-check-certificate -O "$script" "$filewebsite/SafeShutdown_gpi.py"
fi
#-----------------------------------------------------------
sleep 2s
#Step 6) Enable Python script to run on start up------------
cd /etc/ || exit 1
RC=rc.local
if grep -q "sudo python3 \/opt\/RetroFlag\/SafeShutdown.py \&" "$RC"; then
  echo "File /etc/rc.local already configured. Doing nothing."
else
  # Insert the launch line immediately before the final `exit 0`.
  sed -i -e "s/^exit 0/sudo python3 \/opt\/RetroFlag\/SafeShutdown.py \&\n&/g" "$RC"
  echo "File /etc/rc.local configured."
fi
#-----------------------------------------------------------
#Step 7) Reboot to apply changes----------------------------
echo "RetroFlag Pi Case installation done. Will now reboot after 3 seconds."
sleep 3s
sudo reboot
#-----------------------------------------------------------
|
#!/bin/sh
# Create (once) and enable a 2 GiB swap file on the storage mounted at
# /tmp/mnt/sda1 — the mount layout suggests router firmware with a USB
# drive; presumably run at mount/boot time (confirm against the caller).
if [ ! -f /tmp/mnt/sda1/myswap.swp ]; then
# Allocate a 2048 MiB zero-filled file, then format it as swap space.
dd if=/dev/zero of=/tmp/mnt/sda1/myswap.swp bs=1M count=2048
mkswap /tmp/mnt/sda1/myswap.swp
fi
#enable swap
swapon /tmp/mnt/sda1/myswap.swp
# Low swappiness: prefer keeping pages in RAM, swap only under real pressure.
echo 20 > /proc/sys/vm/swappiness
#check if swap is on
free
|
#https://developer.arm.com/open-source/gnu-toolchain/gnu-rm/downloads
# Build script for an LA104 app: compile with the GNU Arm Embedded toolchain,
# link against the prebuilt bios_la104 library, dump a disassembly listing,
# and strip the ELF into the final flashable image.
set -e
mkdir -p build
cd build
# Compile for Cortex-M3 (thumb, soft-float), size-optimised, freestanding
# (no exceptions/RTTI/stdlib); -MD also emits .d dependency files.
arm-none-eabi-g++ -Wall -Os -Werror -fno-common -mcpu=cortex-m3 -mthumb -msoft-float -fno-exceptions -fno-rtti -fno-threadsafe-statics -nostdlib -Wno-psabi -DLA104 -MD -D _ARM -c ../source/main.cpp ../../../os_host/source/framework/Serialize.cpp ../../../os_host/source/framework/Wnd.cpp -I../../../os_library/include/
#arm-none-eabi-gcc -fPIC -mcpu=cortex-m3 -mthumb -o output.elf -nostartfiles -Wl,--unresolved-symbols=ignore-all -T ../app.lds ./main.o
# Link a position-independent image using the app linker script.
arm-none-eabi-gcc -fPIC -mcpu=cortex-m3 -mthumb -o output.elf -nostartfiles -T ../source/app.lds ./main.o ./Serialize.o ./Wnd.o -lbios_la104 -L../../../os_library/build
# Produce an annotated disassembly for inspection.
arm-none-eabi-objdump -d -S output.elf > output.asm
# Clean up intermediate object and dependency files.
find . -type f -name '*.o' -delete
find . -type f -name '*.d' -delete
../../../../tools/elfstrip/elfstrip output.elf 81image.elf
import tensorflow as tf
# Build a simple fully-connected classifier.
# NOTE(review): X_train / y_train are not defined in this snippet — they must
# be supplied by surrounding code (e.g. a dataset load) before model.fit runs.
model = tf.keras.models.Sequential()
# Flatten the incoming samples into 1-D feature vectors.
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
# 10 softmax outputs -> class probabilities (a 10-class task).
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
# Compile the model
# sparse_categorical_crossentropy expects integer class labels (not one-hot).
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs = 10)
#!/bin/sh
# Ensure lvm2 is installed on the HOST (entered via nsenter into PID 1's
# mount namespace), enable udev sync/rules in lvm.conf, then exec nrm with
# the original arguments. Typically run inside a privileged container.
NSENTER="/usr/bin/nsenter --mount=/proc/1/ns/mnt"
if uname -a | grep -i -q ubuntu; then
    lvmLine=$($NSENTER dpkg --get-selections lvm2 | grep install -w -i | wc -l)
    if [ "$lvmLine" = "0" ]; then
        $NSENTER apt install lvm2 -y
    fi
else
    lvmLine=$($NSENTER rpm -qa lvm2 | wc -l)
    if [ "$lvmLine" = "0" ]; then
        $NSENTER yum install lvm2 -y
    fi
fi
if [ "$lvmLine" = "0" ]; then
    # Fresh install: enable udev integration and restart the metadata daemon.
    $NSENTER sed -i 's/udev_sync\ =\ 0/udev_sync\ =\ 1/g' /etc/lvm/lvm.conf
    $NSENTER sed -i 's/udev_rules\ =\ 0/udev_rules\ =\ 1/g' /etc/lvm/lvm.conf
    $NSENTER systemctl restart lvm2-lvmetad.service
    echo "install lvm and starting..."
else
    # Already installed: only patch lvm.conf if udev_sync is still disabled.
    udevLine=$($NSENTER grep "udev_sync = 0" /etc/lvm/lvm.conf | wc -l)
    if [ "$udevLine" != "0" ]; then
        $NSENTER sed -i 's/udev_sync\ =\ 0/udev_sync\ =\ 1/g' /etc/lvm/lvm.conf
        $NSENTER sed -i 's/udev_rules\ =\ 0/udev_rules\ =\ 1/g' /etc/lvm/lvm.conf
        $NSENTER systemctl restart lvm2-lvmetad.service
        echo "update lvm.conf file: udev_sync from 0 to 1, udev_rules from 0 to 1"
    fi
fi
# Quote "$@" so arguments containing spaces are forwarded intact
# (the original unquoted $@ would word-split them).
/bin/nrm "$@"
|
package main

import "fmt"

// Named type definitions. Note: despite the original label, these are NEW
// defined types with string/int/float64 as underlying types, not aliases —
// a true alias would be written `type myInt = int`.
type mySentence string
type myInt int
type myFloat64 float64

// main prints each value and its dynamic type; %T shows the defined type
// name qualified by the package (e.g. main.mySentence).
func main() {
	var message mySentence = "Hello World!"
	var i myInt = 10
	var f myFloat64 = 10.01
	fmt.Println(message)
	fmt.Printf("%T\n", message)
	fmt.Println(i)
	fmt.Printf("%T\n", i)
	fmt.Println(f)
	fmt.Printf("%T\n", f)
}

/*
Sample run output:
[Running] go run "c:\Users\li-370\ForkGitW\LearnGolang\GolangTraining-master\27_code-in-process\26_playing-with-type\02_string\main.go"
Hello World!
main.mySentence
10
main.myInt
10.01
main.myFloat64
[Done] exited with code=0 in 8.792 seconds
*/
<gh_stars>0
/*
 Module-scope handles to DOM elements and status fields; all of them are
 assigned inside window.onload below, and shared by the event handlers and
 by process_message().
*/
/*
 Buttons (UI elements)
*/
var pin_button;
var option_button;
var menu_button;
var start_button;
var stop_button;
var update_button;
/*
 Links
*/
var recovery_link;
/*
 Visual elements (pop-up's, menu's)
*/
var update_screen;
var login_screen;
var overlay;
var navbar;
var temp;   // temperature display field
var time;   // remaining-time display field
var state;  // status text field
/*
 Input elements
*/
var pin_field;
/*
 Entry point: connect the websocket, wire up all UI handlers, and start the
 1-second status poll.
*/
window.onload = function() {
    window.socket = WebSocketConnect(); // Connect to the websocket

    /* State flags */
    var user_login = false;        // whether a valid pin has been entered
    var update_available = false;  // whether or not there's an update available
    var recovery_mode = false;     // whether the user is in recovery mode
    var show_menu = false;         // whether or not the user menu is being shown

    /* JSON state filled in asynchronously by process_message() */
    window.user_json; // user settings from the server
    window.page_json; // page data (washing programs) from the server

    var entered_pin; // pin typed into the pin field (a string)

    /*
     Buttons (UI elements)
    */
    pin_button = document.getElementById("pinbutton");
    option_button = document.getElementById("optionbutton");
    menu_button = document.getElementById("menubutton");
    start_button = document.getElementById("startbutton");
    stop_button = document.getElementById("stopbutton");
    update_button = document.getElementById("updatebutton");
    /*
     Links
    */
    recovery_link = document.getElementById("recoverylink");
    /*
     Visual elements (pop-up's, menu's)
    */
    update_screen = document.getElementById("update-message");
    login_screen = document.getElementById("login");
    overlay = document.getElementById("black-overlay");
    navbar = document.getElementById("navbar");
    temp = document.getElementById("temperaturefield");
    time = document.getElementById("timefield");
    state = document.getElementById("statusfield");
    /*
     Input elements
    */
    pin_field = document.getElementById("pinfield");

    // Not logged in yet: hide the navbar and show the login screen
    if(user_login == false) {
        navbar.style.display = "none";
        login_screen.style.display = "block";
    }

    recovery_link.onclick = function() {
        document.getElementById("recoveryprompt").style.display = "block";
        recovery_mode = true;
    };

    // Validate the entered pin and, on success, reveal the application UI
    pin_button.onclick = function() {
        entered_pin = pin_field.value;
        var pin_code;
        if(recovery_mode == true) {
            // Emergency services phone number (recovery pin), kept as a
            // STRING: the original numeric literal 01189998819991197253
            // exceeds Number.MAX_SAFE_INTEGER (so its digits cannot be
            // represented exactly) and a leading-zero decimal literal is a
            // SyntaxError in strict mode. The field value is a string, so
            // the comparison below now matches the digits exactly.
            pin_code = "01189998819991197253";
        } else {
            pin_code = window.user_json.pin; // The pin code is simply the user pin
        }
        if(pin_code == entered_pin) {
            user_login = true;
            check_update_status();
            // Set default value for time input fields
            document.getElementById("duration").value = "00:00";
            navbar.style.display = "block";          // Show the menu
            overlay.style.display = "none";          // Hide the black overlay
            document.body.style.overflow = "scroll"; // Show the scrollbar
            login_screen.style.display = "none";     // Hide the login screen
        } else {
            user_login = false;
        }
    };

    // Toggle the user menu
    menu_button.onclick = function() {
        if(show_menu == false) {
            document.getElementById("user_menu").style.display = "block";
            // Pre-select the radio button matching the saved recovery method
            document.getElementById(window.user_json.recovery).checked = true;
            show_menu = true;
        } else {
            document.getElementById("user_menu").style.display = "none";
            show_menu = false;
        }
    };

    // Validate and save the user's settings (new pin + recovery method)
    option_button.onclick = function() {
        var new_pin = document.getElementById("newpinfield").value;
        // Check if the pin is at least 4 characters long and a number
        if((new_pin.length != 4 || isNaN(new_pin) == true) && new_pin) {
            alert("Pin moet bestaan uit 4 getallen");
        } else {
            if(document.getElementById("continue").checked == true) { // Check if the first radio button is checked
                save_settings(new_pin, "continue");
            } else { // If not, the only other option must be selected
                save_settings(new_pin, "cancel");
            }
            recovery_mode = false;
            document.getElementById("user_menu").style.display = "none";
            show_menu = false;
        }
    };

    // Send the selected program/temperature/delay to the server
    start_button.onclick = function() {
        // Get the selected washingprogram
        var was_dropdown = document.getElementById("wasprogrammas");
        var was_selected = was_dropdown.value;
        // Get the selected temperature
        var temp_dropdown = document.getElementById("temperaturen");
        var temp_selected = parseInt(temp_dropdown.value, 10);
        console.log(temp_selected);
        var delay_value = document.getElementById("duration").value; // Time until start ("HH:MM")
        var time_parts = delay_value.split(":");                     // Split on the : delimiter
        var delay = (parseInt(time_parts[0], 10) * 60) + parseInt(time_parts[1], 10); // Total minutes
        // Send a start command to the websocket, along with a few parameters
        var json = {"request" : "StartWashingProgram",
            "parameters" : {
                "program" : was_selected,
                "temperature" : temp_selected,
                "delay" : delay
            }
        };
        window.socket.send(JSON.stringify(json));
    };

    // Ask the server to stop the running program
    stop_button.onclick = function() {
        var json = {"request" : "StopWashingProgram"};
        window.socket.send(JSON.stringify(json));
    };

    // Ask the server to fetch and apply an update
    update_button.onclick = function() {
        update_screen.style.display = "none";
        var json = {"request" : "FetchUpdate"};
        window.socket.send(JSON.stringify(json));
    };

    // Poll the machine status every second
    setInterval(function(){ window.socket.send(JSON.stringify({"request" : "Status"})); }, 1000);
};
/*
 Send a request to save the user settings.
 @param pin The pincode of the user ("" keeps the current pin)
 @param recovery The user's selected recovery method
*/
function save_settings(pin, recovery) {
    // An empty/falsy pin means "keep the current one".
    var effectivePin = pin ? pin : window.user_json.pin;
    var request = {
        "request" : "UpdateUser",
        "Parameters" : {
            "Pin" : effectivePin,
            "RecoveryMethod" : recovery
        }
    };
    window.socket.send(JSON.stringify(request));
}
/*
 Populate the webpage with values obtained from the server.
 Call this function only after the server has replied with a JSON array
 (i.e. after process_message() has filled window.page_json).
*/
function populate_wasprogrammas() {
    var was_dropdown = document.getElementById("wasprogrammas");
    for (var key in window.page_json) {
        if (window.page_json.hasOwnProperty(key)) {
            var option = document.createElement("option");
            option.textContent = window.page_json[key].name; // Fill the text content of the dropdown
            option.label = window.page_json[key].name; // Set a label so the start command can send a name that does not contain spaces
            option.value = key; // Give the option an ID that can be used to populate the temperature dropdown
            was_dropdown.appendChild(option);
        }
    }
    populate_temp(); // Initial call to populate_temp to fill the dropdown (since onchange doesn't get called on intial load)
}
/*
 Populate the temperature dropdown list by fetching the current value from the wasprogramma dropdown.
*/
function populate_temp() {
    var was_dropdown = document.getElementById("wasprogrammas");
    var temp_dropdown = document.getElementById("temperaturen");
    // Empty the temperature dropdown so it can be populated again
    while(temp_dropdown.hasChildNodes()) {
        temp_dropdown.removeChild(temp_dropdown.lastChild);
    }
    /*
     Loop through the temperatures for the selected washing program and generate option fields for them
    */
    for(var i = 0; i < window.page_json[was_dropdown.value].temperature.length; i++) {
        var option = document.createElement("option");
        option.innerHTML = window.page_json[was_dropdown.value].temperature[i] + "°C"; // Set text for the dropdown to the number with a Celcius symbol
        option.value = window.page_json[was_dropdown.value].temperature[i];
        temp_dropdown.appendChild(option);
    }
}
/*
 Connect to the websocket, wire up its event handlers, and request the
 initial data. Returns the WebSocket instance, or undefined when the browser
 does not support WebSockets.
*/
function WebSocketConnect() {
    if (!("WebSocket" in window)) {
        alert("WebSocket NOT supported by your Browser!");
        return;
    }
    // `var` keeps `ws` local; the original omitted it, creating an implicit
    // global (and a ReferenceError under strict mode).
    var ws = new WebSocket("ws://" + window.location.hostname + ":2222");
    ws.onopen = function(evt) {
        console.log("connection open");
        // Send via `ws` directly: window.socket is only assigned AFTER this
        // function returns, so referencing it here created a fragile
        // ordering dependency.
        ws.send(JSON.stringify({"request" : "FetchUserSettings"}));   // user settings
        ws.send(JSON.stringify({"request" : "FetchWashingProgram"})); // program list
        ws.send(JSON.stringify({"request" : "Status"}));              // initial status
    };
    ws.onclose = function(evt) { console.log("Connection closed with code: " + evt.code); };
    ws.onmessage = function(evt) { process_message(evt); };
    ws.onerror = function(evt) { console.log("websocket error: " + evt); };
    return ws;
}
/*
 Format a number of seconds as a zero-padded "HH:MM:SS" string.
 (Kept on Number.prototype because process_message() calls json.time.toHHMMSS().)
*/
Number.prototype.toHHMMSS = function () {
    var total = this;
    var hours = Math.floor(total / 3600);
    var minutes = Math.floor((total % 3600) / 60);
    var seconds = total - hours * 3600 - minutes * 60;
    var pad = function (v) { return v < 10 ? "0" + v : v; };
    return pad(hours) + ':' + pad(minutes) + ':' + pad(seconds);
};
/*
 Process the message received from the websocket, and fill the JSON arrays
 if required. The payload's "response" field names the request it answers.
*/
function process_message(message) {
    var json = JSON.parse(message.data);
    console.log(json);
    switch(json.response) {
        case "FetchUserSettings":
            // Cache the user settings (pin, recovery method) for the UI.
            window.user_json = json.settings;
            break;
        case "FetchWashingProgram":
            // Cache the program list and (re)build the program dropdowns.
            window.page_json = json.programs;
            console.log(window.page_json);
            populate_wasprogrammas();
            break;
        case "UpdateAvailable":
            // Currently a no-op; see check_update_status().
            break;
        case "UpdateUser":
            console.log("User settings are saved");
            break;
        case "StartWashingProgram":
        case "StopWashingProgram":
            // Change state: ask for a fresh status after start/stop.
            json = {"request" : "Status"};
            window.socket.send(JSON.stringify(json));
            break;
        case "Status":
            // Refresh the status panel (Dutch: "a wash task is (not) running").
            if (json.status) {
                state.innerHTML = "Er is momenteel een wastaak bezig";
                time.innerHTML = json.time.toHHMMSS();
                temp.innerHTML = json.temp + "°C";
            } else {
                state.innerHTML = "Er is momenteel geen wastaak bezig";
                time.innerHTML = "00:00:00";
                temp.innerHTML = "0°C";
            }
            break;
        default:
            console.log("Warning: Unknown message received: " + message);
            break;
    }
}
/*
 Placeholder: will eventually ask the websocket whether an update is
 available and flip update_available accordingly. Currently only logs.
*/
function check_update_status() {
    console.log("check_update_status()");
}
|
<reponame>vikneshwara-r-b/chaosmonkey
// Copyright 2016 Netflix, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mock
import (
"github.com/pkg/errors"
D "github.com/vikneshwara-r-b/chaosmonkey/deploy"
)
const cloudProvider = "aws"
// Dep returns a mock implementation of deploy.Deployment
// Dep has 4 apps: foo, bar, baz, quux
// Each app runs in 1 account:
// foo, bar, baz run in prod
// quux runs in test
// Each app has one cluster: foo-prod, bar-prod, baz-prod
// Each cluster runs in one region: us-east-1
// Each cluster contains 1 AZ with two instances
func Dep() D.Deployment {
	prod := D.AccountName("prod")
	test := D.AccountName("test")
	usEast1 := D.RegionName("us-east-1")
	// Hard-coded fixture: app -> account -> cluster -> region -> ASG -> instance IDs.
	return &Deployment{map[string]D.AppMap{
		"foo":  {prod: D.AccountInfo{CloudProvider: cloudProvider, Clusters: D.ClusterMap{"foo-prod": {usEast1: {"foo-prod-v001": []D.InstanceID{"i-d3e3d611", "i-63f52e25"}}}}}},
		"bar":  {prod: D.AccountInfo{CloudProvider: cloudProvider, Clusters: D.ClusterMap{"bar-prod": {usEast1: {"bar-prod-v011": []D.InstanceID{"i-d7f06d45", "i-ce433cf1"}}}}}},
		"baz":  {prod: D.AccountInfo{CloudProvider: cloudProvider, Clusters: D.ClusterMap{"baz-prod": {usEast1: {"baz-prod-v004": []D.InstanceID{"i-25b86646", "i-573d46d5"}}}}}},
		"quux": {test: D.AccountInfo{CloudProvider: cloudProvider, Clusters: D.ClusterMap{"quux-test": {usEast1: {"quux-test-v004": []D.InstanceID{"i-25b866ab", "i-892d46d5"}}}}}},
	}}
}
// NewDeployment returns a mock implementation of deploy.Deployment
// Pass in a deploy.AppMap, for example:
//   map[string]deploy.AppMap{
//       "foo":  deploy.AppMap{"prod": {"foo-prod": {"us-east-1": {"foo-prod-v001": []string{"i-d3e3d611", "i-63f52e25"}}}}},
//       "bar":  deploy.AppMap{"prod": {"bar-prod": {"us-east-1": {"bar-prod-v011": []string{"i-d7f06d45", "i-ce433cf1"}}}}},
//       "baz":  deploy.AppMap{"prod": {"baz-prod": {"us-east-1": {"baz-prod-v004": []string{"i-25b86646", "i-573d46d5"}}}}},
//       "quux": deploy.AppMap{"test": {"quux-test": {"us-east-1": {"quux-test-v004": []string{"i-25b866ab", "i-892d46d5"}}}}},
//   }
func NewDeployment(apps map[string]D.AppMap) D.Deployment {
	return &Deployment{AppMap: apps}
}
// Deployment implements deploy.Deployment interface
// It is backed entirely by the in-memory AppMap fixture.
type Deployment struct {
	AppMap map[string]D.AppMap
}

// Apps implements deploy.Deployment.Apps
// It sends one *D.App per fixture entry on c and closes the channel when
// done. Note the apps filter argument is ignored by this mock, and Go map
// iteration order makes the send order unspecified.
func (d Deployment) Apps(c chan<- *D.App, apps []string) {
	defer close(c)
	for name, appmap := range d.AppMap {
		c <- D.NewApp(name, appmap)
	}
}
// GetClusterNames implements deploy.Deployment.GetClusterNames
// It returns the cluster names for app in account (empty slice when unknown).
func (d Deployment) GetClusterNames(app string, account D.AccountName) ([]D.ClusterName, error) {
	clusters := d.AppMap[app][account].Clusters
	names := make([]D.ClusterName, 0, len(clusters))
	for name := range clusters {
		names = append(names, name)
	}
	return names, nil
}
// GetRegionNames implements deploy.Deployment.GetRegionNames
// It returns the regions the given cluster runs in (empty slice when unknown).
func (d Deployment) GetRegionNames(app string, account D.AccountName, cluster D.ClusterName) ([]D.RegionName, error) {
	regions := d.AppMap[app][account].Clusters[cluster]
	names := make([]D.RegionName, 0, len(regions))
	for name := range regions {
		names = append(names, name)
	}
	return names, nil
}
// AppNames implements deploy.Deployment.AppNames
// It returns the names of all apps in the fixture (order unspecified, since
// Go map iteration order is randomized).
func (d Deployment) AppNames() ([]string, error) {
	// make([]string, len, len) had a redundant capacity argument and required
	// manual index bookkeeping; preallocate capacity and append instead.
	result := make([]string, 0, len(d.AppMap))
	for app := range d.AppMap {
		result = append(result, app)
	}
	return result, nil
}
// GetApp implements deploy.Deployment.GetApp
// NOTE(review): an unknown name yields D.NewApp(name, nil) rather than an
// error — confirm callers expect that.
func (d Deployment) GetApp(name string) (*D.App, error) {
	return D.NewApp(name, d.AppMap[name]), nil
}

// CloudProvider implements deploy.Deployment.CloudProvider
// This mock always reports "aws" regardless of account.
func (d Deployment) CloudProvider(account string) (string, error) {
	return cloudProvider, nil
}
// GetInstanceIDs implements deploy.Deployment.GetInstanceIDs
// It returns an ASG name and the instance IDs for the given cluster/region,
// or an error naming the first missing level (app, account, cluster, region).
func (d Deployment) GetInstanceIDs(app string, account D.AccountName, cloudProvider string, region D.RegionName, cluster D.ClusterName) (D.ASGName, []D.InstanceID, error) {
	appInfo, ok := d.AppMap[app]
	if !ok {
		return "", nil, errors.Errorf("no app %s", app)
	}
	accountInfo, ok := appInfo[account]
	if !ok {
		return "", nil, errors.Errorf("app %s not deployed in account %s", app, account)
	}
	clusterInfo, ok := accountInfo.Clusters[cluster]
	if !ok {
		return "", nil, errors.Errorf("no cluster %s in app:%s, account:%s", cluster, app, account)
	}
	asgs, ok := clusterInfo[region]
	if !ok {
		return "", nil, errors.Errorf("cluster %s in account %s not deployed in region %s", cluster, account, region)
	}
	// Assumes at most one ASG per cluster/region. If there are several, all
	// instances are accumulated but only the last iterated ASG name is
	// returned (preserving the original behavior).
	instances := make([]D.InstanceID, 0)
	var asg D.ASGName
	for name, ids := range asgs {
		asg = name
		instances = append(instances, ids...) // idiomatic bulk append
	}
	return asg, instances, nil
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package brooklyn.location.jclouds;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import javax.annotation.Nullable;
import org.jclouds.compute.ComputeService;
import org.jclouds.compute.domain.Template;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import brooklyn.config.BrooklynProperties;
import brooklyn.config.ConfigKey;
import brooklyn.entity.basic.ConfigKeys;
import brooklyn.entity.basic.Entities;
import brooklyn.location.LocationSpec;
import brooklyn.location.NoMachinesAvailableException;
import brooklyn.location.basic.LocationConfigKeys;
import brooklyn.location.geo.HostGeoInfo;
import brooklyn.management.internal.LocalManagementContext;
import brooklyn.test.Asserts;
import brooklyn.test.entity.LocalManagementContextForTests;
import brooklyn.util.collections.MutableMap;
import brooklyn.util.config.ConfigBag;
import brooklyn.util.exceptions.CompoundRuntimeException;
import brooklyn.util.exceptions.Exceptions;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.reflect.TypeToken;
/**
* @author <NAME>
*/
public class JcloudsLocationTest implements JcloudsLocationConfig {

    private static final Logger log = LoggerFactory.getLogger(JcloudsLocationTest.class);

    // Don't care which image; not actually provisioning
    private static final String US_EAST_IMAGE_ID = "us-east-1/ami-7d7bfc14";

    // Sentinel thrown by the BailOut* locations so tests can abort
    // provisioning right after template construction and inspect the config.
    public static final RuntimeException BAIL_OUT_FOR_TESTING =
            new RuntimeException("early termination for test");

    /**
     * JcloudsLocation stub whose buildTemplate records the ConfigBag it was
     * given, optionally runs a configurable interceptor, then aborts
     * provisioning by throwing {@link #BAIL_OUT_FOR_TESTING}.
     */
    @SuppressWarnings("serial")
    public static class BailOutJcloudsLocation extends JcloudsLocation {
        // Hook allowing a test to inspect/mutate the ConfigBag before bail-out.
        public static final ConfigKey<Function<ConfigBag,Void>> BUILD_TEMPLATE_INTERCEPTOR = ConfigKeys.newConfigKey(new TypeToken<Function<ConfigBag,Void>>() {}, "buildtemplateinterceptor");
        ConfigBag lastConfigBag;
        public BailOutJcloudsLocation() {
            super();
        }
        public BailOutJcloudsLocation(Map<?, ?> conf) {
            super(conf);
        }
        @Override
        public Template buildTemplate(ComputeService computeService, ConfigBag config) {
            lastConfigBag = config;
            if (getConfig(BUILD_TEMPLATE_INTERCEPTOR) != null) getConfig(BUILD_TEMPLATE_INTERCEPTOR).apply(config);
            throw BAIL_OUT_FOR_TESTING;
        }
        /**
         * Calls obtain(flags); when provisioning aborted with the bail-out
         * sentinel (directly, as the cause, or among a
         * CompoundRuntimeException's causes), applies {@code test} to the
         * recorded ConfigBag; any other failure is propagated.
         */
        protected void tryObtainAndCheck(Map<?,?> flags, Predicate<? super ConfigBag> test) {
            try {
                obtain(flags);
            } catch (Exception e) {
                if (e==BAIL_OUT_FOR_TESTING || e.getCause()==BAIL_OUT_FOR_TESTING
                        || (e instanceof CompoundRuntimeException && ((CompoundRuntimeException)e).getAllCauses().contains(BAIL_OUT_FOR_TESTING))) {
                    test.apply(lastConfigBag);
                } else {
                    throw Exceptions.propagate(e);
                }
            }
        }
    }

    /** Variant of {@link BailOutJcloudsLocation} that also counts buildTemplate calls. */
    @SuppressWarnings("serial")
    public static class CountingBailOutJcloudsLocation extends BailOutJcloudsLocation {
        int buildTemplateCount = 0;
        @Override
        public Template buildTemplate(ComputeService computeService, ConfigBag config) {
            buildTemplateCount++;
            return super.buildTemplate(computeService, config);
        }
    }
@SuppressWarnings("serial")
public static class BailOutWithTemplateJcloudsLocation extends JcloudsLocation {
ConfigBag lastConfigBag;
Template template;
public BailOutWithTemplateJcloudsLocation() {
super();
}
public BailOutWithTemplateJcloudsLocation(Map<?, ?> conf) {
super(conf);
}
@Override
public Template buildTemplate(ComputeService computeService, ConfigBag config) {
template = super.buildTemplate(computeService, config);
lastConfigBag = config;
throw BAIL_OUT_FOR_TESTING;
}
protected synchronized void tryObtainAndCheck(Map<?,?> flags, Predicate<ConfigBag> test) {
try {
obtain(flags);
} catch (Throwable e) {
if (e == BAIL_OUT_FOR_TESTING) {
test.apply(lastConfigBag);
} else {
throw Exceptions.propagate(e);
}
}
}
public Template getTemplate() {
return template;
}
}
protected BailOutJcloudsLocation newSampleBailOutJcloudsLocationForTesting() {
return newSampleBailOutJcloudsLocationForTesting(ImmutableMap.<ConfigKey<?>,Object>of());
}
@SuppressWarnings({ "unchecked", "rawtypes" })
protected BailOutJcloudsLocation newSampleBailOutJcloudsLocationForTesting(Map<?,?> config) {
Map<ConfigKey<?>,?> allConfig = MutableMap.<ConfigKey<?>,Object>builder()
.put(IMAGE_ID, "bogus")
.put(CLOUD_PROVIDER, "aws-ec2")
.put(ACCESS_IDENTITY, "bogus")
.put(CLOUD_REGION_ID, "bogus")
.put(ACCESS_CREDENTIAL, "bogus")
.put(USER, "fred")
.put(MIN_RAM, 16)
.put(JcloudsLocation.MACHINE_CREATE_ATTEMPTS, 1)
.putAll((Map)config)
.build();
return managementContext.getLocationManager().createLocation(LocationSpec.create(BailOutJcloudsLocation.class)
.configure(allConfig));
}
protected BailOutWithTemplateJcloudsLocation newSampleBailOutWithTemplateJcloudsLocation() {
return newSampleBailOutWithTemplateJcloudsLocation(ImmutableMap.<ConfigKey<?>,Object>of());
}
@SuppressWarnings({ "unchecked", "rawtypes" })
protected BailOutWithTemplateJcloudsLocation newSampleBailOutWithTemplateJcloudsLocation(Map<?,?> config) {
String identity = (String) brooklynProperties.get("brooklyn.location.jclouds.aws-ec2.identity");
if (identity == null) identity = (String) brooklynProperties.get("brooklyn.jclouds.aws-ec2.identity");
String credential = (String) brooklynProperties.get("brooklyn.location.jclouds.aws-ec2.credential");
if (credential == null) credential = (String) brooklynProperties.get("brooklyn.jclouds.aws-ec2.credential");
Map<ConfigKey<?>,?> allConfig = MutableMap.<ConfigKey<?>,Object>builder()
.put(CLOUD_PROVIDER, AbstractJcloudsLiveTest.AWS_EC2_PROVIDER)
.put(CLOUD_REGION_ID, AbstractJcloudsLiveTest.AWS_EC2_USEAST_REGION_NAME)
.put(IMAGE_ID, US_EAST_IMAGE_ID) // so it runs faster, without loading all EC2 images
.put(ACCESS_IDENTITY, identity)
.put(ACCESS_CREDENTIAL, credential)
.put(USER, "fred")
.put(INBOUND_PORTS, "[22, 80, 9999]")
.putAll((Map)config)
.build();
return managementContext.getLocationManager().createLocation(LocationSpec.create(BailOutWithTemplateJcloudsLocation.class)
.configure(allConfig));
}
public static Predicate<ConfigBag> checkerFor(final String user, final Integer minRam, final Integer minCores) {
return new Predicate<ConfigBag>() {
@Override
public boolean apply(@Nullable ConfigBag input) {
Assert.assertEquals(input.get(USER), user);
Assert.assertEquals(input.get(MIN_RAM), minRam);
Assert.assertEquals(input.get(MIN_CORES), minCores);
return true;
}
};
}
public static Predicate<ConfigBag> templateCheckerFor(final String ports) {
return new Predicate<ConfigBag>() {
@Override
public boolean apply(@Nullable ConfigBag input) {
Assert.assertEquals(input.get(INBOUND_PORTS), ports);
return false;
}
};
}
private BrooklynProperties brooklynProperties;
private LocalManagementContext managementContext;
@BeforeMethod(alwaysRun=true)
public void setUp() throws Exception {
managementContext = LocalManagementContextForTests.newInstance(BrooklynProperties.Factory.builderEmpty().build());
brooklynProperties = managementContext.getBrooklynProperties();
}
@AfterMethod(alwaysRun=true)
public void tearUp() throws Exception {
if (managementContext != null) Entities.destroyAll(managementContext);
}
@Test
public void testCreateWithFlagsDirectly() throws Exception {
BailOutJcloudsLocation jcl = newSampleBailOutJcloudsLocationForTesting();
jcl.tryObtainAndCheck(MutableMap.of(MIN_CORES, 2), checkerFor("fred", 16, 2));
}
@Test
public void testCreateWithFlagsDirectlyAndOverride() throws Exception {
BailOutJcloudsLocation jcl = newSampleBailOutJcloudsLocationForTesting();
jcl.tryObtainAndCheck(MutableMap.of(MIN_CORES, 2, MIN_RAM, 8), checkerFor("fred", 8, 2));
}
@Test
public void testCreateWithFlagsSubLocation() throws Exception {
BailOutJcloudsLocation jcl = newSampleBailOutJcloudsLocationForTesting();
jcl = (BailOutJcloudsLocation) jcl.newSubLocation(MutableMap.of(USER, "jon", MIN_CORES, 2));
jcl.tryObtainAndCheck(MutableMap.of(MIN_CORES, 3), checkerFor("jon", 16, 3));
}
@Test
public void testStringListToIntArray() {
String listString = "[1, 2, 3, 4]";
int[] intArray = new int[] {1, 2, 3, 4};
Assert.assertEquals(JcloudsLocation.toIntArray(listString), intArray);
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testMalformedStringListToIntArray() {
String listString = "1, 2, 3, 4";
JcloudsLocation.toIntArray(listString);
}
@Test
public void testEmptyStringListToIntArray() {
String listString = "[]";
int[] intArray = new int[] {};
Assert.assertEquals(JcloudsLocation.toIntArray(listString), intArray);
}
@Test
public void testIntArrayToIntArray() {
int[] intArray = new int[] {1, 2, 3, 4};
Assert.assertEquals(JcloudsLocation.toIntArray(intArray), intArray);
}
@Test
public void testObjectArrayToIntArray() {
Object[] longArray = new Object[] {1, 2, 3, 4};
int[] intArray = new int[] {1, 2, 3, 4};
Assert.assertEquals(JcloudsLocation.toIntArray(longArray), intArray);
}
@Test(expectedExceptions = ClassCastException.class)
public void testInvalidObjectArrayToIntArray() {
String[] stringArray = new String[] {"1", "2", "3"};
JcloudsLocation.toIntArray(stringArray);
}
@Test
public void testVMCreationIsRetriedOnFailure() {
Map<ConfigKey<?>, Object> flags = Maps.newHashMap();
flags.put(IMAGE_ID, "bogus");
flags.put(CLOUD_PROVIDER, "aws-ec2");
flags.put(ACCESS_IDENTITY, "bogus");
flags.put(CLOUD_REGION_ID, "bogus");
flags.put(ACCESS_CREDENTIAL, "bogus");
flags.put(USER, "fred");
flags.put(MIN_RAM, 16);
flags.put(MACHINE_CREATE_ATTEMPTS, 3);
CountingBailOutJcloudsLocation jcl = managementContext.getLocationManager().createLocation(
LocationSpec.create(CountingBailOutJcloudsLocation.class).configure(flags));
jcl.tryObtainAndCheck(ImmutableMap.of(), Predicates.<ConfigBag>alwaysTrue());
Assert.assertEquals(jcl.buildTemplateCount, 3);
}
@Test(groups={"Live", "Live-sanity"})
public void testCreateWithInboundPorts() {
BailOutWithTemplateJcloudsLocation jcloudsLocation = newSampleBailOutWithTemplateJcloudsLocation();
jcloudsLocation = (BailOutWithTemplateJcloudsLocation) jcloudsLocation.newSubLocation(MutableMap.of());
jcloudsLocation.tryObtainAndCheck(MutableMap.of(), templateCheckerFor("[22, 80, 9999]"));
int[] ports = new int[] {22, 80, 9999};
Assert.assertEquals(jcloudsLocation.template.getOptions().getInboundPorts(), ports);
}
@Test(groups={"Live", "Live-sanity"})
public void testCreateWithInboundPortsOverride() {
BailOutWithTemplateJcloudsLocation jcloudsLocation = newSampleBailOutWithTemplateJcloudsLocation();
jcloudsLocation = (BailOutWithTemplateJcloudsLocation) jcloudsLocation.newSubLocation(MutableMap.of());
jcloudsLocation.tryObtainAndCheck(MutableMap.of(INBOUND_PORTS, "[23, 81, 9998]"), templateCheckerFor("[23, 81, 9998]"));
int[] ports = new int[] {23, 81, 9998};
Assert.assertEquals(jcloudsLocation.template.getOptions().getInboundPorts(), ports);
}
@Test
public void testCreateWithMaxConcurrentCallsUnboundedByDefault() throws Exception {
final int numCalls = 20;
ConcurrencyTracker interceptor = new ConcurrencyTracker();
ExecutorService executor = Executors.newCachedThreadPool();
try {
final BailOutJcloudsLocation jcloudsLocation = newSampleBailOutJcloudsLocationForTesting(ImmutableMap.of(BailOutJcloudsLocation.BUILD_TEMPLATE_INTERCEPTOR, interceptor));
for (int i = 0; i < numCalls; i++) {
executor.execute(new Runnable() {
@Override public void run() {
jcloudsLocation.tryObtainAndCheck(MutableMap.of(), Predicates.alwaysTrue());
}});
}
interceptor.assertCallCountEventually(numCalls);
interceptor.unblock();
executor.shutdown();
executor.awaitTermination(10, TimeUnit.SECONDS);
} finally {
executor.shutdownNow();
}
}
@Test(groups="Integration") // because takes 1 sec
public void testCreateWithMaxConcurrentCallsRespectsConfig() throws Exception {
final int numCalls = 4;
final int maxConcurrentCreations = 2;
ConcurrencyTracker interceptor = new ConcurrencyTracker();
ExecutorService executor = Executors.newCachedThreadPool();
try {
final BailOutJcloudsLocation jcloudsLocation = newSampleBailOutJcloudsLocationForTesting(ImmutableMap.of(
BailOutJcloudsLocation.BUILD_TEMPLATE_INTERCEPTOR, interceptor,
JcloudsLocation.MAX_CONCURRENT_MACHINE_CREATIONS, maxConcurrentCreations));
for (int i = 0; i < numCalls; i++) {
executor.execute(new Runnable() {
@Override public void run() {
jcloudsLocation.tryObtainAndCheck(MutableMap.of(), Predicates.alwaysTrue());
}});
}
interceptor.assertCallCountEventually(maxConcurrentCreations);
interceptor.assertCallCountContinually(maxConcurrentCreations);
interceptor.unblock();
interceptor.assertCallCountEventually(numCalls);
executor.shutdown();
executor.awaitTermination(10, TimeUnit.SECONDS);
} finally {
executor.shutdownNow();
}
}
@Test(groups="Integration") // because takes 1 sec
public void testCreateWithMaxConcurrentCallsAppliesToSubLocations() throws Exception {
final int numCalls = 4;
final int maxConcurrentCreations = 2;
ConcurrencyTracker interceptor = new ConcurrencyTracker();
ExecutorService executor = Executors.newCachedThreadPool();
try {
final BailOutJcloudsLocation jcloudsLocation = newSampleBailOutJcloudsLocationForTesting(ImmutableMap.of(
BailOutJcloudsLocation.BUILD_TEMPLATE_INTERCEPTOR, interceptor,
JcloudsLocation.MAX_CONCURRENT_MACHINE_CREATIONS, maxConcurrentCreations));
for (int i = 0; i < numCalls; i++) {
final BailOutJcloudsLocation subLocation = (BailOutJcloudsLocation) jcloudsLocation.newSubLocation(MutableMap.of());
executor.execute(new Runnable() {
@Override public void run() {
subLocation.tryObtainAndCheck(MutableMap.of(), Predicates.alwaysTrue());
}});
}
interceptor.assertCallCountEventually(maxConcurrentCreations);
interceptor.assertCallCountContinually(maxConcurrentCreations);
interceptor.unblock();
interceptor.assertCallCountEventually(numCalls);
executor.shutdown();
executor.awaitTermination(10, TimeUnit.SECONDS);
} finally {
executor.shutdownNow();
}
}
@Test
public void testCreateWithCustomMachineNamer() {
final String machineNamerClass = "brooklyn.location.cloud.CustomMachineNamer";
BailOutJcloudsLocation jcloudsLocation = newSampleBailOutJcloudsLocationForTesting(ImmutableMap.of(
LocationConfigKeys.CLOUD_MACHINE_NAMER_CLASS, machineNamerClass));
jcloudsLocation.tryObtainAndCheck(ImmutableMap.of(), new Predicate<ConfigBag>() {
public boolean apply(ConfigBag input) {
Assert.assertEquals(input.get(LocationConfigKeys.CLOUD_MACHINE_NAMER_CLASS), machineNamerClass);
return true;
}
});
}
@Test
public void testCreateWithCustomMachineNamerOnObtain() {
final String machineNamerClass = "brooklyn.location.cloud.CustomMachineNamer";
BailOutJcloudsLocation jcloudsLocation = newSampleBailOutJcloudsLocationForTesting();
jcloudsLocation.tryObtainAndCheck(ImmutableMap.of(
LocationConfigKeys.CLOUD_MACHINE_NAMER_CLASS, machineNamerClass), new Predicate<ConfigBag>() {
public boolean apply(ConfigBag input) {
Assert.assertEquals(input.get(LocationConfigKeys.CLOUD_MACHINE_NAMER_CLASS), machineNamerClass);
return true;
}
});
}
public static class ConcurrencyTracker implements Function<ConfigBag,Void> {
final AtomicInteger concurrentCallsCounter = new AtomicInteger();
final CountDownLatch continuationLatch = new CountDownLatch(1);
@Override public Void apply(ConfigBag input) {
concurrentCallsCounter.incrementAndGet();
try {
continuationLatch.await();
} catch (InterruptedException e) {
throw Exceptions.propagate(e);
}
return null;
}
public void unblock() {
continuationLatch.countDown();
}
public void assertCallCountEventually(final int expected) {
Asserts.succeedsEventually(new Runnable() {
@Override public void run() {
Assert.assertEquals(concurrentCallsCounter.get(), expected);
}
});
}
public void assertCallCountContinually(final int expected) {
Asserts.succeedsContinually(new Runnable() {
@Override public void run() {
Assert.assertEquals(concurrentCallsCounter.get(), expected);
}
});
}
}
@SuppressWarnings("serial")
public static class FakeLocalhostWithParentJcloudsLocation extends JcloudsLocation {
public static final ConfigKey<Function<ConfigBag,Void>> BUILD_TEMPLATE_INTERCEPTOR = ConfigKeys.newConfigKey(new TypeToken<Function<ConfigBag,Void>>() {}, "buildtemplateinterceptor");
ConfigBag lastConfigBag;
public FakeLocalhostWithParentJcloudsLocation() {
super();
}
public FakeLocalhostWithParentJcloudsLocation(Map<?, ?> conf) {
super(conf);
}
@Override
public JcloudsSshMachineLocation obtain(Map<?, ?> flags) throws NoMachinesAvailableException {
return getManagementContext().getLocationManager().createLocation(LocationSpec.create(JcloudsSshMachineLocation.class)
.configure("address", "127.0.0.1")
.configure("port", 22)
.configure("user", "bob")
.configure("jcloudsParent", this));
}
}
@Test
public void testInheritsGeo() throws Exception {
ConfigBag allConfig = ConfigBag.newInstance()
.configure(IMAGE_ID, "bogus")
.configure(CLOUD_PROVIDER, "aws-ec2")
.configure(CLOUD_REGION_ID, "bogus")
.configure(ACCESS_IDENTITY, "bogus")
.configure(ACCESS_CREDENTIAL, "bogus")
.configure(LocationConfigKeys.LATITUDE, 42d)
.configure(LocationConfigKeys.LONGITUDE, -20d)
.configure(JcloudsLocation.MACHINE_CREATE_ATTEMPTS, 1);
FakeLocalhostWithParentJcloudsLocation ll = managementContext.getLocationManager().createLocation(LocationSpec.create(FakeLocalhostWithParentJcloudsLocation.class).configure(allConfig.getAllConfig()));
JcloudsSshMachineLocation l = ll.obtain();
log.info("loc:" +l);
HostGeoInfo geo = HostGeoInfo.fromLocation(l);
log.info("geo: "+geo);
Assert.assertEquals(geo.latitude, 42d, 0.00001);
Assert.assertEquals(geo.longitude, -20d, 0.00001);
}
@SuppressWarnings("unchecked")
@Test
public void testInheritsGeoFromLocationMetadataProperties() throws Exception {
// in location-metadata.properties:
// brooklyn.location.jclouds.softlayer@wdc01.latitude=38.909202
// brooklyn.location.jclouds.softlayer@wdc01.longitude=-77.47314
ConfigBag allConfig = ConfigBag.newInstance()
.configure(IMAGE_ID, "bogus")
.configure(CLOUD_PROVIDER, "softlayer")
.configure(CLOUD_REGION_ID, "wdc01")
.configure(ACCESS_IDENTITY, "bogus")
.configure(ACCESS_CREDENTIAL, "bogus")
.configure(JcloudsLocation.MACHINE_CREATE_ATTEMPTS, 1);
FakeLocalhostWithParentJcloudsLocation ll = managementContext.getLocationManager().createLocation(LocationSpec.create(FakeLocalhostWithParentJcloudsLocation.class)
.configure(new JcloudsPropertiesFromBrooklynProperties().getJcloudsProperties("softlayer", "wdc01", null, managementContext.getBrooklynProperties()))
.configure(allConfig.getAllConfig()));
JcloudsSshMachineLocation l = ll.obtain();
log.info("loc:" +l);
HostGeoInfo geo = HostGeoInfo.fromLocation(l);
log.info("geo: "+geo);
Assert.assertEquals(geo.latitude, 38.909202d, 0.00001);
Assert.assertEquals(geo.longitude, -77.47314d, 0.00001);
}
// TODO more tests, where flags come in from resolver, named locations, etc
}
|
#include <iostream>
using namespace std;
// Copies the first `size` ints of `src` into `dest`.
// `dest` must point to at least `size` writable elements; a size of 0 is a no-op.
void copyArray(int dest[], const int src[], int size) {
    int idx = 0;
    while (idx < size) {
        dest[idx] = src[idx];
        ++idx;
    }
}
// Demonstrates copyArray: duplicates a small array and prints the copy.
int main() {
    const int size = 4;
    int source[size] = {1, 3, 5, 7};
    int duplicate[size];
    copyArray(duplicate, source, size);
    for (int i = 0; i < size; ++i) {
        cout << duplicate[i] << " ";
    }
    return 0;
}
<filename>database_manager.py<gh_stars>0
import sqlite3
class DatabaseManager:
    """Thin convenience wrapper around a SQLite connection.

    Table and column names are interpolated directly into the SQL, so they
    must come from trusted code; row *values* are bound as SQL parameters.
    """

    def __init__(self, database_file):
        # check_same_thread=False allows use from multiple threads; callers
        # are responsible for serializing access themselves.
        self.conn = sqlite3.connect(database_file, check_same_thread=False)

    def __del__(self):
        # Best effort only: during interpreter shutdown the connection may
        # already be unusable, and __del__ must never raise.
        try:
            self.close()
        except Exception:
            pass

    def close(self):
        """Commit any pending changes and close the connection."""
        self.conn.commit()
        self.conn.close()

    def get_cursor(self):
        """Return a fresh cursor on the underlying connection."""
        return self.conn.cursor()

    def commit(self):
        """Commit the current transaction."""
        self.conn.commit()

    def execute_get(self, command):
        """Execute a raw SQL string and return all resulting rows."""
        cursor = self.get_cursor()
        cursor.execute(command)
        return cursor.fetchall()

    def select(self, table, var_name="", var=""):
        """Select from `table`.

        Without a filter, returns all rows (list). With `var_name`/`var`,
        returns the first row where var_name == var, or None.
        """
        cursor = self.get_cursor()
        if var_name == "":
            cursor.execute(f"SELECT * FROM {table}")
            return cursor.fetchall()
        cursor.execute(f"SELECT * FROM {table} WHERE {var_name}=?", (var,))
        return cursor.fetchone()

    def select_col(self, table, col_name, var_name="", var=""):
        """Select one column from `table`, optionally filtered; returns all rows."""
        cursor = self.get_cursor()
        if var_name == "":
            cursor.execute(f"SELECT {col_name} FROM {table}")
            return cursor.fetchall()
        cursor.execute(f"SELECT {col_name} FROM {table} WHERE {var_name}=?", (var,))
        return cursor.fetchall()

    def select_subtable(self, table, var_name, in_table, in_select, in_var_name, in_var):
        """Select rows of `table` whose `var_name` appears in a sub-select on `in_table`."""
        cursor = self.get_cursor()
        command = (f"SELECT * FROM {table} WHERE {var_name} IN "
                   f"(SELECT {in_select} FROM {in_table} WHERE {in_var_name}=?)")
        cursor.execute(command, (in_var,))
        return cursor.fetchall()

    def insert(self, table, var_name, var, commit=True):
        """Insert one row; `var_name` and `var` are parallel lists.

        Returns (row_id, rows_changed); row_id is only populated when
        commit=True (preserved historical behavior).
        Raises RuntimeError if the two lists differ in length.
        """
        if len(var_name) != len(var):
            raise RuntimeError(
                "variable names and variables must have same size")
        cursor = self.get_cursor()
        placeholders = ", ".join(["?"] * len(var))
        command = f"INSERT INTO {table}({', '.join(var_name)}) VALUES ({placeholders})"
        cursor.execute(command, var)
        rows_changed = cursor.rowcount
        row_id = None
        if commit:
            self.conn.commit()
            row_id = cursor.lastrowid
        return row_id, rows_changed

    def update(self, table, var_changed, var_name, var, commit=True):
        """Update columns of `table` (mapping `var_changed`) where var_name == var.

        BUG FIX: the table name was previously hard-coded to 'tasks', silently
        ignoring the `table` argument; values are now bound as parameters
        instead of being string-interpolated.
        """
        cursor = self.get_cursor()
        assignments = ", ".join(f"{key}=?" for key in var_changed)
        command = f"UPDATE {table} SET {assignments} WHERE {var_name}=?"
        cursor.execute(command, [*var_changed.values(), var])
        if commit:
            self.conn.commit()

    def delete(self, table, var_name, var, op=" AND ", commit=True):
        """Delete rows matching the var_name[i] == var[i] conditions joined by `op`.

        Returns the number of rows deleted. Values are bound as parameters.
        """
        cursor = self.get_cursor()
        conditions = op.join(f"{name}=?" for name in var_name)
        command = f"DELETE FROM {table} WHERE ({conditions})"
        cursor.execute(command, list(var))
        rows_changed = cursor.rowcount
        if commit:
            self.conn.commit()
        return rows_changed
|
#!/bin/bash
# BUG FIX: `pipenv shell` spawns an interactive subshell and blocks, so the
# next line never ran inside the virtualenv. `pipenv run` executes the command
# within the project's virtualenv non-interactively.
pipenv run /docker_start.sh
|
<filename>src/main/resources/static/clustergrammer/d3_clustergram.py<gh_stars>0
# define a class for networks
class Network(object):
'''
Networks have two states: the data state where they are stored as: matrix and nodes;
and a viz state where they are stored as: viz.links, viz.row_nodes, viz.col_nodes.
The goal is to start in a data-state and produce a viz-state of the network that will be
used as input to d3_clustergram.js.
'''
def __init__(self):
# network: data-state
self.dat = {}
self.dat['nodes'] = {}
self.dat['nodes']['row'] = []
self.dat['nodes']['col'] = []
# node_info holds the orderings (ini, clust, rank), classification ('cl'), and other general information
self.dat['node_info'] = {}
for inst_rc in self.dat['nodes']:
self.dat['node_info'][inst_rc] = {}
self.dat['node_info'][inst_rc]['ini'] = []
self.dat['node_info'][inst_rc]['clust'] = []
self.dat['node_info'][inst_rc]['rank'] = []
self.dat['node_info'][inst_rc]['info'] = []
# classification is specifically used to color the class triangles
self.dat['node_info'][inst_rc]['cl'] = []
# initialize matrix
self.dat['mat'] = []
# mat_info is an optional dictionary
# so I'm not including it by default
# network: viz-state
self.viz = {}
self.viz['row_nodes'] = []
self.viz['col_nodes'] = []
self.viz['links'] = []
def load_tsv_to_net(self, filename):
f = open(filename,'r')
lines = f.readlines()
f.close()
self.load_lines_from_tsv_to_net(lines)
def load_lines_from_tsv_to_net(self, lines):
import numpy as np
# get row/col labels and data from lines
for i in range(len(lines)):
# get inst_line
inst_line = lines[i].split('\t')
# strip each element
inst_line = [z.strip() for z in inst_line]
# get column labels from first row
if i == 0:
tmp_col_labels = inst_line
# add the labels
for inst_elem in range(len(tmp_col_labels)):
# skip the first element
if inst_elem > 0:
# get the column label
inst_col_label = tmp_col_labels[inst_elem]
# add to network data
self.dat['nodes']['col'].append(inst_col_label)
# get row info
if i > 0:
# save row labels
self.dat['nodes']['row'].append(inst_line[0])
# get data - still strings
inst_data_row = inst_line[1:]
# convert to float
inst_data_row = [float(tmp_dat) for tmp_dat in inst_data_row]
# save the row data as an array
inst_data_row = np.asarray(inst_data_row)
# initailize matrix
if i == 1:
self.dat['mat'] = inst_data_row
# add rows to matrix
if i > 1:
self.dat['mat'] = np.vstack( ( self.dat['mat'], inst_data_row ) )
    def load_hgram(self, filename):
        '''Load a harmonogram TSV file into the network data-state.

        File layout (see example below): line 1 carries dataset names from the
        4th column on, line 2 carries dataset groups, line 3 is a header that
        is skipped, and lines 4+ carry a gene symbol (1st column) plus values
        (4th column onward).
        '''
        import numpy as np
        # example data format
        ###########################
        # # # DatasetName Achilles Cell Line Gene Essentiality Profiles
        # # # DatasetGroup disease or phenotype associations
        # GeneSym NA NA/DatasetID 1
        # 1060P11.3 na na 0
        # 3.8-1.2 na na 0
        # 3.8-1.5 na na 0
        # A1BG na na 0
        # A1BG-AS1 na na 0
        # A1CF na na 0
        # A2M na na 0
        # processing steps
        # line 1 has dataset names starting on 4th column
        # line 2 has dataset groups starting on 4th column
        # line 3 has column labels and dataset numbers, but no information that I need
        # line 4 and after have gene symbols (first column) and values (4th and after columns)
        # load gene classes for harmonogram
        # NOTE(review): `gc` is only referenced by the commented-out
        # protein-class code below — confirm whether this load is still needed.
        gc = self.load_json_to_dict('gene_classes_harmonogram.json')
        f = open(filename,'r')
        lines = f.readlines()
        f.close()
        # loop through the lines of the file
        for i in range(len(lines)):
            # get the inst_line and make list
            inst_line = lines[i].strip().split('\t')
            # progress indicator every 1000 lines
            if i%1000 == 0:
                print(i)
            # line 1: get dataset names
            if i ==0:
                # gather column information
                for j in range(len(inst_line)):
                    # skip the first three columns
                    if j > 2:
                        # get inst label
                        inst_col = inst_line[j]
                        # gather column labels
                        self.dat['nodes']['col'].append(inst_col)
            # line 2: get dataset groups - do not save as 'cl', save as 'info' to sidestep d3_clustergram.js code
            if i ==1:
                # gather column classification information
                for j in range(len(inst_line)):
                    # skip the first three columns
                    if j > 2:
                        # get inst label
                        inst_col = inst_line[j]
                        # gather column group labels
                        self.dat['node_info']['col']['info'].append(inst_col)
            # line 3: no information
            # line 4: get gene symbol and data
            if i > 2:
                # get gene
                inst_gene = inst_line[0]
                # add gene to rows
                self.dat['nodes']['row'].append(inst_gene)
                # not going to do this here
                ############################
                # # add protein type to classification and initialize class to other
                # inst_prot_class = 'other'
                # for inst_gc in gc:
                #   if inst_gene in gc[inst_gc]:
                #     inst_prot_class = inst_gc
                # # add class to node_info
                # self.dat['node_info']['row']['cl'].append(inst_prot_class)
                # grab data, convert to float, and make numpy array
                inst_data_row = inst_line[3:]
                inst_data_row = [float(tmp_dat) for tmp_dat in inst_data_row]
                inst_data_row = np.asarray(inst_data_row)
                # initialize matrix with the first data row
                if i == 3:
                    self.dat['mat'] = inst_data_row
                # add subsequent rows to matrix
                if i > 3:
                    self.dat['mat'] = np.vstack( ( self.dat['mat'], inst_data_row ) )
        print('\nthere are ' + str(len(self.dat['nodes']['row'])) + ' genes' )
        print('there are ' + str(len(self.dat['nodes']['col'])) + ' resources\n' )
        print('matrix shape')
        print(self.dat['mat'].shape)
    def load_cst_kea_enr_to_net(self, enr, pval_cutoff):
        '''Build a term-by-cell-line matrix from KEA enrichment results.

        `enr` maps cell line -> 'up'/'dn' -> list of dicts with keys
        name, pval, pval_bon, pval_bh, int_genes (and 'ranks'). Rows are the
        terms whose pval_bh passes `pval_cutoff` in any cell line; stored
        values are -log2(pval_bh), negated for down-regulated enrichment,
        averaged when both directions are enriched. Substrate ranks are kept
        per (row, col) cell in self.dat['mat_info'].
        '''
        import scipy
        import numpy as np
        # enr - data structure
        # cell line
        # up_genes, dn_genes
        # name, pval, pval_bon, pval_bh, int_genes
        print('loading cst enriched kinases ')
        # the columns are the cell lines
        all_col = sorted(enr.keys())
        # the rows are the enriched terms
        all_row = []
        # gather all genes with significantly enriched pval_bh
        #######################################################
        updn = ['up','dn']
        # loop through cell lines
        for inst_cl in enr:
            # loop through up/dn genes
            for inst_updn in updn:
                # get inst_enr: the enrichment results from a cell line in either up/dn
                inst_enr = enr[inst_cl][inst_updn]
                # loop through enriched terms
                for i in range(len(inst_enr)):
                    # append name if pval is significant
                    if inst_enr[i]['pval_bh'] <= pval_cutoff:
                        # append name to all terms
                        all_row.append(inst_enr[i]['name'])
        # get unique terms, sort them
        all_row = sorted(list(set(all_row)))
        # save row and column data to nodes
        nodes = {}
        nodes['row'] = all_row
        nodes['col'] = all_col
        # gather data into matrix
        #############################
        # initialize data_mat: merged, up-only, and dn-only matrices
        data_mat = {}
        data_mat['value'] = scipy.zeros([ len(all_row), len(all_col) ])
        data_mat['value_up'] = scipy.zeros([ len(all_row), len(all_col) ])
        data_mat['value_dn'] = scipy.zeros([ len(all_row), len(all_col) ])
        # save additional information (substrate ranks) in a dictionary
        mat_info = {}
        # loop through the rows (genes)
        for i in range(len(all_row)):
            # get inst row: gene
            inst_gene = all_row[i]
            # loop through the columns (cell lines)
            for j in range(len(all_col)):
                # get inst col: cell line
                inst_cl = all_col[j]
                # initialize pval_nl negative log up/dn
                pval_nl = {}
                # ini list of substrates
                substrates = []
                # get enrichment from up/dn genes
                for inst_updn in updn:
                    # default to NaN (no significant enrichment)
                    pval_nl[inst_updn] = np.nan
                    # gather the current set of enrichment results
                    # from the cell line
                    inst_enr = enr[inst_cl][inst_updn]
                    # check if kinase is in list of enriched results
                    if any(d['name'] == inst_gene for d in inst_enr):
                        # get the dict from the list
                        inst_dict = self.find_dict_in_list( inst_enr, 'name', inst_gene)
                        # only include significant pvalues
                        # NOTE(review): threshold is hard-coded to 0.05 here while
                        # rows were selected with `pval_cutoff` above — confirm intent.
                        if inst_dict['pval_bh'] <= 0.05:
                            # retrieve the negative log pval_
                            pval_nl[inst_updn] = -np.log2( inst_dict['pval_bh'] )
                            # save ranks of substrates
                            substrates.extend( inst_dict['ranks'] )
                        else:
                            # set nan pval
                            pval_nl[inst_updn] = np.nan
                # set value for data_mat
                ###########################
                # now that the enrichment results have been gathered
                # for up/dn genes save the results
                # there is both up and down enrichment
                if np.isnan(pval_nl['up']) == False and np.isnan(pval_nl['dn']) == False:
                    # set value of data_mat['value'] as the mean of up/dn enrichment
                    data_mat['value'][i,j] = np.mean([ pval_nl['up'], -pval_nl['dn'] ])
                    # set values of up/dn
                    data_mat['value_up'][i,j] = pval_nl['up']
                    data_mat['value_dn'][i,j] = -pval_nl['dn']
                # there is only up enrichment
                elif np.isnan(pval_nl['up']) == False:
                    # set value of data_mat as up enrichment
                    data_mat['value'][i,j] = pval_nl['up']
                    data_mat['value_up'][i,j] = pval_nl['up']
                # there is only dn enrichment
                elif np.isnan(pval_nl['dn']) == False:
                    # set value of data_mat as (negated) dn enrichment
                    data_mat['value'][i,j] = -pval_nl['dn']
                    data_mat['value_dn'][i,j] = -pval_nl['dn']
                # save substrates to mat_info
                mat_info[(i,j)] = substrates
        # save nodes and data_mat to self.dat
        self.dat['nodes'] = nodes
        self.dat['mat'] = data_mat['value']
        # add up and dn values into self.dat
        self.dat['mat_up'] = data_mat['value_up']
        self.dat['mat_dn'] = data_mat['value_dn']
        # add mat_info with substrate information
        self.dat['mat_info'] = mat_info
    def load_ccle_to_net(self, prot_type):
        '''Load CCLE NSCLC z-scored data for genes of class `prot_type`.

        Rows are the genes of the requested protein class that also appear in
        the CCLE data; columns are the CCLE cell lines; values come from
        ccle['data_z'].
        '''
        import scipy
        import numpy as np
        # load ccle data
        ccle = self.load_json_to_dict('CCLE/nsclc_allzc.json')
        ccle['data_z'] = np.asarray(ccle['data_z'], dtype = float)
        # load protein type lists
        gs_list = self.load_json_to_dict('gene_classes_harmonogram.json')
        # generate node lists
        # find the protein-types that are in ccle
        self.dat['nodes']['row'] = sorted(list(set(gs_list[prot_type]).intersection(ccle['gene'])))
        self.dat['nodes']['col'] = ccle['cell_lines']
        # initialize mat
        self.dat['mat'] = scipy.zeros([ len(self.dat['nodes']['row']), len(self.dat['nodes']['col']) ])
        # loop through rows and cols
        # NOTE(review): the .index() lookups below make this O(rows*cols*n);
        # fine for small matrices, consider precomputed index maps if slow.
        for i in range(len(self.dat['nodes']['row'])):
            for j in range(len(self.dat['nodes']['col'])):
                # get inst_row and inst_col
                inst_row = self.dat['nodes']['row'][i]
                inst_col = self.dat['nodes']['col'][j]
                # find gene and cl index in zscored data
                index_x = ccle['gene'].index(inst_row)
                index_y = ccle['cell_lines'].index(inst_col)
                # map primary data to mat
                self.dat['mat'][i,j] = ccle['data_z'][index_x, index_y]
def load_g2e_to_net(self, g2e):
import numpy as np
# get all signatures
sigs = g2e['gene_signatures']
# get all genes from signatures
all_genes = []
all_sigs = []
for inst_sig in sigs:
# get gene data
gene_data = inst_sig['genes']
# gather sig names
all_sigs.append(inst_sig['name'])
# gather genes
for inst_gene_data in gene_data:
# add genes - the gene name is the first element of the list
all_genes.append( inst_gene_data[0] )
# get unique sorted list of genes
all_genes = sorted(list(set(all_genes)))
print( 'found ' + str(len(all_genes)) + ' genes' )
print( 'found ' + str(len(all_sigs)) + ' siguatures\n' )
# save genes adn sigs to nodes
self.dat['nodes']['row'] = all_genes
self.dat['nodes']['col'] = all_sigs
# initialize numpy matrix of nans
self.dat['mat'] = np.empty((len(all_genes),len(all_sigs)))
self.dat['mat'][:] = np.nan
# loop through all signatures and genes
# and place information into self.dat
for inst_sig in sigs:
# get sig name
inst_sig_name = inst_sig['name']
# get gene data
gene_data = inst_sig['genes']
# loop through genes
for inst_gene_data in gene_data:
# add gene data to signature matrix
inst_gene = inst_gene_data[0]
inst_value = inst_gene_data[1]
# find index of gene and sig in matrix
row_index = all_genes.index(inst_gene)
col_index = all_sigs.index(inst_sig_name)
# save inst_value to matrix
self.dat['mat'][row_index, col_index] = inst_value
    def load_data_file_to_net(self, filename):
        '''Load a saved data-state (JSON file) into this network.'''
        # load json from file to new dictionary
        inst_dat = self.load_json_to_dict(filename)
        # convert dat['mat'] to numpy array and add to network
        self.load_data_to_net(inst_dat)
def load_data_to_net(self, inst_net):
''' load data into nodes and mat, also convert mat to numpy array'''
self.dat = inst_net
# convert to numpy array
self.mat_to_numpy_arr()
def export_net_json(self, net_type, indent='no-indent'):
''' export json string of dat '''
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(self.dat)
# convert numpy array to list
exp_dict['mat'] = exp_dict['mat'].tolist()
elif net_type == 'viz':
exp_dict = self.viz
# make json
if indent == 'indent':
exp_json = json.dumps(exp_dict, indent=2)
else:
exp_json = json.dumps(exp_dict)
return exp_json
def write_json_to_file(self, net_type, filename, indent='no-indent'):
import json
# get dat or viz representation as json string
if net_type == 'dat':
exp_json = self.export_net_json('dat', indent)
elif net_type == 'viz':
exp_json = self.export_net_json('viz', indent)
# save to file
fw = open(filename, 'w')
fw.write( exp_json )
fw.close()
def set_node_names(self, row_name, col_name):
'''give names to the rows and columns'''
self.dat['node_names'] = {}
self.dat['node_names']['row'] = row_name
self.dat['node_names']['col'] = col_name
def mat_to_numpy_arr(self):
''' convert list to numpy array - numpy arrays can not be saved as json '''
import numpy as np
self.dat['mat'] = np.asarray( self.dat['mat'] )
def swap_nan_for_zero(self):
import numpy as np
self.dat['mat'][ np.isnan( self.dat['mat'] ) ] = 0
def filter_network_thresh(self, cutoff, min_num_meet):
    '''
    Remove rows and columns from the matrix that do not have at least
    min_num_meet values with an absolute value above cutoff.

    Also filters nodes, node_info['info'], and (when present) mat_up,
    mat_dn, and the tuple-keyed mat_info to the surviving rows/columns.
    '''
    import numpy as np
    print('\nfiltering network using cutoff of ' + str(cutoff) + ' and min_num_meet of ' + str(min_num_meet))
    print('initial mat shape' + str(self.dat['mat'].shape))
    # surviving node names and (optional) per-node info
    nodes = {'row': [], 'col': []}
    node_info = {'row': [], 'col': []}
    # original indices of the survivors, used to slice the matrices directly
    # (replaces the original O(n^2) name->.index() lookups)
    keep = {'row': [], 'col': []}
    # rows: keep a row when enough entries pass the absolute-value cutoff
    for i, inst_name in enumerate(self.dat['nodes']['row']):
        row_vect = np.absolute(self.dat['mat'][i, :])
        if np.sum(row_vect >= cutoff) >= min_num_meet:
            keep['row'].append(i)
            nodes['row'].append(inst_name)
            if len(self.dat['node_info']['row']['info']) > 0:
                node_info['row'].append(self.dat['node_info']['row']['info'][i])
    # cols: same test on the column vectors
    for j, inst_name in enumerate(self.dat['nodes']['col']):
        col_vect = np.absolute(self.dat['mat'][:, j])
        if np.sum(col_vect >= cutoff) >= min_num_meet:
            keep['col'].append(j)
            nodes['col'].append(inst_name)
            if len(self.dat['node_info']['col']['info']) > 0:
                node_info['col'].append(self.dat['node_info']['col']['info'][j])
    # slice survivors in one shot; int dtype keeps np.ix_ valid for empty lists
    # (np.zeros replaces the scipy.zeros alias removed from modern SciPy)
    grid = np.ix_(np.asarray(keep['row'], dtype=int),
                  np.asarray(keep['col'], dtype=int))
    self.dat['mat'] = self.dat['mat'][grid]
    # filter up/dn matrices if present
    if 'mat_up' in self.dat:
        self.dat['mat_up'] = self.dat['mat_up'][grid]
        self.dat['mat_dn'] = self.dat['mat_dn'][grid]
    # mat_info is keyed by (row, col) tuples; re-key to the new positions
    if 'mat_info' in self.dat:
        filt_mat_info = {}
        for new_i, old_i in enumerate(keep['row']):
            for new_j, old_j in enumerate(keep['col']):
                filt_mat_info[(new_i, new_j)] = self.dat['mat_info'][(old_i, old_j)]
        self.dat['mat_info'] = filt_mat_info
    # save the surviving node names and their info lists
    self.dat['nodes'] = nodes
    self.dat['node_info']['row']['info'] = node_info['row']
    self.dat['node_info']['col']['info'] = node_info['col']
    print('final mat shape' + str(self.dat['mat'].shape) + '\n')
def cluster_row_and_col(self, dist_type, cutoff, min_num_comp, dendro=True):
    '''
    Cluster net.dat and make the visualization json, net.viz.
    Optionally leave out dendrogram colorbar groups with the dendro argument.

    cutoff is the absolute-value threshold and min_num_comp the minimum
    number of surviving values required to compare two vectors.
    '''
    import numpy as np
    # NOTE(review): dist_type is currently unused -- calc_thresh_col_dist
    # always computes a cosine distance; confirm intended behavior.
    # (message previously had cutoff and min_num_comp swapped)
    print('\nclustering the matrix using dist_type ' + dist_type + ' with a comparison requirement of at least ' + str(min_num_comp) + ' instances above abs-value of ' + str(cutoff) + ' in order to compare')
    # make distance matrices
    ##########################
    num_row = len(self.dat['nodes']['row'])
    num_col = len(self.dat['nodes']['col'])
    # np.zeros replaces the scipy.zeros alias removed from modern SciPy
    row_dm = np.zeros([num_row, num_row])
    col_dm = np.zeros([num_col, num_col])
    # the thresholded cosine distance is symmetric: fill both halves at once
    for i in range(num_row):
        for j in range(i, num_row):
            inst_dist = self.calc_thresh_col_dist(self.dat['mat'][i, :], self.dat['mat'][j, :], cutoff, min_num_comp)
            row_dm[i, j] = inst_dist
            row_dm[j, i] = inst_dist
    for i in range(num_col):
        for j in range(i, num_col):
            inst_dist = self.calc_thresh_col_dist(self.dat['mat'][:, i], self.dat['mat'][:, j], cutoff, min_num_comp)
            col_dm[i, j] = inst_dist
            col_dm[j, i] = inst_dist
    # replace nans with the maximum distance in the distance matrices
    row_dm[np.isnan(row_dm)] = np.nanmax(row_dm)
    col_dm[np.isnan(col_dm)] = np.nanmax(col_dm)
    # initialize clust order
    clust_order = self.ini_clust_order()
    # initial ordering: list() keeps node_info JSON-serializable on Python 3
    # (a bare range object would make export_net_json raise TypeError)
    clust_order['row']['ini'] = list(range(num_row))
    clust_order['col']['ini'] = list(range(num_col))
    # cluster rows and columns
    cluster_method = 'centroid'
    clust_order['row']['clust'], clust_order['row']['group'] = self.clust_and_group_nodes(row_dm, cluster_method)
    clust_order['col']['clust'], clust_order['col']['group'] = self.clust_and_group_nodes(col_dm, cluster_method)
    # rank by row/col sums
    clust_order['row']['rank'] = self.sort_rank_nodes('row')
    clust_order['col']['rank'] = self.sort_rank_nodes('col')
    # save all clustering orders to node_info
    for inst_rc in ['row', 'col']:
        for inst_order in ['ini', 'clust', 'rank', 'group']:
            self.dat['node_info'][inst_rc][inst_order] = clust_order[inst_rc][inst_order]
    # make the viz json - can optionally leave out dendrogram
    self.viz_json(dendro)
def clust_and_group_nodes(self, dm, cluster_method):
    '''Hierarchically cluster a distance matrix and assign flat groups.

    Returns (leaf_order, groups): leaf_order is the dendrogram leaf
    ordering; groups maps each cutoff fraction to an fcluster labeling.
    '''
    import scipy.cluster.hierarchy as hier
    # calculate linkage
    # NOTE(review): dm looks like a full square distance matrix; SciPy's
    # linkage treats 2-D input as raw observation vectors, not distances --
    # confirm whether squareform(dm) was intended here.
    Y = hier.linkage(dm, method=cluster_method)
    Z = hier.dendrogram(Y, no_plot=True)
    # get ordering of the dendrogram leaves
    inst_clust_order = Z['leaves']
    all_dist = self.group_cutoffs()
    # generate group memberships at each distance cutoff
    inst_groups = {}
    for inst_dist in all_dist:
        # inst_groups[inst_dist] = hier.fcluster(Y, inst_dist*dm.max(), 'inconsistent')
        # cut the tree at a fraction of the maximum pairwise distance
        inst_groups[inst_dist] = hier.fcluster(Y, inst_dist*dm.max(), 'distance')
    return inst_clust_order, inst_groups
def sort_rank_nodes(self, rowcol):
    '''Rank the 'row' or 'col' nodes by ascending sum of their values.

    Returns a list where entry i is the rank of node i when all nodes are
    sorted by their row/column total (smallest total gets rank 0).
    '''
    import numpy as np
    inst_nodes = self.dat['nodes'][rowcol]
    # read-only access: no need to deepcopy the whole matrix
    mat = self.dat['mat']
    # axis=1 sums each row, axis=0 sums each column
    axis = 1 if rowcol == 'row' else 0
    totals = np.sum(mat, axis=axis)
    # stable sort of node indices by total, matching the original ordering
    order = sorted(range(len(inst_nodes)), key=lambda i: totals[i])
    # invert the permutation: original position -> rank
    # (replaces the original O(n^2) list.index() lookups; assumes node
    # names are unique, as the original .index()-by-name code did)
    rank_of = [0] * len(inst_nodes)
    for rank, i in enumerate(order):
        rank_of[i] = rank
    return rank_of
def calc_thresh_col_dist(self, vect_row, vect_col, cutoff, min_num_meet):
    '''Cosine distance between two vectors after thresholding.

    Returns NaN when fewer than min_num_meet values survive the cutoff;
    NaNs are later replaced with the maximum distance.
    '''
    import scipy.spatial
    import numpy as np
    # keep only positions where both vectors pass the cutoff
    vect_row, vect_col = self.threshold_vect_comparison(vect_row, vect_col, cutoff)
    if len(vect_row) < min_num_meet:
        return np.nan
    return scipy.spatial.distance.cosine(vect_row, vect_col)
def viz_json(self, dendro=True):
    ''' make the dictionary for the d3_clustergram.js visualization '''
    # get dendrogram cutoff distances
    all_dist = self.group_cutoffs()
    # make nodes for viz
    #####################
    # make rows and cols
    for inst_rc in self.dat['nodes']:
        for i in range(len(self.dat['nodes'][inst_rc])):
            inst_dict = {}
            inst_dict['name'] = self.dat['nodes'][inst_rc][i]
            inst_dict['ini'] = self.dat['node_info'][inst_rc]['ini'][i]
            #!! clean this up so I do not have to get the index here
            # 'clust' holds a leaf ordering; .index(i) inverts it into the
            # node's position (O(n) per node, O(n^2) overall)
            inst_dict['clust'] = self.dat['node_info'][inst_rc]['clust'].index(i)
            inst_dict['rank'] = self.dat['node_info'][inst_rc]['rank'][i]
            # add node class 'cl' - this could potentially be a list of several classes
            # if 'cl' in self.dat['node_info'][inst_rc]:
            if len(self.dat['node_info'][inst_rc]['cl']) > 0:
                inst_dict['cl'] = self.dat['node_info'][inst_rc]['cl'][i]
            # add node information
            # if 'info' in self.dat['node_info'][inst_rc]:
            if len(self.dat['node_info'][inst_rc]['info']) > 0:
                inst_dict['info'] = self.dat['node_info'][inst_rc]['info'][i]
            # group info (one membership value per dendrogram cutoff)
            if dendro==True:
                inst_dict['group'] = []
                for tmp_dist in all_dist:
                    # float() keeps numpy scalars out of the exported json
                    inst_dict['group'].append(float(self.dat['node_info'][inst_rc]['group'][tmp_dist][i]))
            # append dictionary to list of nodes
            self.viz[inst_rc+'_nodes'].append(inst_dict)
    # links
    ########
    for i in range(len(self.dat['nodes']['row'])):
        for j in range(len(self.dat['nodes']['col'])):
            # only non-zero matrix entries become links
            if abs(self.dat['mat'][i,j]) > 0:
                inst_dict = {}
                inst_dict['source'] = i
                inst_dict['target'] = j
                inst_dict['value'] = self.dat['mat'][i,j]
                # add up/dn values if necessary
                if 'mat_up' in self.dat:
                    inst_dict['value_up'] = self.dat['mat_up'][i,j]
                if 'mat_up' in self.dat:
                    # NOTE(review): this guard checks 'mat_up' before reading
                    # 'mat_dn' -- presumably both always coexist; confirm
                    inst_dict['value_dn'] = self.dat['mat_dn'][i,j]
                # add information if necessary - use dictionary with tuple key
                # each element of the matrix needs to have information
                if 'mat_info' in self.dat:
                    inst_dict['info'] = self.dat['mat_info'][(i,j)]
                # add highlight if necessary - use dictionary with tuple key
                if 'mat_hl' in self.dat:
                    inst_dict['highlight'] = self.dat['mat_hl'][i,j]
                # append link
                self.viz['links'].append(inst_dict)
@staticmethod
def load_json_to_dict(filename):
''' load json to python dict and return dict '''
import json
f = open(filename, 'r')
inst_dict = json.load(f)
f.close()
return inst_dict
@staticmethod
def save_dict_to_json(inst_dict, filename, indent='no-indent'):
import json
# save as a json
fw = open(filename, 'w')
if indent == 'indent':
fw.write( json.dumps(inst_dict, indent=2) )
else:
fw.write( json.dumps(inst_dict) )
fw.close()
@staticmethod
def ini_clust_order():
rowcol = ['row','col']
orderings = ['clust','rank','group','ini']
clust_order = {}
for inst_node in rowcol:
clust_order[inst_node] = {}
for inst_order in orderings:
clust_order[inst_node][inst_order] = []
return clust_order
@staticmethod
def threshold_vect_comparison(x, y, cutoff):
import numpy as np
# x vector
############
# take absolute value of x
x_abs = np.absolute(x)
# this returns a tuple
found_tuple = np.where(x_abs >= cutoff)
# get index array
found_index_x = found_tuple[0]
# y vector
############
# take absolute value of y
y_abs = np.absolute(y)
# this returns a tuple
found_tuple = np.where(y_abs >= cutoff)
# get index array
found_index_y = found_tuple[0]
# get common intersection
found_common = np.intersect1d(found_index_x, found_index_y)
# apply cutoff
thresh_x = x[found_common]
thresh_y = y[found_common]
# return the threshold data
return thresh_x, thresh_y
@staticmethod
def group_cutoffs():
# generate distance cutoffs
all_dist = []
for i in range(11):
all_dist.append(float(i)/10)
return all_dist
@staticmethod
def find_dict_in_list(list_dict, search_value, search_string):
''' find a dict in a list of dicts by searching for a value '''
# get all the possible values of search_value
all_values = [d[search_value] for d in list_dict]
# check if the search value is in the keys
if search_string in all_values:
# find the dict
found_dict = (item for item in list_dict if item[search_value] == search_string).next()
else:
found_dict = {}
# return the found dictionary
return found_dict
|
#!/bin/bash
# Install MPI stacks (HPC-X, MVAPICH2, OpenMPI, Intel MPI) and write
# environment-module files for each, plus version-agnostic symlinks.
set -e

# Load gcc
GCC_VERSION=gcc-9.2.0
# Fix: `set CC=...` is csh syntax; in bash it only alters the positional
# parameters, so CC/GCC were never actually defined. Export them instead.
export CC=/usr/bin/gcc
export GCC=/usr/bin/gcc

INSTALL_PREFIX=/opt

# HPC-X v2.9.0
MLNX_OFED_VERSION="5.4-1.0.3.0"
HPCX_VERSION="v2.9.0"
$COMMON_DIR/write_component_version.sh "HPCX" ${HPCX_VERSION}
TARBALL="hpcx-${HPCX_VERSION}-gcc-MLNX_OFED_LINUX-${MLNX_OFED_VERSION}-ubuntu18.04-x86_64.tbz"
HPCX_DOWNLOAD_URL=https://azhpcstor.blob.core.windows.net/azhpc-images-store/${TARBALL}
HPCX_FOLDER=$(basename ${HPCX_DOWNLOAD_URL} .tbz)
$COMMON_DIR/download_and_verify.sh $HPCX_DOWNLOAD_URL "5b395006a05c888794b0b6204198d3bab597a8333c982ea5409f4f18e65bcbef"
tar -xvf ${TARBALL}
mv ${HPCX_FOLDER} ${INSTALL_PREFIX}
HPCX_PATH=${INSTALL_PREFIX}/${HPCX_FOLDER}

# Enable Sharpd
${HPCX_PATH}/sharp/sbin/sharp_daemons_setup.sh -s -d sharpd
systemctl enable sharpd
systemctl start sharpd

# MVAPICH2 2.3.6
MV2_VERSION="2.3.6"
$COMMON_DIR/write_component_version.sh "MVAPICH2" ${MV2_VERSION}
MV2_DOWNLOAD_URL=http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-${MV2_VERSION}.tar.gz
$COMMON_DIR/download_and_verify.sh $MV2_DOWNLOAD_URL "b3a62f2a05407191b856485f99da05f5e769d6381cd63e2fcb83ee98fc46a249"
tar -xvf mvapich2-${MV2_VERSION}.tar.gz
cd mvapich2-${MV2_VERSION}
./configure --prefix=${INSTALL_PREFIX}/mvapich2-${MV2_VERSION} --enable-g=none --enable-fast=yes && make -j$(nproc) && make install
cd ..

# OpenMPI 4.1.1
OMPI_VERSION="4.1.1"
$COMMON_DIR/write_component_version.sh "OMPI" ${OMPI_VERSION}
OMPI_DOWNLOAD_URL=https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-${OMPI_VERSION}.tar.gz
$COMMON_DIR/download_and_verify.sh $OMPI_DOWNLOAD_URL "d80b9219e80ea1f8bcfe5ad921bd9014285c4948c5965f4156a3831e60776444"
tar -xvf openmpi-${OMPI_VERSION}.tar.gz
cd openmpi-${OMPI_VERSION}
./configure --prefix=${INSTALL_PREFIX}/openmpi-${OMPI_VERSION} --with-ucx=${UCX_PATH} --with-hcoll=${HCOLL_PATH} --enable-mpirun-prefix-by-default --with-platform=contrib/platform/mellanox/optimized && make -j$(nproc) && make install
cd ..

# Intel MPI 2021 (Update 2)
IMPI_2021_VERSION="2021.2.0"
$COMMON_DIR/write_component_version.sh "IMPI_2021" ${IMPI_2021_VERSION}
IMPI_2021_DOWNLOAD_URL=https://registrationcenter-download.intel.com/akdlm/irc_nas/17729/l_mpi_oneapi_p_2021.2.0.215_offline.sh
$COMMON_DIR/download_and_verify.sh $IMPI_2021_DOWNLOAD_URL "d0d4cdd11edaff2e7285e38f537defccff38e37a3629ad4aa3"
bash l_mpi_oneapi_p_2021.2.0.215_offline.sh -s -a -s --eula accept

# Module Files
MODULE_FILES_DIRECTORY=/usr/share/modules/modulefiles/mpi
mkdir -p ${MODULE_FILES_DIRECTORY}

# HPC-X
cat << EOF >> ${MODULE_FILES_DIRECTORY}/hpcx-${HPCX_VERSION}
#%Module 1.0
#
# HPCx ${HPCX_VERSION}
#
conflict mpi
module load ${HPCX_PATH}/modulefiles/hpcx
EOF

# MVAPICH2
cat << EOF >> ${MODULE_FILES_DIRECTORY}/mvapich2-${MV2_VERSION}
#%Module 1.0
#
# MVAPICH2 ${MV2_VERSION}
#
conflict mpi
prepend-path PATH /opt/mvapich2-${MV2_VERSION}/bin
prepend-path LD_LIBRARY_PATH /opt/mvapich2-${MV2_VERSION}/lib
prepend-path MANPATH /opt/mvapich2-${MV2_VERSION}/share/man
setenv MPI_BIN /opt/mvapich2-${MV2_VERSION}/bin
setenv MPI_INCLUDE /opt/mvapich2-${MV2_VERSION}/include
setenv MPI_LIB /opt/mvapich2-${MV2_VERSION}/lib
setenv MPI_MAN /opt/mvapich2-${MV2_VERSION}/share/man
setenv MPI_HOME /opt/mvapich2-${MV2_VERSION}
EOF

# OpenMPI
cat << EOF >> ${MODULE_FILES_DIRECTORY}/openmpi-${OMPI_VERSION}
#%Module 1.0
#
# OpenMPI ${OMPI_VERSION}
#
conflict mpi
prepend-path PATH /opt/openmpi-${OMPI_VERSION}/bin
prepend-path LD_LIBRARY_PATH /opt/openmpi-${OMPI_VERSION}/lib
prepend-path MANPATH /opt/openmpi-${OMPI_VERSION}/share/man
setenv MPI_BIN /opt/openmpi-${OMPI_VERSION}/bin
setenv MPI_INCLUDE /opt/openmpi-${OMPI_VERSION}/include
setenv MPI_LIB /opt/openmpi-${OMPI_VERSION}/lib
setenv MPI_MAN /opt/openmpi-${OMPI_VERSION}/share/man
setenv MPI_HOME /opt/openmpi-${OMPI_VERSION}
EOF

# Intel 2021
cat << EOF >> ${MODULE_FILES_DIRECTORY}/impi_${IMPI_2021_VERSION}
#%Module 1.0
#
# Intel MPI ${IMPI_2021_VERSION}
#
conflict mpi
module load /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/modulefiles/mpi
setenv MPI_BIN /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/bin
setenv MPI_INCLUDE /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/include
setenv MPI_LIB /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/lib
setenv MPI_MAN /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/man
setenv MPI_HOME /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}
EOF

# Softlinks
ln -s ${MODULE_FILES_DIRECTORY}/hpcx-${HPCX_VERSION} ${MODULE_FILES_DIRECTORY}/hpcx
ln -s ${MODULE_FILES_DIRECTORY}/mvapich2-${MV2_VERSION} ${MODULE_FILES_DIRECTORY}/mvapich2
ln -s ${MODULE_FILES_DIRECTORY}/openmpi-${OMPI_VERSION} ${MODULE_FILES_DIRECTORY}/openmpi
ln -s ${MODULE_FILES_DIRECTORY}/impi_${IMPI_2021_VERSION} ${MODULE_FILES_DIRECTORY}/impi-2021
|
/*
* $Header: /home/cvs/jakarta-tomcat-4.0/catalina/src/share/org/apache/catalina/logger/FileLogger.java,v 1.8 2002/06/09 02:19:43 remm Exp $
* $Revision: 1.8 $
* $Date: 2002/06/09 02:19:43 $
*
* ====================================================================
*
* The Apache Software License, Version 1.1
*
* Copyright (c) 1999 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution, if
* any, must include the following acknowlegement:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowlegement may appear in the software itself,
* if and wherever such third-party acknowlegements normally appear.
*
* 4. The names "The Jakarta Project", "Tomcat", and "Apache Software
* Foundation" must not be used to endorse or promote products derived
* from this software without prior written permission. For written
* permission, please contact <EMAIL>.
*
* 5. Products derived from this software may not be called "Apache"
* nor may "Apache" appear in their names without prior written
* permission of the Apache Group.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
* [Additional notices, if required by prior licensing conditions]
*
*/
package org.apache.catalina.logger;
import org.apache.catalina.Lifecycle;
import org.apache.catalina.LifecycleException;
import org.apache.catalina.LifecycleListener;
import org.apache.catalina.util.LifecycleSupport;
import org.apache.catalina.util.StringManager;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.sql.Timestamp;
/**
* Implementation of <b>Logger</b> that appends log messages to a file
* named {prefix}.{date}.{suffix} in a configured directory, with an
* optional preceding timestamp.
*
* @author <NAME>
* @version $Revision: 1.8 $ $Date: 2002/06/09 02:19:43 $
*/
/**
 * Implementation of <b>Logger</b> that appends log messages to a file
 * named {prefix}.{date}.{suffix} in a configured directory, with an
 * optional preceding timestamp. Log files roll over at midnight based
 * on the message timestamp's date.
 */
public class FileLogger
    extends LoggerBase
    implements Lifecycle {

    // ----------------------------------------------------- Instance Variables

    /**
     * The descriptive information about this implementation.
     */
    protected static final String info =
        "org.apache.catalina.logger.FileLogger/1.0";

    /**
     * The string manager for this package.
     */
    private final StringManager sm =
        StringManager.getManager(Constants.Package);

    /**
     * The lifecycle event support for this component.
     */
    protected LifecycleSupport lifecycle = new LifecycleSupport(this);

    /**
     * The as-of date for the currently open log file, or a zero-length
     * string if there is no open log file. Volatile because log()
     * performs an unsynchronized first read before entering the
     * synchronized block (double-checked rollover).
     */
    private volatile String date = "";

    /**
     * The directory in which log files are created.
     */
    private String directory = "logs";

    /**
     * The prefix that is added to log file filenames.
     */
    private String prefix = "catalina.";

    /**
     * Has this component been started?
     */
    private boolean started = false;

    /**
     * The suffix that is added to log file filenames.
     */
    private String suffix = ".log";

    /**
     * Should logged messages be date/time stamped?
     */
    private boolean timestamp = false;

    /**
     * The PrintWriter to which we are currently logging, if any.
     * Volatile because it is written inside the synchronized rollover
     * block but read without synchronization in log().
     */
    private volatile PrintWriter writer = null;

    // ------------------------------------------------------------- Properties

    /**
     * Return the directory in which we create log files.
     */
    public String getDirectory() {
        return (directory);
    }

    /**
     * Set the directory in which we create log files.
     *
     * @param directory The new log file directory
     */
    public void setDirectory(String directory) {
        String oldDirectory = this.directory;
        this.directory = directory;
        support.firePropertyChange("directory", oldDirectory, this.directory);
    }

    /**
     * Return the log file prefix.
     */
    public String getPrefix() {
        return (prefix);
    }

    /**
     * Set the log file prefix.
     *
     * @param prefix The new log file prefix
     */
    public void setPrefix(String prefix) {
        String oldPrefix = this.prefix;
        this.prefix = prefix;
        support.firePropertyChange("prefix", oldPrefix, this.prefix);
    }

    /**
     * Return the log file suffix.
     */
    public String getSuffix() {
        return (suffix);
    }

    /**
     * Set the log file suffix.
     *
     * @param suffix The new log file suffix
     */
    public void setSuffix(String suffix) {
        String oldSuffix = this.suffix;
        this.suffix = suffix;
        support.firePropertyChange("suffix", oldSuffix, this.suffix);
    }

    /**
     * Return the timestamp flag.
     */
    public boolean getTimestamp() {
        return (timestamp);
    }

    /**
     * Set the timestamp flag.
     *
     * @param timestamp The new timestamp flag
     */
    public void setTimestamp(boolean timestamp) {
        boolean oldTimestamp = this.timestamp;
        this.timestamp = timestamp;
        // Boolean.valueOf replaces the deprecated Boolean constructor
        support.firePropertyChange("timestamp", Boolean.valueOf(oldTimestamp),
                                   Boolean.valueOf(this.timestamp));
    }

    // --------------------------------------------------------- Public Methods

    /**
     * Writes the specified message to a servlet log file, usually an event
     * log. The name and type of the servlet log is specific to the
     * servlet container.
     *
     * @param msg A <code>String</code> specifying the message to be written
     *  to the log file
     */
    public void log(String msg) {

        // Construct the timestamp we will use, if requested
        Timestamp ts = new Timestamp(System.currentTimeMillis());
        String tsString = ts.toString().substring(0, 19);
        String tsDate = tsString.substring(0, 10);

        // If the date has changed, switch log files (double-checked so the
        // common same-day path avoids taking the lock)
        if (!date.equals(tsDate)) {
            synchronized (this) {
                if (!date.equals(tsDate)) {
                    close();
                    date = tsDate;
                    open();
                }
            }
        }

        // Log this message, timestamped if necessary. PrintWriter.println
        // is internally synchronized, so concurrent messages do not
        // interleave within a line.
        if (writer != null) {
            if (timestamp) {
                writer.println(tsString + " " + msg);
            } else {
                writer.println(msg);
            }
        }

    }

    // -------------------------------------------------------- Private Methods

    /**
     * Close the currently open log file (if any)
     */
    private void close() {
        if (writer == null)
            return;
        writer.flush();
        writer.close();
        writer = null;
        date = "";
    }

    /**
     * Open the new log file for the date specified by <code>date</code>.
     */
    private void open() {

        // Create the directory if necessary
        File dir = new File(directory);
        if (!dir.isAbsolute())
            dir = new File(System.getProperty("catalina.base"), directory);
        dir.mkdirs();

        // Open the current log file; on failure we log nothing rather
        // than failing the caller
        try {
            String pathname = dir.getAbsolutePath() + File.separator +
                prefix + date + suffix;
            writer = new PrintWriter(new FileWriter(pathname, true), true);
        } catch (IOException e) {
            writer = null;
        }

    }

    // ------------------------------------------------------ Lifecycle Methods

    /**
     * Add a lifecycle event listener to this component.
     *
     * @param listener The listener to add
     */
    public void addLifecycleListener(LifecycleListener listener) {
        lifecycle.addLifecycleListener(listener);
    }

    /**
     * Get the lifecycle listeners associated with this lifecycle. If this
     * Lifecycle has no listeners registered, a zero-length array is returned.
     */
    public LifecycleListener[] findLifecycleListeners() {
        return lifecycle.findLifecycleListeners();
    }

    /**
     * Remove a lifecycle event listener from this component.
     *
     * @param listener The listener to remove
     */
    public void removeLifecycleListener(LifecycleListener listener) {
        lifecycle.removeLifecycleListener(listener);
    }

    /**
     * Prepare for the beginning of active use of the public methods of this
     * component. This method should be called after <code>configure()</code>,
     * and before any of the public methods of the component are utilized.
     *
     * @throws LifecycleException if this component detects a fatal error
     *  that prevents this component from being used
     */
    public void start() throws LifecycleException {

        // Validate and update our current component state
        if (started)
            throw new LifecycleException
                (sm.getString("fileLogger.alreadyStarted"));
        lifecycle.fireLifecycleEvent(START_EVENT, null);
        started = true;

    }

    /**
     * Gracefully terminate the active use of the public methods of this
     * component. This method should be the last one called on a given
     * instance of this component.
     *
     * @throws LifecycleException if this component detects a fatal error
     *  that needs to be reported
     */
    public void stop() throws LifecycleException {

        // Validate and update our current component state
        if (!started)
            throw new LifecycleException
                (sm.getString("fileLogger.notStarted"));
        lifecycle.fireLifecycleEvent(STOP_EVENT, null);
        started = false;

        close();

    }

}
|
<filename>wallet/wallet.go<gh_stars>0
package wallet
import (
"encoding/hex"
"github.com/paw-digital/crypto/ed25519"
"github.com/paw-digital/nano/address"
"github.com/paw-digital/nano/blocks"
"github.com/paw-digital/nano/store"
"github.com/paw-digital/nano/types"
"github.com/paw-digital/nano/uint128"
"github.com/pkg/errors"
)
// Wallet holds an account keypair together with the chain head and any
// prepared proof of work for the next block.
type Wallet struct {
	privateKey ed25519.PrivateKey // signing key, kept unexported
	PublicKey  ed25519.PublicKey  // account public key
	Head       blocks.Block       // latest block in this account's chain; nil if unopened
	Work       *types.Work        // prepared PoW for the next block, if any
	PoWchan    chan types.Work    // in-flight PoW generation; nil when none running
}
// Address returns the account address derived from the wallet's public key.
func (w *Wallet) Address() types.Account {
	account := address.PubKeyToAddress(w.PublicKey)
	return account
}
// New builds a Wallet from a hex-encoded private key. When the account is
// already opened, its existing open block is loaded as the chain head.
func New(private string) (w Wallet) {
	w.PublicKey, w.privateKey = address.KeypairFromPrivateKey(private)
	if open := store.FetchOpen(address.PubKeyToAddress(w.PublicKey)); open != nil {
		w.Head = open
	}
	return w
}
// HasPoW reports whether generated proof of work is ready; when it is, the
// work is stored on the wallet and the generation channel is cleared.
func (w *Wallet) HasPoW() bool {
	select {
	case work := <-w.PoWchan:
		w.Work = &work
		w.PoWchan = nil
		return true
	default:
	}
	return false
}
// WaitPoW blocks until the proof of work started by GeneratePoWAsync is
// ready, then stores it on the wallet.
func (w *Wallet) WaitPoW() {
	// Block on the channel instead of spinning on HasPoW, which busy-waited
	// and burned a full core while polling. (A nil channel still blocks
	// forever, matching the original infinite loop in that case.)
	work := <-w.PoWchan
	w.Work = &work
	w.PoWchan = nil
}
// WaitingForPoW reports whether a PoW generation goroutine is outstanding.
func (w *Wallet) WaitingForPoW() bool {
	waiting := w.PoWchan != nil
	return waiting
}
// GeneratePowSync generates proof of work for the next block, blocking until
// it is ready.
func (w *Wallet) GeneratePowSync() error {
	if err := w.GeneratePoWAsync(); err != nil {
		return err
	}
	w.WaitPoW()
	return nil
}
// GeneratePoWAsync starts a goroutine that computes the next proof of work
// and delivers the result on w.PoWchan. It fails if generation is already
// in flight.
func (w *Wallet) GeneratePoWAsync() error {
	if w.PoWchan != nil {
		return errors.Errorf("Already generating PoW")
	}
	c := make(chan types.Work)
	w.PoWchan = c
	go func() {
		if w.Head == nil {
			// No chain yet: the work target is the public key itself.
			c <- blocks.GenerateWorkForHash(types.BlockHash(hex.EncodeToString(w.PublicKey)))
		} else {
			c <- blocks.GenerateWork(w.Head)
		}
	}()
	return nil
}
// GetBalance returns the account balance, or zero for an unopened account.
func (w *Wallet) GetBalance() uint128.Uint128 {
	if w.Head != nil {
		return store.GetBalance(w.Head)
	}
	return uint128.FromInts(0, 0)
}
// Open creates and signs this account's open block, receiving the send
// identified by source. Requires prepared proof of work and that no open
// block exists yet. On success the new block becomes the chain head.
func (w *Wallet) Open(source types.BlockHash, representative types.Account) (*blocks.OpenBlock, error) {
	if w.Head != nil {
		return nil, errors.Errorf("Cannot open a non empty account")
	}
	if w.Work == nil {
		return nil, errors.Errorf("No PoW")
	}
	// Guard against a second open block for this account already in the store.
	existing := store.FetchOpen(w.Address())
	if existing != nil {
		return nil, errors.Errorf("Cannot open account, open block already exists")
	}
	// The referenced send block must exist locally.
	send_block := store.FetchBlock(source)
	if send_block == nil {
		return nil, errors.Errorf("Could not find references send")
	}
	common := blocks.CommonBlock{
		Work:      *w.Work,
		Signature: "",
	}
	// NOTE(review): unkeyed struct literal -- field order must match
	// blocks.OpenBlock's declaration.
	block := blocks.OpenBlock{
		source,
		representative,
		w.Address(),
		common,
	}
	block.Signature = block.Hash().Sign(w.privateKey)
	// Open is the only constructor here that validates its own PoW.
	if !blocks.ValidateBlockWork(&block) {
		return nil, errors.Errorf("Invalid PoW")
	}
	w.Head = &block
	return &block, nil
}
// Send creates and signs a send block moving amount from this account to
// destination. Requires an opened account, prepared proof of work, and
// sufficient balance. On success the new block becomes the chain head.
func (w *Wallet) Send(destination types.Account, amount uint128.Uint128) (*blocks.SendBlock, error) {
	if w.Head == nil {
		return nil, errors.Errorf("Cannot send from empty account")
	}
	if w.Work == nil {
		return nil, errors.Errorf("No PoW")
	}
	if amount.Compare(w.GetBalance()) > 0 {
		return nil, errors.Errorf("Tried to send more than balance")
	}
	common := blocks.CommonBlock{
		Work:      *w.Work,
		Signature: "",
	}
	// NOTE(review): unkeyed struct literal -- field order must match
	// blocks.SendBlock's declaration. The block records the REMAINING
	// balance, not the amount transferred.
	block := blocks.SendBlock{
		w.Head.Hash(),
		destination,
		w.GetBalance().Sub(amount),
		common,
	}
	block.Signature = block.Hash().Sign(w.privateKey)
	w.Head = &block
	return &block, nil
}
// Receive creates and signs a receive block pocketing the send identified
// by source. Requires an opened account, prepared proof of work, and that
// the source is a send addressed to this account. On success the new
// block becomes the chain head.
func (w *Wallet) Receive(source types.BlockHash) (*blocks.ReceiveBlock, error) {
	if w.Head == nil {
		return nil, errors.Errorf("Cannot receive to empty account")
	}
	if w.Work == nil {
		return nil, errors.Errorf("No PoW")
	}
	// The referenced block must exist, be a send, and be addressed to us.
	send_block := store.FetchBlock(source)
	if send_block == nil {
		return nil, errors.Errorf("Source block not found")
	}
	if send_block.Type() != blocks.Send {
		return nil, errors.Errorf("Source block is not a send")
	}
	if send_block.(*blocks.SendBlock).Destination != w.Address() {
		return nil, errors.Errorf("Send is not for this account")
	}
	common := blocks.CommonBlock{
		Work:      *w.Work,
		Signature: "",
	}
	// NOTE(review): unkeyed struct literal -- field order must match
	// blocks.ReceiveBlock's declaration.
	block := blocks.ReceiveBlock{
		w.Head.Hash(),
		source,
		common,
	}
	block.Signature = block.Hash().Sign(w.privateKey)
	w.Head = &block
	return &block, nil
}
// Change creates and signs a change block switching this account's
// representative. Requires an opened account and prepared proof of work.
// On success the new block becomes the chain head.
func (w *Wallet) Change(representative types.Account) (*blocks.ChangeBlock, error) {
	if w.Head == nil {
		return nil, errors.Errorf("Cannot change on empty account")
	}
	if w.Work == nil {
		return nil, errors.Errorf("No PoW")
	}
	common := blocks.CommonBlock{
		Work:      *w.Work,
		Signature: "",
	}
	// NOTE(review): unkeyed struct literal -- field order must match
	// blocks.ChangeBlock's declaration.
	block := blocks.ChangeBlock{
		w.Head.Hash(),
		representative,
		common,
	}
	block.Signature = block.Hash().Sign(w.privateKey)
	w.Head = &block
	return &block, nil
}
|
<filename>crow/nodes/beam.h
#ifndef CROW_BEAM_H
#define CROW_BEAM_H
#include <crow/proto/node.h>
namespace crow
{
// Beam node: keeps a connection to a crowker broker alive by periodically
// announcing this client's name via the alived_object keepalive hook.
class beam : public crow::node, public crow::alived_object
{
    // Name announced to the broker on every keepalive.
    std::string client_name;
    // Network address of the crowker broker.
    crow::hostaddr crowker;
    // Broker-side node number that keepalive packets are addressed to.
    nodeid_t nodeno = CROWKER_BEAMSOCKET_BROCKER_NODE_NO;

public:
    void set_crowker_address(const crow::hostaddr &addr)
    {
        this->crowker = addr;
    }

    void set_client_name(const std::string &client_name)
    {
        this->client_name = client_name;
    }

    // Invoked by the keepalive machinery: re-announce the client name.
    void keepalive_handle() override
    {
        node::send(nodeno, crowker, client_name, 0, 0);
    }

    // Incoming packets are not consumed here, just returned to the pool.
    // NOTE(review): presumably beam only transmits -- confirm packets can
    // safely be dropped.
    void incoming_packet(crow::packet *pack) override
    {
        crow::release(pack);
    }
};
}
#endif |
import * as React from 'react';
import { bind } from 'decko';
import { IActiveOrderColumnData, IOrderListSettings, IWidgetContentProps, IActiveOrder } from 'shared/types/models';
import { ISortInfo } from 'shared/types/ui';
import { OrderList } from '../../../containers';
type IProps = IWidgetContentProps<IOrderListSettings>;

/**
 * Widget wrapper around OrderList: maps the widget's persisted settings to
 * OrderList props and writes setting changes back through onSettingsSave.
 */
class OrderListWidget extends React.PureComponent<IProps> {
  public render() {
    const {
      settings: { sort, shouldOpenCancelOrderModal, hideOtherPairs, ...columns },
    } = this.props;
    // `undefined` replaces the obscure `void 1` expression (same value,
    // conventional spelling); the predicate is only applied when the user
    // chose to hide other currency pairs.
    const filterPredicate = hideOtherPairs ? this.filterOtherPredicate : undefined;
    return (
      <OrderList
        columns={columns}
        filterPredicate={filterPredicate}
        sortInfo={sort}
        onSortInfoChange={this.handleSortInfoChange}
        onCancelConfirmationModalDisable={this.handleCancelConfirmationModalDisable}
        shouldOpenCancelOrderModal={shouldOpenCancelOrderModal}
      />
    );
  }

  /** Persist a new sort order chosen by the user. */
  @bind
  private handleSortInfoChange(sort: ISortInfo<IActiveOrderColumnData>) {
    this.props.onSettingsSave({ sort });
  }

  /** Remember that the user disabled the cancel-order confirmation modal. */
  @bind
  private handleCancelConfirmationModalDisable() {
    this.props.onSettingsSave({ shouldOpenCancelOrderModal: false });
  }

  /** Keep only orders belonging to the currently selected currency pair. */
  @bind
  private filterOtherPredicate(x: IActiveOrder) {
    return x.market === this.props.currentCurrencyPair.id;
  }
}

export default OrderListWidget;
|
<filename>coati/merge.py
from coati.win32 import copy, execute_commandbar
from coati import utils, excel, powerpoint
import time
def resources(slide, resources):
    """Merge every resource into the given slide, in order.

    Each element of ``resources`` must expose a ``merge(slide)`` method;
    the slide is mutated in place by each call.
    """
    for item in resources:
        item.merge(slide)
|
package com.example.veterineruygulamas.Pojos;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
/**
 * Gson model for a Q&amp;A ("ask") record returned by the backend.
 * JSON keys are mapped via {@code @SerializedName}; {@code @Expose}
 * limits (de)serialization to the annotated fields.
 */
public class AskPojo {
    // Backend document id (JSON key "_id").
    @SerializedName("_id")
    @Expose
    private String id;
    // Answer text (JSON key "cevaptext"; Turkish for "answer text").
    @SerializedName("cevaptext")
    @Expose
    private String cevaptext;
    // The question asked by the user.
    @SerializedName("question")
    @Expose
    private String question;

    /** @return the backend document id */
    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    /** @return the answer text */
    public String getCevaptext() {
        return cevaptext;
    }

    public void setCevaptext(String cevaptext) {
        this.cevaptext = cevaptext;
    }

    /** @return the question text */
    public String getQuestion() {
        return question;
    }

    public void setQuestion(String question) {
        this.question = question;
    }
}
|
// Backend base location and the shared API path prefix.
const BASE_URL = process.env.REACT_APP_BACKEND_URL;
const SUFFIX = 'api';

// Builds an absolute URL under the backend's API prefix.
const apiUrl = (path) => `${BASE_URL}/${SUFFIX}/${path}`;

/**
 * Absolute URLs of the authentication-related endpoints.
 * NOTE(review): `profile` deliberately(?) skips the `/api` prefix --
 * confirm against the backend's routing.
 */
export const AUTH_ENDPOINTS = {
  login: apiUrl('user/auth'),
  register: apiUrl('user/'),
  profile: `${BASE_URL}/profile`,
};
|
const Page = require("./page");
/**
 * sub page containing specific selectors and methods for a specific page
 *
 * Page object for the global site header; encapsulates the selectors
 * and interactions for the hamburger menu.
 */
class Header extends Page {
    /**
     * define selectors using getter methods
     *
     * Hamburger/menu icon in the global header. Presumably resolves to
     * a WebdriverIO element via the global `$` -- confirm framework setup.
     */
    get menuButton() {
        return $(".global-menu-icon")
    }
    /**
     * a method to encapsule automation code to interact with the page
     *
     * Opens the global menu: asserts the icon exists, then clicks it.
     */
    async openMenu(){
        const menu = await this.menuButton
        await expect(menu).toExist()
        await menu.click();
    }
    /** Navigates to the site root (empty path appended to the base URL). */
    open() {
        return super.open("");
    }
}
// Export a shared singleton, matching the page-object convention.
module.exports = new Header();
#!/bin/bash
# Symlink installer: evaluates dhall/links.dhall into an array of
# source/target pairs, diffs each pair, then creates the symlinks.
LINK_OPTIONS=$1 # no force by default
# LINK_OPTIONS=${1:-"-f"} # force by default
echo "[OPTIONS] $LINK_OPTIONS"
echo
#
function _get_directory {
    echo $(dirname "$1")
}
# absolute path
function _get_absolute_path {
    echo $(readlink -f "$1")
}
# (internal helper function)
# e.g. echo `_get_absolute_path ~`
# declare -r -a MY_CONFIGURATION_LINKS=($'"./bash/.bashrc" "~/.bashrc"' ...)
# subshell preserve's (super)shell's pwd
# NOTE(review): the loops below split each entry on ':' (source:link),
# which conflicts with the space-separated example above -- confirm the
# actual entry format against links.dhall.
eval $(cd dhall && cat links.dhall | dhall-to-bash --explain --declare MY_CONFIGURATION_LINKS)
echo
echo '----------------------------------------'
echo "[DIFFING]"
echo
for l in ${MY_CONFIGURATION_LINKS[@]}; do
    # repository-relative source, resolved to an absolute path
    MY_SOURCE=$(echo $l | cut -d':' -f1 | xargs readlink -f)
    MY_LINK=$(echo $l | cut -d':' -f2 | xargs echo) # | xargs readlink -f)
    MY_LINK=${MY_LINK/#\~/$HOME} # "unquote, e.g. "~" to ~
    echo
    echo "[ $MY_LINK versus $MY_SOURCE ]"
    echo
    diff $MY_SOURCE $MY_LINK
done
echo
echo '----------------------------------------'
echo "[SYMBOLICALLY LINKING]"
echo
for l in ${MY_CONFIGURATION_LINKS[@]}; do
    MY_SOURCE=$(echo $l | cut -d':' -f1 | xargs readlink -f)
    MY_LINK=$(echo $l | cut -d':' -f2 | xargs echo) # | xargs readlink -f)
    MY_LINK=${MY_LINK/#\~/$HOME} # "unquote, e.g. "~" to ~
    echo
    echo "[ $MY_LINK -> $MY_SOURCE ]"
    # ensure the link's parent directory exists before linking
    mkdir -p $(dirname $MY_LINK)
    ln -s $LINK_OPTIONS $MY_SOURCE $MY_LINK
    # echo $MY_SOURCE
    # echo $MY_LINK
    # echo
done
########################################
# (historical experiments kept for reference)
# cat configuration.dhall | dhall-to-bash --declare CONFIGURATION > configuration.sh && cat configuration.sh
# cat configuration.dhall | dhall-to-bash | xargs echo -e > configuration.sh && cat configuration.sh
# cat configuration.dhall | dhall-to-bash | xargs echo -e > configuration.sh && cat configuration.sh
# cat links.dhall | dhall-to-bash --explain --declare MY_CONFIGURATION_LINKS | eval
# echo $MY_CONFIGURATION_LINKS
# echo ${MY_CONFIGURATION_LINKS[*]}
# cat links.dhall | dhall-to-bash --explain --declare MY_VARIABLE > links.sh && source links.sh
# # absolute path
# function _get_absolute_path {
#     echo $(cd $(dirname "$1"); pwd)/$(basename "$1")
# }
# MY_VARIABLE="${1:-MY_CONFIGURATION_LINKS}"
# $ readlink -f "~/.gitconfig"
# $ readlink -f ~/.gitconfig
# /home/sboo/.gitconfig
<gh_stars>1-10
package com.jiker.keju.taxicost;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class StringTools {

    /** Matches a single ASCII decimal digit; compiled once and reused. */
    private static final Pattern DIGIT = Pattern.compile("[0-9]");

    /**
     * Extracts every decimal digit from {@code text}, preserving order,
     * and concatenates them into one string.
     *
     * @param text the text to scan; must not be null
     * @return the digits of {@code text} joined together, or an empty
     *         string when the text contains no digits
     */
    public static String getNumberFromText(String text) {
        Matcher m = DIGIT.matcher(text);
        // StringBuilder: a local buffer needs no synchronization
        // (the original StringBuffer paid for locking for no benefit).
        StringBuilder sb = new StringBuilder();
        while (m.find()) {
            sb.append(m.group());
        }
        return sb.toString();
    }
}
|
<gh_stars>10-100
/* Copyright 2017 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.bitbrain.braingdx.graphics.shader;
import com.badlogic.gdx.graphics.GL20;
import com.badlogic.gdx.graphics.glutils.FrameBuffer;
import de.bitbrain.braingdx.graphics.postprocessing.PostProcessor;
import de.bitbrain.braingdx.graphics.postprocessing.PostProcessorEffect;
import java.util.ArrayList;
import java.util.List;
/**
* Manages GLSL shaders internally
*
* @author <NAME>
* @version 1.0.0
* @since 1.0.0
*/
public class BatchPostProcessor {
   // Underlying post-processor that captures and renders the frame.
   private PostProcessor processor;
   // Effects managed by this batch; toggled on only during begin()/end().
   private List<PostProcessorEffect> effects;

   /**
    * Creates a batch wrapper around the given processor and registers
    * the initial effects (disabled until begin() is called).
    */
   public BatchPostProcessor(PostProcessor processor, PostProcessorEffect... effects) {
      this.processor = processor;
      this.effects = new ArrayList<PostProcessorEffect>();
      addEffects(effects);
   }

   /**
    * Registers additional effects with the processor. Effects start
    * disabled so they only apply between begin() and end().
    */
   public void addEffects(PostProcessorEffect... effects) {
      for (PostProcessorEffect effect : effects) {
         this.effects.add(effect);
         processor.addEffect(effect);
         effect.setEnabled(false);
      }
   }

   /**
    * Enables all effects, clears to transparent black, and starts
    * capturing the scene into the processor.
    */
   public void begin() {
      setEffectsEnabled(true);
      processor.setClearColor(0f, 0f, 0f, 0f);
      processor.setClearBits(GL20.GL_COLOR_BUFFER_BIT);
      processor.capture();
   }

   /** Renders the captured scene into the given buffer, then disables effects. */
   public void end(FrameBuffer buffer) {
      processor.render(buffer);
      setEffectsEnabled(false);
   }

   /** @return true when at least one effect is registered */
   public boolean hasEffects() {
      return !effects.isEmpty();
   }

   /** Renders the captured scene to the default target, then disables effects. */
   public void end() {
      processor.render();
      setEffectsEnabled(false);
   }

   /** Rebinds GL resources, e.g. after the application resumes. */
   public void resume() {
      processor.rebind();
   }

   // Toggles every managed effect at once.
   private void setEffectsEnabled(boolean enabled) {
      for (PostProcessorEffect effect : effects) {
         effect.setEnabled(enabled);
      }
   }

   /** Disposes all managed effects and forgets them. */
   public void clear() {
      for (PostProcessorEffect effect : effects) {
         effect.dispose();
      }
      effects.clear();
   }
}
|
#!/bin/bash
#set -o verbose
# Bootstraps sudo on a minimal Debian-like system: when run as a normal
# user it re-executes itself as root (via su), installs sudo, and adds
# the original user to the sudoers group.
SCRIPT=`realpath $0`
echo "Running ${SCRIPT}"
#USER_GROUP="${USER}:$(id -gn $USER)"
echo "You are: ${USER}"
echo "First argument is: ${1}"
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root."
    echo "Please enter your root password below."
    # Re-run this same script as root, passing the invoking user's name
    # as $1 so the root branch knows whom to add to sudoers.
    su --preserve-environment --command "${SCRIPT} ${USER}" root
    # NOTE(review): exits 1 unconditionally, even when the su run
    # succeeded -- presumably intentional so the outer shell stops here;
    # confirm callers don't treat this as failure.
    exit 1
fi
echo "You're root, nice!"
echo "Installing sudo"
apt update
apt install sudo -y
echo "Adding ${1} to sudoers"
/sbin/adduser "${1}" sudo
echo "Done."
echo "Run \"newgrp sudo\" and \"./start.sh\" to continue"
#export MAIL_SERVER=localhost
#export MAIL_PORT=8025
# Start a local debugging SMTP server that prints received mail to stdout.
# NOTE: the stdlib smtpd module was removed in Python 3.12; on newer
# Pythons use the third-party aiosmtpd package instead.
python -m smtpd -n -c DebuggingServer localhost:8025
"use strict";
// NOTE: compiled TypeScript output (see the sourceMappingURL below);
// prefer editing the .ts source rather than this file.
Object.defineProperty(exports, "__esModule", { value: true });
const ServiceProvider_1 = require("../Application/ServiceProvider");
const Validation_1 = require("../Validation");
/**
 * @name ValidationServiceProvider
 * @author <NAME>
 */
class ValidationServiceProvider extends ServiceProvider_1.default {
    /**
     * Registers Service Provider
     *
     * Binds the Validation module into the application container under
     * 'Haluka/Core/Validator' (alias 'Validator').
     */
    register() {
        this.app.register({
            provider: {
                name: 'Haluka/Core/Validator',
                alias: 'Validator'
            },
            content: function () {
                return Validation_1.default;
            }
        });
    }
}
exports.default = ValidationServiceProvider;
//# sourceMappingURL=ValidationServiceProvider.js.map
define(['../map/VisibleArea','../../worldwind/navigate/LookAt'], function (VisibleArea,LookAt) {
    // Alias: this module treats WorldWind's LookAt as the base navigator.
    var LookAtNavigator = LookAt;
    /**
     * Specific navigator for movement. It allows us to add logic to handling of the mouse wheel.
     * @constructor
     * @param options {Object} Options object containing
     *  wwd {WorldWindow}
     *  zoomLevelListeners {Function[]} accepting two arguments. First is zoom level and second is viewport.
     *  viewPortChangedListeners {Function[]} accepting two arguments. First is zoom level and second is viewport.
     * @augments LookAtNavigator
     */
    var MoveNavigator = function (options) {
        LookAtNavigator.call(this, options.wwd);
        //options.wwd.navigator = this;
        this._lastRange = 0;
        this._zoomLevelListeners = options.zoomLevelListeners || [];
        this._zoomLevel = 10;
        this._viewPortChangeListeners = options.viewPortChangedListeners || [];
        // Initial eye range in metres -- presumably a whole-globe view; confirm.
        this.range = 23440143;
        this.worldWindow=options.wwd;
        this.lookAtLocation={};
        this.lookAtLocation.latitude = 10;
        this.lookAtLocation.longitude = 15;
        var self = this;
        // Lazily-evaluated accessors so VisibleArea always sees current state.
        this._visibleArea = new VisibleArea({
            viewport: function () {
                return self.worldWindow.viewport
            },
            eyeGeoCoord: function () {
                return self.lookAtLocation
            },
            range: function () {
                return self.range
            },
            wwd: self.worldWindow
        });
    };
    MoveNavigator.prototype = Object.create(LookAtNavigator.prototype);
    /**
     * Overrides the method from descendant and adds the functionality for modifying shown SurfacePolygons
     * showing the choropleth.
     * @inheritDoc
     */
    MoveNavigator.prototype.handleWheelEvent = function (event) {
        LookAtNavigator.prototype.handleWheelEvent.apply(this, arguments);
        // Notify zoom listeners only when the discrete level actually changed.
        var previousZoomLevel = this._zoomLevel;
        if (previousZoomLevel != this.getZoomLevel()) {
            this.callListeners(this._zoomLevelListeners);
        }
        this._lastRange = this.range;
    };
    /**
     * It returns current range. Shouldn't be used from outside.
     * @returns {Number} Number representing current distance from the Earth surface. // I am not sure about this
     * statement.
     */
    MoveNavigator.prototype.getRange = function () {
        return this.range;
    };
    /**
     * It returns current zoom level.
     * @returns {Number} Number representing current level. This number represents how many rectangles one * one
     * should be grouped together.
     *
     * The level only updates when the range crosses one of the band
     * boundaries relative to _lastRange; within a band it stays cached.
     */
    MoveNavigator.prototype.getZoomLevel = function () {
        var MILLION = 1000000;
        var ranges = [
            0.8 * MILLION,
            2 * MILLION,
            5 * MILLION,
            10 * MILLION
        ];
        // First
        if (this.getRange() < ranges[0] && this._lastRange > ranges[0]) {
            this._zoomLevel = 1;
        }
        if ((this.getRange() > ranges[0] && this.getRange() < ranges[1])
            && (this._lastRange < ranges[0] || this._lastRange > ranges[1])) {
            this._zoomLevel = 2;
        }
        if ((this.getRange() > ranges[1] && this.getRange() < ranges[2])
            && (this._lastRange < ranges[1] || this._lastRange > ranges[2])) {
            this._zoomLevel = 5;
        }
        if ((this.getRange() > ranges[2] && this.getRange() < ranges[3])
            && (this._lastRange < ranges[2] || this._lastRange > ranges[3])) {
            this._zoomLevel = 10;
        }
        //Last
        if (this.getRange() > ranges[ranges.length - 1] && this._lastRange < ranges[ranges.length - 1]) {
            this._zoomLevel = 20;
        }
        return this._zoomLevel;
    };
    /**
     * @inheritDoc
     * Also call listeners waiting for the change in the visible viewport. It happens whenever user ends with
     * current panning or dragging.
     *
     * NOTE(review): this and the two helpers below are assigned onto
     * LookAtNavigator.prototype (i.e. the shared base LookAt prototype),
     * which patches EVERY LookAt-based navigator, not just MoveNavigator.
     * The surrounding code suggests MoveNavigator.prototype was intended
     * -- confirm before changing, as other code may rely on the patch.
     */
    LookAtNavigator.prototype.handlePanOrDrag = function (recognizer) {
        if (this.worldWindow.globe.is2D()) {
            this.handlePanOrDrag2D(recognizer);
        } else {
            this.handlePanOrDrag3D(recognizer);
        }
    };
    // Intentionally not documented.
    LookAtNavigator.prototype.handlePanOrDrag3D = function (recognizer) {
        var state = recognizer.state,
            tx = recognizer.translationX,
            ty = recognizer.translationY;
        if (state == WorldWind.BEGAN) {
            this.lastPoint.set(0, 0);
        } else if (state == WorldWind.CHANGED) {
            // Convert the translation from screen coordinates to arc degrees. Use this navigator's range as a
            // metric for converting screen pixels to meters, and use the globe's radius for converting from meters
            // to arc degrees.
            var canvas = this.worldWindow.canvas,
                globe = this.worldWindow.globe,
                globeRadius = WorldWind.WWMath.max(globe.equatorialRadius, globe.polarRadius),
                distance = WorldWind.WWMath.max(1, this.range),
                metersPerPixel = WorldWind.WWMath.perspectivePixelSize(canvas.clientWidth, canvas.clientHeight, distance),
                forwardMeters = (ty - this.lastPoint[1]) * metersPerPixel,
                sideMeters = -(tx - this.lastPoint[0]) * metersPerPixel,
                forwardDegrees = (forwardMeters / globeRadius) * WorldWind.Angle.RADIANS_TO_DEGREES,
                sideDegrees = (sideMeters / globeRadius) * WorldWind.Angle.RADIANS_TO_DEGREES;
            // Apply the change in latitude and longitude to this navigator, relative to the current heading.
            var sinHeading = Math.sin(this.heading * WorldWind.Angle.DEGREES_TO_RADIANS),
                cosHeading = Math.cos(this.heading * WorldWind.Angle.DEGREES_TO_RADIANS);
            this.lookAtLocation.latitude += forwardDegrees * cosHeading - sideDegrees * sinHeading;
            this.lookAtLocation.longitude += forwardDegrees * sinHeading + sideDegrees * cosHeading;
            this.lastPoint.set(tx, ty);
            this.applyLimits();
            this.worldWindow.redraw();
        }
    };
    // Intentionally not documented.
    // NOTE(review): Vec3 and Matrix are referenced unqualified below while
    // the rest of this file uses WorldWind.* -- presumably they resolve to
    // globals exported by the WorldWind bundle; confirm.
    LookAtNavigator.prototype.handlePanOrDrag2D = function (recognizer) {
        var state = recognizer.state,
            x = recognizer.clientX,
            y = recognizer.clientY,
            tx = recognizer.translationX,
            ty = recognizer.translationY;
        if (state == WorldWind.BEGAN) {
            this.beginPoint.set(x, y);
            this.lastPoint.set(x, y);
        } else if (state == WorldWind.CHANGED) {
            var x1 = this.lastPoint[0],
                y1 = this.lastPoint[1],
                x2 = this.beginPoint[0] + tx,
                y2 = this.beginPoint[1] + ty;
            this.lastPoint.set(x2, y2);
            var navState = this.currentState(),
                globe = this.worldWindow.globe,
                ray = navState.rayFromScreenPoint(this.worldWindow.canvasCoordinates(x1, y1)),
                point1 = new Vec3(0, 0, 0),
                point2 = new Vec3(0, 0, 0),
                origin = new Vec3(0, 0, 0);
            if (!globe.intersectsLine(ray, point1)) {
                return;
            }
            ray = navState.rayFromScreenPoint(this.worldWindow.canvasCoordinates(x2, y2));
            if (!globe.intersectsLine(ray, point2)) {
                return;
            }
            // Transform the original navigator state's modelview matrix to account for the gesture's change.
            var modelview = Matrix.fromIdentity();
            modelview.copy(navState.modelview);
            modelview.multiplyByTranslation(point2[0] - point1[0], point2[1] - point1[1], point2[2] - point1[2]);
            // Compute the globe point at the screen center from the perspective of the transformed navigator state.
            modelview.extractEyePoint(ray.origin);
            modelview.extractForwardVector(ray.direction);
            if (!globe.intersectsLine(ray, origin)) {
                return;
            }
            // Convert the transformed modelview matrix to a set of navigator properties, then apply those
            // properties to this navigator.
            var params = modelview.extractViewingParameters(origin, this.roll, globe, {});
            this.lookAtLocation.copy(params.origin);
            this.range = params.range;
            this.heading = params.heading;
            this.tilt = params.tilt;
            this.roll = params.roll;
            this.applyLimits();
            this.worldWindow.redraw();
        }
    };
    /**
     * It returns currently visible area of the Earth as a Bounding Box, which definitely surrounds the whole area.
     * @returns {BoundingBox} Bounding Box of visible area.
     */
    MoveNavigator.prototype.getVisibleAreaBoundaries = function () {
        // Status codes: 0 = whole globe visible, 1 = globe partially
        // visible, otherwise the viewport is fully inside the globe.
        var status = this._visibleArea.visibilityStatus();
        if (status == 0) {
            return this._visibleArea.getVisibleArea4FullGlobe();
        }
        else if (status == 1) {
            return this._visibleArea.getVisibleArea4PartGlobe();
        }
        else {
            return this._visibleArea.getVisibleArea4FullViewport();
        }
    };
    /**
     * It calls all listeners. No information is passed to them. They must query the state somehow.
     */
    MoveNavigator.prototype.callListeners = function (listeners) {
        listeners.forEach(function (listener) {
            listener();
        })
    };
    return MoveNavigator;
})
;
# Builds and installs saurik's llvm-gcc-4.2 as a cross-compiler, using
# variables ($gcc, $build, $target, $prefix, $GMAKE) from 0env.sh.
. ./0env.sh
set -e
set -x
# If there are errors and you want to edit and resume,
# comment out the rm -rf, the git clone, usually
# the configure -- just cd and make
rm -rf $gcc
git clone git://git.saurik.com/llvm-gcc-4.2 $gcc
rm -rf $gcc/intl
rm -rf $build/gcc
mkdir -p $build/gcc
cd $build/gcc
# NOTE(review): the final line-continuation backslash makes the comment
# line below part of this command; the '#' then terminates it, so the
# command still works -- but the trailing '\' looks accidental; confirm.
$gcc/configure -target=$target -prefix=$prefix \
    -with-sysroot -enable-wchar_t=no -disable-nls \
    -enable-languages=c,objc,c++,obj-c++ \
    --with-as="${prefix}"/bin/"${target}"-as \
    --with-ld="${prefix}"/bin/"${target}"-ld \
# remove gmp/mpfr/iconv dependencies
sed -ie "s/HOST_GMPLIBS = -lmpfr -lgmp/HOST_GMPLIBS = /" Makefile
$GMAKE configure-host
# Strip iconv detection out of the generated config headers.
for a in libcpp/config.h gcc/auto-host.h; do
    sed -i "" -e "s/^#define HAVE_ICONV.*$//" $a
done
for a in gcc/Makefile libcpp/Makefile; do
    sed -i "" -e "s/^LIB_ICONV.*$//" $a
done
$GMAKE
$GMAKE install
<reponame>dldhk97/ddonawa-server
package db;
import java.util.ArrayList;
import java.util.Arrays;
import model.Favorite;
/**
 * DB access layer for the Favorite ("wishlist") table.
 *
 * SECURITY(review): queries below are built by string concatenation of
 * caller-supplied values (accountId, keyValues) -- a SQL injection risk.
 * Flagged rather than changed here because DBConnector's interface is
 * string-based; it should be migrated to parameterized queries.
 */
public class FavoriteManager extends DBManager {
	// Returns the favorites whose account id matches the given one.
	public ArrayList<Favorite> findByAccountId(String accountId) throws Exception{
		ArrayList<String> tableColumns = getTableColumnsAll();
		// Build the query. (Original comment mentioned ORDER BY for the
		// most recent rows, but no ORDER BY is present -- stale comment?)
		String query = "SELECT * FROM `" +
				DBInfo.DB_NAME.toString() + "`.`" + DBInfo.TABLE_FAVORITE.toString() + "` WHERE `" +
				DBInfo.TABLE_FAVORITE_COLUMN_ACCOUNTID.toString() + "` = '" + accountId + "'";
		DBConnector dc = new DBConnector();
		ArrayList<ArrayList<String>> received = dc.select(query, tableColumns);
		// Convert the 2-D string result set into a list of Favorite objects.
		return getModelList(received);
	}

	/** All column names of the Favorite table, in schema order. */
	@Override
	protected ArrayList<String> getTableColumnsAll() {
		return new ArrayList<>(Arrays.asList(
				DBInfo.TABLE_FAVORITE_COLUMN_ACCOUNTID.toString(),
				DBInfo.TABLE_FAVORITE_COLUMN_PRODUCTNAME.toString(),
				DBInfo.TABLE_FAVORITE_COLUMN_TARGETPRICE.toString()
				));
	}

	/** SELECT by primary key (accountId, productName). See class SECURITY note. */
	@Override
	protected String getSelectQueryByKeys(ArrayList<String> keyValues) {
		return "SELECT * FROM `" +
				DBInfo.DB_NAME.toString() + "`.`" + DBInfo.TABLE_FAVORITE.toString() + "` WHERE `" +
				DBInfo.TABLE_FAVORITE_COLUMN_ACCOUNTID.toString() + "` = '" + keyValues.get(0) + "' AND `" +
				DBInfo.TABLE_FAVORITE_COLUMN_PRODUCTNAME.toString() + "` = '" + keyValues.get(1) + "'";
	}

	/** Maps raw result rows (accountId, productName, targetPrice) to Favorites; null when empty. */
	@Override
	protected ArrayList<Favorite> getModelList(ArrayList<ArrayList<String>> received) {
		ArrayList<Favorite> result = new ArrayList<Favorite>();
		for(ArrayList<String> row : received) {
			String accountId = row.get(0);
			String productName = row.get(1);
			double targetPrice = Double.valueOf(row.get(2));
			result.add(new Favorite(accountId, productName, targetPrice));
		}
		return result.size() > 0 ? result : null;
	}

	/** Column values of a Favorite, in table-column order. */
	@Override
	protected ArrayList<String> getValuesFromObject(Object object) {
		Favorite favorite = (Favorite) object;
		return new ArrayList<>(Arrays.asList(
				favorite.getAccountId(),
				favorite.getProductName(),
				String.valueOf(favorite.getTargetPrice())
				));
	}

	@Override
	protected String getTableName() {
		return DBInfo.TABLE_FAVORITE.toString();
	}

	/** Primary-key values of a Favorite: (accountId, productName). */
	@Override
	protected ArrayList<String> getKeyValuesFromObject(Object object) {
		Favorite favorite = (Favorite) object;
		return new ArrayList<>(Arrays.asList(
				favorite.getAccountId(),
				favorite.getProductName()
				));
	}

	/** Primary-key column names, matching getKeyValuesFromObject's order. */
	@Override
	protected ArrayList<String> getKeyColumns() {
		return new ArrayList<>(Arrays.asList(
				DBInfo.TABLE_FAVORITE_COLUMN_ACCOUNTID.toString(),
				DBInfo.TABLE_FAVORITE_COLUMN_PRODUCTNAME.toString()
				));
	}
}
|
<filename>test/tests.js
// import './renderPlants.test.js';
import './determineWaterAmount.test.js';
|
#!/bin/bash
dir=$(pwd)
cd $(dirname "${BASH_SOURCE[0]}")
cd ..
# Build
docker build -t meedan/%app_name% .
# Run
secret=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)
docker run -d -p 3000:80 --name %app_name% -e SECRET_KEY_BASE=$secret meedan/%app_name%
echo
docker ps | grep '%app_name%'
echo
echo '-----------------------------------------------------------'
echo 'Now go to your browser and access http://localhost:3000/api'
echo '-----------------------------------------------------------'
|
<gh_stars>100-1000
/***********************************************************************************************************************
* OpenStudio(R), Copyright (c) 2008-2021, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
*
* (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
*
* (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission from the respective party.
*
* (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works
* may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior
* written permission from Alliance for Sustainable Energy, LLC.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED
* STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************************************************************/
#include <gtest/gtest.h>
#include "ModelFixture.hpp"
#include "../RadianceParameters.hpp"
#include "../RadianceParameters_Impl.hpp"
using namespace openstudio;
using namespace openstudio::model;
// Exercises every getter/setter/reset triple of the unique
// RadianceParameters model object against its IDD defaults.
TEST_F(ModelFixture, RadianceParameters_GettersSetters) {
  Model model;
  auto radianceParameters = model.getUniqueModelObject<RadianceParameters>();

  // Accumulated Rays per Record: Integer
  // Check Idd default: 1
  EXPECT_EQ(1, radianceParameters.accumulatedRaysperRecord());
  EXPECT_TRUE(radianceParameters.setAccumulatedRaysperRecord(2));
  EXPECT_EQ(2, radianceParameters.accumulatedRaysperRecord());
  radianceParameters.resetAccumulatedRaysperRecord();
  EXPECT_EQ(1, radianceParameters.accumulatedRaysperRecord());

  // Direct Threshold: Double
  // Check Idd default: 0.0
  EXPECT_EQ(0.0, radianceParameters.directThreshold());
  EXPECT_TRUE(radianceParameters.setDirectThreshold(0.0));
  EXPECT_EQ(0.0, radianceParameters.directThreshold());
  radianceParameters.resetDirectThreshold();
  EXPECT_EQ(0.0, radianceParameters.directThreshold());

  // Direct Certainty: Double
  // Check Idd default: 1.0
  EXPECT_EQ(1.0, radianceParameters.directCertainty());
  EXPECT_TRUE(radianceParameters.setDirectCertainty(0.6));
  EXPECT_EQ(0.6, radianceParameters.directCertainty());
  radianceParameters.resetDirectCertainty();
  EXPECT_EQ(1.0, radianceParameters.directCertainty());

  // Direct Jitter: Double
  // Check Idd default: 1.0
  EXPECT_EQ(1.0, radianceParameters.directJitter());
  EXPECT_TRUE(radianceParameters.setDirectJitter(0.7));
  EXPECT_EQ(0.7, radianceParameters.directJitter());
  radianceParameters.resetDirectJitter();
  EXPECT_EQ(1.0, radianceParameters.directJitter());

  // Direct Pretest: Double
  // Check Idd default: 1.0
  EXPECT_EQ(1.0, radianceParameters.directPretest());
  EXPECT_TRUE(radianceParameters.setDirectPretest(0.8));
  EXPECT_EQ(0.8, radianceParameters.directPretest());
  radianceParameters.resetDirectPretest();
  EXPECT_EQ(1.0, radianceParameters.directPretest());

  // Ambient Bounces VMX: Integer
  // Check Idd default: 6
  EXPECT_EQ(6, radianceParameters.ambientBouncesVMX());
  EXPECT_TRUE(radianceParameters.setAmbientBouncesVMX(3));
  EXPECT_EQ(3, radianceParameters.ambientBouncesVMX());
  radianceParameters.resetAmbientBouncesVMX();
  EXPECT_EQ(6, radianceParameters.ambientBouncesVMX());

  // Ambient Bounces DMX: Integer
  // Check Idd default: 2
  EXPECT_EQ(2, radianceParameters.ambientBouncesDMX());
  EXPECT_TRUE(radianceParameters.setAmbientBouncesDMX(1));
  EXPECT_EQ(1, radianceParameters.ambientBouncesDMX());
  radianceParameters.resetAmbientBouncesDMX();
  EXPECT_EQ(2, radianceParameters.ambientBouncesDMX());

  // Ambient Divisions VMX: Integer
  // Check Idd default: 4050
  EXPECT_EQ(4050, radianceParameters.ambientDivisionsVMX());
  EXPECT_TRUE(radianceParameters.setAmbientDivisionsVMX(2025));
  EXPECT_EQ(2025, radianceParameters.ambientDivisionsVMX());
  radianceParameters.resetAmbientDivisionsVMX();
  EXPECT_EQ(4050, radianceParameters.ambientDivisionsVMX());

  // Ambient Divisions DMX: Integer
  // Check Idd default: 512
  EXPECT_EQ(512, radianceParameters.ambientDivisionsDMX());
  EXPECT_TRUE(radianceParameters.setAmbientDivisionsDMX(256));
  EXPECT_EQ(256, radianceParameters.ambientDivisionsDMX());
  radianceParameters.resetAmbientDivisionsDMX();
  EXPECT_EQ(512, radianceParameters.ambientDivisionsDMX());

  // Ambient Supersamples: Integer
  // Check Idd default: 256
  EXPECT_EQ(256, radianceParameters.ambientSupersamples());
  EXPECT_TRUE(radianceParameters.setAmbientSupersamples(128));
  EXPECT_EQ(128, radianceParameters.ambientSupersamples());
  radianceParameters.resetAmbientSupersamples();
  EXPECT_EQ(256, radianceParameters.ambientSupersamples());

  // Limit Weight VMX: Double
  // Check Idd default: 0.001
  EXPECT_EQ(0.001, radianceParameters.limitWeightVMX());
  EXPECT_TRUE(radianceParameters.setLimitWeightVMX(0.21));
  EXPECT_EQ(0.21, radianceParameters.limitWeightVMX());
  radianceParameters.resetLimitWeightVMX();
  EXPECT_EQ(0.001, radianceParameters.limitWeightVMX());

  // Limit Weight DMX: Double
  // Check Idd default: 0.001
  EXPECT_EQ(0.001, radianceParameters.limitWeightDMX());
  EXPECT_TRUE(radianceParameters.setLimitWeightDMX(0.005));
  EXPECT_EQ(0.005, radianceParameters.limitWeightDMX());
  radianceParameters.resetLimitWeightDMX();
  EXPECT_EQ(0.001, radianceParameters.limitWeightDMX());

  // Klems Sampling Density: Integer
  // Check Idd default: 500
  EXPECT_EQ(500, radianceParameters.klemsSamplingDensity());
  EXPECT_TRUE(radianceParameters.setKlemsSamplingDensity(499));
  EXPECT_EQ(499, radianceParameters.klemsSamplingDensity());
  radianceParameters.resetKlemsSamplingDensity();
  EXPECT_EQ(500, radianceParameters.klemsSamplingDensity());

  // Sky Discretization Resolution: String
  // Check Idd default: "146"
  EXPECT_EQ("146", radianceParameters.skyDiscretizationResolution());
  // Test a valid choice
  EXPECT_TRUE(radianceParameters.setSkyDiscretizationResolution("578"));
  EXPECT_EQ("578", radianceParameters.skyDiscretizationResolution());
  // Test an invalid choice
  // An invalid choice is rejected and the previous value is retained.
  EXPECT_FALSE(radianceParameters.setSkyDiscretizationResolution("BadChoice"));
  EXPECT_EQ("578", radianceParameters.skyDiscretizationResolution());
}
// Verifies the coarse/fine preset detection, and that an explicit sky
// resolution ("578") that belongs to neither preset makes both
// isCoarseSettings() and isFineSettings() report false.
TEST_F(ModelFixture, RadianceParameters_FineAndCoarseSettings) {
  Model model;
  auto radianceParameters = model.getUniqueModelObject<RadianceParameters>();

  // Test CTOR setting coarse
  EXPECT_TRUE(radianceParameters.isCoarseSettings());

  radianceParameters.applyFineSettings();
  EXPECT_TRUE(radianceParameters.isFineSettings());

  // Overriding one field ("578") deviates from both presets.
  radianceParameters.setSkyDiscretizationResolution("578");
  EXPECT_EQ(radianceParameters.skyDiscretizationResolution(), "578");
  EXPECT_FALSE(radianceParameters.isCoarseSettings());
  EXPECT_FALSE(radianceParameters.isFineSettings());
}
|
//============================================================================
// Copyright 2009-2020 ECMWF.
// This software is licensed under the terms of the Apache Licence version 2.0
// which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
// In applying this licence, ECMWF does not waive the privileges and immunities
// granted to it by virtue of its status as an intergovernmental organisation
// nor does it submit to any jurisdiction.
//============================================================================
#ifndef DASHBOARDTITLE_HPP_
#define DASHBOARDTITLE_HPP_
#include <QObject>
#include <QPixmap>
#include "ServerFilter.hpp"
#include "ServerObserver.hpp"
class Dashboard;
class ServerItem;
// Maintains the title, tooltip, description and pixmaps shown for a
// Dashboard tab, keeping them in sync with the dashboard's server
// filter and the state of the servers it references. Emits changed()
// whenever the rendered title must be refreshed.
class DashboardTitle : public QObject, public ServerFilterObserver, public ServerObserver
{
    Q_OBJECT

    friend class Dashboard;

public:
    DashboardTitle(ServerFilter*,Dashboard *parent);
    ~DashboardTitle() override;

    Dashboard* dashboard() const {return dashboard_;}
    QString title() const {return title_;}
    QString tooltip() const {return tooltip_;}
    QString desc() const {return desc_;}
    QPixmap pix() const {return pix_;}
    QPixmap descPix() const {return descPix_;}
    // Limits the rendered pixmap width (e.g. to fit the tab bar).
    void setMaxPixWidth(int w);
    // Marks whether this dashboard is the currently selected tab.
    void setCurrent(bool b);
    int fullWidth() const;

    // ServerFilterObserver interface: react to servers being added to,
    // removed from, or changed within the dashboard's filter.
    void notifyServerFilterAdded(ServerItem* item) override;
    void notifyServerFilterRemoved(ServerItem* item) override;
    void notifyServerFilterChanged(ServerItem*) override;
    void notifyServerFilterDelete() override;

    // ServerObserver interface: refresh the title on relevant server
    // lifecycle events; no-op overrides are intentional.
    void notifyDefsChanged(ServerHandler* server, const std::vector<ecf::Aspect::Type>& a) override;
    void notifyServerDelete(ServerHandler* server) override;
    void notifyBeginServerClear(ServerHandler* server) override {}
    void notifyEndServerClear(ServerHandler* server) override;
    void notifyBeginServerScan(ServerHandler* server,const VServerChange&) override {}
    void notifyEndServerScan(ServerHandler* server) override;
    void notifyServerConnectState(ServerHandler* server) override;
    void notifyServerActivityChanged(ServerHandler* server) override;
    void notifyServerSuiteFilterChanged(ServerHandler* server) override {}

Q_SIGNALS:
    // Emitted when any displayed attribute (title/tooltip/pixmaps) changed.
    void changed(DashboardTitle*);

private:
    void clear();
    void updateTitle();

    Dashboard* dashboard_;      // owning dashboard (parent)
    ServerFilter* filter_;      // observed server filter (not owned)
    int maxPixWidth_;           // cap for the rendered pixmap width
    QPixmap pix_;
    QPixmap descPix_;
    QString title_;
    QString tooltip_;
    QString desc_;
    bool current_;              // true when this tab is selected
    static int lighter_;
};
#endif
|
// Copyright (c) 2012-2020 <NAME>
// SPDX-License-Identifier: MIT
#include <defs.h>
#include <file.h>
#include <fs.h>
#include <ip.h>
#include <net.h>
#include <param.h>
#include <sleeplock.h>
#include <socket.h>
#include <spinlock.h>
#include <types.h>
/* In-kernel socket object, referenced from file->socket.
 * type: SOCK_STREAM or SOCK_DGRAM; desc: descriptor returned by the
 * matching tcp_api_*/ /* or udp_api_* protocol layer. */
struct socket {
    int type;
    int desc;
};
/*
 * Allocate a file object backed by a new socket.
 * Only AF_INET with SOCK_STREAM or SOCK_DGRAM and protocol 0 is
 * supported. Returns the file on success, NULL on bad arguments or
 * allocation failure (partially allocated objects are released).
 * NOTE(review): the tcp/udp open result is stored unchecked --
 * presumably it cannot fail, confirm against the protocol API.
 */
struct file *
socketalloc(int domain, int type, int protocol) {
    struct file *f;
    struct socket *s;
    if (domain != AF_INET || (type != SOCK_STREAM && type != SOCK_DGRAM) || protocol != 0) {
        return NULL;
    }
    f = filealloc();
    if (!f) {
        return NULL;
    }
    s = (struct socket *)kalloc();
    if (!s) {
        fileclose(f);
        return NULL;
    }
    s->type = type;
    /* Open the matching protocol endpoint for this socket type. */
    s->desc = (type == SOCK_STREAM ? tcp_api_open() : udp_api_open());
    f->type = FD_SOCKET;
    f->readable = 1;
    f->writable = 1;
    f->socket = s;
    return f;
}
/* Close the protocol endpoint; the socket/file memory is presumably
 * reclaimed by the file layer -- confirm. */
void socketclose(struct socket *s) {
    if (s->type == SOCK_STREAM)
        tcp_api_close(s->desc);
    else
        udp_api_close(s->desc);
}

/* Connect a stream socket to a remote address; -1 for datagram sockets. */
int socketconnect(struct socket *s, struct sockaddr *addr, int addrlen) {
    if (s->type != SOCK_STREAM)
        return -1;
    return tcp_api_connect(s->desc, addr, addrlen);
}

/* Bind a local address; dispatches to the matching protocol layer. */
int socketbind(struct socket *s, struct sockaddr *addr, int addrlen) {
    if (s->type == SOCK_STREAM)
        return tcp_api_bind(s->desc, addr, addrlen);
    else
        return udp_api_bind(s->desc, addr, addrlen);
}

/* Put a stream socket into listening state; -1 for datagram sockets. */
int socketlisten(struct socket *s, int backlog) {
    if (s->type != SOCK_STREAM)
        return -1;
    return tcp_api_listen(s->desc, backlog);
}
/*
 * Accept a pending connection on a listening stream socket.
 * Allocates a fresh file + socket pair for the accepted connection
 * (filling *addr/*addrlen with the peer address) and returns it, or
 * NULL on error; partially allocated objects are released on failure.
 */
struct file *
socketaccept(struct socket *s, struct sockaddr *addr, int *addrlen) {
    int adesc;
    struct file *f;
    struct socket *as;
    if (s->type != SOCK_STREAM)
        return NULL;
    f = filealloc();
    if (!f) {
        return NULL;
    }
    as = (struct socket *)kalloc();
    if (!as) {
        fileclose(f);
        return NULL;
    }
    /* May block until a peer connects; -1 indicates failure. */
    adesc = tcp_api_accept(s->desc, addr, addrlen);
    if (adesc == -1) {
        fileclose(f);
        kfree((void *)as);
        return NULL;
    }
    as->type = s->type;
    as->desc = adesc;
    f->type = FD_SOCKET;
    f->readable = 1;
    f->writable = 1;
    f->socket = as;
    return f;
}
int socketread(struct socket *s, char *addr, int n) {
if (s->type != SOCK_STREAM)
return -1;
return tcp_api_recv(s->desc, (uint8_t *)addr, n);
}
// Write n bytes from addr to a connected TCP socket; -1 for datagram sockets.
int socketwrite(struct socket *s, char *addr, int n) {
  if (s->type == SOCK_STREAM)
    return tcp_api_send(s->desc, (uint8_t *)addr, n);
  return -1;
}
// Receive a datagram, reporting the sender in addr/addrlen; UDP only.
int socketrecvfrom(struct socket *s, char *buf, int n, struct sockaddr *addr, int *addrlen) {
  if (s->type == SOCK_DGRAM)
    return udp_api_recvfrom(s->desc, (uint8_t *)buf, n, addr, addrlen);
  return -1;
}
// Send a datagram to the address in addr; UDP only.
int socketsendto(struct socket *s, char *buf, int n, struct sockaddr *addr, int addrlen) {
  if (s->type == SOCK_DGRAM)
    return udp_api_sendto(s->desc, (uint8_t *)buf, n, addr, addrlen);
  return -1;
}
// Network-interface configuration ioctls (SIOC*) dispatched from a socket.
// arg always points to a struct ifreq carrying both input (usually
// ifr_name) and output fields. Returns 0 on success, -1 on unknown request
// or lookup failure.
int socketioctl(struct socket *s, int req, void *arg) {
  struct ifreq *ifreq;
  struct netdev *dev;
  struct netif *iface;
  switch (req) {
  case SIOCGIFINDEX:
    // Look up a device by name and report its index.
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    ifreq->ifr_ifindex = dev->index;
    break;
  case SIOCGIFNAME:
    // Reverse lookup: index -> device name.
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_index(ifreq->ifr_ifindex);
    if (!dev)
      return -1;
    strncpy(ifreq->ifr_name, dev->name, sizeof(ifreq->ifr_name));
    break;
  case SIOCSIFNAME:
    /* TODO */
    break;
  case SIOCGIFHWADDR:
    // Copy out the device's hardware address (dev->alen bytes).
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    /* TODO: HW type check */
    memcpy(ifreq->ifr_hwaddr.sa_data, dev->addr, dev->alen);
    break;
  case SIOCSIFHWADDR:
    /* TODO */
    break;
  case SIOCGIFFLAGS:
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    ifreq->ifr_flags = dev->flags;
    break;
  case SIOCSIFFLAGS:
    // Only the IFF_UP transition is acted on: open or stop the device.
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    if ((dev->flags & IFF_UP) != (ifreq->ifr_flags & IFF_UP)) {
      if (ifreq->ifr_flags & IFF_UP)
        dev->ops->open(dev);
      else
        dev->ops->stop(dev);
    }
    break;
  case SIOCGIFADDR:
    // Report the unicast address of the interface matching the family.
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    iface = netdev_get_netif(dev, ifreq->ifr_addr.sa_family);
    if (!iface)
      return -1;
    ((struct sockaddr_in *)&ifreq->ifr_addr)->sin_addr = ((struct netif_ip *)iface)->unicast;
    break;
  case SIOCSIFADDR:
    // Set the interface address: reconfigure an existing IP interface,
    // or attach a fresh one (host netmask 0xffffffff, no gateway).
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    iface = netdev_get_netif(dev, ifreq->ifr_addr.sa_family);
    if (iface) {
      if (ip_netif_reconfigure(iface, ((struct sockaddr_in *)&ifreq->ifr_addr)->sin_addr, ((struct netif_ip *)iface)->netmask, ((struct netif_ip *)iface)->gateway) == -1)
        return -1;
    } else {
      iface = ip_netif_alloc(((struct sockaddr_in *)&ifreq->ifr_addr)->sin_addr, 0xffffffff, 0);
      if (!iface)
        return -1;
      netdev_add_netif(dev, iface);
    }
    break;
  case SIOCGIFNETMASK:
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    iface = netdev_get_netif(dev, ifreq->ifr_addr.sa_family);
    if (!iface)
      return -1;
    ((struct sockaddr_in *)&ifreq->ifr_netmask)->sin_addr = ((struct netif_ip *)iface)->netmask;
    break;
  case SIOCSIFNETMASK:
    // Change only the netmask, keeping the address and gateway.
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    iface = netdev_get_netif(dev, ifreq->ifr_addr.sa_family);
    if (!iface)
      return -1;
    if (ip_netif_reconfigure(iface, ((struct netif_ip *)iface)->unicast, ((struct sockaddr_in *)&ifreq->ifr_addr)->sin_addr, ((struct netif_ip *)iface)->gateway) == -1)
      return -1;
    break;
  case SIOCGIFBRDADDR:
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    iface = netdev_get_netif(dev, ifreq->ifr_addr.sa_family);
    if (!iface)
      return -1;
    ((struct sockaddr_in *)&ifreq->ifr_broadaddr)->sin_addr = ((struct netif_ip *)iface)->broadcast;
    break;
  case SIOCSIFBRDADDR:
    /* TODO */
    break;
  case SIOCGIFMTU:
    ifreq = (struct ifreq *)arg;
    dev = netdev_by_name(ifreq->ifr_name);
    if (!dev)
      return -1;
    ifreq->ifr_mtu = dev->mtu;
    break;
  case SIOCSIFMTU:
    // Setting the MTU is not implemented; accepted as a no-op.
    break;
  default:
    return -1;
  }
  return 0;
}
|
<reponame>growsimplee/django-helper
import os
from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))

# Read package metadata from sibling files. Context managers ensure the
# handles are closed (the originals were left open).
with open(os.path.join(here, 'VERSION')) as version_file:
    VERSION = version_file.read()
with open(os.path.join(here, 'README.md')) as readme_file:
    README = readme_file.read()

setup(
    name='django-helper',
    version=VERSION,
    package_dir={'helper': 'helper'},
    include_package_data=True,
    packages=find_packages(),
    description='Django Helper module for standardizing microservices',
    long_description=README,
    long_description_content_type="text/markdown",
    url='https://github.com/growsimplee/django-helper',
    install_requires=[
        "boto3>=1.16.35",
        "Django>=3.1",
        "djangorestframework>=3.12"
    ],
    classifiers=[
        'Intended Audience :: Developers',
        # Python 2.7 classifier removed: Django>=3.1 requires Python 3.
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Framework :: Django'
    ]
)
|
import re
# Clean a string of HTML tags
def clean_html_tags(markup):
    """Remove all HTML/XML tags from *markup* and return the remaining text.

    Uses a non-greedy ``<...>`` match, so nested text between tags is kept.
    (Parameter renamed from ``str``, which shadowed the builtin.)
    """
    return re.sub(r"<.*?>", "", markup)
# Quick demo: strip the markup and show the plain text.
sample_markup = "<p>This is a <b>test</b> string</p>"
print(clean_html_tags(sample_markup))  # -> This is a test string
module PoolParty
  # Callback support for PoolParty classes: defines the known lifecycle
  # hooks and generates call_<name>_callbacks dispatcher methods.
  module Callbacks
    module ClassMethods
      # Extra callback names a class can register on top of the defaults.
      # Memoized on first call; later arguments are ignored.
      def additional_callbacks(arr=[])
        @additional_callbacks ||= arr
      end
    end
    module InstanceMethods
      # Full list of callback hooks: built-in lifecycle events plus any
      # class-specific additions (flattened into one array).
      def defined_callbacks
        [
          :before_bootstrap,
          :after_bootstrap,
          :before_configure,
          :after_configure,
          :after_create,
          # TODO: Add after_launch_instance and after_terminate_instance
          # :after_launch_instance,
          # :after_terminate_instance,
          self.class.additional_callbacks
        ].flatten
      end
      # Callbacks on bootstrap and configuration
      # For each hook, define call_<name>_callbacks (unless already present).
      # The generated method forwards to every plugin in plugin_store, then
      # to the instance's own hook method when one is defined.
      def setup_callbacks
        defined_callbacks.each do |meth|
          unless respond_to?("call_#{meth}_callbacks".to_sym)
            self.class.module_eval <<-EOE
              def call_#{meth}_callbacks(*args)
                plugin_store.each {|a| a.call_#{meth}_callbacks(*args) } if respond_to?(:plugin_store) && plugin_store
                self.send :#{meth}, *args if respond_to?(:#{meth})
              end
            EOE
          end
        end
      end
    end
    # Include hook: wires class-level and instance-level methods into the host.
    def self.included(receiver)
      receiver.extend ClassMethods
      receiver.send :include, InstanceMethods
    end
  end
end
import { lighten, darken } from 'polished';
// Base palette anchor: everything dark is derived from this near-black.
const primaryColor = '#272A2E';

// Application theme (consumed e.g. via a styled-components ThemeProvider).
// Key order matches the original definition.
const theme = {
  primaryColor,
  secondaryColor: darken(0.1, primaryColor),
  accentColor: '#D0021B',
  tertiaryColor: darken(0.1, '#fff'),
  lightFontColor: '#fff',
  sansSerifFont: '"Alegreya Sans", sans-serif',
  serifFont: 'Alegreya, serif',
  darkFontColor: 'rgb(50,50,50)',
};

export default theme;
|
import { ComponentClass, connect, Dispatch, MapDispatchToProps, MapStateToProps } from 'react-redux';
import { State } from '../state';
import { currentScreenChanged } from '../actions';
import ToolBar from '../toolbar';
import { ZeldaGame } from '../../ZeldaGame';
import { Position } from '../../Position';
import { Action } from 'redux-actions';
import { Map } from '../../Map';
// No store state is mapped into the toolbar at present.
const mapStateToProps: MapStateToProps<any, any, State> = (state: State, ownProps?: any): any => {
    return {
    };
};

// Wires the toolbar's map-switching action: load the named map while keeping
// the currently-visible screen, then notify the store.
const mapDispatchToProps: MapDispatchToProps<any, any> = (dispatch: Dispatch<Action<Map>>, ownProps?: any): any => {
    return {
        currentScreenChanged: (mapName: string) => {
            // TODO: Need an "official" way to fetch the game
            const game: ZeldaGame = (window as any).game;
            const curScreenRow: number = game.map.currentScreenRow;
            const curScreenCol: number = game.map.currentScreenCol;
            const screen: Position = new Position(curScreenRow, curScreenCol);
            const pos: Position = new Position();
            game.setMap(mapName, screen, pos);
            dispatch(currentScreenChanged());
        }
    };
};

// Redux-connected toolbar component.
const VisibleToolBar: ComponentClass<any> = connect(mapStateToProps, mapDispatchToProps)(ToolBar);
export default VisibleToolBar;
|
# add path for /opt/vc/bin(sbin)
PATH=$PATH:/opt/vc/bin:/usr/lib/klibc/bin
# Root additionally gets the VideoCore admin tools.
if [ $(id -u) -eq 0 ]; then
  PATH=$PATH:/opt/vc/sbin
fi
|
<filename>script.js
(function () {
    'use strict';
    $(document).ready(function () {
        // File listing: static table, sorted by column 1 descending.
        $('#files').dataTable({
            'paging': false,
            'info': false,
            'order': [[1, 'desc']]
        });
        // Delete form: require at least one checked file, then attach the password.
        $('form[name="delete"]').submit(function () {
            if ($('form[name="delete"] input:checkbox:checked').length === 0) {
                window.alert('Błąd! Proszę wybrać jakieś pliki do usunięcia.');
                return false;
            }
            askAboutPassword($(this));
            return true;
        });
        // Upload form: require a chosen file, then attach the password.
        $('form[name="add"]').submit(function () {
            if ($('form[name="add"] input[type="file"]').val() === '') {
                window.alert('Błąd! Proszę wskazać plik do wysłania.');
                return false;
            }
            askAboutPassword($(this));
            return true;
        });
        // Download links: POST via a temporary form so the password travels
        // in the request body instead of the URL.
        $('.dl').click(function () {
            var $form = createDownloadForm($(this).attr('href'));
            askAboutPassword($form);
            $form.appendTo('body'); // necessary. only chrome can submit in-memory form.
            $form.submit();
            $('body form[name="download"]').remove(); // appended, submitted, so remove now.
            return false;
        });
    });
    // Prompt the user for the password and attach it as a hidden input.
    // Fix: the prompt text had been mangled to '<PASSWORD>' (anonymization
    // artifact); restored the Polish wording ("the operation requires a password").
    var askAboutPassword = function ($form) {
        var password = window.prompt('Operacja wymaga hasła. Proszę podać hasło: ');
        $('<input>', {
            'type': 'hidden',
            'name': 'password',
            'value': password
        }).appendTo($form);
    };
    // Build a throwaway POST form targeting the given download URL.
    var createDownloadForm = function ($action) {
        return $('<form>', {
            'method': 'post',
            'name': 'download',
            'target': '_blank',
            'action': $action
        });
    };
})();
import java.util.HashMap;
import java.util.Map;
/**
 * Logs wall-clock duration of named processes to stdout.
 * Call {@link #startProcess} / {@link #stopProcess} around the work to time.
 */
public class PerformanceLogger {
    // Process name -> epoch millis recorded when the process was started.
    private Map<String, Long> processStartTimeMap;

    public PerformanceLogger() {
        processStartTimeMap = new HashMap<>();
    }

    /** Record the start timestamp for the named process. */
    public void startProcess(String processName) {
        processStartTimeMap.put(processName, System.currentTimeMillis());
    }

    /**
     * Stop timing the named process and log its elapsed time.
     * Prints an error if the process was never started.
     */
    public void stopProcess(String processName) {
        // remove() returns the stored start time (or null) in one map access.
        Long startedAt = processStartTimeMap.remove(processName);
        if (startedAt != null) {
            logPerformance(processName, System.currentTimeMillis() - startedAt);
        } else {
            System.out.println("Error: Process " + processName + " was not started.");
        }
    }

    /** Emit one timing line for a completed process. */
    private void logPerformance(String processName, long totalTime) {
        System.out.println("Total time for process " + processName + ": " + totalTime + "ms");
    }

    public static void main(String[] args) {
        PerformanceLogger logger = new PerformanceLogger();
        // Measure and log the performance of process "doPullTranslation"
        logger.startProcess("doPullTranslation");
        // ... Perform the "doPullTranslation" process
        logger.stopProcess("doPullTranslation");
        // Measure and log the performance of another process
        logger.startProcess("doSomethingElse");
        // ... Perform the "doSomethingElse" process
        logger.stopProcess("doSomethingElse");
    }
}
<filename>app.js
// Per-city list containers; only referenced by the commented-out legacy
// renderers below — the live code renders into the table elements instead.
let parentEl = document.getElementById('seattle');
let parentEl2 = document.getElementById('tokyo');
let parentEl3 = document.getElementById('dubai');
let parentEl4 = document.getElementById('paris');
let parentEl5 = document.getElementById('lima');
// Sales-table DOM targets used by CookieStore.prototype.cookieSold.
let table = document.getElementById('salestable')
let tableheader = document.getElementById('tableheader')
let tablebody = document.getElementById('tablebody')
let tablefooter = document.getElementById('tablefooter')
// Opening hours: one simulated sales figure is generated per entry.
let hours = ['6am', '7am','8am','9am','10am','11am','12pm','1pm','2pm','3pm','4pm','5pm','6pm','7pm']
// const allStores = []
/**
 * One cookie-stand location.
 * @param {string} name display name of the store
 * @param {number} mincustomer minimum customers per hour
 * @param {number} maxcustomer maximum customers per hour
 * @param {number} avgcookies average cookies bought per customer
 */
function CookieStore(name, mincustomer, maxcustomer, avgcookies) {
  // Location identity and traffic parameters, straight off the arguments.
  Object.assign(this, { name, mincustomer, maxcustomer, avgcookies });
  // Per-hour sales figures and the running daily total, filled by hourTotal().
  this.cookieSoldArray = [];
  this.totalcookies = 0;
}
// Random integer customer count in [mincustomer, maxcustomer], inclusive.
CookieStore.prototype.avgCustomer = function () {
  const span = this.maxcustomer - this.mincustomer + 1;
  return Math.floor(Math.random() * span + this.mincustomer);
};
// Simulate one day's sales: one figure per entry of the global `hours` list,
// accumulating the daily total as we go.
// Fixes vs. original: removed leftover debug console.log, and sum the value
// just pushed (the old code indexed cookieSoldArray[i], which reads stale
// entries if the method is ever called a second time).
CookieStore.prototype.hourTotal = function () {
  for (let i = 0; i < hours.length; i++) {
    const sold = Math.ceil(this.avgCustomer() * this.avgcookies);
    this.cookieSoldArray.push(sold);
    this.totalcookies += sold;
  }
};
// Render this store's table row: a <th> with the store name, one <td> per
// simulated hour, and a trailing <td> with the daily total. Appends the row
// to the global #tablebody element.
CookieStore.prototype.cookieSold = function(){
  this.hourTotal()
  let tr = document.createElement('tr');
  let th = document.createElement('th');
  th.innerText = this.name;
  tr.appendChild(th);
  for (let i = 0; i < this.cookieSoldArray.length; i++){
    let hourlyEl = document.createElement('td');
    hourlyEl.innerText = this.cookieSoldArray[i]
    tr.appendChild(hourlyEl);
  }
  let dailytotal = document.createElement('td');
  dailytotal.innerText = this.totalcookies;
  tr.appendChild(dailytotal);
  tablebody.appendChild(tr);
}
// One store per city, rendered in order.
// Bug fix: Paris and Lima were mislabeled 'Tokyo' (copy-paste error), which
// made three rows of the table all read "Tokyo".
let seattlestore = new CookieStore('Seattle', 23, 65, 6.3);
let tokyostore = new CookieStore('Tokyo', 3, 24, 1.2);
let dubaistore = new CookieStore('Dubai', 11, 38, 3.7);
let parisstore = new CookieStore('Paris', 20, 38, 2.3);
let limastore = new CookieStore('Lima', 2, 16, 4.6);
seattlestore.cookieSold();
tokyostore.cookieSold();
dubaistore.cookieSold();
parisstore.cookieSold();
limastore.cookieSold();
// function renderHeader(){
// let tr = document.createElement('tr');
// let th = document.createElement('th');
// th.innerText = ' ';
// tr.appendChild(th)
// for (let i = 0; i< hours.length; i++){
// let hour = document.createElement('td');
// hour.innerText = hours[i]
// tr.appendChild(hour)
// }
// let dailyhourtotal = document.createElement('td');
// dailyhourtotal.innerText = 'Daily Location Total';
// tr.appendChild(dailyhourtotal)
// tableheader.appendChild(tr)
// }
// renderHeader();
// let seattleShop = {
// city: 'seattle',
// minCustomer: 23,
// maxCustomer: 65,
// hours: ['6am', '7am','8am','9am','10am','11am','12pm','1pm','2pm','3pm','4pm','5pm','6pm','7pm'],
// avgCookieSale: 6.3,
// cookieSoldArray: [],
// avgCustomer: function(){
// return Math.floor(Math.random()*((this.maxCustomer-this.minCustomer)+1) + this.minCustomer)
// },
// hourTotal: function(){
// this.avgCustomer();
// for (let i = 0; i < this.hours.length; i++){
// this.cookieSoldArray.push(Math.ceil(this.avgCustomer() * this.avgCookieSale))
// }
// },
// cookieSold: function (){
// this.hourTotal()
// for (let i = 0; i < this.cookieSoldArray.length; i++){
// let hourlyEl = document.createElement('li');
// hourlyEl.innerText = this.hours[i] +' ' + this.cookieSoldArray[i];
// parentEl.appendChild(hourlyEl);
// }
// // parentElement.appendChild();
// }
// };
// console.log(parentEl);
// seattleShop.cookieSold();
// let tokyoShop = {
// city: 'tokyo',
// minCustomer: 3,
// maxCustomer: 24,
// hours: ['6am', '7am','8am','9am','10am','11am','12pm','1pm','2pm','3pm','4pm','5pm','6pm','7pm'],
// avgCookieSale: 1.2,
// cookieSoldArray: [],
// avgCustomer: function(){
// return Math.floor(Math.random()*((this.maxCustomer-this.minCustomer)+1) + this.minCustomer)
// },
// hourTotal: function(){
// this.avgCustomer();
// for (let i = 0; i < this.hours.length; i++){
// this.cookieSoldArray.push(Math.ceil(this.avgCustomer() * this.avgCookieSale))
// }
// },
// cookieSold: function (){
// this.hourTotal()
// for (let i = 0; i < this.cookieSoldArray.length; i++){
// let hourlyEl = document.createElement('li');
// hourlyEl.innerText = this.hours[i] +' ' + this.cookieSoldArray[i];
// parentEl2.appendChild(hourlyEl);
// }
// // parentElement.appendChild();
// }
// };
// console.log(parentEl2);
// tokyoShop.cookieSold();
// let dubaiShop = {
// city: 'dubai',
// minCustomer: 11,
// maxCustomer: 38,
// hours: ['6am', '7am','8am','9am','10am','11am','12pm','1pm','2pm','3pm','4pm','5pm','6pm','7pm'],
// avgCookieSale: 3.7,
// cookieSoldArray: [],
// avgCustomer: function(){
// return Math.floor(Math.random()*((this.maxCustomer-this.minCustomer)+1) + this.minCustomer)
// },
// hourTotal: function(){
// this.avgCustomer();
// for (let i = 0; i < this.hours.length; i++){
// this.cookieSoldArray.push(Math.ceil(this.avgCustomer() * this.avgCookieSale))
// }
// },
// cookieSold: function (){
// this.hourTotal()
// for (let i = 0; i < this.cookieSoldArray.length; i++){
// let hourlyEl = document.createElement('li');
// hourlyEl.innerText = this.hours[i] +' ' + this.cookieSoldArray[i];
// parentEl3.appendChild(hourlyEl);
// }
// // parentElement.appendChild();
// }
// };
// console.log(parentEl3);
// dubaiShop.cookieSold();
// let parisShop = {
// city: 'paris',
// minCustomer: 20,
// maxCustomer: 38,
// hours: ['6am', '7am','8am','9am','10am','11am','12pm','1pm','2pm','3pm','4pm','5pm','6pm','7pm'],
// avgCookieSale: 2.3,
// cookieSoldArray: [],
// avgCustomer: function(){
// return Math.floor(Math.random()*((this.maxCustomer-this.minCustomer)+1) + this.minCustomer)
// },
// hourTotal: function(){
// this.avgCustomer();
// for (let i = 0; i < this.hours.length; i++){
// this.cookieSoldArray.push(Math.ceil(this.avgCustomer() * this.avgCookieSale))
// }
// },
// cookieSold: function (){
// this.hourTotal()
// for (let i = 0; i < this.cookieSoldArray.length; i++){
// let hourlyEl = document.createElement('li');
// hourlyEl.innerText = this.hours[i] +' ' + this.cookieSoldArray[i];
// parentEl4.appendChild(hourlyEl);
// }
// // parentElement.appendChild();
// }
// };
// console.log(parentEl4);
// parisShop.cookieSold();
// let limaShop = {
// city: 'lima',
// minCustomer: 2,
// maxCustomer: 16,
// hours: ['6am', '7am','8am','9am','10am','11am','12pm','1pm','2pm','3pm','4pm','5pm','6pm','7pm'],
// avgCookieSale: 4.6,
// cookieSoldArray: [],
// avgCustomer: function(){
// return Math.floor(Math.random()*((this.maxCustomer-this.minCustomer)+1) + this.minCustomer)
// },
// hourTotal: function(){
// this.avgCustomer();
// for (let i = 0; i < this.hours.length; i++){
// this.cookieSoldArray.push(Math.ceil(this.avgCustomer() * this.avgCookieSale))
// }
// },
// cookieSold: function (){
// this.hourTotal()
// for (let i = 0; i < this.cookieSoldArray.length; i++){
// let hourlyEl = document.createElement('li');
// hourlyEl.innerText = this.hours[i] +' ' + this.cookieSoldArray[i];
// parentEl5.appendChild(hourlyEl);
// }
// // parentElement.appendChild();
// }
// };
// console.log(parentEl5);
// limaShop.cookieSold(); |
<filename>src/Auth.tsx
import React, { useEffect } from 'react';
import { connect } from 'react-redux';
import config from 'src/config/config';
import AppRouter from './AppRouter';
import history from 'src/redux/utils/history';
import { initClient, setActiveClient } from 'src/redux/modules/auth';
import { getOrganizationsList } from 'src/redux/modules/organization';
import { fetchAllVaults } from 'src/redux/modules/vault';
interface IAuthProps {
sessions: { [clientId: string]: any };
initClient: (config: any) => any;
setActiveClient: (client: string) => any;
activeClient: string;
getOrganizationsList: () => Promise<any>;
fetchAllVaults: () => Promise<any>;
}
// Only auth session data is read from the store; the component re-renders
// when the Keycloak sessions map or the active client changes.
const mapStateToProps = ({ auth }: any) => {
  return {
    sessions: auth.sessions,
    activeClient: auth.activeClient,
  };
}

const mapDispatchToProps = {
  initClient,
  setActiveClient,
  getOrganizationsList,
  fetchAllVaults,
};

const { keycloakConfig } = config;

// Authentication gate: initializes the Keycloak client on first render and,
// once a session exists, bootstraps app data (organizations, vaults),
// identifies the user to Heap analytics, and replays any deferred redirect.
const Auth: React.FC<IAuthProps> = (props) => {
  const client = props.sessions[keycloakConfig.clientId];
  useEffect(() => {
    if (!client) {
      // No session yet: kick off client initialization / login.
      props.initClient(keycloakConfig);
    } else {
      if (props.activeClient !== keycloakConfig.clientId) {
        props.setActiveClient(keycloakConfig.clientId);
      }
      props.getOrganizationsList();
      props.fetchAllVaults();
      // Tie analytics events to the signed-in user (if Heap is loaded).
      const currentUserEmail = client.currentUserEmail;
      if (currentUserEmail && currentUserEmail !== window.heap?.identity) {
        window.heap?.identify(currentUserEmail);
      }
      // Resume navigation that was interrupted by the login redirect.
      const pathToRedirect = localStorage.getItem('pathToRedirect');
      if (pathToRedirect) {
        localStorage.removeItem('pathToRedirect');
        history.push(pathToRedirect);
      }
    }
  }, [client]);
  return (
    <div id="main-content" className="main-content">
      <AppRouter />
    </div>
  );
};

export default connect(mapStateToProps, mapDispatchToProps)(Auth);
|
#!/bin/bash
# Install the vis-network JS dependency tree into the Flask app's static
# directory so the packages can be served without a CDN.
# Navigate to the static resources directory
cd $(dirname "${BASH_SOURCE[0]}")
cd ../looking_glass/static
# References:
# https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/setting-up-node-on-ec2-instance.html
# https://github.com/nvm-sh/nvm/blob/master/README.md
# https://www.npmjs.com/package/vis-network
# https://stackoverflow.com/questions/24514936/how-can-i-serve-npm-packages-using-flask
# Bootstrap node via nvm, then pin exact versions of vis-network and its deps.
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | bash
. ~/.nvm/nvm.sh
nvm install node
npm install @egjs/hammerjs@2.0.17
npm install @types/hammerjs@2.0.36
npm install component-emitter@1.3.0
npm install keycharm@0.3.0
npm install moment@2.24.0
npm install timsort@0.3.0
npm install uuid@7.0.0
npm install vis-data@6.2.1
npm install vis-util@4.0.0
npm install vis-uuid@1.1.3
npm install vis-network@7.6.8
|
<reponame>fusepoolP3/skosjs
/**
* Created by IntelliJ IDEA.
* User: tkurz
* Date: 20.03.12
* Time: 11:12
* To change this template use File | Settings | File Templates.
*/
/**
* This is a simple demonstrator how you can write extensions.
* @param editor
* @return {Extension}
* @constructor
*/
/**
 * Minimal demonstration extension: adds a "Project > Extension" menu entry
 * that opens a popup showing the currently loaded graph (if any).
 * @param editor the editor instance to extend
 * @return {Extension}
 * @constructor
 */
function SimpleExtension(editor) {
    var graph;

    // Popup showing the current graph URI, or a placeholder when none is loaded.
    function Popup() {
        var popup;
        this.open = function () {
            popup = editor.popup.custom("Simple Extension");
            var text = graph ? "Graph: " + graph : "No graph selected";
            popup.setContent($("<p></p>").text(text));
            popup.open();
        }
    }

    // Registers the menu entry and subscribes to graph-load events.
    function Extension() {
        editor.menu.createSeperator("Project");
        editor.menu.createMenuItem("Project", "Extension", function () {
            new Popup().open();
        });
        //bindings
        editor.event.bind(editor.EventCode.GRAPH.LOAD, function (event) {
            graph = event.data.uri;
        });
    }

    return new Extension();
}
def partition(arr, low, high):
    """Lomuto partition of arr[low..high] around arr[high].

    Elements <= pivot end up left of the pivot's final slot; returns that
    slot's index. Mutates arr in place.
    """
    pivot = arr[high]
    boundary = low - 1  # last index of the "<= pivot" region so far
    for cursor in range(low, high):
        if arr[cursor] <= pivot:
            boundary += 1
            arr[boundary], arr[cursor] = arr[cursor], arr[boundary]
    # Drop the pivot into its final position, just past the boundary.
    arr[boundary + 1], arr[high] = arr[high], arr[boundary + 1]
    return boundary + 1


def quickSort(arr, low, high):
    """Recursively sort arr[low..high] in place (classic quicksort)."""
    if low < high:
        split = partition(arr, low, high)
        quickSort(arr, low, split - 1)
        quickSort(arr, split + 1, high)
<reponame>mxjoly/MagicSlate
package component;
import java.awt.*;
import java.awt.event.ActionListener;
import javax.swing.*;
import javax.swing.border.TitledBorder;
/**
* A JComboBox whose each items have icons and text
*/
public class CustomComboBox extends JPanel {
    private static final long serialVersionUID = 795565633870638546L;
    // The underlying combo stores item *indices*; names/icons are looked up
    // in the parallel arrays below by the custom renderer.
    private JComboBox<Integer> box;
    private String[] itemNames;
    private Icon[] itemIcons;
    /**
     * @param itemNames a table that contains the names of items
     * @param itemIcons a table that contains the icons of items
     * @param actions listener notified when the selection changes
     * @param name of the component that will be displayed
     */
    public CustomComboBox(String[] itemNames, Icon[] itemIcons, ActionListener actions, String name) {
        this.itemNames = itemNames;
        this.itemIcons = itemIcons;
        box = new JComboBox<Integer>();
        // Items are the indices 0..n-1; rendering maps them to name + icon.
        for (int i = 0 ; i < itemNames.length ; i++){
            box.addItem(i);
        }
        box.setRenderer(new ComboBoxRenderer());
        box.setMaximumRowCount(3);
        box.addActionListener(actions);
        this.add(box);
        // Titled border shows the component name centered above the combo.
        this.setBorder(BorderFactory.createTitledBorder(
            BorderFactory.createCompoundBorder(),
            name,
            TitledBorder.CENTER,
            TitledBorder.TOP, null, Color.BLACK)
        );
    }
    /**
     * @return the name of selected item
     */
    public String getSelectedItem(){
        return itemNames[box.getSelectedIndex()].toUpperCase();
    }
    /**
     * Reset the items selecting the first one
     */
    public void reset(){
        box.setSelectedIndex(0);
    }
    // Renders each Integer item as its icon + name, using the list's
    // selection colors for highlighted cells.
    class ComboBoxRenderer extends JLabel implements ListCellRenderer<Integer> {
        private static final long serialVersionUID = 1L;
        public ComboBoxRenderer() {
            setOpaque(true);
            setHorizontalAlignment(LEFT);
            setVerticalAlignment(CENTER);
        }
        /**
         * Display cell with icon and description
         */
        @Override
        public Component getListCellRendererComponent(JList<? extends Integer> list, Integer value, int index, boolean isSelected, boolean cellHasFocus) {
            if (isSelected) {
                setBackground(list.getSelectionBackground());
                setForeground(list.getSelectionForeground());
            }
            else {
                setBackground(list.getBackground());
                setForeground(list.getForeground());
            }
            // 'value' is the item index into the parallel arrays.
            setIcon(itemIcons[value]);
            setText(itemNames[value]);
            return this;
        }
    }
}
// Setter dan Getter (Method untuk mendapatkan dan mengubah property yang private)
class ProductA {
private _price: number = 0;
private _discount: number = 0.05;
set price(val: number) {
this._price = val;
}
get price(): number {
return this._price - this._price * this._discount;
}
}
const productA = new ProductA();
productA.price = 100000;
console.log(productA.price);
|
<reponame>acpatison/React-Portfolio
import 'bootstrap/dist/css/bootstrap.css';
import React from 'react';
import ReactDOM from 'react-dom';
import './index.css';
import App from './app';
import * as serviceWorkerRegister from './serviceWorkerRegister';
import webVitals from './webVitals';

// Mount the application at #root; StrictMode enables extra dev-time checks.
ReactDOM.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>,
  document.getElementById('root')
);

// Service worker deliberately disabled; switch to register() for offline caching.
serviceWorkerRegister.unregister();
// Report web-vitals metrics (destination configured in ./webVitals).
webVitals();
#!/bin/sh
# Build the static library and its test binary, preferring Clang when
# available, then run the tests. Compiler env vars are restored afterwards.
REPO=$(dirname "$0")
PREVCC="$CC"
PREVCXX="$CXX"
# Portability fix: '&>' is a bashism that misbehaves under /bin/sh
# (it backgrounds the command); use POSIX '>/dev/null 2>&1' instead.
if command -v clang > /dev/null 2>&1
then
  echo "-- Clang found on system, great! Long live LLVM! :D"
  export CC=clang
  export CXX=clang++
fi
# Fresh out-of-source build directory.
rm -rf "$REPO"/build
mkdir -p "$REPO"/build && cd "$REPO"/build || exit
cmake -DBUILD_SHARED_LIBS=Off -DORLP_ED25519_BUILD_DLL=Off -DORLP_ED25519_BUILD_TESTS=On ..
cmake --build . --config Release || exit
export CC="$PREVCC"
export CXX="$PREVCXX"
# Run the tests: Unix layout first, then the MSVC Release layout.
./run_tests || ./Release/run_tests.exe || exit
cd "$REPO" || exit
#include <bits/stdc++.h>
using namespace std;
// Return true when no character occurs more than once in the string.
// Generalized to the full unsigned-char range: the original packed flags
// into a single int indexed by (str[i] - 'a'), which is undefined behaviour
// for any character outside 'a'..'z' (negative or >=32 shift counts).
bool isUniqueChars(string str)
{
    bool seen[1 << CHAR_BIT] = {false}; // one flag per possible byte value
    for (size_t i = 0; i < str.length(); i++)
    {
        unsigned char c = static_cast<unsigned char>(str[i]);
        if (seen[c])
            return false;
        seen[c] = true;
    }
    return true;
}
// Read one whitespace-delimited token from stdin and print 1 if all of its
// characters are unique, 0 otherwise.
int main()
{
    string str;
    cin>>str;
    cout << isUniqueChars(str) << endl;
    return 0;
}
/**
 * Return the absolute value of the larger of two numbers.
 * Note this is |max(a, b)| — not max(|a|, |b|).
 * @param {number} a
 * @param {number} b
 * @returns {number}
 */
function getAbsoluteMax(a, b) {
  const larger = b < a ? a : b;
  return larger > 0 ? larger : -larger;
}
/*==================================================================*\
| EXIP - Embeddable EXI Processor in C |
|--------------------------------------------------------------------|
| This work is licensed under BSD 3-Clause License |
| The full license terms and conditions are located in LICENSE.txt |
\===================================================================*/
/**
* @file encodeTestEXI.c
* @brief Testing the EXI encoder
*
* @date Nov 4, 2010
* @author <NAME>
* @version 0.5
* @par[Revision] $Id$
*/
#include "encodeTestEXI.h"
#include "EXISerializer.h"
#include "stringManipulate.h"
#include <stdio.h>
#include <string.h>
#define OUTPUT_BUFFER_SIZE 200
// Namespace URIs used by the test document (lengths precomputed to avoid
// strlen at runtime).
const String NS_STR = {"http://www.ltu.se/EISLAB/schema-test", 36};
const String NS_NESTED_STR = {"http://www.ltu.se/EISLAB/nested-xsd", 35};
const String NS_TYPES_STR = {"http://www.ltu.se/EISLAB/types", 30};
const String NS_EMPTY_STR = {NULL, 0};
// Element local names.
const String ELEM_ENCODE_STR = {"EXIPEncoder", 11};
const String ELEM_MULT_TEST_STR = {"MultipleXSDsTest", 16};
const String ELEM_DESCR_STR = {"description", 11};
const String ELEM_TYPE_TEST_STR = {"type-test", 9};
const String ELEM_TEST_SETUP_STR = {"testSetup", 9};
const String ELEM_BOOL_STR = {"bool", 4};
const String ELEM_INT_STR = {"int", 3};
const String ELEM_EXT_TYPES_STR = {"extendedTypeTest", 16};
const String ELEM_BYTE_TYPES_STR = {"byteTest", 8};
const String ELEM_DATE_TYPES_STR = {"dateTimeTest", 12};
const String ELEM_BIN_TYPES_STR = {"binaryTest", 10};
const String ELEM_ENUM_TYPES_STR = {"enumTest", 8};
// Attribute names.
const String ATTR_BYTE_STR = {"testByte", 8};
const String ATTR_VERSION_STR = {"version", 7};
const String ATTR_GOAL_STR = {"goal", 4};
const String ATTR_ID_STR = {"id", 2};
// Sample payloads for the binary/enum value tests.
static char SOME_BINARY_DATA[] = {0x02, 0x6d, 0x2f, 0xa5, 0x20, 0xf2, 0x61, 0x9c, 0xee, 0x0f};
static String SOME_BINARY_DATA_BASE64 = {"i3sd7fatzxad", 12};
//static String ENUM_DATA_1 = {"hello", 5};
//static String ENUM_DATA_2 = {"hi", 2};
//static String ENUM_DATA_3 = {"hey", 3};
static String ENUM_DATA_4 = {"hej", 3};
#define TRY_CATCH_ENCODE(func) TRY_CATCH(func, serialize.closeEXIStream(&testStrm))
errorCode encode(EXIPSchema* schemaPtr, FILE *outfile, size_t (*outputStream)(void* buf, size_t size, void* stream))
{
errorCode tmp_err_code = EXIP_UNEXPECTED_ERROR;
EXIStream testStrm;
String uri;
String ln;
QName qname = {&uri, &ln, NULL};
String chVal;
char buf[OUTPUT_BUFFER_SIZE];
BinaryBuffer buffer;
EXITypeClass valueType;
buffer.buf = buf;
buffer.bufLen = OUTPUT_BUFFER_SIZE;
buffer.bufContent = 0;
// Serialization steps:
// I: First initialize the header of the stream
serialize.initHeader(&testStrm);
// II: Set any options in the header (including schemaID and schemaIDMode), if different from the defaults.
testStrm.header.has_cookie = TRUE;
testStrm.header.has_options = TRUE;
testStrm.header.opts.valueMaxLength = 300;
testStrm.header.opts.valuePartitionCapacity = 50;
SET_STRICT(testStrm.header.opts.enumOpt);
// III: Define an external stream for the output if any, otherwise set to NULL
buffer.ioStrm.readWriteToStream = outputStream;
buffer.ioStrm.stream = outfile;
// IV: Initialize the stream
TRY_CATCH_ENCODE(serialize.initStream(&testStrm, buffer, schemaPtr));
// V: Start building the stream step by step: header, document, element etc...
TRY_CATCH_ENCODE(serialize.exiHeader(&testStrm));
TRY_CATCH_ENCODE(serialize.startDocument(&testStrm));
qname.uri = &NS_STR;
qname.localName = &ELEM_MULT_TEST_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <MultipleXSDsTest>
qname.uri = &NS_STR;
qname.localName = &ELEM_ENCODE_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <EXIPEncoder>
// NOTE: attributes should come lexicographically sorted during serialization
qname.uri = &NS_EMPTY_STR;
qname.localName = &ATTR_BYTE_STR;
if(schemaPtr != NULL)
{
// schema mode
TRY_CATCH_ENCODE(serialize.attribute(&testStrm, qname, TRUE, &valueType)); // testByte="
TRY_CATCH_ENCODE(serialize.intData(&testStrm, 55));
}
else
{
// schema-less mode
TRY_CATCH_ENCODE(serialize.attribute(&testStrm, qname, TRUE, &valueType)); // testByte="
TRY_CATCH_ENCODE(asciiToString("55", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
}
qname.localName = &ATTR_VERSION_STR;
TRY_CATCH_ENCODE(serialize.attribute(&testStrm, qname, TRUE, &valueType)); // version="
TRY_CATCH_ENCODE(asciiToString("0.2", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
TRY_CATCH_ENCODE(asciiToString("This is an example of serializing EXI streams using EXIP low level API", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </EXIPEncoder>
qname.uri = &NS_STR;
qname.localName = &ELEM_DESCR_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <description>
TRY_CATCH_ENCODE(asciiToString("This is a test of processing XML schemes with multiple XSD files", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </description>
qname.uri = &NS_NESTED_STR;
qname.localName = &ELEM_TEST_SETUP_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <testSetup>
qname.uri = &NS_EMPTY_STR;
qname.localName = &ATTR_GOAL_STR;
TRY_CATCH_ENCODE(serialize.attribute(&testStrm, qname, TRUE, &valueType)); // goal="
TRY_CATCH_ENCODE(asciiToString("Verify that the implementation works!", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
TRY_CATCH_ENCODE(asciiToString("Simple test element with single attribute", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </testSetup>
qname.uri = &NS_STR;
qname.localName = &ELEM_TYPE_TEST_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <type-test>
if(schemaPtr != NULL)
{
// schema mode
qname.uri = &NS_EMPTY_STR;
qname.localName = &ATTR_ID_STR;
TRY_CATCH_ENCODE(serialize.attribute(&testStrm, qname, TRUE, &valueType)); // id="
TRY_CATCH_ENCODE(serialize.intData(&testStrm, 1001));
}
else
{
// schema-less mode
qname.uri = &NS_EMPTY_STR;
qname.localName = &ATTR_ID_STR;
TRY_CATCH_ENCODE(serialize.attribute(&testStrm, qname, TRUE, &valueType)); // id="
TRY_CATCH_ENCODE(asciiToString("1001", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
}
qname.uri = &NS_NESTED_STR;
qname.localName = &ELEM_BOOL_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <bool>
if(schemaPtr != NULL)
{
// schema mode
TRY_CATCH_ENCODE(serialize.booleanData(&testStrm, TRUE));
}
else
{
// schema-less mode
TRY_CATCH_ENCODE(asciiToString("true", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
}
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </bool>
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </type-test>
qname.uri = &NS_STR;
qname.localName = &ELEM_EXT_TYPES_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <extendedTypeTest>
qname.uri = &NS_EMPTY_STR;
qname.localName = &ELEM_BYTE_TYPES_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <byteTest>
if(schemaPtr != NULL)
{
// schema mode
TRY_CATCH_ENCODE(serialize.intData(&testStrm, 11));
}
else
{
// schema-less mode
TRY_CATCH_ENCODE(asciiToString("11", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
}
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </byteTest>
qname.uri = &NS_EMPTY_STR;
qname.localName = &ELEM_DATE_TYPES_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <dateTimeTest>
if(schemaPtr != NULL)
{
// schema mode
EXIPDateTime dt;
dt.presenceMask = FRACT_PRESENCE;
dt.dateTime.tm_year = 112; // 2012
dt.dateTime.tm_mon = 6; // July
dt.dateTime.tm_mday = 31;
dt.dateTime.tm_hour = 13;
dt.dateTime.tm_min = 33;
dt.dateTime.tm_sec = 55;
dt.fSecs.value = 839;
dt.fSecs.offset = 5;
TRY_CATCH_ENCODE(serialize.dateTimeData(&testStrm, dt));
}
else
{
// schema-less mode
TRY_CATCH_ENCODE(asciiToString("2012 Jul 31 13:33", &chVal, &testStrm.memList, FALSE));
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, chVal));
}
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </dateTimeTest>
qname.uri = &NS_EMPTY_STR;
qname.localName = &ELEM_BIN_TYPES_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <binaryTest>
if(schemaPtr != NULL)
{
// schema mode
TRY_CATCH_ENCODE(serialize.binaryData(&testStrm, SOME_BINARY_DATA, 10));
}
else
{
// schema-less mode
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, SOME_BINARY_DATA_BASE64));
}
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </binaryTest>
qname.uri = &NS_EMPTY_STR;
qname.localName = &ELEM_ENUM_TYPES_STR;
TRY_CATCH_ENCODE(serialize.startElement(&testStrm, qname, &valueType)); // <enumTest>
TRY_CATCH_ENCODE(serialize.stringData(&testStrm, ENUM_DATA_4));
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </enumTest>
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </extendedTypeTest>
TRY_CATCH_ENCODE(serialize.endElement(&testStrm)); // </MultipleXSDsTest>
TRY_CATCH_ENCODE(serialize.endDocument(&testStrm));
// VI: Free the memory allocated by the EXI stream object
TRY_CATCH_ENCODE(serialize.closeEXIStream(&testStrm));
return tmp_err_code;
}
|
#!/bin/sh
# Configure the git credential helper so HTTPS remotes don't prompt for a
# password on every push: the macOS keychain on Darwin, the in-memory cache
# everywhere else.
case "$(uname -s)" in
    Darwin) git config --global credential.helper osxkeychain ;;
    *)      git config --global credential.helper cache ;;
esac

# Nicer diff rendering, enabled only when diff-so-fancy is on the PATH.
if which diff-so-fancy > /dev/null 2>&1; then
    git config --global core.pager "diff-so-fancy | less --tabs=4 -RFX"
fi
|
const { User } = require('../models')
const passport = require('passport')
/**
 * POST /register — create an account, establish a session, and redirect home.
 * On failure, redirects back to the registration form with the error message
 * in the `error` query parameter.
 *
 * @param {object} req - expects `{ username, password }` in `req.body`
 * @param {object} res
 * @param {Function} next - error-forwarding middleware callback
 */
const register = async (req, res, next) => {
  const { username, password } = req.body
  try {
    const user = await User.register({ username }, password)
    req.logIn(user, function (err) {
      if (err) {
        return next(err)
      }
      return res.redirect('/')
    })
  } catch (e) {
    // Encode the message: raw error text may contain '&', '#', '=' or spaces,
    // which would corrupt the redirect URL's query string.
    return res.redirect('/register?error=' + encodeURIComponent(e))
  }
}
/**
 * POST /login — authenticate with the passport 'local' strategy.
 * On error, forwards to the error middleware; on rejection, redirects back to
 * the login form with the failure info in the `error` query parameter.
 *
 * @param {object} req
 * @param {object} res
 * @param {Function} next - error-forwarding middleware callback
 */
const login = (req, res, next) => {
  passport.authenticate('local',
    (err, user, info) => {
      if (err) {
        return next(err)
      }
      if (!user) {
        // Encode the failure info so it survives as a URL query value.
        return res.redirect('/login?error=' + encodeURIComponent(info))
      }
      req.logIn(user, function (err) {
        if (err) {
          return next(err)
        }
        return res.redirect('/')
      })
    })(req, res, next)
}
/**
 * GET /logout — drop the passport session, then send the user to the
 * landing page.
 *
 * @param {object} req
 * @param {object} res
 */
const logout = (req, res) => {
  // End the session first so the redirected page renders logged-out state.
  req.logOut()
  res.redirect('/')
}
// Controller actions consumed by the auth routes.
module.exports = {
  login,
  logout,
  register
}
|
/*
* Copyright The Stargate Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package io.stargate.metrics.jersey.listener;
import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.jersey2.server.JerseyTagsProvider;
import org.glassfish.jersey.server.monitoring.RequestEvent;
import org.glassfish.jersey.server.monitoring.RequestEventListener;
/**
 * A {@link RequestEventListener} that bumps a Micrometer {@link Counter} exactly once per
 * request, at the moment the request reaches its {@code FINISHED} state. All other request
 * events are ignored.
 */
public class CounterRequestEventListener implements RequestEventListener {

  private final MeterRegistry registry;
  private final JerseyTagsProvider tagsProvider;
  private final String metricName;

  /**
   * Default constructor.
   *
   * @param registry {@link MeterRegistry} to report to.
   * @param tagsProvider {@link JerseyTagsProvider} to use for metrics tags based on the event. Note
   *     that only {@link JerseyTagsProvider#httpRequestTags(RequestEvent)} is consulted here, as
   *     the counting is done at the end of the event and this listener has no notion of
   *     long-running tasks.
   * @param metricName Name of the metric to use.
   */
  public CounterRequestEventListener(
      MeterRegistry registry, JerseyTagsProvider tagsProvider, String metricName) {
    this.registry = registry;
    this.tagsProvider = tagsProvider;
    this.metricName = metricName;
  }

  /** Increments the counter once the request has fully finished; other events are no-ops. */
  @Override
  public void onEvent(RequestEvent event) {
    if (event.getType() != RequestEvent.Type.FINISHED) {
      return;
    }
    registry.counter(metricName, tagsProvider.httpRequestTags(event)).increment();
  }
}
|
<gh_stars>1-10
package com.minenash.soulguard.config;
import net.minecraft.particle.DustParticleEffect;
import net.minecraft.particle.ParticleTypes;
import net.minecraft.util.Identifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Static configuration values for SoulGuard. The initializers below are defaults;
 * presumably a config loader elsewhere in this package overwrites them — TODO confirm.
 */
public class Config {
    // Timers in minutes; -1 appears to be a sentinel (presumably "never") — verify against the loader.
    public static int minutesUntilSoulIsVisibleToAllPlayers = -1;
    public static int minutesUntilSoulDespawns = -1;
    // XP handling on death (percentages; assumed 0-100 — confirm).
    public static int percentXpLostOnDeath = 0;
    public static int percentXpDroppedOnDeathAfterLoss = 0;
    public static boolean dropRewardXpWhenKilledByPlayer = true;
    // Player-facing soul interactions.
    public static boolean allowPlayersToInspectTheirSouls = true;
    public static boolean allowPlayersToTeleportToTheirSoul = true;
    public static boolean allowPlayersToHearCapturedSouls = false;
    // Timezone for displayed timestamps; offset units not visible here — see the loader.
    public static long timezoneOffset = 0;
    public static String timezoneAbbreviation = null;
    public static int exclusiveSoundRadius = 0;
    // Particle effects by soul state (bounded / released / locked).
    public static List<SoulParticle> boundedParticles = Arrays.asList(
            new SoulParticle(ParticleTypes.ENCHANT, 18, 0.5, 1, 1, 0.1, 1, 0.1, 0, 1, 0),
            new SoulParticle(new DustParticleEffect(0.6F,0.8F,1,1F), 5, 0.5, 1, 1, 0.25, 1, 0.25, 0, 1, 0)
    );
    public static List<SoulParticle> releasedParticles = new ArrayList<>();
    public static List<SoulParticle> lockedParticles = new ArrayList<>();
    // Sound effects by soul state.
    public static List<SoulSound> boundedSounds = Arrays.asList(
            new SoulSound(new Identifier("entity.ghast.scream"), 1, 1, 10, 1, 0,0 ,0)
    );
    public static List<SoulSound> releasedSounds = new ArrayList<>();
    public static List<SoulSound> lockedSounds = new ArrayList<>();
}
|
#!/bin/sh
# ssl-opt.sh
#
# This file is part of mbed TLS (https://tls.mbed.org)
#
# Copyright (c) 2016, ARM Limited, All Rights Reserved
#
# Purpose
#
# Executes tests to prove various TLS/SSL options and extensions.
#
# The goal is not to cover every ciphersuite/version, but instead to cover
# specific options (max fragment length, truncated hmac, etc) or procedures
# (session resumption from cache or ticket, renego, etc).
#
# The tests assume a build with default options, with exceptions expressed
# with a dependency. The tests focus on functionality and do not consider
# performance.
#
# Abort on use of unset variables; catches typos in option handling below.
set -u

# Run from the script's own directory so relative paths (data_files/,
# ../programs/...) resolve.
if cd $( dirname $0 ); then :; else
    echo "cd $( dirname $0 ) failed" >&2
    exit 1
fi

# default values, can be overriden by the environment
: ${P_SRV:=../programs/ssl/ssl_server2}
: ${P_CLI:=../programs/ssl/ssl_client2}
: ${P_PXY:=../programs/test/udp_proxy}
: ${OPENSSL_CMD:=openssl} # OPENSSL would conflict with the build system
: ${GNUTLS_CLI:=gnutls-cli}
: ${GNUTLS_SERV:=gnutls-serv}
: ${PERL:=perl}

# Reference peer command lines (OpenSSL/GnuTLS) used for interop tests.
O_SRV="$OPENSSL_CMD s_server -www -cert data_files/server5.crt -key data_files/server5.key"
O_CLI="echo 'GET / HTTP/1.0' | $OPENSSL_CMD s_client"
G_SRV="$GNUTLS_SERV --x509certfile data_files/server5.crt --x509keyfile data_files/server5.key"
G_CLI="echo 'GET / HTTP/1.0' | $GNUTLS_CLI --x509cafile data_files/test-ca_cat12.crt"
TCP_CLIENT="$PERL scripts/tcp_client.pl"

# alternative versions of OpenSSL and GnuTLS (no default path)
# When unset, the commands are 'false' so accidental use fails loudly.
if [ -n "${OPENSSL_LEGACY:-}" ]; then
    O_LEGACY_SRV="$OPENSSL_LEGACY s_server -www -cert data_files/server5.crt -key data_files/server5.key"
    O_LEGACY_CLI="echo 'GET / HTTP/1.0' | $OPENSSL_LEGACY s_client"
else
    O_LEGACY_SRV=false
    O_LEGACY_CLI=false
fi
if [ -n "${GNUTLS_NEXT_SERV:-}" ]; then
    G_NEXT_SRV="$GNUTLS_NEXT_SERV --x509certfile data_files/server5.crt --x509keyfile data_files/server5.key"
else
    G_NEXT_SRV=false
fi
if [ -n "${GNUTLS_NEXT_CLI:-}" ]; then
    G_NEXT_CLI="echo 'GET / HTTP/1.0' | $GNUTLS_NEXT_CLI --x509cafile data_files/test-ca_cat12.crt"
else
    G_NEXT_CLI=false
fi

# Global counters and run-time options (mutated by get_options/run_test).
TESTS=0
FAILS=0
SKIPS=0
CONFIG_H='../include/mbedtls/config.h'
MEMCHECK=0
FILTER='.*'
EXCLUDE='^$'
SHOW_TEST_NUMBER=0
RUN_TEST_NUMBER=''
PRESERVE_LOGS=0

# Pick a "unique" server port in the range 10000-19999, and a proxy
# port which is this plus 10000. Each port number may be independently
# overridden by a command line option.
SRV_PORT=$(($$ % 10000 + 10000))
PXY_PORT=$((SRV_PORT + 10000))
# Print command-line usage and the list of supported options.
print_usage() {
    echo "Usage: $0 [options]"
    printf " -h|--help\tPrint this help.\n"
    printf " -m|--memcheck\tCheck memory leaks and errors.\n"
    printf " -f|--filter\tOnly matching tests are executed (BRE; default: '$FILTER')\n"
    printf " -e|--exclude\tMatching tests are excluded (BRE; default: '$EXCLUDE')\n"
    printf " -n|--number\tExecute only numbered test (comma-separated, e.g. '245,256')\n"
    printf " -s|--show-numbers\tShow test numbers in front of test names\n"
    printf " -p|--preserve-logs\tPreserve logs of successful tests as well\n"
    printf " --port\tTCP/UDP port (default: randomish 1xxxx)\n"
    printf " --proxy-port\tTCP/UDP proxy port (default: randomish 2xxxx)\n"
    printf " --seed\tInteger seed value to use for this test run\n"
}

# Parse the command line, setting the globals declared above.
# Unknown options print usage and abort the run.
get_options() {
    while [ $# -gt 0 ]; do
        case "$1" in
            -f|--filter)
                shift; FILTER=$1
                ;;
            -e|--exclude)
                shift; EXCLUDE=$1
                ;;
            -m|--memcheck)
                MEMCHECK=1
                ;;
            -n|--number)
                shift; RUN_TEST_NUMBER=$1
                ;;
            -s|--show-numbers)
                SHOW_TEST_NUMBER=1
                ;;
            -p|--preserve-logs)
                PRESERVE_LOGS=1
                ;;
            --port)
                shift; SRV_PORT=$1
                ;;
            --proxy-port)
                shift; PXY_PORT=$1
                ;;
            --seed)
                shift; SEED="$1"
                ;;
            -h|--help)
                print_usage
                exit 0
                ;;
            *)
                echo "Unknown argument: '$1'"
                print_usage
                exit 1
                ;;
        esac
        shift
    done
}
# Skip next test; use this macro to skip tests which are legitimate
# in theory and expected to be re-introduced at some point, but
# aren't expected to succeed at the moment due to problems outside
# our control (such as bugs in other TLS implementations).
skip_next_test() {
    SKIP_NEXT="YES"
}

# skip next test if the flag is not enabled in config.h
requires_config_enabled() {
    if grep "^#define $1" $CONFIG_H > /dev/null; then :; else
        SKIP_NEXT="YES"
    fi
}

# skip next test if the flag is enabled in config.h
requires_config_disabled() {
    if grep "^#define $1" $CONFIG_H > /dev/null; then
        SKIP_NEXT="YES"
    fi
}

# Print the numeric value of a config.h setting: ask scripts/config.pl,
# falling back to the default grepped out of config.h itself.
get_config_value_or_default() {
    NAME="$1"
    # NOTE(review): '\s' in sed is a GNU extension; on a strictly POSIX sed
    # this substitution may not match — confirm the supported platforms.
    DEF_VAL=$( grep ".*#define.*${NAME}" ../include/mbedtls/config.h |
               sed 's/^.*\s\([0-9]*\)$/\1/' )
    ../scripts/config.pl get $NAME || echo "$DEF_VAL"
}

# skip next test if the numeric config value is below the given minimum
requires_config_value_at_least() {
    VAL=$( get_config_value_or_default "$1" )
    if [ "$VAL" -lt "$2" ]; then
        SKIP_NEXT="YES"
    fi
}

# skip next test if the numeric config value is above the given maximum
requires_config_value_at_most() {
    VAL=$( get_config_value_or_default "$1" )
    if [ "$VAL" -gt "$2" ]; then
        SKIP_NEXT="YES"
    fi
}
# skip next test if OpenSSL doesn't support FALLBACK_SCSV
requires_openssl_with_fallback_scsv() {
    # Probe once and cache the result in OPENSSL_HAS_FBSCSV.
    if [ -z "${OPENSSL_HAS_FBSCSV:-}" ]; then
        if $OPENSSL_CMD s_client -help 2>&1 | grep fallback_scsv >/dev/null
        then
            OPENSSL_HAS_FBSCSV="YES"
        else
            OPENSSL_HAS_FBSCSV="NO"
        fi
    fi
    if [ "$OPENSSL_HAS_FBSCSV" = "NO" ]; then
        SKIP_NEXT="YES"
    fi
}

# skip next test if GnuTLS isn't available
requires_gnutls() {
    # Probe once and cache the result in GNUTLS_AVAILABLE.
    if [ -z "${GNUTLS_AVAILABLE:-}" ]; then
        if ( which "$GNUTLS_CLI" && which "$GNUTLS_SERV" ) >/dev/null 2>&1; then
            GNUTLS_AVAILABLE="YES"
        else
            GNUTLS_AVAILABLE="NO"
        fi
    fi
    if [ "$GNUTLS_AVAILABLE" = "NO" ]; then
        SKIP_NEXT="YES"
    fi
}

# skip next test if GnuTLS-next isn't available
requires_gnutls_next() {
    if [ -z "${GNUTLS_NEXT_AVAILABLE:-}" ]; then
        if ( which "${GNUTLS_NEXT_CLI:-}" && which "${GNUTLS_NEXT_SERV:-}" ) >/dev/null 2>&1; then
            GNUTLS_NEXT_AVAILABLE="YES"
        else
            GNUTLS_NEXT_AVAILABLE="NO"
        fi
    fi
    if [ "$GNUTLS_NEXT_AVAILABLE" = "NO" ]; then
        SKIP_NEXT="YES"
    fi
}

# skip next test if OpenSSL-legacy isn't available
requires_openssl_legacy() {
    if [ -z "${OPENSSL_LEGACY_AVAILABLE:-}" ]; then
        if which "${OPENSSL_LEGACY:-}" >/dev/null 2>&1; then
            OPENSSL_LEGACY_AVAILABLE="YES"
        else
            OPENSSL_LEGACY_AVAILABLE="NO"
        fi
    fi
    if [ "$OPENSSL_LEGACY_AVAILABLE" = "NO" ]; then
        SKIP_NEXT="YES"
    fi
}

# skip next test if IPv6 isn't available on this host
requires_ipv6() {
    if [ -z "${HAS_IPV6:-}" ]; then
        # Probe by briefly starting our test server bound to ::1 and
        # checking whether the bind failed.
        $P_SRV server_addr='::1' > $SRV_OUT 2>&1 &
        SRV_PID=$!
        sleep 1
        kill $SRV_PID >/dev/null 2>&1
        if grep "NET - Binding of the socket failed" $SRV_OUT >/dev/null; then
            HAS_IPV6="NO"
        else
            HAS_IPV6="YES"
        fi
        # NOTE(review): SRV_OUT is a regular file; 'rm -f' would be the
        # conventional flag here, though 'rm -r' also removes it.
        rm -r $SRV_OUT
    fi
    if [ "$HAS_IPV6" = "NO" ]; then
        SKIP_NEXT="YES"
    fi
}
# Calculate the input & output maximum content lengths set in the config
MAX_CONTENT_LEN=$( ../scripts/config.pl get MBEDTLS_SSL_MAX_CONTENT_LEN || echo "16384")
MAX_IN_LEN=$( ../scripts/config.pl get MBEDTLS_SSL_IN_CONTENT_LEN || echo "$MAX_CONTENT_LEN")
MAX_OUT_LEN=$( ../scripts/config.pl get MBEDTLS_SSL_OUT_CONTENT_LEN || echo "$MAX_CONTENT_LEN")

# MAX_CONTENT_LEN ends up as the smaller of the input/output lengths.
if [ "$MAX_IN_LEN" -lt "$MAX_CONTENT_LEN" ]; then
    MAX_CONTENT_LEN="$MAX_IN_LEN"
fi
if [ "$MAX_OUT_LEN" -lt "$MAX_CONTENT_LEN" ]; then
    MAX_CONTENT_LEN="$MAX_OUT_LEN"
fi

# skip the next test if the SSL output buffer is less than 16KB
requires_full_size_output_buffer() {
    if [ "$MAX_OUT_LEN" -ne 16384 ]; then
        SKIP_NEXT="YES"
    fi
}

# skip the next test if valgrind is in use
not_with_valgrind() {
    if [ "$MEMCHECK" -gt 0 ]; then
        SKIP_NEXT="YES"
    fi
}

# skip the next test if valgrind is NOT in use
only_with_valgrind() {
    if [ "$MEMCHECK" -eq 0 ]; then
        SKIP_NEXT="YES"
    fi
}

# multiply the client timeout delay by the given factor for the next test
client_needs_more_time() {
    CLI_DELAY_FACTOR=$1
}

# wait for the given seconds after the client finished in the next test
server_needs_more_time() {
    SRV_DELAY_SECONDS=$1
}
# print_name <name>
# Print the (optionally numbered) test name padded with dots; bumps TESTS.
print_name() {
    TESTS=$(( $TESTS + 1 ))
    LINE=""
    if [ "$SHOW_TEST_NUMBER" -gt 0 ]; then
        LINE="$TESTS "
    fi
    LINE="$LINE$1"
    printf "$LINE "
    LEN=$(( 72 - `echo "$LINE" | wc -c` ))
    for i in `seq 1 $LEN`; do printf '.'; done
    printf ' '
}

# fail <message>
# Record a failure: save output logs, optionally dump them, bump FAILS.
fail() {
    echo "FAIL"
    echo " ! $1"
    mv $SRV_OUT o-srv-${TESTS}.log
    mv $CLI_OUT o-cli-${TESTS}.log
    if [ -n "$PXY_CMD" ]; then
        mv $PXY_OUT o-pxy-${TESTS}.log
    fi
    echo " ! outputs saved to o-XXX-${TESTS}.log"
    # Dump the logs to stdout on buildbots or when explicitly requested.
    if [ "X${USER:-}" = Xbuildbot -o "X${LOGNAME:-}" = Xbuildbot -o "${LOG_FAILURE_ON_STDOUT:-0}" != 0 ]; then
        echo " ! server output:"
        cat o-srv-${TESTS}.log
        echo " ! ========================================================"
        echo " ! client output:"
        cat o-cli-${TESTS}.log
        if [ -n "$PXY_CMD" ]; then
            echo " ! ========================================================"
            echo " ! proxy output:"
            cat o-pxy-${TESTS}.log
        fi
        echo ""
    fi
    FAILS=$(( $FAILS + 1 ))
}

# is_polar <cmd_line>
# True if the command line runs one of our own ssl_server2/ssl_client2 programs.
is_polar() {
    echo "$1" | grep 'ssl_server2\|ssl_client2' > /dev/null
}

# openssl s_server doesn't have -www with DTLS
# Strips -www from SRV_CMD in that case and sets NEEDS_INPUT accordingly.
check_osrv_dtls() {
    if echo "$SRV_CMD" | grep 's_server.*-dtls' >/dev/null; then
        NEEDS_INPUT=1
        SRV_CMD="$( echo $SRV_CMD | sed s/-www// )"
    else
        NEEDS_INPUT=0
    fi
}

# provide input to commands that need it
# (an endless drip of HTTP responses, one per second)
provide_input() {
    if [ $NEEDS_INPUT -eq 0 ]; then
        return
    fi
    while true; do
        echo "HTTP/1.0 200 OK"
        sleep 1
    done
}

# has_mem_err <log_file_name>
# Returns success (0) when the valgrind log shows leaks or errors.
has_mem_err() {
    if ( grep -F 'All heap blocks were freed -- no leaks are possible' "$1" &&
         grep -F 'ERROR SUMMARY: 0 errors from 0 contexts' "$1" ) > /dev/null
    then
        return 1 # false: does not have errors
    else
        return 0 # true: has errors
    fi
}
# Wait for process $2 to be listening on port $1
# (implemented via lsof when available, otherwise a fixed sleep)
if type lsof >/dev/null 2>/dev/null; then
    wait_server_start() {
        START_TIME=$(date +%s)
        if [ "$DTLS" -eq 1 ]; then
            proto=UDP
        else
            proto=TCP
        fi
        # Make a tight loop, server normally takes less than 1s to start.
        while ! lsof -a -n -b -i "$proto:$1" -p "$2" >/dev/null 2>/dev/null; do
            if [ $(( $(date +%s) - $START_TIME )) -gt $DOG_DELAY ]; then
                echo "SERVERSTART TIMEOUT"
                echo "SERVERSTART TIMEOUT" >> $SRV_OUT
                break
            fi
            # Linux and *BSD support decimal arguments to sleep. On other
            # OSes this may be a tight loop.
            sleep 0.1 2>/dev/null || true
        done
    }
else
    echo "Warning: lsof not available, wait_server_start = sleep"
    wait_server_start() {
        sleep "$START_DELAY"
    }
fi

# Given the client or server debug output, parse the unix timestamp that is
# included in the first 4 bytes of the random bytes and check that it's within
# acceptable bounds
check_server_hello_time() {
    # Extract the time from the debug (lvl 3) output of the client
    SERVER_HELLO_TIME="$(sed -n 's/.*server hello, current time: //p' < "$1")"
    # Get the Unix timestamp for now
    CUR_TIME=$(date +'%s')
    THRESHOLD_IN_SECS=300
    # Check if the ServerHello time was printed
    if [ -z "$SERVER_HELLO_TIME" ]; then
        return 1
    fi
    # Check the time in ServerHello is within acceptable bounds
    if [ $SERVER_HELLO_TIME -lt $(( $CUR_TIME - $THRESHOLD_IN_SECS )) ]; then
        # The time in ServerHello is at least 5 minutes before now
        return 1
    elif [ $SERVER_HELLO_TIME -gt $(( $CUR_TIME + $THRESHOLD_IN_SECS )) ]; then
        # The time in ServerHello is at least 5 minutes later than now
        return 1
    else
        return 0
    fi
}

# wait for client to terminate and set CLI_EXIT
# must be called right after starting the client
wait_client_done() {
    CLI_PID=$!
    CLI_DELAY=$(( $DOG_DELAY * $CLI_DELAY_FACTOR ))
    CLI_DELAY_FACTOR=1
    # Watchdog subshell: kill the client if it exceeds its time budget.
    ( sleep $CLI_DELAY; echo "===CLIENT_TIMEOUT===" >> $CLI_OUT; kill $CLI_PID ) &
    DOG_PID=$!
    wait $CLI_PID
    CLI_EXIT=$?
    kill $DOG_PID >/dev/null 2>&1
    wait $DOG_PID
    echo "EXIT: $CLI_EXIT" >> $CLI_OUT
    sleep $SRV_DELAY_SECONDS
    SRV_DELAY_SECONDS=0
}

# check if the given command uses dtls and sets global variable DTLS
detect_dtls() {
    if echo "$1" | grep 'dtls=1\|-dtls1\|-u' >/dev/null; then
        DTLS=1
    else
        DTLS=0
    fi
}
# Usage: run_test name [-p proxy_cmd] srv_cmd cli_cmd cli_exit [option [...]]
# Options: -s pattern pattern that must be present in server output
# -c pattern pattern that must be present in client output
# -u pattern lines after pattern must be unique in client output
# -f call shell function on client output
# -S pattern pattern that must be absent in server output
# -C pattern pattern that must be absent in client output
# -U pattern lines after pattern must be unique in server output
# -F call shell function on server output
run_test() {
    NAME="$1"
    shift 1

    # Only run tests matching $FILTER and not matching $EXCLUDE.
    if echo "$NAME" | grep "$FILTER" | grep -v "$EXCLUDE" >/dev/null; then :
    else
        SKIP_NEXT="NO"
        return
    fi

    print_name "$NAME"

    # Do we only run numbered tests?
    if [ "X$RUN_TEST_NUMBER" = "X" ]; then :
    elif echo ",$RUN_TEST_NUMBER," | grep ",$TESTS," >/dev/null; then :
    else
        SKIP_NEXT="YES"
    fi

    # should we skip?
    if [ "X$SKIP_NEXT" = "XYES" ]; then
        SKIP_NEXT="NO"
        echo "SKIP"
        SKIPS=$(( $SKIPS + 1 ))
        return
    fi

    # does this test use a proxy?
    if [ "X$1" = "X-p" ]; then
        PXY_CMD="$2"
        shift 2
    else
        PXY_CMD=""
    fi

    # get commands and client output
    SRV_CMD="$1"
    CLI_CMD="$2"
    CLI_EXPECT="$3"
    shift 3

    # fix client port
    if [ -n "$PXY_CMD" ]; then
        CLI_CMD=$( echo "$CLI_CMD" | sed s/+SRV_PORT/$PXY_PORT/g )
    else
        CLI_CMD=$( echo "$CLI_CMD" | sed s/+SRV_PORT/$SRV_PORT/g )
    fi

    # update DTLS variable
    detect_dtls "$SRV_CMD"

    # prepend valgrind to our commands if active
    if [ "$MEMCHECK" -gt 0 ]; then
        if is_polar "$SRV_CMD"; then
            SRV_CMD="valgrind --leak-check=full $SRV_CMD"
        fi
        if is_polar "$CLI_CMD"; then
            CLI_CMD="valgrind --leak-check=full $CLI_CMD"
        fi
    fi

    # Run once, and retry once more only if the client timed out.
    TIMES_LEFT=2
    while [ $TIMES_LEFT -gt 0 ]; do
        TIMES_LEFT=$(( $TIMES_LEFT - 1 ))

        # run the commands
        if [ -n "$PXY_CMD" ]; then
            echo "$PXY_CMD" > $PXY_OUT
            $PXY_CMD >> $PXY_OUT 2>&1 &
            PXY_PID=$!
            # assume proxy starts faster than server
        fi

        check_osrv_dtls
        echo "$SRV_CMD" > $SRV_OUT
        provide_input | $SRV_CMD >> $SRV_OUT 2>&1 &
        SRV_PID=$!
        wait_server_start "$SRV_PORT" "$SRV_PID"

        echo "$CLI_CMD" > $CLI_OUT
        eval "$CLI_CMD" >> $CLI_OUT 2>&1 &
        wait_client_done

        sleep 0.05

        # terminate the server (and the proxy)
        kill $SRV_PID
        wait $SRV_PID
        if [ -n "$PXY_CMD" ]; then
            kill $PXY_PID >/dev/null 2>&1
            wait $PXY_PID
        fi

        # retry only on timeouts
        if grep '===CLIENT_TIMEOUT===' $CLI_OUT >/dev/null; then
            printf "RETRY "
        else
            TIMES_LEFT=0
        fi
    done

    # check if the client and server went at least to the handshake stage
    # (useful to avoid tests with only negative assertions and non-zero
    # expected client exit to incorrectly succeed in case of catastrophic
    # failure)
    if is_polar "$SRV_CMD"; then
        if grep "Performing the SSL/TLS handshake" $SRV_OUT >/dev/null; then :;
        else
            fail "server or client failed to reach handshake stage"
            return
        fi
    fi
    if is_polar "$CLI_CMD"; then
        if grep "Performing the SSL/TLS handshake" $CLI_OUT >/dev/null; then :;
        else
            fail "server or client failed to reach handshake stage"
            return
        fi
    fi

    # check server exit code
    # NOTE(review): at this point $? holds the status of the preceding
    # 'if'/grep, not the server process ('wait $SRV_PID' ran many commands
    # ago), so this check appears ineffective — confirm intent; capturing
    # the status right after 'wait $SRV_PID' would be needed to fix it.
    if [ $? != 0 ]; then
        fail "server fail"
        return
    fi

    # check client exit code
    if [ \( "$CLI_EXPECT" = 0 -a "$CLI_EXIT" != 0 \) -o \
         \( "$CLI_EXPECT" != 0 -a "$CLI_EXIT" = 0 \) ]
    then
        fail "bad client exit code (expected $CLI_EXPECT, got $CLI_EXIT)"
        return
    fi

    # check other assertions
    # lines beginning with == are added by valgrind, ignore them
    # lines with 'Serious error when reading debug info', are valgrind issues as well
    while [ $# -gt 0 ]
    do
        case $1 in
            "-s")
                if grep -v '^==' $SRV_OUT | grep -v 'Serious error when reading debug info' | grep "$2" >/dev/null; then :; else
                    fail "pattern '$2' MUST be present in the Server output"
                    return
                fi
                ;;
            "-c")
                if grep -v '^==' $CLI_OUT | grep -v 'Serious error when reading debug info' | grep "$2" >/dev/null; then :; else
                    fail "pattern '$2' MUST be present in the Client output"
                    return
                fi
                ;;
            "-S")
                if grep -v '^==' $SRV_OUT | grep -v 'Serious error when reading debug info' | grep "$2" >/dev/null; then
                    fail "pattern '$2' MUST NOT be present in the Server output"
                    return
                fi
                ;;
            "-C")
                if grep -v '^==' $CLI_OUT | grep -v 'Serious error when reading debug info' | grep "$2" >/dev/null; then
                    fail "pattern '$2' MUST NOT be present in the Client output"
                    return
                fi
                ;;
            # The filtering in the following two options (-u and -U) do the following
            # - ignore valgrind output
            # - filter out everything but lines right after the pattern occurances
            # - keep one of each non-unique line
            # - count how many lines remain
            # A line with '--' will remain in the result from previous outputs, so the number of lines in the result will be 1
            # if there were no duplicates.
            "-U")
                if [ $(grep -v '^==' $SRV_OUT | grep -v 'Serious error when reading debug info' | grep -A1 "$2" | grep -v "$2" | sort | uniq -d | wc -l) -gt 1 ]; then
                    fail "lines following pattern '$2' must be unique in Server output"
                    return
                fi
                ;;
            "-u")
                if [ $(grep -v '^==' $CLI_OUT | grep -v 'Serious error when reading debug info' | grep -A1 "$2" | grep -v "$2" | sort | uniq -d | wc -l) -gt 1 ]; then
                    fail "lines following pattern '$2' must be unique in Client output"
                    return
                fi
                ;;
            "-F")
                if ! $2 "$SRV_OUT"; then
                    fail "function call to '$2' failed on Server output"
                    return
                fi
                ;;
            "-f")
                if ! $2 "$CLI_OUT"; then
                    fail "function call to '$2' failed on Client output"
                    return
                fi
                ;;
            *)
                echo "Unknown test: $1" >&2
                exit 1
        esac
        shift 2
    done

    # check valgrind's results
    if [ "$MEMCHECK" -gt 0 ]; then
        if is_polar "$SRV_CMD" && has_mem_err $SRV_OUT; then
            fail "Server has memory errors"
            return
        fi
        if is_polar "$CLI_CMD" && has_mem_err $CLI_OUT; then
            fail "Client has memory errors"
            return
        fi
    fi

    # if we're here, everything is ok
    echo "PASS"
    if [ "$PRESERVE_LOGS" -gt 0 ]; then
        mv $SRV_OUT o-srv-${TESTS}.log
        mv $CLI_OUT o-cli-${TESTS}.log
        if [ -n "$PXY_CMD" ]; then
            mv $PXY_OUT o-pxy-${TESTS}.log
        fi
    fi

    rm -f $SRV_OUT $CLI_OUT $PXY_OUT
}
# Signal handler: remove temp files, kill any live child processes, exit 1.
cleanup() {
    rm -f $CLI_OUT $SRV_OUT $PXY_OUT $SESSION
    test -n "${SRV_PID:-}" && kill $SRV_PID >/dev/null 2>&1
    test -n "${PXY_PID:-}" && kill $PXY_PID >/dev/null 2>&1
    test -n "${CLI_PID:-}" && kill $CLI_PID >/dev/null 2>&1
    test -n "${DOG_PID:-}" && kill $DOG_PID >/dev/null 2>&1
    exit 1
}

#
# MAIN
#

get_options "$@"

# sanity checks, avoid an avalanche of errors
# (strip everything after the first space to get the bare binary path)
P_SRV_BIN="${P_SRV%%[ ]*}"
P_CLI_BIN="${P_CLI%%[ ]*}"
P_PXY_BIN="${P_PXY%%[ ]*}"
if [ ! -x "$P_SRV_BIN" ]; then
    echo "Command '$P_SRV_BIN' is not an executable file"
    exit 1
fi
if [ ! -x "$P_CLI_BIN" ]; then
    echo "Command '$P_CLI_BIN' is not an executable file"
    exit 1
fi
if [ ! -x "$P_PXY_BIN" ]; then
    echo "Command '$P_PXY_BIN' is not an executable file"
    exit 1
fi
if [ "$MEMCHECK" -gt 0 ]; then
    if which valgrind >/dev/null 2>&1; then :; else
        echo "Memcheck not possible. Valgrind not found"
        exit 1
    fi
fi
if which $OPENSSL_CMD >/dev/null 2>&1; then :; else
    echo "Command '$OPENSSL_CMD' not found"
    exit 1
fi

# used by watchdog
MAIN_PID="$$"

# We use somewhat arbitrary delays for tests:
# - how long do we wait for the server to start (when lsof not available)?
# - how long do we allow for the client to finish?
#   (not to check performance, just to avoid waiting indefinitely)
# Things are slower with valgrind, so give extra time here.
#
# Note: without lsof, there is a trade-off between the running time of this
# script and the risk of spurious errors because we didn't wait long enough.
# The watchdog delay on the other hand doesn't affect normal running time of
# the script, only the case where a client or server gets stuck.
if [ "$MEMCHECK" -gt 0 ]; then
    START_DELAY=6
    DOG_DELAY=60
else
    START_DELAY=2
    DOG_DELAY=20
fi

# some particular tests need more time:
# - for the client, we multiply the usual watchdog limit by a factor
# - for the server, we sleep for a number of seconds after the client exits
# see client_need_more_time() and server_needs_more_time()
CLI_DELAY_FACTOR=1
SRV_DELAY_SECONDS=0

# fix commands to use this port, force IPv4 while at it
# +SRV_PORT will be replaced by either $SRV_PORT or $PXY_PORT later
P_SRV="$P_SRV server_addr=127.0.0.1 server_port=$SRV_PORT"
P_CLI="$P_CLI server_addr=127.0.0.1 server_port=+SRV_PORT"
P_PXY="$P_PXY server_addr=127.0.0.1 server_port=$SRV_PORT listen_addr=127.0.0.1 listen_port=$PXY_PORT ${SEED:+"seed=$SEED"}"
O_SRV="$O_SRV -accept $SRV_PORT -dhparam data_files/dhparams.pem"
O_CLI="$O_CLI -connect localhost:+SRV_PORT"
G_SRV="$G_SRV -p $SRV_PORT"
G_CLI="$G_CLI -p +SRV_PORT"

if [ -n "${OPENSSL_LEGACY:-}" ]; then
    O_LEGACY_SRV="$O_LEGACY_SRV -accept $SRV_PORT -dhparam data_files/dhparams.pem"
    O_LEGACY_CLI="$O_LEGACY_CLI -connect localhost:+SRV_PORT"
fi
if [ -n "${GNUTLS_NEXT_SERV:-}" ]; then
    G_NEXT_SRV="$G_NEXT_SRV -p $SRV_PORT"
fi
if [ -n "${GNUTLS_NEXT_CLI:-}" ]; then
    G_NEXT_CLI="$G_NEXT_CLI -p +SRV_PORT"
fi

# Allow SHA-1, because many of our test certificates use it
P_SRV="$P_SRV allow_sha1=1"
P_CLI="$P_CLI allow_sha1=1"

# Also pick a unique name for intermediate files
SRV_OUT="srv_out.$$"
CLI_OUT="cli_out.$$"
PXY_OUT="pxy_out.$$"
SESSION="session.$$"

SKIP_NEXT="NO"

trap cleanup INT TERM HUP
# Basic test
# Checks that:
# - things work with all ciphersuites active (used with config-full in all.sh)
# - the expected (highest security) parameters are selected
# ("signature_algorithm ext: 6" means SHA-512 (highest common hash))
run_test "Default" \
"$P_SRV debug_level=3" \
"$P_CLI" \
0 \
-s "Protocol is TLSv1.2" \
-s "Ciphersuite is TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256" \
-s "client hello v3, signature_algorithm ext: 6" \
-s "ECDHE curve: secp521r1" \
-S "error" \
-C "error"
run_test "Default, DTLS" \
"$P_SRV dtls=1" \
"$P_CLI dtls=1" \
0 \
-s "Protocol is DTLSv1.2" \
-s "Ciphersuite is TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256"
# Test current time in ServerHello
requires_config_enabled MBEDTLS_HAVE_TIME
run_test "ServerHello contains gmt_unix_time" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3" \
0 \
-f "check_server_hello_time" \
-F "check_server_hello_time"
# Test for uniqueness of IVs in AEAD ciphersuites
run_test "Unique IV in GCM" \
"$P_SRV exchanges=20 debug_level=4" \
"$P_CLI exchanges=20 debug_level=4 force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384" \
0 \
-u "IV used" \
-U "IV used"
# Tests for rc4 option
requires_config_enabled MBEDTLS_REMOVE_ARC4_CIPHERSUITES
run_test "RC4: server disabled, client enabled" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
1 \
-s "SSL - The server has no ciphersuites in common"
requires_config_enabled MBEDTLS_REMOVE_ARC4_CIPHERSUITES
run_test "RC4: server half, client enabled" \
"$P_SRV arc4=1" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
1 \
-s "SSL - The server has no ciphersuites in common"
run_test "RC4: server enabled, client disabled" \
"$P_SRV force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI" \
1 \
-s "SSL - The server has no ciphersuites in common"
run_test "RC4: both enabled" \
"$P_SRV force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-S "SSL - None of the common ciphersuites is usable" \
-S "SSL - The server has no ciphersuites in common"
# Test empty CA list in CertificateRequest in TLS 1.1 and earlier
requires_gnutls
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
run_test "CertificateRequest with empty CA list, TLS 1.1 (GnuTLS server)" \
"$G_SRV"\
"$P_CLI force_version=tls1_1" \
0
requires_gnutls
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1
run_test "CertificateRequest with empty CA list, TLS 1.0 (GnuTLS server)" \
"$G_SRV"\
"$P_CLI force_version=tls1" \
0
# Tests for SHA-1 support
requires_config_disabled MBEDTLS_TLS_DEFAULT_ALLOW_SHA1_IN_CERTIFICATES
run_test "SHA-1 forbidden by default in server certificate" \
"$P_SRV key_file=data_files/server2.key crt_file=data_files/server2.crt" \
"$P_CLI debug_level=2 allow_sha1=0" \
1 \
-c "The certificate is signed with an unacceptable hash"
requires_config_enabled MBEDTLS_TLS_DEFAULT_ALLOW_SHA1_IN_CERTIFICATES
run_test "SHA-1 forbidden by default in server certificate" \
"$P_SRV key_file=data_files/server2.key crt_file=data_files/server2.crt" \
"$P_CLI debug_level=2 allow_sha1=0" \
0
run_test "SHA-1 explicitly allowed in server certificate" \
"$P_SRV key_file=data_files/server2.key crt_file=data_files/server2.crt" \
"$P_CLI allow_sha1=1" \
0
run_test "SHA-256 allowed by default in server certificate" \
"$P_SRV key_file=data_files/server2.key crt_file=data_files/server2-sha256.crt" \
"$P_CLI allow_sha1=0" \
0
requires_config_disabled MBEDTLS_TLS_DEFAULT_ALLOW_SHA1_IN_CERTIFICATES
run_test "SHA-1 forbidden by default in client certificate" \
"$P_SRV auth_mode=required allow_sha1=0" \
"$P_CLI key_file=data_files/cli-rsa.key crt_file=data_files/cli-rsa-sha1.crt" \
1 \
-s "The certificate is signed with an unacceptable hash"
requires_config_enabled MBEDTLS_TLS_DEFAULT_ALLOW_SHA1_IN_CERTIFICATES
run_test "SHA-1 forbidden by default in client certificate" \
"$P_SRV auth_mode=required allow_sha1=0" \
"$P_CLI key_file=data_files/cli-rsa.key crt_file=data_files/cli-rsa-sha1.crt" \
0
# allow_sha1=1 on the server explicitly accepts a SHA-1-signed client cert.
run_test "SHA-1 explicitly allowed in client certificate" \
"$P_SRV auth_mode=required allow_sha1=1" \
"$P_CLI key_file=data_files/cli-rsa.key crt_file=data_files/cli-rsa-sha1.crt" \
0
run_test "SHA-256 allowed by default in client certificate" \
"$P_SRV auth_mode=required allow_sha1=0" \
"$P_CLI key_file=data_files/cli-rsa.key crt_file=data_files/cli-rsa-sha256.crt" \
0
# Tests for datagram packing
# dgram_packing=1 lets a DTLS endpoint pack several records into one datagram.
# The debug line "next record in same datagram" is emitted by the RECEIVING
# side when its peer packed records, so: packing enabled on the client shows
# up in the server log and vice versa. -c/-s assert the pattern is present in
# the client/server log, -C/-S assert it is absent.
run_test "DTLS: multiple records in same datagram, client and server" \
"$P_SRV dtls=1 dgram_packing=1 debug_level=2" \
"$P_CLI dtls=1 dgram_packing=1 debug_level=2" \
0 \
-c "next record in same datagram" \
-s "next record in same datagram"
run_test "DTLS: multiple records in same datagram, client only" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=2" \
"$P_CLI dtls=1 dgram_packing=1 debug_level=2" \
0 \
-s "next record in same datagram" \
-C "next record in same datagram"
run_test "DTLS: multiple records in same datagram, server only" \
"$P_SRV dtls=1 dgram_packing=1 debug_level=2" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=2" \
0 \
-S "next record in same datagram" \
-c "next record in same datagram"
run_test "DTLS: multiple records in same datagram, neither client nor server" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=2" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=2" \
0 \
-S "next record in same datagram" \
-C "next record in same datagram"
# Tests for Truncated HMAC extension
# With a CBC-SHA ciphersuite the full HMAC-SHA1 MAC is 20 bytes; when the
# truncated_hmac extension (RFC 6066) is negotiated it shrinks to 10 bytes.
# The server-side debug dump "dumping 'expected mac' (N bytes)" reveals which
# MAC length is in use. Truncation requires BOTH peers to enable it
# (trunc_hmac=1); any other combination keeps the 20-byte MAC.
run_test "Truncated HMAC: client default, server default" \
"$P_SRV debug_level=4" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Truncated HMAC: client disabled, server default" \
"$P_SRV debug_level=4" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=0" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Truncated HMAC: client enabled, server default" \
"$P_SRV debug_level=4" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=1" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Truncated HMAC: client enabled, server disabled" \
"$P_SRV debug_level=4 trunc_hmac=0" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=1" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Truncated HMAC: client disabled, server enabled" \
"$P_SRV debug_level=4 trunc_hmac=1" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=0" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
# Both sides enabled: only here is the 10-byte truncated MAC expected.
run_test "Truncated HMAC: client enabled, server enabled" \
"$P_SRV debug_level=4 trunc_hmac=1" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=1" \
0 \
-S "dumping 'expected mac' (20 bytes)" \
-s "dumping 'expected mac' (10 bytes)"
# Same matrix as above, over DTLS.
run_test "Truncated HMAC, DTLS: client default, server default" \
"$P_SRV dtls=1 debug_level=4" \
"$P_CLI dtls=1 force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Truncated HMAC, DTLS: client disabled, server default" \
"$P_SRV dtls=1 debug_level=4" \
"$P_CLI dtls=1 force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=0" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Truncated HMAC, DTLS: client enabled, server default" \
"$P_SRV dtls=1 debug_level=4" \
"$P_CLI dtls=1 force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=1" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Truncated HMAC, DTLS: client enabled, server disabled" \
"$P_SRV dtls=1 debug_level=4 trunc_hmac=0" \
"$P_CLI dtls=1 force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=1" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Truncated HMAC, DTLS: client disabled, server enabled" \
"$P_SRV dtls=1 debug_level=4 trunc_hmac=1" \
"$P_CLI dtls=1 force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=0" \
0 \
-s "dumping 'expected mac' (20 bytes)" \
-S "dumping 'expected mac' (10 bytes)"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Truncated HMAC, DTLS: client enabled, server enabled" \
"$P_SRV dtls=1 debug_level=4 trunc_hmac=1" \
"$P_CLI dtls=1 force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA trunc_hmac=1" \
0 \
-S "dumping 'expected mac' (20 bytes)" \
-s "dumping 'expected mac' (10 bytes)"
# Tests for Encrypt-then-MAC extension
# EtM (RFC 7366) is negotiated only when both sides offer it AND a CBC
# ciphersuite is in use; it is on by default, so "default" means enabled.
run_test "Encrypt then MAC: default" \
"$P_SRV debug_level=3 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
"$P_CLI debug_level=3" \
0 \
-c "client hello, adding encrypt_then_mac extension" \
-s "found encrypt then mac extension" \
-s "server hello, adding encrypt then mac extension" \
-c "found encrypt_then_mac extension" \
-c "using encrypt then mac" \
-s "using encrypt then mac"
# Server disabled: it sees the extension but must not echo or use it.
run_test "Encrypt then MAC: client enabled, server disabled" \
"$P_SRV debug_level=3 etm=0 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
"$P_CLI debug_level=3 etm=1" \
0 \
-c "client hello, adding encrypt_then_mac extension" \
-s "found encrypt then mac extension" \
-S "server hello, adding encrypt then mac extension" \
-C "found encrypt_then_mac extension" \
-C "using encrypt then mac" \
-S "using encrypt then mac"
# AEAD ciphersuite (GCM): EtM is irrelevant, so even with etm=1 on both sides
# the server must not negotiate it.
run_test "Encrypt then MAC: client enabled, aead cipher" \
"$P_SRV debug_level=3 etm=1 \
force_ciphersuite=TLS-RSA-WITH-AES-128-GCM-SHA256" \
"$P_CLI debug_level=3 etm=1" \
0 \
-c "client hello, adding encrypt_then_mac extension" \
-s "found encrypt then mac extension" \
-S "server hello, adding encrypt then mac extension" \
-C "found encrypt_then_mac extension" \
-C "using encrypt then mac" \
-S "using encrypt then mac"
# Stream cipher (RC4): likewise no EtM negotiation.
run_test "Encrypt then MAC: client enabled, stream cipher" \
"$P_SRV debug_level=3 etm=1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI debug_level=3 etm=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-c "client hello, adding encrypt_then_mac extension" \
-s "found encrypt then mac extension" \
-S "server hello, adding encrypt then mac extension" \
-C "found encrypt_then_mac extension" \
-C "using encrypt then mac" \
-S "using encrypt then mac"
run_test "Encrypt then MAC: client disabled, server enabled" \
"$P_SRV debug_level=3 etm=1 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
"$P_CLI debug_level=3 etm=0" \
0 \
-C "client hello, adding encrypt_then_mac extension" \
-S "found encrypt then mac extension" \
-S "server hello, adding encrypt then mac extension" \
-C "found encrypt_then_mac extension" \
-C "using encrypt then mac" \
-S "using encrypt then mac"
# SSLv3 predates TLS extensions: an SSLv3 peer must never send or use EtM.
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Encrypt then MAC: client SSLv3, server enabled" \
"$P_SRV debug_level=3 min_version=ssl3 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
"$P_CLI debug_level=3 force_version=ssl3" \
0 \
-C "client hello, adding encrypt_then_mac extension" \
-S "found encrypt then mac extension" \
-S "server hello, adding encrypt then mac extension" \
-C "found encrypt_then_mac extension" \
-C "using encrypt then mac" \
-S "using encrypt then mac"
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Encrypt then MAC: client enabled, server SSLv3" \
"$P_SRV debug_level=3 force_version=ssl3 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
"$P_CLI debug_level=3 min_version=ssl3" \
0 \
-c "client hello, adding encrypt_then_mac extension" \
-S "found encrypt then mac extension" \
-S "server hello, adding encrypt then mac extension" \
-C "found encrypt_then_mac extension" \
-C "using encrypt then mac" \
-S "using encrypt then mac"
# Tests for Extended Master Secret extension
# EMS (RFC 7627) is enabled by default; it is used only when both peers
# advertise it, and never with an SSLv3 peer (SSLv3 has no extensions).
run_test "Extended Master Secret: default" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3" \
0 \
-c "client hello, adding extended_master_secret extension" \
-s "found extended master secret extension" \
-s "server hello, adding extended master secret extension" \
-c "found extended_master_secret extension" \
-c "using extended master secret" \
-s "using extended master secret"
run_test "Extended Master Secret: client enabled, server disabled" \
"$P_SRV debug_level=3 extended_ms=0" \
"$P_CLI debug_level=3 extended_ms=1" \
0 \
-c "client hello, adding extended_master_secret extension" \
-s "found extended master secret extension" \
-S "server hello, adding extended master secret extension" \
-C "found extended_master_secret extension" \
-C "using extended master secret" \
-S "using extended master secret"
run_test "Extended Master Secret: client disabled, server enabled" \
"$P_SRV debug_level=3 extended_ms=1" \
"$P_CLI debug_level=3 extended_ms=0" \
0 \
-C "client hello, adding extended_master_secret extension" \
-S "found extended master secret extension" \
-S "server hello, adding extended master secret extension" \
-C "found extended_master_secret extension" \
-C "using extended master secret" \
-S "using extended master secret"
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Extended Master Secret: client SSLv3, server enabled" \
"$P_SRV debug_level=3 min_version=ssl3" \
"$P_CLI debug_level=3 force_version=ssl3" \
0 \
-C "client hello, adding extended_master_secret extension" \
-S "found extended master secret extension" \
-S "server hello, adding extended master secret extension" \
-C "found extended_master_secret extension" \
-C "using extended master secret" \
-S "using extended master secret"
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Extended Master Secret: client enabled, server SSLv3" \
"$P_SRV debug_level=3 force_version=ssl3" \
"$P_CLI debug_level=3 min_version=ssl3" \
0 \
-c "client hello, adding extended_master_secret extension" \
-S "found extended master secret extension" \
-S "server hello, adding extended master secret extension" \
-C "found extended_master_secret extension" \
-C "using extended master secret" \
-S "using extended master secret"
# Tests for FALLBACK_SCSV
# RFC 7507: a client doing a downgraded retry (fallback=1) adds the SCSV; a
# server that supports a higher version than the one offered must then abort
# with an inappropriate_fallback alert (msg 86).
# NOTE: "inapropriate fallback" reproduces the (misspelled) debug string
# emitted by the library -- do not "fix" the spelling here or the pattern
# will no longer match.
run_test "Fallback SCSV: default" \
"$P_SRV debug_level=2" \
"$P_CLI debug_level=3 force_version=tls1_1" \
0 \
-C "adding FALLBACK_SCSV" \
-S "received FALLBACK_SCSV" \
-S "inapropriate fallback" \
-C "is a fatal alert message (msg 86)"
run_test "Fallback SCSV: explicitly disabled" \
"$P_SRV debug_level=2" \
"$P_CLI debug_level=3 force_version=tls1_1 fallback=0" \
0 \
-C "adding FALLBACK_SCSV" \
-S "received FALLBACK_SCSV" \
-S "inapropriate fallback" \
-C "is a fatal alert message (msg 86)"
# Fallback to TLS 1.1 while the server supports more: handshake must fail (1).
run_test "Fallback SCSV: enabled" \
"$P_SRV debug_level=2" \
"$P_CLI debug_level=3 force_version=tls1_1 fallback=1" \
1 \
-c "adding FALLBACK_SCSV" \
-s "received FALLBACK_SCSV" \
-s "inapropriate fallback" \
-c "is a fatal alert message (msg 86)"
# SCSV sent at the client's maximum version is harmless: no alert, exit 0.
run_test "Fallback SCSV: enabled, max version" \
"$P_SRV debug_level=2" \
"$P_CLI debug_level=3 fallback=1" \
0 \
-c "adding FALLBACK_SCSV" \
-s "received FALLBACK_SCSV" \
-S "inapropriate fallback" \
-C "is a fatal alert message (msg 86)"
# Interop: same scenarios against openssl peers.
requires_openssl_with_fallback_scsv
run_test "Fallback SCSV: default, openssl server" \
"$O_SRV" \
"$P_CLI debug_level=3 force_version=tls1_1 fallback=0" \
0 \
-C "adding FALLBACK_SCSV" \
-C "is a fatal alert message (msg 86)"
requires_openssl_with_fallback_scsv
run_test "Fallback SCSV: enabled, openssl server" \
"$O_SRV" \
"$P_CLI debug_level=3 force_version=tls1_1 fallback=1" \
1 \
-c "adding FALLBACK_SCSV" \
-c "is a fatal alert message (msg 86)"
requires_openssl_with_fallback_scsv
run_test "Fallback SCSV: disabled, openssl client" \
"$P_SRV debug_level=2" \
"$O_CLI -tls1_1" \
0 \
-S "received FALLBACK_SCSV" \
-S "inapropriate fallback"
requires_openssl_with_fallback_scsv
run_test "Fallback SCSV: enabled, openssl client" \
"$P_SRV debug_level=2" \
"$O_CLI -tls1_1 -fallback_scsv" \
1 \
-s "received FALLBACK_SCSV" \
-s "inapropriate fallback"
requires_openssl_with_fallback_scsv
run_test "Fallback SCSV: enabled, max version, openssl client" \
"$P_SRV debug_level=2" \
"$O_CLI -fallback_scsv" \
0 \
-s "received FALLBACK_SCSV" \
-S "inapropriate fallback"
# Test sending and receiving empty application data records
# request_size=0 makes the client send a zero-length application data record.
# The server must decrypt it to 0 payload bytes; with EtM+CBC the absence of
# the 0x0f padding dump (-S on the hex line) shows no padding bytes leaked
# into the payload.
run_test "Encrypt then MAC: empty application data record" \
"$P_SRV auth_mode=none debug_level=4 etm=1" \
"$P_CLI auth_mode=none etm=1 request_size=0 force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA" \
0 \
-S "0000: 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f" \
-s "dumping 'input payload after decrypt' (0 bytes)" \
-c "0 bytes written in 1 fragments"
run_test "Default, no Encrypt then MAC: empty application data record" \
"$P_SRV auth_mode=none debug_level=4 etm=0" \
"$P_CLI auth_mode=none etm=0 request_size=0" \
0 \
-s "dumping 'input payload after decrypt' (0 bytes)" \
-c "0 bytes written in 1 fragments"
# Same pair of tests over DTLS.
run_test "Encrypt then MAC, DTLS: empty application data record" \
"$P_SRV auth_mode=none debug_level=4 etm=1 dtls=1" \
"$P_CLI auth_mode=none etm=1 request_size=0 force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA dtls=1" \
0 \
-S "0000: 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f 0f" \
-s "dumping 'input payload after decrypt' (0 bytes)" \
-c "0 bytes written in 1 fragments"
run_test "Default, no Encrypt then MAC, DTLS: empty application data record" \
"$P_SRV auth_mode=none debug_level=4 etm=0 dtls=1" \
"$P_CLI auth_mode=none etm=0 request_size=0 dtls=1" \
0 \
-s "dumping 'input payload after decrypt' (0 bytes)" \
-c "0 bytes written in 1 fragments"
## ClientHello generated with
## "openssl s_client -CAfile tests/data_files/test-ca.crt -tls1_1 -connect localhost:4433 -cipher ..."
## then manually twiddling the ciphersuite list.
## The ClientHello content is spelled out below as a hex string as
## "prefix ciphersuite1 ciphersuite2 ciphersuite3 ciphersuite4 suffix".
## The expected response is an inappropriate_fallback alert.
# $TCP_CLIENT sends the raw hex ClientHello and checks the server's raw
# response against the second hex string. 0x5600 is the FALLBACK_SCSV value;
# the server must detect it regardless of its position in the list.
requires_openssl_with_fallback_scsv
run_test "Fallback SCSV: beginning of list" \
"$P_SRV debug_level=2" \
"$TCP_CLIENT localhost $SRV_PORT '160301003e0100003a03022aafb94308dc22ca1086c65acc00e414384d76b61ecab37df1633b1ae1034dbe000008 5600 0031 0032 0033 0100000900230000000f000101' '15030200020256'" \
0 \
-s "received FALLBACK_SCSV" \
-s "inapropriate fallback"
requires_openssl_with_fallback_scsv
run_test "Fallback SCSV: end of list" \
"$P_SRV debug_level=2" \
"$TCP_CLIENT localhost $SRV_PORT '160301003e0100003a03022aafb94308dc22ca1086c65acc00e414384d76b61ecab37df1633b1ae1034dbe000008 0031 0032 0033 5600 0100000900230000000f000101' '15030200020256'" \
0 \
-s "received FALLBACK_SCSV" \
-s "inapropriate fallback"
## Here the expected response is a valid ServerHello prefix, up to the random.
# 0x0056 (byte-swapped SCSV) is NOT the SCSV; no fallback handling expected.
requires_openssl_with_fallback_scsv
run_test "Fallback SCSV: not in list" \
"$P_SRV debug_level=2" \
"$TCP_CLIENT localhost $SRV_PORT '160301003e0100003a03022aafb94308dc22ca1086c65acc00e414384d76b61ecab37df1633b1ae1034dbe000008 0056 0031 0032 0033 0100000900230000000f000101' '16030200300200002c0302'" \
0 \
-S "received FALLBACK_SCSV" \
-S "inapropriate fallback"
# Tests for CBC 1/n-1 record splitting
# 1/n-1 splitting (BEAST countermeasure) applies only to CBC ciphersuites in
# TLS 1.0/SSLv3: a 123-byte write arrives as 1 byte + 122 bytes. TLS 1.1+ and
# stream ciphers (RC4) do not split, nor does recsplit=0.
run_test "CBC Record splitting: TLS 1.2, no splitting" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA \
request_size=123 force_version=tls1_2" \
0 \
-s "Read from client: 123 bytes read" \
-S "Read from client: 1 bytes read" \
-S "122 bytes read"
run_test "CBC Record splitting: TLS 1.1, no splitting" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA \
request_size=123 force_version=tls1_1" \
0 \
-s "Read from client: 123 bytes read" \
-S "Read from client: 1 bytes read" \
-S "122 bytes read"
run_test "CBC Record splitting: TLS 1.0, splitting" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA \
request_size=123 force_version=tls1" \
0 \
-S "Read from client: 123 bytes read" \
-s "Read from client: 1 bytes read" \
-s "122 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "CBC Record splitting: SSLv3, splitting" \
"$P_SRV min_version=ssl3" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA \
request_size=123 force_version=ssl3" \
0 \
-S "Read from client: 123 bytes read" \
-s "Read from client: 1 bytes read" \
-s "122 bytes read"
run_test "CBC Record splitting: TLS 1.0 RC4, no splitting" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA \
request_size=123 force_version=tls1" \
0 \
-s "Read from client: 123 bytes read" \
-S "Read from client: 1 bytes read" \
-S "122 bytes read"
run_test "CBC Record splitting: TLS 1.0, splitting disabled" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA \
request_size=123 force_version=tls1 recsplit=0" \
0 \
-s "Read from client: 123 bytes read" \
-S "Read from client: 1 bytes read" \
-S "122 bytes read"
# Splitting must also work with non-blocking I/O (nbio=2).
run_test "CBC Record splitting: TLS 1.0, splitting, nbio" \
"$P_SRV nbio=2" \
"$P_CLI nbio=2 force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA \
request_size=123 force_version=tls1" \
0 \
-S "Read from client: 123 bytes read" \
-s "Read from client: 1 bytes read" \
-s "122 bytes read"
# Tests for Session Tickets
# reconnect=1 makes the client shut down and perform a second handshake.
# With tickets enabled on both sides resumption must come from the ticket,
# not the server-side cache (-S on the cache-restore message).
run_test "Session resume using tickets: basic" \
"$P_SRV debug_level=3 tickets=1" \
"$P_CLI debug_level=3 tickets=1 reconnect=1" \
0 \
-c "client hello, adding session ticket extension" \
-s "found session ticket extension" \
-s "server hello, adding session ticket extension" \
-c "found session_ticket extension" \
-c "parse new session ticket" \
-S "session successfully restored from cache" \
-s "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
# cache_max=0 disables the server cache entirely; ticket resumption must
# still work.
run_test "Session resume using tickets: cache disabled" \
"$P_SRV debug_level=3 tickets=1 cache_max=0" \
"$P_CLI debug_level=3 tickets=1 reconnect=1" \
0 \
-c "client hello, adding session ticket extension" \
-s "found session ticket extension" \
-s "server hello, adding session ticket extension" \
-c "found session_ticket extension" \
-c "parse new session ticket" \
-S "session successfully restored from cache" \
-s "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
# ticket_timeout (1s) < reco_delay (2s): the ticket has expired by the second
# handshake, so no resumption of any kind is expected.
run_test "Session resume using tickets: timeout" \
"$P_SRV debug_level=3 tickets=1 cache_max=0 ticket_timeout=1" \
"$P_CLI debug_level=3 tickets=1 reconnect=1 reco_delay=2" \
0 \
-c "client hello, adding session ticket extension" \
-s "found session ticket extension" \
-s "server hello, adding session ticket extension" \
-c "found session_ticket extension" \
-c "parse new session ticket" \
-S "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-S "a session has been resumed" \
-C "a session has been resumed"
run_test "Session resume using tickets: openssl server" \
"$O_SRV" \
"$P_CLI debug_level=3 tickets=1 reconnect=1" \
0 \
-c "client hello, adding session ticket extension" \
-c "found session_ticket extension" \
-c "parse new session ticket" \
-c "a session has been resumed"
# $SESSION is a temporary file carrying the session between the two openssl
# client invocations; it is removed afterwards.
run_test "Session resume using tickets: openssl client" \
"$P_SRV debug_level=3 tickets=1" \
"( $O_CLI -sess_out $SESSION; \
$O_CLI -sess_in $SESSION; \
rm -f $SESSION )" \
0 \
-s "found session ticket extension" \
-s "server hello, adding session ticket extension" \
-S "session successfully restored from cache" \
-s "session successfully restored from ticket" \
-s "a session has been resumed"
# Tests for Session Tickets with DTLS
run_test "Session resume using tickets, DTLS: basic" \
"$P_SRV debug_level=3 dtls=1 tickets=1" \
"$P_CLI debug_level=3 dtls=1 tickets=1 reconnect=1" \
0 \
-c "client hello, adding session ticket extension" \
-s "found session ticket extension" \
-s "server hello, adding session ticket extension" \
-c "found session_ticket extension" \
-c "parse new session ticket" \
-S "session successfully restored from cache" \
-s "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using tickets, DTLS: cache disabled" \
"$P_SRV debug_level=3 dtls=1 tickets=1 cache_max=0" \
"$P_CLI debug_level=3 dtls=1 tickets=1 reconnect=1" \
0 \
-c "client hello, adding session ticket extension" \
-s "found session ticket extension" \
-s "server hello, adding session ticket extension" \
-c "found session_ticket extension" \
-c "parse new session ticket" \
-S "session successfully restored from cache" \
-s "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using tickets, DTLS: timeout" \
"$P_SRV debug_level=3 dtls=1 tickets=1 cache_max=0 ticket_timeout=1" \
"$P_CLI debug_level=3 dtls=1 tickets=1 reconnect=1 reco_delay=2" \
0 \
-c "client hello, adding session ticket extension" \
-s "found session ticket extension" \
-s "server hello, adding session ticket extension" \
-c "found session_ticket extension" \
-c "parse new session ticket" \
-S "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-S "a session has been resumed" \
-C "a session has been resumed"
run_test "Session resume using tickets, DTLS: openssl server" \
"$O_SRV -dtls1" \
"$P_CLI dtls=1 debug_level=3 tickets=1 reconnect=1" \
0 \
-c "client hello, adding session ticket extension" \
-c "found session_ticket extension" \
-c "parse new session ticket" \
-c "a session has been resumed"
run_test "Session resume using tickets, DTLS: openssl client" \
"$P_SRV dtls=1 debug_level=3 tickets=1" \
"( $O_CLI -dtls1 -sess_out $SESSION; \
$O_CLI -dtls1 -sess_in $SESSION; \
rm -f $SESSION )" \
0 \
-s "found session ticket extension" \
-s "server hello, adding session ticket extension" \
-S "session successfully restored from cache" \
-s "session successfully restored from ticket" \
-s "a session has been resumed"
# Tests for Session Resume based on session-ID and cache
# When tickets are disabled on either side, resumption falls back to the
# server-side session-ID cache; the restore must then come "from cache",
# never "from ticket".
run_test "Session resume using cache: tickets enabled on client" \
"$P_SRV debug_level=3 tickets=0" \
"$P_CLI debug_level=3 tickets=1 reconnect=1" \
0 \
-c "client hello, adding session ticket extension" \
-s "found session ticket extension" \
-S "server hello, adding session ticket extension" \
-C "found session_ticket extension" \
-C "parse new session ticket" \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using cache: tickets enabled on server" \
"$P_SRV debug_level=3 tickets=1" \
"$P_CLI debug_level=3 tickets=0 reconnect=1" \
0 \
-C "client hello, adding session ticket extension" \
-S "found session ticket extension" \
-S "server hello, adding session ticket extension" \
-C "found session_ticket extension" \
-C "parse new session ticket" \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
# cache_max=0: nothing can be stored, so no resumption at all.
run_test "Session resume using cache: cache_max=0" \
"$P_SRV debug_level=3 tickets=0 cache_max=0" \
"$P_CLI debug_level=3 tickets=0 reconnect=1" \
0 \
-S "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-S "a session has been resumed" \
-C "a session has been resumed"
# A single cache slot is enough for one client reconnecting.
run_test "Session resume using cache: cache_max=1" \
"$P_SRV debug_level=3 tickets=0 cache_max=1" \
"$P_CLI debug_level=3 tickets=0 reconnect=1" \
0 \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using cache: timeout > delay" \
"$P_SRV debug_level=3 tickets=0" \
"$P_CLI debug_level=3 tickets=0 reconnect=1 reco_delay=0" \
0 \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
# cache_timeout (1s) < reco_delay (2s): the cache entry has expired.
run_test "Session resume using cache: timeout < delay" \
"$P_SRV debug_level=3 tickets=0 cache_timeout=1" \
"$P_CLI debug_level=3 tickets=0 reconnect=1 reco_delay=2" \
0 \
-S "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-S "a session has been resumed" \
-C "a session has been resumed"
# cache_timeout=0 disables expiry: resumption works despite the delay.
run_test "Session resume using cache: no timeout" \
"$P_SRV debug_level=3 tickets=0 cache_timeout=0" \
"$P_CLI debug_level=3 tickets=0 reconnect=1 reco_delay=2" \
0 \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using cache: openssl client" \
"$P_SRV debug_level=3 tickets=0" \
"( $O_CLI -sess_out $SESSION; \
$O_CLI -sess_in $SESSION; \
rm -f $SESSION )" \
0 \
-s "found session ticket extension" \
-S "server hello, adding session ticket extension" \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed"
run_test "Session resume using cache: openssl server" \
"$O_SRV" \
"$P_CLI debug_level=3 tickets=0 reconnect=1" \
0 \
-C "found session_ticket extension" \
-C "parse new session ticket" \
-c "a session has been resumed"
# Tests for Session Resume based on session-ID and cache, DTLS
run_test "Session resume using cache, DTLS: tickets enabled on client" \
"$P_SRV dtls=1 debug_level=3 tickets=0" \
"$P_CLI dtls=1 debug_level=3 tickets=1 reconnect=1" \
0 \
-c "client hello, adding session ticket extension" \
-s "found session ticket extension" \
-S "server hello, adding session ticket extension" \
-C "found session_ticket extension" \
-C "parse new session ticket" \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using cache, DTLS: tickets enabled on server" \
"$P_SRV dtls=1 debug_level=3 tickets=1" \
"$P_CLI dtls=1 debug_level=3 tickets=0 reconnect=1" \
0 \
-C "client hello, adding session ticket extension" \
-S "found session ticket extension" \
-S "server hello, adding session ticket extension" \
-C "found session_ticket extension" \
-C "parse new session ticket" \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using cache, DTLS: cache_max=0" \
"$P_SRV dtls=1 debug_level=3 tickets=0 cache_max=0" \
"$P_CLI dtls=1 debug_level=3 tickets=0 reconnect=1" \
0 \
-S "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-S "a session has been resumed" \
-C "a session has been resumed"
run_test "Session resume using cache, DTLS: cache_max=1" \
"$P_SRV dtls=1 debug_level=3 tickets=0 cache_max=1" \
"$P_CLI dtls=1 debug_level=3 tickets=0 reconnect=1" \
0 \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using cache, DTLS: timeout > delay" \
"$P_SRV dtls=1 debug_level=3 tickets=0" \
"$P_CLI dtls=1 debug_level=3 tickets=0 reconnect=1 reco_delay=0" \
0 \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using cache, DTLS: timeout < delay" \
"$P_SRV dtls=1 debug_level=3 tickets=0 cache_timeout=1" \
"$P_CLI dtls=1 debug_level=3 tickets=0 reconnect=1 reco_delay=2" \
0 \
-S "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-S "a session has been resumed" \
-C "a session has been resumed"
run_test "Session resume using cache, DTLS: no timeout" \
"$P_SRV dtls=1 debug_level=3 tickets=0 cache_timeout=0" \
"$P_CLI dtls=1 debug_level=3 tickets=0 reconnect=1 reco_delay=2" \
0 \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed" \
-c "a session has been resumed"
run_test "Session resume using cache, DTLS: openssl client" \
"$P_SRV dtls=1 debug_level=3 tickets=0" \
"( $O_CLI -dtls1 -sess_out $SESSION; \
$O_CLI -dtls1 -sess_in $SESSION; \
rm -f $SESSION )" \
0 \
-s "found session ticket extension" \
-S "server hello, adding session ticket extension" \
-s "session successfully restored from cache" \
-S "session successfully restored from ticket" \
-s "a session has been resumed"
run_test "Session resume using cache, DTLS: openssl server" \
"$O_SRV -dtls1" \
"$P_CLI dtls=1 debug_level=3 tickets=0 reconnect=1" \
0 \
-C "found session_ticket extension" \
-C "parse new session ticket" \
-c "a session has been resumed"
# Tests for Max Fragment Length extension
if [ "$MAX_CONTENT_LEN" -lt "4096" ]; then
printf "${CONFIG_H} defines MBEDTLS_SSL_MAX_CONTENT_LEN to be less than 4096. Fragment length tests will fail.\n"
exit 1
fi
if [ $MAX_CONTENT_LEN -ne 16384 ]; then
printf "Using non-default maximum content length $MAX_CONTENT_LEN\n"
fi
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "Max fragment length: enabled, default" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3" \
0 \
-c "Maximum fragment length is $MAX_CONTENT_LEN" \
-s "Maximum fragment length is $MAX_CONTENT_LEN" \
-C "client hello, adding max_fragment_length extension" \
-S "found max fragment length extension" \
-S "server hello, max_fragment_length extension" \
-C "found max_fragment_length extension"
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "Max fragment length: enabled, default, larger message" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3 request_size=$(( $MAX_CONTENT_LEN + 1))" \
0 \
-c "Maximum fragment length is $MAX_CONTENT_LEN" \
-s "Maximum fragment length is $MAX_CONTENT_LEN" \
-C "client hello, adding max_fragment_length extension" \
-S "found max fragment length extension" \
-S "server hello, max_fragment_length extension" \
-C "found max_fragment_length extension" \
-c "$(( $MAX_CONTENT_LEN + 1)) bytes written in 2 fragments" \
-s "$MAX_CONTENT_LEN bytes read" \
-s "1 bytes read"
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "Max fragment length, DTLS: enabled, default, larger message" \
"$P_SRV debug_level=3 dtls=1" \
"$P_CLI debug_level=3 dtls=1 request_size=$(( $MAX_CONTENT_LEN + 1))" \
1 \
-c "Maximum fragment length is $MAX_CONTENT_LEN" \
-s "Maximum fragment length is $MAX_CONTENT_LEN" \
-C "client hello, adding max_fragment_length extension" \
-S "found max fragment length extension" \
-S "server hello, max_fragment_length extension" \
-C "found max_fragment_length extension" \
-c "fragment larger than.*maximum "
# Run some tests with MBEDTLS_SSL_MAX_FRAGMENT_LENGTH disabled
# (session fragment length will be 16384 regardless of mbedtls
# content length configuration.)
requires_config_disabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "Max fragment length: disabled, larger message" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3 request_size=$(( $MAX_CONTENT_LEN + 1))" \
0 \
-C "Maximum fragment length is 16384" \
-S "Maximum fragment length is 16384" \
-c "$(( $MAX_CONTENT_LEN + 1)) bytes written in 2 fragments" \
-s "$MAX_CONTENT_LEN bytes read" \
-s "1 bytes read"
requires_config_disabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "Max fragment length DTLS: disabled, larger message" \
"$P_SRV debug_level=3 dtls=1" \
"$P_CLI debug_level=3 dtls=1 request_size=$(( $MAX_CONTENT_LEN + 1))" \
1 \
-C "Maximum fragment length is 16384" \
-S "Maximum fragment length is 16384" \
-c "fragment larger than.*maximum "
# Client requests MFL 4096: both sides must log the negotiated value and the
# full extension exchange must appear in both logs.
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test    "Max fragment length: used by client" \
            "$P_SRV debug_level=3" \
            "$P_CLI debug_level=3 max_frag_len=4096" \
            0 \
            -c "Maximum fragment length is 4096" \
            -s "Maximum fragment length is 4096" \
            -c "client hello, adding max_fragment_length extension" \
            -s "found max fragment length extension" \
            -s "server hello, max_fragment_length extension" \
            -c "found max_fragment_length extension"

# MFL set only on the server: the extension is client-initiated, so nothing
# is negotiated — the client keeps the default, only the server caps itself.
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test    "Max fragment length: used by server" \
            "$P_SRV debug_level=3 max_frag_len=4096" \
            "$P_CLI debug_level=3" \
            0 \
            -c "Maximum fragment length is $MAX_CONTENT_LEN" \
            -s "Maximum fragment length is 4096" \
            -C "client hello, adding max_fragment_length extension" \
            -S "found max fragment length extension" \
            -S "server hello, max_fragment_length extension" \
            -C "found max_fragment_length extension"

# Interop: GnuTLS server must accept and echo our MFL request.
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
requires_gnutls
run_test    "Max fragment length: gnutls server" \
            "$G_SRV" \
            "$P_CLI debug_level=3 max_frag_len=4096" \
            0 \
            -c "Maximum fragment length is 4096" \
            -c "client hello, adding max_fragment_length extension" \
            -c "found max_fragment_length extension"

# Request exactly equal to the negotiated MFL fits in a single record.
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test    "Max fragment length: client, message just fits" \
            "$P_SRV debug_level=3" \
            "$P_CLI debug_level=3 max_frag_len=2048 request_size=2048" \
            0 \
            -c "Maximum fragment length is 2048" \
            -s "Maximum fragment length is 2048" \
            -c "client hello, adding max_fragment_length extension" \
            -s "found max fragment length extension" \
            -s "server hello, max_fragment_length extension" \
            -c "found max_fragment_length extension" \
            -c "2048 bytes written in 1 fragments" \
            -s "2048 bytes read"

# Over-sized request is split: 2345 = 2048 + 297 across two records.
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test    "Max fragment length: client, larger message" \
            "$P_SRV debug_level=3" \
            "$P_CLI debug_level=3 max_frag_len=2048 request_size=2345" \
            0 \
            -c "Maximum fragment length is 2048" \
            -s "Maximum fragment length is 2048" \
            -c "client hello, adding max_fragment_length extension" \
            -s "found max fragment length extension" \
            -s "server hello, max_fragment_length extension" \
            -c "found max_fragment_length extension" \
            -c "2345 bytes written in 2 fragments" \
            -s "2048 bytes read" \
            -s "297 bytes read"

# DTLS cannot split the over-sized message, so the same request must fail.
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test    "Max fragment length: DTLS client, larger message" \
            "$P_SRV debug_level=3 dtls=1" \
            "$P_CLI debug_level=3 dtls=1 max_frag_len=2048 request_size=2345" \
            1 \
            -c "Maximum fragment length is 2048" \
            -s "Maximum fragment length is 2048" \
            -c "client hello, adding max_fragment_length extension" \
            -s "found max fragment length extension" \
            -s "server hello, max_fragment_length extension" \
            -c "found max_fragment_length extension" \
            -c "fragment larger than.*maximum"
# Tests for renegotiation

# Renegotiation SCSV always added, regardless of SSL_RENEGOTIATION

# Baseline: neither side renegotiates; the client still signals secure
# renegotiation (server logs TLS_EMPTY_RENEGOTIATION_INFO) without sending
# the renegotiation_info extension itself.
run_test    "Renegotiation: none, for reference" \
            "$P_SRV debug_level=3 exchanges=2 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2" \
            0 \
            -C "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -S "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -C "=> renegotiate" \
            -S "=> renegotiate" \
            -S "write hello request"

# Client triggers the renegotiation; no HelloRequest from the server.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: client-initiated" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=1 renegotiate=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -S "write hello request"

# Server triggers the renegotiation, so it must write a HelloRequest.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: server-initiated" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 auth_mode=optional renegotiate=1" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -s "write hello request"
# Checks that no Signature Algorithm with SHA-1 gets negotiated. Negotiating SHA-1 would mean that
# the server did not parse the Signature Algorithm extension. This test is valid only if an MD
# algorithm stronger than SHA-1 is enabled in config.h
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: Signature Algorithms parsing, client-initiated" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=1 renegotiate=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -S "write hello request" \
            -S "client hello v3, signature_algorithm ext: 2" # Is SHA-1 negotiated?

# Checks that no Signature Algorithm with SHA-1 gets negotiated. Negotiating SHA-1 would mean that
# the server did not parse the Signature Algorithm extension. This test is valid only if an MD
# algorithm stronger than SHA-1 is enabled in config.h

# Same check as above, but with the renegotiation driven by the server.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: Signature Algorithms parsing, server-initiated" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 auth_mode=optional renegotiate=1" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -s "write hello request" \
            -S "client hello v3, signature_algorithm ext: 2" # Is SHA-1 negotiated?

# Both sides request renegotiation (renegotiate=1 on server and client).
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: double" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 auth_mode=optional renegotiate=1" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=1 renegotiate=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -s "write hello request"

# Server has renegotiation disabled: the client's attempt must fail with an
# unexpected-message error during the second handshake.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: client-initiated, server-rejected" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=0 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=1 renegotiate=1" \
            1 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -S "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -S "=> renegotiate" \
            -S "write hello request" \
            -c "SSL - Unexpected message at ServerHello in renegotiation" \
            -c "failed"
# Client refuses the server's HelloRequest; by default the server tolerates
# the refusal and the connection still completes successfully.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: server-initiated, client-rejected, default" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 renegotiate=1 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=0" \
            0 \
            -C "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -S "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -C "=> renegotiate" \
            -S "=> renegotiate" \
            -s "write hello request" \
            -S "SSL - An unexpected message was received from our peer" \
            -S "failed"

# renego_delay=-1: never enforce the renegotiation request.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: server-initiated, client-rejected, not enforced" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 renegotiate=1 \
             renego_delay=-1 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=0" \
            0 \
            -C "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -S "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -C "=> renegotiate" \
            -S "=> renegotiate" \
            -s "write hello request" \
            -S "SSL - An unexpected message was received from our peer" \
            -S "failed"

# delay 2 for 1 alert record + 1 application data record

# A delay of 2 records is still within tolerance for this exchange.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: server-initiated, client-rejected, delay 2" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 renegotiate=1 \
             renego_delay=2 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=0" \
            0 \
            -C "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -S "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -C "=> renegotiate" \
            -S "=> renegotiate" \
            -s "write hello request" \
            -S "SSL - An unexpected message was received from our peer" \
            -S "failed"

# renego_delay=0: the server enforces renegotiation immediately, so the
# client's refusal makes the server log an unexpected-message error.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: server-initiated, client-rejected, delay 0" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 renegotiate=1 \
             renego_delay=0 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=0" \
            0 \
            -C "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -S "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -C "=> renegotiate" \
            -S "=> renegotiate" \
            -s "write hello request" \
            -s "SSL - An unexpected message was received from our peer"

# Same strict delay, but the client accepts, so everything succeeds.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: server-initiated, client-accepted, delay 0" \
            "$P_SRV debug_level=3 exchanges=2 renegotiation=1 renegotiate=1 \
             renego_delay=0 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -s "write hello request" \
            -S "SSL - An unexpected message was received from our peer" \
            -S "failed"
# renego_period=3 with only 2 exchanges: the record counter never reaches the
# limit, so no renegotiation should be triggered.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: periodic, just below period" \
            "$P_SRV debug_level=3 exchanges=9 renegotiation=1 renego_period=3 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=2 renegotiation=1" \
            0 \
            -C "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -S "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -S "record counter limit reached: renegotiate" \
            -C "=> renegotiate" \
            -S "=> renegotiate" \
            -S "write hello request" \
            -S "SSL - An unexpected message was received from our peer" \
            -S "failed"

# one extra exchange to be able to complete renego

# 4 exchanges cross the period once: expect exactly the counter-triggered
# renegotiation to happen and complete.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: periodic, just above period" \
            "$P_SRV debug_level=3 exchanges=9 renegotiation=1 renego_period=3 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=4 renegotiation=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -s "record counter limit reached: renegotiate" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -s "write hello request" \
            -S "SSL - An unexpected message was received from our peer" \
            -S "failed"

# 7 exchanges cross the period twice; still expected to succeed.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: periodic, two times period" \
            "$P_SRV debug_level=3 exchanges=9 renegotiation=1 renego_period=3 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=7 renegotiation=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -s "record counter limit reached: renegotiate" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -s "write hello request" \
            -S "SSL - An unexpected message was received from our peer" \
            -S "failed"

# renegotiation=0 on the server overrides renego_period: no renegotiation
# even though the counter limit would be crossed.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: periodic, above period, disabled" \
            "$P_SRV debug_level=3 exchanges=9 renegotiation=0 renego_period=3 auth_mode=optional" \
            "$P_CLI debug_level=3 exchanges=4 renegotiation=1" \
            0 \
            -C "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -S "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -S "record counter limit reached: renegotiate" \
            -C "=> renegotiate" \
            -S "=> renegotiate" \
            -S "write hello request" \
            -S "SSL - An unexpected message was received from our peer" \
            -S "failed"
# Same client/server-initiated renegotiation flows, but with non-blocking
# I/O (nbio=2) on both endpoints.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: nbio, client-initiated" \
            "$P_SRV debug_level=3 nbio=2 exchanges=2 renegotiation=1 auth_mode=optional" \
            "$P_CLI debug_level=3 nbio=2 exchanges=2 renegotiation=1 renegotiate=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -S "write hello request"

requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: nbio, server-initiated" \
            "$P_SRV debug_level=3 nbio=2 exchanges=2 renegotiation=1 renegotiate=1 auth_mode=optional" \
            "$P_CLI debug_level=3 nbio=2 exchanges=2 renegotiation=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -s "write hello request"
# Interop: client-initiated renegotiation against an OpenSSL server must
# succeed and the HTTP exchange must complete.
# The negative check uses "mbedtls_ssl_handshake returned", matching the
# client's actual failure message (cf. the DTLS gnutls test below); the
# previous pattern "ssl_hanshake() returned" was misspelled and could never
# match, making the check a no-op.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: openssl server, client-initiated" \
            "$O_SRV -www" \
            "$P_CLI debug_level=3 exchanges=1 renegotiation=1 renegotiate=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -C "mbedtls_ssl_handshake returned" \
            -C "error" \
            -c "HTTP/1.0 200 [Oo][Kk]"
# Interop: client-initiated renegotiation against a GnuTLS server with safe
# renegotiation enforced must succeed.
# The negative check uses "mbedtls_ssl_handshake returned" — the client's
# actual failure message; the previous pattern "ssl_hanshake() returned" was
# misspelled and could never match, making the check a no-op.
requires_gnutls
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: gnutls server strict, client-initiated" \
            "$G_SRV --priority=NORMAL:%SAFE_RENEGOTIATION" \
            "$P_CLI debug_level=3 exchanges=1 renegotiation=1 renegotiate=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -C "mbedtls_ssl_handshake returned" \
            -C "error" \
            -c "HTTP/1.0 200 [Oo][Kk]"
# GnuTLS server without safe renegotiation: with default settings (legacy
# renegotiation not allowed) the mbed TLS client must abort the handshake.
requires_gnutls
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: gnutls server unsafe, client-initiated default" \
            "$G_SRV --priority=NORMAL:%DISABLE_SAFE_RENEGOTIATION" \
            "$P_CLI debug_level=3 exchanges=1 renegotiation=1 renegotiate=1" \
            1 \
            -c "client hello, adding renegotiation extension" \
            -C "found renegotiation extension" \
            -c "=> renegotiate" \
            -c "mbedtls_ssl_handshake() returned" \
            -c "error" \
            -C "HTTP/1.0 200 [Oo][Kk]"
# GnuTLS server without safe renegotiation, legacy explicitly forbidden
# (allow_legacy=0): the renegotiation attempt must fail.
# (Test names below fix the "inititated" misspelling so they line up with
# the other "client-initiated" test names for name-based filtering.)
requires_gnutls
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: gnutls server unsafe, client-initiated no legacy" \
            "$G_SRV --priority=NORMAL:%DISABLE_SAFE_RENEGOTIATION" \
            "$P_CLI debug_level=3 exchanges=1 renegotiation=1 renegotiate=1 \
             allow_legacy=0" \
            1 \
            -c "client hello, adding renegotiation extension" \
            -C "found renegotiation extension" \
            -c "=> renegotiate" \
            -c "mbedtls_ssl_handshake() returned" \
            -c "error" \
            -C "HTTP/1.0 200 [Oo][Kk]"

# Same server, but legacy renegotiation allowed (allow_legacy=1): succeeds.
# The negative check uses "mbedtls_ssl_handshake returned" — the client's
# actual failure message; the previous "ssl_hanshake() returned" pattern was
# misspelled and could never match, making the check a no-op.
requires_gnutls
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: gnutls server unsafe, client-initiated legacy" \
            "$G_SRV --priority=NORMAL:%DISABLE_SAFE_RENEGOTIATION" \
            "$P_CLI debug_level=3 exchanges=1 renegotiation=1 renegotiate=1 \
             allow_legacy=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -C "found renegotiation extension" \
            -c "=> renegotiate" \
            -C "mbedtls_ssl_handshake returned" \
            -C "error" \
            -c "HTTP/1.0 200 [Oo][Kk]"
# DTLS renegotiation, client-initiated.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: DTLS, client-initiated" \
            "$P_SRV debug_level=3 dtls=1 exchanges=2 renegotiation=1" \
            "$P_CLI debug_level=3 dtls=1 exchanges=2 renegotiation=1 renegotiate=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -S "write hello request"

# DTLS renegotiation, server-initiated; client uses a longer read timeout
# and allows retransmissions to ride out datagram loss/reordering.
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: DTLS, server-initiated" \
            "$P_SRV debug_level=3 dtls=1 exchanges=2 renegotiation=1 renegotiate=1" \
            "$P_CLI debug_level=3 dtls=1 exchanges=2 renegotiation=1 \
             read_timeout=1000 max_resend=2" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -s "write hello request"

# renego_period near 2^64: exercises the 64-bit record-counter comparison
# (the huge value still triggers via epoch/counter wrap handling).
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: DTLS, renego_period overflow" \
            "$P_SRV debug_level=3 dtls=1 exchanges=4 renegotiation=1 renego_period=18446462598732840962 auth_mode=optional" \
            "$P_CLI debug_level=3 dtls=1 exchanges=4 renegotiation=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO" \
            -s "found renegotiation extension" \
            -s "server hello, secure renegotiation extension" \
            -s "record counter limit reached: renegotiate" \
            -c "=> renegotiate" \
            -s "=> renegotiate" \
            -s "write hello request"

# DTLS interop: client-initiated renegotiation against a GnuTLS server.
requires_gnutls
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test    "Renegotiation: DTLS, gnutls server, client-initiated" \
            "$G_SRV -u --mtu 4096" \
            "$P_CLI debug_level=3 dtls=1 exchanges=1 renegotiation=1 renegotiate=1" \
            0 \
            -c "client hello, adding renegotiation extension" \
            -c "found renegotiation extension" \
            -c "=> renegotiate" \
            -C "mbedtls_ssl_handshake returned" \
            -C "error" \
            -s "Extra-header:"
# Test for the "secure renegotation" extension only (no actual renegotiation)

# GnuTLS server sends the extension; the client must see it and complete.
requires_gnutls
run_test    "Renego ext: gnutls server strict, client default" \
            "$G_SRV --priority=NORMAL:%SAFE_RENEGOTIATION" \
            "$P_CLI debug_level=3" \
            0 \
            -c "found renegotiation extension" \
            -C "error" \
            -c "HTTP/1.0 200 [Oo][Kk]"

# Extension absent from the server: connection still succeeds with the
# default (legacy-tolerant) client policy.
requires_gnutls
run_test    "Renego ext: gnutls server unsafe, client default" \
            "$G_SRV --priority=NORMAL:%DISABLE_SAFE_RENEGOTIATION" \
            "$P_CLI debug_level=3" \
            0 \
            -C "found renegotiation extension" \
            -C "error" \
            -c "HTTP/1.0 200 [Oo][Kk]"

# allow_legacy=-1: client breaks off when the server lacks the extension.
requires_gnutls
run_test    "Renego ext: gnutls server unsafe, client break legacy" \
            "$G_SRV --priority=NORMAL:%DISABLE_SAFE_RENEGOTIATION" \
            "$P_CLI debug_level=3 allow_legacy=-1" \
            1 \
            -C "found renegotiation extension" \
            -c "error" \
            -C "HTTP/1.0 200 [Oo][Kk]"

# Mirror cases with a GnuTLS client: the \| in the -s pattern is a grep
# alternation (either SCSV or the extension counts as the client's signal).
requires_gnutls
run_test    "Renego ext: gnutls client strict, server default" \
            "$P_SRV debug_level=3" \
            "$G_CLI --priority=NORMAL:%SAFE_RENEGOTIATION localhost" \
            0 \
            -s "received TLS_EMPTY_RENEGOTIATION_INFO\|found renegotiation extension" \
            -s "server hello, secure renegotiation extension"

requires_gnutls
run_test    "Renego ext: gnutls client unsafe, server default" \
            "$P_SRV debug_level=3" \
            "$G_CLI --priority=NORMAL:%DISABLE_SAFE_RENEGOTIATION localhost" \
            0 \
            -S "received TLS_EMPTY_RENEGOTIATION_INFO\|found renegotiation extension" \
            -S "server hello, secure renegotiation extension"

# Server with allow_legacy=-1 rejects a client that sent no renego signal.
requires_gnutls
run_test    "Renego ext: gnutls client unsafe, server break legacy" \
            "$P_SRV debug_level=3 allow_legacy=-1" \
            "$G_CLI --priority=NORMAL:%DISABLE_SAFE_RENEGOTIATION localhost" \
            1 \
            -S "received TLS_EMPTY_RENEGOTIATION_INFO\|found renegotiation extension" \
            -S "server hello, secure renegotiation extension"
# Tests for silently dropping trailing extra bytes in .der certificates
#
# Each test loads a server5 DER certificate with 0..9 trailing garbage bytes
# and only requires the GnuTLS client to report a completed handshake.
# NOTE: each test's final "-c" option must NOT end in a line continuation:
# with a trailing backslash the next line (requires_gnutls / a comment)
# would be absorbed into the run_test argument list, silently dropping the
# guard for the following test.
requires_gnutls
run_test    "DER format: no trailing bytes" \
            "$P_SRV crt_file=data_files/server5-der0.crt \
             key_file=data_files/server5.key" \
            "$G_CLI localhost" \
            0 \
            -c "Handshake was completed"

requires_gnutls
run_test    "DER format: with a trailing zero byte" \
            "$P_SRV crt_file=data_files/server5-der1a.crt \
             key_file=data_files/server5.key" \
            "$G_CLI localhost" \
            0 \
            -c "Handshake was completed"

requires_gnutls
run_test    "DER format: with a trailing random byte" \
            "$P_SRV crt_file=data_files/server5-der1b.crt \
             key_file=data_files/server5.key" \
            "$G_CLI localhost" \
            0 \
            -c "Handshake was completed"

requires_gnutls
run_test    "DER format: with 2 trailing random bytes" \
            "$P_SRV crt_file=data_files/server5-der2.crt \
             key_file=data_files/server5.key" \
            "$G_CLI localhost" \
            0 \
            -c "Handshake was completed"

requires_gnutls
run_test    "DER format: with 4 trailing random bytes" \
            "$P_SRV crt_file=data_files/server5-der4.crt \
             key_file=data_files/server5.key" \
            "$G_CLI localhost" \
            0 \
            -c "Handshake was completed"

requires_gnutls
run_test    "DER format: with 8 trailing random bytes" \
            "$P_SRV crt_file=data_files/server5-der8.crt \
             key_file=data_files/server5.key" \
            "$G_CLI localhost" \
            0 \
            -c "Handshake was completed"

requires_gnutls
run_test    "DER format: with 9 trailing random bytes" \
            "$P_SRV crt_file=data_files/server5-der9.crt \
             key_file=data_files/server5.key" \
            "$G_CLI localhost" \
            0 \
            -c "Handshake was completed"
# Tests for auth_mode

# Server presents a badly-signed cert; auth_mode=required must abort.
run_test    "Authentication: server badcert, client required" \
            "$P_SRV crt_file=data_files/server5-badsign.crt \
             key_file=data_files/server5.key" \
            "$P_CLI debug_level=1 auth_mode=required" \
            1 \
            -c "x509_verify_cert() returned" \
            -c "! The certificate is not correctly signed by the trusted CA" \
            -c "! mbedtls_ssl_handshake returned" \
            -c "X509 - Certificate verification failed"

# Same bad cert with auth_mode=optional: verification fails but the
# handshake is allowed to complete.
run_test    "Authentication: server badcert, client optional" \
            "$P_SRV crt_file=data_files/server5-badsign.crt \
             key_file=data_files/server5.key" \
            "$P_CLI debug_level=1 auth_mode=optional" \
            0 \
            -c "x509_verify_cert() returned" \
            -c "! The certificate is not correctly signed by the trusted CA" \
            -C "! mbedtls_ssl_handshake returned" \
            -C "X509 - Certificate verification failed"

# Good cert but no trusted CA configured: optional mode proceeds with
# verification flags set; required mode must abort with "No CA Chain".
run_test    "Authentication: server goodcert, client optional, no trusted CA" \
            "$P_SRV" \
            "$P_CLI debug_level=3 auth_mode=optional ca_file=none ca_path=none" \
            0 \
            -c "x509_verify_cert() returned" \
            -c "! The certificate is not correctly signed by the trusted CA" \
            -c "! Certificate verification flags"\
            -C "! mbedtls_ssl_handshake returned" \
            -C "X509 - Certificate verification failed" \
            -C "SSL - No CA Chain is set, but required to operate"

run_test    "Authentication: server goodcert, client required, no trusted CA" \
            "$P_SRV" \
            "$P_CLI debug_level=3 auth_mode=required ca_file=none ca_path=none" \
            1 \
            -c "x509_verify_cert() returned" \
            -c "! The certificate is not correctly signed by the trusted CA" \
            -c "! Certificate verification flags"\
            -c "! mbedtls_ssl_handshake returned" \
            -c "SSL - No CA Chain is set, but required to operate"
# The purpose of the next two tests is to test the client's behaviour when receiving a server
# certificate with an unsupported elliptic curve. This should usually not happen because
# the client informs the server about the supported curves - it does, though, in the
# corner case of a static ECDH suite, because the server doesn't check the curve on that
# occasion (to be fixed). If that bug's fixed, the test needs to be altered to use a
# different means to have the server ignoring the client's supported curve list.
requires_config_enabled MBEDTLS_ECP_C
run_test    "Authentication: server ECDH p256v1, client required, p256v1 unsupported" \
            "$P_SRV debug_level=1 key_file=data_files/server5.key \
             crt_file=data_files/server5.ku-ka.crt" \
            "$P_CLI debug_level=3 auth_mode=required curves=secp521r1" \
            1 \
            -c "bad certificate (EC key curve)"\
            -c "! Certificate verification flags"\
            -C "bad server certificate (ECDH curve)" # Expect failure at earlier verification stage

# With auth_mode=optional the cert-verification stage does not abort, so the
# failure surfaces later at the ECDH parameter check instead.
requires_config_enabled MBEDTLS_ECP_C
run_test    "Authentication: server ECDH p256v1, client optional, p256v1 unsupported" \
            "$P_SRV debug_level=1 key_file=data_files/server5.key \
             crt_file=data_files/server5.ku-ka.crt" \
            "$P_CLI debug_level=3 auth_mode=optional curves=secp521r1" \
            1 \
            -c "bad certificate (EC key curve)"\
            -c "! Certificate verification flags"\
            -c "bad server certificate (ECDH curve)" # Expect failure only at ECDH params check
# auth_mode=none: the client skips certificate verification entirely.
run_test    "Authentication: server badcert, client none" \
            "$P_SRV crt_file=data_files/server5-badsign.crt \
             key_file=data_files/server5.key" \
            "$P_CLI debug_level=1 auth_mode=none" \
            0 \
            -C "x509_verify_cert() returned" \
            -C "! The certificate is not correctly signed by the trusted CA" \
            -C "! mbedtls_ssl_handshake returned" \
            -C "X509 - Certificate verification failed"

# Client authentication with an ECDSA cert: the client must see both
# SHA-256 (4) and SHA-384 (5) in the server's supported signature
# algorithms — NOTE(review): 4/5 presumed to be the TLS 1.2 HashAlgorithm
# code points; confirm against the CertificateRequest parsing code.
run_test    "Authentication: client SHA256, server required" \
            "$P_SRV auth_mode=required" \
            "$P_CLI debug_level=3 crt_file=data_files/server6.crt \
             key_file=data_files/server6.key \
             force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384" \
            0 \
            -c "Supported Signature Algorithm found: 4," \
            -c "Supported Signature Algorithm found: 5,"

run_test    "Authentication: client SHA384, server required" \
            "$P_SRV auth_mode=required" \
            "$P_CLI debug_level=3 crt_file=data_files/server6.crt \
             key_file=data_files/server6.key \
             force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256" \
            0 \
            -c "Supported Signature Algorithm found: 4," \
            -c "Supported Signature Algorithm found: 5,"
# Server requires a client cert but none is provided (SSLv3 variant):
# handshake must fail on both sides.
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test    "Authentication: client has no cert, server required (SSLv3)" \
            "$P_SRV debug_level=3 min_version=ssl3 auth_mode=required" \
            "$P_CLI debug_level=3 force_version=ssl3 crt_file=none \
             key_file=data_files/server5.key" \
            1 \
            -S "skip write certificate request" \
            -C "skip parse certificate request" \
            -c "got a certificate request" \
            -c "got no certificate to send" \
            -S "x509_verify_cert() returned" \
            -s "client has no certificate" \
            -s "! mbedtls_ssl_handshake returned" \
            -c "! mbedtls_ssl_handshake returned" \
            -s "No client certification received from the client, but required by the authentication mode"

# TLS variant: the client sends an empty Certificate message (the $-anchored
# patterns distinguish "write certificate" from "write certificate verify").
run_test    "Authentication: client has no cert, server required (TLS)" \
            "$P_SRV debug_level=3 auth_mode=required" \
            "$P_CLI debug_level=3 crt_file=none \
             key_file=data_files/server5.key" \
            1 \
            -S "skip write certificate request" \
            -C "skip parse certificate request" \
            -c "got a certificate request" \
            -c "= write certificate$" \
            -C "skip write certificate$" \
            -S "x509_verify_cert() returned" \
            -s "client has no certificate" \
            -s "! mbedtls_ssl_handshake returned" \
            -c "! mbedtls_ssl_handshake returned" \
            -s "No client certification received from the client, but required by the authentication mode"

# Badly-signed client cert with auth_mode=required: server verifies, fails,
# and sends a fatal bad_certificate alert (level=2 message=48).
run_test    "Authentication: client badcert, server required" \
            "$P_SRV debug_level=3 auth_mode=required" \
            "$P_CLI debug_level=3 crt_file=data_files/server5-badsign.crt \
             key_file=data_files/server5.key" \
            1 \
            -S "skip write certificate request" \
            -C "skip parse certificate request" \
            -c "got a certificate request" \
            -C "skip write certificate" \
            -C "skip write certificate verify" \
            -S "skip parse certificate verify" \
            -s "x509_verify_cert() returned" \
            -s "! The certificate is not correctly signed by the trusted CA" \
            -s "! mbedtls_ssl_handshake returned" \
            -s "send alert level=2 message=48" \
            -c "! mbedtls_ssl_handshake returned" \
            -s "X509 - Certificate verification failed"
# We don't check that the client receives the alert because it might
# detect that its write end of the connection is closed and abort
# before reading the alert message.

# Self-signed (untrusted) client cert: same server-side failure mode.
run_test    "Authentication: client cert not trusted, server required" \
            "$P_SRV debug_level=3 auth_mode=required" \
            "$P_CLI debug_level=3 crt_file=data_files/server5-selfsigned.crt \
             key_file=data_files/server5.key" \
            1 \
            -S "skip write certificate request" \
            -C "skip parse certificate request" \
            -c "got a certificate request" \
            -C "skip write certificate" \
            -C "skip write certificate verify" \
            -S "skip parse certificate verify" \
            -s "x509_verify_cert() returned" \
            -s "! The certificate is not correctly signed by the trusted CA" \
            -s "! mbedtls_ssl_handshake returned" \
            -c "! mbedtls_ssl_handshake returned" \
            -s "X509 - Certificate verification failed"
# Bad client cert with auth_mode=optional: verification fails but the
# handshake completes.
run_test    "Authentication: client badcert, server optional" \
            "$P_SRV debug_level=3 auth_mode=optional" \
            "$P_CLI debug_level=3 crt_file=data_files/server5-badsign.crt \
             key_file=data_files/server5.key" \
            0 \
            -S "skip write certificate request" \
            -C "skip parse certificate request" \
            -c "got a certificate request" \
            -C "skip write certificate" \
            -C "skip write certificate verify" \
            -S "skip parse certificate verify" \
            -s "x509_verify_cert() returned" \
            -s "! The certificate is not correctly signed by the trusted CA" \
            -S "! mbedtls_ssl_handshake returned" \
            -C "! mbedtls_ssl_handshake returned" \
            -S "X509 - Certificate verification failed"

# auth_mode=none: server never requests a certificate at all.
run_test    "Authentication: client badcert, server none" \
            "$P_SRV debug_level=3 auth_mode=none" \
            "$P_CLI debug_level=3 crt_file=data_files/server5-badsign.crt \
             key_file=data_files/server5.key" \
            0 \
            -s "skip write certificate request" \
            -C "skip parse certificate request" \
            -c "got no certificate request" \
            -c "skip write certificate" \
            -c "skip write certificate verify" \
            -s "skip parse certificate verify" \
            -S "x509_verify_cert() returned" \
            -S "! The certificate is not correctly signed by the trusted CA" \
            -S "! mbedtls_ssl_handshake returned" \
            -C "! mbedtls_ssl_handshake returned" \
            -S "X509 - Certificate verification failed"

# No client cert, auth_mode=optional: server accepts the missing cert.
run_test    "Authentication: client no cert, server optional" \
            "$P_SRV debug_level=3 auth_mode=optional" \
            "$P_CLI debug_level=3 crt_file=none key_file=none" \
            0 \
            -S "skip write certificate request" \
            -C "skip parse certificate request" \
            -c "got a certificate request" \
            -C "skip write certificate$" \
            -C "got no certificate to send" \
            -S "SSLv3 client has no certificate" \
            -c "skip write certificate verify" \
            -s "skip parse certificate verify" \
            -s "! Certificate was missing" \
            -S "! mbedtls_ssl_handshake returned" \
            -C "! mbedtls_ssl_handshake returned" \
            -S "X509 - Certificate verification failed"
# Interop: OpenSSL client without a cert, mbed TLS server optional.
run_test    "Authentication: openssl client no cert, server optional" \
            "$P_SRV debug_level=3 auth_mode=optional" \
            "$O_CLI" \
            0 \
            -S "skip write certificate request" \
            -s "skip parse certificate verify" \
            -s "! Certificate was missing" \
            -S "! mbedtls_ssl_handshake returned" \
            -S "X509 - Certificate verification failed"

# Interop: mbed TLS client without a cert against OpenSSL servers
# (-verify requests a cert, -Verify requires one).
run_test    "Authentication: client no cert, openssl server optional" \
            "$O_SRV -verify 10" \
            "$P_CLI debug_level=3 crt_file=none key_file=none" \
            0 \
            -C "skip parse certificate request" \
            -c "got a certificate request" \
            -C "skip write certificate$" \
            -c "skip write certificate verify" \
            -C "! mbedtls_ssl_handshake returned"

run_test    "Authentication: client no cert, openssl server required" \
            "$O_SRV -Verify 10" \
            "$P_CLI debug_level=3 crt_file=none key_file=none" \
            1 \
            -C "skip parse certificate request" \
            -c "got a certificate request" \
            -C "skip write certificate$" \
            -c "skip write certificate verify" \
            -c "! mbedtls_ssl_handshake returned"

# SSLv3 signals "no cert" with a warning alert rather than an empty
# Certificate message; optional mode still completes.
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test    "Authentication: client no cert, ssl3" \
            "$P_SRV debug_level=3 auth_mode=optional force_version=ssl3" \
            "$P_CLI debug_level=3 crt_file=none key_file=none min_version=ssl3" \
            0 \
            -S "skip write certificate request" \
            -C "skip parse certificate request" \
            -c "got a certificate request" \
            -C "skip write certificate$" \
            -c "skip write certificate verify" \
            -c "got no certificate to send" \
            -s "SSLv3 client has no certificate" \
            -s "skip parse certificate verify" \
            -s "! Certificate was missing" \
            -S "! mbedtls_ssl_handshake returned" \
            -C "! mbedtls_ssl_handshake returned" \
            -S "X509 - Certificate verification failed"
# The "max_int chain" tests assume that MAX_INTERMEDIATE_CA is set to its
# default value (8)
MAX_IM_CA='8'
# Query the build configuration; the result is empty when the option is not
# explicitly set (i.e. the compiled-in default applies).
MAX_IM_CA_CONFIG=$( ../scripts/config.pl get MBEDTLS_X509_MAX_INTERMEDIATE_CA)
# Abort early rather than run the chain-length tests against a build whose
# configured limit differs from the value the tests assume.
if [ -n "$MAX_IM_CA_CONFIG" ] && [ "$MAX_IM_CA_CONFIG" -ne "$MAX_IM_CA" ]; then
printf "The ${CONFIG_H} file contains a value for the configuration of\n"
printf "MBEDTLS_X509_MAX_INTERMEDIATE_CA that is different from the script’s\n"
printf "test value of ${MAX_IM_CA}. \n"
printf "\n"
printf "The tests assume this value and if it changes, the tests in this\n"
printf "script should also be adjusted.\n"
printf "\n"
exit 1
fi
# Chain-length tests: the "c09" chain must verify, while the longer "c10"
# chain must be rejected unless the verifying side uses auth_mode=none.
# NOTE(review): "occured" in the patterns below mirrors the library's log
# output — keep the spelling in sync with the library; do not correct it here.
requires_full_size_output_buffer
run_test "Authentication: server max_int chain, client default" \
"$P_SRV crt_file=data_files/dir-maxpath/c09.pem \
key_file=data_files/dir-maxpath/09.key" \
"$P_CLI server_name=CA09 ca_file=data_files/dir-maxpath/00.crt" \
0 \
-C "X509 - A fatal error occured"
requires_full_size_output_buffer
run_test "Authentication: server max_int+1 chain, client default" \
"$P_SRV crt_file=data_files/dir-maxpath/c10.pem \
key_file=data_files/dir-maxpath/10.key" \
"$P_CLI server_name=CA10 ca_file=data_files/dir-maxpath/00.crt" \
1 \
-c "X509 - A fatal error occured"
requires_full_size_output_buffer
run_test "Authentication: server max_int+1 chain, client optional" \
"$P_SRV crt_file=data_files/dir-maxpath/c10.pem \
key_file=data_files/dir-maxpath/10.key" \
"$P_CLI server_name=CA10 ca_file=data_files/dir-maxpath/00.crt \
auth_mode=optional" \
1 \
-c "X509 - A fatal error occured"
# With auth_mode=none the client does not verify, so the over-long chain
# is accepted.
requires_full_size_output_buffer
run_test "Authentication: server max_int+1 chain, client none" \
"$P_SRV crt_file=data_files/dir-maxpath/c10.pem \
key_file=data_files/dir-maxpath/10.key" \
"$P_CLI server_name=CA10 ca_file=data_files/dir-maxpath/00.crt \
auth_mode=none" \
0 \
-C "X509 - A fatal error occured"
requires_full_size_output_buffer
run_test "Authentication: client max_int+1 chain, server default" \
"$P_SRV ca_file=data_files/dir-maxpath/00.crt" \
"$P_CLI crt_file=data_files/dir-maxpath/c10.pem \
key_file=data_files/dir-maxpath/10.key" \
0 \
-S "X509 - A fatal error occured"
requires_full_size_output_buffer
run_test "Authentication: client max_int+1 chain, server optional" \
"$P_SRV ca_file=data_files/dir-maxpath/00.crt auth_mode=optional" \
"$P_CLI crt_file=data_files/dir-maxpath/c10.pem \
key_file=data_files/dir-maxpath/10.key" \
1 \
-s "X509 - A fatal error occured"
requires_full_size_output_buffer
run_test "Authentication: client max_int+1 chain, server required" \
"$P_SRV ca_file=data_files/dir-maxpath/00.crt auth_mode=required" \
"$P_CLI crt_file=data_files/dir-maxpath/c10.pem \
key_file=data_files/dir-maxpath/10.key" \
1 \
-s "X509 - A fatal error occured"
requires_full_size_output_buffer
run_test "Authentication: client max_int chain, server required" \
"$P_SRV ca_file=data_files/dir-maxpath/00.crt auth_mode=required" \
"$P_CLI crt_file=data_files/dir-maxpath/c09.pem \
key_file=data_files/dir-maxpath/09.key" \
0 \
-S "X509 - A fatal error occured"
# Tests for CA list in CertificateRequest messages
# cert_req_ca_list=0 suppresses the DN list; the "requested DN" pattern in
# the server debug log distinguishes the two behaviours.
run_test "Authentication: send CA list in CertificateRequest (default)" \
"$P_SRV debug_level=3 auth_mode=required" \
"$P_CLI crt_file=data_files/server6.crt \
key_file=data_files/server6.key" \
0 \
-s "requested DN"
run_test "Authentication: do not send CA list in CertificateRequest" \
"$P_SRV debug_level=3 auth_mode=required cert_req_ca_list=0" \
"$P_CLI crt_file=data_files/server6.crt \
key_file=data_files/server6.key" \
0 \
-S "requested DN"
# A self-signed client certificate is not signed by the server's trusted CA,
# so verification must fail and the handshake abort.
run_test "Authentication: send CA list in CertificateRequest, client self signed" \
"$P_SRV debug_level=3 auth_mode=required cert_req_ca_list=0" \
"$P_CLI debug_level=3 crt_file=data_files/server5-selfsigned.crt \
key_file=data_files/server5.key" \
1 \
-S "requested DN" \
-s "x509_verify_cert() returned" \
-s "! The certificate is not correctly signed by the trusted CA" \
-s "! mbedtls_ssl_handshake returned" \
-c "! mbedtls_ssl_handshake returned" \
-s "X509 - Certificate verification failed"
# Tests for certificate selection based on SHA verson
# The server is configured with both a SHA-256 certificate (crt_file) and a
# SHA-1 one (crt_file2); it must serve the one matching the negotiated
# protocol version (TLS 1.2 -> SHA-2, older versions -> SHA-1).
run_test "Certificate hash: client TLS 1.2 -> SHA-2" \
"$P_SRV crt_file=data_files/server5.crt \
key_file=data_files/server5.key \
crt_file2=data_files/server5-sha1.crt \
key_file2=data_files/server5.key" \
"$P_CLI force_version=tls1_2" \
0 \
-c "signed using.*ECDSA with SHA256" \
-C "signed using.*ECDSA with SHA1"
run_test "Certificate hash: client TLS 1.1 -> SHA-1" \
"$P_SRV crt_file=data_files/server5.crt \
key_file=data_files/server5.key \
crt_file2=data_files/server5-sha1.crt \
key_file2=data_files/server5.key" \
"$P_CLI force_version=tls1_1" \
0 \
-C "signed using.*ECDSA with SHA256" \
-c "signed using.*ECDSA with SHA1"
run_test "Certificate hash: client TLS 1.0 -> SHA-1" \
"$P_SRV crt_file=data_files/server5.crt \
key_file=data_files/server5.key \
crt_file2=data_files/server5-sha1.crt \
key_file2=data_files/server5.key" \
"$P_CLI force_version=tls1" \
0 \
-C "signed using.*ECDSA with SHA256" \
-c "signed using.*ECDSA with SHA1"
# When no SHA-1 certificate is configured, the SHA-2 one is served even to a
# pre-1.2 client, regardless of which slot it occupies (order 1 / order 2);
# the serial-number patterns check which certificate was actually sent.
run_test "Certificate hash: client TLS 1.1, no SHA-1 -> SHA-2 (order 1)" \
"$P_SRV crt_file=data_files/server5.crt \
key_file=data_files/server5.key \
crt_file2=data_files/server6.crt \
key_file2=data_files/server6.key" \
"$P_CLI force_version=tls1_1" \
0 \
-c "serial number.*09" \
-c "signed using.*ECDSA with SHA256" \
-C "signed using.*ECDSA with SHA1"
run_test "Certificate hash: client TLS 1.1, no SHA-1 -> SHA-2 (order 2)" \
"$P_SRV crt_file=data_files/server6.crt \
key_file=data_files/server6.key \
crt_file2=data_files/server5.crt \
key_file2=data_files/server5.key" \
"$P_CLI force_version=tls1_1" \
0 \
-c "serial number.*0A" \
-c "signed using.*ECDSA with SHA256" \
-C "signed using.*ECDSA with SHA1"
# Tests for SNI (Server Name Indication): per-name certificate selection and
# per-name client-auth / CA / CRL overrides.
# NOTE(review): the sni= option takes 6 comma-separated fields per entry —
# name,crt,key,ca,crl,auth_mode, with "-" leaving a field unset — confirm
# against the ssl_server2 sni parsing.
run_test "SNI: no SNI callback" \
"$P_SRV debug_level=3 \
crt_file=data_files/server5.crt key_file=data_files/server5.key" \
"$P_CLI server_name=localhost" \
0 \
-S "parse ServerName extension" \
-c "issuer name *: C=NL, O=PolarSSL, CN=Polarssl Test EC CA" \
-c "subject name *: C=NL, O=PolarSSL, CN=localhost"
run_test "SNI: matching cert 1" \
"$P_SRV debug_level=3 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,-,polarssl.example,data_files/server1-nospace.crt,data_files/server1.key,-,-,-" \
"$P_CLI server_name=localhost" \
0 \
-s "parse ServerName extension" \
-c "issuer name *: C=NL, O=PolarSSL, CN=PolarSSL Test CA" \
-c "subject name *: C=NL, O=PolarSSL, CN=localhost"
run_test "SNI: matching cert 2" \
"$P_SRV debug_level=3 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,-,polarssl.example,data_files/server1-nospace.crt,data_files/server1.key,-,-,-" \
"$P_CLI server_name=polarssl.example" \
0 \
-s "parse ServerName extension" \
-c "issuer name *: C=NL, O=PolarSSL, CN=PolarSSL Test CA" \
-c "subject name *: C=NL, O=PolarSSL, CN=polarssl.example"
# A name with no sni= entry makes the SNI callback fail and aborts the
# handshake with a fatal alert to the client.
run_test "SNI: no matching cert" \
"$P_SRV debug_level=3 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,-,polarssl.example,data_files/server1-nospace.crt,data_files/server1.key,-,-,-" \
"$P_CLI server_name=nonesuch.example" \
1 \
-s "parse ServerName extension" \
-s "ssl_sni_wrapper() returned" \
-s "mbedtls_ssl_handshake returned" \
-c "mbedtls_ssl_handshake returned" \
-c "SSL - A fatal alert message was received from our peer"
# Per-name auth_mode overrides: "-" keeps the global auth_mode, an explicit
# value replaces it for that name only.
run_test "SNI: client auth no override: optional" \
"$P_SRV debug_level=3 auth_mode=optional \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,-" \
"$P_CLI debug_level=3 server_name=localhost" \
0 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify"
run_test "SNI: client auth override: none -> optional" \
"$P_SRV debug_level=3 auth_mode=none \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,optional" \
"$P_CLI debug_level=3 server_name=localhost" \
0 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify"
run_test "SNI: client auth override: optional -> none" \
"$P_SRV debug_level=3 auth_mode=optional \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,none" \
"$P_CLI debug_level=3 server_name=localhost" \
0 \
-s "skip write certificate request" \
-C "skip parse certificate request" \
-c "got no certificate request" \
-c "skip write certificate" \
-c "skip write certificate verify" \
-s "skip parse certificate verify"
# Per-name CA/CRL overrides: the client cert (server6, signed by test-ca2)
# only verifies when the sni entry supplies the matching CA; supplying the
# CRL as well must then get it rejected as revoked.
run_test "SNI: CA no override" \
"$P_SRV debug_level=3 auth_mode=optional \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
ca_file=data_files/test-ca.crt \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,required" \
"$P_CLI debug_level=3 server_name=localhost \
crt_file=data_files/server6.crt key_file=data_files/server6.key" \
1 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify" \
-s "x509_verify_cert() returned" \
-s "! The certificate is not correctly signed by the trusted CA" \
-S "The certificate has been revoked (is on a CRL)"
run_test "SNI: CA override" \
"$P_SRV debug_level=3 auth_mode=optional \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
ca_file=data_files/test-ca.crt \
sni=localhost,data_files/server2.crt,data_files/server2.key,data_files/test-ca2.crt,-,required" \
"$P_CLI debug_level=3 server_name=localhost \
crt_file=data_files/server6.crt key_file=data_files/server6.key" \
0 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify" \
-S "x509_verify_cert() returned" \
-S "! The certificate is not correctly signed by the trusted CA" \
-S "The certificate has been revoked (is on a CRL)"
run_test "SNI: CA override with CRL" \
"$P_SRV debug_level=3 auth_mode=optional \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
ca_file=data_files/test-ca.crt \
sni=localhost,data_files/server2.crt,data_files/server2.key,data_files/test-ca2.crt,data_files/crl-ec-sha256.pem,required" \
"$P_CLI debug_level=3 server_name=localhost \
crt_file=data_files/server6.crt key_file=data_files/server6.key" \
1 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify" \
-s "x509_verify_cert() returned" \
-S "! The certificate is not correctly signed by the trusted CA" \
-s "The certificate has been revoked (is on a CRL)"
# Tests for SNI and DTLS
# Same scenarios as the TLS SNI tests, run over DTLS (dtls=1 on both ends).
run_test "SNI: DTLS, no SNI callback" \
"$P_SRV debug_level=3 dtls=1 \
crt_file=data_files/server5.crt key_file=data_files/server5.key" \
"$P_CLI server_name=localhost dtls=1" \
0 \
-S "parse ServerName extension" \
-c "issuer name *: C=NL, O=PolarSSL, CN=Polarssl Test EC CA" \
-c "subject name *: C=NL, O=PolarSSL, CN=localhost"
run_test "SNI: DTLS, matching cert 1" \
"$P_SRV debug_level=3 dtls=1 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,-,polarssl.example,data_files/server1-nospace.crt,data_files/server1.key,-,-,-" \
"$P_CLI server_name=localhost dtls=1" \
0 \
-s "parse ServerName extension" \
-c "issuer name *: C=NL, O=PolarSSL, CN=PolarSSL Test CA" \
-c "subject name *: C=NL, O=PolarSSL, CN=localhost"
run_test "SNI: DTLS, matching cert 2" \
"$P_SRV debug_level=3 dtls=1 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,-,polarssl.example,data_files/server1-nospace.crt,data_files/server1.key,-,-,-" \
"$P_CLI server_name=polarssl.example dtls=1" \
0 \
-s "parse ServerName extension" \
-c "issuer name *: C=NL, O=PolarSSL, CN=PolarSSL Test CA" \
-c "subject name *: C=NL, O=PolarSSL, CN=polarssl.example"
run_test "SNI: DTLS, no matching cert" \
"$P_SRV debug_level=3 dtls=1 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,-,polarssl.example,data_files/server1-nospace.crt,data_files/server1.key,-,-,-" \
"$P_CLI server_name=nonesuch.example dtls=1" \
1 \
-s "parse ServerName extension" \
-s "ssl_sni_wrapper() returned" \
-s "mbedtls_ssl_handshake returned" \
-c "mbedtls_ssl_handshake returned" \
-c "SSL - A fatal alert message was received from our peer"
# Per-name auth_mode overrides over DTLS.
run_test "SNI: DTLS, client auth no override: optional" \
"$P_SRV debug_level=3 auth_mode=optional dtls=1 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,-" \
"$P_CLI debug_level=3 server_name=localhost dtls=1" \
0 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify"
run_test "SNI: DTLS, client auth override: none -> optional" \
"$P_SRV debug_level=3 auth_mode=none dtls=1 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,optional" \
"$P_CLI debug_level=3 server_name=localhost dtls=1" \
0 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify"
run_test "SNI: DTLS, client auth override: optional -> none" \
"$P_SRV debug_level=3 auth_mode=optional dtls=1 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,none" \
"$P_CLI debug_level=3 server_name=localhost dtls=1" \
0 \
-s "skip write certificate request" \
-C "skip parse certificate request" \
-c "got no certificate request" \
-c "skip write certificate" \
-c "skip write certificate verify" \
-s "skip parse certificate verify"
# Per-name CA/CRL overrides over DTLS.
run_test "SNI: DTLS, CA no override" \
"$P_SRV debug_level=3 auth_mode=optional dtls=1 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
ca_file=data_files/test-ca.crt \
sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,required" \
"$P_CLI debug_level=3 server_name=localhost dtls=1 \
crt_file=data_files/server6.crt key_file=data_files/server6.key" \
1 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify" \
-s "x509_verify_cert() returned" \
-s "! The certificate is not correctly signed by the trusted CA" \
-S "The certificate has been revoked (is on a CRL)"
run_test "SNI: DTLS, CA override" \
"$P_SRV debug_level=3 auth_mode=optional dtls=1 \
crt_file=data_files/server5.crt key_file=data_files/server5.key \
ca_file=data_files/test-ca.crt \
sni=localhost,data_files/server2.crt,data_files/server2.key,data_files/test-ca2.crt,-,required" \
"$P_CLI debug_level=3 server_name=localhost dtls=1 \
crt_file=data_files/server6.crt key_file=data_files/server6.key" \
0 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify" \
-S "x509_verify_cert() returned" \
-S "! The certificate is not correctly signed by the trusted CA" \
-S "The certificate has been revoked (is on a CRL)"
run_test "SNI: DTLS, CA override with CRL" \
"$P_SRV debug_level=3 auth_mode=optional \
crt_file=data_files/server5.crt key_file=data_files/server5.key dtls=1 \
ca_file=data_files/test-ca.crt \
sni=localhost,data_files/server2.crt,data_files/server2.key,data_files/test-ca2.crt,data_files/crl-ec-sha256.pem,required" \
"$P_CLI debug_level=3 server_name=localhost dtls=1 \
crt_file=data_files/server6.crt key_file=data_files/server6.key" \
1 \
-S "skip write certificate request" \
-C "skip parse certificate request" \
-c "got a certificate request" \
-C "skip write certificate" \
-C "skip write certificate verify" \
-S "skip parse certificate verify" \
-s "x509_verify_cert() returned" \
-S "! The certificate is not correctly signed by the trusted CA" \
-s "The certificate has been revoked (is on a CRL)"
# Tests for non-blocking I/O: exercise a variety of handshake flows
# NOTE(review): nbio=2 presumably makes the test programs use non-blocking
# sockets — confirm against the ssl_client2/ssl_server2 option documentation.
run_test "Non-blocking I/O: basic handshake" \
"$P_SRV nbio=2 tickets=0 auth_mode=none" \
"$P_CLI nbio=2 tickets=0" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Non-blocking I/O: client auth" \
"$P_SRV nbio=2 tickets=0 auth_mode=required" \
"$P_CLI nbio=2 tickets=0" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Non-blocking I/O: ticket" \
"$P_SRV nbio=2 tickets=1 auth_mode=none" \
"$P_CLI nbio=2 tickets=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Non-blocking I/O: ticket + client auth" \
"$P_SRV nbio=2 tickets=1 auth_mode=required" \
"$P_CLI nbio=2 tickets=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Non-blocking I/O: ticket + client auth + resume" \
"$P_SRV nbio=2 tickets=1 auth_mode=required" \
"$P_CLI nbio=2 tickets=1 reconnect=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Non-blocking I/O: ticket + resume" \
"$P_SRV nbio=2 tickets=1 auth_mode=none" \
"$P_CLI nbio=2 tickets=1 reconnect=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Non-blocking I/O: session-id resume" \
"$P_SRV nbio=2 tickets=0 auth_mode=none" \
"$P_CLI nbio=2 tickets=0 reconnect=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
# Tests for event-driven I/O: exercise a variety of handshake flows
# Same matrix as the non-blocking tests, with event=1 instead of nbio=2.
run_test "Event-driven I/O: basic handshake" \
"$P_SRV event=1 tickets=0 auth_mode=none" \
"$P_CLI event=1 tickets=0" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O: client auth" \
"$P_SRV event=1 tickets=0 auth_mode=required" \
"$P_CLI event=1 tickets=0" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O: ticket" \
"$P_SRV event=1 tickets=1 auth_mode=none" \
"$P_CLI event=1 tickets=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O: ticket + client auth" \
"$P_SRV event=1 tickets=1 auth_mode=required" \
"$P_CLI event=1 tickets=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O: ticket + client auth + resume" \
"$P_SRV event=1 tickets=1 auth_mode=required" \
"$P_CLI event=1 tickets=1 reconnect=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O: ticket + resume" \
"$P_SRV event=1 tickets=1 auth_mode=none" \
"$P_CLI event=1 tickets=1 reconnect=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O: session-id resume" \
"$P_SRV event=1 tickets=0 auth_mode=none" \
"$P_CLI event=1 tickets=0 reconnect=1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-c "Read from server: .* bytes read"
# Event-driven I/O over DTLS.
run_test "Event-driven I/O, DTLS: basic handshake" \
"$P_SRV dtls=1 event=1 tickets=0 auth_mode=none" \
"$P_CLI dtls=1 event=1 tickets=0" \
0 \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O, DTLS: client auth" \
"$P_SRV dtls=1 event=1 tickets=0 auth_mode=required" \
"$P_CLI dtls=1 event=1 tickets=0" \
0 \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O, DTLS: ticket" \
"$P_SRV dtls=1 event=1 tickets=1 auth_mode=none" \
"$P_CLI dtls=1 event=1 tickets=1" \
0 \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O, DTLS: ticket + client auth" \
"$P_SRV dtls=1 event=1 tickets=1 auth_mode=required" \
"$P_CLI dtls=1 event=1 tickets=1" \
0 \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O, DTLS: ticket + client auth + resume" \
"$P_SRV dtls=1 event=1 tickets=1 auth_mode=required" \
"$P_CLI dtls=1 event=1 tickets=1 reconnect=1" \
0 \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O, DTLS: ticket + resume" \
"$P_SRV dtls=1 event=1 tickets=1 auth_mode=none" \
"$P_CLI dtls=1 event=1 tickets=1 reconnect=1" \
0 \
-c "Read from server: .* bytes read"
run_test "Event-driven I/O, DTLS: session-id resume" \
"$P_SRV dtls=1 event=1 tickets=0 auth_mode=none" \
"$P_CLI dtls=1 event=1 tickets=0 reconnect=1" \
0 \
-c "Read from server: .* bytes read"
# This test demonstrates the need for the mbedtls_ssl_check_pending function.
# During session resumption, the client will send its ApplicationData record
# within the same datagram as the Finished messages. In this situation, the
# server MUST NOT idle on the underlying transport after handshake completion,
# because the ApplicationData request has already been queued internally.
run_test "Event-driven I/O, DTLS: session-id resume, UDP packing" \
-p "$P_PXY pack=50" \
"$P_SRV dtls=1 event=1 tickets=0 auth_mode=required" \
"$P_CLI dtls=1 event=1 tickets=0 reconnect=1" \
0 \
-c "Read from server: .* bytes read"
# Tests for version negotiation
# Both sides must agree on the highest mutually supported version; disjoint
# min/max ranges must fail on the side that detects the mismatch.
run_test "Version check: all -> 1.2" \
"$P_SRV" \
"$P_CLI" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-s "Protocol is TLSv1.2" \
-c "Protocol is TLSv1.2"
run_test "Version check: cli max 1.1 -> 1.1" \
"$P_SRV" \
"$P_CLI max_version=tls1_1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-s "Protocol is TLSv1.1" \
-c "Protocol is TLSv1.1"
run_test "Version check: srv max 1.1 -> 1.1" \
"$P_SRV max_version=tls1_1" \
"$P_CLI" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-s "Protocol is TLSv1.1" \
-c "Protocol is TLSv1.1"
run_test "Version check: cli+srv max 1.1 -> 1.1" \
"$P_SRV max_version=tls1_1" \
"$P_CLI max_version=tls1_1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-s "Protocol is TLSv1.1" \
-c "Protocol is TLSv1.1"
run_test "Version check: cli max 1.1, srv min 1.1 -> 1.1" \
"$P_SRV min_version=tls1_1" \
"$P_CLI max_version=tls1_1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-s "Protocol is TLSv1.1" \
-c "Protocol is TLSv1.1"
run_test "Version check: cli min 1.1, srv max 1.1 -> 1.1" \
"$P_SRV max_version=tls1_1" \
"$P_CLI min_version=tls1_1" \
0 \
-S "mbedtls_ssl_handshake returned" \
-C "mbedtls_ssl_handshake returned" \
-s "Protocol is TLSv1.1" \
-c "Protocol is TLSv1.1"
# Disjoint ranges: the error surfaces on the side whose bound is violated.
run_test "Version check: cli min 1.2, srv max 1.1 -> fail" \
"$P_SRV max_version=tls1_1" \
"$P_CLI min_version=tls1_2" \
1 \
-s "mbedtls_ssl_handshake returned" \
-c "mbedtls_ssl_handshake returned" \
-c "SSL - Handshake protocol not within min/max boundaries"
run_test "Version check: srv min 1.2, cli max 1.1 -> fail" \
"$P_SRV min_version=tls1_2" \
"$P_CLI max_version=tls1_1" \
1 \
-s "mbedtls_ssl_handshake returned" \
-c "mbedtls_ssl_handshake returned" \
-s "SSL - Handshake protocol not within min/max boundaries"
# Tests for ALPN extension
# Alert "[2:120]" is a fatal no_application_protocol alert (RFC 7301);
# the extension is only negotiated when both sides configure alpn=.
run_test "ALPN: none" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3" \
0 \
-C "client hello, adding alpn extension" \
-S "found alpn extension" \
-C "got an alert message, type: \\[2:120]" \
-S "server hello, adding alpn extension" \
-C "found alpn extension " \
-C "Application Layer Protocol is" \
-S "Application Layer Protocol is"
run_test "ALPN: client only" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3 alpn=abc,1234" \
0 \
-c "client hello, adding alpn extension" \
-s "found alpn extension" \
-C "got an alert message, type: \\[2:120]" \
-S "server hello, adding alpn extension" \
-C "found alpn extension " \
-c "Application Layer Protocol is (none)" \
-S "Application Layer Protocol is"
run_test "ALPN: server only" \
"$P_SRV debug_level=3 alpn=abc,1234" \
"$P_CLI debug_level=3" \
0 \
-C "client hello, adding alpn extension" \
-S "found alpn extension" \
-C "got an alert message, type: \\[2:120]" \
-S "server hello, adding alpn extension" \
-C "found alpn extension " \
-C "Application Layer Protocol is" \
-s "Application Layer Protocol is (none)"
# Protocol selection follows the server's preference order.
run_test "ALPN: both, common cli1-srv1" \
"$P_SRV debug_level=3 alpn=abc,1234" \
"$P_CLI debug_level=3 alpn=abc,1234" \
0 \
-c "client hello, adding alpn extension" \
-s "found alpn extension" \
-C "got an alert message, type: \\[2:120]" \
-s "server hello, adding alpn extension" \
-c "found alpn extension" \
-c "Application Layer Protocol is abc" \
-s "Application Layer Protocol is abc"
run_test "ALPN: both, common cli2-srv1" \
"$P_SRV debug_level=3 alpn=abc,1234" \
"$P_CLI debug_level=3 alpn=1234,abc" \
0 \
-c "client hello, adding alpn extension" \
-s "found alpn extension" \
-C "got an alert message, type: \\[2:120]" \
-s "server hello, adding alpn extension" \
-c "found alpn extension" \
-c "Application Layer Protocol is abc" \
-s "Application Layer Protocol is abc"
run_test "ALPN: both, common cli1-srv2" \
"$P_SRV debug_level=3 alpn=abc,1234" \
"$P_CLI debug_level=3 alpn=1234,abcde" \
0 \
-c "client hello, adding alpn extension" \
-s "found alpn extension" \
-C "got an alert message, type: \\[2:120]" \
-s "server hello, adding alpn extension" \
-c "found alpn extension" \
-c "Application Layer Protocol is 1234" \
-s "Application Layer Protocol is 1234"
# No protocol in common: handshake fails with no_application_protocol.
run_test "ALPN: both, no common" \
"$P_SRV debug_level=3 alpn=abc,123" \
"$P_CLI debug_level=3 alpn=1234,abcde" \
1 \
-c "client hello, adding alpn extension" \
-s "found alpn extension" \
-c "got an alert message, type: \\[2:120]" \
-S "server hello, adding alpn extension" \
-C "found alpn extension" \
-C "Application Layer Protocol is 1234" \
-S "Application Layer Protocol is 1234"
# Tests for keyUsage in leaf certificates, part 1:
# server-side certificate/suite selection
# The server must only offer ciphersuites compatible with its certificate's
# keyUsage bits (ds = digitalSignature, ke = keyEncipherment,
# ka = keyAgreement, per the data_files naming).
run_test "keyUsage srv: RSA, digitalSignature -> (EC)DHE-RSA" \
"$P_SRV key_file=data_files/server2.key \
crt_file=data_files/server2.ku-ds.crt" \
"$P_CLI" \
0 \
-c "Ciphersuite is TLS-[EC]*DHE-RSA-WITH-"
run_test "keyUsage srv: RSA, keyEncipherment -> RSA" \
"$P_SRV key_file=data_files/server2.key \
crt_file=data_files/server2.ku-ke.crt" \
"$P_CLI" \
0 \
-c "Ciphersuite is TLS-RSA-WITH-"
run_test "keyUsage srv: RSA, keyAgreement -> fail" \
"$P_SRV key_file=data_files/server2.key \
crt_file=data_files/server2.ku-ka.crt" \
"$P_CLI" \
1 \
-C "Ciphersuite is "
run_test "keyUsage srv: ECDSA, digitalSignature -> ECDHE-ECDSA" \
"$P_SRV key_file=data_files/server5.key \
crt_file=data_files/server5.ku-ds.crt" \
"$P_CLI" \
0 \
-c "Ciphersuite is TLS-ECDHE-ECDSA-WITH-"
run_test "keyUsage srv: ECDSA, keyAgreement -> ECDH-" \
"$P_SRV key_file=data_files/server5.key \
crt_file=data_files/server5.ku-ka.crt" \
"$P_CLI" \
0 \
-c "Ciphersuite is TLS-ECDH-"
run_test "keyUsage srv: ECDSA, keyEncipherment -> fail" \
"$P_SRV key_file=data_files/server5.key \
crt_file=data_files/server5.ku-ke.crt" \
"$P_CLI" \
1 \
-C "Ciphersuite is "
# Tests for keyUsage in leaf certificates, part 2:
# client-side checking of server cert
# The client must reject (or, with auth_mode=optional, warn about) a server
# certificate whose keyUsage bits do not cover the forced ciphersuite's
# key-exchange needs.
run_test "keyUsage cli: DigitalSignature+KeyEncipherment, RSA: OK" \
"$O_SRV -key data_files/server2.key \
-cert data_files/server2.ku-ds_ke.crt" \
"$P_CLI debug_level=1 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
0 \
-C "bad certificate (usage extensions)" \
-C "Processing of the Certificate handshake message failed" \
-c "Ciphersuite is TLS-"
run_test "keyUsage cli: DigitalSignature+KeyEncipherment, DHE-RSA: OK" \
"$O_SRV -key data_files/server2.key \
-cert data_files/server2.ku-ds_ke.crt" \
"$P_CLI debug_level=1 \
force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA" \
0 \
-C "bad certificate (usage extensions)" \
-C "Processing of the Certificate handshake message failed" \
-c "Ciphersuite is TLS-"
run_test "keyUsage cli: KeyEncipherment, RSA: OK" \
"$O_SRV -key data_files/server2.key \
-cert data_files/server2.ku-ke.crt" \
"$P_CLI debug_level=1 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
0 \
-C "bad certificate (usage extensions)" \
-C "Processing of the Certificate handshake message failed" \
-c "Ciphersuite is TLS-"
run_test "keyUsage cli: KeyEncipherment, DHE-RSA: fail" \
"$O_SRV -key data_files/server2.key \
-cert data_files/server2.ku-ke.crt" \
"$P_CLI debug_level=1 \
force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA" \
1 \
-c "bad certificate (usage extensions)" \
-c "Processing of the Certificate handshake message failed" \
-C "Ciphersuite is TLS-"
# "soft" variant: auth_mode=optional downgrades the failure to a warning and
# lets the handshake complete.
run_test "keyUsage cli: KeyEncipherment, DHE-RSA: fail, soft" \
"$O_SRV -key data_files/server2.key \
-cert data_files/server2.ku-ke.crt" \
"$P_CLI debug_level=1 auth_mode=optional \
force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA" \
0 \
-c "bad certificate (usage extensions)" \
-C "Processing of the Certificate handshake message failed" \
-c "Ciphersuite is TLS-" \
-c "! Usage does not match the keyUsage extension"
run_test "keyUsage cli: DigitalSignature, DHE-RSA: OK" \
"$O_SRV -key data_files/server2.key \
-cert data_files/server2.ku-ds.crt" \
"$P_CLI debug_level=1 \
force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA" \
0 \
-C "bad certificate (usage extensions)" \
-C "Processing of the Certificate handshake message failed" \
-c "Ciphersuite is TLS-"
run_test "keyUsage cli: DigitalSignature, RSA: fail" \
"$O_SRV -key data_files/server2.key \
-cert data_files/server2.ku-ds.crt" \
"$P_CLI debug_level=1 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
1 \
-c "bad certificate (usage extensions)" \
-c "Processing of the Certificate handshake message failed" \
-C "Ciphersuite is TLS-"
run_test "keyUsage cli: DigitalSignature, RSA: fail, soft" \
"$O_SRV -key data_files/server2.key \
-cert data_files/server2.ku-ds.crt" \
"$P_CLI debug_level=1 auth_mode=optional \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
0 \
-c "bad certificate (usage extensions)" \
-C "Processing of the Certificate handshake message failed" \
-c "Ciphersuite is TLS-" \
-c "! Usage does not match the keyUsage extension"
# Tests for keyUsage in leaf certificates, part 3:
# server-side checking of client cert
# A client certificate must carry digitalSignature (it signs the
# CertificateVerify); wrong bits are a warning with auth_mode=optional and
# a hard failure with auth_mode=required.
run_test "keyUsage cli-auth: RSA, DigitalSignature: OK" \
"$P_SRV debug_level=1 auth_mode=optional" \
"$O_CLI -key data_files/server2.key \
-cert data_files/server2.ku-ds.crt" \
0 \
-S "bad certificate (usage extensions)" \
-S "Processing of the Certificate handshake message failed"
run_test "keyUsage cli-auth: RSA, KeyEncipherment: fail (soft)" \
"$P_SRV debug_level=1 auth_mode=optional" \
"$O_CLI -key data_files/server2.key \
-cert data_files/server2.ku-ke.crt" \
0 \
-s "bad certificate (usage extensions)" \
-S "Processing of the Certificate handshake message failed"
run_test "keyUsage cli-auth: RSA, KeyEncipherment: fail (hard)" \
"$P_SRV debug_level=1 auth_mode=required" \
"$O_CLI -key data_files/server2.key \
-cert data_files/server2.ku-ke.crt" \
1 \
-s "bad certificate (usage extensions)" \
-s "Processing of the Certificate handshake message failed"
run_test "keyUsage cli-auth: ECDSA, DigitalSignature: OK" \
"$P_SRV debug_level=1 auth_mode=optional" \
"$O_CLI -key data_files/server5.key \
-cert data_files/server5.ku-ds.crt" \
0 \
-S "bad certificate (usage extensions)" \
-S "Processing of the Certificate handshake message failed"
run_test "keyUsage cli-auth: ECDSA, KeyAgreement: fail (soft)" \
"$P_SRV debug_level=1 auth_mode=optional" \
"$O_CLI -key data_files/server5.key \
-cert data_files/server5.ku-ka.crt" \
0 \
-s "bad certificate (usage extensions)" \
-S "Processing of the Certificate handshake message failed"
# Tests for extendedKeyUsage, part 1: server-side certificate/suite selection
# A cert whose EKU includes serverAuth is usable by the server.
run_test "extKeyUsage srv: serverAuth -> OK" \
"$P_SRV key_file=data_files/server5.key \
crt_file=data_files/server5.eku-srv.crt" \
"$P_CLI" \
0
# A cert whose EKU lists both serverAuth and clientAuth is also usable
# by the server.
# Fix: use the dual-purpose certificate (eku-srv_cli.crt, the same file
# the client-side tests below use). The previous body reused
# eku-srv.crt, which made this test a duplicate of the preceding
# serverAuth-only case instead of exercising the cert named in the
# title.
run_test "extKeyUsage srv: serverAuth,clientAuth -> OK" \
"$P_SRV key_file=data_files/server5.key \
crt_file=data_files/server5.eku-srv_cli.crt" \
"$P_CLI" \
0
# anyExtendedKeyUsage makes the cert acceptable even though codeSigning
# alone would not be.
run_test "extKeyUsage srv: codeSign,anyEKU -> OK" \
"$P_SRV key_file=data_files/server5.key \
crt_file=data_files/server5.eku-cs_any.crt" \
"$P_CLI" \
0
# A codeSigning-only cert must be rejected for server use (no
# serverAuth, no anyEKU).
# Fix: use the codeSigning certificate (eku-cs.crt, as the client-side
# tests below do). The previous body pointed at eku-cli.crt
# (clientAuth-only) — that also fails the handshake, but does not test
# the codeSign case named in the title.
run_test "extKeyUsage srv: codeSign -> fail" \
"$P_SRV key_file=data_files/server5.key \
crt_file=data_files/server5.eku-cs.crt" \
"$P_CLI" \
1
# Tests for extendedKeyUsage, part 2: client-side checking of server cert
# The client (P_CLI) verifies the OpenSSL server's EKU: serverAuth or
# anyEKU passes; codeSigning-only is rejected with a fatal error.
run_test "extKeyUsage cli: serverAuth -> OK" \
"$O_SRV -key data_files/server5.key \
-cert data_files/server5.eku-srv.crt" \
"$P_CLI debug_level=1" \
0 \
-C "bad certificate (usage extensions)" \
-C "Processing of the Certificate handshake message failed" \
-c "Ciphersuite is TLS-"
run_test "extKeyUsage cli: serverAuth,clientAuth -> OK" \
"$O_SRV -key data_files/server5.key \
-cert data_files/server5.eku-srv_cli.crt" \
"$P_CLI debug_level=1" \
0 \
-C "bad certificate (usage extensions)" \
-C "Processing of the Certificate handshake message failed" \
-c "Ciphersuite is TLS-"
run_test "extKeyUsage cli: codeSign,anyEKU -> OK" \
"$O_SRV -key data_files/server5.key \
-cert data_files/server5.eku-cs_any.crt" \
"$P_CLI debug_level=1" \
0 \
-C "bad certificate (usage extensions)" \
-C "Processing of the Certificate handshake message failed" \
-c "Ciphersuite is TLS-"
run_test "extKeyUsage cli: codeSign -> fail" \
"$O_SRV -key data_files/server5.key \
-cert data_files/server5.eku-cs.crt" \
"$P_CLI debug_level=1" \
1 \
-c "bad certificate (usage extensions)" \
-c "Processing of the Certificate handshake message failed" \
-C "Ciphersuite is TLS-"
# Tests for extendedKeyUsage, part 3: server-side checking of client cert
# Mirrors part 2 from the server's perspective: clientAuth or anyEKU in
# the client cert passes; codeSigning-only fails softly under
# auth_mode=optional and hard under auth_mode=required.
run_test "extKeyUsage cli-auth: clientAuth -> OK" \
"$P_SRV debug_level=1 auth_mode=optional" \
"$O_CLI -key data_files/server5.key \
-cert data_files/server5.eku-cli.crt" \
0 \
-S "bad certificate (usage extensions)" \
-S "Processing of the Certificate handshake message failed"
run_test "extKeyUsage cli-auth: serverAuth,clientAuth -> OK" \
"$P_SRV debug_level=1 auth_mode=optional" \
"$O_CLI -key data_files/server5.key \
-cert data_files/server5.eku-srv_cli.crt" \
0 \
-S "bad certificate (usage extensions)" \
-S "Processing of the Certificate handshake message failed"
run_test "extKeyUsage cli-auth: codeSign,anyEKU -> OK" \
"$P_SRV debug_level=1 auth_mode=optional" \
"$O_CLI -key data_files/server5.key \
-cert data_files/server5.eku-cs_any.crt" \
0 \
-S "bad certificate (usage extensions)" \
-S "Processing of the Certificate handshake message failed"
run_test "extKeyUsage cli-auth: codeSign -> fail (soft)" \
"$P_SRV debug_level=1 auth_mode=optional" \
"$O_CLI -key data_files/server5.key \
-cert data_files/server5.eku-cs.crt" \
0 \
-s "bad certificate (usage extensions)" \
-S "Processing of the Certificate handshake message failed"
run_test "extKeyUsage cli-auth: codeSign -> fail (hard)" \
"$P_SRV debug_level=1 auth_mode=required" \
"$O_CLI -key data_files/server5.key \
-cert data_files/server5.eku-cs.crt" \
1 \
-s "bad certificate (usage extensions)" \
-s "Processing of the Certificate handshake message failed"
# Tests for DHM parameters loading
# The client's debug output reveals the bit length of the DH prime the
# server actually used: the built-in default is 2048-bit, while
# dhm_file= substitutes the 1024-bit parameters from dhparams.pem.
run_test "DHM parameters: reference" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA \
debug_level=3" \
0 \
-c "value of 'DHM: P ' (2048 bits)" \
-c "value of 'DHM: G ' (2 bits)"
run_test "DHM parameters: other parameters" \
"$P_SRV dhm_file=data_files/dhparams.pem" \
"$P_CLI force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA \
debug_level=3" \
0 \
-c "value of 'DHM: P ' (1024 bits)" \
-c "value of 'DHM: G ' (2 bits)"
# Tests for DHM client-side size checking
# The client rejects DH primes below its minimum (overridable via
# dhmlen=): 2048- and 1024-bit primes pass the respective thresholds,
# while 1000-bit (server side) or a raised 2049-bit minimum (client
# side) triggers "DHM prime too short".
run_test "DHM size: server default, client default, OK" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA \
debug_level=1" \
0 \
-C "DHM prime too short:"
run_test "DHM size: server default, client 2048, OK" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA \
debug_level=1 dhmlen=2048" \
0 \
-C "DHM prime too short:"
run_test "DHM size: server 1024, client default, OK" \
"$P_SRV dhm_file=data_files/dhparams.pem" \
"$P_CLI force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA \
debug_level=1" \
0 \
-C "DHM prime too short:"
run_test "DHM size: server 1000, client default, rejected" \
"$P_SRV dhm_file=data_files/dh.1000.pem" \
"$P_CLI force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA \
debug_level=1" \
1 \
-c "DHM prime too short:"
run_test "DHM size: server default, client 2049, rejected" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-DHE-RSA-WITH-AES-128-CBC-SHA \
debug_level=1 dhmlen=2049" \
1 \
-c "DHM prime too short:"
# Tests for PSK callback
# psk= / psk_identity= set a static key; psk_list= installs a lookup
# callback mapping identity,key pairs. The three distinctive errors:
# "None of the common ciphersuites" = no PSK configured at all,
# "Unknown identity received" = identity not found by the callback,
# "Verification of the message MAC failed" = identity matched but keys
# differ.
run_test "PSK callback: psk, no callback" \
"$P_SRV psk=abc123 psk_identity=foo" \
"$P_CLI force_ciphersuite=TLS-PSK-WITH-AES-128-CBC-SHA \
psk_identity=foo psk=abc123" \
0 \
-S "SSL - None of the common ciphersuites is usable" \
-S "SSL - Unknown identity received" \
-S "SSL - Verification of the message MAC failed"
run_test "PSK callback: no psk, no callback" \
"$P_SRV" \
"$P_CLI force_ciphersuite=TLS-PSK-WITH-AES-128-CBC-SHA \
psk_identity=foo psk=abc123" \
1 \
-s "SSL - None of the common ciphersuites is usable" \
-S "SSL - Unknown identity received" \
-S "SSL - Verification of the message MAC failed"
# With both a static psk and psk_list, the callback wins: identity "foo"
# is not in the list, so the handshake fails with Unknown identity.
run_test "PSK callback: callback overrides other settings" \
"$P_SRV psk=abc123 psk_identity=foo psk_list=abc,dead,def,beef" \
"$P_CLI force_ciphersuite=TLS-PSK-WITH-AES-128-CBC-SHA \
psk_identity=foo psk=abc123" \
1 \
-S "SSL - None of the common ciphersuites is usable" \
-s "SSL - Unknown identity received" \
-S "SSL - Verification of the message MAC failed"
run_test "PSK callback: first id matches" \
"$P_SRV psk_list=abc,dead,def,beef" \
"$P_CLI force_ciphersuite=TLS-PSK-WITH-AES-128-CBC-SHA \
psk_identity=abc psk=dead" \
0 \
-S "SSL - None of the common ciphersuites is usable" \
-S "SSL - Unknown identity received" \
-S "SSL - Verification of the message MAC failed"
run_test "PSK callback: second id matches" \
"$P_SRV psk_list=abc,dead,def,beef" \
"$P_CLI force_ciphersuite=TLS-PSK-WITH-AES-128-CBC-SHA \
psk_identity=def psk=beef" \
0 \
-S "SSL - None of the common ciphersuites is usable" \
-S "SSL - Unknown identity received" \
-S "SSL - Verification of the message MAC failed"
run_test "PSK callback: no match" \
"$P_SRV psk_list=abc,dead,def,beef" \
"$P_CLI force_ciphersuite=TLS-PSK-WITH-AES-128-CBC-SHA \
psk_identity=ghi psk=beef" \
1 \
-S "SSL - None of the common ciphersuites is usable" \
-s "SSL - Unknown identity received" \
-S "SSL - Verification of the message MAC failed"
run_test "PSK callback: wrong key" \
"$P_SRV psk_list=abc,dead,def,beef" \
"$P_CLI force_ciphersuite=TLS-PSK-WITH-AES-128-CBC-SHA \
psk_identity=abc psk=beef" \
1 \
-S "SSL - None of the common ciphersuites is usable" \
-S "SSL - Unknown identity received" \
-s "SSL - Verification of the message MAC failed"
# Tests for EC J-PAKE
# ecjpake_pw= enables the EC J-PAKE key exchange (ciphersuite id c0ff).
# Each test is gated on MBEDTLS_KEY_EXCHANGE_ECJPAKE being compiled in.
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECJPAKE
# No password on the client: it neither offers the suite nor the
# ecjpake_kkpp extension, and the server never sees them.
run_test "ECJPAKE: client not configured" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3" \
0 \
-C "add ciphersuite: c0ff" \
-C "adding ecjpake_kkpp extension" \
-S "found ecjpake kkpp extension" \
-S "skip ecjpake kkpp extension" \
-S "ciphersuite mismatch: ecjpake not configured" \
-S "server hello, ecjpake kkpp extension" \
-C "found ecjpake_kkpp extension" \
-S "None of the common ciphersuites is usable"
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECJPAKE
# Client offers EC J-PAKE but the server has no password: the server
# sees and skips the extension, and no common suite remains.
run_test "ECJPAKE: server not configured" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3 ecjpake_pw=bla \
force_ciphersuite=TLS-ECJPAKE-WITH-AES-128-CCM-8" \
1 \
-c "add ciphersuite: c0ff" \
-c "adding ecjpake_kkpp extension" \
-s "found ecjpake kkpp extension" \
-s "skip ecjpake kkpp extension" \
-s "ciphersuite mismatch: ecjpake not configured" \
-S "server hello, ecjpake kkpp extension" \
-C "found ecjpake_kkpp extension" \
-s "None of the common ciphersuites is usable"
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECJPAKE
run_test "ECJPAKE: working, TLS" \
"$P_SRV debug_level=3 ecjpake_pw=bla" \
"$P_CLI debug_level=3 ecjpake_pw=bla \
force_ciphersuite=TLS-ECJPAKE-WITH-AES-128-CCM-8" \
0 \
-c "add ciphersuite: c0ff" \
-c "adding ecjpake_kkpp extension" \
-C "re-using cached ecjpake parameters" \
-s "found ecjpake kkpp extension" \
-S "skip ecjpake kkpp extension" \
-S "ciphersuite mismatch: ecjpake not configured" \
-s "server hello, ecjpake kkpp extension" \
-c "found ecjpake_kkpp extension" \
-S "None of the common ciphersuites is usable" \
-S "SSL - Verification of the message MAC failed"
server_needs_more_time 1
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECJPAKE
# Mismatched passwords only surface as a MAC failure at Finished time.
run_test "ECJPAKE: password mismatch, TLS" \
"$P_SRV debug_level=3 ecjpake_pw=bla" \
"$P_CLI debug_level=3 ecjpake_pw=bad \
force_ciphersuite=TLS-ECJPAKE-WITH-AES-128-CCM-8" \
1 \
-C "re-using cached ecjpake parameters" \
-s "SSL - Verification of the message MAC failed"
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECJPAKE
# Over DTLS with cookies, the client's first hello is replied to with a
# HelloVerifyRequest, so on retransmit it re-uses its cached ecjpake
# parameters; with cookies=0 there is no second hello, hence no cache
# hit.
run_test "ECJPAKE: working, DTLS" \
"$P_SRV debug_level=3 dtls=1 ecjpake_pw=bla" \
"$P_CLI debug_level=3 dtls=1 ecjpake_pw=bla \
force_ciphersuite=TLS-ECJPAKE-WITH-AES-128-CCM-8" \
0 \
-c "re-using cached ecjpake parameters" \
-S "SSL - Verification of the message MAC failed"
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECJPAKE
run_test "ECJPAKE: working, DTLS, no cookie" \
"$P_SRV debug_level=3 dtls=1 ecjpake_pw=bla cookies=0" \
"$P_CLI debug_level=3 dtls=1 ecjpake_pw=bla \
force_ciphersuite=TLS-ECJPAKE-WITH-AES-128-CCM-8" \
0 \
-C "re-using cached ecjpake parameters" \
-S "SSL - Verification of the message MAC failed"
server_needs_more_time 1
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECJPAKE
run_test "ECJPAKE: password mismatch, DTLS" \
"$P_SRV debug_level=3 dtls=1 ecjpake_pw=bla" \
"$P_CLI debug_level=3 dtls=1 ecjpake_pw=bad \
force_ciphersuite=TLS-ECJPAKE-WITH-AES-128-CCM-8" \
1 \
-c "re-using cached ecjpake parameters" \
-s "SSL - Verification of the message MAC failed"
# for tests with configs/config-thread.h
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECJPAKE
# Log-free variant so it also runs under minimal (no-debug) configs.
run_test "ECJPAKE: working, DTLS, nolog" \
"$P_SRV dtls=1 ecjpake_pw=bla" \
"$P_CLI dtls=1 ecjpake_pw=bla \
force_ciphersuite=TLS-ECJPAKE-WITH-AES-128-CCM-8" \
0
# Tests for ciphersuites per version
# version_suites= takes one suite per protocol version
# (SSL3,TLS1.0,TLS1.1,TLS1.2 in order); forcing each client version must
# select the corresponding entry.
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Per-version suites: SSL3" \
"$P_SRV min_version=ssl3 version_suites=TLS-RSA-WITH-3DES-EDE-CBC-SHA,TLS-RSA-WITH-AES-256-CBC-SHA,TLS-RSA-WITH-AES-128-CBC-SHA,TLS-RSA-WITH-AES-128-GCM-SHA256" \
"$P_CLI force_version=ssl3" \
0 \
-c "Ciphersuite is TLS-RSA-WITH-3DES-EDE-CBC-SHA"
run_test "Per-version suites: TLS 1.0" \
"$P_SRV arc4=1 version_suites=TLS-RSA-WITH-3DES-EDE-CBC-SHA,TLS-RSA-WITH-AES-256-CBC-SHA,TLS-RSA-WITH-AES-128-CBC-SHA,TLS-RSA-WITH-AES-128-GCM-SHA256" \
"$P_CLI force_version=tls1 arc4=1" \
0 \
-c "Ciphersuite is TLS-RSA-WITH-AES-256-CBC-SHA"
run_test "Per-version suites: TLS 1.1" \
"$P_SRV version_suites=TLS-RSA-WITH-3DES-EDE-CBC-SHA,TLS-RSA-WITH-AES-256-CBC-SHA,TLS-RSA-WITH-AES-128-CBC-SHA,TLS-RSA-WITH-AES-128-GCM-SHA256" \
"$P_CLI force_version=tls1_1" \
0 \
-c "Ciphersuite is TLS-RSA-WITH-AES-128-CBC-SHA"
run_test "Per-version suites: TLS 1.2" \
"$P_SRV version_suites=TLS-RSA-WITH-3DES-EDE-CBC-SHA,TLS-RSA-WITH-AES-256-CBC-SHA,TLS-RSA-WITH-AES-128-CBC-SHA,TLS-RSA-WITH-AES-128-GCM-SHA256" \
"$P_CLI force_version=tls1_2" \
0 \
-c "Ciphersuite is TLS-RSA-WITH-AES-128-GCM-SHA256"
# Test for ClientHello without extensions
# GnuTLS with %NO_EXTENSIONS sends a bare ClientHello; the server must
# still handshake (extensions dump shows 0 bytes), including when SHA-1
# certificates are disallowed (allow_sha1=0).
requires_gnutls
run_test "ClientHello without extensions, SHA-1 allowed" \
"$P_SRV debug_level=3" \
"$G_CLI --priority=NORMAL:%NO_EXTENSIONS:%DISABLE_SAFE_RENEGOTIATION localhost" \
0 \
-s "dumping 'client hello extensions' (0 bytes)"
requires_gnutls
run_test "ClientHello without extensions, SHA-1 forbidden in certificates on server" \
"$P_SRV debug_level=3 key_file=data_files/server2.key crt_file=data_files/server2.crt allow_sha1=0" \
"$G_CLI --priority=NORMAL:%NO_EXTENSIONS:%DISABLE_SAFE_RENEGOTIATION localhost" \
0 \
-s "dumping 'client hello extensions' (0 bytes)"
# Tests for mbedtls_ssl_get_bytes_avail()
# A 100-byte request fits one read (log line ends right after the
# count), while 500 bytes leaves data pending, which the server logs as
# "N bytes read (x+y)".
run_test "mbedtls_ssl_get_bytes_avail: no extra data" \
"$P_SRV" \
"$P_CLI request_size=100" \
0 \
-s "Read from client: 100 bytes read$"
run_test "mbedtls_ssl_get_bytes_avail: extra data" \
"$P_SRV" \
"$P_CLI request_size=500" \
0 \
-s "Read from client: 500 bytes read (.*+.*)"
# Tests for small client packets
# 1-byte application records exercise the minimal-padding/MAC paths of
# every record-protection mode. Variants cover block vs stream ciphers,
# with/without Encrypt-then-MAC (etm=0), and truncated HMAC
# (trunc_hmac=1, gated on MBEDTLS_SSL_TRUNCATED_HMAC).
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Small client packet SSLv3 BlockCipher" \
"$P_SRV min_version=ssl3" \
"$P_CLI request_size=1 force_version=ssl3 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Small client packet SSLv3 StreamCipher" \
"$P_SRV min_version=ssl3 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI request_size=1 force_version=ssl3 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.0 BlockCipher" \
"$P_SRV" \
"$P_CLI request_size=1 force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.0 BlockCipher, without EtM" \
"$P_SRV" \
"$P_CLI request_size=1 force_version=tls1 etm=0 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.0 BlockCipher, truncated MAC" \
"$P_SRV trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.0 BlockCipher, without EtM, truncated MAC" \
"$P_SRV trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.0 StreamCipher" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI request_size=1 force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.0 StreamCipher, without EtM" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI request_size=1 force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.0 StreamCipher, truncated MAC" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.0 StreamCipher, without EtM, truncated MAC" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA \
trunc_hmac=1 etm=0" \
0 \
-s "Read from client: 1 bytes read"
# Small client packets, TLS 1.1: same matrix (block/stream cipher, EtM
# on/off, truncated MAC on/off) with force_version=tls1_1.
run_test "Small client packet TLS 1.1 BlockCipher" \
"$P_SRV" \
"$P_CLI request_size=1 force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.1 BlockCipher, without EtM" \
"$P_SRV" \
"$P_CLI request_size=1 force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA etm=0" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.1 BlockCipher, truncated MAC" \
"$P_SRV trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.1 BlockCipher, without EtM, truncated MAC" \
"$P_SRV trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.1 StreamCipher" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI request_size=1 force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.1 StreamCipher, without EtM" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI request_size=1 force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.1 StreamCipher, truncated MAC" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.1 StreamCipher, without EtM, truncated MAC" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
0 \
-s "Read from client: 1 bytes read"
# Small client packets, TLS 1.2: the block/stream matrix plus
# TLS 1.2-only cases — a SHA-384 (larger-MAC) CBC suite and AEAD suites
# (CCM and the shorter-tag CCM-8).
run_test "Small client packet TLS 1.2 BlockCipher" \
"$P_SRV" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.2 BlockCipher, without EtM" \
"$P_SRV" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA etm=0" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.2 BlockCipher larger MAC" \
"$P_SRV" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.2 BlockCipher, truncated MAC" \
"$P_SRV trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.2 BlockCipher, without EtM, truncated MAC" \
"$P_SRV trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.2 StreamCipher" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.2 StreamCipher, without EtM" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.2 StreamCipher, truncated MAC" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet TLS 1.2 StreamCipher, without EtM, truncated MAC" \
"$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.2 AEAD" \
"$P_SRV" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CCM" \
0 \
-s "Read from client: 1 bytes read"
run_test "Small client packet TLS 1.2 AEAD shorter tag" \
"$P_SRV" \
"$P_CLI request_size=1 force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CCM-8" \
0 \
-s "Read from client: 1 bytes read"
# Tests for small client packets in DTLS
# Same 1-byte-record matrix over DTLS 1.0/1.2; the protocol version is
# forced on the server side here, and every test is gated on
# MBEDTLS_SSL_PROTO_DTLS (plus MBEDTLS_SSL_TRUNCATED_HMAC where used).
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
run_test "Small client packet DTLS 1.0" \
"$P_SRV dtls=1 force_version=dtls1" \
"$P_CLI dtls=1 request_size=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
run_test "Small client packet DTLS 1.0, without EtM" \
"$P_SRV dtls=1 force_version=dtls1 etm=0" \
"$P_CLI dtls=1 request_size=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet DTLS 1.0, truncated hmac" \
"$P_SRV dtls=1 force_version=dtls1 trunc_hmac=1" \
"$P_CLI dtls=1 request_size=1 trunc_hmac=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet DTLS 1.0, without EtM, truncated MAC" \
"$P_SRV dtls=1 force_version=dtls1 trunc_hmac=1 etm=0" \
"$P_CLI dtls=1 request_size=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1"\
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
run_test "Small client packet DTLS 1.2" \
"$P_SRV dtls=1 force_version=dtls1_2" \
"$P_CLI dtls=1 request_size=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
run_test "Small client packet DTLS 1.2, without EtM" \
"$P_SRV dtls=1 force_version=dtls1_2 etm=0" \
"$P_CLI dtls=1 request_size=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet DTLS 1.2, truncated hmac" \
"$P_SRV dtls=1 force_version=dtls1_2 trunc_hmac=1" \
"$P_CLI dtls=1 request_size=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
0 \
-s "Read from client: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small client packet DTLS 1.2, without EtM, truncated MAC" \
"$P_SRV dtls=1 force_version=dtls1_2 trunc_hmac=1 etm=0" \
"$P_CLI dtls=1 request_size=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1"\
0 \
-s "Read from client: 1 bytes read"
# Tests for small server packets
# Mirror of the small-client-packet matrix in the other direction: the
# server sends a 1-byte response (response_size=1) and the CLIENT log is
# checked.
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Small server packet SSLv3 BlockCipher" \
"$P_SRV response_size=1 min_version=ssl3" \
"$P_CLI force_version=ssl3 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Small server packet SSLv3 StreamCipher" \
"$P_SRV response_size=1 min_version=ssl3 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI force_version=ssl3 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.0 BlockCipher" \
"$P_SRV response_size=1" \
"$P_CLI force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.0 BlockCipher, without EtM" \
"$P_SRV response_size=1" \
"$P_CLI force_version=tls1 etm=0 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.0 BlockCipher, truncated MAC" \
"$P_SRV response_size=1 trunc_hmac=1" \
"$P_CLI force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.0 BlockCipher, without EtM, truncated MAC" \
"$P_SRV response_size=1 trunc_hmac=1" \
"$P_CLI force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.0 StreamCipher" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.0 StreamCipher, without EtM" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.0 StreamCipher, truncated MAC" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI force_version=tls1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.0 StreamCipher, without EtM, truncated MAC" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI force_version=tls1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA \
trunc_hmac=1 etm=0" \
0 \
-c "Read from server: 1 bytes read"
# Small server packets, TLS 1.1: block/stream, EtM on/off, truncated
# MAC on/off.
run_test "Small server packet TLS 1.1 BlockCipher" \
"$P_SRV response_size=1" \
"$P_CLI force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.1 BlockCipher, without EtM" \
"$P_SRV response_size=1" \
"$P_CLI force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA etm=0" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.1 BlockCipher, truncated MAC" \
"$P_SRV response_size=1 trunc_hmac=1" \
"$P_CLI force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.1 BlockCipher, without EtM, truncated MAC" \
"$P_SRV response_size=1 trunc_hmac=1" \
"$P_CLI force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.1 StreamCipher" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.1 StreamCipher, without EtM" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.1 StreamCipher, truncated MAC" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.1 StreamCipher, without EtM, truncated MAC" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI force_version=tls1_1 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
0 \
-c "Read from server: 1 bytes read"
# Small server packets, TLS 1.2: block/stream matrix plus the SHA-384
# (larger-MAC) CBC suite and the AEAD suites (CCM, CCM-8).
run_test "Small server packet TLS 1.2 BlockCipher" \
"$P_SRV response_size=1" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.2 BlockCipher, without EtM" \
"$P_SRV response_size=1" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA etm=0" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.2 BlockCipher larger MAC" \
"$P_SRV response_size=1" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.2 BlockCipher, truncated MAC" \
"$P_SRV response_size=1 trunc_hmac=1" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.2 BlockCipher, without EtM, truncated MAC" \
"$P_SRV response_size=1 trunc_hmac=1" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.2 StreamCipher" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.2 StreamCipher, without EtM" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.2 StreamCipher, truncated MAC" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet TLS 1.2 StreamCipher, without EtM, truncated MAC" \
"$P_SRV response_size=1 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.2 AEAD" \
"$P_SRV response_size=1" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CCM" \
0 \
-c "Read from server: 1 bytes read"
run_test "Small server packet TLS 1.2 AEAD shorter tag" \
"$P_SRV response_size=1" \
"$P_CLI force_version=tls1_2 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CCM-8" \
0 \
-c "Read from server: 1 bytes read"
# Tests for small server packets in DTLS
# 1-byte server responses over DTLS 1.0/1.2, gated on
# MBEDTLS_SSL_PROTO_DTLS (and MBEDTLS_SSL_TRUNCATED_HMAC where used).
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
run_test "Small server packet DTLS 1.0" \
"$P_SRV dtls=1 response_size=1 force_version=dtls1" \
"$P_CLI dtls=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
run_test "Small server packet DTLS 1.0, without EtM" \
"$P_SRV dtls=1 response_size=1 force_version=dtls1 etm=0" \
"$P_CLI dtls=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet DTLS 1.0, truncated hmac" \
"$P_SRV dtls=1 response_size=1 force_version=dtls1 trunc_hmac=1" \
"$P_CLI dtls=1 trunc_hmac=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet DTLS 1.0, without EtM, truncated MAC" \
"$P_SRV dtls=1 response_size=1 force_version=dtls1 trunc_hmac=1 etm=0" \
"$P_CLI dtls=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1"\
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
run_test "Small server packet DTLS 1.2" \
"$P_SRV dtls=1 response_size=1 force_version=dtls1_2" \
"$P_CLI dtls=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
run_test "Small server packet DTLS 1.2, without EtM" \
"$P_SRV dtls=1 response_size=1 force_version=dtls1_2 etm=0" \
"$P_CLI dtls=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet DTLS 1.2, truncated hmac" \
"$P_SRV dtls=1 response_size=1 force_version=dtls1_2 trunc_hmac=1" \
"$P_CLI dtls=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
0 \
-c "Read from server: 1 bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Small server packet DTLS 1.2, without EtM, truncated MAC" \
"$P_SRV dtls=1 response_size=1 force_version=dtls1_2 trunc_hmac=1 etm=0" \
"$P_CLI dtls=1 \
force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1"\
0 \
-c "Read from server: 1 bytes read"
# A test for extensions in SSLv3
# SSLv3 predates TLS extensions, so even though the client requests several
# features carried in extensions (tickets, max_frag_len, ALPN), the server
# must neither dump client hello extensions nor emit any in its server hello.
# Checked via the ABSENCE (-S) of the corresponding server debug messages.
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "SSLv3 with extensions, server side" \
            "$P_SRV min_version=ssl3 debug_level=3" \
            "$P_CLI force_version=ssl3 tickets=1 max_frag_len=4096 alpn=abc,1234" \
            0 \
            -S "dumping 'client hello extensions'" \
            -S "server hello, total extension length:"
# Test for large client packets
# How many fragments do we expect to write $1 bytes?
# Ceiling division: how many $MAX_OUT_LEN-byte output fragments are needed
# to write $1 bytes.  Used by the "Large client packet" tests below to
# compute the expected "N bytes written in K fragments" log line.
fragments_for_write() {
    # (bytes + max - 1) / max == ceil(bytes / max) in integer arithmetic;
    # inside $(( )) variables may be referenced without the leading '$'.
    echo "$(( ($1 + MAX_OUT_LEN - 1) / MAX_OUT_LEN ))"
}
# -- Large client packets, SSLv3 and TLS 1.0 --
# The client writes a 16384-byte message; the server must read
# $MAX_CONTENT_LEN bytes.  Where checked, the client-side log must report the
# fragment count computed by fragments_for_write.  recsplit=0 disables the
# client's 1/n-1 record splitting (BEAST countermeasure) so the write is not
# split into an extra 1-byte record.
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Large client packet SSLv3 BlockCipher" \
            "$P_SRV min_version=ssl3" \
            "$P_CLI request_size=16384 force_version=ssl3 recsplit=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Large client packet SSLv3 StreamCipher" \
            "$P_SRV min_version=ssl3 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI request_size=16384 force_version=ssl3 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.0 BlockCipher" \
            "$P_SRV" \
            "$P_CLI request_size=16384 force_version=tls1 recsplit=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
# NOTE(review): some variants below omit the client-side fragment-count check
# and only verify the server read -- presumably intentional, but worth
# confirming against the maintained upstream version of this script.
run_test "Large client packet TLS 1.0 BlockCipher, without EtM" \
            "$P_SRV" \
            "$P_CLI request_size=16384 force_version=tls1 etm=0 recsplit=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.0 BlockCipher, truncated MAC" \
            "$P_SRV trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1 recsplit=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.0 BlockCipher, without EtM, truncated MAC" \
            "$P_SRV trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1 etm=0 recsplit=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.0 StreamCipher" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI request_size=16384 force_version=tls1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.0 StreamCipher, without EtM" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI request_size=16384 force_version=tls1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.0 StreamCipher, truncated MAC" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.0 StreamCipher, without EtM, truncated MAC" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
# -- Large client packets, TLS 1.1 --
# TLS 1.1 uses explicit IVs, so no recsplit option is needed (1/n-1 record
# splitting only applies to SSLv3/TLS 1.0 CBC).  Same 16384-byte write,
# same server-side read check as above.
run_test "Large client packet TLS 1.1 BlockCipher" \
            "$P_SRV" \
            "$P_CLI request_size=16384 force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.1 BlockCipher, without EtM" \
            "$P_SRV" \
            "$P_CLI request_size=16384 force_version=tls1_1 etm=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.1 BlockCipher, truncated MAC" \
            "$P_SRV trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.1 BlockCipher, without EtM, truncated MAC" \
            "$P_SRV trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.1 StreamCipher" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI request_size=16384 force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.1 StreamCipher, without EtM" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI request_size=16384 force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.1 StreamCipher, truncated MAC" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.1 StreamCipher, without EtM, truncated MAC" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
# -- Large client packets, TLS 1.2 --
# Adds AEAD variants (CCM and CCM-8) and a SHA-384 suite to exercise a
# larger MAC, on top of the CBC/RC4 matrix used for earlier versions.
run_test "Large client packet TLS 1.2 BlockCipher" \
            "$P_SRV" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.2 BlockCipher, without EtM" \
            "$P_SRV" \
            "$P_CLI request_size=16384 force_version=tls1_2 etm=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.2 BlockCipher larger MAC" \
            "$P_SRV" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.2 BlockCipher, truncated MAC" \
            "$P_SRV trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.2 BlockCipher, without EtM, truncated MAC" \
            "$P_SRV trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.2 StreamCipher" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.2 StreamCipher, without EtM" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.2 StreamCipher, truncated MAC" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            0 \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large client packet TLS 1.2 StreamCipher, without EtM, truncated MAC" \
            "$P_SRV arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.2 AEAD" \
            "$P_SRV" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CCM" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
run_test "Large client packet TLS 1.2 AEAD shorter tag" \
            "$P_SRV" \
            "$P_CLI request_size=16384 force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CCM-8" \
            0 \
            -c "16384 bytes written in $(fragments_for_write 16384) fragments" \
            -s "Read from client: $MAX_CONTENT_LEN bytes read"
# Test for large server packets
# The server sends 16384 bytes; the client must read them all.  For
# SSLv3/TLS 1.0 CBC suites the SERVER applies 1/n-1 record splitting
# (BEAST countermeasure), so the client sees a 1-byte read followed by a
# 16383-byte read instead of a single 16384-byte read.
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Large server packet SSLv3 StreamCipher" \
            "$P_SRV response_size=16384 min_version=ssl3 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=ssl3 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            0 \
            -c "Read from server: 16384 bytes read"
# Checking next 4 tests logs for 1n-1 split against BEAST too
# (client-side recsplit=0 only disables the CLIENT's splitting; the server
# still splits its own writes, hence the 1 + 16383 byte pattern and the
# explicit check (-C) that no single 16384-byte read occurs).
requires_config_enabled MBEDTLS_SSL_PROTO_SSL3
run_test "Large server packet SSLv3 BlockCipher" \
            "$P_SRV response_size=16384 min_version=ssl3" \
            "$P_CLI force_version=ssl3 recsplit=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -c "Read from server: 1 bytes read"\
            -c "16383 bytes read"\
            -C "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.0 BlockCipher" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1 recsplit=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -c "Read from server: 1 bytes read"\
            -c "16383 bytes read"\
            -C "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.0 BlockCipher, without EtM" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1 etm=0 recsplit=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -c "Read from server: 1 bytes read"\
            -c "16383 bytes read"\
            -C "Read from server: 16384 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.0 BlockCipher truncated MAC" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1 recsplit=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA \
            trunc_hmac=1" \
            0 \
            -c "Read from server: 1 bytes read"\
            -c "16383 bytes read"\
            -C "Read from server: 16384 bytes read"
# Stream-cipher suites are not subject to BEAST splitting: a single
# 16384-byte fragment is written and read in one go.
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.0 StreamCipher truncated MAC" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=tls1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA \
            trunc_hmac=1" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.0 StreamCipher" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=tls1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.0 StreamCipher, without EtM" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=tls1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.0 StreamCipher, truncated MAC" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI force_version=tls1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.0 StreamCipher, without EtM, truncated MAC" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI force_version=tls1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
# -- Large server packets, TLS 1.1 --
# Explicit IVs in TLS 1.1 make BEAST splitting unnecessary, so the client
# expects a single 16384-byte read in all variants.
run_test "Large server packet TLS 1.1 BlockCipher" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.1 BlockCipher, without EtM" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1_1 etm=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.1 BlockCipher truncated MAC" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA \
            trunc_hmac=1" \
            0 \
            -c "Read from server: 16384 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.1 BlockCipher, without EtM, truncated MAC" \
            "$P_SRV response_size=16384 trunc_hmac=1" \
            "$P_CLI force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.1 StreamCipher" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            0 \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.1 StreamCipher, without EtM" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.1 StreamCipher truncated MAC" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA \
            trunc_hmac=1" \
            0 \
            -c "Read from server: 16384 bytes read"
# Fix: this test uses trunc_hmac=1 on both sides, so it must be gated on
# MBEDTLS_SSL_TRUNCATED_HMAC like every sibling truncated-MAC test
# (cf. the TLS 1.0 and TLS 1.2 variants); the guard was missing here and
# the test would misbehave in builds without that option.
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.1 StreamCipher, without EtM, truncated MAC" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI force_version=tls1_1 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
# -- Large server packets, TLS 1.2 --
# CBC matrix plus a SHA-384 suite to exercise a larger MAC.
run_test "Large server packet TLS 1.2 BlockCipher" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.2 BlockCipher, without EtM" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1_2 etm=0 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.2 BlockCipher larger MAC" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384" \
            0 \
            -c "Read from server: 16384 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.2 BlockCipher truncated MAC" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA \
            trunc_hmac=1" \
            0 \
            -c "Read from server: 16384 bytes read"
# Fix: missing MBEDTLS_SSL_TRUNCATED_HMAC guard.  The test enables
# trunc_hmac=1 on both server and client, so it must be skipped in builds
# without truncated-HMAC support, consistent with the other truncated-MAC
# tests in this file.
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.2 BlockCipher, without EtM, truncated MAC" \
            "$P_SRV response_size=16384 trunc_hmac=1" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CBC-SHA trunc_hmac=1 etm=0" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
# TLS 1.2 stream-cipher and AEAD variants: one 16384-byte fragment written
# by the server, one 16384-byte read on the client.
run_test "Large server packet TLS 1.2 StreamCipher" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.2 StreamCipher, without EtM" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA etm=0" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.2 StreamCipher truncated MAC" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA \
            trunc_hmac=1" \
            0 \
            -c "Read from server: 16384 bytes read"
requires_config_enabled MBEDTLS_SSL_TRUNCATED_HMAC
run_test "Large server packet TLS 1.2 StreamCipher, without EtM, truncated MAC" \
            "$P_SRV response_size=16384 arc4=1 force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-RC4-128-SHA trunc_hmac=1 etm=0" \
            0 \
            -s "16384 bytes written in 1 fragments" \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.2 AEAD" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CCM" \
            0 \
            -c "Read from server: 16384 bytes read"
run_test "Large server packet TLS 1.2 AEAD shorter tag" \
            "$P_SRV response_size=16384" \
            "$P_CLI force_version=tls1_2 \
            force_ciphersuite=TLS-RSA-WITH-AES-256-CCM-8" \
            0 \
            -c "Read from server: 16384 bytes read"
# Tests of asynchronous private key support in SSL
# (ssl_server2 options: async_operations selects which operations go through
# the async callbacks -- 's' = sign, 'd' = decrypt; async_private_delayN is
# how many times the resume callback returns "call again" before completing.)
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: sign, delay=0" \
            "$P_SRV \
            async_operations=s async_private_delay1=0 async_private_delay2=0" \
            "$P_CLI" \
            0 \
            -s "Async sign callback: using key slot " \
            -s "Async resume (slot [0-9]): sign done, status=0"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: sign, delay=1" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 async_private_delay2=1" \
            "$P_CLI" \
            0 \
            -s "Async sign callback: using key slot " \
            -s "Async resume (slot [0-9]): call 0 more times." \
            -s "Async resume (slot [0-9]): sign done, status=0"
# delay=2: the sign callback must be invoked only once (-U checks the log
# line is unique), with two "call again" resumes before completion.
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: sign, delay=2" \
            "$P_SRV \
            async_operations=s async_private_delay1=2 async_private_delay2=2" \
            "$P_CLI" \
            0 \
            -s "Async sign callback: using key slot " \
            -U "Async sign callback: using key slot " \
            -s "Async resume (slot [0-9]): call 1 more times." \
            -s "Async resume (slot [0-9]): call 0 more times." \
            -s "Async resume (slot [0-9]): sign done, status=0"
# Test that the async callback correctly signs the 36-byte hash of TLS 1.0/1.1
# with RSA PKCS#1v1.5 as used in TLS 1.0/1.1.
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
run_test "SSL async private: sign, RSA, TLS 1.1" \
            "$P_SRV key_file=data_files/server2.key crt_file=data_files/server2.crt \
            async_operations=s async_private_delay1=0 async_private_delay2=0" \
            "$P_CLI force_version=tls1_1" \
            0 \
            -s "Async sign callback: using key slot " \
            -s "Async resume (slot [0-9]): sign done, status=0"
# SNI selects a different certificate/key pair; the async sign path must
# still be used for the SNI-selected key, and the client must see the
# polarssl.example certificate.
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: sign, SNI" \
            "$P_SRV debug_level=3 \
            async_operations=s async_private_delay1=0 async_private_delay2=0 \
            crt_file=data_files/server5.crt key_file=data_files/server5.key \
            sni=localhost,data_files/server2.crt,data_files/server2.key,-,-,-,polarssl.example,data_files/server1-nospace.crt,data_files/server1.key,-,-,-" \
            "$P_CLI server_name=polarssl.example" \
            0 \
            -s "Async sign callback: using key slot " \
            -s "Async resume (slot [0-9]): sign done, status=0" \
            -s "parse ServerName extension" \
            -c "issuer name *: C=NL, O=PolarSSL, CN=PolarSSL Test CA" \
            -c "subject name *: C=NL, O=PolarSSL, CN=polarssl.example"
# Decrypt path: requires an RSA key-exchange suite so the server performs an
# RSA decryption of the premaster secret.
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: decrypt, delay=0" \
            "$P_SRV \
            async_operations=d async_private_delay1=0 async_private_delay2=0" \
            "$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
            0 \
            -s "Async decrypt callback: using key slot " \
            -s "Async resume (slot [0-9]): decrypt done, status=0"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: decrypt, delay=1" \
            "$P_SRV \
            async_operations=d async_private_delay1=1 async_private_delay2=1" \
            "$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
            0 \
            -s "Async decrypt callback: using key slot " \
            -s "Async resume (slot [0-9]): call 0 more times." \
            -s "Async resume (slot [0-9]): decrypt done, status=0"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: decrypt RSA-PSK, delay=0" \
            "$P_SRV psk=abc123 \
            async_operations=d async_private_delay1=0 async_private_delay2=0" \
            "$P_CLI psk=abc123 \
            force_ciphersuite=TLS-RSA-PSK-WITH-AES-128-CBC-SHA256" \
            0 \
            -s "Async decrypt callback: using key slot " \
            -s "Async resume (slot [0-9]): decrypt done, status=0"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: decrypt RSA-PSK, delay=1" \
            "$P_SRV psk=abc123 \
            async_operations=d async_private_delay1=1 async_private_delay2=1" \
            "$P_CLI psk=abc123 \
            force_ciphersuite=TLS-RSA-PSK-WITH-AES-128-CBC-SHA256" \
            0 \
            -s "Async decrypt callback: using key slot " \
            -s "Async resume (slot [0-9]): call 0 more times." \
            -s "Async resume (slot [0-9]): decrypt done, status=0"
# Missing-callback cases: the client runs twice in one command string --
# the first handshake needs the absent callback and must fail (exit 1,
# checked by the inline "[ \$? -eq 1 ]"), then a second handshake using the
# available operation must succeed.
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: sign callback not present" \
            "$P_SRV \
            async_operations=d async_private_delay1=1 async_private_delay2=1" \
            "$P_CLI; [ \$? -eq 1 ] &&
            $P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
            0 \
            -S "Async sign callback" \
            -s "! mbedtls_ssl_handshake returned" \
            -s "The own private key or pre-shared key is not set, but needed" \
            -s "Async resume (slot [0-9]): decrypt done, status=0" \
            -s "Successful connection"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: decrypt callback not present" \
            "$P_SRV debug_level=1 \
            async_operations=s async_private_delay1=1 async_private_delay2=1" \
            "$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA;
            [ \$? -eq 1 ] && $P_CLI" \
            0 \
            -S "Async decrypt callback" \
            -s "! mbedtls_ssl_handshake returned" \
            -s "got no RSA private key" \
            -s "Async resume (slot [0-9]): sign done, status=0" \
            -s "Successful connection"
# Slot-selection tests: two certificates are configured (key1 ECDSA, key2
# RSA); async_private_delayN registers only the corresponding key with the
# async engine, and the forced ciphersuite decides which key the handshake
# needs, so the log must show the expected slot number.
# key1: ECDSA, key2: RSA; use key1 from slot 0
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: slot 0 used with key1" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 \
            key_file=data_files/server5.key crt_file=data_files/server5.crt \
            key_file2=data_files/server2.key crt_file2=data_files/server2.crt" \
            "$P_CLI force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256" \
            0 \
            -s "Async sign callback: using key slot 0," \
            -s "Async resume (slot 0): call 0 more times." \
            -s "Async resume (slot 0): sign done, status=0"
# key1: ECDSA, key2: RSA; use key2 from slot 0
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: slot 0 used with key2" \
            "$P_SRV \
            async_operations=s async_private_delay2=1 \
            key_file=data_files/server5.key crt_file=data_files/server5.crt \
            key_file2=data_files/server2.key crt_file2=data_files/server2.crt" \
            "$P_CLI force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256" \
            0 \
            -s "Async sign callback: using key slot 0," \
            -s "Async resume (slot 0): call 0 more times." \
            -s "Async resume (slot 0): sign done, status=0"
# key1: ECDSA, key2: RSA; use key2 from slot 1
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: slot 1 used with key2" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 async_private_delay2=1 \
            key_file=data_files/server5.key crt_file=data_files/server5.crt \
            key_file2=data_files/server2.key crt_file2=data_files/server2.crt" \
            "$P_CLI force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256" \
            0 \
            -s "Async sign callback: using key slot 1," \
            -s "Async resume (slot 1): call 0 more times." \
            -s "Async resume (slot 1): sign done, status=0"
# key1: ECDSA, key2: RSA; use key2 directly
# (key2 is not registered with the async engine, so the callback declines
# and the server falls back to the transparent key.)
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: fall back to transparent key" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 \
            key_file=data_files/server5.key crt_file=data_files/server5.crt \
            key_file2=data_files/server2.key crt_file2=data_files/server2.crt " \
            "$P_CLI force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256" \
            0 \
            -s "Async sign callback: no key matches this certificate."
# Error-injection tests (async_private_error: 1 = fail in start, 2 = cancel
# after start, 3 = fail in resume).  The handshake must fail (expected exit
# code 1) at the right stage, with no spurious resume/cancel activity.
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: sign, error in start" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 async_private_delay2=1 \
            async_private_error=1" \
            "$P_CLI" \
            1 \
            -s "Async sign callback: injected error" \
            -S "Async resume" \
            -S "Async cancel" \
            -s "! mbedtls_ssl_handshake returned"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: sign, cancel after start" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 async_private_delay2=1 \
            async_private_error=2" \
            "$P_CLI" \
            1 \
            -s "Async sign callback: using key slot " \
            -S "Async resume" \
            -s "Async cancel"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: sign, error in resume" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 async_private_delay2=1 \
            async_private_error=3" \
            "$P_CLI" \
            1 \
            -s "Async sign callback: using key slot " \
            -s "Async resume callback: sign done but injected error" \
            -S "Async cancel" \
            -s "! mbedtls_ssl_handshake returned"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: decrypt, error in start" \
            "$P_SRV \
            async_operations=d async_private_delay1=1 async_private_delay2=1 \
            async_private_error=1" \
            "$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
            1 \
            -s "Async decrypt callback: injected error" \
            -S "Async resume" \
            -S "Async cancel" \
            -s "! mbedtls_ssl_handshake returned"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: decrypt, cancel after start" \
            "$P_SRV \
            async_operations=d async_private_delay1=1 async_private_delay2=1 \
            async_private_error=2" \
            "$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
            1 \
            -s "Async decrypt callback: using key slot " \
            -S "Async resume" \
            -s "Async cancel"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: decrypt, error in resume" \
            "$P_SRV \
            async_operations=d async_private_delay1=1 async_private_delay2=1 \
            async_private_error=3" \
            "$P_CLI force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
            1 \
            -s "Async decrypt callback: using key slot " \
            -s "Async resume callback: decrypt done but injected error" \
            -S "Async cancel" \
            -s "! mbedtls_ssl_handshake returned"
# Recovery tests: negative async_private_error values inject the error only
# on the FIRST connection; the client runs twice ("$P_CLI; [ \$? -eq 1 ] &&
# $P_CLI") so the first handshake fails and the second must succeed,
# proving the server recovers cleanly from an async failure.
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: cancel after start then operate correctly" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 async_private_delay2=1 \
            async_private_error=-2" \
            "$P_CLI; [ \$? -eq 1 ] && $P_CLI" \
            0 \
            -s "Async cancel" \
            -s "! mbedtls_ssl_handshake returned" \
            -s "Async resume" \
            -s "Successful connection"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: error in resume then operate correctly" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 async_private_delay2=1 \
            async_private_error=-3" \
            "$P_CLI; [ \$? -eq 1 ] && $P_CLI" \
            0 \
            -s "! mbedtls_ssl_handshake returned" \
            -s "Async resume" \
            -s "Successful connection"
# key1: ECDSA, key2: RSA; use key1 through async, then key2 directly
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: cancel after start then fall back to transparent key" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 async_private_error=-2 \
            key_file=data_files/server5.key crt_file=data_files/server5.crt \
            key_file2=data_files/server2.key crt_file2=data_files/server2.crt" \
            "$P_CLI force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256;
            [ \$? -eq 1 ] &&
            $P_CLI force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256" \
            0 \
            -s "Async sign callback: using key slot 0" \
            -S "Async resume" \
            -s "Async cancel" \
            -s "! mbedtls_ssl_handshake returned" \
            -s "Async sign callback: no key matches this certificate." \
            -s "Successful connection"
# key1: ECDSA, key2: RSA; use key1 through async, then key2 directly
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
run_test "SSL async private: sign, error in resume then fall back to transparent key" \
            "$P_SRV \
            async_operations=s async_private_delay1=1 async_private_error=-3 \
            key_file=data_files/server5.key crt_file=data_files/server5.crt \
            key_file2=data_files/server2.key crt_file2=data_files/server2.crt" \
            "$P_CLI force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256;
            [ \$? -eq 1 ] &&
            $P_CLI force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256" \
            0 \
            -s "Async resume" \
            -s "! mbedtls_ssl_handshake returned" \
            -s "Async sign callback: no key matches this certificate." \
            -s "Successful connection"
# --- SSL async private-key tests combined with renegotiation ---
# All four variants (client/server initiated x sign/decrypt) must show the
# async callback being used and the resume completing with status=0.
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "SSL async private: renegotiation: client-initiated; sign" \
"$P_SRV \
async_operations=s async_private_delay1=1 async_private_delay2=1 \
exchanges=2 renegotiation=1" \
"$P_CLI exchanges=2 renegotiation=1 renegotiate=1" \
0 \
-s "Async sign callback: using key slot " \
-s "Async resume (slot [0-9]): sign done, status=0"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "SSL async private: renegotiation: server-initiated; sign" \
"$P_SRV \
async_operations=s async_private_delay1=1 async_private_delay2=1 \
exchanges=2 renegotiation=1 renegotiate=1" \
"$P_CLI exchanges=2 renegotiation=1" \
0 \
-s "Async sign callback: using key slot " \
-s "Async resume (slot [0-9]): sign done, status=0"
# The decrypt variants force an RSA key-exchange suite so the server-side
# private-key operation is a decrypt rather than a sign.
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "SSL async private: renegotiation: client-initiated; decrypt" \
"$P_SRV \
async_operations=d async_private_delay1=1 async_private_delay2=1 \
exchanges=2 renegotiation=1" \
"$P_CLI exchanges=2 renegotiation=1 renegotiate=1 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
0 \
-s "Async decrypt callback: using key slot " \
-s "Async resume (slot [0-9]): decrypt done, status=0"
requires_config_enabled MBEDTLS_SSL_ASYNC_PRIVATE
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "SSL async private: renegotiation: server-initiated; decrypt" \
"$P_SRV \
async_operations=d async_private_delay1=1 async_private_delay2=1 \
exchanges=2 renegotiation=1 renegotiate=1" \
"$P_CLI exchanges=2 renegotiation=1 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
0 \
-s "Async decrypt callback: using key slot " \
-s "Async resume (slot [0-9]): decrypt done, status=0"
# Tests for ECC extensions (rfc 4492)
# With a non-ECC suite forced, neither side should offer or parse the
# supported_elliptic_curves / supported_point_formats extensions.
requires_config_enabled MBEDTLS_AES_C
requires_config_enabled MBEDTLS_CIPHER_MODE_CBC
requires_config_enabled MBEDTLS_SHA256_C
requires_config_enabled MBEDTLS_KEY_EXCHANGE_RSA_ENABLED
run_test "Force a non ECC ciphersuite in the client side" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3 force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA256" \
0 \
-C "client hello, adding supported_elliptic_curves extension" \
-C "client hello, adding supported_point_formats extension" \
-S "found supported elliptic curves extension" \
-S "found supported point formats extension"
requires_config_enabled MBEDTLS_AES_C
requires_config_enabled MBEDTLS_CIPHER_MODE_CBC
requires_config_enabled MBEDTLS_SHA256_C
requires_config_enabled MBEDTLS_KEY_EXCHANGE_RSA_ENABLED
run_test "Force a non ECC ciphersuite in the server side" \
"$P_SRV debug_level=3 force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA256" \
"$P_CLI debug_level=3" \
0 \
-C "found supported_point_formats extension" \
-S "server hello, supported_point_formats extension"
# With an ECC suite forced, both extensions must be sent and parsed.
requires_config_enabled MBEDTLS_AES_C
requires_config_enabled MBEDTLS_CIPHER_MODE_CBC
requires_config_enabled MBEDTLS_SHA256_C
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
run_test "Force an ECC ciphersuite in the client side" \
"$P_SRV debug_level=3" \
"$P_CLI debug_level=3 force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256" \
0 \
-c "client hello, adding supported_elliptic_curves extension" \
-c "client hello, adding supported_point_formats extension" \
-s "found supported elliptic curves extension" \
-s "found supported point formats extension"
requires_config_enabled MBEDTLS_AES_C
requires_config_enabled MBEDTLS_CIPHER_MODE_CBC
requires_config_enabled MBEDTLS_SHA256_C
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
run_test "Force an ECC ciphersuite in the server side" \
"$P_SRV debug_level=3 force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256" \
"$P_CLI debug_level=3" \
0 \
-c "found supported_point_formats extension" \
-s "server hello, supported_point_formats extension"
# Tests for DTLS HelloVerifyRequest
# With cookies enabled the first ClientHello fails verification, the client
# receives a HelloVerifyRequest and the retried hello passes.
run_test "DTLS cookie: enabled" \
"$P_SRV dtls=1 debug_level=2" \
"$P_CLI dtls=1 debug_level=2" \
0 \
-s "cookie verification failed" \
-s "cookie verification passed" \
-S "cookie verification skipped" \
-c "received hello verify request" \
-s "hello verification requested" \
-S "SSL - The requested feature is not available"
# cookies=0 disables verification entirely: no HelloVerifyRequest round-trip.
run_test "DTLS cookie: disabled" \
"$P_SRV dtls=1 debug_level=2 cookies=0" \
"$P_CLI dtls=1 debug_level=2" \
0 \
-S "cookie verification failed" \
-S "cookie verification passed" \
-s "cookie verification skipped" \
-C "received hello verify request" \
-S "hello verification requested" \
-S "SSL - The requested feature is not available"
# cookies=-1: cookie callbacks unset, so verification cannot succeed and the
# handshake fails with "feature not available" (expected exit 1).
run_test "DTLS cookie: default (failing)" \
"$P_SRV dtls=1 debug_level=2 cookies=-1" \
"$P_CLI dtls=1 debug_level=2 hs_timeout=100-400" \
1 \
-s "cookie verification failed" \
-S "cookie verification passed" \
-S "cookie verification skipped" \
-C "received hello verify request" \
-S "hello verification requested" \
-s "SSL - The requested feature is not available"
# Same as "enabled" but over IPv6 (::1) and with non-blocking I/O below.
requires_ipv6
run_test "DTLS cookie: enabled, IPv6" \
"$P_SRV dtls=1 debug_level=2 server_addr=::1" \
"$P_CLI dtls=1 debug_level=2 server_addr=::1" \
0 \
-s "cookie verification failed" \
-s "cookie verification passed" \
-S "cookie verification skipped" \
-c "received hello verify request" \
-s "hello verification requested" \
-S "SSL - The requested feature is not available"
run_test "DTLS cookie: enabled, nbio" \
"$P_SRV dtls=1 nbio=2 debug_level=2" \
"$P_CLI dtls=1 nbio=2 debug_level=2" \
0 \
-s "cookie verification failed" \
-s "cookie verification passed" \
-S "cookie verification skipped" \
-c "received hello verify request" \
-s "hello verification requested" \
-S "SSL - The requested feature is not available"
# Tests for client reconnecting from the same port with DTLS
# Reference run: no reconnection, so the server must not report one.
not_with_valgrind # spurious resend
run_test "DTLS client reconnect from same port: reference" \
"$P_SRV dtls=1 exchanges=2 read_timeout=1000" \
"$P_CLI dtls=1 exchanges=2 debug_level=2 hs_timeout=500-1000" \
0 \
-C "resend" \
-S "The operation timed out" \
-S "Client initiated reconnection from same port"
# reconnect_hard=1 makes the client redo the handshake from the same port;
# the server must detect and accept the reconnection.
not_with_valgrind # spurious resend
run_test "DTLS client reconnect from same port: reconnect" \
"$P_SRV dtls=1 exchanges=2 read_timeout=1000" \
"$P_CLI dtls=1 exchanges=2 debug_level=2 hs_timeout=500-1000 reconnect_hard=1" \
0 \
-C "resend" \
-S "The operation timed out" \
-s "Client initiated reconnection from same port"
not_with_valgrind # server/client too slow to respond in time (next test has higher timeouts)
run_test "DTLS client reconnect from same port: reconnect, nbio, no valgrind" \
"$P_SRV dtls=1 exchanges=2 read_timeout=1000 nbio=2" \
"$P_CLI dtls=1 exchanges=2 debug_level=2 hs_timeout=500-1000 reconnect_hard=1" \
0 \
-S "The operation timed out" \
-s "Client initiated reconnection from same port"
only_with_valgrind # Only with valgrind, do previous test but with higher read_timeout and hs_timeout
run_test "DTLS client reconnect from same port: reconnect, nbio, valgrind" \
"$P_SRV dtls=1 exchanges=2 read_timeout=2000 nbio=2 hs_timeout=1500-6000" \
"$P_CLI dtls=1 exchanges=2 debug_level=2 hs_timeout=1500-3000 reconnect_hard=1" \
0 \
-S "The operation timed out" \
-s "Client initiated reconnection from same port"
# Without cookies the server cannot detect the reconnection; the read simply
# times out instead.
run_test "DTLS client reconnect from same port: no cookies" \
"$P_SRV dtls=1 exchanges=2 read_timeout=1000 cookies=0" \
"$P_CLI dtls=1 exchanges=2 debug_level=2 hs_timeout=500-8000 reconnect_hard=1" \
0 \
-s "The operation timed out" \
-S "Client initiated reconnection from same port"
# Tests for various cases of client authentication with DTLS
# (focused on handshake flows and message parsing)
run_test "DTLS client auth: required" \
"$P_SRV dtls=1 auth_mode=required" \
"$P_CLI dtls=1" \
0 \
-s "Verifying peer X.509 certificate... ok"
# optional auth + certificate-less client: handshake succeeds, server notes
# the missing certificate.
run_test "DTLS client auth: optional, client has no cert" \
"$P_SRV dtls=1 auth_mode=optional" \
"$P_CLI dtls=1 crt_file=none key_file=none" \
0 \
-s "! Certificate was missing"
run_test "DTLS client auth: none, client has no cert" \
"$P_SRV dtls=1 auth_mode=none" \
"$P_CLI dtls=1 crt_file=none key_file=none debug_level=2" \
0 \
-c "skip write certificate$" \
-s "! Certificate verification was skipped"
# Mismatched PSKs must yield a MAC failure server-side and a fatal alert
# client-side (expected exit 1).
run_test "DTLS wrong PSK: badmac alert" \
"$P_SRV dtls=1 psk=abc123 force_ciphersuite=TLS-PSK-WITH-AES-128-GCM-SHA256" \
"$P_CLI dtls=1 psk=abc124" \
1 \
-s "SSL - Verification of the message MAC failed" \
-c "SSL - A fatal alert message was received from our peer"
# Tests for receiving fragmented handshake messages with DTLS
# The peer's MTU is lowered step by step; with a large enough MTU no
# fragmentation should be observed, with small MTUs the client must log
# "found fragmented DTLS handshake message" and still finish without error.
requires_gnutls
run_test "DTLS reassembly: no fragmentation (gnutls server)" \
"$G_SRV -u --mtu 2048 -a" \
"$P_CLI dtls=1 debug_level=2" \
0 \
-C "found fragmented DTLS handshake message" \
-C "error"
requires_gnutls
run_test "DTLS reassembly: some fragmentation (gnutls server)" \
"$G_SRV -u --mtu 512" \
"$P_CLI dtls=1 debug_level=2" \
0 \
-c "found fragmented DTLS handshake message" \
-C "error"
requires_gnutls
run_test "DTLS reassembly: more fragmentation (gnutls server)" \
"$G_SRV -u --mtu 128" \
"$P_CLI dtls=1 debug_level=2" \
0 \
-c "found fragmented DTLS handshake message" \
-C "error"
requires_gnutls
run_test "DTLS reassembly: more fragmentation, nbio (gnutls server)" \
"$G_SRV -u --mtu 128" \
"$P_CLI dtls=1 nbio=2 debug_level=2" \
0 \
-c "found fragmented DTLS handshake message" \
-C "error"
# Renegotiation variants: the renegotiated flight must also be reassembled.
requires_gnutls
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "DTLS reassembly: fragmentation, renego (gnutls server)" \
"$G_SRV -u --mtu 256" \
"$P_CLI debug_level=3 dtls=1 renegotiation=1 renegotiate=1" \
0 \
-c "found fragmented DTLS handshake message" \
-c "client hello, adding renegotiation extension" \
-c "found renegotiation extension" \
-c "=> renegotiate" \
-C "mbedtls_ssl_handshake returned" \
-C "error" \
-s "Extra-header:"
requires_gnutls
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "DTLS reassembly: fragmentation, nbio, renego (gnutls server)" \
"$G_SRV -u --mtu 256" \
"$P_CLI debug_level=3 nbio=2 dtls=1 renegotiation=1 renegotiate=1" \
0 \
-c "found fragmented DTLS handshake message" \
-c "client hello, adding renegotiation extension" \
-c "found renegotiation extension" \
-c "=> renegotiate" \
-C "mbedtls_ssl_handshake returned" \
-C "error" \
-s "Extra-header:"
# Same reassembly checks against an openssl server.
run_test "DTLS reassembly: no fragmentation (openssl server)" \
"$O_SRV -dtls1 -mtu 2048" \
"$P_CLI dtls=1 debug_level=2" \
0 \
-C "found fragmented DTLS handshake message" \
-C "error"
run_test "DTLS reassembly: some fragmentation (openssl server)" \
"$O_SRV -dtls1 -mtu 768" \
"$P_CLI dtls=1 debug_level=2" \
0 \
-c "found fragmented DTLS handshake message" \
-C "error"
run_test "DTLS reassembly: more fragmentation (openssl server)" \
"$O_SRV -dtls1 -mtu 256" \
"$P_CLI dtls=1 debug_level=2" \
0 \
-c "found fragmented DTLS handshake message" \
-C "error"
run_test "DTLS reassembly: fragmentation, nbio (openssl server)" \
"$O_SRV -dtls1 -mtu 256" \
"$P_CLI dtls=1 nbio=2 debug_level=2" \
0 \
-c "found fragmented DTLS handshake message" \
-C "error"
# Tests for sending fragmented handshake messages with DTLS
#
# Use client auth when we need the client to send large messages,
# and use large cert chains on both sides too (the long chains we have all use
# both RSA and ECDSA, but ideally we should have long chains with either).
# Sizes reached (UDP payload):
# - 2037B for server certificate
# - 1542B for client certificate
# - 1013B for newsessionticket
# - all others below 512B
# All those tests assume MAX_CONTENT_LEN is at least 2048
# max_frag_len=4096 on both sides: nothing should be fragmented.
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "DTLS fragmenting: none (for reference)" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
max_frag_len=4096" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
max_frag_len=4096" \
0 \
-S "found fragmented DTLS handshake message" \
-C "found fragmented DTLS handshake message" \
-C "error"
# Server-side MFL of 1024 forces only the server's messages to be fragmented,
# which the client then reassembles.
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "DTLS fragmenting: server only (max_frag_len)" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
max_frag_len=1024" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
max_frag_len=2048" \
0 \
-S "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# With the MFL extension, the server has no way of forcing
# the client to not exceed a certain MTU; hence, the following
# test can't be replicated with an MTU proxy such as the one
# `client-initiated, server only (max_frag_len)` below.
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "DTLS fragmenting: server only (more) (max_frag_len)" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
max_frag_len=512" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
max_frag_len=4096" \
0 \
-S "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# Client requests MFL 512; auth_mode=none keeps the client's own messages
# small, so only the server's flight is fragmented.
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "DTLS fragmenting: client-initiated, server only (max_frag_len)" \
"$P_SRV dtls=1 debug_level=2 auth_mode=none \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
max_frag_len=2048" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
max_frag_len=512" \
0 \
-S "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# While not required by the standard defining the MFL extension
# (according to which it only applies to records, not to datagrams),
# Mbed TLS will never send datagrams larger than MFL + { Max record expansion },
# as otherwise there wouldn't be any means to communicate MTU restrictions
# to the peer.
# The next test checks that no datagrams significantly larger than the
# negotiated MFL are sent.
# The proxy enforces mtu=560; it would drop oversized datagrams, so success
# here shows the server honored the negotiated MFL of 512.
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "DTLS fragmenting: client-initiated, server only (max_frag_len), proxy MTU" \
-p "$P_PXY mtu=560" \
"$P_SRV dtls=1 debug_level=2 auth_mode=none \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
max_frag_len=2048" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
max_frag_len=512" \
0 \
-S "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# auth_mode=required makes the client certificate large enough that both
# sides must fragment.
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "DTLS fragmenting: client-initiated, both (max_frag_len)" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
max_frag_len=2048" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
max_frag_len=512" \
0 \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# While not required by the standard defining the MFL extension
# (according to which it only applies to records, not to datagrams),
# Mbed TLS will never send datagrams larger than MFL + { Max record expansion },
# as otherwise there wouldn't be any means to communicate MTU restrictions
# to the peer.
# The next test checks that no datagrams significantly larger than the
# negotiated MFL are sent.
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
run_test "DTLS fragmenting: client-initiated, both (max_frag_len), proxy MTU" \
-p "$P_PXY mtu=560" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
max_frag_len=2048" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
max_frag_len=512" \
0 \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# --- Same fragmentation matrix, driven by the local mtu= option instead of
# the negotiated max_frag_len extension ---
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
run_test "DTLS fragmenting: none (for reference) (MTU)" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=4096" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=4096" \
0 \
-S "found fragmented DTLS handshake message" \
-C "found fragmented DTLS handshake message" \
-C "error"
# mtu=512 on the client only: the client fragments (server sees fragments).
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
run_test "DTLS fragmenting: client (MTU)" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=4096" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=512" \
0 \
-s "found fragmented DTLS handshake message" \
-C "found fragmented DTLS handshake message" \
-C "error"
# mtu=512 on the server only: the server fragments (client sees fragments).
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
run_test "DTLS fragmenting: server (MTU)" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=512" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=2048" \
0 \
-S "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# Both sides at mtu=512, with a proxy enforcing the same limit on the wire.
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
run_test "DTLS fragmenting: both (MTU)" \
-p "$P_PXY mtu=512" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=512" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=512" \
0 \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# Test for automatic MTU reduction on repeated resend
# Neither side sets mtu=, but the proxy only passes datagrams up to 508 bytes;
# the stacks must shrink their datagrams on retransmission until the
# handshake gets through (hence the tight hs_timeout bounds).
not_with_valgrind
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
run_test "DTLS fragmenting: proxy MTU: auto-reduction" \
-p "$P_PXY mtu=508" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key\
hs_timeout=100-400" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
hs_timeout=100-400" \
0 \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# Valgrind variant of the previous test with much larger timeouts.
only_with_valgrind
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
run_test "DTLS fragmenting: proxy MTU: auto-reduction" \
-p "$P_PXY mtu=508" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key\
hs_timeout=250-10000" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
hs_timeout=250-10000" \
0 \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# the proxy shouldn't drop or mess up anything, so we shouldn't need to resend
# OTOH the client might resend if the server is to slow to reset after sending
# a HelloVerifyRequest, so only check for no retransmission server-side
not_with_valgrind # spurious resend due to timeout
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
run_test "DTLS fragmenting: proxy MTU, simple handshake" \
-p "$P_PXY mtu=512" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=512" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=512" \
0 \
-S "resend" \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
not_with_valgrind # spurious resend due to timeout
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
run_test "DTLS fragmenting: proxy MTU, simple handshake, nbio" \
-p "$P_PXY mtu=512" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=512 nbio=2" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=512 nbio=2" \
0 \
-S "resend" \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# This ensures things still work after session_reset().
# It also exercises the "resumed handshake" flow.
# Since we don't support reading fragmented ClientHello yet,
# up the MTU to 1450 (larger than ClientHello with session ticket,
# but still smaller than client's Certificate to ensure fragmentation).
# A resend on the client-side might happen if the server is
# slow to reset, therefore omitting '-C "resend"' below.
# reco_delay avoids races where the client reconnects before the server has
# resumed listening, which would result in a spurious resend.
not_with_valgrind # spurious resend due to timeout
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
run_test "DTLS fragmenting: proxy MTU, resumed handshake" \
-p "$P_PXY mtu=1450" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=1450" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=1450 reconnect=1 reco_delay=1" \
0 \
-S "resend" \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# A resend on the client-side might happen if the server is
# slow to reset, therefore omitting '-C "resend"' below.
not_with_valgrind # spurious resend due to timeout
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SHA256_C
# Fixed dependency name: the config symbol is
# MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED (as used by the ECC extension
# tests above); the bare MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA is not a config
# symbol, so the check only worked by accidental prefix match, if at all.
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
requires_config_enabled MBEDTLS_CHACHAPOLY_C
run_test "DTLS fragmenting: proxy MTU, ChachaPoly renego" \
-p "$P_PXY mtu=512" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
exchanges=2 renegotiation=1 \
force_ciphersuite=TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256 \
mtu=512" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
exchanges=2 renegotiation=1 renegotiate=1 \
mtu=512" \
0 \
-S "resend" \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# A resend on the client-side might happen if the server is
# slow to reset, therefore omitting '-C "resend"' below.
not_with_valgrind # spurious resend due to timeout
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SHA256_C
# Fixed dependency name: MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED is the
# actual config symbol (cf. the ECC extension tests above).
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
requires_config_enabled MBEDTLS_AES_C
requires_config_enabled MBEDTLS_GCM_C
run_test "DTLS fragmenting: proxy MTU, AES-GCM renego" \
-p "$P_PXY mtu=512" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
exchanges=2 renegotiation=1 \
force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256 \
mtu=512" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
exchanges=2 renegotiation=1 renegotiate=1 \
mtu=512" \
0 \
-S "resend" \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# A resend on the client-side might happen if the server is
# slow to reset, therefore omitting '-C "resend"' below.
not_with_valgrind # spurious resend due to timeout
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SHA256_C
# Fixed dependency name: MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED is the
# actual config symbol (cf. the ECC extension tests above).
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
requires_config_enabled MBEDTLS_AES_C
requires_config_enabled MBEDTLS_CCM_C
run_test "DTLS fragmenting: proxy MTU, AES-CCM renego" \
-p "$P_PXY mtu=512" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
exchanges=2 renegotiation=1 \
force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-CCM-8 \
mtu=512" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
exchanges=2 renegotiation=1 renegotiate=1 \
mtu=512" \
0 \
-S "resend" \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# A resend on the client-side might happen if the server is
# slow to reset, therefore omitting '-C "resend"' below.
not_with_valgrind # spurious resend due to timeout
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SHA256_C
# Fixed dependency name: MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED is the
# actual config symbol (cf. the ECC extension tests above).
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
requires_config_enabled MBEDTLS_AES_C
requires_config_enabled MBEDTLS_CIPHER_MODE_CBC
requires_config_enabled MBEDTLS_SSL_ENCRYPT_THEN_MAC
run_test "DTLS fragmenting: proxy MTU, AES-CBC EtM renego" \
-p "$P_PXY mtu=512" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
exchanges=2 renegotiation=1 \
force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256 \
mtu=512" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
exchanges=2 renegotiation=1 renegotiate=1 \
mtu=512" \
0 \
-S "resend" \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# A resend on the client-side might happen if the server is
# slow to reset, therefore omitting '-C "resend"' below.
not_with_valgrind # spurious resend due to timeout
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SHA256_C
# Fixed dependency name: MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED is the
# actual config symbol (cf. the ECC extension tests above).
requires_config_enabled MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
requires_config_enabled MBEDTLS_AES_C
requires_config_enabled MBEDTLS_CIPHER_MODE_CBC
run_test "DTLS fragmenting: proxy MTU, AES-CBC non-EtM renego" \
-p "$P_PXY mtu=512" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
exchanges=2 renegotiation=1 \
force_ciphersuite=TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256 etm=0 \
mtu=512" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
exchanges=2 renegotiation=1 renegotiate=1 \
mtu=512" \
0 \
-S "resend" \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# --- Fragmentation under an adversarial proxy ("3d": drop/delay/duplicate
# 1 datagram in 8) — the handshake must still complete, hence the wide
# hs_timeout range and extra client time budget. ---
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
client_needs_more_time 2
run_test "DTLS fragmenting: proxy MTU + 3d" \
-p "$P_PXY mtu=512 drop=8 delay=8 duplicate=8" \
"$P_SRV dgram_packing=0 dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
hs_timeout=250-10000 mtu=512" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
hs_timeout=250-10000 mtu=512" \
0 \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
client_needs_more_time 2
run_test "DTLS fragmenting: proxy MTU + 3d, nbio" \
-p "$P_PXY mtu=512 drop=8 delay=8 duplicate=8" \
"$P_SRV dtls=1 debug_level=2 auth_mode=required \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
hs_timeout=250-10000 mtu=512 nbio=2" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
hs_timeout=250-10000 mtu=512 nbio=2" \
0 \
-s "found fragmented DTLS handshake message" \
-c "found fragmented DTLS handshake message" \
-C "error"
# interop tests for DTLS fragmenting with reliable connection
#
# here and below we just want to test that we fragment in a way that
# pleases other implementations, so we don't need the peer to fragment
# (The duplicated requires_gnutls directive before each run_test was
# redundant — one per test is sufficient — so it has been removed.)
requires_gnutls
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_2
run_test "DTLS fragmenting: gnutls server, DTLS 1.2" \
"$G_SRV -u" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=512 force_version=dtls1_2" \
0 \
-c "fragmenting handshake message" \
-C "error"
requires_gnutls
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
run_test "DTLS fragmenting: gnutls server, DTLS 1.0" \
"$G_SRV -u" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=512 force_version=dtls1" \
0 \
-c "fragmenting handshake message" \
-C "error"
# We use --insecure for the GnuTLS client because it expects
# the hostname / IP it connects to to be the name used in the
# certificate obtained from the server. Here, however, it
# connects to 127.0.0.1 while our test certificates use 'localhost'
# as the server name in the certificate. This will make the
# certificate validation fail, but passing --insecure makes
# GnuTLS continue the connection nonetheless.
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_2
requires_gnutls
run_test "DTLS fragmenting: gnutls client, DTLS 1.2" \
"$P_SRV dtls=1 debug_level=2 \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=512 force_version=dtls1_2" \
"$G_CLI -u --insecure 127.0.0.1" \
0 \
-s "fragmenting handshake message"
# See previous test for the reason to use --insecure
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
requires_gnutls
run_test "DTLS fragmenting: gnutls client, DTLS 1.0" \
"$P_SRV dtls=1 debug_level=2 \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=512 force_version=dtls1" \
"$G_CLI -u --insecure 127.0.0.1" \
0 \
-s "fragmenting handshake message"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_2
run_test "DTLS fragmenting: openssl server, DTLS 1.2" \
"$O_SRV -dtls1_2 -verify 10" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=512 force_version=dtls1_2" \
0 \
-c "fragmenting handshake message" \
-C "error"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
run_test "DTLS fragmenting: openssl server, DTLS 1.0" \
"$O_SRV -dtls1 -verify 10" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
mtu=512 force_version=dtls1" \
0 \
-c "fragmenting handshake message" \
-C "error"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_2
run_test "DTLS fragmenting: openssl client, DTLS 1.2" \
"$P_SRV dtls=1 debug_level=2 \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=512 force_version=dtls1_2" \
"$O_CLI -dtls1_2" \
0 \
-s "fragmenting handshake message"
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
run_test "DTLS fragmenting: openssl client, DTLS 1.0" \
"$P_SRV dtls=1 debug_level=2 \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
mtu=512 force_version=dtls1" \
"$O_CLI -dtls1" \
0 \
-s "fragmenting handshake message"
# interop tests for DTLS fragmenting with unreliable connection
#
# again we just want to test that we fragment in a way that
# pleases other implementations, so we don't need the peer to fragment
requires_gnutls_next
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_2
client_needs_more_time 4
run_test "DTLS fragmenting: 3d, gnutls server, DTLS 1.2" \
-p "$P_PXY drop=8 delay=8 duplicate=8" \
"$G_NEXT_SRV -u" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
hs_timeout=250-60000 mtu=512 force_version=dtls1_2" \
0 \
-c "fragmenting handshake message" \
-C "error"
requires_gnutls_next
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
client_needs_more_time 4
run_test "DTLS fragmenting: 3d, gnutls server, DTLS 1.0" \
-p "$P_PXY drop=8 delay=8 duplicate=8" \
"$G_NEXT_SRV -u" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
hs_timeout=250-60000 mtu=512 force_version=dtls1" \
0 \
-c "fragmenting handshake message" \
-C "error"
## The two tests below are disabled due to a bug in GnuTLS client that causes
## handshake failures when the NewSessionTicket message is lost, see
## https://gitlab.com/gnutls/gnutls/issues/543
## We can re-enable them when a fixed version of GnuTLS is available
## and installed in our CI system.
skip_next_test
requires_gnutls
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_2
client_needs_more_time 4
run_test "DTLS fragmenting: 3d, gnutls client, DTLS 1.2" \
-p "$P_PXY drop=8 delay=8 duplicate=8" \
"$P_SRV dtls=1 debug_level=2 \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
hs_timeout=250-60000 mtu=512 force_version=dtls1_2" \
"$G_CLI -u --insecure 127.0.0.1" \
0 \
-s "fragmenting handshake message"
skip_next_test
requires_gnutls
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
client_needs_more_time 4
run_test "DTLS fragmenting: 3d, gnutls client, DTLS 1.0" \
-p "$P_PXY drop=8 delay=8 duplicate=8" \
"$P_SRV dtls=1 debug_level=2 \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
hs_timeout=250-60000 mtu=512 force_version=dtls1" \
"$G_CLI -u --insecure 127.0.0.1" \
0 \
-s "fragmenting handshake message"
## Interop test with OpenSSL might trigger a bug in recent versions (that
## probably won't be fixed before 1.1.1X), so we use an old version that
## doesn't have this bug, but unfortunately it doesn't have support for DTLS
## 1.2 either, so the DTLS 1.2 tests are commented for now.
## Bug report: https://github.com/openssl/openssl/issues/6902
## They should be re-enabled (and the DTLS 1.0 switched back to a non-legacy
## version of OpenSSL once a fixed version of OpenSSL is available)
skip_next_test
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_2
client_needs_more_time 4
run_test "DTLS fragmenting: 3d, openssl server, DTLS 1.2" \
-p "$P_PXY drop=8 delay=8 duplicate=8" \
"$O_SRV -dtls1_2 -verify 10" \
"$P_CLI dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
hs_timeout=250-60000 mtu=512 force_version=dtls1_2" \
0 \
-c "fragmenting handshake message" \
-C "error"
requires_openssl_legacy
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
client_needs_more_time 4
run_test "DTLS fragmenting: 3d, openssl server, DTLS 1.0" \
-p "$P_PXY drop=8 delay=8 duplicate=8" \
"$O_LEGACY_SRV -dtls1 -verify 10" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2 \
crt_file=data_files/server8_int-ca2.crt \
key_file=data_files/server8.key \
hs_timeout=250-60000 mtu=512 force_version=dtls1" \
0 \
-c "fragmenting handshake message" \
-C "error"
## see comment on the previous-previous test
## requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
## requires_config_enabled MBEDTLS_RSA_C
## requires_config_enabled MBEDTLS_ECDSA_C
## requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_2
## client_needs_more_time 4
## run_test "DTLS fragmenting: 3d, openssl client, DTLS 1.2" \
## -p "$P_PXY drop=8 delay=8 duplicate=8" \
## "$P_SRV dtls=1 debug_level=2 \
## crt_file=data_files/server7_int-ca.crt \
## key_file=data_files/server7.key \
## hs_timeout=250-60000 mtu=512 force_version=dtls1_2" \
## "$O_CLI -dtls1_2" \
## 0 \
## -s "fragmenting handshake message"
# -nbio is added to prevent s_client from blocking in case of duplicated
# messages at the end of the handshake
requires_openssl_legacy
requires_config_enabled MBEDTLS_SSL_PROTO_DTLS
requires_config_enabled MBEDTLS_RSA_C
requires_config_enabled MBEDTLS_ECDSA_C
requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_1
client_needs_more_time 4
run_test "DTLS fragmenting: 3d, openssl client, DTLS 1.0" \
-p "$P_PXY drop=8 delay=8 duplicate=8" \
"$P_SRV dgram_packing=0 dtls=1 debug_level=2 \
crt_file=data_files/server7_int-ca.crt \
key_file=data_files/server7.key \
hs_timeout=250-60000 mtu=512 force_version=dtls1" \
"$O_LEGACY_CLI -nbio -dtls1" \
0 \
-s "fragmenting handshake message"
# Tests for specific things with "unreliable" UDP connection
not_with_valgrind # spurious resend due to timeout
run_test "DTLS proxy: reference" \
-p "$P_PXY" \
"$P_SRV dtls=1 debug_level=2" \
"$P_CLI dtls=1 debug_level=2" \
0 \
-C "replayed record" \
-S "replayed record" \
-C "record from another epoch" \
-S "record from another epoch" \
-C "discarding invalid record" \
-S "discarding invalid record" \
-S "resend" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
not_with_valgrind # spurious resend due to timeout
run_test "DTLS proxy: duplicate every packet" \
-p "$P_PXY duplicate=1" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=2" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=2" \
0 \
-c "replayed record" \
-s "replayed record" \
-c "record from another epoch" \
-s "record from another epoch" \
-S "resend" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
run_test "DTLS proxy: duplicate every packet, server anti-replay off" \
-p "$P_PXY duplicate=1" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=2 anti_replay=0" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=2" \
0 \
-c "replayed record" \
-S "replayed record" \
-c "record from another epoch" \
-s "record from another epoch" \
-c "resend" \
-s "resend" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
run_test "DTLS proxy: multiple records in same datagram" \
-p "$P_PXY pack=50" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=2" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=2" \
0 \
-c "next record in same datagram" \
-s "next record in same datagram"
run_test "DTLS proxy: multiple records in same datagram, duplicate every packet" \
-p "$P_PXY pack=50 duplicate=1" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=2" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=2" \
0 \
-c "next record in same datagram" \
-s "next record in same datagram"
run_test "DTLS proxy: inject invalid AD record, default badmac_limit" \
-p "$P_PXY bad_ad=1" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=1" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=1 read_timeout=100" \
0 \
-c "discarding invalid record (mac)" \
-s "discarding invalid record (mac)" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK" \
-S "too many records with bad MAC" \
-S "Verification of the message MAC failed"
run_test "DTLS proxy: inject invalid AD record, badmac_limit 1" \
-p "$P_PXY bad_ad=1" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=1 badmac_limit=1" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=1 read_timeout=100" \
1 \
-C "discarding invalid record (mac)" \
-S "discarding invalid record (mac)" \
-S "Extra-header:" \
-C "HTTP/1.0 200 OK" \
-s "too many records with bad MAC" \
-s "Verification of the message MAC failed"
run_test "DTLS proxy: inject invalid AD record, badmac_limit 2" \
-p "$P_PXY bad_ad=1" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=1 badmac_limit=2" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=1 read_timeout=100" \
0 \
-c "discarding invalid record (mac)" \
-s "discarding invalid record (mac)" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK" \
-S "too many records with bad MAC" \
-S "Verification of the message MAC failed"
run_test "DTLS proxy: inject invalid AD record, badmac_limit 2, exchanges 2"\
-p "$P_PXY bad_ad=1" \
"$P_SRV dtls=1 dgram_packing=0 debug_level=1 badmac_limit=2 exchanges=2" \
"$P_CLI dtls=1 dgram_packing=0 debug_level=1 read_timeout=100 exchanges=2" \
1 \
-c "discarding invalid record (mac)" \
-s "discarding invalid record (mac)" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK" \
-s "too many records with bad MAC" \
-s "Verification of the message MAC failed"
run_test "DTLS proxy: delay ChangeCipherSpec" \
-p "$P_PXY delay_ccs=1" \
"$P_SRV dtls=1 debug_level=1 dgram_packing=0" \
"$P_CLI dtls=1 debug_level=1 dgram_packing=0" \
0 \
-c "record from another epoch" \
-s "record from another epoch" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
# Tests for reordering support with DTLS
run_test "DTLS reordering: Buffer out-of-order handshake message on client" \
-p "$P_PXY delay_srv=ServerHello" \
"$P_SRV dgram_packing=0 cookies=0 dtls=1 debug_level=2" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2" \
0 \
-c "Buffering HS message" \
-c "Next handshake message has been buffered - load"\
-S "Buffering HS message" \
-S "Next handshake message has been buffered - load"\
-C "Injecting buffered CCS message" \
-C "Remember CCS message" \
-S "Injecting buffered CCS message" \
-S "Remember CCS message"
run_test "DTLS reordering: Buffer out-of-order handshake message fragment on client" \
-p "$P_PXY delay_srv=ServerHello" \
"$P_SRV mtu=512 dgram_packing=0 cookies=0 dtls=1 debug_level=2" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2" \
0 \
-c "Buffering HS message" \
-c "found fragmented DTLS handshake message"\
-c "Next handshake message 1 not or only partially bufffered" \
-c "Next handshake message has been buffered - load"\
-S "Buffering HS message" \
-S "Next handshake message has been buffered - load"\
-C "Injecting buffered CCS message" \
-C "Remember CCS message" \
-S "Injecting buffered CCS message" \
-S "Remember CCS message"
# The client buffers the ServerKeyExchange before receiving the fragmented
# Certificate message; at the time of writing, together these are around 1200 bytes
# in size, so that the bound below ensures that the certificate can be reassembled
# while keeping the ServerKeyExchange.
requires_config_value_at_least "MBEDTLS_SSL_DTLS_MAX_BUFFERING" 1300
run_test "DTLS reordering: Buffer out-of-order hs msg before reassembling next" \
-p "$P_PXY delay_srv=Certificate delay_srv=Certificate" \
"$P_SRV mtu=512 dgram_packing=0 cookies=0 dtls=1 debug_level=2" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2" \
0 \
-c "Buffering HS message" \
-c "Next handshake message has been buffered - load"\
-C "attempt to make space by freeing buffered messages" \
-S "Buffering HS message" \
-S "Next handshake message has been buffered - load"\
-C "Injecting buffered CCS message" \
-C "Remember CCS message" \
-S "Injecting buffered CCS message" \
-S "Remember CCS message"
# The size constraints ensure that the delayed certificate message can't
# be reassembled while keeping the ServerKeyExchange message, but it can
# when dropping it first.
requires_config_value_at_least "MBEDTLS_SSL_DTLS_MAX_BUFFERING" 900
requires_config_value_at_most "MBEDTLS_SSL_DTLS_MAX_BUFFERING" 1299
run_test "DTLS reordering: Buffer out-of-order hs msg before reassembling next, free buffered msg" \
-p "$P_PXY delay_srv=Certificate delay_srv=Certificate" \
"$P_SRV mtu=512 dgram_packing=0 cookies=0 dtls=1 debug_level=2" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2" \
0 \
-c "Buffering HS message" \
-c "attempt to make space by freeing buffered future messages" \
-c "Enough space available after freeing buffered HS messages" \
-S "Buffering HS message" \
-S "Next handshake message has been buffered - load"\
-C "Injecting buffered CCS message" \
-C "Remember CCS message" \
-S "Injecting buffered CCS message" \
-S "Remember CCS message"
run_test "DTLS reordering: Buffer out-of-order handshake message on server" \
-p "$P_PXY delay_cli=Certificate" \
"$P_SRV dgram_packing=0 auth_mode=required cookies=0 dtls=1 debug_level=2" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2" \
0 \
-C "Buffering HS message" \
-C "Next handshake message has been buffered - load"\
-s "Buffering HS message" \
-s "Next handshake message has been buffered - load" \
-C "Injecting buffered CCS message" \
-C "Remember CCS message" \
-S "Injecting buffered CCS message" \
-S "Remember CCS message"
run_test "DTLS reordering: Buffer out-of-order CCS message on client"\
-p "$P_PXY delay_srv=NewSessionTicket" \
"$P_SRV dgram_packing=0 cookies=0 dtls=1 debug_level=2" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2" \
0 \
-C "Buffering HS message" \
-C "Next handshake message has been buffered - load"\
-S "Buffering HS message" \
-S "Next handshake message has been buffered - load" \
-c "Injecting buffered CCS message" \
-c "Remember CCS message" \
-S "Injecting buffered CCS message" \
-S "Remember CCS message"
run_test "DTLS reordering: Buffer out-of-order CCS message on server"\
-p "$P_PXY delay_cli=ClientKeyExchange" \
"$P_SRV dgram_packing=0 cookies=0 dtls=1 debug_level=2" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2" \
0 \
-C "Buffering HS message" \
-C "Next handshake message has been buffered - load"\
-S "Buffering HS message" \
-S "Next handshake message has been buffered - load" \
-C "Injecting buffered CCS message" \
-C "Remember CCS message" \
-s "Injecting buffered CCS message" \
-s "Remember CCS message"
run_test "DTLS reordering: Buffer encrypted Finished message" \
-p "$P_PXY delay_ccs=1" \
"$P_SRV dgram_packing=0 cookies=0 dtls=1 debug_level=2" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2" \
0 \
-s "Buffer record from epoch 1" \
-s "Found buffered record from current epoch - load" \
-c "Buffer record from epoch 1" \
-c "Found buffered record from current epoch - load"
# In this test, both the fragmented NewSessionTicket and the ChangeCipherSpec
# from the server are delayed, so that the encrypted Finished message
# is received and buffered. When the fragmented NewSessionTicket comes
# in afterwards, the encrypted Finished message must be freed in order
# to make space for the NewSessionTicket to be reassembled.
# This works only in very particular circumstances:
# - MBEDTLS_SSL_DTLS_MAX_BUFFERING must be large enough to allow buffering
# of the NewSessionTicket, but small enough to also allow buffering of
# the encrypted Finished message.
# - The MTU setting on the server must be so small that the NewSessionTicket
# needs to be fragmented.
# - All messages sent by the server must be small enough to be either sent
# without fragmentation or be reassembled within the bounds of
# MBEDTLS_SSL_DTLS_MAX_BUFFERING. Achieve this by testing with a PSK-based
# handshake, omitting CRTs.
requires_config_value_at_least "MBEDTLS_SSL_DTLS_MAX_BUFFERING" 240
requires_config_value_at_most "MBEDTLS_SSL_DTLS_MAX_BUFFERING" 280
run_test "DTLS reordering: Buffer encrypted Finished message, drop for fragmented NewSessionTicket" \
-p "$P_PXY delay_srv=NewSessionTicket delay_srv=NewSessionTicket delay_ccs=1" \
"$P_SRV mtu=190 dgram_packing=0 psk=abc123 psk_identity=foo cookies=0 dtls=1 debug_level=2" \
"$P_CLI dgram_packing=0 dtls=1 debug_level=2 force_ciphersuite=TLS-PSK-WITH-AES-128-CCM-8 psk=abc123 psk_identity=foo" \
0 \
-s "Buffer record from epoch 1" \
-s "Found buffered record from current epoch - load" \
-c "Buffer record from epoch 1" \
-C "Found buffered record from current epoch - load" \
-c "Enough space available after freeing future epoch record"
# Tests for "randomly unreliable connection": try a variety of flows and peers
client_needs_more_time 2
run_test "DTLS proxy: 3d (drop, delay, duplicate), \"short\" PSK handshake" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=none \
psk=abc123" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 psk=abc123 \
force_ciphersuite=TLS-PSK-WITH-AES-128-CCM-8" \
0 \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 2
run_test "DTLS proxy: 3d, \"short\" RSA handshake" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=none" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 \
force_ciphersuite=TLS-RSA-WITH-AES-128-CBC-SHA" \
0 \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 2
run_test "DTLS proxy: 3d, \"short\" (no ticket, no cli_auth) FS handshake" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=none" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0" \
0 \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 2
run_test "DTLS proxy: 3d, FS, client auth" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=required" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0" \
0 \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 2
run_test "DTLS proxy: 3d, FS, ticket" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=1 auth_mode=none" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=1" \
0 \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 2
run_test "DTLS proxy: 3d, max handshake (FS, ticket + client auth)" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=1 auth_mode=required" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=1" \
0 \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 2
run_test "DTLS proxy: 3d, max handshake, nbio" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 nbio=2 tickets=1 \
auth_mode=required" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 nbio=2 tickets=1" \
0 \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 4
run_test "DTLS proxy: 3d, min handshake, resumption" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=none \
psk=abc123 debug_level=3" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 psk=abc123 \
debug_level=3 reconnect=1 read_timeout=1000 max_resend=10 \
force_ciphersuite=TLS-PSK-WITH-AES-128-CCM-8" \
0 \
-s "a session has been resumed" \
-c "a session has been resumed" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 4
run_test "DTLS proxy: 3d, min handshake, resumption, nbio" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=none \
psk=abc123 debug_level=3 nbio=2" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 psk=abc123 \
debug_level=3 reconnect=1 read_timeout=1000 max_resend=10 \
force_ciphersuite=TLS-PSK-WITH-AES-128-CCM-8 nbio=2" \
0 \
-s "a session has been resumed" \
-c "a session has been resumed" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 4
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "DTLS proxy: 3d, min handshake, client-initiated renego" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=none \
psk=abc123 renegotiation=1 debug_level=2" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 psk=abc123 \
renegotiate=1 debug_level=2 \
force_ciphersuite=TLS-PSK-WITH-AES-128-CCM-8" \
0 \
-c "=> renegotiate" \
-s "=> renegotiate" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 4
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "DTLS proxy: 3d, min handshake, client-initiated renego, nbio" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=none \
psk=abc123 renegotiation=1 debug_level=2" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 psk=abc123 \
renegotiate=1 debug_level=2 \
force_ciphersuite=TLS-PSK-WITH-AES-128-CCM-8" \
0 \
-c "=> renegotiate" \
-s "=> renegotiate" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 4
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "DTLS proxy: 3d, min handshake, server-initiated renego" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=none \
psk=abc123 renegotiate=1 renegotiation=1 exchanges=4 \
debug_level=2" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 psk=abc123 \
renegotiation=1 exchanges=4 debug_level=2 \
force_ciphersuite=TLS-PSK-WITH-AES-128-CCM-8" \
0 \
-c "=> renegotiate" \
-s "=> renegotiate" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 4
requires_config_enabled MBEDTLS_SSL_RENEGOTIATION
run_test "DTLS proxy: 3d, min handshake, server-initiated renego, nbio" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$P_SRV dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 auth_mode=none \
psk=abc123 renegotiate=1 renegotiation=1 exchanges=4 \
debug_level=2 nbio=2" \
"$P_CLI dtls=1 dgram_packing=0 hs_timeout=250-10000 tickets=0 psk=abc123 \
renegotiation=1 exchanges=4 debug_level=2 nbio=2 \
force_ciphersuite=TLS-PSK-WITH-AES-128-CCM-8" \
0 \
-c "=> renegotiate" \
-s "=> renegotiate" \
-s "Extra-header:" \
-c "HTTP/1.0 200 OK"
client_needs_more_time 6
not_with_valgrind # risk of non-mbedtls peer timing out
run_test "DTLS proxy: 3d, openssl server" \
-p "$P_PXY drop=5 delay=5 duplicate=5 protect_hvr=1" \
"$O_SRV -dtls1 -mtu 2048" \
"$P_CLI dgram_packing=0 dtls=1 hs_timeout=250-60000 tickets=0" \
0 \
-c "HTTP/1.0 200 OK"
client_needs_more_time 8
not_with_valgrind # risk of non-mbedtls peer timing out
run_test "DTLS proxy: 3d, openssl server, fragmentation" \
-p "$P_PXY drop=5 delay=5 duplicate=5 protect_hvr=1" \
"$O_SRV -dtls1 -mtu 768" \
"$P_CLI dgram_packing=0 dtls=1 hs_timeout=250-60000 tickets=0" \
0 \
-c "HTTP/1.0 200 OK"
client_needs_more_time 8
not_with_valgrind # risk of non-mbedtls peer timing out
run_test "DTLS proxy: 3d, openssl server, fragmentation, nbio" \
-p "$P_PXY drop=5 delay=5 duplicate=5 protect_hvr=1" \
"$O_SRV -dtls1 -mtu 768" \
"$P_CLI dgram_packing=0 dtls=1 hs_timeout=250-60000 nbio=2 tickets=0" \
0 \
-c "HTTP/1.0 200 OK"
requires_gnutls
client_needs_more_time 6
not_with_valgrind # risk of non-mbedtls peer timing out
run_test "DTLS proxy: 3d, gnutls server" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$G_SRV -u --mtu 2048 -a" \
"$P_CLI dgram_packing=0 dtls=1 hs_timeout=250-60000" \
0 \
-s "Extra-header:" \
-c "Extra-header:"
requires_gnutls
client_needs_more_time 8
not_with_valgrind # risk of non-mbedtls peer timing out
run_test "DTLS proxy: 3d, gnutls server, fragmentation" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$G_SRV -u --mtu 512" \
"$P_CLI dgram_packing=0 dtls=1 hs_timeout=250-60000" \
0 \
-s "Extra-header:" \
-c "Extra-header:"
requires_gnutls
client_needs_more_time 8
not_with_valgrind # risk of non-mbedtls peer timing out
run_test "DTLS proxy: 3d, gnutls server, fragmentation, nbio" \
-p "$P_PXY drop=5 delay=5 duplicate=5" \
"$G_SRV -u --mtu 512" \
"$P_CLI dgram_packing=0 dtls=1 hs_timeout=250-60000 nbio=2" \
0 \
-s "Extra-header:" \
-c "Extra-header:"
# Final report
echo "------------------------------------------------------------------------"
if [ $FAILS = 0 ]; then
printf "PASSED"
else
printf "FAILED"
fi
PASSES=$(( $TESTS - $FAILS ))
echo " ($PASSES / $TESTS tests ($SKIPS skipped))"
exit $FAILS
|
#!/bin/bash
## Build script: configure, compile, and install the package.
## Exit on first failure so `make install` never runs against a
## broken build (previously it ran unconditionally, even when
## configure or make had failed).
set -e

## Choose extra configure options depending on the operating system
## (mac or linux)
if [ "$(uname)" = Darwin ] ; then
  extra_config_options="LDFLAGS=-Wl,-headerpad_max_install_names"
fi

## Configure and make
## NOTE: ${extra_config_options} is intentionally left unquoted so it
## expands to nothing when unset (non-Darwin).
./configure --prefix="$PREFIX" \
	    --with-kinwalker \
	    --with-cluster \
	    --disable-lto \
	    --without-doc \
	    --without-tutorial \
	    --without-tutorial-pdf \
	    ${extra_config_options}
make -j"${CPU_COUNT}"

## Install
make install
|
#!/bin/sh
# Clean up unused Docker data (containers, networks, images, volumes).
# If the docker daemon was not running beforehand, remember that and
# stop it again once the cleanup is done.
systemctl status docker 1>/dev/null 2>&1 || STOP_DOCKER=1
systemctl start docker
docker system prune -f
# BUG FIX: the flag is --volumes (plural); `--volume` is rejected by
# the docker CLI, so the volume prune never ran.
docker system prune --volumes -f
docker system prune --all -f
# Use an explicit `if` so the script exits 0 when docker was already
# running (a trailing `[ ... ] && cmd` would make the exit status 1).
if [ "${STOP_DOCKER}" = "1" ]; then
    systemctl stop docker
fi
|
// Page setup for the enrolment (inscripción) screen: makes workshop
// panels draggable, wires the drop target and the submit button.
// NOTE: $(document).on('ready', fn) stopped firing in jQuery 3.x;
// $(document).ready(fn) is equivalent and works in all versions.
$(document).ready(function () {
    // Workshop cards are dragged by their heading onto the enrolment panel.
    $('.dragg-taller-div').draggable({
        helper: 'clone',
        zIndex: 2000,
        handle: '.panel-heading',
        start: function (event, ui) {
            // Shrink the drag ghost and strip the parts not needed in it.
            ui.helper.css("width", "373px");
            ui.helper.find('.panel-footer').remove();
            ui.helper.find('.horario-taller').remove();
        }
    }).hover(function () {
    }, function () {
    });
    // Move "sold out" alerts to the bottom of the subjects list.
    $('.alert-agotado').each(function () {
        $('#materias > .panel-body').append($(this));
    });
    // Clicking the enrol button on a card behaves like dropping the card.
    $('.dragg-taller-div .btn-taller-insc').on('click', function () {
        set_event_insc($(this).closest('.panel').data('id'), $(this).closest('.panel'));
    });
    $('#inscribir_panel .panel-body').droppable({
        accept: '.dragg-taller-div',
        drop: function (event, ui) {
            var id = ui.draggable.data("id");
            set_event_insc(id, ui.draggable);
        }
    });
    // Submit the selected workshops; on success, append one voucher row
    // per "baucher", remove the enrolled workshops from the list and
    // open each voucher PDF.
    $('#inscribir_panel .panel-footer .btn-primary').on('click', function () {
        var data = '';
        $('#inscribir_panel .panel-body .alert').each(function () {
            data += 'id[]=' + $(this).data('id') + '&';
        });
        if (data.length > 0) {
            // Drop the trailing '&'.
            data = data.substring(0, data.length - 1);
        }
        $.ajax({
            url: base_url + 'admin/inscribir/insert',
            type: 'POST',
            data: data + '&user_name=' + $("#user_name").data('id'),
            dataType: 'json',
            success: function (data) {
                if (data.status === "MSG") {
                    alerts(data.type, data.message, '', function () {
                        if (data.type == 'success') {
                            var html = '';
                            $.each(data.bauchers, function(key, baucher) {
                                html += '<tr>';
                                html += '<td>' + baucher.folio + '</td>';
                                html += '<td>' + baucher.taller + '</td>';
                                html += '<td>' + baucher.fecha_expedicion + '</td>';
                                html += '<td>No pagado</td>';
                                // BUG FIX: a stray "+ 'id'" was appended to the
                                // href, producing URLs like ".../get_pdf/5id".
                                html += '<td><a data-events="0" class="btn btn-link" data-id="' + baucher.id + '" href="' + base_url + 'alumnos/inscripcion/get_pdf/' + baucher.id + '" target="_blank">Imprimir</a></td>';
                                // BUG FIX: the row was opened twice; close it.
                                html += '</tr>';
                                // Remove the now-enrolled workshop from the list.
                                $('#materias .panel').each(function(){
                                    if($(this).data('id') == baucher.taller_id){
                                        $(this).remove();
                                    }
                                });
                            });
                            $('#table_bauchers tbody').append(html);
                            add_events_inscripcion();
                            $('#inscribir_panel .panel-body .alert').remove();
                            $('#inscribir_panel .panel-body p').show();
                            $.each(data.bauchers, function(key, baucher) {
                                get_pdf(baucher.id);
                            });
                        }
                    });
                }
            }
        });
    });
    add_events_inscripcion();
});
// Attach a click handler to every voucher link in the table that has not
// been wired up yet; data-events === 0 marks an unbound link, and it is
// flipped to 1 so the handler is never attached twice.
function add_events_inscripcion() {
    $('#table_bauchers tbody tr td a').each(function () {
        var $link = $(this);
        if ($link.data('events') != 0) {
            return; // already bound
        }
        $link.data('events', 1);
        $link.on('click', function (event) {
            event.preventDefault();
            get_pdf($(this).data('id'));
        });
    });
}
// Request the voucher PDF for the given id (scoped to the current user,
// read from #user_name) and open it in a new tab. On any non-OK status
// the server-provided message is shown instead.
function get_pdf(id) {
    var endpoint = base_url + 'admin/inscribir/get_pdf/' + id + '/' + $("#user_name").data('id');
    $.ajax({
        type: 'POST',
        dataType: 'json',
        url: endpoint,
        success: function (data) {
            if (data.status !== "OK") {
                alerts(data.type, data.message);
                return;
            }
            window.open(data.url);
        }
    });
}
// Add the dragged workshop card (identified by id) to the enrolment panel,
// unless it is already there, then rebind entry events and refresh the
// running total displayed in #costo_total.
function set_event_insc(id, $dragg) {
    var $body = $('#inscribir_panel > .panel-body');
    // Skip if an entry with this id is already in the panel.
    var esta = $body.children('div').filter(function () {
        return $(this).data("id") == id;
    }).length > 0;
    if (esta) {
        return;
    }
    var html = '<div data-id="' + id + '" data-events="0" class="alert alert-info col-md-5" style="margin:5px">'
            + '<div>' + $dragg.find('.name-taller').data('name') + '</div>'
            + '<div><strong> ' + $dragg.find('.salon-taller').text() + '</strong></div>'
            + '<div>Grupo: ' + $dragg.find('.grupo-taller').text() + '</div>'
            + '<button class="btn btn-xs btn-link pull-right"><span class="glyphicon glyphicon-remove"></span></button>'
            + '<div>$<span class="costo"> ' + $dragg.find('.costo-span').text() + '</span></div></div>';
    $body.find('p').hide();
    $body.append(html);
    var costo = 0;
    $body.children("div").each(function () {
        var $entry = $(this);
        // Newly appended entries still have data-events === 0; wire them up.
        if ($entry.data("events") == 0) {
            events_div_insc($entry);
        }
        // FIX: always pass the radix to parseInt so cost strings are never
        // parsed as hex/octal.
        costo += parseInt($entry.find('.costo').text(), 10);
    });
    $('#costo_total').val('$' + costo);
}
// Wire the remove button of one enrolment entry: clicking it removes the
// entry and recomputes the total, or clears the total and restores the
// placeholder text when no entries remain.
function events_div_insc($this) {
    $this.find('.btn-link').on('click', function () {
        var $parent = $('#inscribir_panel .panel-body');
        $(this).closest('.alert').remove();
        var costo = 0;
        $parent.children("div").each(function () {
            // FIX: explicit radix so the cost text is always read as decimal.
            costo += parseInt($(this).find('.costo').text(), 10);
        });
        if ($parent.children("div").length !== 0) {
            $('#costo_total').val('$' + costo);
        } else {
            $('#costo_total').val('');
            $parent.find('p').show();
        }
    });
}
<reponame>ideacrew/pa_edidb
# Compatibility shim for specs written against RSpec 2's boolean matchers.
# RSpec 3 renamed be_true/be_false to be_truthy/be_falsey; mixing this module
# into the spec context lets legacy specs keep their original matcher names.
module LegacySpec
  # Delegates to RSpec 3's be_truthy matcher.
  def be_true
    be_truthy
  end
  # Delegates to RSpec 3's be_falsey matcher.
  def be_false
    be_falsey
  end
end
|
<gh_stars>0
// Application-wide Redux store.
// createStore takes the combined root reducer as its first argument.
import { createStore } from 'redux';
import rootReducer from "../reducers/index"

const store = createStore(rootReducer)

export default store;
|
# For each sample ID listed in samples.txt: locate that sample's mapped
# Illumina BAM on the 1000 Genomes FTP mirror, extract the LPA region
# (chr6:161033785-161066618) and convert it to FASTQ.
# Fix: quote all expansions so filenames/paths with unexpected characters
# don't word-split or glob.
while read i; do
    echo "$i"
    SAMPLE=$i
    SERVER=ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/${SAMPLE}/alignment/
    # The BAM file name is the 9th column of the FTP directory listing.
    FILE=$(curl "${SERVER}" | grep -E '\.mapped.ILLUMINA.*bam$' | awk '{print $9}')
    echo "path is ${SERVER}${FILE}"
    samtools view -b "${SERVER}${FILE}" 6:161033785-161066618 -o "${FILE}.lpa.bam"
    bamToFastq -i "${FILE}.lpa.bam" -fq "${FILE}.lpa.fq"
    # Remove the intermediate BAM and its index (glob stays unquoted on purpose).
    rm "${FILE}.lpa.bam"*
done <samples.txt
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.basic_elaboration_folder_note = void 0;
var basic_elaboration_folder_note = {
"viewBox": "0 0 64 64",
"children": [{
"name": "polygon",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"points": "63,18 63,54 1,54 1,10 22,10 30,18 "
},
"children": []
}, {
"name": "g",
"attribs": {},
"children": [{
"name": "circle",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"cx": "35",
"cy": "41",
"r": "3"
},
"children": [{
"name": "circle",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"cx": "35",
"cy": "41",
"r": "3"
},
"children": []
}]
}, {
"name": "circle",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"cx": "25",
"cy": "43",
"r": "3"
},
"children": [{
"name": "circle",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"cx": "25",
"cy": "43",
"r": "3"
},
"children": []
}]
}, {
"name": "polyline",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"points": "38,41 38,26 28,28 28,43 \t"
},
"children": [{
"name": "polyline",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"points": "38,41 38,26 28,28 28,43 \t"
},
"children": []
}]
}, {
"name": "line",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"x1": "28",
"y1": "32",
"x2": "38",
"y2": "30"
},
"children": [{
"name": "line",
"attribs": {
"fill": "none",
"stroke": "#000000",
"stroke-width": "2",
"stroke-miterlimit": "10",
"x1": "28",
"y1": "32",
"x2": "38",
"y2": "30"
},
"children": []
}]
}]
}]
};
exports.basic_elaboration_folder_note = basic_elaboration_folder_note; |
# Interactive console menu (Ukrainian prompts) for maintaining a list of
# elements. Option 8 exits the loop; options 1, 2 and 6 are unimplemented
# placeholders left for the exercise.

# Initialize an empty list to store the elements
elements = []
while True:
    # Display the menu options
    print("Введіть 1, щоб вибрати стратегію 1.")
    print("Введіть 2, щоб вибрати стратегію 2.")
    print("Введіть 3, щоб генерувати дані.")
    print("Введіть 4, щоб видалити елемент за вказаною позицією.")
    print("Введіть 5, щоб видалити декілька елементів в межах початкової та кінцевої позиції.")
    print("Введіть 6, щоб використати метод для роботи зі списками.")
    print("Введіть 7, щоб вивести список.")
    print("Введіть 8, щоб вийти.")
    # Read user input.
    # Fix: non-numeric input used to crash with an unhandled ValueError;
    # treat it like any other invalid choice instead.
    try:
        choice = int(input("Введіть ваш вибір: "))
    except ValueError:
        print("Недійсний вибір. Будь ласка, виберіть від 1 до 8.")
        continue
    if choice == 1:
        # Implement strategy 1
        pass  # Replace 'pass' with the implementation of strategy 1
    elif choice == 2:
        # Implement strategy 2
        pass  # Replace 'pass' with the implementation of strategy 2
    elif choice == 3:
        # Generate data and add it to the list
        data = input("Введіть дані для додавання до списку: ")
        elements.append(data)
    elif choice == 4:
        # Delete an element at a specified position (0-based).
        try:
            position = int(input("Введіть позицію для видалення елемента: "))
        except ValueError:
            print("Недійсна позиція")
            continue
        if 0 <= position < len(elements):
            del elements[position]
        else:
            print("Недійсна позиція")
    elif choice == 5:
        # Delete multiple elements within an inclusive [start, end] range.
        try:
            start = int(input("Введіть початкову позицію для видалення: "))
            end = int(input("Введіть кінцеву позицію для видалення: "))
        except ValueError:
            print("Недійсний діапазон позицій")
            continue
        if 0 <= start < len(elements) and 0 <= end < len(elements) and start <= end:
            del elements[start:end+1]
        else:
            print("Недійсний діапазон позицій")
    elif choice == 6:
        # Use a method for working with lists
        # Implement the method for working with lists
        pass  # Replace 'pass' with the implementation of the list method
    elif choice == 7:
        # Display the list
        print("Список елементів:", elements)
    elif choice == 8:
        # Exit the program
        print("Програма завершена.")
        break
    else:
        print("Недійсний вибір. Будь ласка, виберіть від 1 до 8.")
def max_of_two(a, b):
    """Return the larger of ``a`` and ``b`` (``b`` wins ties)."""
    return a if a > b else b
max_of_two(10,11) |
package com.yoga.utility.quartz;
import com.yoga.core.exception.BusinessException;
import com.yoga.core.utils.DateUtil;
import org.hibernate.service.spi.ServiceException;
import org.quartz.*;
import org.quartz.impl.matchers.GroupMatcher;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.*;
/**
 * Thin service wrapper around the Quartz {@link Scheduler} for listing,
 * creating, editing, pausing, resuming and deleting cron-triggered jobs.
 * Jobs are addressed by (jobName, jobGroup); jobName doubles as the fully
 * qualified Job class name (see {@code QuartzTask#jobClass()}).
 */
@Service
public class QuartzService {
    @Autowired
    private Scheduler scheduler;

    /**
     * Returns one QuartzTask per (job, trigger) pair across every job group.
     * Scheduler failures are only printed; a partial/empty list is returned.
     */
    public List<QuartzTask> list() {
        final List<QuartzTask> list = new ArrayList<>();
        try {
            for (final String groupJob : scheduler.getJobGroupNames()) {
                for (final JobKey jobKey : scheduler.getJobKeys(GroupMatcher.groupEquals(groupJob))) {
                    final List<? extends Trigger> triggers = scheduler.getTriggersOfJob(jobKey);
                    for (final Trigger trigger : triggers) {
                        QuartzTask info = updateJob(trigger, jobKey);
                        list.add(info);
                    }
                }
            }
        } catch (SchedulerException ex) {
            ex.printStackTrace();
        }
        return list;
    }

    /**
     * Looks up a single task by trigger key.
     *
     * @throws BusinessException if the trigger does not exist or lookup fails
     *         (the original message is wrapped).
     */
    public QuartzTask get(final String jobName, final String jobGroup) {
        try {
            final TriggerKey triggerKey = TriggerKey.triggerKey(jobName, jobGroup);
            if (!scheduler.checkExists(triggerKey)) throw new BusinessException("定时任务不存在!");
            final Trigger trigger = scheduler.getTrigger(triggerKey);
            final JobKey jobKey = trigger.getJobKey();
            return updateJob(trigger, jobKey);
        } catch (Exception e) {
            throw new BusinessException(e.getMessage());
        }
    }

    /**
     * Builds a QuartzTask snapshot from a trigger + job.
     * Despite its name this is read-only: it does not modify the scheduler.
     * Note: the trigger's description field is (ab)used to store the creation
     * timestamp — see {@link #add(QuartzTask)}.
     */
    private QuartzTask updateJob(Trigger trigger, JobKey jobKey) throws SchedulerException {
        final Trigger.TriggerState triggerState = scheduler.getTriggerState(trigger.getKey());
        final JobDetail jobDetail = scheduler.getJobDetail(jobKey);
        String cronExpression = "";
        String createTime = "";
        if (trigger instanceof CronTrigger) {
            final CronTrigger cronTrigger = (CronTrigger) trigger;
            cronExpression = cronTrigger.getCronExpression();
            createTime = cronTrigger.getDescription();
        }
        final QuartzTask info = new QuartzTask();
        info.setName(jobKey.getName());
        info.setGroup(jobKey.getGroup());
        info.setDescription(jobDetail.getDescription());
        info.setStatus(triggerState.name());
        info.setExpression(cronExpression);
        info.setCreateTime(createTime);
        info.setDataMap(jobDetail.getJobDataMap());
        return info;
    }

    /**
     * Schedules a new cron job. The job class is loaded reflectively from
     * {@code info.jobClass()}; silently does nothing if the trigger already
     * exists (the commented-out branch below was an in-place update).
     *
     * @throws ServiceException if the class cannot be loaded or the cron
     *         expression is invalid.
     */
    public void add(final QuartzTask info) {
        final String jobName = info.jobClass();
        final String jobGroup = info.getGroup();
        final String cronExpression = info.getExpression();
        final String jobDescription = info.getDescription();
        final String createTime = DateUtil.format(new Date(), "yyyy-MM-dd HH:mm:ss");
        try {
            TriggerKey triggerKey = TriggerKey.triggerKey(jobName, jobGroup);
            if (!scheduler.checkExists(triggerKey)) {
                // Misfires are skipped rather than replayed.
                CronScheduleBuilder cronScheduleBuilder = CronScheduleBuilder.cronSchedule(cronExpression)
                        .withMisfireHandlingInstructionDoNothing();
                CronTrigger cronTrigger = TriggerBuilder.newTrigger().withIdentity(triggerKey)
                        .withDescription(createTime).withSchedule(cronScheduleBuilder).build();
                Class<? extends Job> clazz = Class.forName(jobName).asSubclass(Job.class);
                JobKey jobKey = JobKey.jobKey(jobName, jobGroup);
                JobDataMap dataMap = info.getDataMap() == null ? new JobDataMap() : info.getDataMap();
                JobDetail jobDetail = JobBuilder.newJob(clazz).withIdentity(jobKey)
                        .withDescription(jobDescription)
                        .usingJobData(dataMap)
                        .build();
                scheduler.scheduleJob(jobDetail, cronTrigger);
            } /*else {
                CronScheduleBuilder cronScheduleBuilder = CronScheduleBuilder.cronSchedule(cronExpression)
                        .withMisfireHandlingInstructionDoNothing();
                CronTrigger cronTrigger = TriggerBuilder.newTrigger().withIdentity(triggerKey)
                        .withDescription(createTime).withSchedule(cronScheduleBuilder).build();
                JobKey jobKey = new JobKey(jobName, jobGroup);
                JobBuilder jobBuilder = scheduler.getJobDetail(jobKey).getJobBuilder();
                JobDetail jobDetail = scheduler.getJobDetail(jobKey);
                JobDataMap dataMap = info.getDataMap() == null ? jobDetail.getJobDataMap() : info.getDataMap();
                jobDetail = jobBuilder.usingJobData(dataMap).withDescription(jobDescription).build();
                Set<Trigger> triggerSet = new HashSet<>();
                triggerSet.add(cronTrigger);
                scheduler.scheduleJob(jobDetail, triggerSet, true);
            } //*/
        } catch (Exception e) {
            e.printStackTrace();
            throw new ServiceException("类名不存在或执行表达式错误");
        }
    }

    /**
     * Replaces the schedule/description/data of an existing job.
     *
     * @throws ServiceException if the trigger does not exist or rescheduling
     *         fails.
     */
    public void edit(final QuartzTask info) {
        final String jobName = info.jobClass();
        final String jobGroup = info.getGroup();
        final String cronExpression = info.getExpression();
        final String jobDescription = info.getDescription();
        final String createTime = DateUtil.format(new Date(), "yyyy-MM-dd HH:mm:ss");
        try {
            TriggerKey triggerKey = TriggerKey.triggerKey(jobName, jobGroup);
            if (!scheduler.checkExists(triggerKey)) throw new ServiceException(String.format("Job不存在, jobName:{%s},jobGroup:{%s}", jobName, jobGroup));
            CronScheduleBuilder cronScheduleBuilder = CronScheduleBuilder.cronSchedule(cronExpression)
                    .withMisfireHandlingInstructionDoNothing();
            CronTrigger cronTrigger = TriggerBuilder.newTrigger().withIdentity(triggerKey)
                    .withDescription(createTime).withSchedule(cronScheduleBuilder).build();
            JobKey jobKey = new JobKey(jobName, jobGroup);
            JobBuilder jobBuilder = scheduler.getJobDetail(jobKey).getJobBuilder();
            JobDetail jobDetail = scheduler.getJobDetail(jobKey);
            if (info.getDataMap() == null) info.setDataMap(jobDetail.getJobDataMap());
            jobDetail = jobBuilder.usingJobData(info.getDataMap()).withDescription(jobDescription).build();
            Set<Trigger> triggerSet = new HashSet<>();
            triggerSet.add(cronTrigger);
            // replace=true: overwrite the stored job with the rebuilt detail.
            scheduler.scheduleJob(jobDetail, triggerSet, true);
        } catch (SchedulerException e) {
            e.printStackTrace();
            throw new ServiceException("类名不存在或执行表达式错误");
        }
    }

    /** Pauses and unschedules the trigger identified by (jobName, jobGroup). */
    public void delete(final String jobName, final String jobGroup) {
        try {
            final TriggerKey triggerKey = TriggerKey.triggerKey(jobName, jobGroup);
            if (scheduler.checkExists(triggerKey)) {
                scheduler.pauseTrigger(triggerKey);
                scheduler.unscheduleJob(triggerKey);
            }
        } catch (SchedulerException e) {
            e.printStackTrace();
            throw new ServiceException(e.getMessage());
        }
    }

    /** Convenience overload: derives the trigger name from the job class. */
    public void delete(final Class<?> clazz, final String jobGroup) {
        try {
            final TriggerKey triggerKey = TriggerKey.triggerKey(clazz.getName(), jobGroup);
            if (scheduler.checkExists(triggerKey)) {
                scheduler.pauseTrigger(triggerKey);
                scheduler.unscheduleJob(triggerKey);
            }
        } catch (SchedulerException e) {
            e.printStackTrace();
            throw new ServiceException(e.getMessage());
        }
    }

    /** Pauses the trigger if it exists; no-op otherwise. */
    public void pause(final String jobName, final String jobGroup) {
        try {
            final TriggerKey triggerKey = TriggerKey.triggerKey(jobName, jobGroup);
            if (scheduler.checkExists(triggerKey)) {
                scheduler.pauseTrigger(triggerKey);
            }
        } catch (SchedulerException e) {
            e.printStackTrace();
            throw new ServiceException(e.getMessage());
        }
    }

    /** Resumes a previously paused trigger if it exists; no-op otherwise. */
    public void resume(final String jobName, final String jobGroup) {
        try {
            final TriggerKey triggerKey = TriggerKey.triggerKey(jobName, jobGroup);
            if (scheduler.checkExists(triggerKey)) {
                scheduler.resumeTrigger(triggerKey);
            }
        } catch (SchedulerException e) {
            e.printStackTrace();
            throw new ServiceException(e.getMessage());
        }
    }
}
|
<reponame>nevermined-io/cryptoarts<filename>client/src/components/templates/Asset/ArtworkFile.tsx<gh_stars>1-10
import React, { PureComponent } from 'react'
import { Logger, DDO, File, Account } from '@nevermined-io/nevermined-sdk-js'
import Button from '../../atoms/Button'
import Spinner from '../../atoms/Spinner'
import { User, Market } from '../../../context'
import styles from './ArtworkFile.module.scss'
import ReactGA from 'react-ga'
import Modal from '../../atoms/Modal'
// Human-readable progress messages for the purchase flow, keyed by step
// number (see ArtworkFileState.step; 99 is the initial "decrypting" state).
export const messages: any = {
    99: 'Decrypting file URL...',
    0: '1/3<br />Checking balance...',
    1: '2/3<br />Locking payment...',
    2: '2/3<br />Payment confirmed. Requesting access...',
    3: '3/3<br /> Access granted. Consuming file...'
}
// Props: the file entry being sold, its asset document (DDO) and list price.
interface ArtworkFileProps {
    file: File
    ddo: DDO
    price: number
}

// Local UI state for the purchase button/modal.
interface ArtworkFileState {
    isLoading: boolean
    error: string
    step: number // current purchase step; indexes `messages`
    isModalOpen: boolean
    unsoldNfts: number // remaining NFT balance held by the asset owner
}
/**
 * Purchase button + modal for a single artwork file.
 * Shows BUY NOW / SOLD OUT based on the owner's remaining NFT balance, runs
 * the Nevermined order/access flow and reports progress or failure in a modal.
 * Fix: corrected user-facing typo "sucessfully" -> "successfully".
 */
export default class ArtworkFile extends PureComponent<
    ArtworkFileProps,
    ArtworkFileState
> {
    public static contextType = User

    public state = {
        isLoading: false,
        error: '',
        step: 99, // see `messages`: 99 = initial "decrypting" state
        isModalOpen: false,
        unsoldNfts: 0,
    }

    public componentDidMount() {
        this.getUnsoldNfts()
    }

    // Toggles the modal. NOTE(review): this also (re)starts the purchase flow
    // on every toggle, including when closing the modal — confirm intentional.
    private handleToggleModal = () => {
        this.setState({ isModalOpen: !this.state.isModalOpen })
        const { ddo, file } = this.props
        const { index } = file
        this.purchaseAsset(ddo, index ?? -1)
    }

    // Puts the component back into the "in progress" initial state.
    private resetState = () =>
        this.setState({
            isLoading: true,
            error: '',
            step: 99,
        })

    // Reads the asset owner's remaining NFT balance (unsold editions).
    private getUnsoldNfts = async () => {
        const { ddo } = this.props
        const { sdk } = this.context
        const { owner } = await sdk.nfts.details(ddo.id)
        const account = new Account(owner)
        sdk.nfts.balance(ddo.id, account)
            .then((balance: string) => {
                this.setState({ unsoldNfts: Number(balance) })
                console.log(balance)
            })
    }

    // Full purchase flow: balance check -> order (lock payment) -> access.
    // Progress is surfaced through `step`; failures land in `error`.
    // NOTE(review): `index` is currently unused by the flow.
    private purchaseAsset = async (ddo: DDO, index: number) => {
        this.resetState()
        ReactGA.event({
            category: 'Purchase',
            action: 'purchaseAsset-start ' + ddo.id
        })
        const { sdk } = this.context
        try {
            const accounts = await sdk.accounts.list()
            // check balance
            this.setState({ step: 0 })
            const balance = await accounts[0].getNeverminedBalance()
            if (balance < this.props.price) {
                throw Error(`Not Enough balance: ${balance}`)
            }
            // order nft and lock payment
            this.setState({ step: 1 })
            await sdk.nfts.order(ddo.id, 1, accounts[0])
            // requesting access
            this.setState({ step: 2 })
            const path = await sdk.nfts.access(ddo.id, accounts[0])
            this.setState({ step: 3 })
            Logger.log('path', path)
            ReactGA.event({
                category: 'Purchase',
                action: 'purchaseAsset-end ' + ddo.id
            })
            this.setState({ isLoading: false })
        } catch (error) {
            Logger.error('error', error.message)
            this.setState({
                isLoading: false,
                error: `${error.message}. Sorry about that, can you try again?`
            })
            ReactGA.event({
                category: 'Purchase',
                action: 'purchaseAsset-error ' + error.message
            })
        }
    }

    // Success view shown in the modal when the purchase completed.
    private renderSuccessful() {
        return (
            <div className={styles.modal}>
                <div
                    className={styles.iconSuccessful}
                />
                <div className={styles.text}>
                    <span>Purchase Successful!</span>
                    <p>Congratulations, you were able to successfully purchase this artwork</p>
                </div>
                <Button
                    primary
                    className={styles.buttonSuccessful}
                    onClick={this.handleToggleModal}
                >
                    Complete
                </Button>
            </div>
        )
    }

    // Failure view: shows the captured error message.
    private renderFailed() {
        const { error } = this.state
        return (
            <div className={styles.modal}>
                <div
                    className={styles.iconFailed}
                />
                <div className={styles.text}>
                    <span>Purchase Failed!</span>
                    <p>{error}</p>
                </div>
                <Button
                    primary
                    className={styles.buttonFailed}
                    onClick={this.handleToggleModal}
                >
                    Return
                </Button>
            </div>
        )
    }

    private renderModalContent() {
        const { error } = this.state
        if (error !== '') {
            return this.renderFailed()
        } else {
            return this.renderSuccessful()
        }
    }

    public render() {
        const { isLoading, step, unsoldNfts } = this.state
        const { isLogged } = this.context
        return (
            <div>
                <Market.Consumer>
                    {market => (
                        <Button
                            primary
                            fullWidth
                            onClick={this.handleToggleModal}
                            disabled={!isLogged || !market.networkMatch || unsoldNfts < 1}
                            name="Download"
                        >
                            {unsoldNfts > 0 ? 'BUY NOW' : 'SOLD OUT'}
                        </Button>
                    )}
                </Market.Consumer>
                <Modal
                    title=''
                    isOpen={this.state.isModalOpen}
                    toggleModal={this.handleToggleModal}
                    overrideButton={true}
                >
                    {isLoading ? (
                        <Spinner message={messages[step]} />
                    ) : (
                        <>
                            {this.renderModalContent()}
                        </>
                    )
                    }
                </Modal>
            </div>
        )
    }
}
|
#! /bin/bash

# Integration test: verifies that saved credentials (~/.liquidfiles/credentials)
# are written by `get_api_key -s` and accepted by subsequent commands.

DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/common.sh

# Stash any pre-existing credentials so the test starts unauthenticated.
if [ -f ~/.liquidfiles/credentials ]; then
    mv ~/.liquidfiles/credentials ~/.liquidfiles/.credentials
fi

# With no credentials this call must fail; success means stale auth leaked in.
$EXEC messages > /dev/null
status=$?
if [ $status -eq 0 ]; then
    if [ -f ~/.liquidfiles/.credentials ]; then
        mv ~/.liquidfiles/.credentials ~/.liquidfiles/credentials
    fi
    echo "Error: Credentials not working"
    echo "Test FAILED."
    exit 99
fi

# Obtain an API key and save it (-s) to the credentials file.
$EXEC get_api_key --server=$SERVER -k --username=xustup@gmail.com --password=TestPassword_1 -s
# NOTE(review): KEY is never assigned from the command above, so this strips
# the last word of an empty/unset variable — confirm whether common.sh sets
# KEY, or whether `KEY=$($EXEC ...)` was intended.
KEY=${KEY##* }

# The credentials file must now exist.
if [ ! -f ~/.liquidfiles/credentials ]; then
    if [ -f ~/.liquidfiles/.credentials ]; then
        mv ~/.liquidfiles/.credentials ~/.liquidfiles/credentials
    fi
    echo "Credentials didn't saved."
    echo "Test FAILED."
    exit 99
fi

# Saved credentials must authenticate the messages call.
$EXEC messages
test_status "Credentials not working."

# Clean up and restore whatever credentials existed before the test.
rm ~/.liquidfiles/credentials
if [ -f ~/.liquidfiles/.credentials ]; then
    mv ~/.liquidfiles/.credentials ~/.liquidfiles/credentials
fi
echo "Test PASSED."
|
<gh_stars>1-10
#ifndef AES_H
#define AES_H

#include <stdint.h>
#include <stdlib.h>

#include <oqs/aes.h>

/* Key lengths in bytes for the three AES variants. */
#define AES128_KEYBYTES 16
#define AES192_KEYBYTES 24
#define AES256_KEYBYTES 32
/* CTR-mode nonce length and AES block size, in bytes. */
#define AESCTR_NONCEBYTES 12
#define AES_BLOCKBYTES 16

/* Opaque handle to a libOQS key schedule (allocated by aes128_keyexp). */
typedef void * aes128ctx;

/* Expand `key` (AES128_KEYBYTES bytes) into an encryption key schedule,
 * storing the libOQS schedule pointer in *r. */
static void aes128_keyexp(aes128ctx *r, const unsigned char *key) {
    OQS_AES128_load_schedule(key, r, 1);
}

/* ECB-encrypt `nblocks` full blocks from `in` to `out` using *ctx.
 * WARNING: the schedule is freed on return — see FIXME below. */
static void aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, aes128ctx *ctx) {
    OQS_AES128_ECB_enc_sch(in, nblocks * AES_BLOCKBYTES, *ctx, out);
    OQS_AES128_free_schedule(*ctx);
    // FIXME: PQClean AES API expects that aes128_ecb can be called multiple
    // times with the same key schedule, but this instantiation does not, since
    // it frees the key schedule immediately
}
#endif
|
<reponame>evazion/ruby-booru
require "danbooru/resource"
class Danbooru::Resource::Posts < Danbooru::Resource
def search(workers: 2, by: :page, **params)
all(workers: workers, by: by, **params)
end
def tag(id, tags)
tags = tags.join(" ") if tags.is_a?(Array)
update(id, "post[old_tag_string]": "", "post[tag_string]": tags)
end
end
|
/*
* Copyright 2013 Stanford University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution.
*
* - Neither the name of the copyright holders nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* A simple implementation.
* Author: <NAME> <<EMAIL>>
*/
#include "test/experimental_quh/job_index.h"
namespace nimbus {
JobIndex::JobIndex() {
_job_table.clear();
}
JobIndex::~JobIndex() {
while (!_job_table.empty()) {
delete _job_table.back();
_job_table.pop_back();
}
}
int JobIndex::QueryJobEntry(
const VariableRegionSet& read_set,
const VariableRegionSet& write_set,
JobIdSet* result) {
int number = 0;
VariableRegionSet temp_read_set(read_set);
VariableRegionSet temp_write_set(write_set);
result->clear();
bool test_read_after_write, test_write_after_read, test_write_after_write;
for (JobEntryTable::reverse_iterator index = _job_table.rbegin();
index != _job_table.rend();
index++) {
test_read_after_write =
temp_read_set.IntersectsAndDelete((*index)->write_set);
test_write_after_write =
temp_write_set.IntersectsAndDelete((*index)->write_set);
test_write_after_read =
temp_write_set.IntersectsTest((*index)->read_set);
if (test_read_after_write || test_write_after_write ||
test_write_after_read) {
result->insert((*index)->job_id);
number++;
}
}
return number;
}
bool JobIndex::AddJobEntry(
const job_id_t job_id,
const VariableRegionSet& read_set,
const VariableRegionSet& write_set) {
JobEntry* temp = new JobEntry;
temp->job_id = job_id;
temp->read_set.CopyFrom(read_set);
temp->write_set.CopyFrom(write_set);
_job_table.push_back(temp);
return true;
}
// TODO(quhang) Never run.
bool JobIndex::DeleteJobEntry(const job_id_t job_id) {
for (JobEntryTable::iterator index = _job_table.begin();
index != _job_table.end();
index++)
if ((*index)->job_id == job_id) {
delete (*index);
_job_table.erase(index);
return true;
}
return false;
}
// TODO(quhang) Never run.
int JobIndex::AllJobEntries(JobIdSet* result) {
int number = 0;
for (JobEntryTable::reverse_iterator index = _job_table.rbegin();
index != _job_table.rend();
index++) {
result->insert((*index)->job_id);
number++;
}
return number;
}
} // namespace nimbus
|
<filename>tests/events/bootup.js
module.exports = {
event:"ready",
execute({client}){
client.user.setActivity("I'm ready!");
}
} |
import { Request, Response, Router } from 'express';
import { generateRandomPairs } from './helpers';
import { VoteResult } from '../types';
import { DummyStorage } from '../store';

const router = Router();
// In-memory per-process vote storage, keyed by express session id.
const storage = new DummyStorage();

// GET / — return a fresh set of random image pairs for the client to vote on.
router.get('/', async (req: Request, res: Response) => {
  const pairs = await generateRandomPairs(req);
  res.json(pairs);
});

// POST / — record one vote for the current session and report progress.
router.post('/', async (req: Request, res: Response) => {
  const { body }: {body: VoteResult} = req;
  // Initialize storage for user
  const { sessionID } = req;
  if (!storage.get(sessionID)) {
    storage.put(sessionID, {
      imagesCounter: 0,
      result: 0,
    });
  }
  // Save result: a vote counts as correct only when the user answered "1",
  // the image was labelled real, and the question wasn't passed/skipped.
  const answer = +body.answer === 1 && body.label === 'real' && !body.pass;
  const result = storage.increment(sessionID, answer, body.pass);
  // Once the session is complete, wipe its state so a new round can start.
  if (result.shouldStop) {
    storage.reset(sessionID);
  }
  res.json(result);
});

export default router;
|
from sklearn.cluster import KMeans

# Five 2-D sample points to partition.
X = [[1, 2], [3, 4], [5, 6], [9, 1], [2, 8]]

# Fit k-means with two clusters; labels_[i] is the cluster index of X[i].
# NOTE(review): cluster numbering (0 vs 1) depends on the random
# initialization, so which points end up in cluster_0 is not guaranteed
# to be stable across runs.
kmeans = KMeans(n_clusters=2).fit(X)
clusters = kmeans.labels_

cluster_0 = [X[i] for i in range(len(clusters)) if clusters[i] == 0]
cluster_1 = [X[i] for i in range(len(clusters)) if clusters[i] == 1]

print(cluster_0)  # e.g. [[1, 2], [3, 4], [5, 6]] — grouping may vary per run
print(cluster_1)  # e.g. [[9, 1], [2, 8]]
<reponame>sergirubio/PyTangoArchiving<filename>PyTangoArchiving/hdbpp/check_and_recover_attributes.py
# Maintenance script: for every HDB++ database, find attributes whose
# archiving was lost and try to restart them — event-based attributes via
# AttributeStop/AttributeStart on their subscriber, periodic attributes by
# re-adding them to (or restarting) their periodic archiver.
# NOTE(review): original indentation was lost; block structure below was
# reconstructed from the data flow — confirm the trailing summary prints
# belong inside the per-database loop.
import PyTangoArchiving as pta, fandango as fn, PyTangoArchiving.hdbpp.maintenance as ptam
import traceback

#dbs = ['hdbacc','hdbct','hdbdi','hdbpc','hdbrf','hdbvc']
dbs = pta.get_hdbpp_databases()
# One schema check per database; subscribe=False keeps the check passive.
checks = dict((d,pta.check.check_db_schema(d,subscribe=False)) for d in dbs)

for db in dbs:
    check,api = checks[db],checks[db].api
    print('>'*80)
    print('\nrecovering %d lost attributes from %s\n' % (len(check.lost),db))
    # Split lost attributes into periodic vs event-driven archiving.
    perlost = [a for a in check.lost if api.is_periodic_archived(a)]
    evlost = [a for a in check.lost if not api.is_periodic_archived(a)]
    errors = [a for a in evlost if api.get_attribute_errors(a)]
    # Only attributes that still emit Tango events are worth restarting.
    recover = [a for a in errors if fn.tango.check_attribute_events(a)]
    failed = []
    for a in evlost:
        print('recovering %s' % a)
        if a in errors and a not in recover:
            print('%s not recoverable' % a)
            continue
        try:
            d = api.get_attribute_subscriber(a)
            dp = fn.get_device(d)
            dp.AttributeStop(a)
            fn.wait(0.5)
            dp.AttributeStart(a)
        except:
            failed.append(a)
            # NOTE(review): `d` may be unbound here if
            # get_attribute_subscriber itself raised.
            print(a,d,traceback.format_exc())
    periods = dict((a,api.get_periodic_attribute_period(a)) for a in perlost)
    for per in api.get_periodic_archivers():
        perattrs = api.get_periodic_archiver_attributes(per)
        # If more than 30% of an archiver's attributes are lost, restart the
        # whole archiver instead of re-adding attributes one by one.
        if len([a for a in perattrs if a in perlost]) > 0.3*len(perattrs):
            fn.Astor(per).stop_servers()
            fn.wait(5.)
            fn.Astor(per).start_servers()
        else:
            for attr in [p for p in perattrs if p in perlost]:
                period = periods[attr]
                print('recovering %s' % attr)
                try:
                    d = api.get_periodic_attribute_archiver(attr)
                    dp = fn.get_device(d)
                    dp.AttributeRemove(attr)
                    fn.wait(.5)
                    dp.AttributeAdd([attr,str(int(period))])
                    fn.wait(.5)
                    print('%s done' % attr)
                except:
                    failed.append(attr)
                    print(attr,d,traceback.format_exc())
    print('attributes not recoverable: %s' % str([a for a in errors if a not in recover]))
    print('attributes failed: %s' % str(failed))
|
#!/bin/bash
#
# You should only work under the /scratch/users/<username> directory.
#
# SLURM job script: runs the serial cardiac simulation at several problem
# sizes and compares against MPI runs with increasing rank counts.
#
# -= Resources =-
#
#SBATCH --job-name=e-nonex-cardiac-sim
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=32
#SBATCH --partition=short
##SBATCH --exclusive
##SBATCH --constraint=e52695v4,36cpu
#SBATCH --time=02:00:00
#SBATCH --output=./outputs/e-2-cardiacsim-%j.out
#SBATCH --mail-type=ALL
# #SBATCH --mail-user=bbarlas15@ku.edu.tr
#SBATCH --mem-per-cpu=1000M

################################################################################
################################################################################

## Load openmpi version 3.0.0
echo "Loading openmpi module ..."
module load openmpi/3.0.0

## Load GCC-7.2.1
echo "Loading GCC module ..."
module load gcc/7.3.0
echo ""
echo "======================================================================================"
#env
echo "======================================================================================"
echo ""

# Set stack size to unlimited
echo "Setting stack size to unlimited..."
ulimit -s unlimited
ulimit -l unlimited
ulimit -a
echo

echo "Serial version ..."
./cardiacsim-serial -n 400 -t 100
echo "Serial version ..."
./cardiacsim-serial -n 1024 -t 100

# Different MPI+OpenMP configurations
# [1 + 32] [2 + 16] [4 + 8] [8 + 4] [16 + 2] [32 + 1]
# NOTE(review): the echo labels below all say "n=256" while the actual runs
# use -n 512/1024/2048/4096/8192 — confirm which is intended before relying
# on the log output.
echo "Serial version ..."
./cardiacsim-serial -n 256 -t 10000
echo "1 MPI n=256"
mpirun -np 1 ./cardiacsim -n 256 -t 404 -x 1 -y 4

echo "Serial version ..."
./cardiacsim-serial -n 512 -t 5000
echo "2 MPI n=256"
mpirun -np 2 ./cardiacsim -n 512 -t 152 -x 1 -y 2

echo "Serial version ..."
./cardiacsim-serial -n 1024 -t 2500
echo "4 MPI n=256"
mpirun -np 4 ./cardiacsim -n 1024 -t 44 -x 1 -y 4

echo "Serial version ..."
./cardiacsim-serial -n 2048 -t 1250
echo "8 MPI n=256"
mpirun -np 8 ./cardiacsim -n 2048 -t 12 -x 1 -y 8

echo "Serial version ..."
./cardiacsim-serial -n 4096 -t 725
echo "16 MPI n=256"
mpirun -np 16 ./cardiacsim -n 4096 -t 3 -x 1 -y 16

echo "Serial version ..."
./cardiacsim-serial -n 8192 -t 1
echo "32 MPI n=256"
mpirun -np 32 ./cardiacsim -n 8192 -t 1 -x 1 -y 32
#....

echo "Finished with execution!"
package malte0811.controlengineering.gui.panel;
import com.mojang.blaze3d.vertex.PoseStack;
import malte0811.controlengineering.ControlEngineering;
import malte0811.controlengineering.controlpanels.PlacedComponent;
import malte0811.controlengineering.gui.StackedScreen;
import malte0811.controlengineering.util.ScreenUtils;
import malte0811.controlengineering.util.math.Vec2d;
import net.minecraft.client.Minecraft;
import net.minecraft.client.gui.screens.inventory.MenuAccess;
import net.minecraft.network.chat.Component;
import net.minecraft.network.chat.TranslatableComponent;
import net.minecraft.util.Mth;
import javax.annotation.Nonnull;
import java.util.List;
/**
 * Screen for laying out components on a control panel.
 * Left side: component selector; right side: the panel layout area.
 * The footer shows required vs. available tape length, in red when the
 * design needs more tape than is available.
 */
public class PanelDesignScreen extends StackedScreen implements MenuAccess<PanelDesignMenu> {
    public static final String REQUIRED_VS_AVAILABLE_TAPE = ControlEngineering.MODID + ".gui.reqVsAvTape";
    // Margin (px) kept between the screen edge and the widgets.
    private static final int BORDER = 20;
    @Nonnull
    private final PanelDesignMenu container;
    // Cached layout anchors used by renderForeground for the tape label.
    private int panelLayoutXMin;
    private int panelLayoutYMax;

    public PanelDesignScreen(@Nonnull PanelDesignMenu container, Component title) {
        super(title);
        this.container = container;
    }

    /**
     * Computes widget sizes from the current window size and (re)creates the
     * selector and layout widgets. Selector width and panel size are clamped
     * so both stay usable on small windows.
     */
    @Override
    protected void init() {
        super.init();
        final int usedHeight = height - 2 * BORDER;
        final int availableWidth = width - 2 * BORDER;
        final int selectorWidth = Mth.clamp(availableWidth - usedHeight, 100, usedHeight / 2);
        final int panelSize = Mth.clamp(usedHeight, 150, availableWidth - selectorWidth);
        final int usedWidth = selectorWidth + panelSize;
        final int offset = (width - usedWidth) / 2; // center the pair horizontally
        this.panelLayoutXMin = selectorWidth + offset;
        this.panelLayoutYMax = BORDER + panelSize;
        PanelLayout panelLayout = new PanelLayout(panelLayoutXMin, BORDER, panelSize, container.getComponents());
        addRenderableWidget(new ComponentSelector(offset, BORDER, selectorWidth, panelSize, panelLayout::setPlacingComponent));
        addRenderableWidget(panelLayout);
    }

    /** Draws the "required vs available tape" label under the panel area. */
    @Override
    protected void renderForeground(@Nonnull PoseStack matrixStack, int mouseX, int mouseY, float partialTicks) {
        final int required = container.getRequiredTapeLength();
        final int available = container.getAvailableTapeLength();
        // White when the design fits on the available tape, red otherwise.
        final int color = required <= available ? -1 : 0xff_ff0000;
        Minecraft.getInstance().font.draw(
                matrixStack,
                new TranslatableComponent(REQUIRED_VS_AVAILABLE_TAPE, required, available),
                panelLayoutXMin, panelLayoutYMax + 5,
                color
        );
    }

    /**
     * Routes key presses to the widget under the mouse first; only falls back
     * to the default handling if that widget did not consume the key.
     */
    @Override
    public boolean keyPressed(int keyCode, int scanCode, int modifiers) {
        final Vec2d mouse = ScreenUtils.getMousePosition();
        for (var button : children()) {
            if (button.isMouseOver(mouse.x(), mouse.y())) {
                if (button.keyPressed(keyCode, scanCode, modifiers)) {
                    return true;
                } else {
                    break;
                }
            }
        }
        return super.keyPressed(keyCode, scanCode, modifiers);
    }

    public List<PlacedComponent> getComponents() {
        return container.getComponents();
    }

    @Nonnull
    @Override
    public PanelDesignMenu getMenu() {
        return container;
    }
}
|
<gh_stars>1-10
module.exports = {
getStaticProps: jest.fn(),
render: (req, res) => {
res.end("pages/fallback/[slug].js");
},
renderReqToHTML: (req, res) => {
return Promise.resolve({
html: "<div>Rendered Page</div>",
renderOpts: {
pageData: {
page: "pages/fallback/[slug].js"
}
}
});
}
};
|
def sum_without_arithmetic_ops(a, b):
    """Add two integers using only bitwise operators (no ``+``/``-``).

    Classic carry-propagation adder: XOR adds without carry, and the
    AND shifted left one bit produces the carry; repeat until the carry
    is exhausted.

    Bug fixed: the original looped ``while b > 0``, which silently
    returned ``a`` unchanged whenever ``b`` was negative.  Negative
    operands are now handled by emulating 64-bit two's complement.

    Args:
        a: First addend.  Negative values must fit in a signed 64-bit range.
        b: Second addend, same constraint.

    Returns:
        The integer sum ``a + b``.
    """
    if a >= 0 and b >= 0:
        # Python ints are unbounded, so for non-negative operands the
        # plain loop terminates and preserves arbitrary precision.
        while b:
            a, b = a ^ b, (a & b) << 1
        return a
    # Negative operand(s): emulate fixed-width two's complement so the
    # carry chain terminates (an unbounded negative int never would).
    mask = (1 << 64) - 1
    a &= mask
    b &= mask
    while b:
        a, b = (a ^ b) & mask, ((a & b) << 1) & mask
    # Re-interpret the 64-bit pattern as a signed value (still no +/-:
    # for a negative result, a - 2**64 == ~(a ^ mask)).
    return a if a <= mask >> 1 else ~(a ^ mask)
# Demo: compute 3 + 4 with the bitwise adder and print the sum (7).
result = sum_without_arithmetic_ops(3, 4)
print(result)
#!/bin/sh
# get_next_line tester: color codes used throughout the output.
WEIRD_BG="\033[48;5;194m"
BLACK_FG="\033[38;5;0m"
BLACK_BG="\033[48;5;0m"
CLEAR_COLOR="\033[m"
MAIN_BG="\033[48;5;39m"
SIZE_BG="\033[48;5;11m"
TEST_FILE_BG="\033[48;5;172m"
# Directory where the user drops the files under test.
FOLD="./copy_in_here_GNL_files/"
# Abort early when any of the three mandatory source files is missing.
if [ ! -f "${FOLD}get_next_line.c" ] || [ ! -f "${FOLD}get_next_line_utils.c" ] || [ ! -f "${FOLD}get_next_line.h" ]
then
echo "get_next_line.c or\nget_next_line_utils.c or\nget_next_line.h\033[38;5;1m is missing${CLEAR_COLOR} in folder '${FOLD}'"
echo "\n\t🥰 RTFM 🥰 "
exit
fi
# Drive one scenario per test main found in mains/.
# Bug fixed below (1024-byte case): the failure branch used to print a
# green "OK" instead of a red "KO", hiding real diff failures.
for MAIN_NAME in mains/*
do
	if [ $MAIN_NAME == "mains/main.c" ]
	then
		echo "\n\n\tbuilding with main: ${MAIN_BG}${BLACK_FG}\t\t\t${MAIN_NAME}\t\t\t${CLEAR_COLOR}"
		# Files 1-9 end with a newline; try several small buffer sizes.
		for TEST in `seq 1 9`
		do
			TEST_FILE=./test_files_GNL/test_file${TEST}
			echo "\n\n\t\ttest_file is:\t\t${TEST_FILE_BG}${BLACK_FG}\t\ttest_file${TEST}\t\t${CLEAR_COLOR}"
			for SIZE in `seq 1 5`
			do
				make re MAIN=${MAIN_NAME} BUF_SIZE=${SIZE}
				./get_next_line ${TEST_FILE}
				diff -u user_output ${TEST_FILE}
				RESULT=$?
				if [ ${RESULT} -eq 0 ]
				then
					echo "when BUFFER_SIZE=${SIZE}:\t\033[32mOK with file ending with a '\ n'\033[0m"
				else
					echo "when BUFFER_SIZE=${SIZE}:\t\033[31mKO with file ending with a '\ n'\033[0m"
				fi
			done
		done
		# Extra run with a large buffer; reuses the last test file (test_file9).
		SIZE=1024
		make re MAIN=${MAIN_NAME} BUF_SIZE=${SIZE}
		./get_next_line ${TEST_FILE}
		diff -u user_output ${TEST_FILE}
		RESULT=$?
		if [ ${RESULT} -eq 0 ]
		then
			echo "\t\twhen BUFFER_SIZE= ⚠️ ${WEIRD_BG}${BLACK_FG} ${SIZE} ${CLEAR_COLOR} ⚠️ : \033[32mOK${CLEAR_COLOR}"
		else
			# Fixed: report a red KO on diff failure (was a green OK).
			echo "\t\twhen BUFFER_SIZE= ${WEIRD_BG}${BLACK_FG} ${SIZE} ${CLEAR_COLOR} : \033[31mKO${CLEAR_COLOR}"
		fi
	elif [ ${MAIN_NAME} == "mains/main_no_end_of_line_at_end.c" ]
	then
		echo "\n\n\tbuilding with main: ${MAIN_BG}${BLACK_FG}\t\t${MAIN_NAME}\t${CLEAR_COLOR}"
		# Files 10-17 do NOT end with a newline.
		for TEST in `seq 10 17`
		do
			TEST_FILE=./test_files_GNL/test_file${TEST}
			echo "\n\n\t\ttest_file is:\t\t${TEST_FILE_BG}${BLACK_FG}\t\ttest_file${TEST}\t\t${CLEAR_COLOR}"
			for SIZE in `seq 3 5`
			do
				make re MAIN=${MAIN_NAME} BUF_SIZE=${SIZE}
				./get_next_line ${TEST_FILE}
				diff -u user_output ${TEST_FILE}
				RESULT=$?
				if [ ${RESULT} -eq 0 ]
				then
					echo "when BUFFER_SIZE=${SIZE}:\t\033[32mGNL OK with no '\ n' at end of file\033[0m"
				else
					echo "when BUFFER_SIZE=${SIZE}:\t\033[31mGNL KO with no '\ n' at end of file\033[0m"
				fi
			done
		done
	elif [ ${MAIN_NAME} == "mains/main_INPUTS_WRONG.c" ]
	then
		echo "\n\n\tbuilding with main: ${MAIN_BG}${BLACK_FG}\t\t${MAIN_NAME}\t\t${CLEAR_COLOR}"
		# Invalid-input scenario: bad fd, then BUFFER_SIZE of 0.
		SIZE=3
		TEST_FILE=Makefile
		echo "\n\n\t\ttest_file is:\t\t${TEST_FILE_BG}${BLACK_FG}\t\t${TEST_FILE}\t\t${CLEAR_COLOR}"
		make re MAIN=${MAIN_NAME} BUF_SIZE=${SIZE}
		./get_next_line ${TEST_FILE}
		RESULT=$?
		if [ ${RESULT} -eq 0 ]
		then
			echo "when BUFFER_SIZE=${SIZE}:\t\033[32mGNL OK with WRONG_INPUTS\033[0m"
		else
			echo "when BUFFER_SIZE=${SIZE}:\t\033[31mGNL KO with WRONG INPUTS\033[0m"
		fi
		SIZE=0
		make re MAIN=${MAIN_NAME} BUF_SIZE=${SIZE}
		./get_next_line ${TEST_FILE}
		RESULT=$?
		if [ ${RESULT} -eq 0 ]
		then
			echo "when BUFFER_SIZE=${SIZE}:\t\033[32mGNL OK with BUFFER_SIZE = 0\033[0m"
		else
			echo "when BUFFER_SIZE=${SIZE}:\t\033[31mGNL KO with BUFFER_SIZE = 0\033[0m"
		fi
	elif [ ${MAIN_NAME} == "mains/main_dev_null.c" ];
	then
		echo "\n\n\tbuilding with main: ${MAIN_BG}${BLACK_FG}\t\t${MAIN_NAME}\t\t\t${CLEAR_COLOR}"
		# Reading from /dev/null must not crash or loop.
		SIZE=12
		TEST_FILE=/dev/null
		echo "\n\n\t\ttest_file is:\t\t${TEST_FILE_BG}${BLACK_FG}\t${WEIRD_BG}${BLACK_FG}\t${TEST_FILE}\t${TEST_FILE_BG}${BLACK_FG}\t${CLEAR_COLOR}"
		make re MAIN=${MAIN_NAME} BUF_SIZE=${SIZE}
		./get_next_line ${TEST_FILE}
		RESULT=$?
		if [ ${RESULT} -eq 0 ]
		then
			echo "when BUFFER_SIZE=${SIZE}:\t\033[32mGNL OK with /dev/null\033[0m"
		else
			echo "when BUFFER_SIZE=${SIZE}:\t\033[31mGNL KO with /dev/null\033[0m"
		fi
	elif [ ${MAIN_NAME} == "mains/main_STDIN_FILENO.c" ];
	then
		echo "\n\n\tbuilding with main: ${MAIN_BG}${BLACK_FG}\t\t${MAIN_NAME}\t\t${CLEAR_COLOR}"
		# Interactive check: reads from standard input, no diff performed.
		SIZE=12
		TEST_FILE=/dev/stdin
		echo "\n\n\t\ttest_file is:\t\t${TEST_FILE_BG}${BLACK_FG}\t${WEIRD_BG}${BLACK_FG}\t${TEST_FILE}\t${TEST_FILE_BG}${BLACK_FG}\t${CLEAR_COLOR} BUFFER_SIZE=${SIZE}"
		make re MAIN=${MAIN_NAME} BUF_SIZE=${SIZE}
		./get_next_line
	fi
done
# Source-level checks on the submitted files (read() usage, globals, statics).
echo "\n\n\n 🔎 check if the read function is called with 'BUFFER_SIZE': 🔎\n"
cat copy_in_here_GNL_files/get_next_line.c copy_in_here_GNL_files/get_next_line_utils.c | grep "read(" | grep -n --colour "BUFFER_SIZE"
RESULT=$?
if [ ${RESULT} == 0 ]
then
	echo "\033[38;5;2m\n\t=====> ✅\n\033[0m"
else
	echo "\033[38;5;1m\n\t=====> ❌\n\033[0m"
fi
echo "\n\n\n 🔎 check if a global was declared: 🔎\n"
RESULT=`cat copy_in_here_GNL_files/* | grep -n --colour "global" | wc -l`
if [ ${RESULT} == 0 ]
then
	echo "\033[38;5;2m\t=====> ✅\t 0 \033[0mglobal variable used.\n"
else
	echo "\033[38;5;1m${RESULT}${CLEAR_COLOR} global variable(s) used.\n"
fi
echo "\n\n\n 🔎 check how many static variables were declared: 🔎\n"
RESULT=`cat copy_in_here_GNL_files/get_next_line.c copy_in_here_GNL_files/get_next_line_utils.c | grep -n --colour "^.static.*;$" | wc -l`
if [ ${RESULT} != 0 ]
then
	if [ ${RESULT} == 1 ]
	then
		echo "\033[38;5;2m\t=====> ✅ ${RESULT}${CLEAR_COLOR} static variable used in basic files.\n"
	else
		echo "\033[38;5;2m\t=====> ✅ ${RESULT}${CLEAR_COLOR} static variables used in basic files.\n"
	fi
fi
# RESULT below doubles as the "bonus files present" indicator used later.
RESULT=`cat copy_in_here_GNL_files/get_next_line_bonus.c copy_in_here_GNL_files/get_next_line_utils_bonus.c | grep -n --colour "^.static.*;$" | wc -l`
if [ ${RESULT} != 0 ]
then
	if [ ${RESULT} == 1 ]
	then
		echo "\033[38;5;2m\t=====> ✅ ${RESULT}${CLEAR_COLOR} static variable used in bonus files.\n"
	else
		echo "\033[38;5;2m\t=====> ✅ ${RESULT}${CLEAR_COLOR} static variables used in bonus files.\n"
	fi
fi
if [ ${RESULT} -gt 0 ]
then
	echo " ===> BONUS FILES PRESENT... "
	echo ""
	# Countdown 5..0 before running the multi-fd bonus test.
	# Bug fixed: "seq 5 0" emits nothing with GNU seq (it does not infer a
	# negative step), so the countdown never ran; the explicit -1 increment
	# counts down portably.
	for i in `seq 5 -1 0`
	do
		echo "\033[1A\033[38;5;10m$i\033[m"
		sleep 0.6
	done
	echo "\033[1A "
	echo " ===> START "
	sleep 0.5
	make fclean
	make bonus MAIN="mains/main_multy_fd.c"
	./get_next_line mains/main.c mains/main_INPUTS_WRONG.c mains/main_STDIN_FILENO.c mains/main_dev_null.c
	echo " ===> END "
fi
rm user_output
make fclean
|
#!/bin/bash
# Create the CPU port as a veth pair and bring both ends up.
ip link add name veth250 type veth peer name veth251
ip link set dev veth250 up
ip link set dev veth251 up

# Create the front-panel ports as TAP devices swp1..swp16.
num_ports=16
for port in $(seq 1 "${num_ports}")
do
    ip tuntap add dev "swp${port}" mode tap
    ip link set "swp${port}" up
done
|
#!/bin/bash
# Serve this script's directory over HTTP on ports 8001 (background) and
# 8000 (foreground).
# NOTE(review): "python -m SimpleHTTPServer" requires Python 2; for
# Python 3 the module is http.server — confirm the target environment.
cd `dirname "$0"`

# Bug fixed: if the foreground server was interrupted (Ctrl-C), the
# script exited without killing the background server on 8001, leaving
# it orphaned. The EXIT trap guarantees cleanup on any exit path.
trap 'kill $(jobs -p) 2>/dev/null' EXIT

echo Starting HTTP server in `pwd` on http://localhost:8001
python -m SimpleHTTPServer 8001 &
echo Starting HTTP server in `pwd` on http://localhost:8000
python -m SimpleHTTPServer 8000
|
#!/bin/sh
# Release helper: bumps the version in package.json, publishes to the
# VS Code marketplace and OpenVSX, then tags and uploads the .vsix to a
# GitHub release.
#
# Usage: ./release.sh 1.2.3
if [ $# != 1 ]; then
	echo Usage: ./release.sh 1.2.3
	exit 1
fi
# OpenVSX personal access token: taken from the environment, otherwise
# fetched from the `pass` password store.
if [ -z "$OVSX_PAT" ]; then
	OVSX_PAT="$(pass pat/openvsx)" || exit 1
	export OVSX_PAT
fi
# Echo commands and abort on the first failure.
set -ex
new_version="$1"
# Bump the "version" field. Hardening: all $new_version expansions are
# now quoted so an unusual argument cannot be word-split or glob-expanded
# by the shell (they were unquoted before).
sed -i 's/"version": ".*"/"version": "'"$new_version"'"/' package.json
npm i
git commit -am "Release $new_version"
git tag -a "v$new_version" -m "vscode-lean $new_version"
./node_modules/.bin/vsce publish
./node_modules/.bin/ovsx publish
git push
git push --tags
./node_modules/.bin/vsce package
hub release create -m "vscode-lean $new_version" "v$new_version" -a "lean-$new_version.vsix"
|
<reponame>NIRALUser/BatchMake
/*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: MomentRegistrator.h
Language: C++
Date: $Date$
Version: $Revision$
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
#ifndef __MomentRegistrator_h
#define __MomentRegistrator_h
#include "itkImage.h"
#include "itkImageRegistrationMethod.h"
#include "itkAffineTransform.h"
#include "itkImageMomentsCalculator.h"
namespace itk
{
// Registration method that aligns two images of the same type using image
// moments (via ImageMomentsCalculator) to produce an affine transform.
template< class TImage >
class MomentRegistrator : public ImageRegistrationMethod < TImage, TImage >
{
public:
// Standard ITK class typedefs (Self/Superclass/smart pointers).
typedef MomentRegistrator Self;
typedef ImageRegistrationMethod< TImage, TImage> Superclass;
typedef SmartPointer<Self> Pointer;
typedef SmartPointer<const Self> ConstPointer;
itkTypeMacro(MomentRegistrator, ImageRegistrationMethod);
itkNewMacro(Self);
itkStaticConstMacro(ImageDimension, unsigned int,
TImage::ImageDimension);
typedef typename TImage::PixelType PixelType ;
typedef typename TImage::RegionType RegionType ;
/** preprocessing related typedefs */
typedef AffineTransform<double, itkGetStaticConstMacro(ImageDimension)>
TransformType ;
typedef typename TransformType::ParametersType ParametersType ;
typedef typename TransformType::ParametersType ScalesType ;
typedef ImageMomentsCalculator< TImage > MomentsCalculatorType;
// Runs the moment-based registration (implementation in the .txx file).
void StartRegistration() ;
// Convenience accessor returning the superclass transform downcast to
// the concrete affine TransformType.
TransformType * GetTypedTransform(void)
{
return static_cast<TransformType *>(Superclass::GetTransform());
}
// Number of moments used by the registration.
itkSetMacro(NumberOfMoments, unsigned int) ;
itkGetConstMacro(NumberOfMoments, unsigned int) ;
protected:
MomentRegistrator() ;
virtual ~MomentRegistrator() ;
void PrintSelf(std::ostream & os, Indent indent) const;
// NOTE(review): dynamic exception specifications (throw(...)) are
// deprecated in C++11 and removed in C++17 — confirm the toolkit's
// required C++ standard before modernizing.
virtual void Initialize() throw(ExceptionObject);
// Error-reporting helpers (implementations in the .txx file).
void PrintUncaughtError() ;
void PrintError(ExceptionObject &e) ;
private:
unsigned int m_NumberOfMoments;
} ; // end of class
#ifndef ITK_MANUAL_INSTANTIATION
#include "MomentRegistrator.txx"
#endif
} // end namespace itk
#endif //__MomentRegistrator_H
|
#!/bin/sh
# Compile the "owgis" application with the Closure builder, pulling in
# sources from the Closure library, OpenLayers 3, and the local src/ tree.

closureLibPath=/home/olmozavala/Dropbox/TutorialsByMe/JS/ClosureLibrary/closure-library
closureCompilerPath=/home/olmozavala/Dropbox/TutorialsByMe/JS/ClosureLibrary/closure-compiler
ol3Path=/home/olmozavala/Dropbox/OpenLayers3/ol3

# Resolve dependencies from all roots and emit a single compiled bundle.
python "$closureLibPath/closure/bin/build/closurebuilder.py" \
  --root="$closureLibPath" \
  --root="$ol3Path/src/" \
  --root=src/ \
  --namespace="owgis" \
  --output_mode=compiled \
  --compiler_jar="$closureCompilerPath/compiler.jar" \
  > compiled/compiled.js
|
// Stream the STTS751 temperature reading (in °C) over serial once per
// second, labeled "x" for the MakeCode serial plotter.
basic.forever(() => {
    const celsius = STTS751.temperature(STTS751.STTS751_T_UNIT.C);
    serial.writeValue("x", celsius);
    basic.pause(1000);
});
import React, { Component } from 'react';
import ReactDOM from 'react-dom';
import Peer from 'simple-peer';
import MediaHandler from '../MediaHandler';
import Echo from "laravel-echo";
import Swal from 'sweetalert2';
import withReactContent from 'sweetalert2-react-content';
import axios from 'axios';
const MySwal = withReactContent(Swal)
// Video-chat screen: shows the local camera feed, a user list, and a chat
// column; listens on a Laravel Echo (pusher) channel for NewMessage events
// and surfaces them via SweetAlert2 popups.
export default class App extends Component {
constructor() {
super();
this.state = {
hasMedia: false,
otherUserId: null,
myuser: window.user.name
};
// Current user and media plumbing; peers keyed by user id (unused so far).
this.user = window.user;
this.stream = null;
this.peers = {};
this.mediaHandler = new MediaHandler();
this.startSocket = this.startSocket.bind(this);
}
// Wires up the Echo websocket connection and the NewMessage listener.
componentDidMount() {
// this.fetchInitialDataUsingHttp();
// window.Pusher = require('pusher-js');
window.Echo = new Echo({
broadcaster: 'pusher',
key: process.env.MIX_PUSHER_APP_KEY,
wsHost: window.location.hostname,
wsPort: 6001,
disableStats: true,
// forceTLS: true,
// wssPort: 6001,
// enabledTransports: ['ws', 'wss']
});
//Set up listeners when the component is being mounted
window.Echo.channel('home').listen('NewMessage', (e) =>{
// this.setState({name_user: e.message});
// this.setState({myuser: e.message});
// console.log(e.message);
MySwal.fire({
title: <p>Mensage de un user</p>,
footer: 'CmsWeb v3.0',
// NOTE(review): `onOpen` is the SweetAlert2 v8 callback name; v9+
// renamed it to `didOpen` — confirm the installed version.
onOpen: () => {
// `MySwal` is a subclass of `Swal`
// with all the same instance & static methods
MySwal.clickConfirm()
}
}).then(() => {
return MySwal.fire(<p>{e.message}</p>)
})
});
}
// Requests camera/mic permissions and attaches the stream to the <video>.
// NOTE(review): componentWillMount is deprecated in React 16.3+ (renamed
// UNSAFE_componentWillMount); consider moving this into componentDidMount.
componentWillMount() {
this.mediaHandler.getPermissions()
.then((stream) => {
this.setState({hasMedia: true});
try {
// Modern browsers: attach the MediaStream directly.
this.myVideo.srcObject = stream;
} catch (e) {
// Fallback for older browsers without srcObject support.
this.myVideo.src = URL.createObjectURL(stream);
}
this.myVideo.play();
})
}
// Button handler: pings the backend "send" endpoint (fire-and-forget).
// NOTE(review): no .catch() on the request — failures are silent.
startSocket() {
// this.setState(state => ({
// isToggleOn: !state.isToggleOn
// }));
axios.get('http://localhost:8000/videochats/send/hola_buddy')
.then(res => {
console.log(res);
})
}
// Three-column layout: chat list | video + source picker | connected users.
render() {
return (
<div className="container-fluid">
<div className="row">
<div className="col-xs-3 col-sm-3 col-md-3 col-lg-3">
<code><u>Chats de Usuarios</u></code>
<ul>
<li><code>{this.state.myuser}</code> - <small>Hola Grupo</small></li>
<li><code>admin</code> - <small>Hola Grupo</small></li>
<li><code><NAME></code> - <small>Hola Grupo</small></li>
</ul>
</div>
<div className="col-xs-6 col-sm-6 col-md-6 col-lg-6">
<video className="" width="100%" ref={(ref) => {this.myVideo = ref;}}></video>
<br />
<div className="standalone text-center">
<label style={{ padding: "30px" }}>
<input className="form-control" type="radio" />
Camara Web
</label>
<label>
<input className="form-control" type="radio" />
Escritorio
</label>
</div>
</div>
<div className="col-xs-3 col-sm-3 col-md-3 col-lg-3">
<code><u>Usuarios Conectados</u></code>
<ul>
<li>{this.state.myuser} <button className="btn btn-sm btn-primary" onClick={this.startSocket}>Pido Palabra </button></li>
<li>admin</li>
<li>juan peres</li>
</ul>
</div>
</div>
</div>
);
}
}
if (document.getElementById('example')) {
ReactDOM.render(<App />, document.getElementById('example'));
}
|
# Whether to use GPU (i.e. whether to use CUDA)
WITH_GPU=OFF
# Use MKL or openblas
WITH_MKL=OFF
# Whether to integrate TensorRT (only effective when WITH_GPU=ON)
WITH_TENSORRT=OFF
# TensorRT install path; if TensorRT integration is needed, change this
# to the path where TensorRT is actually installed
TENSORRT_DIR=/root/projects/TensorRT/
# Paddle inference library path; change to where it is actually installed
PADDLE_DIR=/root/projects/fluid_inference
# Whether to build against the static Paddle inference library
# (when using TensorRT, the Paddle inference library is usually dynamic)
WITH_STATIC_LIB=OFF
# CUDA lib path
CUDA_LIB=/usr/local/cuda/lib64
# CUDNN lib path
CUDNN_LIB=/usr/lib/aarch64-linux-gnu
# No changes needed below this line
rm -rf build
mkdir -p build
cd build
cmake .. \
-DWITH_GPU=${WITH_GPU} \
-DWITH_MKL=${WITH_MKL} \
-DWITH_TENSORRT=${WITH_TENSORRT} \
-DTENSORRT_DIR=${TENSORRT_DIR} \
-DPADDLE_DIR=${PADDLE_DIR} \
-DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
-DCUDA_LIB=${CUDA_LIB} \
-DCUDNN_LIB=${CUDNN_LIB}
make
|
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Repository root, resolved relative to this script's location.
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# Locate k8s.io/code-generator: prefer the vendored copy; fall back to a
# sibling checkout next to this repository. Overridable via CODEGEN_PKG.
CODEGEN_PKG=${CODEGEN_PKG:-$(
cd "${SCRIPT_ROOT}"
ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator
)}
# generate the code with:
# --output-base because this script should also be able to run inside the vendor dir of
# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir
# instead of the $GOPATH directly. For normal projects this can be dropped.
bash "${CODEGEN_PKG}"/generate-groups.sh "deepcopy,client,informer,lister" \
github.com/einyx/tor-ingress-controller/pkg/generated github.com/einyx/tor-ingress-controller/pkg/apis \
toringress:v1alpha1 \
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." \
--go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt
# To use your own boilerplate text append:
# --go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt
|
<reponame>rafax/sourcegraph
package debugproxies
import (
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
// TestClusterScan verifies that clusterScanner discovers endpoints only for
// services annotated with "sourcegraph.prometheus/scrape": "true", taking
// the scrape port from the "prometheus.io/port" annotation or, when that is
// absent, from the endpoint's own declared port.
func TestClusterScan(t *testing.T) {
// The consumer callback captures the most recent scan result.
var eps []Endpoint
consumer := func(seen []Endpoint) {
eps = nil
eps = append(eps, seen...)
}
// test setup
client := fake.NewSimpleClientset()
const ns = "test-ns"
cs := &clusterScanner{
client: client.CoreV1(),
consume: consumer,
namespace: ns,
}
// Fixture endpoints: one with a hostname, one plain, one with no port
// anywhere, and one whose port comes from the endpoint itself.
endpoints := []v1.Endpoints{
{
ObjectMeta: metav1.ObjectMeta{Name: "gitserver"},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
Hostname: "gitserver-0",
IP: "192.168.10.0",
}},
}},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "searcher"},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "192.168.10.3",
}},
}},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "no-port"},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "192.168.10.1",
}},
}},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "no-prom-port"},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "192.168.10.2",
}},
Ports: []v1.EndpointPort{{
Port: 2324,
}},
}},
},
}
for _, e := range endpoints {
_, err := cs.client.Endpoints(ns).Create(&e)
if err != nil {
t.Fatalf("unable to create test endpoint: %v", err)
}
}
// Fixture services: annotation combinations covering scrape/no-scrape and
// annotated/unannotated port cases.
svcs := []v1.Service{
{
ObjectMeta: metav1.ObjectMeta{
Name: "gitserver",
Namespace: ns,
Annotations: map[string]string{
"sourcegraph.prometheus/scrape": "true",
"prometheus.io/port": "2323",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "searcher",
Annotations: map[string]string{
"sourcegraph.prometheus/scrape": "true",
"prometheus.io/port": "2323",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "no-scrape",
Annotations: map[string]string{
"prometheus.io/port": "2323",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "no-prom-port",
Annotations: map[string]string{
"sourcegraph.prometheus/scrape": "true",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "no-port",
Annotations: map[string]string{
"sourcegraph.prometheus/scrape": "true",
},
},
},
}
for _, svc := range svcs {
_, err := cs.client.Services(ns).Create(&svc)
if err != nil {
t.Fatal(err)
}
}
cs.scanCluster()
// Expected: "no-scrape" excluded (missing scrape annotation), "no-port"
// excluded (no port available from annotation or endpoint).
want := []Endpoint{{
Service: "gitserver",
Addr: "192.168.10.0:2323",
Hostname: "gitserver-0",
}, {
Service: "searcher",
Addr: "192.168.10.3:2323",
}, {
Service: "no-prom-port",
Addr: "192.168.10.2:2324",
}}
if !cmp.Equal(want, eps) {
t.Errorf("mismatch (-want +got):\n%s", cmp.Diff(want, eps))
}
}
|
package monkey
import (
"errors"
"fmt"
"net/http"
"reflect"
"testing"
)
// TestMockGlobalFunc exercises MockGlobalFunc by patching fmt.Println.
//
// NOTE(review): the condition below reports an error when got EQUALS
// tt.want (nil) — i.e. with this table it effectively asserts that
// MockGlobalFunc returns a non-nil guard. That inverts the usual
// generated-table-test scaffold (`!reflect.DeepEqual`); confirm the
// inversion is intentional and consider renaming `want` if so.
func TestMockGlobalFunc(t *testing.T) {
type args struct {
target interface{}
replacement interface{}
}
tests := []struct {
name string
args args
want *PatchGuard
}{
{
name: "test1",
args: args{
target: fmt.Println,
replacement: func(a ...interface{}) (n int, err error) {
return 0, errors.New("test error")
},
},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := MockGlobalFunc(tt.args.target, tt.args.replacement); reflect.DeepEqual(got, tt.want) {
t.Errorf("MockGlobalFunc() = %v, want %v", got, tt.want)
}
})
}
}
// TestMockMemberFunc exercises MockMemberFunc by patching the Get method
// of *http.Client.
//
// NOTE(review): as in TestMockGlobalFunc above is mirrored here — the
// un-negated DeepEqual errors only when got equals tt.want (nil), so the
// test effectively asserts a non-nil guard; confirm this is intentional.
func TestMockMemberFunc(t *testing.T) {
type args struct {
target reflect.Type
methodName string
replacement interface{}
}
tests := []struct {
name string
args args
want *PatchGuard
}{
{
name: "test1",
args: args{
target: reflect.TypeOf(http.DefaultClient),
methodName: "Get",
replacement: func(c *http.Client, url string) (*http.Response, error) {
return nil, errors.New("test http response error")
},
},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := MockMemberFunc(tt.args.target, tt.args.methodName, tt.args.replacement); reflect.DeepEqual(got, tt.want) {
t.Errorf("MockMemberFunc() = %v, want %v", got, tt.want)
}
})
}
}
|
# Run the core Testity test assemblies under the default runtime.
nunit-console ./Testity.EngineServices.Tests/bin/Debug/Testity.EngineServices.Tests.dll
nunit-console ./Testity.EngineComponents.Tests/bin/Debug/Testity.EngineComponents.Tests.dll
nunit-console ./Testity.EngineMath.Tests/bin/Debug/Testity.EngineMath.Tests.dll
nunit-console ./Testity.BuildProcess.Tests/bin/Debug/Testity.BuildProcess.Tests.dll
nunit-console ./Testity.BuildProcess.Unity3D.Tests/bin/Debug/Testity.BuildProcess.Unity3D.Tests.dll
# Unity3D-facing assemblies must run on the Mono 4.0 framework profile.
nunit-console -framework=Mono-4.0 ./Testity.EngineMath.Unity3D.Tests/bin/Debug/Testity.EngineMath.Unity3D.Tests.dll
nunit-console -framework=Mono-4.0 ./Testity.EngineComponents.Unity3D.Tests/bin/Debug/Testity.EngineComponents.Unity3D.Tests.dll
nunit-console -framework=Mono-4.0 ./Testity.Common.Unity3D.Tests/bin/Debug/Testity.Common.Unity3D.Tests.dll
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.