code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
/*
* Copyright 2014-2021 Real Logic Limited.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.agrona.agent;
import net.bytebuddy.asm.Advice;
import org.agrona.BitUtil;
import org.agrona.DirectBuffer;
/**
 * Interceptor to be applied when verifying buffer alignment accesses.
 *
 * <p>Each nested verifier is woven (via Byte Buddy {@code @Advice.OnMethodEnter})
 * into the {@link DirectBuffer} accessor for the corresponding primitive type and
 * throws a {@code BufferAlignmentException} when the absolute address of the
 * access is not a multiple of the type's size.
 */
@SuppressWarnings("unused")
public class BufferAlignmentInterceptor
{
    /**
     * Base class holding the shared alignment check. Advice code is inlined as
     * bytecode into the instrumented methods, so these methods must remain static
     * and self-contained.
     */
    abstract static class Verifier
    {
        /**
         * Interceptor for type alignment verifier.
         *
         * @param index into the buffer.
         * @param buffer the buffer.
         * @param alignment to be verified (the primitive's size in bytes: 2, 4 or 8).
         */
        public static void verifyAlignment(final int index, final @Advice.This DirectBuffer buffer, final int alignment)
        {
            // The (int) cast truncates the 64-bit absolute address, but for the
            // power-of-two alignments used here divisibility depends only on the low
            // bits, so the zero/non-zero outcome is unaffected.
            // NOTE(review): the reported offset can come out negative when the
            // truncated address has bit 31 set — confirm this is acceptable for the
            // diagnostic message.
            final int alignmentOffset = (int)(buffer.addressOffset() + index) % alignment;
            if (0 != alignmentOffset)
            {
                throw new BufferAlignmentException(
                    "Unaligned " + alignment + "-byte access (index=" + index + ", offset=" + alignmentOffset + ")");
            }
        }
    }

    /**
     * Verifier for {@code long} types.
     */
    public static final class LongVerifier extends Verifier
    {
        /**
         * Verify alignment of the {@code long} types.
         *
         * @param index into the buffer.
         * @param buffer the buffer.
         */
        @Advice.OnMethodEnter
        public static void verifyAlignment(final int index, final @Advice.This DirectBuffer buffer)
        {
            verifyAlignment(index, buffer, BitUtil.SIZE_OF_LONG);
        }
    }

    /**
     * Verifier for {@code double} types.
     */
    public static final class DoubleVerifier extends Verifier
    {
        /**
         * Verify alignment of the {@code double} types.
         *
         * @param index into the buffer.
         * @param buffer the buffer.
         */
        @Advice.OnMethodEnter
        public static void verifyAlignment(final int index, final @Advice.This DirectBuffer buffer)
        {
            verifyAlignment(index, buffer, BitUtil.SIZE_OF_DOUBLE);
        }
    }

    /**
     * Verifier for {@code int} types.
     */
    public static final class IntVerifier extends Verifier
    {
        /**
         * Verify alignment of the {@code int} types.
         *
         * @param index into the buffer.
         * @param buffer the buffer.
         */
        @Advice.OnMethodEnter
        public static void verifyAlignment(final int index, final @Advice.This DirectBuffer buffer)
        {
            verifyAlignment(index, buffer, BitUtil.SIZE_OF_INT);
        }
    }

    /**
     * Verifier for {@code float} types.
     */
    public static final class FloatVerifier extends Verifier
    {
        /**
         * Verify alignment of the {@code float} types.
         *
         * @param index into the buffer.
         * @param buffer the buffer.
         */
        @Advice.OnMethodEnter
        public static void verifyAlignment(final int index, final @Advice.This DirectBuffer buffer)
        {
            verifyAlignment(index, buffer, BitUtil.SIZE_OF_FLOAT);
        }
    }

    /**
     * Verifier for {@code short} types.
     */
    public static final class ShortVerifier extends Verifier
    {
        /**
         * Verify alignment of the {@code short} types.
         *
         * @param index into the buffer.
         * @param buffer the buffer.
         */
        @Advice.OnMethodEnter
        public static void verifyAlignment(final int index, final @Advice.This DirectBuffer buffer)
        {
            verifyAlignment(index, buffer, BitUtil.SIZE_OF_SHORT);
        }
    }

    /**
     * Verifier for {@code char} types.
     */
    public static final class CharVerifier extends Verifier
    {
        /**
         * Verify alignment of the {@code char} types.
         *
         * @param index into the buffer.
         * @param buffer the buffer.
         */
        @Advice.OnMethodEnter
        public static void verifyAlignment(final int index, final @Advice.This DirectBuffer buffer)
        {
            verifyAlignment(index, buffer, BitUtil.SIZE_OF_CHAR);
        }
    }
}
| real-logic/Agrona | agrona-agent/src/main/java/org/agrona/agent/BufferAlignmentInterceptor.java | Java | apache-2.0 | 4,703 |
package com.example.demo;
import java.util.Collection;
import java.util.stream.Stream;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
import org.springframework.cloud.context.config.annotation.RefreshScope;
import org.springframework.context.annotation.Bean;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.repository.query.Param;
import org.springframework.data.rest.core.annotation.RepositoryRestResource;
import org.springframework.data.rest.core.annotation.RestResource;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
@EnableDiscoveryClient
@SpringBootApplication
public class SimpleRestApiApplication {

    public static void main(String[] args) {
        SpringApplication.run(SimpleRestApiApplication.class, args);
    }

    /**
     * Seeds the database with a few demo reservations when the application starts.
     *
     * @param reservationRepository repository used to persist the demo entities
     * @return runner executed once the application context is ready
     */
    @Bean
    CommandLineRunner commandLineRunner(ReservationRepository reservationRepository) {
        return args -> Stream.of("Josh", "Peter", "Susi")
                .map(Reservation::new)
                .forEach(reservationRepository::save);
    }
}
// Exposed automatically as a hypermedia REST resource by Spring Data REST.
@RepositoryRestResource
interface ReservationRepository extends JpaRepository<Reservation, Long>{
    /**
     * Finds all reservations with the given name.
     * Served at {@code /reservations/search/by-name?rn=...}.
     *
     * @param rn the reservation name to match
     * @return the matching reservations (empty collection when none match)
     */
    @RestResource(path="by-name")
    Collection<Reservation> findByReservationName(@Param("rn") String rn);
}
/**
 * JPA entity representing a single named reservation.
 */
@Entity
class Reservation{

    @Id
    @GeneratedValue
    private Long id; // database-generated primary key

    private String reservationName;

    /** No-arg constructor required by JPA. */
    public Reservation() {
    }

    /**
     * Creates a reservation for the given holder name.
     *
     * @param n the reservation holder's name
     */
    public Reservation(String n) {
        this.reservationName= n;
    }

    public Long getId() {
        return id;
    }

    public String getReservationName() {
        return reservationName;
    }

    @Override
    public String toString() {
        // Bug fix: the literal previously omitted the closing '}' brace.
        return "Reservation{" +
                "id= " + id +
                ", reservationName= '" + reservationName + "'" +
                '}';
    }
}
// @RefreshScope lets the `msg` property be re-bound when the config server
// publishes a refresh event, without restarting the application.
@RefreshScope
@RestController
class MessageRestController {

    // Injected from the external config; the default value makes a
    // misconfigured config server visible to the caller.
    @Value("${msg:Hello world - Config Server is not working..pelase check}")
    private String msg;

    /** Returns the current externally-configured message. */
    @RequestMapping("/msg")
    String getMsg() {
        return this.msg;
    }
}
| manueldeveloper/springcloudconfigserver | SimpleRestAPI/src/main/java/com/example/demo/SimpleRestApiApplication.java | Java | apache-2.0 | 2,343 |
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// MODULES //
var config = require( './config.js' );
// VARIABLES //
// List of fixtures that the lint rule under test must accept as valid:
var valid;
// Scratch variable holding the fixture currently being assembled:
var test;
// MAIN //
// Create our test cases:
valid = [];
// Fixture 1: plain function JSDoc with @param, @returns and an @example.
test = {
'code': [
'/**',
'* Squares a number.',
'* ',
'* @param {number} x - input number',
'* @returns {number} x squared',
'*',
'* @example',
'* var y = square( 2.0 );',
'* // returns 4.0',
'*/',
'function square( x ) {',
'	return x*x;',
'}'
].join( '\n' ),
'options': [
{
'config': config
}
]
};
valid.push( test );
// Fixture 2: function with no parameters (only @returns and an @example).
test = {
'code': [
'/**',
'* Returns a pseudo-random number on [0,1].',
'* ',
'* @returns {number} uniform random number',
'*',
'* @example',
'* var y = rand();',
'* // e.g., returns 0.5363925252089496',
'*/',
'function rand() {',
'	return Math.random();',
'}'
].join( '\n' ),
'options': [
{
'config': config
}
]
};
valid.push( test );
// Fixture 3: optional parameters, multiple @throws tags and several @example blocks.
test = {
'code': [
'/**',
'* Returns the number of minutes in a month.',
'*',
'* @param {(string|Date|integer)} [month] - month',
'* @param {integer} [year] - year',
'* @throws {TypeError} first argument must be either a string, integer, or `Date` object',
'* @throws {Error} must provide a recognized month',
'* @throws {RangeError} an integer month argument must be on the interval `[1,12]`',
'* @throws {TypeError} second argument must be an integer',
'* @returns {integer} minutes in a month',
'*',
'* @example',
'* var num = minutesInMonth();',
'* // returns <number>',
'*',
'* @example',
'* var num = minutesInMonth( 2 );',
'* // returns <number>',
'*',
'* @example',
'* var num = minutesInMonth( 2, 2016 );',
'* // returns 41760',
'*',
'* @example',
'* var num = minutesInMonth( 2, 2017 );',
'* // returns 40320',
'*/',
'function minutesInMonth( month, year ) {',
'	var mins;',
'	var mon;',
'	var yr;',
'	var d;',
'	if ( arguments.length === 0 ) {',
'		// Note: cannot cache as application may cross over into a new year:',
'		d = new Date();',
'		mon = d.getMonth() + 1; // zero-based',
'		yr = d.getFullYear();',
'	} else if ( arguments.length === 1 ) {',
'		if ( isDateObject( month ) ) {',
'			d = month;',
'			mon = d.getMonth() + 1; // zero-based',
'			yr = d.getFullYear();',
'		} else if ( isString( month ) || isInteger( month ) ) {',
'			// Note: cannot cache as application may cross over into a new year:',
'			yr = ( new Date() ).getFullYear();',
'			mon = month;',
'		} else {',
'			throw new TypeError( \'invalid argument. First argument must be either a string, integer, or `Date` object. Value: `\'+month+\'`.\' );',
'		}',
'	} else {',
'		if ( !isString( month ) && !isInteger( month ) ) {',
'			throw new TypeError( \'invalid argument. First argument must be either a string or integer. Value: `\'+month+\'`.\' );',
'		}',
'		if ( !isInteger( year ) ) {',
'			throw new TypeError( \'invalid argument. Second argument must be an integer. Value: `\'+year+\'`.\' );',
'		}',
'		mon = month;',
'		yr = year;',
'	}',
'	if ( isInteger( mon ) && (mon < 1 || mon > 12) ) {',
'		throw new RangeError( \'invalid argument. An integer month value must be on the interval `[1,12]`. Value: `\'+mon+\'`.\' );',
'	}',
'	mon = lowercase( mon.toString() );',
'	mins = MINUTES_IN_MONTH[ mon ];',
'	if ( mins === void 0 ) {',
'		throw new Error( \'invalid argument. Must provide a recognized month. Value: `\'+mon+\'`.\' );',
'	}',
'	// Check if February during a leap year...',
'	if ( mins === 40320 && isLeapYear( yr ) ) {',
'		mins += MINUTES_IN_DAY;',
'	}',
'	return mins;',
'}'
].join( '\n' ),
'options': [
{
'config': config
}
]
};
valid.push( test );
// Fixture 4: a "Notes" section with reference-style Markdown links in the description.
test = {
'code': [
'/**',
'* Removes a UTF-8 byte order mark (BOM) from the beginning of a string.',
'*',
'* ## Notes',
'*',
'* - A UTF-8 byte order mark ([BOM][1]) is the byte sequence `0xEF,0xBB,0xBF`.',
'* - To convert a UTF-8 encoded `Buffer` to a `string`, the `Buffer` must be converted to \'[UTF-16][2]. The BOM thus gets converted to the single 16-bit code point `\'\ufeff\'` \'(UTF-16 BOM).',
'*',
'* [1]: https://en.wikipedia.org/wiki/Byte_order_mark#UTF-8',
'* [2]: http://es5.github.io/#x4.3.16',
'*',
'*',
'* @param {string} str - input string',
'* @throws {TypeError} must provide a string primitive',
'* @returns {string} string with BOM removed',
'*',
'* @example',
'* var str = removeUTF8BOM( \'\ufeffbeep\' );',
'* // returns \'beep\'',
'*/',
'function removeUTF8BOM( str ) {',
'	if ( !isString( str ) ) {',
'		throw new TypeError( \'invalid argument. Must provide a string primitive. Value: `\' + str + \'`.\' );',
'	}',
'	if ( str.charCodeAt( 0 ) === BOM ) {',
'		return str.slice( 1 );',
'	}',
'	return str;',
'}'
].join( '\n' ),
'options': [
{
'config': config
}
]
};
valid.push( test );
// Fixture 5: property-style doclet (@name/@memberof/@type) with no description body.
test = {
'code': [
'/**',
'* @name arcsine',
'* @memberof random',
'* @readonly',
'* @type {Function}',
'* @see {@link module:@stdlib/random/base/arcsine}',
'*/',
'setReadOnly( random, \'arcsine\', require( \'@stdlib/random/base/arcsine\' ) );'
].join( '\n' ),
'options': [
{
'config': config
}
]
};
valid.push( test );
// Fixture 6: fenced JavaScript/TeX code blocks, lists, a references section and a table.
test = {
'code': [
'/**',
'* Beep boop.',
'*',
'* Some code...',
'*',
'* ```javascript',
'* var f = foo();',
'* ```',
'*',
'* Some LaTeX...',
'*',
'* ```tex',
'* \\frac{1}{2}',
'* ```',
'*',
'* ## Notes',
'*',
'* - First.',
'* - Second.',
'* - Third.',
'*',
'* ## References',
'*',
'* - Jane Doe. Science. 2017.',
'*',
'* | x | y |',
'* | 1 | 2 |',
'* | 2 | 1 |',
'*',
'*',
'* @param {string} str - input value',
'* @returns {string} output value',
'*',
'* @example',
'* var out = beep( "boop" );',
'* // returns "beepboop"',
'*/',
'function beep( str ) {',
'\treturn "beep" + str;',
'}'
].join( '\n' ),
'options': [
{
'config': config
}
]
};
valid.push( test );
// EXPORTS //
module.exports = valid;
| stdlib-js/stdlib | lib/node_modules/@stdlib/_tools/eslint/rules/jsdoc-markdown-remark/test/fixtures/valid.js | JavaScript | apache-2.0 | 6,669 |
#include <QSettings>
#include <QFileDialog>
#include "adduniquewindow.h"
#include "ui_adduniquewindow.h"
#include "functions.h"
#include "helpers.h"
#include "vendor/json.h"
#include "models/page.h"
#include "models/filename.h"
/**
 * Constructor of the AddUniqueWindow class, generating its window.
 *
 * @param selected Name of the site pre-selected in the sites combo box
 * @param sites Map of the available sites, keyed by display name
 * @param profile User profile providing the settings (save path, filename format)
 * @param parent The parent window
 */
AddUniqueWindow::AddUniqueWindow(QString selected, QMap<QString,Site*> sites, Profile *profile, QWidget *parent)
    : QDialog(parent), ui(new Ui::AddUniqueWindow), m_sites(sites), m_profile(profile)
{
    ui->setupUi(this);

    // Populate the site selector and pre-select the requested site.
    ui->comboSites->addItems(m_sites.keys());
    ui->comboSites->setCurrentIndex(m_sites.keys().indexOf(selected));

    // Pre-fill destination folder and filename format from the user's settings.
    QSettings *settings = profile->getSettings();
    ui->lineFolder->setText(settings->value("Save/path").toString());
    ui->lineFilename->setText(settings->value("Save/filename").toString());
}
/**
* Ui events
*/
void AddUniqueWindow::on_buttonFolder_clicked()
{
QString folder = QFileDialog::getExistingDirectory(this, tr("Choose a save folder"), ui->lineFolder->text());
if (!folder.isEmpty())
{ ui->lineFolder->setText(folder); }
}
/**
 * Validates the filename format as the user types and shows the resulting
 * diagnostic under the field.
 *
 * @param text The new content of the filename field
 */
void AddUniqueWindow::on_lineFilename_textChanged(QString text)
{
    QString message;
    Filename fn(text);
    fn.isValid(&message); // fills `message` with a diagnostic (presumably empty when valid — TODO confirm)
    ui->labelFilename->setText(message);
}
/**
 * Search for image in available websites.
 * Triggered by the "Add" button; queues the download without closing the window.
 */
void AddUniqueWindow::add()
{ ok(false); }
/**
 * Resolves the requested image: either directly from the site's post page URL
 * (when the URL template can be filled from the provided id/md5 and the site can
 * extract image URLs), or through a search query as a fallback.
 *
 * @param close Whether the window should be closed once the image is queued
 */
void AddUniqueWindow::ok(bool close)
{
    Site *site = m_sites[ui->comboSites->currentText()];
    m_close = close;

    // Direct load requires every placeholder of the post URL to be fillable and
    // the site to expose a regex for extracting the image URL.
    bool useDirectLink = true;
    if (
        (site->value("Urls/Html/Post").contains("{id}") && ui->lineId->text().isEmpty()) ||
        (site->value("Urls/Html/Post").contains("{md5}") && ui->lineMd5->text().isEmpty()) ||
        !site->contains("Regex/ImageUrl")
    )
    { useDirectLink = false; }

    if (useDirectLink)
    {
        // Build the post page URL from the template and load its details directly.
        QString url = site->value("Urls/Html/Post");
        url.replace("{id}", ui->lineId->text());
        url.replace("{md5}", ui->lineMd5->text());

        QMap<QString,QString> details = QMap<QString,QString>();
        details.insert("page_url", url);
        details.insert("id", ui->lineId->text());
        details.insert("md5", ui->lineMd5->text());
        details.insert("website", ui->comboSites->currentText());
        // NOTE(review): the Site* pointer is passed as a stringified integer —
        // verify the consumer of "site" expects this representation.
        details.insert("site", QString::number((qintptr)m_sites[ui->comboSites->currentText()]));

        m_image = QSharedPointer<Image>(new Image(site, details, m_profile));
        connect(m_image.data(), &Image::finishedLoadingTags, this, &AddUniqueWindow::addLoadedImage);
        m_image->loadDetails();
    }
    else
    {
        // Fallback: search by id (preferred) or md5, including non-active posts ("status:any").
        m_page = new Page(m_profile, m_sites[ui->comboSites->currentText()], m_sites.values(), QStringList() << (ui->lineId->text().isEmpty() ? "md5:"+ui->lineMd5->text() : "id:"+ui->lineId->text()) << "status:any", 1, 1);
        connect(m_page, SIGNAL(finishedLoading(Page*)), this, SLOT(replyFinished(Page*)));
        m_page->load();
    }
}
/**
 * Signal triggered when the fallback search is finished.
 * Queues the first result, or shows an error when nothing was found.
 *
 * @param p The page containing the search results
 */
void AddUniqueWindow::replyFinished(Page *p)
{
    if (p->images().isEmpty())
    {
        p->deleteLater();
        error(this, tr("No image found."));
        return;
    }

    addImage(p->images().first());
    p->deleteLater();
}
/** Slot invoked once the directly-loaded image has finished loading its tags. */
void AddUniqueWindow::addLoadedImage()
{
    addImage(m_image);
}
/**
 * Emits the download query for the given image (picked up by the listening
 * window) and closes this dialog when requested via ok(true).
 *
 * @param img The image to queue for download
 */
void AddUniqueWindow::addImage(QSharedPointer<Image> img)
{
    emit sendData(DownloadQueryImage(img, m_sites[ui->comboSites->currentText()], ui->lineFilename->text(), ui->lineFolder->text()));

    if (m_close)
        close();
}
| YoukaiCat/imgbrd-grabber | gui/src/batch/adduniquewindow.cpp | C++ | apache-2.0 | 3,454 |
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import * as tf from '../index';
import {ALL_ENVS, describeWithFlags} from '../jasmine_util';
import {expectArraysClose} from '../test_util';
describeWithFlags('sign', ALL_ENVS, () => {
  it('basic', async () => {
    const input = tf.tensor1d([1.5, 0, NaN, -1.4]);
    const result = tf.sign(input);
    expectArraysClose(await result.data(), [1, 0, 0, -1]);
  });

  it('does not propagate NaNs', async () => {
    const input = tf.tensor1d([1.5, NaN, -1.4]);
    const result = tf.sign(input);
    expectArraysClose(await result.data(), [1, 0, -1]);
  });

  it('gradients: Scalar', async () => {
    const x = tf.scalar(5.2);
    const dy = tf.scalar(3);
    const grads = tf.grad(v => tf.sign(v))(x, dy);

    expect(grads.shape).toEqual(x.shape);
    expect(grads.dtype).toEqual('float32');
    expectArraysClose(await grads.data(), [0]);
  });

  it('gradient with clones', async () => {
    const x = tf.scalar(5.2);
    const dy = tf.scalar(3);
    const grads = tf.grad(v => tf.sign(v.clone()).clone())(x, dy);

    expect(grads.shape).toEqual(x.shape);
    expect(grads.dtype).toEqual('float32');
    expectArraysClose(await grads.data(), [0]);
  });

  it('gradients: Tensor1D', async () => {
    const x = tf.tensor1d([-1.1, 2.6, 3, -5.9]);
    const dy = tf.tensor1d([-1, 1, 1, -1]);
    const grads = tf.grad(v => tf.sign(v))(x, dy);

    expect(grads.shape).toEqual(x.shape);
    expect(grads.dtype).toEqual('float32');
    expectArraysClose(await grads.data(), [0, 0, 0, 0]);
  });

  it('gradients: Tensor2D', async () => {
    const x = tf.tensor2d([-3, 1, 2.2, 3], [2, 2]);
    const dy = tf.tensor2d([1, 2, 3, 4], [2, 2]);
    const grads = tf.grad(v => tf.sign(v))(x, dy);

    expect(grads.shape).toEqual(x.shape);
    expect(grads.dtype).toEqual('float32');
    expectArraysClose(await grads.data(), [0, 0, 0, 0]);
  });

  it('throws when passed a non-tensor', () => {
    expect(() => tf.sign({} as tf.Tensor))
        .toThrowError(/Argument 'x' passed to 'sign' must be a Tensor/);
  });

  it('accepts a tensor-like object', async () => {
    const result = tf.sign([1.5, 0, NaN, -1.4]);
    expectArraysClose(await result.data(), [1, 0, 0, -1]);
  });

  it('throws for string tensor', () => {
    expect(() => tf.sign('q'))
        .toThrowError(/Argument 'x' passed to 'sign' must be numeric/);
  });
});
| tensorflow/tfjs | tfjs-core/src/ops/sign_test.ts | TypeScript | apache-2.0 | 3,063 |
/*
* Copyright 2015-2016 OpenCB
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.opencb.opencga.storage.alignment.hbase;
import com.google.protobuf.InvalidProtocolBufferException;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.opencb.biodata.models.alignment.Alignment;
import org.opencb.biodata.models.alignment.stats.RegionCoverage;
import org.opencb.biodata.models.feature.Region;
import org.opencb.commons.containers.QueryResult;
import org.opencb.commons.containers.map.QueryOptions;
import org.opencb.opencga.core.auth.MonbaseCredentials;
import org.opencb.opencga.storage.alignment.AlignmentQueryBuilder;
import org.opencb.opencga.storage.alignment.AlignmentSummary;
import org.opencb.opencga.storage.alignment.proto.AlignmentProto;
import org.opencb.opencga.storage.alignment.proto.AlignmentProtoHelper;
import org.xerial.snappy.Snappy;
/**
 * HBase implementation of {@link AlignmentQueryBuilder}: retrieves alignments,
 * per-bucket summaries and coverage information stored by the alignment loaders.
 *
 * <p>Alignment buckets are Snappy-compressed protobuf blobs keyed by
 * {@code <chromosome>_<position / bucketSize>} row keys (see {@link AlignmentHBase}).
 */
public class AlignmentHBaseQueryBuilder implements AlignmentQueryBuilder {

    HBaseManager manager;
    String tableName, columnFamilyName = null;

    /** Opens the underlying HBase connection. */
    public void connect(){
        manager.connect();
    }

    /**
     * Creates a query builder for the given table.
     *
     * <p>Note: this constructor does not configure an {@link HBaseManager};
     * callers must ensure one is available before querying.
     *
     * @param tableName name of the HBase table holding the alignments
     */
    public AlignmentHBaseQueryBuilder(String tableName) {
        // Bug fix: the argument used to be silently discarded, leaving tableName null.
        this.tableName = tableName;
    }

    /**
     * Creates a query builder backed by a manager built from the given configuration.
     *
     * @param config HBase/Hadoop configuration
     * @param tableName name of the HBase table holding the alignments
     */
    public AlignmentHBaseQueryBuilder(Configuration config, String tableName) {
        manager = new HBaseManager(config);
        this.tableName = tableName;
    }

    /**
     * Fetches all alignments overlapping the given region.
     *
     * <p>Recognized options: {@code sample} (column qualifier, default "HG00096")
     * and {@code family} (column family, default "c").
     *
     * @param region genomic region to query
     * @param options query options (sample, family)
     * @return the alignments, or {@code null} when the table cannot be opened or the scan fails
     */
    @Override
    public QueryResult getAllAlignmentsByRegion(Region region, QueryOptions options) {
        boolean wasOpened = true;
        if (!manager.isOpened()) {
            manager.connect();
            wasOpened = false;
        }
        HTable table = manager.getTable(tableName);
        if (table == null) {
            return null;
        }
        QueryResult<Alignment> queryResult = new QueryResult<>();

        String sample = options.getString("sample", "HG00096");
        String family = options.getString("family", "c");
        int bucketSize = 256; // FIXME: hard-coded; should come from table metadata

        String startRow = AlignmentHBase.getBucketRowkey(region.getChromosome(), region.getStart(), bucketSize);
        String endRow = AlignmentHBase.getBucketRowkey(region.getChromosome(), region.getEnd() + bucketSize, bucketSize);
        System.out.println("Scaning from " + startRow + " to " + endRow);

        Scan scan = new Scan();
        scan.setStartRow(Bytes.toBytes(startRow));
        scan.setStopRow(Bytes.toBytes(endRow));
        scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(sample));
        scan.setMaxVersions(1);

        // Summaries are shared between buckets; cache them by summary index.
        Map<Integer, AlignmentSummary> summaryMap = new HashMap<>();
        // Resource fix: the scanner was previously never closed.
        try (ResultScanner resultScanner = table.getScanner(scan)) {
            for (Result result : resultScanner) {
                for (Cell cell : result.listCells()) {
                    AlignmentProto.AlignmentBucket alignmentBucket;
                    try {
                        // InvalidProtocolBufferException is an IOException, so one catch covers both.
                        alignmentBucket = AlignmentProto.AlignmentBucket.parseFrom(
                                Snappy.uncompress(CellUtil.cloneValue(cell)));
                    } catch (IOException e) {
                        e.printStackTrace();
                        continue; // skip undecodable buckets
                    }
                    if (!summaryMap.containsKey(alignmentBucket.getSummaryIndex())) {
                        summaryMap.put(alignmentBucket.getSummaryIndex(),
                                getRegionSummary(region.getChromosome(), alignmentBucket.getSummaryIndex(), table));
                    }
                    AlignmentSummary summary = summaryMap.get(alignmentBucket.getSummaryIndex());
                    // NOTE(review): getRowArray() returns the cell's whole backing array,
                    // not just the row bytes — presumably getPositionFromRowkey tolerates
                    // the trailing content; verify, or switch to CellUtil.cloneRow().
                    long pos = AlignmentHBase.getPositionFromRowkey(
                            Bytes.toString(cell.getRowArray()), bucketSize);
                    for (Alignment alignment
                            : AlignmentProtoHelper.toAlignmentList(alignmentBucket, summary, region.getChromosome(), pos)) {
                        queryResult.addResult(alignment);
                    }
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
            System.err.println("[ERROR] -- Bad query");
            return null;
        }

        if (!wasOpened) {
            manager.disconnect();
        }
        return queryResult;
    }

    /**
     * Loads the alignment summary with the given index for a chromosome.
     *
     * @param chromosome chromosome name
     * @param index summary index referenced by an alignment bucket
     * @param table open table to scan
     * @return the summary, or {@code null} when the scan fails or no summary row exists
     */
    private AlignmentSummary getRegionSummary(String chromosome, int index, HTable table) {
        Scan scan = new Scan(
                Bytes.toBytes(AlignmentHBase.getSummaryRowkey(chromosome, index)),
                Bytes.toBytes(AlignmentHBase.getSummaryRowkey(chromosome, index + 1)));

        AlignmentSummary summary = null;
        // Resource fix: close the scanner; also use listCells()/CellUtil instead of
        // the deprecated Result.list()/KeyValue, consistent with the other methods.
        try (ResultScanner resultScanner = table.getScanner(scan)) {
            for (Result result : resultScanner) {
                for (Cell cell : result.listCells()) {
                    try {
                        summary = new AlignmentSummary(
                                AlignmentProto.Summary.parseFrom(Snappy.uncompress(CellUtil.cloneValue(cell))), index);
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
            System.err.println("[ERROR] -- Bad query");
            return null;
        }
        return summary;
    }

    /** Not implemented. */
    @Override
    public QueryResult getAllAlignmentsByGene(String gene, QueryOptions options) {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    /**
     * Scans the coverage column for the given region.
     *
     * <p>TODO: the decoded {@code Coverage} protobuf is currently discarded and an
     * empty result is returned — the conversion to {@link RegionCoverage} is not
     * implemented yet (behavior preserved from the original implementation).
     *
     * @param region genomic region to query
     * @param options query options (sample, family)
     * @return an (empty) query result, or {@code null} when the scan fails
     */
    @Override
    public QueryResult getCoverageByRegion(Region region, QueryOptions options) {
        QueryResult<RegionCoverage> queryResult = new QueryResult<>();
        if (!manager.isOpened()) {
            manager.connect();
        }
        String sample = options.getString("sample", "HG00096");
        String family = options.getString("family", "c");
        int bucketSize = 256; // FIXME: hard-coded; should come from table metadata

        String startRow = AlignmentHBase.getBucketRowkey(region.getChromosome(), region.getStart(), bucketSize);
        String endRow = AlignmentHBase.getBucketRowkey(region.getChromosome(), region.getEnd() + bucketSize, bucketSize);

        HTable table = manager.getTable(tableName);
        Scan scan = new Scan();
        scan.setStartRow(Bytes.toBytes(startRow));
        scan.setStopRow(Bytes.toBytes(endRow));
        scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(sample));
        scan.setMaxVersions(1);

        // Resource fix: the scanner was previously never closed.
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                for (Cell cell : result.listCells()) {
                    try {
                        // Parsed only to surface decoding errors; result is not yet consumed.
                        AlignmentProto.Coverage.parseFrom(CellUtil.cloneValue(cell));
                    } catch (InvalidProtocolBufferException ex) {
                        Logger.getLogger(AlignmentHBaseQueryBuilder.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        } catch (IOException ex) {
            Logger.getLogger(AlignmentHBaseQueryBuilder.class.getName()).log(Level.SEVERE, null, ex);
            return null;
        }
        return queryResult;
    }

    /** Not implemented. */
    @Override
    public QueryResult getAlignmentsHistogramByRegion(Region region, boolean histogramLogarithm, int histogramMax) {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    /** Not implemented. */
    @Override
    public QueryResult getAlignmentRegionInfo(Region region, QueryOptions options) {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    public String getTableName() {
        return tableName;
    }

    public void setTableName(String tableName) {
        this.tableName = tableName;
    }

    public String getColumnFamilyName() {
        return columnFamilyName;
    }

    public void setColumnFamilyName(String columnFamilyName) {
        this.columnFamilyName = columnFamilyName;
    }
}
| javild/opencga | opencga-storage/src/main/java/org/opencb/opencga/storage/alignment/hbase/AlignmentHBaseQueryBuilder.java | Java | apache-2.0 | 10,790 |
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.step.finscholar;
import com.google.step.finscholar.data.College;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** This class tests the College object's methods and behavior. */
@RunWith(JUnit4.class)
public final class CollegeTest {
private static College college;
private static List<UUID> users = new ArrayList<UUID>();
@BeforeClass
public static void setUp() {
users.add(UUID.randomUUID());
users.add(UUID.randomUUID());
college = new College.CollegeBuilder("Duke University")
.setInstitutionType("Private")
.setAcceptanceRate(0.07)
.setAverageACTScore(33)
.setUsersUUIDList(users)
.setTotalCostAttendance(75000)
.setNetCostForFirstQuintile(2000)
.setNetCostForSecondQuintile(5000)
.setNetCostForThirdQuintile(10000)
.setNetCostForFourthQuintile(20000)
.setNetCostForFifthQuintile(30000)
.setCumulativeMedianDebt(10000)
.build();
}
@Test
public void schoolNameCorrectlySet() {
String expected = "Duke University";
String actual = college.getSchoolName();
Assert.assertEquals(expected, actual);
}
@Test
public void isUUIDGenerated() {
UUID actual = college.getCollegeUuid();
Assert.assertNotNull(actual);
}
@Test
public void institutionTypeCorrectlySet() {
String expected = "Private";
String actual = college.getInstitutionType();
Assert.assertEquals(expected, actual);
}
@Test
public void acceptanceRateCorrectlySet() {
double expected = 0.07;
double actual = college.getAcceptanceRate();
Assert.assertEquals(expected, actual, 0);
}
@Test
public void ACTScoreCorrectlySet() {
double expected = 33;
double actual = college.getAverageACTScore();
Assert.assertEquals(expected, actual, 0);
}
@Test
public void usersListCorrectlySet() {
List<UUID> actual = college.getUsersUUIDList();
Assert.assertEquals(users, actual);
}
@Test
public void totalCostCorrectlySet() {
int expected = 75000;
int actual = college.getTotalCostAttendance();
Assert.assertEquals(expected, actual);
}
@Test
public void firstNetCostCorrectlySet() {
int expected = 2000;
int actual = college.getNetCostForFirstQuintile();
Assert.assertEquals(expected, actual);
}
@Test
public void secondNetCostCorrectlySet() {
int expected = 5000;
int actual = college.getNetCostForSecondQuintile();
Assert.assertEquals(expected, actual);
}
@Test
public void thirdNetCostCorrectlySet() {
int expected = 10000;
int actual = college.getNetCostForThirdQuintile();
Assert.assertEquals(expected, actual);
}
@Test
public void fourthNetCostCorrectlySet() {
int expected = 20000;
int actual = college.getNetCostForFourthQuintile();
Assert.assertEquals(expected, actual);
}
@Test
public void fifthNetCostCorrectlySet() {
int expected = 30000;
int actual = college.getNetCostForFifthQuintile();
Assert.assertEquals(expected, actual);
}
@Test
public void medianDebtCorrectlySet() {
int expected = 10000;
int actual = college.getCumulativeMedianDebt();
Assert.assertEquals(expected, actual);
}
} | googleinterns/step133-2020 | src/test/java/com/google/step/finscholar/CollegeTest.java | Java | apache-2.0 | 3,967 |
"""Check that available RPM packages match the required versions."""
from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
    """Check that available RPM packages match the required versions."""

    name = "package_version"
    tags = ["preflight"]

    # Supported Open vSwitch version(s) per (major, minor) OpenShift release.
    # NOTE: versions outside those specified are mapped to least/greatest
    openshift_to_ovs_version = {
        (3, 4): "2.4",
        (3, 5): ["2.6", "2.7"],
        (3, 6): ["2.6", "2.7", "2.8", "2.9"],
        (3, 7): ["2.6", "2.7", "2.8", "2.9"],
        (3, 8): ["2.6", "2.7", "2.8", "2.9"],
        (3, 9): ["2.6", "2.7", "2.8", "2.9"],
        (3, 10): ["2.6", "2.7", "2.8", "2.9"],
    }

    # Supported Docker version(s) per (major, minor) OpenShift release.
    openshift_to_docker_version = {
        (3, 1): "1.8",
        (3, 2): "1.10",
        (3, 3): "1.10",
        (3, 4): "1.12",
        (3, 5): "1.12",
        (3, 6): "1.12",
        (3, 7): "1.12",
        (3, 8): "1.12",
        (3, 9): ["1.12", "1.13"],
    }

    def is_active(self):
        """Skip hosts that do not have package requirements."""
        group_names = self.get_var("group_names", default=[])
        master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
        return super(PackageVersion, self).is_active() and master_or_node

    def run(self):
        """Build the package requirement list and verify it via the aos_version module."""
        # Service/package name prefix (e.g. "origin"); it may contain a Jinja
        # template, so render it when a templar is available.
        rpm_prefix = self.get_var("openshift_service_type")
        if self._templar is not None:
            rpm_prefix = self._templar.template(rpm_prefix)
        openshift_release = self.get_var("openshift_release", default='')
        deployment_type = self.get_var("openshift_deployment_type")
        # Only enterprise deployments allow a package to match across multiple
        # minor releases; docker/openvswitch are always pinned (check_multi=False).
        check_multi_minor_release = deployment_type in ['openshift-enterprise']

        args = {
            "package_mgr": self.get_var("ansible_pkg_mgr"),
            "package_list": [
                {
                    "name": "openvswitch",
                    "version": self.get_required_ovs_version(),
                    "check_multi": False,
                },
                {
                    "name": "docker",
                    "version": self.get_required_docker_version(),
                    "check_multi": False,
                },
                {
                    "name": "{}".format(rpm_prefix),
                    "version": openshift_release,
                    "check_multi": check_multi_minor_release,
                },
                {
                    "name": "{}-master".format(rpm_prefix),
                    "version": openshift_release,
                    "check_multi": check_multi_minor_release,
                },
                {
                    "name": "{}-node".format(rpm_prefix),
                    "version": openshift_release,
                    "check_multi": check_multi_minor_release,
                },
            ],
        }
        return self.execute_module_with_retries("aos_version", args)

    def get_required_ovs_version(self):
        """Return the correct Open vSwitch version(s) for the current OpenShift version."""
        # get_required_version is provided by the OpenShiftCheck base class.
        return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version)

    def get_required_docker_version(self):
        """Return the correct Docker version(s) for the current OpenShift version."""
        return self.get_required_version("Docker", self.openshift_to_docker_version)
| wbrefvem/openshift-ansible | roles/openshift_health_checker/openshift_checks/package_version.py | Python | apache-2.0 | 3,386 |
#include "testing/testing.hpp"
#include "geocoder/geocoder.hpp"
#include "indexer/search_string_utils.hpp"
#include "platform/platform_tests_support/scoped_file.hpp"
#include "base/geo_object_id.hpp"
#include "base/math.hpp"
#include "base/stl_helpers.hpp"
#include <algorithm>
#include <string>
#include <vector>
using namespace platform::tests_support;
using namespace std;
namespace
{
// Absolute tolerance used when comparing floating-point certainties.
double const kCertaintyEps = 1e-6;
// Regions fixture in the jsonl format consumed by geocoder::Geocoder:
// one "<encoded osm id> <GeoJSON feature>" record per line.
string const kRegionsData = R"#(
-4611686018427080071 {"type": "Feature", "geometry": {"type": "Point", "coordinates": [-80.1142033187951, 21.55511095]}, "properties": {"name": "Cuba", "rank": 2, "address": {"country": "Cuba"}}}
-4611686018425533273 {"type": "Feature", "geometry": {"type": "Point", "coordinates": [-78.7260117405499, 21.74300205]}, "properties": {"name": "Ciego de Ávila", "rank": 4, "address": {"region": "Ciego de Ávila", "country": "Cuba"}}}
-4611686018421500235 {"type": "Feature", "geometry": {"type": "Point", "coordinates": [-78.9263054493181, 22.08185765]}, "properties": {"name": "Florencia", "rank": 6, "address": {"subregion": "Florencia", "region": "Ciego de Ávila", "country": "Cuba"}}}
)#";

// Runs |s| through search::NormalizeAndTokenizeAsUtf8 and returns the tokens.
geocoder::Tokens Split(string const & s)
{
  geocoder::Tokens result;
  search::NormalizeAndTokenizeAsUtf8(s, result);
  return result;
}
}  // namespace
namespace geocoder
{
// Runs |query| through |geocoder| and checks that the results match |expected|:
// same set of osm ids and, per id, certainty equal within kCertaintyEps.
void TestGeocoder(Geocoder & geocoder, string const & query, vector<Result> && expected)
{
  vector<Result> actual;
  geocoder.ProcessQuery(query, actual);
  TEST_EQUAL(actual.size(), expected.size(), (query, actual, expected));
  // Result order is not part of the contract, so compare after sorting both
  // sides by osm id.
  sort(actual.begin(), actual.end(), base::LessBy(&Result::m_osmId));
  sort(expected.begin(), expected.end(), base::LessBy(&Result::m_osmId));
  for (size_t i = 0; i < actual.size(); ++i)
  {
    // Certainty must always be a valid probability-like score in [0, 1].
    TEST(actual[i].m_certainty >= 0.0 && actual[i].m_certainty <= 1.0,
         (query, actual[i].m_certainty));
    TEST_EQUAL(actual[i].m_osmId, expected[i].m_osmId, (query));
    TEST(base::AlmostEqualAbs(actual[i].m_certainty, expected[i].m_certainty, kCertaintyEps),
         (query, actual[i].m_certainty, expected[i].m_certainty));
  }
}
// Basic sanity: a bare name resolves to the most specific region, and extra
// query words lower (but do not remove) the certainty of coarser matches.
UNIT_TEST(Geocoder_Smoke)
{
  ScopedFile const regionsJsonFile("regions.jsonl", kRegionsData);
  Geocoder geocoder(regionsJsonFile.GetFullPath());

  // Ids are the encoded forms of the osm ids in kRegionsData.
  base::GeoObjectId const florenciaId(0xc00000000059d6b5);
  base::GeoObjectId const cubaId(0xc00000000004b279);

  TestGeocoder(geocoder, "florencia", {{florenciaId, 1.0}});
  TestGeocoder(geocoder, "cuba florencia", {{florenciaId, 1.0}, {cubaId, 0.714286}});
  TestGeocoder(geocoder, "florencia somewhere in cuba", {{cubaId, 0.714286}, {florenciaId, 1.0}});
}
// Checks that the loaded hierarchy exposes the full normalized address chain
// (country/region/subregion) for a leaf entry.
UNIT_TEST(Geocoder_Hierarchy)
{
  ScopedFile const regionsJsonFile("regions.jsonl", kRegionsData);
  Geocoder geocoder(regionsJsonFile.GetFullPath());
  auto entries = geocoder.GetHierarchy().GetEntries({("florencia")});

  TEST(entries, ());
  TEST_EQUAL(entries->size(), 1, ());
  // Note: "ciego de avila" — tokens are compared after normalization, which
  // drops the diacritic from "Ávila".
  TEST_EQUAL((*entries)[0]->m_address[static_cast<size_t>(Type::Country)], Split("cuba"), ());
  TEST_EQUAL((*entries)[0]->m_address[static_cast<size_t>(Type::Region)], Split("ciego de avila"),
             ());
  TEST_EQUAL((*entries)[0]->m_address[static_cast<size_t>(Type::Subregion)], Split("florencia"),
             ());
}
// Checks that a numeric token after a street only matches an actual building
// on that street (no cross-street borrowing of house numbers).
UNIT_TEST(Geocoder_OnlyBuildings)
{
  string const kData = R"#(
10 {"properties": {"address": {"locality": "Some Locality"}}}

21 {"properties": {"address": {"street": "Good", "locality": "Some Locality"}}}
22 {"properties": {"address": {"building": "5", "street": "Good", "locality": "Some Locality"}}}

31 {"properties": {"address": {"street": "Bad", "locality": "Some Locality"}}}
32 {"properties": {"address": {"building": "10", "street": "Bad", "locality": "Some Locality"}}}

40 {"properties": {"address": {"street": "MaybeNumbered", "locality": "Some Locality"}}}
41 {"properties": {"address": {"street": "MaybeNumbered-3", "locality": "Some Locality"}}}
42 {"properties": {"address": {"building": "3", "street": "MaybeNumbered", "locality": "Some Locality"}}}
)#";
  ScopedFile const regionsJsonFile("regions.jsonl", kData);
  Geocoder geocoder(regionsJsonFile.GetFullPath());

  base::GeoObjectId const localityId(10);
  base::GeoObjectId const goodStreetId(21);
  base::GeoObjectId const badStreetId(31);
  base::GeoObjectId const building5(22);
  base::GeoObjectId const building10(32);

  TestGeocoder(geocoder, "some locality", {{localityId, 1.0}});
  TestGeocoder(geocoder, "some locality good", {{goodStreetId, 1.0}, {localityId, 0.857143}});
  TestGeocoder(geocoder, "some locality bad", {{badStreetId, 1.0}, {localityId, 0.857143}});

  TestGeocoder(geocoder, "some locality good 5", {{building5, 1.0}});
  TestGeocoder(geocoder, "some locality bad 10", {{building10, 1.0}});

  // There is a building "10" on Bad Street but we should not return it.
  // Another possible resolution would be to return just "Good Street" (relaxed matching)
  // but at the time of writing the goal is to either have an exact match or no match at all.
  TestGeocoder(geocoder, "some locality good 10", {});

  // Sometimes we may still emit a non-building.
  // In this case it happens because all query tokens are used.
  base::GeoObjectId const numberedStreet(41);
  base::GeoObjectId const houseOnANonNumberedStreet(42);
  TestGeocoder(geocoder, "some locality maybenumbered 3",
               {{numberedStreet, 1.0}, {houseOnANonNumberedStreet, 0.8875}});
}
// Checks that a house number from one locality's street is not matched when
// the query names a different locality with a same-named street.
UNIT_TEST(Geocoder_MismatchedLocality)
{
  string const kData = R"#(
10 {"properties": {"address": {"locality": "Moscow"}}}
11 {"properties": {"address": {"locality": "Paris"}}}

21 {"properties": {"address": {"street": "Street", "locality": "Moscow"}}}
22 {"properties": {"address": {"building": "2", "street": "Street", "locality": "Moscow"}}}

31 {"properties": {"address": {"street": "Street", "locality": "Paris"}}}
32 {"properties": {"address": {"building": "3", "street": "Street", "locality": "Paris"}}}
)#";
  ScopedFile const regionsJsonFile("regions.jsonl", kData);
  Geocoder geocoder(regionsJsonFile.GetFullPath());

  base::GeoObjectId const building2(22);

  TestGeocoder(geocoder, "Moscow Street 2", {{building2, 1.0}});

  // "Street 3" looks almost like a match to "Paris-Street-3" but we should not emit it.
  TestGeocoder(geocoder, "Moscow Street 3", {});
}
} // namespace geocoder
| bykoianko/omim | geocoder/geocoder_tests/geocoder_tests.cpp | C++ | apache-2.0 | 6,292 |
package com.emf.flickring.manager;
import static com.emf.flickring.Command.Response.END;
import static com.emf.flickring.deploy.DeployModule.Constant.API_KEY;
import static com.emf.flickring.deploy.DeployModule.Constant.BASE_PICS_DIR;
import static com.emf.flickring.deploy.DeployModule.Constant.SECRET;
import static com.emf.flickring.deploy.DeployModule.Constant.SECRET_KEY;
import static com.emf.flickring.deploy.DeployModule.Constant.TOKEN;
import static com.emf.flickring.deploy.DeployModule.Constant.USER_ID;
import java.io.File;
import java.io.IOException;
import java.util.Scanner;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.FileConfiguration;
import org.scribe.exceptions.OAuthException;
import org.scribe.model.Token;
import org.scribe.model.Verifier;
import com.emf.flickring.Command;
import com.emf.flickring.deploy.DeployModule;
import com.emf.flickring.model.ConfigInput;
import com.flickr4java.flickr.Flickr;
import com.flickr4java.flickr.FlickrException;
import com.flickr4java.flickr.RequestContext;
import com.flickr4java.flickr.auth.Auth;
import com.flickr4java.flickr.auth.AuthInterface;
import com.flickr4java.flickr.auth.Permission;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.io.Files;
import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
@Slf4j
public class ApiKeysCommand implements Command {
private final FileConfiguration config;
private final Flickr flickr;
@Inject
public ApiKeysCommand(final Configuration config, final Flickr flickr) {
this.config = (FileConfiguration) config;
this.flickr = flickr;
}
@Override
public Response process(final Chain chain) {
log.debug("Running api keys command...");
String apikey = config.getString(API_KEY, null);
String secret = config.getString(SECRET_KEY, null);
String tokenValue = config.getString(TOKEN, null);
String basePicsDir = config.getString(BASE_PICS_DIR, null);
String userId = config.getString(USER_ID, null);
final File configFile = config.getFile();
if (!configFile.exists()) {
log.debug("Config file does not exist");
try {
Files.touch(configFile);
} catch (IOException e) {
log.warn("Could not create config file. {}", e.getMessage());
return END;
}
}
final Scanner scanner = new Scanner(System.in);
try {
// Check api key
if (Strings.isNullOrEmpty(apikey)) {
log.debug("Api key is null");
final ConfigInput apiKeyInput = ConfigInput.builder().label("Enter your Flickr api key:").scanner(scanner).build();
apiKeyInput.read();
apikey = apiKeyInput.getInputedValue();
if (Strings.isNullOrEmpty(apikey)) {
log.warn("Api key is empty...");
return END;
} else {
config.addProperty(API_KEY, apikey);
}
} else {
log.debug("Api key is not null");
}
// Check secret key
if (Strings.isNullOrEmpty(secret)) {
log.debug("Secret key is null");
final ConfigInput secretKeyInput = ConfigInput.builder().label("Enter your Flickr secret key:").scanner(scanner).build();
secretKeyInput.read();
secret = secretKeyInput.getInputedValue();
if (Strings.isNullOrEmpty(secret)) {
log.warn("Secret key is empty...");
return END;
} else {
config.addProperty(SECRET_KEY, secret);
}
} else {
log.debug("Secret key is not null");
}
// Check token
final RequestContext requestContext = RequestContext.getRequestContext();
Flickr.debugStream = false;
flickr.setApiKey(config.getString(API_KEY));
flickr.setSharedSecret(config.getString(SECRET_KEY));
final AuthInterface authInterface = flickr.getAuthInterface();
if (Strings.isNullOrEmpty(tokenValue)) {
log.debug("Flickr token is null");
final Token token = authInterface.getRequestToken();
final String url = authInterface.getAuthorizationUrl(token, Permission.WRITE);
final StringBuilder label = new StringBuilder("Follow this URL to authorise yourself on Flickr");
label.append("\n")
.append(url).append("\n")
.append("Paste in the token it gives you:").append("\n");
final ConfigInput tokenInput = ConfigInput.builder().label(label.toString()).scanner(scanner).build();
tokenInput.read();
tokenValue = tokenInput.getInputedValue();
final Token requestToken = authInterface.getAccessToken(token, new Verifier(tokenValue));
// Check userId
if (Strings.isNullOrEmpty(userId)) {
try {
final Auth auth = authInterface.checkToken(requestToken);
userId = auth.getUser().getId();
if (Strings.isNullOrEmpty(userId)) {
log.error("Could not get user id.");
return END;
}
config.addProperty(USER_ID, userId);
requestContext.setAuth(auth);
log.info("User is authenticated.");
} catch (FlickrException e) {
log.error("Could not get user id", e);
return END;
}
}
if (requestToken == null || requestToken.isEmpty()) {
log.debug("User token is empty...");
return END;
} else {
config.addProperty(TOKEN, requestToken.getToken());
config.addProperty(SECRET, requestToken.getSecret());
log.info("Authentication success");
}
} else {
log.debug("Authenticating flickr user...");
Auth auth;
try {
auth = authInterface.checkToken(config.getString(TOKEN), config.getString(SECRET));
log.debug("User is authenticated.");
requestContext.setAuth(auth);
} catch (FlickrException e) {
log.error("Could not authenticate user", e);
}
}
log.debug("Checking base pictures directory");
// Check base pictures dir
if (Strings.isNullOrEmpty(basePicsDir)) {
final ConfigInput basePicsDirInput = ConfigInput.builder().label("Enter your pictures folder location:").scanner(scanner).build();
basePicsDirInput.read();
basePicsDir = basePicsDirInput.getInputedValue();
if (Strings.isNullOrEmpty(basePicsDir)) {
log.error("Base pictures location is empty...");
return END;
} else {
config.addProperty(BASE_PICS_DIR, basePicsDir);
}
log.debug("Base picture dir: {}", basePicsDir);
}
} catch (OAuthException ex) {
log.error("Could not authenticate user.", ex);
} finally {
scanner.close();
}
log.debug("Calling next in charge in the chain...");
return chain.execute();
}
@Override
public void stop() {
// Does nothing
}
public static void main(final String[] args) {
Preconditions.checkNotNull(args);
Preconditions.checkArgument(args.length > 0);
final Injector injector = Guice.createInjector(new DeployModule(args[0]));
final ApiKeysCommand command = injector.getInstance(ApiKeysCommand.class);
command.process(new Chain() {
@SuppressWarnings("unchecked")
@Override
public <T> T execute() {
log.info("API Keys Command is success");
return (T) Response.SUCCESS;
}
@Override
public void breakIt() {
log.info("API Keys Command has failed");
}
});
}
}
| edersonmf/flickring | src/main/java/com/emf/flickring/manager/ApiKeysCommand.java | Java | apache-2.0 | 7,638 |
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package com.github.am0e.utils;
/**
 * Generic callback invoked with a single context object.
 *
 * <p>Marked {@code @FunctionalInterface} so implementations can be supplied as
 * lambdas or method references, and so the single-abstract-method contract is
 * enforced at compile time. This is a backward-compatible addition.
 *
 * @param <T> type of the context passed to {@link #handle(Object)}.
 */
@FunctionalInterface
public interface Handler<T> {
    /**
     * Handles the given context.
     *
     * @param ctx the context object to process.
     * @throws Exception if handling fails; callers decide how to recover.
     */
    public void handle(T ctx) throws Exception;
}
| am0e/commons | src/main/java/com/github/am0e/utils/Handler.java | Java | apache-2.0 | 1,071 |
from .layout_helpers import *
| armstrong/armstrong.core.arm_layout | tests/templatetags/__init__.py | Python | apache-2.0 | 30 |
package wx.sunl.entry;
/**
* WeixinUserInfo(΢ÐÅÓû§µÄ»ù±¾ÐÅÏ¢)
* @author Youngman
*/
/**
 * Credentials returned by WeChat's OAuth2 web-authorization endpoint.
 * (Original comments were mojibake-garbled Chinese; translated to English.
 * NOTE(review): the original class comment referenced "WeixinUserInfo" while
 * this class models the OAuth2 token response — confirm intent.)
 * @author Youngman
 */
public class WeixinOauth2Token {
    // Access token used to call the web-authorization interfaces.
    private String accessToken;
    // Validity period of the access token (presumably in seconds — confirm against the WeChat API).
    private int expiresIn;
    // Token used to refresh the access token.
    private String refreshToken;
    // Unique identifier (OpenID) of the authorizing user.
    private String openId;
    // Scope(s) the user granted.
    private String scope;
    public String getAccessToken() {
        return accessToken;
    }
    public void setAccessToken(String accessToken) {
        this.accessToken = accessToken;
    }
    public int getExpiresIn() {
        return expiresIn;
    }
    public void setExpiresIn(int expiresIn) {
        this.expiresIn = expiresIn;
    }
    public String getRefreshToken() {
        return refreshToken;
    }
    public void setRefreshToken(String refreshToken) {
        this.refreshToken = refreshToken;
    }
    public String getOpenId() {
        return openId;
    }
    public void setOpenId(String openId) {
        this.openId = openId;
    }
    public String getScope() {
        return scope;
    }
    public void setScope(String scope) {
        this.scope = scope;
    }
}
| Youngman619/cwkz | src/wx/sunl/entry/WeixinOauth2Token.java | Java | apache-2.0 | 1,035 |
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.email.provider;
import com.android.emailcommon.Logging;
import com.android.emailcommon.internet.MimeUtility;
import com.android.emailcommon.provider.EmailContent;
import com.android.emailcommon.provider.EmailContent.Attachment;
import com.android.emailcommon.provider.EmailContent.AttachmentColumns;
import com.android.emailcommon.utility.AttachmentUtilities;
import com.android.emailcommon.utility.AttachmentUtilities.Columns;
import android.content.ContentProvider;
import android.content.ContentUris;
import android.content.ContentValues;
import android.content.Context;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.database.MatrixCursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.os.Binder;
import android.os.ParcelFileDescriptor;
import android.util.Log;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
/*
* A simple ContentProvider that allows file access to Email's attachments.
*
* The URI scheme is as follows. For raw file access:
* content://com.android.email.attachmentprovider/acct#/attach#/RAW
*
* And for access to thumbnails:
* content://com.android.email.attachmentprovider/acct#/attach#/THUMBNAIL/width#/height#
*
* The on-disk (storage) schema is as follows.
*
* Attachments are stored at: <database-path>/account#.db_att/item#
* Thumbnails are stored at: <cache-path>/thmb_account#_item#
*
* Using the standard application context, account #10 and attachment # 20, this would be:
* /data/data/com.android.email/databases/10.db_att/20
* /data/data/com.android.email/cache/thmb_10_20
*/
public class AttachmentProvider extends ContentProvider {

    private static final String[] MIME_TYPE_PROJECTION = new String[] {
            AttachmentColumns.MIME_TYPE, AttachmentColumns.FILENAME };
    private static final int MIME_TYPE_COLUMN_MIME_TYPE = 0;
    private static final int MIME_TYPE_COLUMN_FILENAME = 1;

    private static final String[] PROJECTION_QUERY = new String[] { AttachmentColumns.FILENAME,
            AttachmentColumns.SIZE, AttachmentColumns.CONTENT_URI };

    @Override
    public boolean onCreate() {
        /*
         * We use the cache dir as a temporary directory (since Android doesn't give us one) so
         * on startup we'll clean up any .tmp files from the last run.
         */
        File[] files = getContext().getCacheDir().listFiles();
        // FIX: listFiles() returns null when the directory does not exist or
        // cannot be read; the old code dereferenced it unconditionally and
        // could crash the provider at startup.
        if (files != null) {
            for (File file : files) {
                String filename = file.getName();
                if (filename.endsWith(".tmp") || filename.startsWith("thmb_")) {
                    file.delete();
                }
            }
        }
        return true;
    }

    /**
     * Returns the mime type for a given attachment.  There are three possible results:
     *  - If thumbnail Uri, always returns "image/png" (even if there's no attachment)
     *  - If the attachment does not exist, returns null
     *  - Returns the mime type of the attachment
     */
    @Override
    public String getType(Uri uri) {
        long callingId = Binder.clearCallingIdentity();
        try {
            List<String> segments = uri.getPathSegments();
            String id = segments.get(1);
            String format = segments.get(2);
            if (AttachmentUtilities.FORMAT_THUMBNAIL.equals(format)) {
                return "image/png";
            } else {
                uri = ContentUris.withAppendedId(Attachment.CONTENT_URI, Long.parseLong(id));
                Cursor c = getContext().getContentResolver().query(uri, MIME_TYPE_PROJECTION, null,
                        null, null);
                // FIX: ContentResolver.query() may return null; treat that the
                // same as "attachment not found" instead of throwing NPE.
                if (c != null) {
                    try {
                        if (c.moveToFirst()) {
                            String mimeType = c.getString(MIME_TYPE_COLUMN_MIME_TYPE);
                            String fileName = c.getString(MIME_TYPE_COLUMN_FILENAME);
                            mimeType = AttachmentUtilities.inferMimeType(fileName, mimeType);
                            return mimeType;
                        }
                    } finally {
                        c.close();
                    }
                }
                return null;
            }
        } finally {
            Binder.restoreCallingIdentity(callingId);
        }
    }

    /**
     * Open an attachment file.  There are two "formats" - "raw", which returns an actual file,
     * and "thumbnail", which attempts to generate a thumbnail image.
     *
     * Thumbnails are cached for easy space recovery and cleanup.
     *
     * TODO:  The thumbnail format returns null for its failure cases, instead of throwing
     * FileNotFoundException, and should be fixed for consistency.
     *
     *  @throws FileNotFoundException
     */
    @Override
    public ParcelFileDescriptor openFile(Uri uri, String mode) throws FileNotFoundException {
        // If this is a write, the caller must have the EmailProvider permission, which is
        // based on signature only
        if (mode.equals("w")) {
            Context context = getContext();
            if (context.checkCallingPermission(EmailContent.PROVIDER_PERMISSION)
                    != PackageManager.PERMISSION_GRANTED) {
                throw new FileNotFoundException();
            }
            List<String> segments = uri.getPathSegments();
            String accountId = segments.get(0);
            String id = segments.get(1);
            File saveIn =
                AttachmentUtilities.getAttachmentDirectory(context, Long.parseLong(accountId));
            if (!saveIn.exists()) {
                saveIn.mkdirs();
            }
            File newFile = new File(saveIn, id);
            return ParcelFileDescriptor.open(
                    newFile, ParcelFileDescriptor.MODE_READ_WRITE |
                        ParcelFileDescriptor.MODE_CREATE | ParcelFileDescriptor.MODE_TRUNCATE);
        }
        long callingId = Binder.clearCallingIdentity();
        try {
            List<String> segments = uri.getPathSegments();
            String accountId = segments.get(0);
            String id = segments.get(1);
            String format = segments.get(2);
            if (AttachmentUtilities.FORMAT_THUMBNAIL.equals(format)) {
                int width = Integer.parseInt(segments.get(3));
                int height = Integer.parseInt(segments.get(4));
                String filename = "thmb_" + accountId + "_" + id;
                File dir = getContext().getCacheDir();
                File file = new File(dir, filename);
                if (!file.exists()) {
                    Uri attachmentUri = AttachmentUtilities.
                            getAttachmentUri(Long.parseLong(accountId), Long.parseLong(id));
                    Cursor c = query(attachmentUri,
                            new String[] { Columns.DATA }, null, null, null);
                    if (c != null) {
                        try {
                            if (c.moveToFirst()) {
                                attachmentUri = Uri.parse(c.getString(0));
                            } else {
                                return null;
                            }
                        } finally {
                            c.close();
                        }
                    }
                    String type = getContext().getContentResolver().getType(attachmentUri);
                    try {
                        InputStream in =
                                getContext().getContentResolver().openInputStream(attachmentUri);
                        Bitmap thumbnail = createThumbnail(type, in);
                        if (thumbnail == null) {
                            return null;
                        }
                        thumbnail = Bitmap.createScaledBitmap(thumbnail, width, height, true);
                        FileOutputStream out = new FileOutputStream(file);
                        thumbnail.compress(Bitmap.CompressFormat.PNG, 100, out);
                        out.close();
                        in.close();
                    } catch (IOException ioe) {
                        Log.d(Logging.LOG_TAG, "openFile/thumbnail failed with " +
                                ioe.getMessage());
                        return null;
                    } catch (OutOfMemoryError oome) {
                        Log.d(Logging.LOG_TAG, "openFile/thumbnail failed with " +
                                oome.getMessage());
                        return null;
                    }
                }
                return ParcelFileDescriptor.open(file, ParcelFileDescriptor.MODE_READ_ONLY);
            }
            else {
                return ParcelFileDescriptor.open(
                        new File(getContext().getDatabasePath(accountId + ".db_att"), id),
                        ParcelFileDescriptor.MODE_READ_ONLY);
            }
        } finally {
            Binder.restoreCallingIdentity(callingId);
        }
    }

    @Override
    public int delete(Uri uri, String arg1, String[] arg2) {
        return 0;
    }

    @Override
    public Uri insert(Uri uri, ContentValues values) {
        return null;
    }

    /**
     * Returns a cursor based on the data in the attachments table, or null if the attachment
     * is not recorded in the table.
     *
     * Supports REST Uri only, for a single row - selection, selection args, and sortOrder are
     * ignored (non-null values should probably throw an exception....)
     */
    @Override
    public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs,
            String sortOrder) {
        long callingId = Binder.clearCallingIdentity();
        try {
            if (projection == null) {
                projection =
                    new String[] {
                        Columns._ID,
                        Columns.DATA,
                        Columns.DISPLAY_NAME,  // matched the DISPLAY_NAME of {@link OpenableColumns}
                        Columns.SIZE,          // matched the SIZE of {@link OpenableColumns}
                };
            }

            List<String> segments = uri.getPathSegments();
            String accountId = segments.get(0);
            String id = segments.get(1);
            String format = segments.get(2);
            String name = null;
            int size = -1;
            String contentUri = null;

            uri = ContentUris.withAppendedId(Attachment.CONTENT_URI, Long.parseLong(id));
            Cursor c = getContext().getContentResolver().query(uri, PROJECTION_QUERY,
                    null, null, null);
            // FIX: guard against a null cursor from the resolver; treat it as
            // "attachment not recorded", matching the documented contract.
            if (c == null) {
                return null;
            }
            try {
                if (c.moveToFirst()) {
                    name = c.getString(0);
                    size = c.getInt(1);
                    contentUri = c.getString(2);
                } else {
                    return null;
                }
            } finally {
                c.close();
            }

            MatrixCursor ret = new MatrixCursor(projection);
            Object[] values = new Object[projection.length];
            for (int i = 0, count = projection.length; i < count; i++) {
                String column = projection[i];
                if (Columns._ID.equals(column)) {
                    values[i] = id;
                }
                else if (Columns.DATA.equals(column)) {
                    values[i] = contentUri;
                }
                else if (Columns.DISPLAY_NAME.equals(column)) {
                    values[i] = name;
                }
                else if (Columns.SIZE.equals(column)) {
                    values[i] = size;
                }
            }
            ret.addRow(values);
            return ret;
        } finally {
            Binder.restoreCallingIdentity(callingId);
        }
    }

    @Override
    public int update(Uri uri, ContentValues values, String selection, String[] selectionArgs) {
        return 0;
    }

    // Dispatches to a type-specific thumbnailer; only images are supported.
    private Bitmap createThumbnail(String type, InputStream data) {
        if(MimeUtility.mimeTypeMatches(type, "image/*")) {
            return createImageThumbnail(data);
        }
        return null;
    }

    // Decodes an image stream into a Bitmap, returning null on any failure.
    private Bitmap createImageThumbnail(InputStream data) {
        try {
            Bitmap bitmap = BitmapFactory.decodeStream(data);
            return bitmap;
        } catch (OutOfMemoryError oome) {
            Log.d(Logging.LOG_TAG, "createImageThumbnail failed with " + oome.getMessage());
            return null;
        } catch (Exception e) {
            Log.d(Logging.LOG_TAG, "createImageThumbnail failed with " + e.getMessage());
            return null;
        }
    }

    /**
     * Need this to suppress warning in unit tests.
     */
    @Override
    public void shutdown() {
        // Don't call super.shutdown(), which emits a warning...
    }
}
| craigacgomez/flaming_monkey_packages_apps_Email | src/com/android/email/provider/AttachmentProvider.java | Java | apache-2.0 | 13,480 |
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2012.08.31 at 10:43:34 AM EDT
//
package org.slc.sli.test.edfi.entitiesR1;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;
/**
*
* This entity represents any program designed to work
* in conjunction with or to supplement the main
* academic program.
* Programs may provide instruction, training, services or benefits
* through federal,
* state, or local agencies. Programs may also include
* organized extracurricular activities for students.
*
*
* <p>Java class for program complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="program">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="programId" type="{http://slc-sli/ed-org/0.1}programId" minOccurs="0"/>
* <element name="programType" type="{http://slc-sli/ed-org/0.1}programType"/>
* <element name="programSponsor" type="{http://slc-sli/ed-org/0.1}programSponsorType" minOccurs="0"/>
* <element name="services" type="{http://slc-sli/ed-org/0.1}serviceDescriptorReferenceType" maxOccurs="unbounded" minOccurs="0"/>
* <element name="staffAssociations" type="{http://slc-sli/ed-org/0.1}staffProgramAssociation" maxOccurs="unbounded" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
// NOTE: JAXB-generated class (see file header) — prefer regenerating from the
// schema over hand-editing.
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "program", propOrder = {
    "programId",
    "programType",
    "programSponsor",
    "services",
    "staffAssociations"
})
public class Program {

    // Optional external identifier of the program.
    protected String programId;
    // Required program classification.
    @XmlElement(required = true)
    protected ProgramType programType;
    // Optional sponsoring agency.
    protected ProgramSponsorType programSponsor;
    // Lazily created in the getters, per the JAXB live-list convention below.
    protected List<ServiceDescriptorReferenceType> services;
    protected List<StaffProgramAssociation> staffAssociations;

    /**
     * Gets the value of the programId property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getProgramId() {
        return programId;
    }

    /**
     * Sets the value of the programId property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setProgramId(String value) {
        this.programId = value;
    }

    /**
     * Gets the value of the programType property.
     *
     * @return
     *     possible object is
     *     {@link ProgramType }
     *
     */
    public ProgramType getProgramType() {
        return programType;
    }

    /**
     * Sets the value of the programType property.
     *
     * @param value
     *     allowed object is
     *     {@link ProgramType }
     *
     */
    public void setProgramType(ProgramType value) {
        this.programType = value;
    }

    /**
     * Gets the value of the programSponsor property.
     *
     * @return
     *     possible object is
     *     {@link ProgramSponsorType }
     *
     */
    public ProgramSponsorType getProgramSponsor() {
        return programSponsor;
    }

    /**
     * Sets the value of the programSponsor property.
     *
     * @param value
     *     allowed object is
     *     {@link ProgramSponsorType }
     *
     */
    public void setProgramSponsor(ProgramSponsorType value) {
        this.programSponsor = value;
    }

    /**
     * Gets the value of the services property.
     *
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the services property.
     *
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getServices().add(newItem);
     * </pre>
     *
     *
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link ServiceDescriptorReferenceType }
     *
     *
     */
    public List<ServiceDescriptorReferenceType> getServices() {
        if (services == null) {
            services = new ArrayList<ServiceDescriptorReferenceType>();
        }
        return this.services;
    }

    /**
     * Gets the value of the staffAssociations property.
     *
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the staffAssociations property.
     *
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getStaffAssociations().add(newItem);
     * </pre>
     *
     *
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link StaffProgramAssociation }
     *
     *
     */
    public List<StaffProgramAssociation> getStaffAssociations() {
        if (staffAssociations == null) {
            staffAssociations = new ArrayList<StaffProgramAssociation>();
        }
        return this.staffAssociations;
    }

}
| inbloom/secure-data-service | tools/data-tools/src/org/slc/sli/test/edfi/entitiesR1/Program.java | Java | apache-2.0 | 6,094 |
// Generated from /Library/Java/JavaVirtualMachines/jdk1.8.0_144.jdk/Contents/Home/jre/lib/rt.jar
#include <java/awt/geom/Path2D_Float_CopyIterator.hpp>
extern void unimplemented_(const char16_t* name);
// Tag-dispatch constructor: performs static (class) initialization only;
// instance-field initialization is deferred to ctor() below.
java::awt::geom::Path2D_Float_CopyIterator::Path2D_Float_CopyIterator(const ::default_init_tag&)
    : super(*static_cast< ::default_init_tag* >(0))
{
    clinit();
}
// Public constructor: delegates to the tag overload, then runs the
// generated ctor() body with the given path.
java::awt::geom::Path2D_Float_CopyIterator::Path2D_Float_CopyIterator(Path2D_Float* p2df)
    : Path2D_Float_CopyIterator(*static_cast< ::default_init_tag* >(0))
{
    ctor(p2df);
}
// Generated stub: reports "unimplemented" at runtime; the commented-out
// super::ctor() call marks where a real translation would chain up.
void ::java::awt::geom::Path2D_Float_CopyIterator::ctor(Path2D_Float* p2df)
{ /* stub */
    /* super::ctor(); */
    unimplemented_(u"void ::java::awt::geom::Path2D_Float_CopyIterator::ctor(Path2D_Float* p2df)");
}
// Stub for PathIterator.currentSegment(float[]): reports unimplemented and
// returns a dummy segment type of 0.
int32_t java::awt::geom::Path2D_Float_CopyIterator::currentSegment(::floatArray* coords)
{ /* stub */
    unimplemented_(u"int32_t java::awt::geom::Path2D_Float_CopyIterator::currentSegment(::floatArray* coords)");
    return 0;
}
// Stub for the double-precision overload; same placeholder behavior as the
// float overload above.
int32_t java::awt::geom::Path2D_Float_CopyIterator::currentSegment(::doubleArray* coords)
{ /* stub */
    unimplemented_(u"int32_t java::awt::geom::Path2D_Float_CopyIterator::currentSegment(::doubleArray* coords)");
    return 0;
}
extern java::lang::Class *class_(const char16_t *c, int n);
// Returns the cached java.lang.Class object for this type; the function-local
// static ensures the lookup runs only once (name length 39 passed explicitly).
java::lang::Class* java::awt::geom::Path2D_Float_CopyIterator::class_()
{
    static ::java::lang::Class* c = ::class_(u"java.awt.geom.Path2D.Float.CopyIterator", 39);
    return c;
}
// Virtual hook used by the runtime's getClass(); forwards to the cached value.
java::lang::Class* java::awt::geom::Path2D_Float_CopyIterator::getClass0()
{
    return class_();
}
| pebble2015/cpoi | ext/stub/java/awt/geom/Path2D_Float_CopyIterator-stub.cpp | C++ | apache-2.0 | 1,586 |
/*
* Copyright 2017 B2i Healthcare Pte Ltd, http://b2i.sg
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.b2international.index.es;
import java.util.List;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.service.PendingClusterTask;
import org.slf4j.Logger;
/**
 * Utility that blocks the calling thread until the Elasticsearch cluster
 * reports no pending cluster-level tasks.
 *
 * @since 5.10
 */
public final class AwaitPendingTasks {

	/** Poll interval in milliseconds between pending-task checks. */
	private static final int PENDING_CLUSTER_TASKS_RETRY_INTERVAL = 50;

	private AwaitPendingTasks() {}

	/**
	 * Polls the cluster until its pending task queue is empty. If the
	 * waiting thread is interrupted, the interrupt status is restored and
	 * the method returns early.
	 *
	 * @param client client used to query pending cluster tasks
	 * @param log logger used to report that waiting is in progress
	 */
	public static final void await(Client client, Logger log) {
		int pendingTaskCount;
		do {
			final List<PendingClusterTask> pendingTasks = client.admin()
					.cluster()
					.preparePendingClusterTasks()
					.get()
					.getPendingTasks();
			pendingTaskCount = pendingTasks.size();
			if (pendingTaskCount > 0) {
				log.info("Waiting for pending cluster tasks to finish.");
				try {
					Thread.sleep(PENDING_CLUSTER_TASKS_RETRY_INTERVAL);
				} catch (InterruptedException e) {
					// Bug fix: once the interrupt flag is set, every later
					// Thread.sleep() throws immediately, so continuing the
					// loop degenerates into a busy spin. Restore the flag
					// and stop waiting instead.
					Thread.currentThread().interrupt();
					return;
				}
			}
		} while (pendingTaskCount > 0);
	}
}
| IHTSDO/snow-owl | commons/com.b2international.index/src/com/b2international/index/es/AwaitPendingTasks.java | Java | apache-2.0 | 1,558 |
package jwt4j.checkers;
import com.google.gson.JsonObject;
import jwt4j.TokenChecker;
import jwt4j.exceptions.InvalidIssuerException;
import static jwt4j.JWTConstants.ISSUER;
public class IssuerChecker implements TokenChecker
{
private final String issuer;
public IssuerChecker(final String issuer)
{
this.issuer = issuer;
}
@Override
public void check(JsonObject payloadJson)
{
if (!payloadJson.has(ISSUER) || !payloadJson.get(ISSUER).getAsString().equals(issuer)) {
throw new InvalidIssuerException("Expected " + issuer + " issuer");
}
}
} | milpol/jwt4j | src/main/java/jwt4j/checkers/IssuerChecker.java | Java | apache-2.0 | 614 |
/**
 * TabPanel groups several Panel instances into a single container and
 * manages switching between them through a tab menu.
 *
 * @author Gabor Kokeny
 */
class TabPanel extends Container {

    /**
     * Zero-based index of the currently active tab, or null while the
     * TabPanel is still empty.
     *
     * @property {number} activeIndex
     */
    private activeIndex: number = null;

    /** The currently active (visible) panel, if any. */
    private activePanel: Panel;

    /** Menu that holds one clickable item per registered panel. */
    private menu: TabPanelMenu;

    /**
     * @param {Container} parent Container this TabPanel is attached to.
     * @constructor
     */
    constructor(parent: Container) {
        super(parent, true);
        this.setClass("ui-tabpanel");
        this.getContainer().setClass("ui-tabpanel-wrapper");
        this.menu = new TabPanelMenu(this.getContainer());
    }

    /**
     * Registers a panel as a new tab. A menu item is created lazily the
     * first time the panel renders; clicking that item activates the panel.
     *
     * @param {Panel} panel The panel to add; must be a Panel instance.
     */
    add(panel: Panel) {
        if (!(panel instanceof Panel)) {
            Log.error("Item couldn't add to tabpanel, because it is not a Panel", panel);
        }
        super.add(panel);
        var me = this;
        var listener = function(e: Event) {
            var menuItem = new MenuItem(me.menu);
            menuItem.setText(panel.getTitle());
            menuItem.setIconClass(panel.getIconClass());
            menuItem.addClickListener(function(e: MouseEvent) {
                me.setActivePanel(panel);
            });
            // One-shot listener: detach once the menu item has been built.
            panel.removeListener(TabPanel.EVENT_BEFORE_RENDER, listener);
        }
        panel.addListener(TabPanel.EVENT_BEFORE_RENDER, listener);
        this.setActiveTabIndex(0);
    }

    /**
     * Activates the panel at the given zero-based index. The index must
     * not be null/undefined, negative, or >= the number of panels.
     * If the requested index is already active, this is a no-op.
     *
     * @param {number} index Zero-based index of the panel to activate.
     */
    setActiveTabIndex(index: number) {
        this.checkTabIndex(index);
        if (this.activeIndex == index) {
            return;
        }
        var panel = this.getPanel(index);
        this.setActivePanel(panel);
        this.activeIndex = index;
    }

    /**
     * Makes the given panel the active one, toggling the "active" CSS
     * class between the previous and the new panel.
     *
     * @param {Panel} panel
     * @private
     */
    private setActivePanel(panel: Panel) {
        Assert.notNull(panel, "panel");
        if (this.activePanel) {
            this.activePanel.removeClass("active");
        }
        this.activePanel = panel;
        this.activePanel.addClass("active");
    }

    /**
     * Returns the index of the active panel.
     *
     * @return {number} The active zero-based tab index, or -1 if no panel
     *         has been added to this TabPanel yet.
     */
    getActiveTabIndex(): number {
        // Bug fix: the previous truthiness test (this.activeIndex ? ...)
        // treated the valid index 0 as "no active tab" and returned -1.
        // Compare against null explicitly instead.
        return this.activeIndex !== null ? this.activeIndex : -1;
    }

    /**
     * Validates a prospective tab index:
     * - must not be null or undefined
     * - must not be negative
     * - must be less than the number of panels
     *
     * @param {number} tabIndex The index to validate.
     * @private
     */
    private checkTabIndex(tabIndex: number) {
        if (tabIndex === null || tabIndex === undefined) {
            Log.error("The active tab index cannot be null or undefined");
        }
        if (tabIndex < 0) {
            Log.error("The active tab index cannot be negative, it should be a positive number!");
        }
        if (tabIndex >= this.getChildren().size()) {
            Log.error("The active tab index should be less then " + this.getChildren().size());
        }
    }

    /**
     * Returns the currently active panel. Use setActiveTabIndex to change
     * which panel is active.
     *
     * @return {Panel} The active panel, or null/undefined while empty.
     */
    getActivePanel(): Panel {
        return this.activePanel;
    }

    /**
     * Returns the panel registered at the given zero-based index.
     *
     * @param {number} index
     * @return {Panel}
     */
    getPanel(index: number): Panel {
        return <Panel>super.getComponent(index);
    }
}
/**
 * Menu used internally by TabPanel to host one MenuItem per tab.
 *
 * @author Gabor Kokeny
 */
class TabPanelMenu extends Menu {
    constructor(parent: Container) {
        super(parent);
    }
}
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from keystoneauth1 import exceptions as ks_exc
import mock
from six.moves.urllib import parse
import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova import rc_fields as fields
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_requests
from nova.tests import uuidsentinel as uuids
CONF = nova.conf.CONF
class SafeConnectedTestCase(test.NoDBTestCase):
    """Test the safe_connect decorator for the scheduler client."""
    def setUp(self):
        # Build a report client with keystone auth loading stubbed out so
        # no real auth configuration is required.
        super(SafeConnectedTestCase, self).setUp()
        self.context = context.get_admin_context()
        with mock.patch('keystoneauth1.loading.load_auth_from_conf_options'):
            self.client = report.SchedulerReportClient()
    @mock.patch('keystoneauth1.session.Session.request')
    def test_missing_endpoint(self, req):
        """Test EndpointNotFound behavior.
        A missing endpoint entry should not explode.
        """
        req.side_effect = ks_exc.EndpointNotFound()
        self.client._get_resource_provider(self.context, "fake")
        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_client')
    @mock.patch('keystoneauth1.session.Session.request')
    def test_missing_endpoint_create_client(self, req, create_client):
        """Test EndpointNotFound retry behavior.
        A missing endpoint should cause _create_client to be called.
        """
        req.side_effect = ks_exc.EndpointNotFound()
        self.client._get_resource_provider(self.context, "fake")
        # This is the second time _create_client is called, but the first since
        # the mock was created.
        self.assertTrue(create_client.called)
    @mock.patch('keystoneauth1.session.Session.request')
    def test_missing_auth(self, req):
        """Test Missing Auth handled correctly.
        A missing auth configuration should not explode.
        """
        req.side_effect = ks_exc.MissingAuthPlugin()
        self.client._get_resource_provider(self.context, "fake")
        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)
    @mock.patch('keystoneauth1.session.Session.request')
    def test_unauthorized(self, req):
        """Test Unauthorized handled correctly.
        An unauthorized configuration should not explode.
        """
        req.side_effect = ks_exc.Unauthorized()
        self.client._get_resource_provider(self.context, "fake")
        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)
    @mock.patch('keystoneauth1.session.Session.request')
    def test_connect_fail(self, req):
        """Test Connect Failure handled correctly.
        If we get a connect failure, this is transient, and we expect
        that this will end up working correctly later.
        """
        req.side_effect = ks_exc.ConnectFailure()
        self.client._get_resource_provider(self.context, "fake")
        # reset the call count to demonstrate that future calls do
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)
    @mock.patch.object(report, 'LOG')
    def test_warning_limit(self, mock_log):
        # Assert that __init__ initializes _warn_count as we expect
        self.assertEqual(0, self.client._warn_count)
        mock_self = mock.MagicMock()
        mock_self._warn_count = 0
        # Drive warn_limit past WARN_EVERY to show warnings are throttled.
        for i in range(0, report.WARN_EVERY + 3):
            report.warn_limit(mock_self, 'warning')
        mock_log.warning.assert_has_calls([mock.call('warning'),
                                           mock.call('warning')])
    @mock.patch('keystoneauth1.session.Session.request')
    def test_failed_discovery(self, req):
        """Test DiscoveryFailure behavior.
        Failed discovery should not blow up.
        """
        req.side_effect = ks_exc.DiscoveryFailure()
        self.client._get_resource_provider(self.context, "fake")
        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)
class TestConstructor(test.NoDBTestCase):
    @mock.patch('keystoneauth1.loading.load_session_from_conf_options')
    @mock.patch('keystoneauth1.loading.load_auth_from_conf_options')
    def test_constructor(self, load_auth_mock, load_sess_mock):
        # With default config the client targets internal/public endpoints.
        client = report.SchedulerReportClient()
        load_auth_mock.assert_called_once_with(CONF, 'placement')
        load_sess_mock.assert_called_once_with(CONF, 'placement',
                                               auth=load_auth_mock.return_value)
        self.assertEqual(['internal', 'public'], client._client.interface)
        self.assertEqual({'accept': 'application/json'},
                         client._client.additional_headers)
    @mock.patch('keystoneauth1.loading.load_session_from_conf_options')
    @mock.patch('keystoneauth1.loading.load_auth_from_conf_options')
    def test_constructor_admin_interface(self, load_auth_mock, load_sess_mock):
        # [placement]valid_interfaces overrides the endpoint interface list.
        self.flags(valid_interfaces='admin', group='placement')
        client = report.SchedulerReportClient()
        load_auth_mock.assert_called_once_with(CONF, 'placement')
        load_sess_mock.assert_called_once_with(CONF, 'placement',
                                               auth=load_auth_mock.return_value)
        self.assertEqual(['admin'], client._client.interface)
        self.assertEqual({'accept': 'application/json'},
                         client._client.additional_headers)
class SchedulerReportClientTestCase(test.NoDBTestCase):
    # Shared fixture for report-client tests: a fake keystone adapter and a
    # representative ComputeNode the provider tree can be seeded from.
    def setUp(self):
        super(SchedulerReportClientTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.ks_adap_mock = mock.Mock()
        self.compute_node = objects.ComputeNode(
            uuid=uuids.compute_node,
            hypervisor_hostname='foo',
            vcpus=8,
            cpu_allocation_ratio=16.0,
            memory_mb=1024,
            ram_allocation_ratio=1.5,
            local_gb=10,
            disk_allocation_ratio=1.0,
        )
        # Patch the adapter so every placement call goes through
        # self.ks_adap_mock instead of a real HTTP session.
        with test.nested(
                mock.patch('keystoneauth1.adapter.Adapter',
                           return_value=self.ks_adap_mock),
                mock.patch('keystoneauth1.loading.load_auth_from_conf_options')
        ):
            self.client = report.SchedulerReportClient()
    def _init_provider_tree(self, generation_override=None,
                            resources_override=None):
        # Seed the client's provider tree with a root provider for the fake
        # compute node; inventory defaults are derived from the node unless
        # resources_override is supplied.
        cn = self.compute_node
        resources = resources_override
        if resources_override is None:
            resources = {
                'VCPU': {
                    'total': cn.vcpus,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn.vcpus,
                    'step_size': 1,
                    'allocation_ratio': cn.cpu_allocation_ratio,
                },
                'MEMORY_MB': {
                    'total': cn.memory_mb,
                    'reserved': 512,
                    'min_unit': 1,
                    'max_unit': cn.memory_mb,
                    'step_size': 1,
                    'allocation_ratio': cn.ram_allocation_ratio,
                },
                'DISK_GB': {
                    'total': cn.local_gb,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn.local_gb,
                    'step_size': 1,
                    'allocation_ratio': cn.disk_allocation_ratio,
                },
            }
        generation = generation_override or 1
        rp_uuid = self.client._provider_tree.new_root(
            cn.hypervisor_hostname,
            cn.uuid,
            generation=generation,
        )
        self.client._provider_tree.update_inventory(rp_uuid, resources)
    def _validate_provider(self, name_or_uuid, **kwargs):
        """Validates existence and values of a provider in this client's
        _provider_tree.

        :param name_or_uuid: The name or UUID of the provider to validate.
        :param kwargs: Optional keyword arguments of ProviderData attributes
                       whose values are to be validated.
        """
        found = self.client._provider_tree.data(name_or_uuid)
        # If kwargs provided, their names indicate ProviderData attributes
        for attr, expected in kwargs.items():
            try:
                self.assertEqual(getattr(found, attr), expected)
            except AttributeError:
                self.fail("Provider with name or UUID %s doesn't have "
                          "attribute %s (expected value: %s)" %
                          (name_or_uuid, attr, expected))
class TestPutAllocations(SchedulerReportClientTestCase):
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations(self, mock_put):
        # Happy path: placement answers 204, so put_allocations returns True
        # and exactly one PUT is issued at microversion 1.8.
        mock_put.return_value.status_code = 204
        mock_put.return_value.text = "cool"
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        resp = self.client.put_allocations(self.context, rp_uuid,
                                           consumer_uuid, data,
                                           mock.sentinel.project_id,
                                           mock.sentinel.user_id)
        self.assertTrue(resp)
        mock_put.assert_called_once_with(
            expected_url, mock.ANY, version='1.8',
            global_request_id=self.context.global_id)
    @mock.patch.object(report.LOG, 'warning')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_fail(self, mock_put, mock_warn):
        # A 400 from placement means the allocation was rejected: the helper
        # returns False and logs a warning mentioning the instance.
        mock_put.return_value.status_code = 400
        mock_put.return_value.text = "not cool"
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        resp = self.client.put_allocations(self.context, rp_uuid,
                                           consumer_uuid, data,
                                           mock.sentinel.project_id,
                                           mock.sentinel.user_id)
        self.assertFalse(resp)
        mock_put.assert_called_once_with(
            expected_url, mock.ANY, version='1.8',
            global_request_id=self.context.global_id)
        log_msg = mock_warn.call_args[0][0]
        self.assertIn("Unable to submit allocation for instance", log_msg)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_retries_conflict(self, mock_put):
        # A retryable 409 ("concurrently updated") is followed by a 204:
        # the helper retries the identical PUT and ultimately succeeds.
        failed = mock.MagicMock()
        failed.status_code = 409
        failed.text = "concurrently updated"
        succeeded = mock.MagicMock()
        succeeded.status_code = 204
        mock_put.side_effect = (failed, succeeded)
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        resp = self.client.put_allocations(self.context, rp_uuid,
                                           consumer_uuid, data,
                                           mock.sentinel.project_id,
                                           mock.sentinel.user_id)
        self.assertTrue(resp)
        mock_put.assert_has_calls([
            mock.call(expected_url, mock.ANY, version='1.8',
                      global_request_id=self.context.global_id)] * 2)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_retry_gives_up(self, mock_put):
        # Persistent retryable conflicts exhaust the retry budget (three
        # attempts) and put_allocations gives up, returning False.
        failed = mock.MagicMock()
        failed.status_code = 409
        failed.text = "concurrently updated"
        mock_put.return_value = failed
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        resp = self.client.put_allocations(self.context, rp_uuid,
                                           consumer_uuid, data,
                                           mock.sentinel.project_id,
                                           mock.sentinel.user_id)
        self.assertFalse(resp)
        mock_put.assert_has_calls([
            mock.call(expected_url, mock.ANY, version='1.8',
                      global_request_id=self.context.global_id)] * 3)
    def test_claim_resources_success_with_old_version(self):
        # Claim using the legacy (pre-1.12, list-based) allocation request
        # format; it must be converted to the dict-based 1.12 payload.
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': [
                {
                    'resource_provider': {
                        'uuid': uuids.cn1
                    },
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            ],
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(
            self.context, consumer_uuid, alloc_req, project_id, user_id)
        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations': {
                alloc['resource_provider']['uuid']: {
                    'resources': alloc['resources']
                }
                for alloc in alloc_req['allocations']
            }
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=expected_payload,
            raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertTrue(res)
    def test_claim_resources_success(self):
        # Claim with the 1.12 dict-based allocation request: the payload is
        # forwarded as-is plus project_id/user_id.
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')
        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {'allocations': {
            rp_uuid: alloc
            for rp_uuid, alloc in alloc_req['allocations'].items()}}
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=expected_payload,
            raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertTrue(res)
    def test_claim_resources_success_move_operation_no_shared(self):
        """Tests that when a move operation is detected (existing allocations
        for the same instance UUID) that we end up constructing an appropriate
        allocation that contains the original resources on the source host
        as well as the resources on the destination host.
        """
        # GET returns the instance's current (source-host) allocations.
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {
                uuids.source: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            },
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.destination: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024
                    }
                },
            },
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')
        expected_url = "/allocations/%s" % consumer_uuid
        # New allocation should include resources claimed on both the source
        # and destination hosts
        expected_payload = {
            'allocations': {
                uuids.source: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024
                    }
                },
                uuids.destination: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024
                    }
                },
            },
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=mock.ANY,
            raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.assertTrue(res)
    def test_claim_resources_success_move_operation_with_shared(self):
        """Tests that when a move operation is detected (existing allocations
        for the same instance UUID) that we end up constructing an appropriate
        allocation that contains the original resources on the source host
        as well as the resources on the destination host but that when a shared
        storage provider is claimed against in both the original allocation as
        well as the new allocation request, we don't double that allocation
        resource request up.
        """
        # GET returns current allocations against the source compute node
        # plus a shared storage provider.
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {
                uuids.source: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.shared_storage: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'DISK_GB': 100,
                    },
                },
            },
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.destination: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 100,
                    }
                },
            }
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')
        expected_url = "/allocations/%s" % consumer_uuid
        # New allocation should include resources claimed on both the source
        # and destination hosts but not have a doubled-up request for the disk
        # resources on the shared provider
        expected_payload = {
            'allocations': {
                uuids.source: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 100
                    }
                },
                uuids.destination: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024
                    }
                },
            },
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=mock.ANY,
            raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the allocations from the json body from the
        # mock call_args to validate it separately otherwise hash seed
        # issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.assertTrue(res)
    def test_claim_resources_success_resize_to_same_host_no_shared(self):
        """Tests that when a resize to the same host operation is detected
        (existing allocations for the same instance UUID and same resource
        provider) that we end up constructing an appropriate allocation that
        contains the original resources on the source host as well as the
        resources on the destination host, which in this case are the same.
        """
        # GET returns the current (pre-resize) allocation on the host.
        get_current_allocations_resp_mock = mock.Mock(status_code=200)
        get_current_allocations_resp_mock.json.return_value = {
            'allocations': {
                uuids.same_host: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20
                    },
                },
            },
        }
        self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock
        put_allocations_resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = put_allocations_resp_mock
        consumer_uuid = uuids.consumer_uuid
        # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB
        # are all being increased but on the same host. We also throw a custom
        # resource class in the new allocation to make sure it's not lost and
        # that we don't have a KeyError when merging the allocations.
        alloc_req = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048,
                        'DISK_GB': 40,
                        'CUSTOM_FOO': 1
                    }
                },
            },
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')
        expected_url = "/allocations/%s" % consumer_uuid
        # New allocation should include doubled resources claimed on the same
        # host.
        expected_payload = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 3,
                        'MEMORY_MB': 3072,
                        'DISK_GB': 60,
                        'CUSTOM_FOO': 1
                    }
                },
            },
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=mock.ANY, raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.assertTrue(res)
    def test_claim_resources_success_resize_to_same_host_with_shared(self):
        """Tests that when a resize to the same host operation is detected
        (existing allocations for the same instance UUID and same resource
        provider) that we end up constructing an appropriate allocation that
        contains the original resources on the source host as well as the
        resources on the destination host, which in this case are the same.
        This test adds the fun wrinkle of throwing a shared storage provider
        in the mix when doing resize to the same host.
        """
        # GET returns the pre-resize allocations: compute resources on the
        # host plus DISK_GB on a shared storage provider.
        get_current_allocations_resp_mock = mock.Mock(status_code=200)
        get_current_allocations_resp_mock.json.return_value = {
            'allocations': {
                uuids.same_host: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024
                    },
                },
                uuids.shared_storage: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
        }
        self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock
        put_allocations_resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = put_allocations_resp_mock
        consumer_uuid = uuids.consumer_uuid
        # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB
        # are all being increased but DISK_GB is on a shared storage provider.
        alloc_req = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 40,
                    }
                },
            },
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')
        expected_url = "/allocations/%s" % consumer_uuid
        # New allocation should include doubled resources claimed on the same
        # host.
        expected_payload = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 3,
                        'MEMORY_MB': 3072
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 60
                    }
                },
            },
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=mock.ANY, raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.assertTrue(res)
    def test_claim_resources_fail_retry_success(self):
        # First PUT fails with a retryable 409 ("concurrently updated");
        # the claim retries the identical request and succeeds.
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mocks = [
            mock.Mock(
                status_code=409,
                text='Inventory changed while attempting to allocate: '
                     'Another thread concurrently updated the data. '
                     'Please retry your update'),
            mock.Mock(status_code=204),
        ]
        self.ks_adap_mock.put.side_effect = resp_mocks
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')
        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations':
                {rp_uuid: res
                 for rp_uuid, res in alloc_req['allocations'].items()}
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        # We should have exactly two calls to the placement API that look
        # identical since we're retrying the same HTTP request
        expected_calls = [
            mock.call(expected_url, microversion='1.12', json=expected_payload,
                      raise_exc=False,
                      headers={'X-Openstack-Request-Id':
                               self.context.global_id})] * 2
        self.assertEqual(len(expected_calls),
                         self.ks_adap_mock.put.call_count)
        self.ks_adap_mock.put.assert_has_calls(expected_calls)
        self.assertTrue(res)
    @mock.patch.object(report.LOG, 'warning')
    def test_claim_resources_failure(self, mock_log):
        # A non-retryable 409 (text lacks "concurrently updated") is not
        # retried: the claim fails and a warning is logged.
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=409, text='not cool')
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')
        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations':
                {rp_uuid: res
                 for rp_uuid, res in alloc_req['allocations'].items()}
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=expected_payload,
            raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertFalse(res)
        self.assertTrue(mock_log.called)
    def test_remove_provider_from_inst_alloc_no_shared(self):
        """Tests that the method which manipulates an existing doubled-up
        allocation for a move operation to remove the source host results in
        sending placement the proper payload to PUT
        /allocations/{consumer_uuid} call.
        """
        # Source and destination both currently hold identical allocations
        # for the consumer: the doubled-up move state.
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {
                uuids.source: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.destination: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            },
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.remove_provider_from_instance_allocation(
            self.context, consumer_uuid, uuids.source, user_id, project_id,
            mock.Mock())
        expected_url = "/allocations/%s" % consumer_uuid
        # New allocations should only include the destination...
        expected_payload = {
            'allocations': [
                {
                    'resource_provider': {
                        'uuid': uuids.destination,
                    },
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            ],
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        # Sort both allocation lists so ordering differences can't cause a
        # spurious failure.
        sort_by_uuid = lambda x: x['resource_provider']['uuid']
        expected_allocations = sorted(expected_payload['allocations'],
                                      key=sort_by_uuid)
        actual_allocations = sorted(actual_payload['allocations'],
                                    key=sort_by_uuid)
        self.assertEqual(expected_allocations, actual_allocations)
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.10', json=mock.ANY, raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertTrue(res)
    def test_remove_provider_from_inst_alloc_with_shared(self):
        """Tests that the method which manipulates an existing doubled-up
        allocation with DISK_GB being consumed from a shared storage provider
        for a move operation to remove the source host results in sending
        placement the proper payload to PUT /allocations/{consumer_uuid}
        call.
        """
        # Source and destination hold identical compute allocations, plus a
        # DISK_GB allocation against a shared storage provider that must be
        # preserved when the source is dropped.
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {
                uuids.source: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.shared_storage: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'DISK_GB': 100,
                    },
                },
                uuids.destination: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            },
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.remove_provider_from_instance_allocation(
            self.context, consumer_uuid, uuids.source, user_id, project_id,
            mock.Mock())
        expected_url = "/allocations/%s" % consumer_uuid
        # New allocations should only include the destination...
        # ...and the untouched shared storage allocation.
        expected_payload = {
            'allocations': [
                {
                    'resource_provider': {
                        'uuid': uuids.shared_storage,
                    },
                    'resources': {
                        'DISK_GB': 100,
                    },
                },
                {
                    'resource_provider': {
                        'uuid': uuids.destination,
                    },
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            ],
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        # Sort both allocation lists so ordering differences can't cause a
        # spurious failure.
        sort_by_uuid = lambda x: x['resource_provider']['uuid']
        expected_allocations = sorted(expected_payload['allocations'],
                                      key=sort_by_uuid)
        actual_allocations = sorted(actual_payload['allocations'],
                                    key=sort_by_uuid)
        self.assertEqual(expected_allocations, actual_allocations)
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.10', json=mock.ANY, raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertTrue(res)
def test_remove_provider_from_inst_alloc_no_source(self):
"""Tests that if remove_provider_from_instance_allocation() fails to
find any allocations for the source host, it just returns True and
does not attempt to rewrite the allocation for the consumer.
"""
get_resp_mock = mock.Mock(status_code=200)
# Act like the allocations already did not include the source host for
# some reason
get_resp_mock.json.return_value = {
'allocations': {
uuids.shared_storage: {
'resource_provider_generation': 42,
'resources': {
'DISK_GB': 100,
},
},
uuids.destination: {
'resource_provider_generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
},
}
self.ks_adap_mock.get.return_value = get_resp_mock
consumer_uuid = uuids.consumer_uuid
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
self.context, consumer_uuid, uuids.source, user_id, project_id,
mock.Mock())
self.ks_adap_mock.get.assert_called()
self.ks_adap_mock.put.assert_not_called()
self.assertTrue(res)
def test_remove_provider_from_inst_alloc_fail_get_allocs(self):
"""Tests that we gracefully exit with False from
remove_provider_from_instance_allocation() if the call to get the
existing allocations fails for some reason
"""
get_resp_mock = mock.Mock(status_code=500)
self.ks_adap_mock.get.return_value = get_resp_mock
consumer_uuid = uuids.consumer_uuid
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
self.context, consumer_uuid, uuids.source, user_id, project_id,
mock.Mock())
self.ks_adap_mock.get.assert_called()
self.ks_adap_mock.put.assert_not_called()
self.assertFalse(res)
class TestSetAndClearAllocations(SchedulerReportClientTestCase):
    """Tests for SchedulerReportClient.set_and_clear_allocations(), which
    POSTs to /allocations to set allocations for one consumer and, when
    requested, clear them for another (e.g. a migration record).
    """

    def setUp(self):
        super(TestSetAndClearAllocations, self).setUp()
        # We want to reuse the mock throughout the class, but with
        # different return values.
        self.mock_post = mock.patch(
            'nova.scheduler.client.report.SchedulerReportClient.post').start()
        self.addCleanup(self.mock_post.stop)
        # Default to a successful POST; individual tests override this.
        self.mock_post.return_value.status_code = 204
        self.rp_uuid = mock.sentinel.rp
        self.consumer_uuid = mock.sentinel.consumer
        self.data = {"MEMORY_MB": 1024}
        self.project_id = mock.sentinel.project_id
        self.user_id = mock.sentinel.user_id
        self.expected_url = '/allocations'

    def test_url_microversion(self):
        """The POST targets /allocations at microversion 1.13 and threads
        the global request id through.
        """
        expected_microversion = '1.13'
        resp = self.client.set_and_clear_allocations(
            self.context, self.rp_uuid, self.consumer_uuid, self.data,
            self.project_id, self.user_id)
        self.assertTrue(resp)
        self.mock_post.assert_called_once_with(
            self.expected_url, mock.ANY,
            version=expected_microversion,
            global_request_id=self.context.global_id)

    def test_payload_no_clear(self):
        """Without consumer_to_clear, only the target consumer appears in
        the posted payload.
        """
        expected_payload = {
            self.consumer_uuid: {
                'user_id': self.user_id,
                'project_id': self.project_id,
                'allocations': {
                    self.rp_uuid: {
                        'resources': {
                            'MEMORY_MB': 1024
                        }
                    }
                }
            }
        }
        resp = self.client.set_and_clear_allocations(
            self.context, self.rp_uuid, self.consumer_uuid, self.data,
            self.project_id, self.user_id)
        self.assertTrue(resp)
        # The payload is the second positional argument of post().
        args, kwargs = self.mock_post.call_args
        payload = args[1]
        self.assertEqual(expected_payload, payload)

    def test_payload_with_clear(self):
        """With consumer_to_clear, the cleared consumer is posted with an
        empty allocations dict alongside the target consumer.
        """
        expected_payload = {
            self.consumer_uuid: {
                'user_id': self.user_id,
                'project_id': self.project_id,
                'allocations': {
                    self.rp_uuid: {
                        'resources': {
                            'MEMORY_MB': 1024
                        }
                    }
                }
            },
            mock.sentinel.migration_uuid: {
                'user_id': self.user_id,
                'project_id': self.project_id,
                'allocations': {}
            }
        }
        resp = self.client.set_and_clear_allocations(
            self.context, self.rp_uuid, self.consumer_uuid, self.data,
            self.project_id, self.user_id,
            consumer_to_clear=mock.sentinel.migration_uuid)
        self.assertTrue(resp)
        args, kwargs = self.mock_post.call_args
        payload = args[1]
        self.assertEqual(expected_payload, payload)

    @mock.patch('time.sleep')
    def test_409_concurrent_update(self, mock_sleep):
        """A "concurrently updated" 409 is retried a fixed number of times
        before the method gives up and returns False.
        """
        self.mock_post.return_value.status_code = 409
        self.mock_post.return_value.text = 'concurrently updated'
        resp = self.client.set_and_clear_allocations(
            self.context, self.rp_uuid, self.consumer_uuid, self.data,
            self.project_id, self.user_id,
            consumer_to_clear=mock.sentinel.migration_uuid)
        self.assertFalse(resp)
        # Post was attempted four times.
        self.assertEqual(4, self.mock_post.call_count)

    @mock.patch('nova.scheduler.client.report.LOG.warning')
    def test_not_409_failure(self, mock_log):
        """Any non-409 error status fails immediately and logs a warning
        that includes the response text.
        """
        error_message = 'placement not there'
        self.mock_post.return_value.status_code = 503
        self.mock_post.return_value.text = error_message
        resp = self.client.set_and_clear_allocations(
            self.context, self.rp_uuid, self.consumer_uuid, self.data,
            self.project_id, self.user_id,
            consumer_to_clear=mock.sentinel.migration_uuid)
        self.assertFalse(resp)
        # Inspect the warning call: format string, then its args mapping.
        args, kwargs = mock_log.call_args
        log_message = args[0]
        log_args = args[1]
        self.assertIn('Unable to post allocations', log_message)
        self.assertEqual(error_message, log_args['text'])
class TestProviderOperations(SchedulerReportClientTestCase):
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_traits')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_sharing_providers')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    def test_ensure_resource_provider_get(self, get_rpt_mock, get_shr_mock,
            get_trait_mock, get_agg_mock, get_inv_mock, create_rp_mock):
        # No resource provider exists in the client's cache, so validate that
        # if we get the resource provider from the placement API that we don't
        # try to create the resource provider.
        get_rpt_mock.return_value = [{
            'uuid': uuids.compute_node,
            'name': mock.sentinel.name,
            'generation': 1,
        }]
        get_inv_mock.return_value = None
        get_agg_mock.return_value = set([uuids.agg1])
        get_trait_mock.return_value = set(['CUSTOM_GOLD'])
        get_shr_mock.return_value = []
        self.client._ensure_resource_provider(self.context, uuids.compute_node)
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
        # The provider tree was populated with exactly the aggregates and
        # traits the placement mocks returned -- no more, no less.
        get_agg_mock.assert_called_once_with(self.context, uuids.compute_node)
        self.assertTrue(
            self.client._provider_tree.in_aggregates(uuids.compute_node,
                                                     [uuids.agg1]))
        self.assertFalse(
            self.client._provider_tree.in_aggregates(uuids.compute_node,
                                                     [uuids.agg2]))
        get_trait_mock.assert_called_once_with(self.context,
                                               uuids.compute_node)
        self.assertTrue(
            self.client._provider_tree.has_traits(uuids.compute_node,
                                                  ['CUSTOM_GOLD']))
        self.assertFalse(
            self.client._provider_tree.has_traits(uuids.compute_node,
                                                  ['CUSTOM_SILVER']))
        # Sharing providers are looked up by the aggregates found above.
        get_shr_mock.assert_called_once_with(self.context, set([uuids.agg1]))
        self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
        self.assertFalse(create_rp_mock.called)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    def test_ensure_resource_provider_create_fail(self, get_rpt_mock,
            refresh_mock, create_rp_mock):
        # No resource provider exists in the client's cache, and
        # _create_provider raises, indicating there was an error with the
        # create call. Ensure we don't populate the resource provider cache
        get_rpt_mock.return_value = []
        create_rp_mock.side_effect = exception.ResourceProviderCreationFailed(
            name=uuids.compute_node)
        self.assertRaises(
            exception.ResourceProviderCreationFailed,
            self.client._ensure_resource_provider, self.context,
            uuids.compute_node)
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context, uuids.compute_node, uuids.compute_node,
            parent_provider_uuid=None)
        self.assertFalse(self.client._provider_tree.exists(uuids.compute_node))
        self.assertFalse(refresh_mock.called)
        # Querying the tree for a provider it doesn't contain raises
        # ValueError -- proof the cache was left untouched.
        self.assertRaises(
            ValueError,
            self.client._provider_tree.in_aggregates, uuids.compute_node, [])
        self.assertRaises(
            ValueError,
            self.client._provider_tree.has_traits, uuids.compute_node, [])
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider', return_value=None)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    def test_ensure_resource_provider_create_no_placement(self, get_rpt_mock,
            refresh_mock, create_rp_mock):
        # No resource provider exists in the client's cache, and
        # @safe_connect on _create_resource_provider returns None because
        # Placement isn't running yet. Ensure we don't populate the resource
        # provider cache.
        get_rpt_mock.return_value = []
        self.assertRaises(
            exception.ResourceProviderCreationFailed,
            self.client._ensure_resource_provider, self.context,
            uuids.compute_node)
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context, uuids.compute_node, uuids.compute_node,
            parent_provider_uuid=None)
        self.assertFalse(self.client._provider_tree.exists(uuids.compute_node))
        refresh_mock.assert_not_called()
        # Querying the tree for a provider it doesn't contain raises
        # ValueError -- proof the cache was left untouched.
        self.assertRaises(
            ValueError,
            self.client._provider_tree.in_aggregates, uuids.compute_node, [])
        self.assertRaises(
            ValueError,
            self.client._provider_tree.has_traits, uuids.compute_node, [])
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_and_get_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    def test_ensure_resource_provider_create(self, get_rpt_mock,
                                             refresh_inv_mock,
                                             refresh_assoc_mock,
                                             create_rp_mock):
        # No resource provider exists in the client's cache and no resource
        # provider was returned from the placement API, so verify that in this
        # case we try to create the resource provider via the placement API.
        get_rpt_mock.return_value = []
        create_rp_mock.return_value = {
            'uuid': uuids.compute_node,
            'name': 'compute-name',
            'generation': 1,
        }
        self.assertEqual(
            uuids.compute_node,
            self.client._ensure_resource_provider(self.context,
                                                  uuids.compute_node))
        # The cache now reflects exactly what the create call returned.
        self._validate_provider(uuids.compute_node, name='compute-name',
                                generation=1, parent_uuid=None,
                                aggregates=set(), traits=set())
        # We don't refresh for a just-created provider
        refresh_inv_mock.assert_not_called()
        refresh_assoc_mock.assert_not_called()
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context,
            uuids.compute_node,
            uuids.compute_node,  # name param defaults to UUID if None
            parent_provider_uuid=None,
        )
        self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
        create_rp_mock.reset_mock()
        # Validate the path where we specify a name (don't default to the UUID)
        self.client._ensure_resource_provider(
            self.context, uuids.cn2, 'a-name')
        create_rp_mock.assert_called_once_with(
            self.context, uuids.cn2, 'a-name', parent_provider_uuid=None)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations', new=mock.Mock())
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    def test_ensure_resource_provider_tree(self, get_rpt_mock, create_rp_mock):
        """Test _ensure_resource_provider with a tree of providers."""
        def _create_resource_provider(context, uuid, name,
                                      parent_provider_uuid=None):
            """Mock side effect for creating the RP with the specified args."""
            return {
                'uuid': uuid,
                'name': name,
                'generation': 0,
                'parent_provider_uuid': parent_provider_uuid
            }
        create_rp_mock.side_effect = _create_resource_provider
        # Not initially in the placement database, so we have to create it.
        get_rpt_mock.return_value = []
        # Create the root
        root = self.client._ensure_resource_provider(self.context, uuids.root)
        self.assertEqual(uuids.root, root)
        # Now create a child
        child1 = self.client._ensure_resource_provider(
            self.context, uuids.child1, name='junior',
            parent_provider_uuid=uuids.root)
        self.assertEqual(uuids.child1, child1)
        # If we re-ensure the child, we get the object from the tree, not a
        # newly-created one - i.e. the early .find() works like it should.
        self.assertIs(child1,
                      self.client._ensure_resource_provider(self.context,
                                                            uuids.child1))
        # Make sure we can create a grandchild
        grandchild = self.client._ensure_resource_provider(
            self.context, uuids.grandchild,
            parent_provider_uuid=uuids.child1)
        self.assertEqual(uuids.grandchild, grandchild)
        # Now create a second child of the root and make sure it doesn't wind
        # up in some crazy wrong place like under child1 or grandchild
        child2 = self.client._ensure_resource_provider(
            self.context, uuids.child2, parent_provider_uuid=uuids.root)
        self.assertEqual(uuids.child2, child2)
        # At this point we should get all the providers.
        self.assertEqual(
            set([uuids.root, uuids.child1, uuids.child2, uuids.grandchild]),
            set(self.client._provider_tree.get_provider_uuids()))
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_and_get_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    def test_ensure_resource_provider_refresh_fetch(self, mock_ref_assoc,
                                                    mock_ref_inv, mock_gpit):
        """Make sure refreshes are called with the appropriate UUIDs and flags
        when we fetch the provider tree from placement.
        """
        tree_uuids = set([uuids.root, uuids.one, uuids.two])
        mock_gpit.return_value = [{'uuid': u, 'name': u, 'generation': 42}
                                  for u in tree_uuids]
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
        mock_gpit.assert_called_once_with(self.context, uuids.root)
        # Inventory is refreshed for every provider in the fetched tree...
        mock_ref_inv.assert_has_calls([mock.call(self.context, uuid)
                                       for uuid in tree_uuids])
        # ...and associations are force-refreshed with the generation that
        # came back from placement.
        mock_ref_assoc.assert_has_calls(
            [mock.call(self.context, uuid, generation=42, force=True)
             for uuid in tree_uuids])
        self.assertEqual(tree_uuids,
                         set(self.client._provider_tree.get_provider_uuids()))
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    def test_ensure_resource_provider_refresh_create(self, mock_refresh,
                                                     mock_create, mock_gpit):
        """Make sure refresh is not called when we create the RP."""
        mock_gpit.return_value = []
        # Note the created record deliberately carries a different uuid
        # (uuids.cn) than the one requested (uuids.root); the cache is
        # populated from the returned record.
        mock_create.return_value = {'name': 'cn', 'uuid': uuids.cn,
                                    'generation': 42}
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
        mock_gpit.assert_called_once_with(self.context, uuids.root)
        mock_create.assert_called_once_with(self.context, uuids.root,
                                            uuids.root,
                                            parent_provider_uuid=None)
        mock_refresh.assert_not_called()
        self.assertEqual([uuids.cn],
                         self.client._provider_tree.get_provider_uuids())
    def test_get_allocation_candidates(self):
        """A full ResourceRequest (granular groups, traits, member_of) is
        translated into the expected sorted /allocation_candidates query
        string, and the response payload is passed back through.
        """
        resp_mock = mock.Mock(status_code=200)
        json_data = {
            'allocation_requests': mock.sentinel.alloc_reqs,
            'provider_summaries': mock.sentinel.p_sums,
        }
        resources = scheduler_utils.ResourceRequest.from_extra_specs({
            'resources:VCPU': '1',
            'resources:MEMORY_MB': '1024',
            'trait:HW_CPU_X86_AVX': 'required',
            'trait:CUSTOM_TRAIT1': 'required',
            'trait:CUSTOM_TRAIT2': 'preferred',
            'trait:CUSTOM_TRAIT3': 'forbidden',
            'trait:CUSTOM_TRAIT4': 'forbidden',
            'resources1:DISK_GB': '30',
            'trait1:STORAGE_DISK_SSD': 'required',
            'resources2:VGPU': '2',
            'trait2:HW_GPU_RESOLUTION_W2560H1600': 'required',
            'trait2:HW_GPU_API_VULKAN': 'required',
            'resources3:SRIOV_NET_VF': '1',
            'resources3:CUSTOM_NET_EGRESS_BYTES_SEC': '125000',
            'group_policy': 'isolate',
            # These are ignored because misspelled, bad value, etc.
            'resources02:CUSTOM_WIDGET': '123',
            'trait:HW_NIC_OFFLOAD_LRO': 'preferred',
            'group_policy3': 'none',
        })
        resources.get_request_group(None).member_of = [
            ('agg1', 'agg2', 'agg3'), ('agg1', 'agg2')]
        expected_path = '/allocation_candidates'
        # Query params are expected in sorted order with comma-joined,
        # sorted values; forbidden traits carry the '!' prefix.
        expected_query = [
            ('group_policy', 'isolate'),
            ('limit', '1000'),
            ('member_of', 'in:agg1,agg2'),
            ('member_of', 'in:agg1,agg2,agg3'),
            ('required', 'CUSTOM_TRAIT1,HW_CPU_X86_AVX,!CUSTOM_TRAIT3,'
                         '!CUSTOM_TRAIT4'),
            ('required1', 'STORAGE_DISK_SSD'),
            ('required2', 'HW_GPU_API_VULKAN,HW_GPU_RESOLUTION_W2560H1600'),
            ('resources', 'MEMORY_MB:1024,VCPU:1'),
            ('resources1', 'DISK_GB:30'),
            ('resources2', 'VGPU:2'),
            ('resources3', 'CUSTOM_NET_EGRESS_BYTES_SEC:125000,SRIOV_NET_VF:1')
        ]
        resp_mock.json.return_value = json_data
        self.ks_adap_mock.get.return_value = resp_mock
        alloc_reqs, p_sums, allocation_request_version = (
            self.client.get_allocation_candidates(self.context, resources))
        # Parse the actual URL so the comparison is order-insensitive on
        # the raw string but exact on the decoded query pairs.
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qsl(split_url.query)
        self.assertEqual(expected_path, split_url.path)
        self.assertEqual(expected_query, query)
        expected_url = '/allocation_candidates?%s' % parse.urlencode(
            expected_query)
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, raise_exc=False, microversion='1.25',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
        self.assertEqual(mock.sentinel.p_sums, p_sums)
    def test_get_ac_no_trait_bogus_group_policy_custom_limit(self):
        """With no traits, an invalid group_policy value, and a custom
        max_placement_results, the query contains only the configured limit
        and the resources params (the bogus group_policy is dropped).
        """
        self.flags(max_placement_results=42, group='scheduler')
        resp_mock = mock.Mock(status_code=200)
        json_data = {
            'allocation_requests': mock.sentinel.alloc_reqs,
            'provider_summaries': mock.sentinel.p_sums,
        }
        resources = scheduler_utils.ResourceRequest.from_extra_specs({
            'resources:VCPU': '1',
            'resources:MEMORY_MB': '1024',
            'resources1:DISK_GB': '30',
            'group_policy': 'bogus',
        })
        expected_path = '/allocation_candidates'
        expected_query = [
            ('limit', '42'),
            ('resources', 'MEMORY_MB:1024,VCPU:1'),
            ('resources1', 'DISK_GB:30'),
        ]
        resp_mock.json.return_value = json_data
        self.ks_adap_mock.get.return_value = resp_mock
        alloc_reqs, p_sums, allocation_request_version = (
            self.client.get_allocation_candidates(self.context, resources))
        # Validate the decoded query pairs exactly.
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qsl(split_url.query)
        self.assertEqual(expected_path, split_url.path)
        self.assertEqual(expected_query, query)
        expected_url = '/allocation_candidates?%s' % parse.urlencode(
            expected_query)
        self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, raise_exc=False, microversion='1.25',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(mock.sentinel.p_sums, p_sums)
def test_get_allocation_candidates_not_found(self):
# Ensure _get_resource_provider() just returns None when the placement
# API doesn't find a resource provider matching a UUID
resp_mock = mock.Mock(status_code=404)
self.ks_adap_mock.get.return_value = resp_mock
expected_path = '/allocation_candidates'
expected_query = {'resources': ['MEMORY_MB:1024'],
'limit': ['100']}
# Make sure we're also honoring the configured limit
self.flags(max_placement_results=100, group='scheduler')
resources = scheduler_utils.ResourceRequest.from_extra_specs(
{'resources:MEMORY_MB': '1024'})
res = self.client.get_allocation_candidates(self.context, resources)
self.ks_adap_mock.get.assert_called_once_with(
mock.ANY, raise_exc=False, microversion='1.25',
headers={'X-Openstack-Request-Id': self.context.global_id})
url = self.ks_adap_mock.get.call_args[0][0]
split_url = parse.urlsplit(url)
query = parse.parse_qs(split_url.query)
self.assertEqual(expected_path, split_url.path)
self.assertEqual(expected_query, query)
self.assertIsNone(res[0])
def test_get_resource_provider_found(self):
# Ensure _get_resource_provider() returns a dict of resource provider
# if it finds a resource provider record from the placement API
uuid = uuids.compute_node
resp_mock = mock.Mock(status_code=200)
json_data = {
'uuid': uuid,
'name': uuid,
'generation': 42,
'parent_provider_uuid': None,
}
resp_mock.json.return_value = json_data
self.ks_adap_mock.get.return_value = resp_mock
result = self.client._get_resource_provider(self.context, uuid)
expected_provider_dict = dict(
uuid=uuid,
name=uuid,
generation=42,
parent_provider_uuid=None,
)
expected_url = '/resource_providers/' + uuid
self.ks_adap_mock.get.assert_called_once_with(
expected_url, raise_exc=False, microversion='1.14',
headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertEqual(expected_provider_dict, result)
def test_get_resource_provider_not_found(self):
# Ensure _get_resource_provider() just returns None when the placement
# API doesn't find a resource provider matching a UUID
resp_mock = mock.Mock(status_code=404)
self.ks_adap_mock.get.return_value = resp_mock
uuid = uuids.compute_node
result = self.client._get_resource_provider(self.context, uuid)
expected_url = '/resource_providers/' + uuid
self.ks_adap_mock.get.assert_called_once_with(
expected_url, raise_exc=False, microversion='1.14',
headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertIsNone(result)
    @mock.patch.object(report.LOG, 'error')
    def test_get_resource_provider_error(self, logging_mock):
        # Ensure _get_resource_provider() sets the error flag when trying to
        # communicate with the placement API and not getting an error we can
        # deal with
        resp_mock = mock.Mock(status_code=503)
        self.ks_adap_mock.get.return_value = resp_mock
        # The response carries a placement request id which must make it
        # into the error log.
        self.ks_adap_mock.get.return_value.headers = {
            'x-openstack-request-id': uuids.request_id}
        uuid = uuids.compute_node
        self.assertRaises(
            exception.ResourceProviderRetrievalFailed,
            self.client._get_resource_provider, self.context, uuid)
        expected_url = '/resource_providers/' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, raise_exc=False, microversion='1.14',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # A 503 Service Unavailable should trigger an error log that
        # includes the placement request id and return None
        # from _get_resource_provider()
        self.assertTrue(logging_mock.called)
        self.assertEqual(uuids.request_id,
                         logging_mock.call_args[0][1]['placement_req_id'])
    def test_get_sharing_providers(self):
        """_get_sharing_providers() queries /resource_providers filtered by
        the given aggregates and the MISC_SHARES_VIA_AGGREGATE trait, and
        returns the provider records verbatim.
        """
        resp_mock = mock.Mock(status_code=200)
        rpjson = [
            {
                'uuid': uuids.sharing1,
                'name': 'bandwidth_provider',
                'generation': 42,
                'parent_provider_uuid': None,
                'root_provider_uuid': None,
                'links': [],
            },
            {
                'uuid': uuids.sharing2,
                'name': 'storage_provider',
                'generation': 42,
                'parent_provider_uuid': None,
                'root_provider_uuid': None,
                'links': [],
            },
        ]
        resp_mock.json.return_value = {'resource_providers': rpjson}
        self.ks_adap_mock.get.return_value = resp_mock
        result = self.client._get_sharing_providers(
            self.context, [uuids.agg1, uuids.agg2])
        expected_url = ('/resource_providers?member_of=in:' +
                        ','.join((uuids.agg1, uuids.agg2)) +
                        '&required=MISC_SHARES_VIA_AGGREGATE')
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.18', raise_exc=False,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(rpjson, result)
def test_get_sharing_providers_emptylist(self):
self.assertEqual(
[], self.client._get_sharing_providers(self.context, []))
self.ks_adap_mock.get.assert_not_called()
    @mock.patch.object(report.LOG, 'error')
    def test_get_sharing_providers_error(self, logging_mock):
        # Ensure _get_sharing_providers() logs an error and raises if the
        # placement API call doesn't respond 200
        resp_mock = mock.Mock(status_code=503)
        self.ks_adap_mock.get.return_value = resp_mock
        # The response carries a placement request id which must make it
        # into the error log.
        self.ks_adap_mock.get.return_value.headers = {
            'x-openstack-request-id': uuids.request_id}
        uuid = uuids.agg
        self.assertRaises(exception.ResourceProviderRetrievalFailed,
                          self.client._get_sharing_providers,
                          self.context, [uuid])
        expected_url = ('/resource_providers?member_of=in:' + uuid +
                        '&required=MISC_SHARES_VIA_AGGREGATE')
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, raise_exc=False, microversion='1.18',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # A 503 Service Unavailable should trigger an error log that
        # includes the placement request id
        self.assertTrue(logging_mock.called)
        self.assertEqual(uuids.request_id,
                         logging_mock.call_args[0][1]['placement_req_id'])
    def test_get_providers_in_tree(self):
        # Ensure _get_providers_in_tree() returns a list of resource
        # provider dicts if it finds a resource provider record from the
        # placement API
        root = uuids.compute_node
        child = uuids.child
        resp_mock = mock.Mock(status_code=200)
        # Two providers: a root and a child parented to it.
        rpjson = [
            {
                'uuid': root,
                'name': 'daddy', 'generation': 42,
                'parent_provider_uuid': None,
            },
            {
                'uuid': child,
                'name': 'junior',
                'generation': 42,
                'parent_provider_uuid': root,
            },
        ]
        resp_mock.json.return_value = {'resource_providers': rpjson}
        self.ks_adap_mock.get.return_value = resp_mock
        result = self.client._get_providers_in_tree(self.context, root)
        # The query filters by in_tree=<root uuid>.
        expected_url = '/resource_providers?in_tree=' + root
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, raise_exc=False, microversion='1.14',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(rpjson, result)
    @mock.patch.object(report.LOG, 'error')
    def test_get_providers_in_tree_error(self, logging_mock):
        # Ensure _get_providers_in_tree() logs an error and raises if the
        # placement API call doesn't respond 200
        resp_mock = mock.Mock(status_code=503)
        self.ks_adap_mock.get.return_value = resp_mock
        # The response carries a placement request id which must make it
        # into the error log.
        self.ks_adap_mock.get.return_value.headers = {
            'x-openstack-request-id': 'req-' + uuids.request_id}
        uuid = uuids.compute_node
        self.assertRaises(exception.ResourceProviderRetrievalFailed,
                          self.client._get_providers_in_tree, self.context,
                          uuid)
        expected_url = '/resource_providers?in_tree=' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, raise_exc=False, microversion='1.14',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # A 503 Service Unavailable should trigger an error log that includes
        # the placement request id
        self.assertTrue(logging_mock.called)
        self.assertEqual('req-' + uuids.request_id,
                         logging_mock.call_args[0][1]['placement_req_id'])
def test_create_resource_provider(self):
"""Test that _create_resource_provider() sends a dict of resource
provider information without a parent provider UUID.
"""
uuid = uuids.compute_node
name = 'computehost'
resp_mock = mock.Mock(status_code=200)
self.ks_adap_mock.post.return_value = resp_mock
self.assertEqual(
resp_mock.json.return_value,
self.client._create_resource_provider(self.context, uuid, name))
expected_payload = {
'uuid': uuid,
'name': name,
}
expected_url = '/resource_providers'
self.ks_adap_mock.post.assert_called_once_with(
expected_url, json=expected_payload, raise_exc=False,
microversion='1.20',
headers={'X-Openstack-Request-Id': self.context.global_id})
def test_create_resource_provider_with_parent(self):
    """Test that when specifying a parent provider UUID, that the
    parent_provider_uuid part of the payload is properly specified.
    """
    parent_uuid = uuids.parent
    uuid = uuids.compute_node
    name = 'computehost'
    resp_mock = mock.Mock(status_code=200)
    self.ks_adap_mock.post.return_value = resp_mock
    self.assertEqual(
        resp_mock.json.return_value,
        self.client._create_resource_provider(
            self.context,
            uuid,
            name,
            parent_provider_uuid=parent_uuid,
        )
    )
    # The parent UUID must be forwarded verbatim in the POST payload.
    expected_payload = {
        'uuid': uuid,
        'name': name,
        'parent_provider_uuid': parent_uuid,
    }
    expected_url = '/resource_providers'
    self.ks_adap_mock.post.assert_called_once_with(
        expected_url, json=expected_payload, raise_exc=False,
        microversion='1.20',
        headers={'X-Openstack-Request-Id': self.context.global_id})
@mock.patch.object(report.LOG, 'info')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            '_get_resource_provider')
def test_create_resource_provider_concurrent_create(self, get_rp_mock,
                                                    logging_mock):
    # Ensure _create_resource_provider() returns a dict of resource
    # provider gotten from _get_resource_provider() if the call to create
    # the resource provider in the placement API returned a 409 Conflict,
    # indicating another thread concurrently created the resource provider
    # record.
    uuid = uuids.compute_node
    name = 'computehost'
    # A 409 whose body is NOT a name conflict is treated as a concurrent
    # create by another thread (see the name-conflict test below).
    self.ks_adap_mock.post.return_value = fake_requests.FakeResponse(
        409, content='not a name conflict',
        headers={'x-openstack-request-id': uuids.request_id})
    get_rp_mock.return_value = mock.sentinel.get_rp
    result = self.client._create_resource_provider(self.context, uuid,
                                                   name)
    expected_payload = {
        'uuid': uuid,
        'name': name,
    }
    expected_url = '/resource_providers'
    self.ks_adap_mock.post.assert_called_once_with(
        expected_url, json=expected_payload, raise_exc=False,
        microversion='1.20',
        headers={'X-Openstack-Request-Id': self.context.global_id})
    # The client falls back to fetching the provider the other thread made.
    self.assertEqual(mock.sentinel.get_rp, result)
    # The 409 response will produce a message to the info log.
    self.assertTrue(logging_mock.called)
    self.assertEqual(uuids.request_id,
                     logging_mock.call_args[0][1]['placement_req_id'])
def test_create_resource_provider_name_conflict(self):
    # When the API call to create the resource provider fails 409 with a
    # name conflict, we raise an exception.
    # The client distinguishes this from a concurrent-create 409 by the
    # "Conflicting resource provider name" text in the response body.
    self.ks_adap_mock.post.return_value = fake_requests.FakeResponse(
        409, content='<stuff>Conflicting resource provider name: foo '
                     'already exists.</stuff>')
    self.assertRaises(
        exception.ResourceProviderCreationFailed,
        self.client._create_resource_provider, self.context,
        uuids.compute_node, 'foo')
@mock.patch.object(report.LOG, 'error')
def test_create_resource_provider_error(self, logging_mock):
    # Ensure _create_resource_provider() sets the error flag when trying to
    # communicate with the placement API and not getting an error we can
    # deal with
    uuid = uuids.compute_node
    name = 'computehost'
    self.ks_adap_mock.post.return_value = fake_requests.FakeResponse(
        503, headers={'x-openstack-request-id': uuids.request_id})
    self.assertRaises(
        exception.ResourceProviderCreationFailed,
        self.client._create_resource_provider, self.context, uuid, name)
    expected_payload = {
        'uuid': uuid,
        'name': name,
    }
    expected_url = '/resource_providers'
    self.ks_adap_mock.post.assert_called_once_with(
        expected_url, json=expected_payload, raise_exc=False,
        microversion='1.20',
        headers={'X-Openstack-Request-Id': self.context.global_id})
    # A 503 Service Unavailable should log an error that
    # includes the placement request id and
    # _create_resource_provider() should return None
    self.assertTrue(logging_mock.called)
    self.assertEqual(uuids.request_id,
                     logging_mock.call_args[0][1]['placement_req_id'])
def test_put_empty(self):
    """A PUT with an empty (but not None) payload passes the empty
    payload through to the adapter.

    Regression test for bug #1744786.
    """
    path = '/resource_providers/%s/aggregates' % uuids.foo
    self.client.put(path, [])
    self.ks_adap_mock.put.assert_called_once_with(
        path, json=[], raise_exc=False, microversion=None, headers={})
def test_delete_provider(self):
    """Both 204 (deleted) and 404 (already gone) are treated as success
    and evict the provider from the local caches.
    """
    delete_mock = fake_requests.FakeResponse(None)
    self.ks_adap_mock.delete.return_value = delete_mock
    for status_code in (204, 404):
        delete_mock.status_code = status_code
        # Seed the caches
        self.client._provider_tree.new_root('compute', uuids.root,
                                            generation=0)
        self.client._association_refresh_time[uuids.root] = 1234
        self.client._delete_provider(uuids.root, global_request_id='gri')
        self.ks_adap_mock.delete.assert_called_once_with(
            '/resource_providers/' + uuids.root,
            headers={'X-Openstack-Request-Id': 'gri'}, microversion=None,
            raise_exc=False)
        # Provider tree and refresh-time caches are both invalidated.
        self.assertFalse(self.client._provider_tree.exists(uuids.root))
        self.assertNotIn(uuids.root, self.client._association_refresh_time)
        self.ks_adap_mock.delete.reset_mock()
def test_delete_provider_fail(self):
    """Error statuses from the DELETE map to the appropriate exceptions."""
    fake_resp = fake_requests.FakeResponse(None)
    self.ks_adap_mock.delete.return_value = fake_resp
    cases = [
        (409, exception.ResourceProviderInUse),
        (503, exception.ResourceProviderDeletionFailed),
    ]
    for code, expected_exc in cases:
        fake_resp.status_code = code
        self.assertRaises(
            expected_exc, self.client._delete_provider, uuids.root)
        self.ks_adap_mock.delete.assert_called_once_with(
            '/resource_providers/' + uuids.root, microversion=None,
            headers={}, raise_exc=False)
        self.ks_adap_mock.delete.reset_mock()
def test_set_aggregates_for_provider(self):
    """A successful PUT updates placement and the provider tree cache."""
    aggs = [uuids.agg1, uuids.agg2]
    resp_mock = mock.Mock(status_code=200)
    resp_mock.json.return_value = {
        'aggregates': aggs,
    }
    self.ks_adap_mock.put.return_value = resp_mock
    # Prime the provider tree cache
    self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
    self.assertEqual(set(),
                     self.client._provider_tree.data(uuids.rp).aggregates)
    self.client.set_aggregates_for_provider(self.context, uuids.rp, aggs)
    self.ks_adap_mock.put.assert_called_once_with(
        '/resource_providers/%s/aggregates' % uuids.rp, json=aggs,
        raise_exc=False, microversion='1.1',
        headers={'X-Openstack-Request-Id': self.context.global_id})
    # Cache was updated
    self.assertEqual(set(aggs),
                     self.client._provider_tree.data(uuids.rp).aggregates)
def test_set_aggregates_for_provider_fail(self):
    """A non-2xx PUT raises and leaves the aggregate cache untouched."""
    put_resp = mock.Mock(status_code=503)
    self.ks_adap_mock.put.return_value = put_resp
    # Seed the provider tree cache with an aggregate-free provider.
    self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
    self.assertRaises(
        exception.ResourceProviderUpdateFailed,
        self.client.set_aggregates_for_provider,
        self.context, uuids.rp, [uuids.agg])
    # The failed update must not have been written to the cache.
    cached = self.client._provider_tree.data(uuids.rp).aggregates
    self.assertEqual(set(), cached)
class TestAggregates(SchedulerReportClientTestCase):
    """Tests for SchedulerReportClient._get_provider_aggregates()."""

    def test_get_provider_aggregates_found(self):
        """A 200 response returns the provider's aggregates as a set."""
        uuid = uuids.compute_node
        resp_mock = mock.Mock(status_code=200)
        aggs = [
            uuids.agg1,
            uuids.agg2,
        ]
        resp_mock.json.return_value = {'aggregates': aggs}
        self.ks_adap_mock.get.return_value = resp_mock
        result = self.client._get_provider_aggregates(self.context, uuid)
        expected_url = '/resource_providers/' + uuid + '/aggregates'
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, raise_exc=False, microversion='1.1',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # The list payload is normalized to a set by the client.
        self.assertEqual(set(aggs), result)

    @mock.patch.object(report.LOG, 'error')
    def test_get_provider_aggregates_error(self, log_mock):
        """Test that when the placement API returns any error when looking up a
        provider's aggregates, we raise an exception.
        """
        uuid = uuids.compute_node
        resp_mock = mock.Mock(headers={
            'x-openstack-request-id': uuids.request_id})
        self.ks_adap_mock.get.return_value = resp_mock
        # Client errors and server errors alike map to the same exception.
        for status_code in (400, 404, 503):
            resp_mock.status_code = status_code
            self.assertRaises(
                exception.ResourceProviderAggregateRetrievalFailed,
                self.client._get_provider_aggregates, self.context, uuid)
            expected_url = '/resource_providers/' + uuid + '/aggregates'
            self.ks_adap_mock.get.assert_called_once_with(
                expected_url, raise_exc=False, microversion='1.1',
                headers={'X-Openstack-Request-Id': self.context.global_id})
            # The error log must carry the placement request id.
            self.assertTrue(log_mock.called)
            self.assertEqual(uuids.request_id,
                             log_mock.call_args[0][1]['placement_req_id'])
            self.ks_adap_mock.get.reset_mock()
            log_mock.reset_mock()
class TestTraits(SchedulerReportClientTestCase):
    """Tests for the trait-related methods of SchedulerReportClient."""

    # Keyword args common to every /traits request; traits support
    # requires placement microversion 1.6.
    trait_api_kwargs = {'raise_exc': False, 'microversion': '1.6'}

    def test_get_provider_traits_found(self):
        """A 200 response returns the provider's traits as a set."""
        uuid = uuids.compute_node
        resp_mock = mock.Mock(status_code=200)
        traits = [
            'CUSTOM_GOLD',
            'CUSTOM_SILVER',
        ]
        resp_mock.json.return_value = {'traits': traits}
        self.ks_adap_mock.get.return_value = resp_mock
        result = self.client._get_provider_traits(self.context, uuid)
        expected_url = '/resource_providers/' + uuid + '/traits'
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url,
            headers={'X-Openstack-Request-Id': self.context.global_id},
            **self.trait_api_kwargs)
        self.assertEqual(set(traits), result)

    @mock.patch.object(report.LOG, 'error')
    def test_get_provider_traits_error(self, log_mock):
        """Test that when the placement API returns any error when looking up a
        provider's traits, we raise an exception.
        """
        uuid = uuids.compute_node
        resp_mock = mock.Mock(headers={
            'x-openstack-request-id': uuids.request_id})
        self.ks_adap_mock.get.return_value = resp_mock
        # Any non-200 status maps to the same retrieval exception.
        for status_code in (400, 404, 503):
            resp_mock.status_code = status_code
            self.assertRaises(
                exception.ResourceProviderTraitRetrievalFailed,
                self.client._get_provider_traits, self.context, uuid)
            expected_url = '/resource_providers/' + uuid + '/traits'
            self.ks_adap_mock.get.assert_called_once_with(
                expected_url,
                headers={'X-Openstack-Request-Id': self.context.global_id},
                **self.trait_api_kwargs)
            # The error log must carry the placement request id.
            self.assertTrue(log_mock.called)
            self.assertEqual(uuids.request_id,
                             log_mock.call_args[0][1]['placement_req_id'])
            self.ks_adap_mock.get.reset_mock()
            log_mock.reset_mock()

    def test_ensure_traits(self):
        """Successful paths, various permutations of traits existing or needing
        to be created.
        """
        standard_traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA']
        custom_traits = ['CUSTOM_GOLD', 'CUSTOM_SILVER']
        all_traits = standard_traits + custom_traits
        get_mock = mock.Mock(status_code=200)
        self.ks_adap_mock.get.return_value = get_mock
        # Request all traits; custom traits need to be created
        get_mock.json.return_value = {'traits': standard_traits}
        self.client._ensure_traits(self.context, all_traits)
        self.ks_adap_mock.get.assert_called_once_with(
            '/traits?name=in:' + ','.join(all_traits),
            headers={'X-Openstack-Request-Id': self.context.global_id},
            **self.trait_api_kwargs)
        # Only the traits missing from the GET result are PUT, in any order.
        self.ks_adap_mock.put.assert_has_calls(
            [mock.call('/traits/' + trait,
                       headers={'X-Openstack-Request-Id':
                                self.context.global_id},
                       **self.trait_api_kwargs)
             for trait in custom_traits], any_order=True)
        self.ks_adap_mock.reset_mock()
        # Request standard traits; no traits need to be created
        get_mock.json.return_value = {'traits': standard_traits}
        self.client._ensure_traits(self.context, standard_traits)
        self.ks_adap_mock.get.assert_called_once_with(
            '/traits?name=in:' + ','.join(standard_traits),
            headers={'X-Openstack-Request-Id': self.context.global_id},
            **self.trait_api_kwargs)
        self.ks_adap_mock.put.assert_not_called()
        self.ks_adap_mock.reset_mock()
        # Request no traits - short circuit
        self.client._ensure_traits(self.context, None)
        self.client._ensure_traits(self.context, [])
        self.ks_adap_mock.get.assert_not_called()
        self.ks_adap_mock.put.assert_not_called()

    def test_ensure_traits_fail_retrieval(self):
        """A failed GET of existing traits raises and skips any PUTs."""
        self.ks_adap_mock.get.return_value = mock.Mock(status_code=400)
        self.assertRaises(exception.TraitRetrievalFailed,
                          self.client._ensure_traits,
                          self.context, ['FOO'])
        self.ks_adap_mock.get.assert_called_once_with(
            '/traits?name=in:FOO',
            headers={'X-Openstack-Request-Id': self.context.global_id},
            **self.trait_api_kwargs)
        self.ks_adap_mock.put.assert_not_called()

    def test_ensure_traits_fail_creation(self):
        """A failed PUT while creating a missing trait raises."""
        get_mock = mock.Mock(status_code=200)
        get_mock.json.return_value = {'traits': []}
        self.ks_adap_mock.get.return_value = get_mock
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(400)
        self.assertRaises(exception.TraitCreationFailed,
                          self.client._ensure_traits,
                          self.context, ['FOO'])
        self.ks_adap_mock.get.assert_called_once_with(
            '/traits?name=in:FOO',
            headers={'X-Openstack-Request-Id': self.context.global_id},
            **self.trait_api_kwargs)
        self.ks_adap_mock.put.assert_called_once_with(
            '/traits/FOO',
            headers={'X-Openstack-Request-Id': self.context.global_id},
            **self.trait_api_kwargs)

    def test_set_traits_for_provider(self):
        """Happy path: traits exist, provider PUT succeeds, cache updated."""
        traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA']
        # Make _ensure_traits succeed without PUTting
        get_mock = mock.Mock(status_code=200)
        get_mock.json.return_value = {'traits': traits}
        self.ks_adap_mock.get.return_value = get_mock
        # Prime the provider tree cache
        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
        # Mock the /rp/{u}/traits PUT to succeed
        put_mock = mock.Mock(status_code=200)
        put_mock.json.return_value = {'traits': traits,
                                      'resource_provider_generation': 1}
        self.ks_adap_mock.put.return_value = put_mock
        # Invoke
        self.client.set_traits_for_provider(self.context, uuids.rp, traits)
        # Verify API calls
        self.ks_adap_mock.get.assert_called_once_with(
            '/traits?name=in:' + ','.join(traits),
            headers={'X-Openstack-Request-Id': self.context.global_id},
            **self.trait_api_kwargs)
        # The cached generation (0) is sent; the response generation (1)
        # is cached afterwards.
        self.ks_adap_mock.put.assert_called_once_with(
            '/resource_providers/%s/traits' % uuids.rp,
            json={'traits': traits, 'resource_provider_generation': 0},
            headers={'X-Openstack-Request-Id': self.context.global_id},
            **self.trait_api_kwargs)
        # And ensure the provider tree cache was updated appropriately
        self.assertFalse(
            self.client._provider_tree.have_traits_changed(uuids.rp, traits))
        # Validate the generation
        self.assertEqual(
            1, self.client._provider_tree.data(uuids.rp).generation)

    def test_set_traits_for_provider_fail(self):
        """Failure paths: trait retrieval error, 409 conflict, other error."""
        traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA']
        get_mock = mock.Mock()
        self.ks_adap_mock.get.return_value = get_mock
        # Prime the provider tree cache
        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
        # _ensure_traits exception bubbles up
        get_mock.status_code = 400
        self.assertRaises(
            exception.TraitRetrievalFailed,
            self.client.set_traits_for_provider,
            self.context, uuids.rp, traits)
        self.ks_adap_mock.put.assert_not_called()
        get_mock.status_code = 200
        get_mock.json.return_value = {'traits': traits}
        # Conflict
        self.ks_adap_mock.put.return_value = mock.Mock(status_code=409)
        self.assertRaises(
            exception.ResourceProviderUpdateConflict,
            self.client.set_traits_for_provider,
            self.context, uuids.rp, traits)
        # Other error
        self.ks_adap_mock.put.return_value = mock.Mock(status_code=503)
        self.assertRaises(
            exception.ResourceProviderUpdateFailed,
            self.client.set_traits_for_provider,
            self.context, uuids.rp, traits)
class TestAssociations(SchedulerReportClientTestCase):
    """Tests for SchedulerReportClient._refresh_associations()."""

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_traits')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_sharing_providers')
    def test_refresh_associations_no_last(self, mock_shr_get, mock_trait_get,
                                          mock_agg_get):
        """Test that associations are refreshed when stale."""
        uuid = uuids.compute_node
        # Seed the provider tree so _refresh_associations finds the provider
        self.client._provider_tree.new_root('compute', uuid, generation=1)
        mock_agg_get.return_value = set([uuids.agg1])
        mock_trait_get.return_value = set(['CUSTOM_GOLD'])
        self.client._refresh_associations(self.context, uuid)
        mock_agg_get.assert_called_once_with(self.context, uuid)
        mock_trait_get.assert_called_once_with(self.context, uuid)
        # Sharing providers are looked up by the refreshed aggregates.
        mock_shr_get.assert_called_once_with(
            self.context, mock_agg_get.return_value)
        # The refresh timestamp is recorded for staleness checks.
        self.assertIn(uuid, self.client._association_refresh_time)
        self.assertTrue(
            self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
        self.assertFalse(
            self.client._provider_tree.in_aggregates(uuid, [uuids.agg2]))
        self.assertTrue(
            self.client._provider_tree.has_traits(uuid, ['CUSTOM_GOLD']))
        self.assertFalse(
            self.client._provider_tree.has_traits(uuid, ['CUSTOM_SILVER']))

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_traits')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_sharing_providers')
    def test_refresh_associations_no_refresh_sharing(self, mock_shr_get,
                                                     mock_trait_get,
                                                     mock_agg_get):
        """Test refresh_sharing=False."""
        uuid = uuids.compute_node
        # Seed the provider tree so _refresh_associations finds the provider
        self.client._provider_tree.new_root('compute', uuid, generation=1)
        mock_agg_get.return_value = set([uuids.agg1])
        mock_trait_get.return_value = set(['CUSTOM_GOLD'])
        self.client._refresh_associations(self.context, uuid,
                                          refresh_sharing=False)
        mock_agg_get.assert_called_once_with(self.context, uuid)
        mock_trait_get.assert_called_once_with(self.context, uuid)
        # refresh_sharing=False suppresses the sharing-provider lookup only.
        mock_shr_get.assert_not_called()
        self.assertIn(uuid, self.client._association_refresh_time)
        self.assertTrue(
            self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
        self.assertFalse(
            self.client._provider_tree.in_aggregates(uuid, [uuids.agg2]))
        self.assertTrue(
            self.client._provider_tree.has_traits(uuid, ['CUSTOM_GOLD']))
        self.assertFalse(
            self.client._provider_tree.has_traits(uuid, ['CUSTOM_SILVER']))

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_traits')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_sharing_providers')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_associations_stale')
    def test_refresh_associations_not_stale(self, mock_stale, mock_shr_get,
                                            mock_trait_get, mock_agg_get):
        """Test that refresh associations is not called when the map is
        not stale.
        """
        mock_stale.return_value = False
        uuid = uuids.compute_node
        self.client._refresh_associations(self.context, uuid)
        mock_agg_get.assert_not_called()
        mock_trait_get.assert_not_called()
        mock_shr_get.assert_not_called()
        # No refresh happened, so no timestamp was recorded.
        self.assertFalse(self.client._association_refresh_time)

    @mock.patch.object(report.LOG, 'debug')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_traits')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_sharing_providers')
    def test_refresh_associations_time(self, mock_shr_get, mock_trait_get,
                                       mock_agg_get, log_mock):
        """Test that refresh associations is called when the map is stale."""
        uuid = uuids.compute_node
        # Seed the provider tree so _refresh_associations finds the provider
        self.client._provider_tree.new_root('compute', uuid, generation=1)
        mock_agg_get.return_value = set([])
        mock_trait_get.return_value = set([])
        mock_shr_get.return_value = []
        # Called a first time because association_refresh_time is empty.
        now = time.time()
        self.client._refresh_associations(self.context, uuid)
        mock_agg_get.assert_called_once_with(self.context, uuid)
        mock_trait_get.assert_called_once_with(self.context, uuid)
        mock_shr_get.assert_called_once_with(self.context, set())
        log_mock.assert_has_calls([
            mock.call('Refreshing aggregate associations for resource '
                      'provider %s, aggregates: %s', uuid, 'None'),
            mock.call('Refreshing trait associations for resource '
                      'provider %s, traits: %s', uuid, 'None')
        ])
        self.assertIn(uuid, self.client._association_refresh_time)
        # Clear call count.
        mock_agg_get.reset_mock()
        mock_trait_get.reset_mock()
        mock_shr_get.reset_mock()
        # Drive staleness by faking the clock rather than sleeping.
        with mock.patch('time.time') as mock_future:
            # Not called a second time because not enough time has passed.
            mock_future.return_value = (now +
                CONF.compute.resource_provider_association_refresh / 2)
            self.client._refresh_associations(self.context, uuid)
            mock_agg_get.assert_not_called()
            mock_trait_get.assert_not_called()
            mock_shr_get.assert_not_called()
            # Called because time has passed.
            mock_future.return_value = (now +
                CONF.compute.resource_provider_association_refresh + 1)
            self.client._refresh_associations(self.context, uuid)
            mock_agg_get.assert_called_once_with(self.context, uuid)
            mock_trait_get.assert_called_once_with(self.context, uuid)
            mock_shr_get.assert_called_once_with(self.context, set())
class TestComputeNodeToInventoryDict(test.NoDBTestCase):
    """Tests for report._compute_node_to_inventory_dict()."""

    def test_compute_node_inventory(self):
        """Reserved amounts come from config; DISK reserved MB is converted
        to GB with ceiling rounding.
        """
        uuid = uuids.compute_node
        name = 'computehost'
        compute_node = objects.ComputeNode(uuid=uuid,
                                           hypervisor_hostname=name,
                                           vcpus=2,
                                           cpu_allocation_ratio=16.0,
                                           memory_mb=1024,
                                           ram_allocation_ratio=1.5,
                                           local_gb=10,
                                           disk_allocation_ratio=1.0)
        self.flags(reserved_host_memory_mb=1000)
        self.flags(reserved_host_disk_mb=200)
        self.flags(reserved_host_cpus=1)
        result = report._compute_node_to_inventory_dict(compute_node)
        expected = {
            'VCPU': {
                'total': compute_node.vcpus,
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': compute_node.vcpus,
                'step_size': 1,
                'allocation_ratio': compute_node.cpu_allocation_ratio,
            },
            'MEMORY_MB': {
                'total': compute_node.memory_mb,
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': compute_node.memory_mb,
                'step_size': 1,
                'allocation_ratio': compute_node.ram_allocation_ratio,
            },
            'DISK_GB': {
                'total': compute_node.local_gb,
                'reserved': 1,  # this is ceil(1000/1024)
                'min_unit': 1,
                'max_unit': compute_node.local_gb,
                'step_size': 1,
                'allocation_ratio': compute_node.disk_allocation_ratio,
            },
        }
        self.assertEqual(expected, result)

    def test_compute_node_inventory_empty(self):
        """A node with zero vcpus/memory/disk yields an empty inventory."""
        uuid = uuids.compute_node
        name = 'computehost'
        compute_node = objects.ComputeNode(uuid=uuid,
                                           hypervisor_hostname=name,
                                           vcpus=0,
                                           cpu_allocation_ratio=16.0,
                                           memory_mb=0,
                                           ram_allocation_ratio=1.5,
                                           local_gb=0,
                                           disk_allocation_ratio=1.0)
        result = report._compute_node_to_inventory_dict(compute_node)
        self.assertEqual({}, result)
class TestInventory(SchedulerReportClientTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            '_ensure_resource_provider')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            '_update_inventory')
def test_update_compute_node(self, mock_ui, mock_erp):
    """update_compute_node() ensures the provider exists and then pushes
    the inventory derived from the compute node record.
    """
    cn = self.compute_node
    self.client.update_compute_node(self.context, cn)
    mock_erp.assert_called_once_with(self.context, cn.uuid,
                                    cn.hypervisor_hostname)
    # Inventory is built from the compute node fields plus the reserved_*
    # config options (see _compute_node_to_inventory_dict).
    expected_inv_data = {
        'VCPU': {
            'total': 8,
            'reserved': CONF.reserved_host_cpus,
            'min_unit': 1,
            'max_unit': 8,
            'step_size': 1,
            'allocation_ratio': 16.0,
        },
        'MEMORY_MB': {
            'total': 1024,
            'reserved': 512,
            'min_unit': 1,
            'max_unit': 1024,
            'step_size': 1,
            'allocation_ratio': 1.5,
        },
        'DISK_GB': {
            'total': 10,
            'reserved': 0,
            'min_unit': 1,
            'max_unit': 10,
            'step_size': 1,
            'allocation_ratio': 1.0,
        },
    }
    mock_ui.assert_called_once_with(
        self.context,
        cn.uuid,
        expected_inv_data,
    )
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            '_ensure_resource_provider')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            '_update_inventory')
def test_update_compute_node_no_inv(self, mock_ui, mock_erp):
    """Ensure that if there are no inventory records, we still call
    _update_inventory().
    """
    cn = self.compute_node
    # Zero out all resources so the inventory dict comes back empty.
    cn.vcpus = 0
    cn.memory_mb = 0
    cn.local_gb = 0
    self.client.update_compute_node(self.context, cn)
    mock_erp.assert_called_once_with(self.context, cn.uuid,
                                    cn.hypervisor_hostname)
    # The empty dict is still pushed so placement can delete stale
    # inventory.
    mock_ui.assert_called_once_with(self.context, cn.uuid, {})
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'get')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'put')
def test_update_inventory_initial_empty(self, mock_put, mock_get):
    # Ensure _update_inventory() returns a list of Inventories objects
    # after creating or updating the existing values
    uuid = uuids.compute_node
    compute_node = self.compute_node
    # Make sure the resource provider exists for preventing to call the API
    self._init_provider_tree(resources_override={})
    # GET returns placement's current view at generation 43.
    mock_get.return_value.json.return_value = {
        'resource_provider_generation': 43,
        'inventories': {
            'VCPU': {'total': 16},
            'MEMORY_MB': {'total': 1024},
            'DISK_GB': {'total': 10},
        }
    }
    # PUT succeeds and bumps the generation to 44.
    mock_put.return_value.status_code = 200
    mock_put.return_value.json.return_value = {
        'resource_provider_generation': 44,
        'inventories': {
            'VCPU': {'total': 16},
            'MEMORY_MB': {'total': 1024},
            'DISK_GB': {'total': 10},
        }
    }
    inv_data = report._compute_node_to_inventory_dict(compute_node)
    result = self.client._update_inventory_attempt(
        self.context, compute_node.uuid, inv_data
    )
    self.assertTrue(result)
    exp_url = '/resource_providers/%s/inventories' % uuid
    mock_get.assert_called_once_with(
        exp_url, global_request_id=self.context.global_id)
    # Updated with the new inventory from the PUT call
    self._validate_provider(uuid, generation=44)
    expected = {
        # Called with the newly-found generation from the existing
        # inventory
        'resource_provider_generation': 43,
        'inventories': {
            'VCPU': {
                'total': 8,
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': compute_node.vcpus,
                'step_size': 1,
                'allocation_ratio': compute_node.cpu_allocation_ratio,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': compute_node.memory_mb,
                'step_size': 1,
                'allocation_ratio': compute_node.ram_allocation_ratio,
            },
            'DISK_GB': {
                'total': 10,
                'reserved': 0,  # reserved_host_disk_mb is 0 by default
                'min_unit': 1,
                'max_unit': compute_node.local_gb,
                'step_size': 1,
                'allocation_ratio': compute_node.disk_allocation_ratio,
            },
        }
    }
    mock_put.assert_called_once_with(
        exp_url, expected, global_request_id=self.context.global_id)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'get')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'put')
def test_update_inventory(self, mock_put, mock_get):
    self.flags(reserved_host_disk_mb=1000)
    # Ensure _update_inventory() returns a list of Inventories objects
    # after creating or updating the existing values
    uuid = uuids.compute_node
    compute_node = self.compute_node
    # Make sure the resource provider exists for preventing to call the API
    self._init_provider_tree()
    new_vcpus_total = 240
    # GET returns placement's current view at generation 43.
    mock_get.return_value.json.return_value = {
        'resource_provider_generation': 43,
        'inventories': {
            'VCPU': {'total': 16},
            'MEMORY_MB': {'total': 1024},
            'DISK_GB': {'total': 10},
        }
    }
    # PUT succeeds and bumps the generation to 44.
    mock_put.return_value.status_code = 200
    mock_put.return_value.json.return_value = {
        'resource_provider_generation': 44,
        'inventories': {
            'VCPU': {'total': new_vcpus_total},
            'MEMORY_MB': {'total': 1024},
            'DISK_GB': {'total': 10},
        }
    }
    inv_data = report._compute_node_to_inventory_dict(compute_node)
    # Make a change to trigger the update...
    inv_data['VCPU']['total'] = new_vcpus_total
    result = self.client._update_inventory_attempt(
        self.context, compute_node.uuid, inv_data
    )
    self.assertTrue(result)
    exp_url = '/resource_providers/%s/inventories' % uuid
    mock_get.assert_called_once_with(
        exp_url, global_request_id=self.context.global_id)
    # Updated with the new inventory from the PUT call
    self._validate_provider(uuid, generation=44)
    expected = {
        # Called with the newly-found generation from the existing
        # inventory
        'resource_provider_generation': 43,
        'inventories': {
            'VCPU': {
                'total': new_vcpus_total,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': compute_node.vcpus,
                'step_size': 1,
                'allocation_ratio': compute_node.cpu_allocation_ratio,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': compute_node.memory_mb,
                'step_size': 1,
                'allocation_ratio': compute_node.ram_allocation_ratio,
            },
            'DISK_GB': {
                'total': 10,
                'reserved': 1,  # this is ceil for 1000MB
                'min_unit': 1,
                'max_unit': compute_node.local_gb,
                'step_size': 1,
                'allocation_ratio': compute_node.disk_allocation_ratio,
            },
        }
    }
    mock_put.assert_called_once_with(
        exp_url, expected, global_request_id=self.context.global_id)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'get')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'put')
def test_update_inventory_no_update(self, mock_put, mock_get):
    """Simulate situation where scheduler client is first starting up and
    ends up loading information from the placement API via a GET against
    the resource provider's inventory but has no local cached inventory
    information for a resource provider.
    """
    uuid = uuids.compute_node
    compute_node = self.compute_node
    # Make sure the resource provider exists for preventing to call the API
    self._init_provider_tree(generation_override=42, resources_override={})
    # GET returns an inventory identical to what the compute node would
    # produce, so no PUT should follow.
    mock_get.return_value.json.return_value = {
        'resource_provider_generation': 43,
        'inventories': {
            'VCPU': {
                'total': 8,
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': compute_node.vcpus,
                'step_size': 1,
                'allocation_ratio': compute_node.cpu_allocation_ratio,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': compute_node.memory_mb,
                'step_size': 1,
                'allocation_ratio': compute_node.ram_allocation_ratio,
            },
            'DISK_GB': {
                'total': 10,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': compute_node.local_gb,
                'step_size': 1,
                'allocation_ratio': compute_node.disk_allocation_ratio,
            },
        }
    }
    inv_data = report._compute_node_to_inventory_dict(compute_node)
    result = self.client._update_inventory_attempt(
        self.context, compute_node.uuid, inv_data
    )
    self.assertTrue(result)
    exp_url = '/resource_providers/%s/inventories' % uuid
    mock_get.assert_called_once_with(
        exp_url, global_request_id=self.context.global_id)
    # No update so put should not be called
    self.assertFalse(mock_put.called)
    # Make sure we updated the generation from the inventory records
    self._validate_provider(uuid, generation=43)
@mock.patch.object(report.LOG, 'info')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            '_get_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'put')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            '_ensure_resource_provider')
def test_update_inventory_concurrent_update(self, mock_ensure,
                                            mock_put, mock_get, mock_info):
    # Ensure _update_inventory() returns a list of Inventories objects
    # after creating or updating the existing values
    uuid = uuids.compute_node
    compute_node = self.compute_node
    # Make sure the resource provider exists for preventing to call the API
    self.client._provider_tree.new_root(
        compute_node.hypervisor_hostname,
        compute_node.uuid,
        generation=42,
    )
    mock_get.return_value = {
        'resource_provider_generation': 42,
        'inventories': {},
    }
    # 409 with a generation-mismatch body signals a concurrent update by
    # another writer (distinct from the inventory-in-use 409).
    mock_put.return_value.status_code = 409
    mock_put.return_value.text = 'Does not match inventory in use'
    mock_put.return_value.headers = {'x-openstack-request-id':
                                     uuids.request_id}
    inv_data = report._compute_node_to_inventory_dict(compute_node)
    result = self.client._update_inventory_attempt(
        self.context, compute_node.uuid, inv_data
    )
    # The attempt fails so the caller can retry with fresh data.
    self.assertFalse(result)
    # Invalidated the cache
    self.assertFalse(self.client._provider_tree.exists(uuid))
    # Refreshed our resource provider
    mock_ensure.assert_called_once_with(self.context, uuid)
    # Logged the request id in the log message
    self.assertEqual(uuids.request_id,
                     mock_info.call_args[0][1]['placement_req_id'])
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            '_get_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'put')
def test_update_inventory_inventory_in_use(self, mock_put, mock_get):
    # Ensure _update_inventory() returns a list of Inventories objects
    # after creating or updating the existing values
    uuid = uuids.compute_node
    compute_node = self.compute_node
    # Make sure the resource provider exists for preventing to call the API
    self.client._provider_tree.new_root(
        compute_node.hypervisor_hostname,
        compute_node.uuid,
        generation=42,
    )
    mock_get.return_value = {
        'resource_provider_generation': 42,
        'inventories': {},
    }
    # A 409 whose body says inventory is "in use" means allocations still
    # reference it; the client raises rather than retrying.
    mock_put.return_value.status_code = 409
    mock_put.return_value.text = (
        "update conflict: Inventory for VCPU on "
        "resource provider 123 in use"
    )
    inv_data = report._compute_node_to_inventory_dict(compute_node)
    self.assertRaises(
        exception.InventoryInUse,
        self.client._update_inventory_attempt,
        self.context,
        compute_node.uuid,
        inv_data,
    )
    # Did NOT invalidate the cache
    self.assertTrue(self.client._provider_tree.exists(uuid))
    @mock.patch.object(report.LOG, 'info')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'put')
    def test_update_inventory_unknown_response(self, mock_put, mock_get,
                                               mock_info):
        # An unexpected (non-error but unrecognized) status code from the
        # PUT makes _update_inventory_attempt() return False while leaving
        # the cached provider intact.
        uuid = uuids.compute_node
        compute_node = self.compute_node
        # Make sure the resource provider exists for preventing to call the API
        self.client._provider_tree.new_root(
            compute_node.hypervisor_hostname,
            compute_node.uuid,
            generation=42,
        )
        mock_get.return_value = {
            'resource_provider_generation': 42,
            'inventories': {},
        }
        mock_put.return_value.status_code = 234
        mock_put.return_value.headers = {'x-openstack-request-id':
                                         uuids.request_id}
        inv_data = report._compute_node_to_inventory_dict(compute_node)
        result = self.client._update_inventory_attempt(
            self.context, compute_node.uuid, inv_data
        )
        self.assertFalse(result)
        # No cache invalidation
        self.assertTrue(self.client._provider_tree.exists(uuid))
    @mock.patch.object(report.LOG, 'warning')
    @mock.patch.object(report.LOG, 'debug')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'put')
    def test_update_inventory_failed(self, mock_put, mock_get,
                                     mock_debug, mock_warn):
        # A 400 error from the PUT makes _update_inventory_attempt() return
        # False without invalidating the cached provider, and the placement
        # request id is carried into both the debug and warning log calls.
        uuid = uuids.compute_node
        compute_node = self.compute_node
        # Make sure the resource provider exists for preventing to call the API
        self.client._provider_tree.new_root(
            compute_node.hypervisor_hostname,
            compute_node.uuid,
            generation=42,
        )
        mock_get.return_value = {
            'resource_provider_generation': 42,
            'inventories': {},
        }
        mock_put.return_value = fake_requests.FakeResponse(
            400, headers={'x-openstack-request-id': uuids.request_id})
        inv_data = report._compute_node_to_inventory_dict(compute_node)
        result = self.client._update_inventory_attempt(
            self.context, compute_node.uuid, inv_data
        )
        self.assertFalse(result)
        # No cache invalidation
        self.assertTrue(self.client._provider_tree.exists(uuid))
        # Logged the request id in the log messages
        self.assertEqual(uuids.request_id,
                         mock_debug.call_args[0][1]['placement_req_id'])
        self.assertEqual(uuids.request_id,
                         mock_warn.call_args[0][1]['placement_req_id'])
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory_attempt')
    @mock.patch('time.sleep')
    def test_update_inventory_fails_and_then_succeeds(self, mock_sleep,
                                                      mock_update,
                                                      mock_ensure):
        # Ensure _update_inventory() fails if we have a conflict when updating
        # but retries correctly.
        cn = self.compute_node
        # First attempt fails, the retry succeeds.
        mock_update.side_effect = (False, True)
        self.client._provider_tree.new_root(
            cn.hypervisor_hostname,
            cn.uuid,
            generation=42,
        )
        result = self.client._update_inventory(
            self.context, cn.uuid, mock.sentinel.inv_data
        )
        self.assertTrue(result)
        # Only slept once
        mock_sleep.assert_called_once_with(1)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_ensure_resource_provider')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_update_inventory_attempt')
@mock.patch('time.sleep')
def test_update_inventory_never_succeeds(self, mock_sleep,
mock_update,
mock_ensure):
# but retries correctly.
cn = self.compute_node
mock_update.side_effect = (False, False, False)
self.client._provider_tree.new_root(
cn.hypervisor_hostname,
cn.uuid,
generation=42,
)
result = self.client._update_inventory(
self.context, cn.uuid, mock.sentinel.inv_data
)
self.assertFalse(result)
# Slept three times
mock_sleep.assert_has_calls([mock.call(1), mock.call(1), mock.call(1)])
# Three attempts to update
mock_update.assert_has_calls([
mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
])
# Slept three times
mock_sleep.assert_has_calls([mock.call(1), mock.call(1), mock.call(1)])
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_no_custom(self, mock_erp, mock_erc,
                                                  mock_upd):
        """Tests that inventory records of all standard resource classes are
        passed to the report client's _update_inventory() method.
        """
        inv_data = {
            'VCPU': {
                'total': 24,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 24,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 1024,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'DISK_GB': {
                'total': 100,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 100,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
        }
        self.client.set_inventory_for_provider(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            inv_data,
        )
        # The provider is ensured first (no parent specified).
        mock_erp.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            parent_provider_uuid=None,
        )
        # No custom resource classes to ensure...
        mock_erc.assert_called_once_with(self.context,
                                         set(['VCPU', 'MEMORY_MB', 'DISK_GB']))
        mock_upd.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            inv_data,
        )
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_no_inv(self, mock_erp, mock_erc,
                                               mock_upd):
        """Tests that passing empty set of inventory records triggers a delete
        of inventory for the provider.
        """
        inv_data = {}
        self.client.set_inventory_for_provider(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            inv_data,
        )
        mock_erp.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            parent_provider_uuid=None,
        )
        # No resource classes at all to ensure for an empty inventory.
        mock_erc.assert_called_once_with(self.context, set())
        # The empty dict is still pushed through, clearing any inventory.
        mock_upd.assert_called_once_with(
            self.context, mock.sentinel.rp_uuid, {})
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_with_custom(self, mock_erp, mock_erc,
                                                    mock_upd):
        """Tests that inventory records that include a custom resource class
        are passed to the report client's _update_inventory() method and that
        the custom resource class is auto-created.
        """
        inv_data = {
            'VCPU': {
                'total': 24,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 24,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 1024,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'DISK_GB': {
                'total': 100,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 100,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'CUSTOM_IRON_SILVER': {
                'total': 1,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 1,
                'step_size': 1,
                'allocation_ratio': 1.0,
            }
        }
        self.client.set_inventory_for_provider(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            inv_data,
        )
        mock_erp.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            parent_provider_uuid=None,
        )
        # The CUSTOM_* class is included in the ensure call alongside the
        # standard classes.
        mock_erc.assert_called_once_with(
            self.context,
            set(['VCPU', 'MEMORY_MB', 'DISK_GB', 'CUSTOM_IRON_SILVER']))
        mock_upd.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            inv_data,
        )
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes', new=mock.Mock())
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_with_parent(self, mock_erp):
        """Ensure parent UUID is sent through."""
        # _ensure_resource_classes is stubbed out entirely (new=mock.Mock());
        # only the provider-ensure call is of interest here.
        self.client.set_inventory_for_provider(
            self.context, uuids.child, 'junior', {},
            parent_provider_uuid=uuids.parent)
        mock_erp.assert_called_once_with(
            self.context, uuids.child, 'junior',
            parent_provider_uuid=uuids.parent)
class TestAllocations(SchedulerReportClientTestCase):
    """Tests translating instances to allocation records and creating,
    updating and deleting allocations in the placement service.
    """

    @mock.patch('nova.compute.utils.is_volume_backed_instance')
    def test_instance_to_allocations_dict(self, mock_vbi):
        """Flavor resources map to allocations; per the expected value,
        1023MB of swap rounds up so DISK_GB = 10 root + 100 ephemeral + 1.
        """
        mock_vbi.return_value = False
        inst = objects.Instance(
            uuid=uuids.inst,
            flavor=objects.Flavor(root_gb=10,
                                  swap=1023,
                                  ephemeral_gb=100,
                                  memory_mb=1024,
                                  vcpus=2,
                                  extra_specs={}))
        result = report._instance_to_allocations_dict(inst)
        expected = {
            'MEMORY_MB': 1024,
            'VCPU': 2,
            'DISK_GB': 111,
        }
        self.assertEqual(expected, result)

    @mock.patch('nova.compute.utils.is_volume_backed_instance')
    def test_instance_to_allocations_dict_overrides(self, mock_vbi):
        """Test that resource overrides in an instance's flavor extra_specs
        are reported to placement.
        """
        mock_vbi.return_value = False
        specs = {
            # Valid overrides (CUSTOM_DAN, VCPU) are applied; the remaining
            # entries (unknown classes, non-numeric amounts, malformed keys,
            # unrelated specs) are absent from the expected result.
            'resources:CUSTOM_DAN': '123',
            'resources:%s' % fields.ResourceClass.VCPU: '4',
            'resources:NOTATHING': '456',
            'resources:NOTEVENANUMBER': 'catfood',
            'resources:': '7',
            'resources:ferret:weasel': 'smelly',
            'foo': 'bar',
        }
        inst = objects.Instance(
            uuid=uuids.inst,
            flavor=objects.Flavor(root_gb=10,
                                  swap=1023,
                                  ephemeral_gb=100,
                                  memory_mb=1024,
                                  vcpus=2,
                                  extra_specs=specs))
        result = report._instance_to_allocations_dict(inst)
        expected = {
            'MEMORY_MB': 1024,
            'VCPU': 4,
            'DISK_GB': 111,
            'CUSTOM_DAN': 123,
        }
        self.assertEqual(expected, result)

    @mock.patch('nova.compute.utils.is_volume_backed_instance')
    def test_instance_to_allocations_dict_boot_from_volume(self, mock_vbi):
        """Volume-backed instances do not report root disk; DISK_GB here is
        100 ephemeral + 1 (rounded-up swap).
        """
        mock_vbi.return_value = True
        inst = objects.Instance(
            uuid=uuids.inst,
            flavor=objects.Flavor(root_gb=10,
                                  swap=1,
                                  ephemeral_gb=100,
                                  memory_mb=1024,
                                  vcpus=2,
                                  extra_specs={}))
        result = report._instance_to_allocations_dict(inst)
        expected = {
            'MEMORY_MB': 1024,
            'VCPU': 2,
            'DISK_GB': 101,
        }
        self.assertEqual(expected, result)

    @mock.patch('nova.compute.utils.is_volume_backed_instance')
    def test_instance_to_allocations_dict_zero_disk(self, mock_vbi):
        """With no local disk at all, no DISK_GB key is reported."""
        mock_vbi.return_value = True
        inst = objects.Instance(
            uuid=uuids.inst,
            flavor=objects.Flavor(root_gb=10,
                                  swap=0,
                                  ephemeral_gb=0,
                                  memory_mb=1024,
                                  vcpus=2,
                                  extra_specs={}))
        result = report._instance_to_allocations_dict(inst)
        expected = {
            'MEMORY_MB': 1024,
            'VCPU': 2,
        }
        self.assertEqual(expected, result)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'put')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get')
    @mock.patch('nova.scheduler.client.report.'
                '_instance_to_allocations_dict')
    def test_update_instance_allocation_new(self, mock_a, mock_get,
                                            mock_put):
        """With no existing allocations, a PUT at microversion 1.8 is made
        including the project and user ids.
        """
        cn = objects.ComputeNode(uuid=uuids.cn)
        inst = objects.Instance(uuid=uuids.inst, project_id=uuids.project,
                                user_id=uuids.user)
        mock_get.return_value.json.return_value = {'allocations': {}}
        expected = {
            'allocations': [
                {'resource_provider': {'uuid': cn.uuid},
                 'resources': mock_a.return_value}],
            'project_id': inst.project_id,
            'user_id': inst.user_id,
        }
        self.client.update_instance_allocation(self.context, cn, inst, 1)
        mock_put.assert_called_once_with(
            '/allocations/%s' % inst.uuid,
            expected, version='1.8',
            global_request_id=self.context.global_id)
        self.assertTrue(mock_get.called)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'put')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get')
    @mock.patch('nova.scheduler.client.report.'
                '_instance_to_allocations_dict')
    def test_update_instance_allocation_existing(self, mock_a, mock_get,
                                                 mock_put):
        """If the existing allocations already match, no PUT is issued."""
        cn = objects.ComputeNode(uuid=uuids.cn)
        inst = objects.Instance(uuid=uuids.inst)
        mock_get.return_value.json.return_value = {'allocations': {
            cn.uuid: {
                'generation': 2,
                'resources': {
                    'DISK_GB': 123,
                    'MEMORY_MB': 456,
                }
            }}
        }
        mock_a.return_value = {
            'DISK_GB': 123,
            'MEMORY_MB': 456,
        }
        self.client.update_instance_allocation(self.context, cn, inst, 1)
        self.assertFalse(mock_put.called)
        mock_get.assert_called_once_with(
            '/allocations/%s' % inst.uuid,
            global_request_id=self.context.global_id)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'put')
    @mock.patch('nova.scheduler.client.report.'
                '_instance_to_allocations_dict')
    @mock.patch.object(report.LOG, 'warning')
    def test_update_instance_allocation_new_failed(self, mock_warn, mock_a,
                                                   mock_put, mock_get):
        """A failed (400) allocation PUT is logged as a warning."""
        cn = objects.ComputeNode(uuid=uuids.cn)
        inst = objects.Instance(uuid=uuids.inst, project_id=uuids.project,
                                user_id=uuids.user)
        mock_put.return_value = fake_requests.FakeResponse(400)
        self.client.update_instance_allocation(self.context, cn, inst, 1)
        self.assertTrue(mock_warn.called)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'delete')
    def test_update_instance_allocation_delete(self, mock_delete):
        """A negative sign deletes the instance's allocations."""
        cn = objects.ComputeNode(uuid=uuids.cn)
        inst = objects.Instance(uuid=uuids.inst)
        self.client.update_instance_allocation(self.context, cn, inst, -1)
        mock_delete.assert_called_once_with(
            '/allocations/%s' % inst.uuid,
            global_request_id=self.context.global_id)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'delete')
    @mock.patch.object(report.LOG, 'warning')
    def test_update_instance_allocation_delete_failed(self, mock_warn,
                                                      mock_delete):
        """A failed (400) allocation DELETE is logged as a warning."""
        cn = objects.ComputeNode(uuid=uuids.cn)
        inst = objects.Instance(uuid=uuids.inst)
        mock_delete.return_value = fake_requests.FakeResponse(400)
        self.client.update_instance_allocation(self.context, cn, inst, -1)
        self.assertTrue(mock_warn.called)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'delete')
    @mock.patch('nova.scheduler.client.report.LOG')
    def test_delete_allocation_for_instance_ignore_404(self, mock_log,
                                                       mock_delete):
        """Tests that we don't log a warning on a 404 response when trying to
        delete an allocation record.
        """
        mock_delete.return_value = fake_requests.FakeResponse(404)
        self.client.delete_allocation_for_instance(self.context, uuids.rp_uuid)
        # make sure we didn't screw up the logic or the mock
        mock_log.info.assert_not_called()
        # make sure warning wasn't called for the 404
        mock_log.warning.assert_not_called()

    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete")
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete_allocation_for_instance")
    @mock.patch("nova.objects.InstanceList.get_by_host_and_node")
    def test_delete_resource_provider_cascade(self, mock_by_host,
                                              mock_del_alloc, mock_delete):
        """cascade=True deletes each instance's allocations before removing
        the provider and dropping it from the local provider-tree cache.
        """
        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
        cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                 hypervisor_hostname="fake_hostname", )
        inst1 = objects.Instance(uuid=uuids.inst1)
        inst2 = objects.Instance(uuid=uuids.inst2)
        mock_by_host.return_value = objects.InstanceList(
            objects=[inst1, inst2])
        resp_mock = mock.Mock(status_code=204)
        mock_delete.return_value = resp_mock
        self.client.delete_resource_provider(self.context, cn, cascade=True)
        self.assertEqual(2, mock_del_alloc.call_count)
        exp_url = "/resource_providers/%s" % uuids.cn
        mock_delete.assert_called_once_with(
            exp_url, global_request_id=self.context.global_id)
        self.assertFalse(self.client._provider_tree.exists(uuids.cn))

    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete")
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete_allocation_for_instance")
    @mock.patch("nova.objects.InstanceList.get_by_host_and_node")
    def test_delete_resource_provider_no_cascade(self, mock_by_host,
                                                 mock_del_alloc, mock_delete):
        """Without cascade, instance allocations are left alone but the
        provider's association-refresh timestamp is dropped.
        """
        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
        self.client._association_refresh_time[uuids.cn] = mock.Mock()
        cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                 hypervisor_hostname="fake_hostname", )
        inst1 = objects.Instance(uuid=uuids.inst1)
        inst2 = objects.Instance(uuid=uuids.inst2)
        mock_by_host.return_value = objects.InstanceList(
            objects=[inst1, inst2])
        resp_mock = mock.Mock(status_code=204)
        mock_delete.return_value = resp_mock
        self.client.delete_resource_provider(self.context, cn)
        mock_del_alloc.assert_not_called()
        exp_url = "/resource_providers/%s" % uuids.cn
        mock_delete.assert_called_once_with(
            exp_url, global_request_id=self.context.global_id)
        self.assertNotIn(uuids.cn, self.client._association_refresh_time)

    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete")
    @mock.patch('nova.scheduler.client.report.LOG')
    def test_delete_resource_provider_log_calls(self, mock_log, mock_delete):
        """204 logs info only, 404 logs nothing, 409 logs an error."""
        # First, check a successful call
        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
        cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                 hypervisor_hostname="fake_hostname", )
        resp_mock = fake_requests.FakeResponse(204)
        mock_delete.return_value = resp_mock
        self.client.delete_resource_provider(self.context, cn)
        # With a 204, only the info should be called
        self.assertEqual(1, mock_log.info.call_count)
        self.assertEqual(0, mock_log.warning.call_count)
        # Now check a 404 response
        mock_log.reset_mock()
        resp_mock.status_code = 404
        self.client.delete_resource_provider(self.context, cn)
        # With a 404, neither log message should be called
        self.assertEqual(0, mock_log.info.call_count)
        self.assertEqual(0, mock_log.warning.call_count)
        # Finally, check a 409 response
        mock_log.reset_mock()
        resp_mock.status_code = 409
        self.client.delete_resource_provider(self.context, cn)
        # With a 409, only the error should be called
        self.assertEqual(0, mock_log.info.call_count)
        self.assertEqual(1, mock_log.error.call_count)
class TestResourceClass(SchedulerReportClientTestCase):
    """Tests for ensuring (custom) resource classes exist in placement."""

    def setUp(self):
        super(TestResourceClass, self).setUp()
        # Patch the low-level placement PUT for every test; the cleanup is
        # registered before start() so the patch is always undone.
        _put_patch = mock.patch(
            "nova.scheduler.client.report.SchedulerReportClient.put")
        self.addCleanup(_put_patch.stop)
        self.mock_put = _put_patch.start()

    def test_ensure_resource_classes(self):
        """Only the CUSTOM_* classes trigger a PUT; standard ones do not."""
        rcs = ['VCPU', 'CUSTOM_FOO', 'MEMORY_MB', 'CUSTOM_BAR']
        self.client._ensure_resource_classes(self.context, rcs)
        self.mock_put.assert_has_calls([
            mock.call('/resource_classes/%s' % rc, None, version='1.7',
                      global_request_id=self.context.global_id)
            for rc in ('CUSTOM_FOO', 'CUSTOM_BAR')
        ], any_order=True)

    def test_ensure_resource_classes_none(self):
        """Empty iterables of any type result in no placement calls."""
        for empty in ([], (), set(), {}):
            self.client._ensure_resource_classes(self.context, empty)
        self.mock_put.assert_not_called()

    def test_ensure_resource_classes_put_fail(self):
        """A failing PUT raises InvalidResourceClass."""
        self.mock_put.return_value = fake_requests.FakeResponse(503)
        rcs = ['VCPU', 'MEMORY_MB', 'CUSTOM_BAD']
        self.assertRaises(
            exception.InvalidResourceClass,
            self.client._ensure_resource_classes, self.context, rcs)
        # Only called with the "bad" one
        self.mock_put.assert_called_once_with(
            '/resource_classes/CUSTOM_BAD', None, version='1.7',
            global_request_id=self.context.global_id)
| phenoxim/nova | nova/tests/unit/scheduler/client/test_report.py | Python | apache-2.0 | 143,398 |
package com.bnsantos.movies.model;
import com.j256.ormlite.field.DatabaseField;
/**
 * ORMLite-persisted value object holding the set of hyperlink URLs returned
 * by the movie API for a single movie (self, alternate, cast, reviews and
 * similar endpoints).
 *
 * <p>Created by bruno on 14/11/14.
 */
public class Links {

    // Auto-generated primary key for the local database row.
    @DatabaseField(generatedId = true, columnName = "id")
    private Integer id;

    // URL of the movie resource itself.
    @DatabaseField()
    private String self;

    // Alternate (e.g. web page) URL for the movie.
    @DatabaseField()
    private String alternate;

    // URL of the movie's cast listing.
    @DatabaseField()
    private String cast;

    // URL of the movie's reviews listing.
    @DatabaseField()
    private String reviews;

    // URL of the similar-movies listing.
    @DatabaseField()
    private String similar;

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public String getSelf() {
        return self;
    }

    public void setSelf(String self) {
        this.self = self;
    }

    public String getAlternate() {
        return alternate;
    }

    public void setAlternate(String alternate) {
        this.alternate = alternate;
    }

    public String getCast() {
        return cast;
    }

    public void setCast(String cast) {
        this.cast = cast;
    }

    public String getReviews() {
        return reviews;
    }

    public void setReviews(String reviews) {
        this.reviews = reviews;
    }

    public String getSimilar() {
        return similar;
    }

    public void setSimilar(String similar) {
        this.similar = similar;
    }
}
| bnsantos/android-javarx-example | app/src/main/java/com/bnsantos/movies/model/Links.java | Java | apache-2.0 | 1,305 |
package com.mytian.lb.bean.push;
import com.core.openapi.OpenApiBaseRequestAdapter;
import com.core.util.StringUtil;
import java.util.HashMap;
import java.util.Map;
/**
 * Request parameter object for updating a user's push channel id.
 *
 * <p>All three attributes (uid, token, channelId) are mandatory.
 */
public class UpdateChannelidParam extends OpenApiBaseRequestAdapter {

    /** Id of the user whose push channel is being updated. */
    private String uid;

    /** Session token authorising the request. */
    private String token;

    /** New push channel id to register for the user. */
    private String channelId;

    public String getUid() {
        return uid;
    }

    public void setUid(String uid) {
        this.uid = uid;
    }

    public String getToken() {
        return token;
    }

    public void setToken(String token) {
        this.token = token;
    }

    public String getChannelId() {
        return channelId;
    }

    public void setChannelId(String channelId) {
        this.channelId = channelId;
    }

    /** A request is valid only when uid, token and channelId are all non-blank. */
    @Override
    public boolean validate() {
        return StringUtil.isNotBlank(this.uid)
                && StringUtil.isNotBlank(this.token)
                && StringUtil.isNotBlank(this.channelId);
    }

    /**
     * Copies the attributes into {@code param}. Blank attributes are skipped
     * unless {@code includeEmptyAttr} is set.
     *
     * <p>Note: {@code includeEmptyAttr || (!includeEmptyAttr && X)} is
     * logically equivalent to {@code includeEmptyAttr || X}, so the
     * simplified form is used here; behavior is unchanged.
     */
    @Override
    public void fill2Map(Map<String, Object> param, boolean includeEmptyAttr) {
        if (includeEmptyAttr || StringUtil.isNotBlank(uid)) {
            param.put("uid", uid);
        }
        if (includeEmptyAttr || StringUtil.isNotBlank(token)) {
            param.put("token", token);
        }
        if (includeEmptyAttr || StringUtil.isNotBlank(channelId)) {
            // NOTE(review): this key is "parent.channelId" while the others
            // use the bare field name -- presumably dictated by the server
            // API; confirm before renaming.
            param.put("parent.channelId", channelId);
        }
    }

    @Override
    public String toString() {
        return "UpdateChannelidParam{" +
                "uid='" + uid + '\'' +
                ", token='" + token + '\'' +
                ", channelId='" + channelId + '\'' +
                '}';
    }
}
| tengbinlive/aibao_demo | app/src/main/java/com/mytian/lb/bean/push/UpdateChannelidParam.java | Java | apache-2.0 | 1,799 |
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.rest.form;
import java.text.MessageFormat;
import org.apache.commons.lang3.StringUtils;
/**
* @author Yvo Swillens
*/
public final class FormRestUrls {

    public static final String SEGMENT_FORM_RESOURCES = "form";
    public static final String SEGMENT_REPOSITORY_RESOURCES = "form-repository";
    public static final String SEGMENT_RUNTIME_FORM_DEFINITION = "runtime-form-definition";
    public static final String SEGMENT_COMPLETED_FORM_DEFINITION = "completed-form-definition";
    public static final String SEGMENT_DEPLOYMENT_RESOURCES = "deployments";
    public static final String SEGMENT_DEPLOYMENT_ARTIFACT_RESOURCE_CONTENT = "resourcedata";
    public static final String SEGMENT_FORMS_RESOURCES = "forms";
    public static final String SEGMENT_FORM_MODEL = "model";
    public static final String SEGMENT_FORM_INSTANCES_RESOURCES = "form-instances";
    public static final String SEGMENT_QUERY_RESOURCES = "query";

    /**
     * URL template for a form collection: <i>/form/runtime-form-definition</i>
     */
    public static final String[] URL_RUNTIME_TASK_FORM = { SEGMENT_FORM_RESOURCES, SEGMENT_RUNTIME_FORM_DEFINITION };

    /**
     * URL template for a form collection: <i>/form/completed-form-definition</i>
     */
    public static final String[] URL_COMPLETED_TASK_FORM = { SEGMENT_FORM_RESOURCES, SEGMENT_COMPLETED_FORM_DEFINITION };

    /**
     * URL template for the form collection: <i>/form-repository/forms</i>
     */
    public static final String[] URL_FORM_COLLECTION = { SEGMENT_REPOSITORY_RESOURCES, SEGMENT_FORMS_RESOURCES };

    /**
     * URL template for a single form: <i>/form-repository/forms/{0:formId}</i>
     */
    public static final String[] URL_FORM = { SEGMENT_REPOSITORY_RESOURCES, SEGMENT_FORMS_RESOURCES, "{0}" };

    /**
     * URL template for a single form model: <i>/form-repository/form/{0:formId}/model</i>
     *
     * <p>NOTE(review): this uses SEGMENT_FORM_RESOURCES ("form") while
     * URL_FORM above uses SEGMENT_FORMS_RESOURCES ("forms") -- possibly an
     * inconsistency; the constant is left untouched because callers depend
     * on the produced URL. Confirm against the registered REST endpoints.
     */
    public static final String[] URL_FORM_MODEL = { SEGMENT_REPOSITORY_RESOURCES, SEGMENT_FORM_RESOURCES, "{0}", SEGMENT_FORM_MODEL };

    /**
     * URL template for the resource of a single form: <i>/form-repository/form/{0:formId}/resourcedata</i>
     *
     * <p>NOTE(review): same "form" vs "forms" question as URL_FORM_MODEL.
     */
    public static final String[] URL_FORM_RESOURCE_CONTENT = { SEGMENT_REPOSITORY_RESOURCES, SEGMENT_FORM_RESOURCES, "{0}", SEGMENT_DEPLOYMENT_ARTIFACT_RESOURCE_CONTENT };

    /**
     * URL template for a deployment collection: <i>/form-repository/deployments</i>
     */
    public static final String[] URL_DEPLOYMENT_COLLECTION = { SEGMENT_REPOSITORY_RESOURCES, SEGMENT_DEPLOYMENT_RESOURCES };

    /**
     * URL template for a single deployment: <i>/form-repository/deployments/{0:deploymentId}</i>
     */
    public static final String[] URL_DEPLOYMENT = { SEGMENT_REPOSITORY_RESOURCES, SEGMENT_DEPLOYMENT_RESOURCES, "{0}" };

    /**
     * URL template for the resource of a single deployment: <i>/form-repository/deployments/{0:deploymentId}/resourcedata/{1:resourceId}</i>
     */
    public static final String[] URL_DEPLOYMENT_RESOURCE_CONTENT = { SEGMENT_REPOSITORY_RESOURCES, SEGMENT_DEPLOYMENT_RESOURCES, "{0}", SEGMENT_DEPLOYMENT_ARTIFACT_RESOURCE_CONTENT, "{1}" };

    /**
     * URL template for the resource of a form instance query: <i>/query/form-instances</i>
     */
    public static final String[] URL_FORM_INSTANCE_QUERY = { SEGMENT_QUERY_RESOURCES, SEGMENT_FORM_INSTANCES_RESOURCES };

    /**
     * Creates an url based on the passed fragments and replaces any placeholders with the given arguments. The placeholders are following the {@link MessageFormat} convention (eg. {0} is replaced by
     * first argument value).
     *
     * <p>Uses {@link String#join} from the JDK rather than commons-lang's
     * StringUtils.join: identical output for a plain separator, one fewer
     * third-party dependency. The redundant {@code final} modifier on this
     * static method was also dropped.
     */
    public static String createRelativeResourceUrl(String[] segments, Object... arguments) {
        return MessageFormat.format(String.join("/", segments), arguments);
    }
}
| robsoncardosoti/flowable-engine | modules/flowable-form-rest/src/main/java/org/flowable/rest/form/FormRestUrls.java | Java | apache-2.0 | 4,399 |
package weka.classifiers.lazy.AM.label;
import org.hamcrest.core.StringContains;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import weka.classifiers.lazy.AM.TestUtils;
import weka.core.Instances;
/**
 * Test aspects of {@link IntLabeler} that are not applicable to other
 * {@link Labeler} implementations.
 *
 * @author Nathan Glenn
 */
public class IntLabelerTest {
    // NOTE(review): ExpectedException is deprecated in JUnit 4.13+;
    // Assert.assertThrows is the modern replacement. Left as-is to avoid
    // assuming a JUnit >= 4.13 dependency.
    @Rule
    public final ExpectedException exception = ExpectedException.none();

    /**
     * An instance whose cardinality exceeds what IntLabeler supports (the
     * soybean data set; the error message reports 35 as too high) must be
     * rejected at construction time with IllegalArgumentException.
     */
    @Test
    public void testConstructorCardinalityTooHigh() throws Exception {
        exception.expect(IllegalArgumentException.class);
        exception.expectMessage(new StringContains("Cardinality of instance too high (35)"));
        Instances data = TestUtils.getDataSet(TestUtils.SOYBEAN);
        new IntLabeler(data.get(0), false, MissingDataCompare.MATCH);
    }
}
| garfieldnate/Weka_AnalogicalModeling | src/test/java/weka/classifiers/lazy/AM/label/IntLabelerTest.java | Java | apache-2.0 | 877 |
/*
* Copyright 2015 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static com.google.javascript.jscomp.Es6ToEs3Util.makeIterator;
import com.google.javascript.rhino.IR;
import com.google.javascript.rhino.JSDocInfo;
import com.google.javascript.rhino.JSDocInfoBuilder;
import com.google.javascript.rhino.Node;
import com.google.javascript.rhino.Token;
import com.google.javascript.rhino.TokenStream;
/**
* Rewrites ES6 destructuring patterns and default parameters to valid ES3 code.
*/
public final class Es6RewriteDestructuring implements NodeTraversal.Callback, HotSwapCompilerPass {
  private final AbstractCompiler compiler;

  /** Prefix for names of synthesized temporary variables. */
  static final String DESTRUCTURING_TEMP_VAR = "$jscomp$destructuring$var";

  // Monotonically increasing suffix keeping generated temp names unique.
  private int destructuringVarCounter = 0;

  public Es6RewriteDestructuring(AbstractCompiler compiler) {
    this.compiler = compiler;
  }
  /** Runs the transpile traversal over both the externs and the main AST. */
  @Override
  public void process(Node externs, Node root) {
    TranspilationPasses.processTranspile(compiler, externs, this);
    TranspilationPasses.processTranspile(compiler, root, this);
  }
  /** Re-runs the pass over a single script for hot-swap recompilation. */
  @Override
  public void hotSwapScript(Node scriptRoot, Node originalRoot) {
    TranspilationPasses.hotSwapTranspile(compiler, scriptRoot, this);
  }
  @Override
  public boolean shouldTraverse(NodeTraversal t, Node n, Node parent) {
    // Pre-order hook: functions, parameter lists and for-of loops are
    // rewritten on the way down, so any destructuring patterns they produce
    // or contain are lowered later in visit().
    switch (n.getToken()) {
      case FUNCTION:
        visitFunction(t, n);
        break;
      case PARAM_LIST:
        visitParamList(t, n, parent);
        break;
      case FOR_OF:
        visitForOf(n);
        break;
      default:
        break;
    }
    // Always continue into children.
    return true;
  }
  @Override
  public void visit(NodeTraversal t, Node n, Node parent) {
    // A pattern inside a declaration is wrapped in a DESTRUCTURING_LHS node;
    // hop over it so `parent` refers to the declaration itself.
    if (parent != null && parent.isDestructuringLhs()) {
      parent = parent.getParent();
    }
    switch (n.getToken()) {
      case ARRAY_PATTERN:
      case OBJECT_PATTERN:
        // Post-order: lower the pattern once its children have been visited.
        visitPattern(t, n, parent);
        break;
      default:
        break;
    }
  }
/**
* If the function is an arrow function, wrap the body in a block if it is not already a block.
*/
private void visitFunction(NodeTraversal t, Node function) {
Node body = function.getLastChild();
if (!body.isNormalBlock()) {
body.detach();
Node replacement = IR.block(IR.returnNode(body)).useSourceInfoIfMissingFromForTree(body);
function.addChildToBack(replacement);
t.reportCodeChange();
}
}
  /**
   * Processes trailing default and rest parameters.
   *
   * <p>Default-value parameters become plain parameters whose default is
   * applied by a statement prepended to the function body; destructuring
   * parameters (including a destructured rest) are replaced by a temporary
   * name plus a {@code var} declaration re-introducing the pattern inside
   * the body, where a later traversal lowers it.
   */
  private void visitParamList(NodeTraversal t, Node paramList, Node function) {
    // Last synthesized body statement; each new one is inserted after it so
    // generated statements keep parameter order.
    Node insertSpot = null;
    Node body = function.getLastChild();
    int i = 0;
    Node next = null;
    for (Node param = paramList.getFirstChild(); param != null; param = next, i++) {
      // Cache the sibling up front: `param` may be detached/replaced below.
      next = param.getNext();
      if (param.isDefaultValue()) {
        JSDocInfo jsDoc = param.getJSDocInfo();
        Node nameOrPattern = param.removeFirstChild();
        Node defaultValue = param.removeFirstChild();
        Node newParam;

        // Treat name=undefined (and equivalent) as if it was just name. There
        // is no need to generate a (name===void 0?void 0:name) statement for
        // such arguments.
        boolean isNoop = false;
        if (!nameOrPattern.isName()) {
          // Do not try to optimize unless nameOrPattern is a simple name.
        } else if (defaultValue.isName()) {
          isNoop = "undefined".equals(defaultValue.getString());
        } else if (defaultValue.isVoid()) {
          // Any kind of 'void literal' is fine, but 'void fun()' or anything
          // else with side effects isn't. We're not trying to be particularly
          // smart here and treat 'void {}' for example as if it could cause
          // side effects. Any sane person will type 'name=undefined' or
          // 'name=void 0' so this should not be an issue.
          isNoop = NodeUtil.isImmutableValue(defaultValue.getFirstChild());
        }

        if (isNoop) {
          newParam = nameOrPattern.cloneTree();
        } else {
          // Patterns get a synthesized parameter name; plain names are reused.
          newParam =
              nameOrPattern.isName()
                  ? nameOrPattern
                  : IR.name(getTempParameterName(function, i));
          Node lhs = nameOrPattern.cloneTree();
          Node rhs = defaultValueHook(newParam.cloneTree(), defaultValue);
          // Plain name: `name = (name === void 0 ? default : name);`
          // Pattern:    `var <pattern> = (temp === void 0 ? default : temp);`
          Node newStatement =
              nameOrPattern.isName() ? IR.exprResult(IR.assign(lhs, rhs)) : IR.var(lhs, rhs);
          newStatement.useSourceInfoIfMissingFromForTree(param);
          body.addChildAfter(newStatement, insertSpot);
          insertSpot = newStatement;
        }

        paramList.replaceChild(param, newParam);
        newParam.setOptionalArg(true);
        newParam.setJSDocInfo(jsDoc);

        t.reportCodeChange();
      } else if (param.isDestructuringPattern()) {
        insertSpot =
            replacePatternParamWithTempVar(
                function, insertSpot, param, getTempParameterName(function, i));
        t.reportCodeChange();
      } else if (param.isRest() && param.getFirstChild().isDestructuringPattern()) {
        // `...[a, b]` / `...{x}`: rewrite the pattern inside the rest node.
        insertSpot =
            replacePatternParamWithTempVar(
                function, insertSpot, param.getFirstChild(), getTempParameterName(function, i));
        t.reportCodeChange();
      }
    }
  }
  /**
   * Replace a destructuring pattern parameter with a temporary parameter name and add a new
   * local variable declaration to the function assigning the temporary parameter to the pattern.
   *
   * <p> Note: Rewrites of variable declaration destructuring will happen later to rewrite
   * this declaration as non-destructured code.
   *
   * @param function the function whose parameter list is being rewritten
   * @param insertSpot The local variable declaration will be inserted after this statement.
   * @param patternParam the destructuring pattern currently occupying the parameter slot
   * @param tempVarName the name to use for the temporary variable
   * @return the declaration statement that was generated for the local variable
   */
  private Node replacePatternParamWithTempVar(
      Node function, Node insertSpot, Node patternParam, String tempVarName) {
    // Swap the pattern out of the parameter list for a plain name, carrying over any JSDoc.
    Node newParam = IR.name(tempVarName);
    newParam.setJSDocInfo(patternParam.getJSDocInfo());
    patternParam.replaceWith(newParam);
    // Re-declare the (now detached) pattern inside the function body: var <pattern> = tempVarName;
    Node newDecl = IR.var(patternParam, IR.name(tempVarName));
    function.getLastChild().addChildAfter(newDecl, insertSpot);
    return newDecl;
  }
/**
* Find or create the best name to use for a parameter we need to rewrite.
*
* <ol>
* <li> Use the JS Doc function parameter name at the given index, if possible.
* <li> Otherwise, build one of our own.
* </ol>
* @param function
* @param parameterIndex
* @return name to use for the given parameter
*/
private String getTempParameterName(Node function, int parameterIndex) {
String tempVarName;
JSDocInfo fnJSDoc = NodeUtil.getBestJSDocInfo(function);
if (fnJSDoc != null && fnJSDoc.getParameterNameAt(parameterIndex) != null) {
tempVarName = fnJSDoc.getParameterNameAt(parameterIndex);
} else {
tempVarName = DESTRUCTURING_TEMP_VAR + (destructuringVarCounter++);
}
checkState(TokenStream.isJSIdentifier(tempVarName));
return tempVarName;
}
private void visitForOf(Node node) {
Node lhs = node.getFirstChild();
if (lhs.isDestructuringLhs()) {
visitDestructuringPatternInEnhancedFor(lhs.getFirstChild());
}
}
  /**
   * Routes a destructuring pattern to the appropriate rewriter based on the syntactic
   * position in which the pattern appears. The order of the checks below is significant.
   *
   * @param t the current traversal
   * @param pattern the ARRAY_PATTERN or OBJECT_PATTERN node
   * @param parent the pattern's parent node
   */
  private void visitPattern(NodeTraversal t, Node pattern, Node parent) {
    // 'var/let/const [x] = rhs;' — but not when the declaration is the head of a
    // for-in/for-of, which is handled by the enhanced-for branch below.
    if (NodeUtil.isNameDeclaration(parent) && !NodeUtil.isEnhancedFor(parent.getParent())) {
      replacePattern(t, pattern, pattern.getNext(), parent, parent);
    } else if (parent.isAssign()) {
      if (parent.getParent().isExprResult()) {
        // '[x] = rhs;' used as a statement can be rewritten in place.
        replacePattern(t, pattern, pattern.getNext(), parent, parent.getParent());
      } else {
        // The assignment's value is consumed by an enclosing expression, so it must
        // still produce rhs: wrap it in an immediately-invoked arrow that returns it.
        wrapAssignmentInCallToArrow(t, parent);
      }
    } else if (parent.isRest()
        || parent.isStringKey()
        || parent.isArrayPattern()
        || parent.isDefaultValue()) {
      // Nested pattern; do nothing. We will visit it after rewriting the parent.
    } else if (NodeUtil.isEnhancedFor(parent) || NodeUtil.isEnhancedFor(parent.getParent())) {
      visitDestructuringPatternInEnhancedFor(pattern);
    } else if (parent.isCatch()) {
      visitDestructuringPatternInCatch(pattern);
    } else {
      throw new IllegalStateException("unexpected parent");
    }
  }
private void replacePattern(
NodeTraversal t, Node pattern, Node rhs, Node parent, Node nodeToDetach) {
switch (pattern.getToken()) {
case ARRAY_PATTERN:
replaceArrayPattern(t, pattern, rhs, parent, nodeToDetach);
break;
case OBJECT_PATTERN:
replaceObjectPattern(t, pattern, rhs, parent, nodeToDetach);
break;
default:
throw new IllegalStateException("unexpected");
}
}
  /**
   * Convert 'var {a: b, c: d} = rhs' to:
   *
   * @const var temp = rhs; var b = temp.a; var d = temp.c;
   */
  private void replaceObjectPattern(
      NodeTraversal t, Node objectPattern, Node rhs, Node parent, Node nodeToDetach) {
    String tempVarName = DESTRUCTURING_TEMP_VAR + (destructuringVarCounter++);
    // var temp = rhs; — rhs is evaluated exactly once, so the per-property reads
    // below cannot repeat its side effects.
    Node tempDecl = IR.var(IR.name(tempVarName), rhs.detach())
        .useSourceInfoIfMissingFromForTree(objectPattern);
    // TODO(tbreisacher): Remove the "if" and add this JSDoc unconditionally.
    if (parent.isConst()) {
      JSDocInfoBuilder jsDoc = new JSDocInfoBuilder(false);
      jsDoc.recordConstancy();
      tempDecl.setJSDocInfo(jsDoc.build());
    }
    nodeToDetach.getParent().addChildBefore(tempDecl, nodeToDetach);
    // Emit one read of the temp per pattern element, preserving source order.
    for (Node child = objectPattern.getFirstChild(), next; child != null; child = next) {
      next = child.getNext();
      Node newLHS;
      Node newRHS;
      if (child.isStringKey()) {
        if (!child.hasChildren()) { // converting shorthand
          Node name = IR.name(child.getString());
          name.useSourceInfoIfMissingFrom(child);
          child.addChildToBack(name);
        }
        // Quoted keys must read temp['key'] (GETELEM); unquoted keys read temp.key (GETPROP).
        Node getprop =
            new Node(
                child.isQuotedString() ? Token.GETELEM : Token.GETPROP,
                IR.name(tempVarName),
                IR.string(child.getString()));
        Node value = child.removeFirstChild();
        if (!value.isDefaultValue()) {
          newLHS = value;
          newRHS = getprop;
        } else {
          // {key: lhs = default}: lhs = (temp.key === undefined) ? default : temp.key
          newLHS = value.removeFirstChild();
          Node defaultValue = value.removeFirstChild();
          newRHS = defaultValueHook(getprop, defaultValue);
        }
      } else if (child.isComputedProp()) {
        if (child.getLastChild().isDefaultValue()) {
          // {[expr]: lhs = default}: stash temp[expr] in an intermediate temp so the
          // default-value hook evaluates the computed access only once.
          newLHS = child.getLastChild().removeFirstChild();
          Node getelem = IR.getelem(IR.name(tempVarName), child.removeFirstChild());
          String intermediateTempVarName = DESTRUCTURING_TEMP_VAR + (destructuringVarCounter++);
          Node intermediateDecl = IR.var(IR.name(intermediateTempVarName), getelem);
          intermediateDecl.useSourceInfoIfMissingFromForTree(child);
          nodeToDetach.getParent().addChildBefore(intermediateDecl, nodeToDetach);
          newRHS =
              defaultValueHook(
                  IR.name(intermediateTempVarName), child.getLastChild().removeFirstChild());
        } else {
          // {[expr]: lhs}: lhs = temp[expr]
          newRHS = IR.getelem(IR.name(tempVarName), child.removeFirstChild());
          newLHS = child.removeFirstChild();
        }
      } else if (child.isDefaultValue()) {
        // Shorthand with default, {name = default}: name = (temp.name === undefined) ? ...
        newLHS = child.removeFirstChild();
        Node defaultValue = child.removeFirstChild();
        Node getprop = IR.getprop(IR.name(tempVarName), IR.string(newLHS.getString()));
        newRHS = defaultValueHook(getprop, defaultValue);
      } else {
        throw new IllegalStateException("unexpected child");
      }
      // Mirror the original statement kind: a declaration stays a declaration,
      // an assignment becomes an expression statement.
      Node newNode;
      if (NodeUtil.isNameDeclaration(parent)) {
        newNode = IR.declaration(newLHS, newRHS, parent.getToken());
      } else if (parent.isAssign()) {
        newNode = IR.exprResult(IR.assign(newLHS, newRHS));
      } else {
        throw new IllegalStateException("not reached");
      }
      newNode.useSourceInfoIfMissingFromForTree(child);
      nodeToDetach.getParent().addChildBefore(newNode, nodeToDetach);
      // Explicitly visit the LHS of the new node since it may be a nested
      // destructuring pattern.
      visit(t, newLHS, newLHS.getParent());
    }
    nodeToDetach.detach();
    t.reportCodeChange();
  }
  /**
   * Convert 'var [x, y] = rhs' to: var temp = $jscomp.makeIterator(rhs); var x = temp.next().value;
   * var y = temp.next().value;
   */
  private void replaceArrayPattern(
      NodeTraversal t, Node arrayPattern, Node rhs, Node parent, Node nodeToDetach) {
    String tempVarName = DESTRUCTURING_TEMP_VAR + (destructuringVarCounter++);
    // var temp = $jscomp.makeIterator(rhs);
    Node tempDecl = IR.var(
        IR.name(tempVarName),
        makeIterator(compiler, rhs.detach()));
    tempDecl.useSourceInfoIfMissingFromForTree(arrayPattern);
    nodeToDetach.getParent().addChildBefore(tempDecl, nodeToDetach);
    boolean needsRuntime = false;
    for (Node child = arrayPattern.getFirstChild(), next; child != null; child = next) {
      next = child.getNext();
      if (child.isEmpty()) {
        // An elision like [, x]:
        // Just call the next() method to advance the iterator, but throw away the value.
        Node nextCall = IR.exprResult(IR.call(IR.getprop(IR.name(tempVarName), IR.string("next"))));
        nextCall.useSourceInfoIfMissingFromForTree(child);
        nodeToDetach.getParent().addChildBefore(nextCall, nodeToDetach);
        continue;
      }
      Node newLHS;
      Node newRHS;
      if (child.isDefaultValue()) {
        // [x = defaultValue] = rhs;
        // becomes
        // var temp0 = $jscomp.makeIterator(rhs);
        // var temp1 = temp.next().value
        // x = (temp1 === undefined) ? defaultValue : temp1;
        String nextVarName = DESTRUCTURING_TEMP_VAR + (destructuringVarCounter++);
        Node var = IR.var(
            IR.name(nextVarName),
            IR.getprop(
                IR.call(IR.getprop(IR.name(tempVarName), IR.string("next"))),
                IR.string("value")));
        var.useSourceInfoIfMissingFromForTree(child);
        nodeToDetach.getParent().addChildBefore(var, nodeToDetach);
        newLHS = child.getFirstChild().detach();
        newRHS = defaultValueHook(IR.name(nextVarName), child.getLastChild().detach());
      } else if (child.isRest()) {
        // [...x] = rhs;
        // becomes
        // var temp = $jscomp.makeIterator(rhs);
        // x = $jscomp.arrayFromIterator(temp);
        newLHS = child.getFirstChild().detach();
        newRHS =
            IR.call(
                NodeUtil.newQName(compiler, "$jscomp.arrayFromIterator"),
                IR.name(tempVarName));
        needsRuntime = true;
      } else {
        // LHS is just a name (or a nested pattern).
        // var [x] = rhs;
        // becomes
        // var temp = $jscomp.makeIterator(rhs);
        // var x = temp.next().value;
        newLHS = child.detach();
        newRHS = IR.getprop(
            IR.call(IR.getprop(IR.name(tempVarName), IR.string("next"))),
            IR.string("value"));
      }
      // Mirror the original statement kind: assignment vs. declaration.
      Node newNode;
      if (parent.isAssign()) {
        Node assignment = IR.assign(newLHS, newRHS);
        newNode = IR.exprResult(assignment);
      } else {
        newNode = IR.declaration(newLHS, newRHS, parent.getToken());
      }
      newNode.useSourceInfoIfMissingFromForTree(arrayPattern);
      nodeToDetach.getParent().addChildBefore(newNode, nodeToDetach);
      // Explicitly visit the LHS of the new node since it may be a nested
      // destructuring pattern.
      visit(t, newLHS, newLHS.getParent());
    }
    nodeToDetach.detach();
    // The rest-element rewrite above depends on the $jscomp.arrayFromIterator runtime helper.
    if (needsRuntime) {
      compiler.ensureLibraryInjected("es6/util/arrayfromiterator", false);
    }
    t.reportCodeChange();
  }
  /**
   * Convert the assignment '[x, y] = rhs' that is used as an expression and not an expr result to:
   * (() => let temp0 = rhs; var temp1 = $jscomp.makeIterator(temp0); var x = temp0.next().value;
   * var y = temp0.next().value; return temp0; }) And the assignment '{x: a, y: b} = rhs' used as an
   * expression and not an expr result to: (() => let temp0 = rhs; var temp1 = temp0; var a =
   * temp0.x; var b = temp0.y; return temp0; })
   */
  private void wrapAssignmentInCallToArrow(NodeTraversal t, Node assignment) {
    String tempVarName = DESTRUCTURING_TEMP_VAR + (destructuringVarCounter++);
    // let temp0 = rhs; — evaluate the RHS once, up front.
    Node rhs = assignment.getLastChild().detach();
    Node newAssignment = IR.let(IR.name(tempVarName), rhs);
    // <pattern> = temp0; — rewritten into non-destructured form by replacePattern() below.
    Node replacementExpr = IR.assign(assignment.getFirstChild().detach(), IR.name(tempVarName));
    Node exprResult = IR.exprResult(replacementExpr);
    // return temp0; — so the arrow call still produces the assigned value, like the
    // original assignment expression did.
    Node returnNode = IR.returnNode(IR.name(tempVarName));
    Node block = IR.block(newAssignment, exprResult, returnNode);
    Node call = IR.call(IR.arrowFunction(IR.name(""), IR.paramList(), block));
    call.useSourceInfoIfMissingFromForTree(assignment);
    // The callee is an expression, not a qualified name, so mark the call as free.
    call.putBooleanProp(Node.FREE_CALL, true);
    assignment.getParent().replaceChild(assignment, call);
    NodeUtil.markNewScopesChanged(call, compiler);
    replacePattern(
        t,
        replacementExpr.getFirstChild(),
        replacementExpr.getLastChild(),
        replacementExpr,
        exprResult);
  }
  /**
   * Rewrites a destructuring pattern appearing as the loop variable of a for-in/for-of:
   * the pattern is replaced with a temporary name, and the destructuring is moved to a
   * statement prepended to the loop body.
   *
   * @param pattern the ARRAY_PATTERN or OBJECT_PATTERN node
   */
  private void visitDestructuringPatternInEnhancedFor(Node pattern) {
    checkArgument(pattern.isDestructuringPattern());
    String tempVarName = DESTRUCTURING_TEMP_VAR + (destructuringVarCounter++);
    if (NodeUtil.isEnhancedFor(pattern.getParent())) {
      // Bare pattern: 'for ([a, b] of c)'. Replace it with 'var temp' and prepend
      // '[a, b] = temp;' to the loop body.
      Node forNode = pattern.getParent();
      Node block = forNode.getLastChild();
      Node decl = IR.var(IR.name(tempVarName));
      decl.useSourceInfoIfMissingFromForTree(pattern);
      forNode.replaceChild(pattern, decl);
      Node exprResult = IR.exprResult(IR.assign(pattern, IR.name(tempVarName)));
      exprResult.useSourceInfoIfMissingFromForTree(pattern);
      block.addChildToFront(exprResult);
    } else {
      // Declared pattern: 'for (let [a, b] of c)'. Replace the destructuring LHS with
      // the temp name and prepend 'let [a, b] = temp;' (same declaration kind) to the body.
      Node destructuringLhs = pattern.getParent();
      checkState(destructuringLhs.isDestructuringLhs());
      Node declarationNode = destructuringLhs.getParent();
      Node forNode = declarationNode.getParent();
      checkState(NodeUtil.isEnhancedFor(forNode));
      Node block = forNode.getLastChild();
      declarationNode.replaceChild(
          destructuringLhs, IR.name(tempVarName).useSourceInfoFrom(pattern));
      Token declarationType = declarationNode.getToken();
      Node decl = IR.declaration(pattern.detach(), IR.name(tempVarName), declarationType);
      decl.useSourceInfoIfMissingFromForTree(pattern);
      block.addChildToFront(decl);
    }
  }
private void visitDestructuringPatternInCatch(Node pattern) {
String tempVarName = DESTRUCTURING_TEMP_VAR + (destructuringVarCounter++);
Node catchBlock = pattern.getNext();
pattern.replaceWith(IR.name(tempVarName));
catchBlock.addChildToFront(IR.declaration(pattern, IR.name(tempVarName), Token.LET));
}
/**
* Helper for transpiling DEFAULT_VALUE trees.
*/
private static Node defaultValueHook(Node getprop, Node defaultValue) {
return IR.hook(IR.sheq(getprop, IR.name("undefined")), defaultValue, getprop.cloneTree());
}
}
| MatrixFrog/closure-compiler | src/com/google/javascript/jscomp/Es6RewriteDestructuring.java | Java | apache-2.0 | 19,873 |
<?php
/**
* ZonecityController
* @var $this ZonecityController
* @var $model OmmuZoneCity
* @var $form CActiveForm
* version: 1.1.0
* Reference start
*
* TOC :
* Index
* Suggest
* Manage
* Add
* Edit
* RunAction
* Delete
* Publish
*
* LoadModel
* performAjaxValidation
*
* @author Putra Sudaryanto <putra@sudaryanto.id>
* @copyright Copyright (c) 2015 Ommu Platform (ommu.co)
* @link https://github.com/oMMu/Ommu-Core
* @contect (+62)856-299-4114
*
*----------------------------------------------------------------------------------------------------------
*/
class ZonecityController extends Controller
{
	/**
	 * @var string the default layout for the views. Defaults to '//layouts/column2', meaning
	 * using two-column layout. See 'protected/views/layouts/column2.php'.
	 */
	//public $layout='//layouts/column2';
	public $defaultAction = 'index';

	/**
	 * Initialize admin page theme.
	 * Only level-1 (admin) users may use this controller; everyone else is sent to login.
	 */
	public function init()
	{
		if(!Yii::app()->user->isGuest) {
			if(Yii::app()->user->level == 1) {
				$arrThemes = Utility::getCurrentTemplate('admin');
				Yii::app()->theme = $arrThemes['folder'];
				$this->layout = $arrThemes['layout'];
			} else {
				$this->redirect(Yii::app()->createUrl('site/login'));
			}
		} else {
			$this->redirect(Yii::app()->createUrl('site/login'));
		}
	}

	/**
	 * @return array action filters
	 */
	public function filters()
	{
		return array(
			'accessControl', // perform access control for CRUD operations
			//'postOnly + delete', // we only allow deletion via POST request
		);
	}

	/**
	 * Specifies the access control rules.
	 * This method is used by the 'accessControl' filter.
	 * @return array access control rules
	 */
	public function accessRules()
	{
		return array(
			array('allow', // allow all users to perform 'index' and 'view' actions
				'actions'=>array('index','suggest'),
				'users'=>array('*'),
			),
			array('allow', // allow authenticated user to perform 'create' and 'update' actions
				'actions'=>array(),
				'users'=>array('@'),
				'expression'=>'isset(Yii::app()->user->level)',
				//'expression'=>'isset(Yii::app()->user->level) && (Yii::app()->user->level != 1)',
			),
			array('allow', // allow authenticated user to perform 'create' and 'update' actions
				'actions'=>array('manage','add','edit','runaction','delete','publish'),
				'users'=>array('@'),
				'expression'=>'isset(Yii::app()->user->level) && (Yii::app()->user->level == 1)',
			),
			array('allow', // allow admin user to perform 'admin' and 'delete' actions
				'actions'=>array(),
				'users'=>array('admin'),
			),
			array('deny', // deny all users
				'users'=>array('*'),
			),
		);
	}

	/**
	 * Lists all models.
	 */
	public function actionIndex()
	{
		$this->redirect(array('manage'));
	}

	/**
	 * Autocomplete/suggest endpoint.
	 * Without $id: returns JSON [{id, value}, ...] matching the 'term' GET parameter.
	 * With $id: returns JSON with an HTML <option> list of cities for the given parent zone.
	 */
	public function actionSuggest($id=null)
	{
		if($id == null) {
			// Initialize to an empty list so the response is valid JSON even when no
			// search term was supplied or no city matched (previously $result could be
			// undefined here, triggering a PHP notice and emitting "null").
			$result = array();
			if(isset($_GET['term'])) {
				$criteria = new CDbCriteria;
				// Parameterized LIKE condition; user input is bound, not concatenated.
				$criteria->condition = 'city LIKE :city';
				$criteria->select = "city_id, city";
				$criteria->order = "city_id ASC";
				$criteria->params = array(':city' => '%' . strtolower($_GET['term']) . '%');
				$model = OmmuZoneCity::model()->findAll($criteria);
				if($model) {
					foreach($model as $items) {
						$result[] = array('id' => $items->city_id, 'value' => $items->city);
					}
				}
			}
			echo CJSON::encode($result);
			Yii::app()->end();
		} else {
			$model = OmmuZoneCity::getCity($id);
			$message['data'] = '<option value="">'.Yii::t('phrase', 'Select one').'</option>';
			foreach($model as $key => $val) {
				$message['data'] .= '<option value="'.$key.'">'.$val.'</option>';
			}
			echo CJSON::encode($message);
		}
	}

	/**
	 * Manages all models (admin grid with configurable columns).
	 */
	public function actionManage()
	{
		$model=new OmmuZoneCity('search');
		$model->unsetAttributes();  // clear any default values
		if(isset($_GET['OmmuZoneCity'])) {
			$model->attributes=$_GET['OmmuZoneCity'];
		}

		// Collect the grid columns the user enabled via the GridColumn GET parameter.
		$columnTemp = array();
		if(isset($_GET['GridColumn'])) {
			foreach($_GET['GridColumn'] as $key => $val) {
				if($_GET['GridColumn'][$key] == 1) {
					$columnTemp[] = $key;
				}
			}
		}
		$columns = $model->getGridColumn($columnTemp);

		$this->pageTitle = 'Ommu Zone Cities Manage';
		$this->pageDescription = '';
		$this->pageMeta = '';
		$this->render('/zone_city/admin_manage',array(
			'model'=>$model,
			'columns' => $columns,
		));
	}

	/**
	 * Creates a new model.
	 * If creation is successful, the browser will be redirected to the 'view' page.
	 */
	public function actionAdd()
	{
		$model=new OmmuZoneCity;

		// Uncomment the following line if AJAX validation is needed
		$this->performAjaxValidation($model);

		if(isset($_POST['OmmuZoneCity'])) {
			$model->attributes=$_POST['OmmuZoneCity'];

			$jsonError = CActiveForm::validate($model);
			if(strlen($jsonError) > 2) {
				echo $jsonError;
			} else {
				if(isset($_GET['enablesave']) && $_GET['enablesave'] == 1) {
					if($model->save()) {
						echo CJSON::encode(array(
							'type' => 5,
							'get' => Yii::app()->controller->createUrl('manage'),
							'id' => 'partial-ommu-zone-city',
							'msg' => '<div class="errorSummary success"><strong>OmmuZoneCity success created.</strong></div>',
						));
					} else {
						print_r($model->getErrors());
					}
				}
			}
			Yii::app()->end();
		} else {
			$this->dialogDetail = true;
			$this->dialogGroundUrl = Yii::app()->controller->createUrl('manage');
			$this->dialogWidth = 600;

			$this->pageTitle = 'Create Ommu Zone Cities';
			$this->pageDescription = '';
			$this->pageMeta = '';
			$this->render('/zone_city/admin_add',array(
				'model'=>$model,
			));
		}
	}

	/**
	 * Updates a particular model.
	 * If update is successful, the browser will be redirected to the 'view' page.
	 * @param integer $id the ID of the model to be updated
	 */
	public function actionEdit($id)
	{
		$model=$this->loadModel($id);

		// Uncomment the following line if AJAX validation is needed
		$this->performAjaxValidation($model);

		if(isset($_POST['OmmuZoneCity'])) {
			$model->attributes=$_POST['OmmuZoneCity'];

			$jsonError = CActiveForm::validate($model);
			if(strlen($jsonError) > 2) {
				echo $jsonError;
			} else {
				if(isset($_GET['enablesave']) && $_GET['enablesave'] == 1) {
					if($model->save()) {
						echo CJSON::encode(array(
							'type' => 5,
							'get' => Yii::app()->controller->createUrl('manage'),
							'id' => 'partial-ommu-zone-city',
							'msg' => '<div class="errorSummary success"><strong>OmmuZoneCity success updated.</strong></div>',
						));
					} else {
						print_r($model->getErrors());
					}
				}
			}
			Yii::app()->end();
		} else {
			$this->dialogDetail = true;
			$this->dialogGroundUrl = Yii::app()->controller->createUrl('manage');
			$this->dialogWidth = 600;

			$this->pageTitle = 'Update Ommu Zone Cities';
			$this->pageDescription = '';
			$this->pageMeta = '';
			$this->render('/zone_city/admin_edit',array(
				'model'=>$model,
			));
		}
	}

	/**
	 * Applies a bulk action (publish/unpublish/trash/delete) to the IDs selected in the grid.
	 */
	public function actionRunAction() {
		// Guard against a missing selection so count()/foreach below never receive null.
		$id = isset($_POST['trash_id']) ? $_POST['trash_id'] : array();
		$criteria = null;
		$actions = isset($_GET['action']) ? $_GET['action'] : null;

		if(count($id) > 0) {
			$criteria = new CDbCriteria;
			// NOTE(review): the model's primary key appears to be 'city_id' (see actionSuggest);
			// confirm that 'id' is the intended column for this IN condition.
			$criteria->addInCondition('id', $id);

			if($actions == 'publish') {
				OmmuZoneCity::model()->updateAll(array(
					'publish' => 1,
				),$criteria);
			} elseif($actions == 'unpublish') {
				OmmuZoneCity::model()->updateAll(array(
					'publish' => 0,
				),$criteria);
			} elseif($actions == 'trash') {
				OmmuZoneCity::model()->updateAll(array(
					'publish' => 2,
				),$criteria);
			} elseif($actions == 'delete') {
				OmmuZoneCity::model()->deleteAll($criteria);
			}
		}

		// if AJAX request (triggered by deletion via admin grid view), we should not redirect the browser
		if(!isset($_GET['ajax'])) {
			$this->redirect(isset($_POST['returnUrl']) ? $_POST['returnUrl'] : array('manage'));
		}
	}

	/**
	 * Deletes a particular model.
	 * If deletion is successful, the browser will be redirected to the 'admin' page.
	 * @param integer $id the ID of the model to be deleted
	 */
	public function actionDelete($id)
	{
		$model=$this->loadModel($id);

		if(Yii::app()->request->isPostRequest) {
			// we only allow deletion via POST request
			if(isset($id)) {
				if($model->delete()) {
					echo CJSON::encode(array(
						'type' => 5,
						'get' => Yii::app()->controller->createUrl('manage'),
						'id' => 'partial-ommu-zone-city',
						'msg' => '<div class="errorSummary success"><strong>OmmuZoneCity success deleted.</strong></div>',
					));
				}
			}
		} else {
			$this->dialogDetail = true;
			$this->dialogGroundUrl = Yii::app()->controller->createUrl('manage');
			$this->dialogWidth = 350;

			$this->pageTitle = 'OmmuZoneCity Delete.';
			$this->pageDescription = '';
			$this->pageMeta = '';
			$this->render('/zone_city/admin_delete');
		}
	}

	/**
	 * Toggles the publish flag of a particular model.
	 * @param integer $id the ID of the model to be toggled
	 */
	public function actionPublish($id)
	{
		$model=$this->loadModel($id);

		if($model->publish == 1) {
			$title = Yii::t('phrase', 'Unpublish');
			$replace = 0;
		} else {
			$title = Yii::t('phrase', 'Publish');
			$replace = 1;
		}

		if(Yii::app()->request->isPostRequest) {
			// we only allow deletion via POST request
			if(isset($id)) {
				//change value active or publish
				$model->publish = $replace;

				if($model->update()) {
					echo CJSON::encode(array(
						'type' => 5,
						'get' => Yii::app()->controller->createUrl('manage'),
						'id' => 'partial-ommu-zone-city',
						'msg' => '<div class="errorSummary success"><strong>OmmuZoneCity success published.</strong></div>',
					));
				}
			}
		} else {
			$this->dialogDetail = true;
			$this->dialogGroundUrl = Yii::app()->controller->createUrl('manage');
			$this->dialogWidth = 350;

			$this->pageTitle = $title;
			$this->pageDescription = '';
			$this->pageMeta = '';
			$this->render('/zone_city/admin_publish',array(
				'title'=>$title,
				'model'=>$model,
			));
		}
	}

	/**
	 * Returns the data model based on the primary key given in the GET variable.
	 * If the data model is not found, an HTTP exception will be raised.
	 * @param integer the ID of the model to be loaded
	 */
	public function loadModel($id)
	{
		$model = OmmuZoneCity::model()->findByPk($id);
		if($model===null)
			throw new CHttpException(404, Yii::t('phrase', 'The requested page does not exist.'));
		return $model;
	}

	/**
	 * Performs the AJAX validation.
	 * @param CModel the model to be validated
	 */
	protected function performAjaxValidation($model)
	{
		if(isset($_POST['ajax']) && $_POST['ajax']==='ommu-zone-city-form') {
			echo CActiveForm::validate($model);
			Yii::app()->end();
		}
	}
}
| oMMuCo/HPTT-FT-UGM-Official-Website | protected/controllers/ZonecityController.php | PHP | apache-2.0 | 10,950 |
package rds
import (
	"fmt"
	"os"
	"testing"
)
// TestModifyDBInstanceNetworkType exercises the ModifyDBInstanceNetworkType API
// against the live Aliyun RDS endpoint. Credentials are read from the
// environment so secrets are never committed to source control (the previous
// hard-coded key/secret pair was a leaked credential); the test is skipped when
// they are absent.
func TestModifyDBInstanceNetworkType(t *testing.T) {
	accessId := os.Getenv("ALIYUN_ACCESS_KEY_ID")
	accessSecret := os.Getenv("ALIYUN_ACCESS_KEY_SECRET")
	if accessId == "" || accessSecret == "" {
		t.Skip("ALIYUN_ACCESS_KEY_ID / ALIYUN_ACCESS_KEY_SECRET not set; skipping live API test")
	}

	var req ModifyDBInstanceNetworkTypeRequest
	req.Init()
	req.SetFormat("JSON")
	req.SetRegionId("cn-shenzhen")

	resp, err := ModifyDBInstanceNetworkType(&req, accessId, accessSecret)
	if err != nil {
		// Fatalf (not Errorf) so we never print a resp that was returned alongside an error.
		t.Fatalf("ModifyDBInstanceNetworkType failed: %s", err.Error())
	}
	fmt.Printf("Success: %v\n", resp)
}
| cklxmu/aliyun-openapi-go-sdk | rds/2014-08-15/ModifyDBInstanceNetworkType_test.go | GO | apache-2.0 | 461 |
/*
* Copyright 2012-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.kinesisvideo;
import com.amazonaws.services.kinesisvideo.model.ClientLimitExceededException;
import com.amazonaws.services.kinesisvideo.model.ConnectionLimitExceededException;
import com.amazonaws.services.kinesisvideo.model.InvalidArgumentException;
import com.amazonaws.services.kinesisvideo.model.InvalidEndpointException;
import com.amazonaws.services.kinesisvideo.model.NotAuthorizedException;
import com.amazonaws.services.kinesisvideo.model.PutMediaRequest;
import com.amazonaws.services.kinesisvideo.model.ResourceNotFoundException;
import java.io.Closeable;
/**
 * Interface for accessing Amazon Kinesis Video's PutMedia operation. This is a special, asynchronous operation that is
 * not supported in the normal client ({@link AWSKinesisVideoMediaClient}).
 * <p>
 * <b>Note:</b> Do not directly implement this interface, new methods are added to it regularly. Extend from
 * {@link AbstractAmazonKinesisVideoPutMedia} instead.
 * </p>
 */
// TODO service docs when available.
public interface AmazonKinesisVideoPutMedia extends Closeable {

    /**
     * <p>
     * Use this API to send media data to a Kinesis video stream.
     * </p>
     * <note>
     * <p>
     * Before using this API, you must call the <code>GetDataEndpoint</code> API to get an endpoint, and then specify
     * that endpoint in your <code>PutMedia</code> request.
     * </p>
     * </note>
     * <p>
     * In the request, HTTP headers carry the parameter information (stream name, time stamp, and whether the time
     * stamp is absolute or relative to when the producer started recording), and the request body carries the media
     * data. Kinesis Video Streams supports only the Matroska (MKV) container format for sending media data using this
     * API.
     * </p>
     * <p>
     * Media data may be sent in real time as it is produced (a continuous producer, minimizing latency between
     * recording and transmission), or offline in batches (for example, uploading hours of footage after a device is
     * docked, where latency is not an issue).
     * </p>
     * <p>
     * Considerations:
     * </p>
     * <ul>
     * <li>
     * <p>
     * You must specify either <code>streamName</code> or <code>streamARN</code>, but not both.
     * </p>
     * </li>
     * <li>
     * <p>
     * A single long-running <code>PutMedia</code> session may carry many fragments. Kinesis Video Streams sends one or
     * more acknowledgements for each fragment received, and network conditions may prevent all of them from arriving
     * as they are generated; multiple shorter consecutive sessions, each with fewer fragments, can make it easier to
     * receive every acknowledgement in real time.
     * </p>
     * </li>
     * <li>
     * <p>
     * Fragments sent to the same stream on multiple simultaneous <code>PutMedia</code> sessions get interleaved on the
     * stream. Make sure that this is acceptable in your application scenario.
     * </p>
     * </li>
     * </ul>
     * <p>
     * Limits: a client can call <code>PutMedia</code> up to five times per second per stream, can send up to five
     * fragments per second per stream, and Kinesis Video Streams reads media data at a rate of up to 12.5 MB/second
     * (100 Mbps) during a <code>PutMedia</code> session.
     * </p>
     * <p>
     * Constraints (violations cause Kinesis Video Streams to send an Error acknowledgement): fragments whose time
     * codes span longer than 10 seconds or that contain more than 50 megabytes of data are not allowed, and an MKV
     * stream containing more than one MKV segment or containing disallowed MKV elements (like <code>track*</code>) is
     * rejected.
     * </p>
     * <p>
     * Kinesis Video Streams stores each incoming fragment and related metadata in what is called a "chunk". The
     * fragment metadata includes the MKV headers provided at the start of the <code>PutMedia</code> request, a unique
     * fragment number assigned by Kinesis Video Streams, <code>server_timestamp</code> (when Kinesis Video Streams
     * started receiving the fragment), and <code>producer_timestamp</code> (when the producer started recording the
     * fragment). The <code>producer_timestamp</code> is computed from the fragment timecode in the payload together
     * with two request headers, <code>producerStartTimestamp</code> and <code>fragmentTimeCodeType</code>: for
     * relative timecodes it is <code>producerStartTimestamp</code> + fragment timecode, and for absolute timecodes it
     * is the fragment timecode converted to milliseconds. When you make a <code>GetMedia</code> request, Kinesis Video
     * Streams returns a stream of these chunks, and the client can process the metadata as needed.
     * </p>
     * <note>
     * <p>
     * This operation is only available for the AWS SDK for Java. It is not supported in AWS SDKs for other languages.
     * </p>
     * </note>
     *
     * @param request Represents the input of a <code>PutMedia</code> operation
     * @param responseHandler Handler to asynchronously process the {@link com.amazonaws.services.kinesisvideo.model.AckEvent} that
     *        are received by the service.
     * @throws ResourceNotFoundException
     *         Status Code: 404, The stream with the given name does not exist.
     * @throws NotAuthorizedException
     *         Status Code: 403, The caller is not authorized to perform an operation on the given stream, or the token
     *         has expired.
     * @throws InvalidEndpointException
     *         Status Code: 400, Caller used wrong endpoint to write data to a stream. On receiving such an exception,
     *         the user must call <code>GetDataEndpoint</code> with <code>AccessMode</code> set to "READ" and use the
     *         endpoint Kinesis Video returns in the next <code>GetMedia</code> call.
     * @throws InvalidArgumentException
     *         The value for this input parameter is invalid.
     * @throws ClientLimitExceededException
     *         Kinesis Video Streams has throttled the request because you have exceeded the limit of allowed client
     *         calls. Try making the call later.
     * @throws ConnectionLimitExceededException
     *         Kinesis Video Streams has throttled the request because you have exceeded the limit of allowed client
     *         connections.
     * @sample AmazonKinesisVideoMedia.PutMedia
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/kinesis-video-media-2017-09-30/PutMedia" target="_top">AWS
     *      API Documentation</a>
     */
    void putMedia(PutMediaRequest request, PutMediaAckResponseHandler responseHandler);

    /**
     * Closes the client and releases all resources like connection pools and threads.
     */
    @Override
    void close();
}
| jentfoo/aws-sdk-java | aws-java-sdk-kinesisvideo/src/main/java/com/amazonaws/services/kinesisvideo/AmazonKinesisVideoPutMedia.java | Java | apache-2.0 | 10,008 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sejda.sambox.pdmodel.graphics.optionalcontent;
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferInt;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.imageio.ImageIO;
import org.junit.Assert;
import org.sejda.io.SeekableSources;
import org.sejda.sambox.cos.COSName;
import org.sejda.sambox.input.PDFParser;
import org.sejda.sambox.pdmodel.PDDocument;
import org.sejda.sambox.pdmodel.PDDocumentCatalog;
import org.sejda.sambox.pdmodel.PDPage;
import org.sejda.sambox.pdmodel.PDPageContentStream;
import org.sejda.sambox.pdmodel.PDPageContentStream.AppendMode;
import org.sejda.sambox.pdmodel.PDResources;
import org.sejda.sambox.pdmodel.PageMode;
import org.sejda.sambox.pdmodel.font.PDFont;
import org.sejda.sambox.pdmodel.font.PDType1Font;
import org.sejda.sambox.pdmodel.graphics.optionalcontent.PDOptionalContentProperties.BaseState;
import org.sejda.sambox.rendering.PDFRenderer;
import org.sejda.sambox.util.SpecVersionUtils;
import junit.framework.TestCase;
/**
* Tests optional content group functionality (also called layers).
*/
public class TestOptionalContentGroups extends TestCase
{
    // Output directory for the PDFs and PNGs generated by these tests.
    private final File testResultsDir = new File("target/test-output");
    @Override
    protected void setUp() throws Exception
    {
        super.setUp();
        // Ensure the output directory exists before any test writes to it.
        testResultsDir.mkdirs();
    }
    /**
     * Tests OCG generation.
     *
     * @throws Exception if an error occurs
     */
    public void testOCGGeneration() throws Exception
    {
        PDDocument doc = new PDDocument();
        try
        {
            // Create new page
            PDPage page = new PDPage();
            doc.addPage(page);
            PDResources resources = page.getResources();
            if (resources == null)
            {
                resources = new PDResources();
                page.setResources(resources);
            }
            // Prepare OCG functionality
            PDOptionalContentProperties ocprops = new PDOptionalContentProperties();
            doc.getDocumentCatalog().setOCProperties(ocprops);
            // ocprops.setBaseState(BaseState.ON); //ON=default
            // Create OCG for background
            PDOptionalContentGroup background = new PDOptionalContentGroup("background");
            ocprops.addGroup(background);
            assertTrue(ocprops.isGroupEnabled("background"));
            // Create OCG for enabled
            PDOptionalContentGroup enabled = new PDOptionalContentGroup("enabled");
            ocprops.addGroup(enabled);
            // NOTE(review): as exercised here, setGroupEnabled(name, state) returns false on the
            // first explicit toggle of a freshly added group and true once the group already has
            // a state entry (see the "disabled" group below) — confirm the exact contract against
            // PDOptionalContentProperties.
            assertFalse(ocprops.setGroupEnabled("enabled", true));
            assertTrue(ocprops.isGroupEnabled("enabled"));
            // Create OCG for disabled
            PDOptionalContentGroup disabled = new PDOptionalContentGroup("disabled");
            ocprops.addGroup(disabled);
            assertFalse(ocprops.setGroupEnabled("disabled", true));
            assertTrue(ocprops.isGroupEnabled("disabled"));
            assertTrue(ocprops.setGroupEnabled("disabled", false));
            assertFalse(ocprops.isGroupEnabled("disabled"));
            // Setup page content stream and paint background/title
            PDPageContentStream contentStream = new PDPageContentStream(doc, page,
                    AppendMode.OVERWRITE, false);
            PDFont font = PDType1Font.HELVETICA_BOLD;
            contentStream.beginMarkedContent(COSName.OC, background);
            contentStream.beginText();
            contentStream.setFont(font, 14);
            contentStream.newLineAtOffset(80, 700);
            contentStream.showText("PDF 1.5: Optional Content Groups");
            contentStream.endText();
            font = PDType1Font.HELVETICA;
            contentStream.beginText();
            contentStream.setFont(font, 12);
            contentStream.newLineAtOffset(80, 680);
            contentStream.showText("You should see a green textline, but no red text line.");
            contentStream.endText();
            contentStream.endMarkedContent();
            // Paint enabled layer
            contentStream.beginMarkedContent(COSName.OC, enabled);
            contentStream.setNonStrokingColor(Color.GREEN);
            contentStream.beginText();
            contentStream.setFont(font, 12);
            contentStream.newLineAtOffset(80, 600);
            contentStream.showText("This is from an enabled layer. If you see this, that's good.");
            contentStream.endText();
            contentStream.endMarkedContent();
            // Paint disabled layer
            contentStream.beginMarkedContent(COSName.OC, disabled);
            contentStream.setNonStrokingColor(Color.RED);
            contentStream.beginText();
            contentStream.setFont(font, 12);
            contentStream.newLineAtOffset(80, 500);
            contentStream
                    .showText("This is from a disabled layer. If you see this, that's NOT good!");
            contentStream.endText();
            contentStream.endMarkedContent();
            contentStream.close();
            File targetFile = new File(testResultsDir, "ocg-generation.pdf");
            doc.writeTo(targetFile);
        }
        finally
        {
            doc.close();
        }
    }
    /**
     * Tests OCG functions on a loaded PDF.
     *
     * @throws Exception if an error occurs
     */
    public void testOCGConsumption() throws Exception
    {
        File pdfFile = new File(testResultsDir, "ocg-generation.pdf");
        // This test consumes the file produced by testOCGGeneration(); regenerate it
        // if a previous run (or test ordering) has not created it yet.
        if (!pdfFile.exists())
        {
            testOCGGeneration();
        }
        try (PDDocument doc = PDFParser.parse(SeekableSources.seekableSourceFrom(pdfFile)))
        {
            assertEquals(SpecVersionUtils.V1_5, doc.getVersion());
            PDDocumentCatalog catalog = doc.getDocumentCatalog();
            PDPage page = doc.getPage(0);
            PDResources resources = page.getResources();
            // "oc1" is the properties resource name under which the "background" OCG was
            // written — NOTE(review): this depends on PDResources' automatic naming scheme;
            // confirm if that scheme changes.
            COSName mc0 = COSName.getPDFName("oc1");
            PDOptionalContentGroup ocg = (PDOptionalContentGroup) resources.getProperties(mc0);
            assertNotNull(ocg);
            assertEquals("background", ocg.getName());
            assertNull(resources.getProperties(COSName.getPDFName("inexistent")));
            PDOptionalContentProperties ocgs = catalog.getOCProperties();
            assertEquals(BaseState.ON, ocgs.getBaseState());
            Set<String> names = new java.util.HashSet<String>(Arrays.asList(ocgs.getGroupNames()));
            assertEquals(3, names.size());
            assertTrue(names.contains("background"));
            assertTrue(ocgs.isGroupEnabled("background"));
            assertTrue(ocgs.isGroupEnabled("enabled"));
            assertFalse(ocgs.isGroupEnabled("disabled"));
            ocgs.setGroupEnabled("background", false);
            assertFalse(ocgs.isGroupEnabled("background"));
            PDOptionalContentGroup background = ocgs.getGroup("background");
            assertEquals(ocg.getName(), background.getName());
            assertNull(ocgs.getGroup("inexistent"));
            Collection<PDOptionalContentGroup> coll = ocgs.getOptionalContentGroups();
            assertEquals(3, coll.size());
            Set<String> nameSet = new HashSet<>();
            for (PDOptionalContentGroup ocg2 : coll)
            {
                nameSet.add(ocg2.getName());
            }
            assertTrue(nameSet.contains("background"));
            assertTrue(nameSet.contains("enabled"));
            assertTrue(nameSet.contains("disabled"));
        }
    }
    /**
     * Tests that two OCGs sharing the same name can still be toggled independently
     * when addressed by group object rather than by name.
     */
    public void testOCGsWithSameNameCanHaveDifferentVisibility() throws Exception
    {
        PDDocument doc = new PDDocument();
        try
        {
            // Create new page
            PDPage page = new PDPage();
            doc.addPage(page);
            PDResources resources = page.getResources();
            if (resources == null)
            {
                resources = new PDResources();
                page.setResources(resources);
            }
            // Prepare OCG functionality
            PDOptionalContentProperties ocprops = new PDOptionalContentProperties();
            doc.getDocumentCatalog().setOCProperties(ocprops);
            // ocprops.setBaseState(BaseState.ON); //ON=default
            // Create visible OCG
            PDOptionalContentGroup visible = new PDOptionalContentGroup("layer");
            ocprops.addGroup(visible);
            assertTrue(ocprops.isGroupEnabled(visible));
            // Create invisible OCG
            PDOptionalContentGroup invisible = new PDOptionalContentGroup("layer");
            ocprops.addGroup(invisible);
            assertFalse(ocprops.setGroupEnabled(invisible, false));
            assertFalse(ocprops.isGroupEnabled(invisible));
            // Check that visible layer is still visible
            assertTrue(ocprops.isGroupEnabled(visible));
            // Setup page content stream and paint background/title
            PDPageContentStream contentStream = new PDPageContentStream(doc, page,
                    AppendMode.OVERWRITE, false);
            PDFont font = PDType1Font.HELVETICA_BOLD;
            contentStream.beginMarkedContent(COSName.OC, visible);
            contentStream.beginText();
            contentStream.setFont(font, 14);
            contentStream.newLineAtOffset(80, 700);
            contentStream.showText("PDF 1.5: Optional Content Groups");
            contentStream.endText();
            font = PDType1Font.HELVETICA;
            contentStream.beginText();
            contentStream.setFont(font, 12);
            contentStream.newLineAtOffset(80, 680);
            contentStream.showText("You should see this text, but no red text line.");
            contentStream.endText();
            contentStream.endMarkedContent();
            // Paint disabled layer
            contentStream.beginMarkedContent(COSName.OC, invisible);
            contentStream.setNonStrokingColor(Color.RED);
            contentStream.beginText();
            contentStream.setFont(font, 12);
            contentStream.newLineAtOffset(80, 500);
            contentStream
                    .showText("This is from a disabled layer. If you see this, that's NOT good!");
            contentStream.endText();
            contentStream.endMarkedContent();
            contentStream.close();
            File targetFile = new File(testResultsDir, "ocg-generation-same-name.pdf");
            doc.writeTo(targetFile);
        }
        finally
        {
            doc.close();
        }
    }
    /**
     * PDFBOX-4496: setGroupEnabled(String, boolean) must catch all OCGs of a name even when several names are
     * identical.
     *
     * @throws IOException
     */
    public void testOCGGenerationSameNameCanHaveSameVisibilityOff() throws IOException
    {
        BufferedImage expectedImage;
        BufferedImage actualImage;
        try (PDDocument doc = new PDDocument())
        {
            // Create new page
            PDPage page = new PDPage();
            doc.addPage(page);
            PDResources resources = page.getResources();
            if (resources == null)
            {
                resources = new PDResources();
                page.setResources(resources);
            }
            // Prepare OCG functionality
            PDOptionalContentProperties ocprops = new PDOptionalContentProperties();
            doc.getDocumentCatalog().setOCProperties(ocprops);
            // ocprops.setBaseState(BaseState.ON); //ON=default
            // Create OCG for background
            PDOptionalContentGroup background = new PDOptionalContentGroup("background");
            ocprops.addGroup(background);
            assertTrue(ocprops.isGroupEnabled("background"));
            // Create OCG for enabled
            PDOptionalContentGroup enabled = new PDOptionalContentGroup("science");
            ocprops.addGroup(enabled);
            assertFalse(ocprops.setGroupEnabled("science", true));
            assertTrue(ocprops.isGroupEnabled("science"));
            // Create OCG for disabled1
            PDOptionalContentGroup disabled1 = new PDOptionalContentGroup("alternative");
            ocprops.addGroup(disabled1);
            // Create OCG for disabled2 with same name as disabled1
            PDOptionalContentGroup disabled2 = new PDOptionalContentGroup("alternative");
            ocprops.addGroup(disabled2);
            // Disabling by name must affect BOTH "alternative" groups (PDFBOX-4496).
            assertFalse(ocprops.setGroupEnabled("alternative", false));
            assertFalse(ocprops.isGroupEnabled("alternative"));
            // Setup page content stream and paint background/title
            PDPageContentStream contentStream = new PDPageContentStream(doc, page,
                    AppendMode.OVERWRITE, false);
            PDFont font = PDType1Font.HELVETICA_BOLD;
            contentStream.beginMarkedContent(COSName.OC, background);
            contentStream.beginText();
            contentStream.setFont(font, 14);
            contentStream.newLineAtOffset(80, 700);
            contentStream.showText("PDF 1.5: Optional Content Groups");
            contentStream.endText();
            contentStream.endMarkedContent();
            font = PDType1Font.HELVETICA;
            // Paint enabled layer
            contentStream.beginMarkedContent(COSName.OC, enabled);
            contentStream.setNonStrokingColor(Color.GREEN);
            contentStream.beginText();
            contentStream.setFont(font, 12);
            contentStream.newLineAtOffset(80, 600);
            contentStream.showText("The earth is a sphere");
            contentStream.endText();
            contentStream.endMarkedContent();
            // Paint disabled layer1
            contentStream.beginMarkedContent(COSName.OC, disabled1);
            contentStream.setNonStrokingColor(Color.RED);
            contentStream.beginText();
            contentStream.setFont(font, 12);
            contentStream.newLineAtOffset(80, 500);
            contentStream.showText("Alternative 1: The earth is a flat circle");
            contentStream.endText();
            contentStream.endMarkedContent();
            // Paint disabled layer2
            contentStream.beginMarkedContent(COSName.OC, disabled2);
            contentStream.setNonStrokingColor(Color.BLUE);
            contentStream.beginText();
            contentStream.setFont(font, 12);
            contentStream.newLineAtOffset(80, 450);
            contentStream.showText("Alternative 2: The earth is a flat parallelogram");
            contentStream.endText();
            contentStream.endMarkedContent();
            contentStream.close();
            doc.getDocumentCatalog().setPageMode(PageMode.USE_OPTIONAL_CONTENT);
            File targetFile = new File(testResultsDir, "ocg-generation-same-name-off.pdf");
            doc.writeTo(targetFile.getAbsolutePath());
        }
        // render PDF with science disabled and alternatives with same name enabled
        try (PDDocument doc = PDDocument
                .load(new File(testResultsDir, "ocg-generation-same-name-off.pdf")))
        {
            doc.getDocumentCatalog().getOCProperties().setGroupEnabled("background", false);
            doc.getDocumentCatalog().getOCProperties().setGroupEnabled("science", false);
            doc.getDocumentCatalog().getOCProperties().setGroupEnabled("alternative", true);
            // renderImage(pageIndex, scale): page 0 at 2x — NOTE(review): higher scale is
            // presumably used to make the pixel comparison below more robust.
            actualImage = new PDFRenderer(doc).renderImage(0, 2);
            ImageIO.write(actualImage, "png",
                    new File(testResultsDir, "ocg-generation-same-name-off-actual.png"));
        }
        // create PDF without OCGs to created expected rendering
        try (PDDocument doc2 = new PDDocument())
        {
            // Create new page
            PDPage page = new PDPage();
            doc2.addPage(page);
            PDResources resources = page.getResources();
            if (resources == null)
            {
                resources = new PDResources();
                page.setResources(resources);
            }
            try (PDPageContentStream contentStream = new PDPageContentStream(doc2, page,
                    AppendMode.OVERWRITE, false))
            {
                PDFont font = PDType1Font.HELVETICA;
                contentStream.setNonStrokingColor(Color.RED);
                contentStream.beginText();
                contentStream.setFont(font, 12);
                contentStream.newLineAtOffset(80, 500);
                contentStream.showText("Alternative 1: The earth is a flat circle");
                contentStream.endText();
                contentStream.setNonStrokingColor(Color.BLUE);
                contentStream.beginText();
                contentStream.setFont(font, 12);
                contentStream.newLineAtOffset(80, 450);
                contentStream.showText("Alternative 2: The earth is a flat parallelogram");
                contentStream.endText();
            }
            File targetFile = new File(testResultsDir, "ocg-generation-same-name-off-expected.pdf");
            doc2.writeTo(targetFile.getAbsolutePath());
        }
        try (PDDocument doc = PDDocument
                .load(new File(testResultsDir, "ocg-generation-same-name-off-expected.pdf")))
        {
            expectedImage = new PDFRenderer(doc).renderImage(0, 2);
            ImageIO.write(expectedImage, "png",
                    new File(testResultsDir, "ocg-generation-same-name-off-expected.png"));
        }
        // compare images
        DataBufferInt expectedData = (DataBufferInt) expectedImage.getRaster().getDataBuffer();
        DataBufferInt actualData = (DataBufferInt) actualImage.getRaster().getDataBuffer();
        Assert.assertArrayEquals(expectedData.getData(), actualData.getData());
    }
}
| torakiki/sambox | src/test/java/org/sejda/sambox/pdmodel/graphics/optionalcontent/TestOptionalContentGroups.java | Java | apache-2.0 | 18,662 |
package org.appenders.log4j2.elasticsearch;
/*-
* #%L
* log4j2-elasticsearch
* %%
* Copyright (C) 2020 Rafal Foltynski
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import org.apache.logging.log4j.core.config.ConfigurationException;
import org.appenders.log4j2.elasticsearch.spi.BatchEmitterServiceProvider;
import org.appenders.log4j2.elasticsearch.spi.TestBatchEmitterFactory;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.Arrays;
import java.util.Random;
import java.util.UUID;
import java.util.function.Function;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.IsEqual.equalTo;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Tests for {@code AsyncBatchDeliveryPlugin}: the Log4j2 plugin factory method, builder
 * defaults/fallbacks, lifecycle (shutdown delay, setup op sources) and delegation of
 * added items to the configured {@code BatchOperations}/{@code BatchEmitter}.
 */
public class AsyncBatchDeliveryPluginTest {
    // Defaults used by most builder fixtures in this test.
    private static final int TEST_BATCH_SIZE = 100;
    private static final int TEST_DELIVERY_INTERVAL = 100;
    public static final String TEST_SERVER_URIS = "http://localhost:9200";
    // Object factory builder preconfigured with the test server URI; no real HTTP calls are made.
    public static TestHttpObjectFactory.Builder createTestObjectFactoryBuilder() {
        return TestHttpObjectFactory.newBuilder()
                .withServerUris(TEST_SERVER_URIS);
    }
    // Note: spy(...) wraps the builder BEFORE withFailoverPolicy is applied, so the
    // failover policy is set on the spy itself.
    public static AsyncBatchDeliveryPlugin.Builder createTestBatchDeliveryBuilder() {
        return spy(AsyncBatchDeliveryPlugin.newBuilder()
                .withBatchSize(TEST_BATCH_SIZE)
                .withDeliveryInterval(TEST_DELIVERY_INTERVAL)
                .withClientObjectFactory(createTestObjectFactoryBuilder().build()))
                .withFailoverPolicy(new NoopFailoverPolicy());
    }
    /* To make testing easier and break when changed */
    private BatchDelivery<String> invokePluginFactory(AsyncBatchDelivery.Builder builder) {
        return AsyncBatchDeliveryPlugin.createAsyncBatchDelivery(
                builder.clientObjectFactory,
                builder.deliveryInterval,
                builder.batchSize,
                builder.failoverPolicy,
                builder.shutdownDelayMillis,
                builder.setupOpSources);
    }
    @Test
    public void pluginFactoryReturnsNonNullObject() {
        // given
        AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder();
        // when
        BatchDelivery<String> delivery = invokePluginFactory(batchDeliveryBuilder);
        // then
        assertNotNull(delivery);
    }
    @Test
    public void pluginFactoryFailsWhenClientObjectFactoryIsNull() {
        // given
        AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder();
        batchDeliveryBuilder.withClientObjectFactory(null);
        // when
        final ConfigurationException exception = assertThrows(ConfigurationException.class, () -> invokePluginFactory(batchDeliveryBuilder));
        // then
        assertThat(exception.getMessage(),
                equalTo("No Elasticsearch client factory [HCHttp|JestHttp|ElasticsearchBulkProcessor] provided for " + AsyncBatchDelivery.class.getSimpleName()));
    }
    @Test
    public void pluginFactoryFallsBackToDefaults() {
        // given
        Function<BulkEmitterTest.TestBatch, Boolean> listener = mock(Function.class);
        TestHttpObjectFactory objectFactory = spy(createTestObjectFactoryBuilder().build());
        when(objectFactory.createBatchListener(any())).thenReturn(listener);
        // Zero/negative builder values must be replaced by the documented defaults.
        AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder()
                .withClientObjectFactory(objectFactory)
                .withBatchSize(0)
                .withDeliveryInterval(0)
                .withShutdownDelayMillis(-1)
                .withFailoverPolicy(null)
                .withSetupOpSources();
        // when
        AsyncBatchDelivery batchDelivery = (AsyncBatchDelivery) invokePluginFactory(batchDeliveryBuilder);
        int expectedBatches = 10;
        // Fill exactly DEFAULT_BATCH_SIZE * expectedBatches items so the listener fires
        // once per full batch.
        for (int i = 0; i < AsyncBatchDelivery.Builder.DEFAULT_BATCH_SIZE * expectedBatches; i++) {
            batchDelivery.add(NoopIndexNameFormatterTest.TEST_INDEX_NAME, "test");
        }
        // then
        assertEquals(AsyncBatchDelivery.Builder.DEFAULT_FAILOVER_POLICY, batchDelivery.failoverPolicy);
        assertEquals(Arrays.asList(AsyncBatchDelivery.Builder.DEFAULT_OP_SOURCES), batchDelivery.setupOpSources);
        assertEquals(AsyncBatchDelivery.Builder.DEFAULT_SHUTDOWN_DELAY, batchDelivery.shutdownDelayMillis);
        verify(listener, times(expectedBatches)).apply(any());
    }
    @Test
    public void builderConfiguresShutdownDelayMillis() {
        // given
        // Randomized to make an accidental hard-coded match in the implementation unlikely.
        long expectedShutdownDelayMillis = 10 + new Random().nextInt(100);
        FailoverPolicy failoverPolicy = spy(new TestFailoverPolicy());
        AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder()
                .withFailoverPolicy(failoverPolicy)
                .withShutdownDelayMillis(expectedShutdownDelayMillis);
        BatchDelivery<String> asyncBatchDelivery = invokePluginFactory(batchDeliveryBuilder);
        asyncBatchDelivery.start();
        // when
        asyncBatchDelivery.stop();
        // then
        // The configured delay must be propagated to the failover policy's lifecycle stop.
        ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
        verify(LifeCycle.of(failoverPolicy)).stop(captor.capture(), anyBoolean());
        assertEquals((Long) expectedShutdownDelayMillis, captor.getValue());
    }
    @Test
    public void builderConfiguresSetupOpSources() {
        // given
        ClientObjectFactory clientObjectFactory = spy(createTestObjectFactoryBuilder().build());
        OperationFactory operationFactory = mock(OperationFactory.class);
        when(clientObjectFactory.setupOperationFactory()).thenReturn(operationFactory);
        IndexTemplate indexTemplate = mock(IndexTemplate.class);
        AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder()
                .withSetupOpSources(indexTemplate)
                .withClientObjectFactory(clientObjectFactory);
        BatchDelivery<String> asyncBatchDelivery = invokePluginFactory(batchDeliveryBuilder);
        // when
        asyncBatchDelivery.start();
        // then
        // On start, each setup op source is turned into an operation and registered.
        verify(operationFactory).create(eq(indexTemplate));
        verify(clientObjectFactory).addOperation(any());
    }
    @Test
    public void batchDeliveryAddObjectDelegatesToProvidedBatchOperationsObjectApi() {
        // given
        AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder();
        ClientObjectFactory clientObjectFactory = spy(createTestObjectFactoryBuilder().build());
        BatchOperations batchOperations = spy(clientObjectFactory.createBatchOperations());
        when(clientObjectFactory.createBatchOperations()).thenReturn(batchOperations);
        batchDeliveryBuilder.withClientObjectFactory(clientObjectFactory);
        BatchDelivery<String> batchDelivery = invokePluginFactory(batchDeliveryBuilder);
        String indexName = UUID.randomUUID().toString();
        String logObject = UUID.randomUUID().toString();
        // when
        batchDelivery.add(indexName, logObject);
        // then
        verify(batchOperations).createBatchItem(eq(indexName), eq(logObject));
    }
    @Test
    public void batchDeliveryAddItemSourceDelegatesToProvidedBatchOperationsItemSourceApi() {
        // given
        AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder();
        ClientObjectFactory clientObjectFactory = spy(createTestObjectFactoryBuilder().build());
        BatchOperations batchOperations = spy(clientObjectFactory.createBatchOperations());
        when(clientObjectFactory.createBatchOperations()).thenReturn(batchOperations);
        batchDeliveryBuilder.withClientObjectFactory(clientObjectFactory);
        BatchDelivery batchDelivery = invokePluginFactory(batchDeliveryBuilder);
        String indexName = UUID.randomUUID().toString();
        ItemSource itemSource = mock(ItemSource.class);
        // when
        batchDelivery.add(indexName, itemSource);
        // then
        verify(batchOperations).createBatchItem(eq(indexName), eq(itemSource));
    }
    @Test
    public void deliveryAddsBatchItemToBatchEmitter() {
        // given
        TestHttpObjectFactory objectFactory = createTestObjectFactoryBuilder().build();
        TestBatchEmitterFactory batchEmitterFactory = spy(new TestBatchEmitterFactory());
        BatchEmitter emitter = batchEmitterFactory.createInstance(TEST_BATCH_SIZE, TEST_DELIVERY_INTERVAL, objectFactory, new NoopFailoverPolicy());
        // Anonymous subclass routes emitter creation through the spied factory so the
        // emitter interaction can be verified; batch size 1 makes a single add() flush.
        TestAsyncBatchDelivery delivery = spy(new TestAsyncBatchDelivery(createTestBatchDeliveryBuilder()
                .withBatchSize(1)
                .withDeliveryInterval(TEST_DELIVERY_INTERVAL)
                .withClientObjectFactory(objectFactory)
                .withFailoverPolicy(new NoopFailoverPolicy())
                .withSetupOpSources()) {
            @Override
            protected BatchEmitterServiceProvider createBatchEmitterServiceProvider() {
                return batchEmitterFactory;
            }
        });
        String testMessage = "test message";
        // when
        delivery.add("testIndexName", testMessage);
        // then
        verify(batchEmitterFactory).createInstance(eq(1), eq(TEST_DELIVERY_INTERVAL), eq(objectFactory), any());
        ArgumentCaptor<BulkEmitterTest.TestBatchItem> captor = ArgumentCaptor.forClass(BulkEmitterTest.TestBatchItem.class);
        verify(emitter, times(1)).add(captor.capture());
        assertEquals(testMessage, captor.getValue().getData(null));
    }
    // Test double exposing the protected emitter-provider hook; returns null by default,
    // anonymous subclasses in tests override it as needed.
    public static class TestAsyncBatchDelivery extends AsyncBatchDeliveryPlugin {
        public TestAsyncBatchDelivery(Builder builder) {
            super(builder);
        }
        @Override
        protected BatchEmitterServiceProvider createBatchEmitterServiceProvider() {
            return null;
        }
    }
    // Failover policy stub that tracks lifecycle state so stop(timeout, runInBackground)
    // invocations can be verified via Mockito spies.
    private static class TestFailoverPolicy implements FailoverPolicy, LifeCycle {
        private State state = State.STOPPED;
        @Override
        public void deliver(Object failedPayload) {
        }
        @Override
        public void start() {
            state = State.STARTED;
        }
        @Override
        public void stop() {
            state = State.STOPPED;
        }
        @Override
        public LifeCycle stop(long timeout, boolean runInBackground) {
            state = State.STOPPED;
            return this;
        }
        @Override
        public boolean isStarted() {
            return state == State.STARTED;
        }
        @Override
        public boolean isStopped() {
            return state == State.STOPPED;
        }
    }
}
| rfoltyns/log4j2-elasticsearch | log4j2-elasticsearch-core/src/test/java/org/appenders/log4j2/elasticsearch/AsyncBatchDeliveryPluginTest.java | Java | apache-2.0 | 11,724 |
require_relative '../../../../test_helper'
require_relative '../endpoint_test_helper'
# Endpoint wiring tests for the sheet Comments API category.
#
# The shared EndpointHelper expands each spec returned by +endpoints+ into a
# test asserting the HTTP method, URL segments, and arguments used by the
# corresponding client call.
class CommentsTest < Minitest::Test
  extend Smartsheet::Test::EndpointHelper
  attr_accessor :mock_client
  attr_accessor :smartsheet_client
  # The client category under test, used by the generated endpoint tests.
  def category
    smartsheet_client.sheets.comments
  end
  # Declarative endpoint specs consumed by define_endpoints_tests.
  def self.endpoints
    [
      {
        symbol: :add,
        method: :post,
        url: ['sheets', :sheet_id, 'discussions', :discussion_id, 'comments'],
        args: {sheet_id: 123, discussion_id: 234, body: {}},
        has_params: false,
        headers: nil
      },
      # TODO: Add this!
      # {
      #     symbol: :add_with_file,
      #     method: :post,
      #     url: ['sheets', :sheet_id, 'rows', :row_id, 'discussions'],
      #     args: {sheet_id: 123, row_id: 234, body: {}},
      #     has_params: false,
      #     headers: nil
      # },
      {
        symbol: :update,
        method: :put,
        url: ['sheets', :sheet_id, 'comments', :comment_id],
        args: {sheet_id: 123, comment_id: 234, body: {}},
        has_params: false,
        headers: nil
      },
      {
        symbol: :delete,
        method: :delete,
        url: ['sheets', :sheet_id, 'comments', :comment_id],
        args: {sheet_id: 123, comment_id: 234},
        has_params: false,
        headers: nil
      },
      {
        symbol: :get,
        method: :get,
        url: ['sheets', :sheet_id, 'comments', :comment_id],
        args: {sheet_id: 123, comment_id: 234},
        has_params: false,
        headers: nil
      },
    ]
  end
  define_setup
  define_endpoints_tests
end
| smartsheet-platform/smartsheet-ruby-sdk | test/unit/smartsheet/endpoints/sheets/comments_test.rb | Ruby | apache-2.0 | 1,724 |
/*
* Copyright 2002-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.web.servlet.view;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.Array;
import java.net.URLEncoder;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.beans.BeanUtils;
import org.springframework.http.HttpStatus;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import org.springframework.web.context.WebApplicationContext;
import org.springframework.web.servlet.FlashMap;
import org.springframework.web.servlet.FlashMapManager;
import org.springframework.web.servlet.HandlerMapping;
import org.springframework.web.servlet.SmartView;
import org.springframework.web.servlet.View;
import org.springframework.web.servlet.support.RequestContextUtils;
import org.springframework.web.servlet.support.RequestDataValueProcessor;
import org.springframework.web.util.UriComponents;
import org.springframework.web.util.UriComponentsBuilder;
import org.springframework.web.util.UriUtils;
import org.springframework.web.util.WebUtils;
/**
* View that redirects to an absolute, context relative, or current request
* relative URL. The URL may be a URI template in which case the URI template
* variables will be replaced with values available in the model. By default
* all primitive model attributes (or collections thereof) are exposed as HTTP
* query parameters (assuming they've not been used as URI template variables),
* but this behavior can be changed by overriding the
* {@link #isEligibleProperty(String, Object)} method.
*
* <p>A URL for this view is supposed to be a HTTP redirect URL, i.e.
* suitable for HttpServletResponse's {@code sendRedirect} method, which
* is what actually does the redirect if the HTTP 1.0 flag is on, or via sending
* back an HTTP 303 code - if the HTTP 1.0 compatibility flag is off.
*
* <p>Note that while the default value for the "contextRelative" flag is off,
* you will probably want to almost always set it to true. With the flag off,
* URLs starting with "/" are considered relative to the web server root, while
* with the flag on, they are considered relative to the web application root.
* Since most web applications will never know or care what their context path
* actually is, they are much better off setting this flag to true, and submitting
* paths which are to be considered relative to the web application root.
*
* <p><b>NOTE when using this redirect view in a Portlet environment:</b> Make sure
* that your controller respects the Portlet {@code sendRedirect} constraints.
*
* @author Rod Johnson
* @author Juergen Hoeller
* @author Colin Sampaleanu
* @author Sam Brannen
* @author Arjen Poutsma
* @author Rossen Stoyanchev
* @see #setContextRelative
* @see #setHttp10Compatible
* @see #setExposeModelAttributes
* @see javax.servlet.http.HttpServletResponse#sendRedirect
*/
public class RedirectView extends AbstractUrlBasedView implements SmartView {
	/** Matches URI template variables such as {@code {foo}}; no slashes allowed inside the braces. */
	private static final Pattern URI_TEMPLATE_VARIABLE_PATTERN = Pattern.compile("\\{([^/]+?)\\}");
	// Whether a URL starting with "/" is interpreted relative to the current ServletContext.
	private boolean contextRelative = false;
	// true (default): send 302 via sendRedirect; false: send 303 for HTTP 1.1 clients.
	private boolean http10Compatible = true;
	// Whether model attributes are exposed as HTTP query parameters (default true).
	private boolean exposeModelAttributes = true;
	// Encoding for URL-encoding; null means use the request's encoding scheme.
	private String encodingScheme;
	// Explicit redirect status; when set, overrides the http10Compatible-derived default.
	private HttpStatus statusCode;
	// Whether "{...}" placeholders in the redirect URL are treated as URI template variables.
	private boolean expandUriTemplateVariables = true;
	// Presumably controls appending the current request's query string to the target URL
	// — NOTE(review): setter not visible in this chunk, confirm against its javadoc.
	private boolean propagateQueryParams = false;
/**
* Constructor for use as a bean.
*/
	public RedirectView() {
		// Bean-style construction: the URL is set later via setUrl. Path variable
		// exposure is disabled, consistent with all other constructors of this class.
		setExposePathVariables(false);
	}
/**
* Create a new RedirectView with the given URL.
* <p>The given URL will be considered as relative to the web server,
* not as relative to the current ServletContext.
* @param url the URL to redirect to
* @see #RedirectView(String, boolean)
*/
public RedirectView(String url) {
super(url);
setExposePathVariables(false);
}
/**
* Create a new RedirectView with the given URL.
* @param url the URL to redirect to
* @param contextRelative whether to interpret the given URL as
* relative to the current ServletContext
*/
public RedirectView(String url, boolean contextRelative) {
super(url);
this.contextRelative = contextRelative;
setExposePathVariables(false);
}
/**
* Create a new RedirectView with the given URL.
* @param url the URL to redirect to
* @param contextRelative whether to interpret the given URL as
* relative to the current ServletContext
* @param http10Compatible whether to stay compatible with HTTP 1.0 clients
*/
public RedirectView(String url, boolean contextRelative, boolean http10Compatible) {
super(url);
this.contextRelative = contextRelative;
this.http10Compatible = http10Compatible;
setExposePathVariables(false);
}
/**
* Create a new RedirectView with the given URL.
* @param url the URL to redirect to
* @param contextRelative whether to interpret the given URL as
* relative to the current ServletContext
* @param http10Compatible whether to stay compatible with HTTP 1.0 clients
* @param exposeModelAttributes whether or not model attributes should be
* exposed as query parameters
*/
	public RedirectView(String url, boolean contextRelative, boolean http10Compatible, boolean exposeModelAttributes) {
		super(url);
		this.contextRelative = contextRelative;
		this.http10Compatible = http10Compatible;
		this.exposeModelAttributes = exposeModelAttributes;
		// Consistent with the other constructors: path variables are not exposed.
		setExposePathVariables(false);
	}
	/**
	 * Set whether to interpret a given URL that starts with a slash ("/")
	 * as relative to the current ServletContext, i.e. as relative to the
	 * web application root.
	 * <p>Default is "false": A URL that starts with a slash will be interpreted
	 * as absolute, i.e. taken as-is. If "true", the context path will be
	 * prepended to the URL in such a case.
	 * @param contextRelative whether a URL starting with a slash should be
	 * resolved against the current ServletContext
	 * @see javax.servlet.http.HttpServletRequest#getContextPath
	 */
	public void setContextRelative(boolean contextRelative) {
		this.contextRelative = contextRelative;
	}
	/**
	 * Set whether to stay compatible with HTTP 1.0 clients.
	 * <p>In the default implementation, this will enforce HTTP status code 302
	 * in any case, i.e. delegate to {@code HttpServletResponse.sendRedirect}.
	 * Turning this off will send HTTP status code 303, which is the correct
	 * code for HTTP 1.1 clients, but not understood by HTTP 1.0 clients.
	 * <p>Many HTTP 1.1 clients treat 302 just like 303, not making any
	 * difference. However, some clients depend on 303 when redirecting
	 * after a POST request; turn this flag off in such a scenario.
	 * @param http10Compatible whether to stay compatible with HTTP 1.0 clients
	 * @see javax.servlet.http.HttpServletResponse#sendRedirect
	 */
	public void setHttp10Compatible(boolean http10Compatible) {
		this.http10Compatible = http10Compatible;
	}
	/**
	 * Set the {@code exposeModelAttributes} flag which denotes whether
	 * or not model attributes should be exposed as HTTP query parameters.
	 * <p>Defaults to {@code true}.
	 * @param exposeModelAttributes whether model attributes should be
	 * appended to the redirect URL as query parameters
	 */
	public void setExposeModelAttributes(final boolean exposeModelAttributes) {
		this.exposeModelAttributes = exposeModelAttributes;
	}
	/**
	 * Set the encoding scheme for this view.
	 * <p>Default is the request's encoding scheme
	 * (which is ISO-8859-1 if not specified otherwise).
	 * @param encodingScheme the encoding scheme to use for URL encoding,
	 * e.g. "UTF-8"
	 */
	public void setEncodingScheme(String encodingScheme) {
		this.encodingScheme = encodingScheme;
	}
	/**
	 * Set the status code for this view.
	 * <p>Default is to send 302/303, depending on the value of the
	 * {@link #setHttp10Compatible(boolean) http10Compatible} flag.
	 * @param statusCode the redirect status code to send
	 */
	public void setStatusCode(HttpStatus statusCode) {
		this.statusCode = statusCode;
	}
	/**
	 * Whether to treat the redirect URL as a URI template.
	 * Set this flag to {@code false} if the redirect URL contains open
	 * and close curly braces "{", "}" and you don't want them interpreted
	 * as URI variables.
	 * <p>Defaults to {@code true}.
	 * @param expandUriTemplateVariables whether to expand URI template
	 * variables in the redirect URL
	 */
	public void setExpandUriTemplateVariables(boolean expandUriTemplateVariables) {
		this.expandUriTemplateVariables = expandUriTemplateVariables;
	}
	/**
	 * When set to {@code true} the query string of the current URL is appended
	 * and thus propagated through to the redirected URL.
	 * <p>Defaults to {@code false}.
	 * @param propagateQueryParams whether to propagate the current request's
	 * query string to the redirect URL
	 * @since 4.1
	 */
	public void setPropagateQueryParams(boolean propagateQueryParams) {
		this.propagateQueryParams = propagateQueryParams;
	}
	/**
	 * Whether to propagate the query params of the current URL.
	 * @return {@code true} if the current request's query string is appended
	 * to the redirect URL
	 * @since 4.1
	 */
	public boolean isPropagateQueryProperties() {
		return this.propagateQueryParams;
	}
	/**
	 * Returns "true" indicating this view performs a redirect.
	 * @return always {@code true}
	 */
	@Override
	public boolean isRedirectView() {
		return true;
	}
	/**
	 * An ApplicationContext is not strictly required for RedirectView.
	 * @return always {@code false}
	 */
	@Override
	protected boolean isContextRequired() {
		return false;
	}
/**
* Convert model to request parameters and redirect to the given URL.
* @see #appendQueryProperties
* @see #sendRedirect
*/
@Override
protected void renderMergedOutputModel(Map<String, Object> model, HttpServletRequest request,
HttpServletResponse response) throws IOException {
String targetUrl = createTargetUrl(model, request);
targetUrl = updateTargetUrl(targetUrl, model, request, response);
FlashMap flashMap = RequestContextUtils.getOutputFlashMap(request);
if (!CollectionUtils.isEmpty(flashMap)) {
UriComponents uriComponents = UriComponentsBuilder.fromUriString(targetUrl).build();
flashMap.setTargetRequestPath(uriComponents.getPath());
flashMap.addTargetRequestParams(uriComponents.getQueryParams());
FlashMapManager flashMapManager = RequestContextUtils.getFlashMapManager(request);
if (flashMapManager == null) {
throw new IllegalStateException("FlashMapManager not found despite output FlashMap having been set");
}
flashMapManager.saveOutputFlashMap(flashMap, request, response);
}
sendRedirect(request, response, targetUrl, this.http10Compatible);
}
/**
* Create the target URL by checking if the redirect string is a URI template first,
* expanding it with the given model, and then optionally appending simple type model
* attributes as query String parameters.
*/
protected final String createTargetUrl(Map<String, Object> model, HttpServletRequest request)
throws UnsupportedEncodingException {
// Prepare target URL.
StringBuilder targetUrl = new StringBuilder();
if (this.contextRelative && getUrl().startsWith("/")) {
// Do not apply context path to relative URLs.
targetUrl.append(request.getContextPath());
}
targetUrl.append(getUrl());
String enc = this.encodingScheme;
if (enc == null) {
enc = request.getCharacterEncoding();
}
if (enc == null) {
enc = WebUtils.DEFAULT_CHARACTER_ENCODING;
}
if (this.expandUriTemplateVariables && StringUtils.hasText(targetUrl)) {
Map<String, String> variables = getCurrentRequestUriVariables(request);
targetUrl = replaceUriTemplateVariables(targetUrl.toString(), model, variables, enc);
}
if (isPropagateQueryProperties()) {
appendCurrentQueryParams(targetUrl, request);
}
if (this.exposeModelAttributes) {
appendQueryProperties(targetUrl, model, enc);
}
return targetUrl.toString();
}
/**
* Replace URI template variables in the target URL with encoded model
* attributes or URI variables from the current request. Model attributes
* referenced in the URL are removed from the model.
* @param targetUrl the redirect URL
* @param model Map that contains model attributes
* @param currentUriVariables current request URI variables to use
* @param encodingScheme the encoding scheme to use
* @throws UnsupportedEncodingException if string encoding failed
*/
protected StringBuilder replaceUriTemplateVariables(
String targetUrl, Map<String, Object> model, Map<String, String> currentUriVariables, String encodingScheme)
throws UnsupportedEncodingException {
StringBuilder result = new StringBuilder();
Matcher matcher = URI_TEMPLATE_VARIABLE_PATTERN.matcher(targetUrl);
int endLastMatch = 0;
while (matcher.find()) {
String name = matcher.group(1);
Object value = (model.containsKey(name) ? model.remove(name) : currentUriVariables.get(name));
if (value == null) {
throw new IllegalArgumentException("Model has no value for key '" + name + "'");
}
result.append(targetUrl.substring(endLastMatch, matcher.start()));
result.append(UriUtils.encodePathSegment(value.toString(), encodingScheme));
endLastMatch = matcher.end();
}
result.append(targetUrl.substring(endLastMatch, targetUrl.length()));
return result;
}
@SuppressWarnings("unchecked")
private Map<String, String> getCurrentRequestUriVariables(HttpServletRequest request) {
String name = HandlerMapping.URI_TEMPLATE_VARIABLES_ATTRIBUTE;
Map<String, String> uriVars = (Map<String, String>) request.getAttribute(name);
return (uriVars != null) ? uriVars : Collections.<String, String> emptyMap();
}
/**
* Append the query string of the current request to the target redirect URL.
* @param targetUrl the StringBuilder to append the properties to
* @param request the current request
* @since 4.1
*/
protected void appendCurrentQueryParams(StringBuilder targetUrl, HttpServletRequest request) {
String query = request.getQueryString();
if (StringUtils.hasText(query)) {
// Extract anchor fragment, if any.
String fragment = null;
int anchorIndex = targetUrl.indexOf("#");
if (anchorIndex > -1) {
fragment = targetUrl.substring(anchorIndex);
targetUrl.delete(anchorIndex, targetUrl.length());
}
if (targetUrl.toString().indexOf('?') < 0) {
targetUrl.append('?').append(query);
}
else {
targetUrl.append('&').append(query);
}
// Append anchor fragment, if any, to end of URL.
if (fragment != null) {
targetUrl.append(fragment);
}
}
}
/**
* Append query properties to the redirect URL.
* Stringifies, URL-encodes and formats model attributes as query properties.
* @param targetUrl the StringBuilder to append the properties to
* @param model Map that contains model attributes
* @param encodingScheme the encoding scheme to use
* @throws UnsupportedEncodingException if string encoding failed
* @see #queryProperties
*/
@SuppressWarnings("unchecked")
protected void appendQueryProperties(StringBuilder targetUrl, Map<String, Object> model, String encodingScheme)
throws UnsupportedEncodingException {
// Extract anchor fragment, if any.
String fragment = null;
int anchorIndex = targetUrl.indexOf("#");
if (anchorIndex > -1) {
fragment = targetUrl.substring(anchorIndex);
targetUrl.delete(anchorIndex, targetUrl.length());
}
// If there aren't already some parameters, we need a "?".
boolean first = (targetUrl.toString().indexOf('?') < 0);
for (Map.Entry<String, Object> entry : queryProperties(model).entrySet()) {
Object rawValue = entry.getValue();
Iterator<Object> valueIter;
if (rawValue != null && rawValue.getClass().isArray()) {
valueIter = Arrays.asList(ObjectUtils.toObjectArray(rawValue)).iterator();
}
else if (rawValue instanceof Collection) {
valueIter = ((Collection<Object>) rawValue).iterator();
}
else {
valueIter = Collections.singleton(rawValue).iterator();
}
while (valueIter.hasNext()) {
Object value = valueIter.next();
if (first) {
targetUrl.append('?');
first = false;
}
else {
targetUrl.append('&');
}
String encodedKey = urlEncode(entry.getKey(), encodingScheme);
String encodedValue = (value != null ? urlEncode(value.toString(), encodingScheme) : "");
targetUrl.append(encodedKey).append('=').append(encodedValue);
}
}
// Append anchor fragment, if any, to end of URL.
if (fragment != null) {
targetUrl.append(fragment);
}
}
/**
* Determine name-value pairs for query strings, which will be stringified,
* URL-encoded and formatted by {@link #appendQueryProperties}.
* <p>This implementation filters the model through checking
* {@link #isEligibleProperty(String, Object)} for each element,
* by default accepting Strings, primitives and primitive wrappers only.
* @param model the original model Map
* @return the filtered Map of eligible query properties
* @see #isEligibleProperty(String, Object)
*/
protected Map<String, Object> queryProperties(Map<String, Object> model) {
Map<String, Object> result = new LinkedHashMap<String, Object>();
for (Map.Entry<String, Object> entry : model.entrySet()) {
if (isEligibleProperty(entry.getKey(), entry.getValue())) {
result.put(entry.getKey(), entry.getValue());
}
}
return result;
}
/**
* Determine whether the given model element should be exposed
* as a query property.
* <p>The default implementation considers Strings and primitives
* as eligible, and also arrays and Collections/Iterables with
* corresponding elements. This can be overridden in subclasses.
* @param key the key of the model element
* @param value the value of the model element
* @return whether the element is eligible as query property
*/
protected boolean isEligibleProperty(String key, Object value) {
if (value == null) {
return false;
}
if (isEligibleValue(value)) {
return true;
}
if (value.getClass().isArray()) {
int length = Array.getLength(value);
if (length == 0) {
return false;
}
for (int i = 0; i < length; i++) {
Object element = Array.get(value, i);
if (!isEligibleValue(element)) {
return false;
}
}
return true;
}
if (value instanceof Collection) {
Collection<?> coll = (Collection<?>) value;
if (coll.isEmpty()) {
return false;
}
for (Object element : coll) {
if (!isEligibleValue(element)) {
return false;
}
}
return true;
}
return false;
}
/**
* Determine whether the given model element value is eligible for exposure.
* <p>The default implementation considers primitives, Strings, Numbers, Dates,
* URIs, URLs and Locale objects as eligible. This can be overridden in subclasses.
* @param value the model element value
* @return whether the element value is eligible
* @see BeanUtils#isSimpleValueType
*/
protected boolean isEligibleValue(Object value) {
return (value != null && BeanUtils.isSimpleValueType(value.getClass()));
}
/**
* URL-encode the given input String with the given encoding scheme.
* <p>The default implementation uses {@code URLEncoder.encode(input, enc)}.
* @param input the unencoded input String
* @param encodingScheme the encoding scheme
* @return the encoded output String
* @throws UnsupportedEncodingException if thrown by the JDK URLEncoder
* @see java.net.URLEncoder#encode(String, String)
* @see java.net.URLEncoder#encode(String)
*/
protected String urlEncode(String input, String encodingScheme) throws UnsupportedEncodingException {
return (input != null ? URLEncoder.encode(input, encodingScheme) : null);
}
/**
* Find the registered {@link RequestDataValueProcessor}, if any, and allow
* it to update the redirect target URL.
* @param targetUrl the given redirect URL
* @return the updated URL or the same as URL as the one passed in
*/
protected String updateTargetUrl(String targetUrl, Map<String, Object> model,
HttpServletRequest request, HttpServletResponse response) {
WebApplicationContext wac = getWebApplicationContext();
if (wac == null) {
wac = RequestContextUtils.findWebApplicationContext(request, getServletContext());
}
if (wac != null && wac.containsBean(RequestContextUtils.REQUEST_DATA_VALUE_PROCESSOR_BEAN_NAME)) {
RequestDataValueProcessor processor = wac.getBean(
RequestContextUtils.REQUEST_DATA_VALUE_PROCESSOR_BEAN_NAME, RequestDataValueProcessor.class);
return processor.processUrl(request, targetUrl);
}
return targetUrl;
}
/**
* Send a redirect back to the HTTP client
* @param request current HTTP request (allows for reacting to request method)
* @param response current HTTP response (for sending response headers)
* @param targetUrl the target URL to redirect to
* @param http10Compatible whether to stay compatible with HTTP 1.0 clients
* @throws IOException if thrown by response methods
*/
protected void sendRedirect(HttpServletRequest request, HttpServletResponse response,
String targetUrl, boolean http10Compatible) throws IOException {
String encodedRedirectURL = response.encodeRedirectURL(targetUrl);
if (http10Compatible) {
HttpStatus attributeStatusCode = (HttpStatus) request.getAttribute(View.RESPONSE_STATUS_ATTRIBUTE);
if (this.statusCode != null) {
response.setStatus(this.statusCode.value());
response.setHeader("Location", encodedRedirectURL);
}
else if (attributeStatusCode != null) {
response.setStatus(attributeStatusCode.value());
response.setHeader("Location", encodedRedirectURL);
}
else {
// Send status code 302 by default.
response.sendRedirect(encodedRedirectURL);
}
}
else {
HttpStatus statusCode = getHttp11StatusCode(request, response, targetUrl);
response.setStatus(statusCode.value());
response.setHeader("Location", encodedRedirectURL);
}
}
/**
* Determines the status code to use for HTTP 1.1 compatible requests.
* <p>The default implementation returns the {@link #setStatusCode(HttpStatus) statusCode}
* property if set, or the value of the {@link #RESPONSE_STATUS_ATTRIBUTE} attribute.
* If neither are set, it defaults to {@link HttpStatus#SEE_OTHER} (303).
* @param request the request to inspect
* @param response the servlet response
* @param targetUrl the target URL
* @return the response status
*/
protected HttpStatus getHttp11StatusCode(
HttpServletRequest request, HttpServletResponse response, String targetUrl) {
if (this.statusCode != null) {
return this.statusCode;
}
HttpStatus attributeStatusCode = (HttpStatus) request.getAttribute(View.RESPONSE_STATUS_ATTRIBUTE);
if (attributeStatusCode != null) {
return attributeStatusCode;
}
return HttpStatus.SEE_OTHER;
}
}
| QBNemo/spring-mvc-showcase | src/main/java/org/springframework/web/servlet/view/RedirectView.java | Java | apache-2.0 | 22,944 |
#include <sgx_fcntl_util.h>
#include <sgx_ocall_util.h>
#include <sgx_thread.h>
// One lock per OCALL wrapper — presumably consumed by the SAFE_INVOKE macro
// (token-pasted on the ocall name) to serialize concurrent enclave threads
// making the same untrusted call; TODO confirm against sgx_ocall_util.h.
INIT_LOCK(ocall_open2);
INIT_LOCK(ocall_creat);
INIT_LOCK(ocall_openat2);
INIT_LOCK(ocall_fcntl1);
INIT_LOCK(ocall_fcntl2);
INIT_LOCK(ocall_fcntl3);
// Enclave-side replacement for open(2): marshals the call to the untrusted
// runtime via ocall_open2. The optional mode argument is read only when
// O_CREAT is requested; otherwise the wrapper passes a default of 0777.
int sgx_wrapper_open(const char *pathname, int flags, ...)
{
	va_list args;
	va_start(args, flags);
	mode_t mode = (flags & O_CREAT) ? va_arg(args, mode_t) : 0777;
	va_end(args);

	int result;
	sgx_status_t status = SAFE_INVOKE(ocall_open2, &result, pathname, flags, mode);
	CHECK_STATUS(status);
	return result;
}
// Enclave-side replacement for creat(2): forwards directly to the untrusted
// runtime via ocall_creat and returns its result.
int sgx_wrapper_creat(const char *pathname, unsigned int mode)
{
	int retval;
	sgx_status_t status = SAFE_INVOKE(ocall_creat, &retval, pathname, mode);
	CHECK_STATUS(status);
	return retval;
}
// Enclave-side replacement for openat(2): marshals the call to the untrusted
// runtime via ocall_openat2. The optional mode argument is read only when
// O_CREAT is requested; otherwise the wrapper passes a default of 0777.
int sgx_wrapper_openat(int dirfd, const char *pathname, int flags, ...)
{
	va_list args;
	va_start(args, flags);
	mode_t mode = (flags & O_CREAT) ? va_arg(args, mode_t) : 0777;
	va_end(args);

	int result;
	sgx_status_t status = SAFE_INVOKE(ocall_openat2, &result, dirfd, pathname, flags, mode);
	CHECK_STATUS(status);
	return result;
}
// Enclave-side replacement for fcntl(2). Dispatches on the command to one of
// three OCALL shapes: no third argument, a long argument, or a struct flock*.
// Unsupported commands return -1.
//
// BUG FIX: the original called va_start() but never va_end() on the long-arg
// and flock-arg paths; the C standard requires a matching va_end for every
// va_start, so those paths invoked undefined behavior. va_end is now called
// on every return path before the OCALL is made.
// NOTE(review): if CHECK_STATUS can return early on error, va_end must run
// before it — which is why it is placed immediately after va_arg.
int sgx_wrapper_fcntl(int fd, int cmd, ...)
{
	sgx_status_t status;
	va_list ap;
	int retval;
	va_start(ap, cmd);
	long larg = -1;
	struct flock *flarg = NULL;

	// Fix me: Should refer to the linux kernel in order to do it in the right way
	switch(cmd) {
	case F_GETFD:
	case F_GETFL:
	case F_GETOWN:
		// No third argument for the "get" commands.
		va_end(ap);
		status = SAFE_INVOKE(ocall_fcntl1, &retval, fd, cmd);
		CHECK_STATUS(status);
		return retval;

	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_SETFD:
	case F_SETFL:
	case F_SETOWN:
		// Third argument is a long (flag bits, fd number or owner pid).
		larg = va_arg(ap, long);
		va_end(ap);
		status = SAFE_INVOKE(ocall_fcntl2, &retval, fd, cmd, larg);
		CHECK_STATUS(status);
		return retval;

	case F_SETLK:
	case F_GETLK:
	case F_SETLKW:
		// Third argument is a struct flock*, copied across the enclave boundary.
		flarg = va_arg(ap, struct flock *);
		va_end(ap);
		status = SAFE_INVOKE(ocall_fcntl3, &retval, fd, cmd, flarg, sizeof(struct flock));
		CHECK_STATUS(status);
		return retval;

	default:
		// Unsupported command.
		va_end(ap);
		return -1;
	};

	return -1;
}
| shwetasshinde24/Panoply | case-studies/h2o/src/H2oEnclave/IO/sgx_fcntl.cpp | C++ | apache-2.0 | 2,173 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.ssmincidents.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/ssm-incidents-2018-05-10/ListTimelineEvents" target="_top">AWS
* API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ListTimelineEventsResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
*/
private java.util.List<EventSummary> eventSummaries;
/**
* <p>
* The pagination token to continue to the next page of results.
* </p>
*/
private String nextToken;
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
*
* @return Details about each event that occurred during the incident.
*/
public java.util.List<EventSummary> getEventSummaries() {
return eventSummaries;
}
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
*
* @param eventSummaries
* Details about each event that occurred during the incident.
*/
public void setEventSummaries(java.util.Collection<EventSummary> eventSummaries) {
if (eventSummaries == null) {
this.eventSummaries = null;
return;
}
this.eventSummaries = new java.util.ArrayList<EventSummary>(eventSummaries);
}
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
* <p>
* <b>NOTE:</b> This method appends the values to the existing list (if any). Use
* {@link #setEventSummaries(java.util.Collection)} or {@link #withEventSummaries(java.util.Collection)} if you want
* to override the existing values.
* </p>
*
* @param eventSummaries
* Details about each event that occurred during the incident.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListTimelineEventsResult withEventSummaries(EventSummary... eventSummaries) {
if (this.eventSummaries == null) {
setEventSummaries(new java.util.ArrayList<EventSummary>(eventSummaries.length));
}
for (EventSummary ele : eventSummaries) {
this.eventSummaries.add(ele);
}
return this;
}
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
*
* @param eventSummaries
* Details about each event that occurred during the incident.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListTimelineEventsResult withEventSummaries(java.util.Collection<EventSummary> eventSummaries) {
setEventSummaries(eventSummaries);
return this;
}
/**
* <p>
* The pagination token to continue to the next page of results.
* </p>
*
* @param nextToken
* The pagination token to continue to the next page of results.
*/
public void setNextToken(String nextToken) {
this.nextToken = nextToken;
}
/**
* <p>
* The pagination token to continue to the next page of results.
* </p>
*
* @return The pagination token to continue to the next page of results.
*/
public String getNextToken() {
return this.nextToken;
}
/**
* <p>
* The pagination token to continue to the next page of results.
* </p>
*
* @param nextToken
* The pagination token to continue to the next page of results.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListTimelineEventsResult withNextToken(String nextToken) {
setNextToken(nextToken);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getEventSummaries() != null)
sb.append("EventSummaries: ").append(getEventSummaries()).append(",");
if (getNextToken() != null)
sb.append("NextToken: ").append(getNextToken());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof ListTimelineEventsResult == false)
return false;
ListTimelineEventsResult other = (ListTimelineEventsResult) obj;
if (other.getEventSummaries() == null ^ this.getEventSummaries() == null)
return false;
if (other.getEventSummaries() != null && other.getEventSummaries().equals(this.getEventSummaries()) == false)
return false;
if (other.getNextToken() == null ^ this.getNextToken() == null)
return false;
if (other.getNextToken() != null && other.getNextToken().equals(this.getNextToken()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getEventSummaries() == null) ? 0 : getEventSummaries().hashCode());
hashCode = prime * hashCode + ((getNextToken() == null) ? 0 : getNextToken().hashCode());
return hashCode;
}
@Override
public ListTimelineEventsResult clone() {
try {
return (ListTimelineEventsResult) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
}
| aws/aws-sdk-java | aws-java-sdk-ssmincidents/src/main/java/com/amazonaws/services/ssmincidents/model/ListTimelineEventsResult.java | Java | apache-2.0 | 6,834 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
*
* You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributions from 2013-2017 where performed either by US government
* employees, or under US Veterans Health Administration contracts.
*
* US Veterans Health Administration contributions by government employees
* are work of the U.S. Government and are not subject to copyright
* protection in the United States. Portions contributed by government
* employees are USGovWork (17USC §105). Not subject to copyright.
*
* Contribution by contractors to the US Veterans Health Administration
* during this period are contractually contributed under the
* Apache License, Version 2.0.
*
* See: https://www.usa.gov/government-works
*
* Contributions prior to 2013:
*
* Copyright (C) International Health Terminology Standards Development Organisation.
* Licensed under the Apache License, Version 2.0.
*
*/
package sh.isaac.api;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javafx.concurrent.Task;
import sh.isaac.api.chronicle.Chronology;
import sh.isaac.api.component.concept.ConceptChronology;
import sh.isaac.api.component.semantic.SemanticChronology;
import sh.isaac.api.externalizable.IsaacObjectType;
/**
* The Class Util.
*
* @author kec
*/
public class Util {
private static final Logger LOG = LogManager.getLogger(Util.class);
/**
* Adds the to task set and wait till done.
*
* @param <T> the generic type
* @param task the task
* @return the t
* @throws InterruptedException the interrupted exception
* @throws ExecutionException the execution exception
*/
public static <T> T addToTaskSetAndWaitTillDone(Task<T> task)
throws InterruptedException, ExecutionException {
Get.activeTasks().add(task);
try {
final T returnValue = task.get();
return returnValue;
} finally {
Get.activeTasks().remove(task);
}
}
/**
* String array to path array.
*
* @param strings the strings
* @return the path[]
*/
public static Path[] stringArrayToPathArray(String... strings) {
final Path[] paths = new Path[strings.length];
for (int i = 0; i < paths.length; i++) {
paths[i] = Paths.get(strings[i]);
}
return paths;
}
/**
* Convenience method to find the nearest concept related to a semantic. Recursively walks referenced components until it finds a concept.
* @param nid
* @return the nearest concept nid, or empty, if no concept can be found.
*/
public static Optional<Integer> getNearestConcept(int nid) {
Optional<? extends Chronology> c = Get.identifiedObjectService().getChronology(nid);
if (c.isPresent()) {
if (c.get().getIsaacObjectType() == IsaacObjectType.SEMANTIC) {
return getNearestConcept(((SemanticChronology)c.get()).getReferencedComponentNid());
}
else if (c.get().getIsaacObjectType() == IsaacObjectType.CONCEPT) {
return Optional.of(((ConceptChronology)c.get()).getNid());
}
else {
LOG.warn("Unexpected object type: " + c.get().getIsaacObjectType());
}
}
return Optional.empty();
}
} | OSEHRA/ISAAC | core/api/src/main/java/sh/isaac/api/Util.java | Java | apache-2.0 | 3,905 |
/*
* #%L
* GarethHealy :: JBoss Fuse Examples :: MBeans Expose
* %%
* Copyright (C) 2013 - 2018 Gareth Healy
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package com.garethahealy.mbeans.expose.routes;
import org.junit.Test;
public class CamelContextTest extends BaseCamelBlueprintTestSupport {
    // Smoke test: verifies the CamelContext supplied by the blueprint test
    // support base class was bootstrapped and injected before the test runs.
    @Test
    public void camelContextIsNotNull() {
        assertNotNull(context);
    }
}
| garethahealy/jboss-fuse-examples | mbeans-expose/src/test/java/com/garethahealy/mbeans/expose/routes/CamelContextTest.java | Java | apache-2.0 | 923 |
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/ads/googleads/v10/resources/shared_criterion.proto
package com.google.ads.googleads.v10.resources;
public interface SharedCriterionOrBuilder extends
// @@protoc_insertion_point(interface_extends:google.ads.googleads.v10.resources.SharedCriterion)
com.google.protobuf.MessageOrBuilder {
/**
* <pre>
* Immutable. The resource name of the shared criterion.
* Shared set resource names have the form:
* `customers/{customer_id}/sharedCriteria/{shared_set_id}~{criterion_id}`
* </pre>
*
* <code>string resource_name = 1 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return The resourceName.
*/
java.lang.String getResourceName();
/**
* <pre>
* Immutable. The resource name of the shared criterion.
* Shared set resource names have the form:
* `customers/{customer_id}/sharedCriteria/{shared_set_id}~{criterion_id}`
* </pre>
*
* <code>string resource_name = 1 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return The bytes for resourceName.
*/
com.google.protobuf.ByteString
getResourceNameBytes();
/**
* <pre>
* Immutable. The shared set to which the shared criterion belongs.
* </pre>
*
* <code>optional string shared_set = 10 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return Whether the sharedSet field is set.
*/
boolean hasSharedSet();
/**
* <pre>
* Immutable. The shared set to which the shared criterion belongs.
* </pre>
*
* <code>optional string shared_set = 10 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return The sharedSet.
*/
java.lang.String getSharedSet();
/**
* <pre>
* Immutable. The shared set to which the shared criterion belongs.
* </pre>
*
* <code>optional string shared_set = 10 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return The bytes for sharedSet.
*/
com.google.protobuf.ByteString
getSharedSetBytes();
/**
* <pre>
* Output only. The ID of the criterion.
* This field is ignored for mutates.
* </pre>
*
* <code>optional int64 criterion_id = 11 [(.google.api.field_behavior) = OUTPUT_ONLY];</code>
* @return Whether the criterionId field is set.
*/
boolean hasCriterionId();
/**
* <pre>
* Output only. The ID of the criterion.
* This field is ignored for mutates.
* </pre>
*
* <code>optional int64 criterion_id = 11 [(.google.api.field_behavior) = OUTPUT_ONLY];</code>
* @return The criterionId.
*/
long getCriterionId();
/**
* <pre>
* Output only. The type of the criterion.
* </pre>
*
* <code>.google.ads.googleads.v10.enums.CriterionTypeEnum.CriterionType type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY];</code>
* @return The enum numeric value on the wire for type.
*/
int getTypeValue();
/**
* <pre>
* Output only. The type of the criterion.
* </pre>
*
* <code>.google.ads.googleads.v10.enums.CriterionTypeEnum.CriterionType type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY];</code>
* @return The type.
*/
com.google.ads.googleads.v10.enums.CriterionTypeEnum.CriterionType getType();
/**
* <pre>
* Immutable. Keyword.
* </pre>
*
* <code>.google.ads.googleads.v10.common.KeywordInfo keyword = 3 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the keyword field is set.
*/
boolean hasKeyword();
/**
* <pre>
* Immutable. Keyword.
* </pre>
*
* <code>.google.ads.googleads.v10.common.KeywordInfo keyword = 3 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The keyword.
*/
com.google.ads.googleads.v10.common.KeywordInfo getKeyword();
/**
* <pre>
* Immutable. Keyword.
* </pre>
*
* <code>.google.ads.googleads.v10.common.KeywordInfo keyword = 3 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.KeywordInfoOrBuilder getKeywordOrBuilder();
/**
* <pre>
* Immutable. YouTube Video.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeVideoInfo youtube_video = 5 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the youtubeVideo field is set.
*/
boolean hasYoutubeVideo();
/**
* <pre>
* Immutable. YouTube Video.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeVideoInfo youtube_video = 5 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The youtubeVideo.
*/
com.google.ads.googleads.v10.common.YouTubeVideoInfo getYoutubeVideo();
/**
* <pre>
* Immutable. YouTube Video.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeVideoInfo youtube_video = 5 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.YouTubeVideoInfoOrBuilder getYoutubeVideoOrBuilder();
/**
* <pre>
* Immutable. YouTube Channel.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeChannelInfo youtube_channel = 6 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the youtubeChannel field is set.
*/
boolean hasYoutubeChannel();
/**
* <pre>
* Immutable. YouTube Channel.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeChannelInfo youtube_channel = 6 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The youtubeChannel.
*/
com.google.ads.googleads.v10.common.YouTubeChannelInfo getYoutubeChannel();
/**
* <pre>
* Immutable. YouTube Channel.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeChannelInfo youtube_channel = 6 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.YouTubeChannelInfoOrBuilder getYoutubeChannelOrBuilder();
/**
* <pre>
* Immutable. Placement.
* </pre>
*
* <code>.google.ads.googleads.v10.common.PlacementInfo placement = 7 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the placement field is set.
*/
boolean hasPlacement();
/**
* <pre>
* Immutable. Placement.
* </pre>
*
* <code>.google.ads.googleads.v10.common.PlacementInfo placement = 7 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The placement.
*/
com.google.ads.googleads.v10.common.PlacementInfo getPlacement();
/**
* <pre>
* Immutable. Placement.
* </pre>
*
* <code>.google.ads.googleads.v10.common.PlacementInfo placement = 7 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.PlacementInfoOrBuilder getPlacementOrBuilder();
/**
* <pre>
* Immutable. Mobile App Category.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileAppCategoryInfo mobile_app_category = 8 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the mobileAppCategory field is set.
*/
boolean hasMobileAppCategory();
/**
* <pre>
* Immutable. Mobile App Category.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileAppCategoryInfo mobile_app_category = 8 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The mobileAppCategory.
*/
com.google.ads.googleads.v10.common.MobileAppCategoryInfo getMobileAppCategory();
/**
* <pre>
* Immutable. Mobile App Category.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileAppCategoryInfo mobile_app_category = 8 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.MobileAppCategoryInfoOrBuilder getMobileAppCategoryOrBuilder();
/**
* <pre>
* Immutable. Mobile application.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileApplicationInfo mobile_application = 9 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the mobileApplication field is set.
*/
boolean hasMobileApplication();
/**
* <pre>
* Immutable. Mobile application.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileApplicationInfo mobile_application = 9 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The mobileApplication.
*/
com.google.ads.googleads.v10.common.MobileApplicationInfo getMobileApplication();
/**
* <pre>
* Immutable. Mobile application.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileApplicationInfo mobile_application = 9 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.MobileApplicationInfoOrBuilder getMobileApplicationOrBuilder();
public com.google.ads.googleads.v10.resources.SharedCriterion.CriterionCase getCriterionCase();
}
| googleads/google-ads-java | google-ads-stubs-v10/src/main/java/com/google/ads/googleads/v10/resources/SharedCriterionOrBuilder.java | Java | apache-2.0 | 8,879 |
package com.example;

/**
 * Created by hilmiat on 7/29/17.
 *
 * Demonstrates conditional (if / else-if) logic: converts numeric scores
 * into letter grades and prints the grade for a set of sample scores.
 */
public class DemoConditional {

    /**
     * Maps a numeric score to a letter grade.
     * Bands: above 85 = 'A', 70-85 = 'B', 60-69 = 'C', below 60 = 'D'.
     *
     * @param nilai the numeric score to grade
     * @return the letter grade for the score
     */
    public static char getGrade(int nilai) {
        if (nilai > 85) {
            return 'A';
        }
        if (nilai > 69) {
            return 'B';
        }
        if (nilai >= 60) {
            return 'C';
        }
        return 'D';
    }

    public static void main(String[] args) {
        int[] nilai_siswa = {78, 90, 89, 68, 77};
        OperasiArray.cetakArray(nilai_siswa);
        // Grade bands: 86-100 A, 70-85 B, 60-69 C, 0-59 D.
        // D means "fail"; A, B and C pass.
        for (int i = 0; i < nilai_siswa.length; i++) {
            int n = nilai_siswa[i];
            System.out.println("Nilai " + n + ",garade-nya:" + getGrade(n));
        }
    }
}
| hilmiat/NF_android_complete | Pertemuan2/pertemuan_dua/src/main/java/com/example/DemoConditional.java | Java | apache-2.0 | 707 |
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static org.apache.bookkeeper.client.BookKeeperClientStats.WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS;
import static org.apache.bookkeeper.client.BookKeeperClientStats.WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS;
import static org.apache.bookkeeper.common.concurrent.FutureUtils.result;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.netty.util.IllegalReferenceCountException;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
import org.apache.bookkeeper.client.BKException.BKBookieHandleNotAvailableException;
import org.apache.bookkeeper.client.BKException.BKIllegalOpException;
import org.apache.bookkeeper.client.BookKeeper.DigestType;
import org.apache.bookkeeper.client.api.WriteFlag;
import org.apache.bookkeeper.client.api.WriteHandle;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.net.BookieSocketAddress;
import org.apache.bookkeeper.proto.BookieServer;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.test.BookKeeperClusterTestCase;
import org.apache.bookkeeper.test.TestStatsProvider;
import org.apache.bookkeeper.zookeeper.BoundExponentialBackoffRetryPolicy;
import org.apache.bookkeeper.zookeeper.ZooKeeperClient;
import org.apache.bookkeeper.zookeeper.ZooKeeperWatcherBase;
import org.apache.zookeeper.AsyncCallback.StringCallback;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.ConnectionLossException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
import org.apache.zookeeper.data.ACL;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tests of the main BookKeeper client.
*/
public class BookKeeperTest extends BookKeeperClusterTestCase {
// Logger for diagnostics emitted by these tests.
private static final Logger LOG = LoggerFactory.getLogger(BookKeeperTest.class);
// Sentinel ledger id that must never match a real ledger.
private static final long INVALID_LEDGERID = -1L;
// Digest type used for every ledger created by this class (CRC32, set in the constructor).
private final DigestType digestType;
/**
 * Creates the test case with a 4-bookie cluster and the CRC32 digest type.
 */
public BookKeeperTest() {
super(4);
this.digestType = DigestType.CRC32;
}
/**
 * Verifies that a BookKeeper client can still be constructed and used while the
 * ZooKeeper cluster is briefly unresponsive at construction time, as long as the
 * outage is shorter than the configured ZK session timeout.
 */
@Test
public void testConstructionZkDelay() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri())
.setZkTimeout(20000);
// Pause the ZK cluster for 200ms; the latch is counted down by sleepCluster
// once the pause has been initiated.
CountDownLatch l = new CountDownLatch(1);
zkUtil.sleepCluster(200, TimeUnit.MILLISECONDS, l);
l.await();
// Construction should ride out the short pause and the client must be usable.
BookKeeper bkc = new BookKeeper(conf);
bkc.createLedger(digestType, "testPasswd".getBytes()).close();
bkc.close();
}
/**
 * Verifies that constructing a BookKeeper client around an explicitly supplied,
 * not-yet-connected ZooKeeper handle fails fast with an IOException caused by
 * ConnectionLossException, rather than blocking or succeeding.
 *
 * <p>Fix: the ZooKeeper handle is now closed in a finally block — previously it
 * leaked (its send/event threads stayed alive) on every exit path.
 */
@Test
public void testConstructionNotConnectedExplicitZk() throws Exception {
    ClientConfiguration conf = new ClientConfiguration();
    conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri())
        .setZkTimeout(20000);
    // Pause the ZK cluster so the fresh ZK handle cannot connect yet.
    CountDownLatch l = new CountDownLatch(1);
    zkUtil.sleepCluster(200, TimeUnit.MILLISECONDS, l);
    l.await();
    ZooKeeper zk = new ZooKeeper(
        zkUtil.getZooKeeperConnectString(),
        50,
        event -> {});
    try {
        assertFalse("ZK shouldn't have connected yet", zk.getState().isConnected());
        try {
            BookKeeper bkc = new BookKeeper(conf, zk);
            fail("Shouldn't be able to construct with unconnected zk");
        } catch (IOException cle) {
            // correct behaviour: construction is rejected because ZK is not connected
            assertTrue(cle.getCause() instanceof ConnectionLossException);
        }
    } finally {
        // Always release the handle; leaking it keeps ZK client threads alive
        // for the remainder of the test run.
        zk.close();
    }
}
/**
 * Test that bookkeeper is not able to open ledgers if
 * it provides the wrong password or wrong digest.
 */
@Test
public void testBookkeeperDigestPasswordWithAutoDetection() throws Exception {
// Same scenario as testBookkeeperDigestPassword, with digest-type autodetection enabled.
testBookkeeperDigestPassword(true);
}
@Test
public void testBookkeeperDigestPasswordWithoutAutoDetection() throws Exception {
// Same scenario as testBookkeeperDigestPassword, with digest-type autodetection disabled.
testBookkeeperDigestPassword(false);
}
/**
 * Creates a ledger with a known digest type and password, then verifies reopening it:
 * <ul>
 *   <li>fails with BKUnauthorizedAccessException for a wrong password,</li>
 *   <li>fails with BKDigestMatchException for a wrong digest type — unless
 *       digest-type autodetection is enabled, in which case it must succeed,</li>
 *   <li>fails with BKUnauthorizedAccessException when both are wrong,</li>
 *   <li>succeeds with the correct digest and password.</li>
 * </ul>
 *
 * @param autodetection whether digest-type autodetection is enabled on the client
 */
void testBookkeeperDigestPassword(boolean autodetection) throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
conf.setEnableDigestTypeAutodetection(autodetection);
BookKeeper bkc = new BookKeeper(conf);
DigestType digestCorrect = digestType;
byte[] passwdCorrect = "AAAAAAA".getBytes();
// Pick whichever digest type differs from the one the ledger was created with.
DigestType digestBad = digestType == DigestType.MAC ? DigestType.CRC32 : DigestType.MAC;
byte[] passwdBad = "BBBBBBB".getBytes();
LedgerHandle lh = null;
try {
lh = bkc.createLedger(digestCorrect, passwdCorrect);
long id = lh.getId();
for (int i = 0; i < 100; i++) {
lh.addEntry("foobar".getBytes());
}
lh.close();
// try open with bad passwd
try {
bkc.openLedger(id, digestCorrect, passwdBad);
fail("Shouldn't be able to open with bad passwd");
} catch (BKException.BKUnauthorizedAccessException bke) {
// correct behaviour
}
// try open with bad digest
try {
bkc.openLedger(id, digestBad, passwdCorrect);
if (!autodetection) {
fail("Shouldn't be able to open with bad digest");
}
} catch (BKException.BKDigestMatchException bke) {
// correct behaviour
if (autodetection) {
fail("Should not throw digest match exception if `autodetection` is enabled");
}
}
// try open with both bad
try {
bkc.openLedger(id, digestBad, passwdBad);
fail("Shouldn't be able to open with bad passwd and digest");
} catch (BKException.BKUnauthorizedAccessException bke) {
// correct behaviour
}
// try open with both correct
bkc.openLedger(id, digestCorrect, passwdCorrect).close();
} finally {
// NOTE(review): on the success path lh was already closed above, so close()
// may run twice here — assumed idempotent; the finally is for failure paths.
if (lh != null) {
lh.close();
}
bkc.close();
}
}
/**
 * Tests that when trying to use a closed BK client object we get
 * a callback error and not an InterruptedException.
 * @throws Exception
 */
@Test
public void testAsyncReadWithError() throws Exception {
LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes());
bkc.close();
final AtomicInteger result = new AtomicInteger(0);
final CountDownLatch counter = new CountDownLatch(1);
// Try to write; we should get an error callback, not a thrown exception.
// NOTE(review): despite the method name, this exercises the async *add* path.
lh.asyncAddEntry("test".getBytes(), new AddCallback() {
public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
result.set(rc);
counter.countDown();
}
}, null);
counter.await();
// The callback must report an error result code, not success.
assertTrue(result.get() != 0);
}
/**
 * Test that bookkeeper will close cleanly if close is issued
 * while another operation is in progress.
 */
@Test
public void testCloseDuringOp() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
// Repeat several times to give the race between close() and the in-flight
// add a realistic chance to occur.
for (int i = 0; i < 10; i++) {
final BookKeeper client = new BookKeeper(conf);
final CountDownLatch l = new CountDownLatch(1);
final AtomicBoolean success = new AtomicBoolean(false);
Thread t = new Thread() {
public void run() {
try {
LedgerHandle lh = client.createLedger(3, 3, digestType, "testPasswd".getBytes());
// Churn the ensemble so the add below is likely still in flight
// when close() is called.
startNewBookie();
killBookie(0);
lh.asyncAddEntry("test".getBytes(), new AddCallback() {
@Override
public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
// noop, we don't care if this completes
}
}, null);
client.close();
success.set(true);
l.countDown();
} catch (Exception e) {
LOG.error("Error running test", e);
success.set(false);
l.countDown();
}
}
};
t.start();
// close() must finish within the timeout and without throwing.
assertTrue("Close never completed", l.await(10, TimeUnit.SECONDS));
assertTrue("Close was not successful", success.get());
}
}
/**
 * Verifies {@code BookKeeper#isClosed(long)}: a ledger is reported as open
 * while its write handle is open, and reported as closed after the handle
 * is closed.
 *
 * <p>Fix: the client is now managed with try-with-resources — previously it
 * leaked if either assertion failed before the explicit close().
 */
@Test
public void testIsClosed() throws Exception {
    ClientConfiguration conf = new ClientConfiguration();
    conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    try (BookKeeper bkc = new BookKeeper(conf)) {
        LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes());
        long lId = lh.getId();
        lh.addEntry("000".getBytes());
        // While the write handle is open the ledger must not be flagged closed.
        assertFalse("Ledger shouldn't be flagged as closed!", bkc.isClosed(lId));
        lh.close();
        assertTrue("Ledger should be flagged as closed!", bkc.isClosed(lId));
    }
}
/**
 * After stopping the entire bookie cluster, both the synchronous and the
 * asynchronous read paths must fail with BookieHandleNotAvailable, and the
 * async read callback must be invoked exactly once.
 */
@Test
public void testReadFailureCallback() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
BookKeeper bkc = new BookKeeper(conf);
LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes());
final int numEntries = 10;
for (int i = 0; i < numEntries; i++) {
lh.addEntry(("entry-" + i).getBytes());
}
// Take every bookie down so all reads must fail.
stopBKCluster();
try {
lh.readEntries(0, numEntries - 1);
fail("Read operation should have failed");
} catch (BKBookieHandleNotAvailableException e) {
// expected
}
final CountDownLatch counter = new CountDownLatch(1);
final AtomicInteger receivedResponses = new AtomicInteger(0);
final AtomicInteger returnCode = new AtomicInteger();
lh.asyncReadEntries(0, numEntries - 1, new ReadCallback() {
@Override
public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
returnCode.set(rc);
receivedResponses.incrementAndGet();
counter.countDown();
}
}, null);
counter.await();
// Wait extra time to ensure no extra responses received
Thread.sleep(1000);
assertEquals(1, receivedResponses.get());
assertEquals(BKException.Code.BookieHandleNotAvailableException, returnCode.get());
bkc.close();
}
/**
 * Verifies that BookKeeper and LedgerHandle behave as AutoCloseable:
 * leaving the inner try closes the ledger, leaving the outer try closes
 * the client.
 */
@Test
public void testAutoCloseableBookKeeper() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
BookKeeper bkc2;
try (BookKeeper bkc = new BookKeeper(conf)) {
// Keep a reference so we can check the closed flag after the try block.
bkc2 = bkc;
long ledgerId;
try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) {
ledgerId = lh.getId();
for (int i = 0; i < 100; i++) {
lh.addEntry("foobar".getBytes());
}
}
assertTrue("Ledger should be closed!", bkc.isClosed(ledgerId));
}
assertTrue("BookKeeper should be closed!", bkc2.closed);
}
/**
 * End-to-end coverage of reading around the LastAddConfirmed (LAC) boundary:
 * <ul>
 *   <li>a non-recovery reader sees LAC == numOfEntries - 2 while the writer is open,</li>
 *   <li>readUnconfirmedEntries can read past the local LAC (but not past entries
 *       that were never written),</li>
 *   <li>readEntries is limited to the 0..LAC range,</li>
 *   <li>the same invariants hold after restarting every bookie,</li>
 *   <li>opening with recovery (fencing) makes the last entry readable, after which
 *       the writer can still close cleanly.</li>
 * </ul>
 */
@Test
public void testReadAfterLastAddConfirmed() throws Exception {
ClientConfiguration clientConfiguration = new ClientConfiguration();
clientConfiguration.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkWriter = new BookKeeper(clientConfiguration)) {
LedgerHandle writeLh = bkWriter.createLedger(digestType, "testPasswd".getBytes());
long ledgerId = writeLh.getId();
int numOfEntries = 5;
for (int i = 0; i < numOfEntries; i++) {
writeLh.addEntry(("foobar" + i).getBytes());
}
// Phase 1: non-recovery reader while the writer is still open.
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
assertFalse(writeLh.isClosed());
// with readUnconfirmedEntries we are able to read all of the entries
Enumeration<LedgerEntry> entries = rlh.readUnconfirmedEntries(0, numOfEntries - 1);
int entryId = 0;
while (entries.hasMoreElements()) {
LedgerEntry entry = entries.nextElement();
String entryString = new String(entry.getEntry());
assertTrue("Expected entry String: " + ("foobar" + entryId)
+ " actual entry String: " + entryString,
entryString.equals("foobar" + entryId));
entryId++;
}
}
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
assertFalse(writeLh.isClosed());
// without readUnconfirmedEntries we are not able to read all of the entries
try {
rlh.readEntries(0, numOfEntries - 1);
fail("shoud not be able to read up to " + (numOfEntries - 1) + " with readEntries");
} catch (BKException.BKReadException expected) {
}
// read all entries within the 0..LastAddConfirmed range with readEntries
assertEquals(rlh.getLastAddConfirmed() + 1,
Collections.list(rlh.readEntries(0, rlh.getLastAddConfirmed())).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
// read all entries within the 0..LastAddConfirmed range with readUnconfirmedEntries
assertEquals(rlh.getLastAddConfirmed() + 1,
Collections.list(rlh.readUnconfirmedEntries(0, rlh.getLastAddConfirmed())).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
// read all entries within the LastAddConfirmed..numOfEntries - 1 range with readUnconfirmedEntries
assertEquals(numOfEntries - rlh.getLastAddConfirmed(),
Collections.list(rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries - 1)).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
try {
// read all entries within the LastAddConfirmed..numOfEntries range with readUnconfirmedEntries
// this is an error, we are going outside the range of existing entries
rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries);
fail("the read tried to access data for unexisting entry id " + numOfEntries);
} catch (BKException.BKNoSuchEntryException expected) {
// expecting a BKNoSuchEntryException, as the entry does not exist on bookies
}
try {
// read all entries within the LastAddConfirmed..numOfEntries range with readEntries
// this is an error, we are going outside the range of existing entries
rlh.readEntries(rlh.getLastAddConfirmed(), numOfEntries);
fail("the read tries to access data for unexisting entry id " + numOfEntries);
} catch (BKException.BKReadException expected) {
// expecting a BKReadException, as the client rejected the request to access entries
// after local LastAddConfirmed
}
}
// ensure that after restarting every bookie entries are not lost
// even entries after the LastAddConfirmed
restartBookies();
// Phase 2: repeat the same checks after a full cluster restart.
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
assertFalse(writeLh.isClosed());
// with readUnconfirmedEntries we are able to read all of the entries
Enumeration<LedgerEntry> entries = rlh.readUnconfirmedEntries(0, numOfEntries - 1);
int entryId = 0;
while (entries.hasMoreElements()) {
LedgerEntry entry = entries.nextElement();
String entryString = new String(entry.getEntry());
assertTrue("Expected entry String: " + ("foobar" + entryId)
+ " actual entry String: " + entryString,
entryString.equals("foobar" + entryId));
entryId++;
}
}
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
assertFalse(writeLh.isClosed());
// without readUnconfirmedEntries we are not able to read all of the entries
try {
rlh.readEntries(0, numOfEntries - 1);
fail("shoud not be able to read up to " + (numOfEntries - 1) + " with readEntries");
} catch (BKException.BKReadException expected) {
}
// read all entries within the 0..LastAddConfirmed range with readEntries
assertEquals(rlh.getLastAddConfirmed() + 1,
Collections.list(rlh.readEntries(0, rlh.getLastAddConfirmed())).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
// read all entries within the 0..LastAddConfirmed range with readUnconfirmedEntries
assertEquals(rlh.getLastAddConfirmed() + 1,
Collections.list(rlh.readUnconfirmedEntries(0, rlh.getLastAddConfirmed())).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
// read all entries within the LastAddConfirmed..numOfEntries - 1 range with readUnconfirmedEntries
assertEquals(numOfEntries - rlh.getLastAddConfirmed(),
Collections.list(rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries - 1)).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
try {
// read all entries within the LastAddConfirmed..numOfEntries range with readUnconfirmedEntries
// this is an error, we are going outside the range of existing entries
rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries);
fail("the read tried to access data for unexisting entry id " + numOfEntries);
} catch (BKException.BKNoSuchEntryException expected) {
// expecting a BKNoSuchEntryException, as the entry does not exist on bookies
}
try {
// read all entries within the LastAddConfirmed..numOfEntries range with readEntries
// this is an error, we are going outside the range of existing entries
rlh.readEntries(rlh.getLastAddConfirmed(), numOfEntries);
fail("the read tries to access data for unexisting entry id " + numOfEntries);
} catch (BKException.BKReadException expected) {
// expecting a BKReadException, as the client rejected the request to access entries
// after local LastAddConfirmed
}
}
// open ledger with fencing, this will repair the ledger and make the last entry readable
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 1) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 1)));
assertFalse(writeLh.isClosed());
// after recovery the LAC covers the last entry, so plain readEntries sees everything
Enumeration<LedgerEntry> entries = rlh.readEntries(0, numOfEntries - 1);
int entryId = 0;
while (entries.hasMoreElements()) {
LedgerEntry entry = entries.nextElement();
String entryString = new String(entry.getEntry());
assertTrue("Expected entry String: " + ("foobar" + entryId)
+ " actual entry String: " + entryString,
entryString.equals("foobar" + entryId));
entryId++;
}
}
// should still be able to close as long as recovery closed the ledger
// with the same last entryId and length as in the write handle.
writeLh.close();
}
}
/**
 * Basic write/read and fencing coverage when the client is configured to use
 * the V2 wire protocol.
 */
@Test
public void testReadWriteWithV2WireProtocol() throws Exception {
ClientConfiguration conf = new ClientConfiguration().setUseV2WireProtocol(true);
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
int numEntries = 100;
byte[] data = "foobar".getBytes();
try (BookKeeper bkc = new BookKeeper(conf)) {
// basic read/write
{
long ledgerId;
try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) {
ledgerId = lh.getId();
for (int i = 0; i < numEntries; i++) {
lh.addEntry(data);
}
}
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
assertArrayEquals(data, entry.getEntry());
}
}
}
// basic fencing: a recovery open must fence out the live writer
{
long ledgerId;
try (LedgerHandle lh2 = bkc.createLedger(digestType, "testPasswd".getBytes())) {
ledgerId = lh2.getId();
lh2.addEntry(data);
try (LedgerHandle lh2Fence = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
}
try {
lh2.addEntry(data);
fail("ledger should be fenced");
} catch (BKException.BKLedgerFencedException ex){
}
}
}
}
}
/**
 * Exercises ownership rules for entry ByteBufs across wire-protocol versions
 * and buffer-pooling settings:
 * <ul>
 *   <li>v2 protocol (pooled and unpooled): the caller may release the entry
 *       buffer; release() must not throw IllegalReferenceCountException,</li>
 *   <li>v3 protocol (pooled and unpooled): release() succeeds exactly once and
 *       a second release must fail,</li>
 *   <li>entry payload may only be consumed once, via either getEntry() or
 *       getEntryInputStream().</li>
 * </ul>
 */
@SuppressWarnings("deprecation")
@Test
public void testReadEntryReleaseByteBufs() throws Exception {
ClientConfiguration confWriter = new ClientConfiguration();
confWriter.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
int numEntries = 10;
byte[] data = "foobar".getBytes();
long ledgerId;
// Write the fixture ledger once; each reader config below re-reads it.
try (BookKeeper bkc = new BookKeeper(confWriter)) {
try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) {
ledgerId = lh.getId();
for (int i = 0; i < numEntries; i++) {
lh.addEntry(data);
}
}
}
// v2 protocol, using pooled buffers
ClientConfiguration confReader1 = new ClientConfiguration()
.setUseV2WireProtocol(true)
.setNettyUsePooledBuffers(true)
.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader1)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
try {
entry.data.release();
} catch (IllegalReferenceCountException ok) {
fail("ByteBuf already released");
}
}
}
}
// v2 protocol, not using pooled buffers
ClientConfiguration confReader2 = new ClientConfiguration()
.setUseV2WireProtocol(true)
.setNettyUsePooledBuffers(false);
confReader2.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader2)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
try {
entry.data.release();
} catch (IllegalReferenceCountException e) {
fail("ByteBuf already released");
}
}
}
}
// v3 protocol, not using pooled buffers
ClientConfiguration confReader3 = new ClientConfiguration()
.setUseV2WireProtocol(false)
.setNettyUsePooledBuffers(false)
.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader3)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
// first release must succeed (drops the last reference) ...
assertTrue("Can't release entry " + entry.getEntryId() + ": ref = " + entry.data.refCnt(),
entry.data.release());
try {
// ... and a second release must fail
assertFalse(entry.data.release());
fail("ByteBuf already released");
} catch (IllegalReferenceCountException ok) {
}
}
}
}
// v3 protocol, using pooled buffers
// v3 protocol from 4.5 always "wraps" buffers returned by protobuf
ClientConfiguration confReader4 = new ClientConfiguration()
.setUseV2WireProtocol(false)
.setNettyUsePooledBuffers(true)
.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader4)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
// same contract as the unpooled v3 case: release succeeds exactly once
assertTrue("Can't release entry " + entry.getEntryId() + ": ref = " + entry.data.refCnt(),
entry.data.release());
try {
assertFalse(entry.data.release());
fail("ByteBuf already released");
} catch (IllegalReferenceCountException ok) {
}
}
}
}
// cannot read twice an entry
ClientConfiguration confReader5 = new ClientConfiguration();
confReader5.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader5)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
entry.getEntry();
try {
entry.getEntry();
fail("entry data accessed twice");
} catch (IllegalStateException ok){
}
try {
entry.getEntryInputStream();
fail("entry data accessed twice");
} catch (IllegalStateException ok){
}
}
}
}
}
/**
 * Tests that issuing multiple reads for the same entry at the same time works as expected.
 *
 * <p>Fix: failures are now recorded and asserted on the test thread. Previously the
 * callback called fail() on the client callback thread (where the AssertionError is
 * swallowed) and skipped countDown(), so a failed read hung the test in await()
 * instead of failing it.
 *
 * @throws Exception
 */
@Test
public void testDoubleRead() throws Exception {
    LedgerHandle lh = bkc.createLedger(digestType, "".getBytes());
    lh.addEntry("test".getBytes());
    // Read the same entry several times asynchronously.
    final int n = 10;
    final CountDownLatch latch = new CountDownLatch(n);
    final AtomicInteger firstError = new AtomicInteger(BKException.Code.OK);
    for (int i = 0; i < n; i++) {
        lh.asyncReadEntries(0, 0, new ReadCallback() {
            public void readComplete(int rc, LedgerHandle lh,
                                     Enumeration<LedgerEntry> seq, Object ctx) {
                // Record the first non-OK code; always count down so await() terminates.
                firstError.compareAndSet(BKException.Code.OK, rc);
                latch.countDown();
            }
        }, null);
    }
    latch.await();
    assertEquals("Read fail", BKException.Code.OK, firstError.get());
}
/**
 * Tests that issuing multiple reads for the same entry at the same time works as expected
 * when the v2 wire protocol is enabled.
 *
 * @throws Exception
 */
@Test
public void testDoubleReadWithV2Protocol() throws Exception {
    ClientConfiguration conf = new ClientConfiguration(baseClientConf);
    conf.setUseV2WireProtocol(true);
    // try-with-resources guarantees the client is closed even when an
    // assertion fails mid-test (the original leaked the client on failure).
    try (BookKeeperTestClient bkc = new BookKeeperTestClient(conf)) {
        LedgerHandle lh = bkc.createLedger(digestType, "".getBytes());
        lh.addEntry("test".getBytes());
        // Read the same entry several times asynchronously.
        final int n = 10;
        final CountDownLatch latch = new CountDownLatch(n);
        for (int i = 0; i < n; i++) {
            lh.asyncReadEntries(0, 0, new ReadCallback() {
                public void readComplete(int rc, LedgerHandle lh,
                                         Enumeration<LedgerEntry> seq, Object ctx) {
                    if (rc == BKException.Code.OK) {
                        latch.countDown();
                    } else {
                        fail("Read fail");
                    }
                }
            }, null);
        }
        latch.await();
    }
}
/**
 * Verifies that non-default write flags are rejected on the v2 wire protocol:
 * appending to a ledger created with {@code WriteFlag.DEFERRED_SYNC} must fail
 * with {@link BKIllegalOpException} when {@code useV2WireProtocol} is set.
 */
@Test(expected = BKIllegalOpException.class)
public void testCannotUseWriteFlagsOnV2Protocol() throws Exception {
    ClientConfiguration conf = new ClientConfiguration(baseClientConf);
    conf.setUseV2WireProtocol(true);
    try (BookKeeperTestClient bkc = new BookKeeperTestClient(conf)) {
        try (WriteHandle wh = result(bkc.newCreateLedgerOp()
            .withEnsembleSize(3)
            .withWriteQuorumSize(3)
            .withAckQuorumSize(2)
            .withPassword("".getBytes())
            .withWriteFlags(WriteFlag.DEFERRED_SYNC)
            .execute())) {
            // The append is expected to surface BKIllegalOpException.
            result(wh.appendAsync("test".getBytes()));
        }
    }
}
/**
 * Verifies that an explicit {@code force()} (durability sync) is rejected on
 * the v2 wire protocol: the call must fail with {@link BKIllegalOpException}
 * even though the preceding append (with {@code WriteFlag.NONE}) succeeds.
 */
@Test(expected = BKIllegalOpException.class)
public void testCannotUseForceOnV2Protocol() throws Exception {
    ClientConfiguration conf = new ClientConfiguration(baseClientConf);
    conf.setUseV2WireProtocol(true);
    try (BookKeeperTestClient bkc = new BookKeeperTestClient(conf)) {
        try (WriteHandle wh = result(bkc.newCreateLedgerOp()
            .withEnsembleSize(3)
            .withWriteQuorumSize(3)
            .withAckQuorumSize(2)
            .withPassword("".getBytes())
            .withWriteFlags(WriteFlag.NONE)
            .execute())) {
            result(wh.appendAsync("".getBytes()));
            // force() is the call expected to throw BKIllegalOpException.
            result(wh.force());
        }
    }
}
/**
 * ZooKeeperClient wrapper that injects a one-shot CONNECTIONLOSS failure
 * into znode creation. When the path of the node being created contains
 * {@code ledgerIdToInjectFailure}, the callback reports CONNECTIONLOSS once
 * and resets the trigger to INVALID_LEDGERID — so the create actually
 * succeeded on the server, and a client retry will observe NODEEXISTS.
 */
class MockZooKeeperClient extends ZooKeeperClient {
    /** ZooKeeper handle whose async create() wraps callbacks with the fault injector. */
    class MockZooKeeper extends ZooKeeper {
        public MockZooKeeper(String connectString, int sessionTimeout, Watcher watcher, boolean canBeReadOnly)
                throws IOException {
            super(connectString, sessionTimeout, watcher, canBeReadOnly);
        }

        @Override
        public void create(final String path, byte[] data, List<ACL> acl, CreateMode createMode, StringCallback cb,
                Object ctx) {
            StringCallback injectedCallback = new StringCallback() {
                @Override
                public void processResult(int rc, String path, Object ctx, String name) {
                    /**
                     * if ledgerIdToInjectFailure matches with the path of
                     * the node, then throw CONNECTIONLOSS error and then
                     * reset it to INVALID_LEDGERID.
                     */
                    if (path.contains(ledgerIdToInjectFailure.toString())) {
                        ledgerIdToInjectFailure.set(INVALID_LEDGERID);
                        cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, name);
                    } else {
                        cb.processResult(rc, path, ctx, name);
                    }
                }
            };
            // The real create runs regardless; only the reported result is altered.
            super.create(path, data, acl, createMode, injectedCallback, ctx);
        }
    }

    private final String connectString;
    private final int sessionTimeoutMs;
    private final ZooKeeperWatcherBase watcherManager;
    // Ledger id whose znode creation should report CONNECTIONLOSS (one-shot).
    private final AtomicLong ledgerIdToInjectFailure;

    MockZooKeeperClient(String connectString, int sessionTimeoutMs, ZooKeeperWatcherBase watcher,
            AtomicLong ledgerIdToInjectFailure) throws IOException {
        /*
         * in OperationalRetryPolicy maxRetries is > 0. So in case of any
         * RecoverableException scenario, it will retry.
         */
        super(connectString, sessionTimeoutMs, watcher,
                new BoundExponentialBackoffRetryPolicy(sessionTimeoutMs, sessionTimeoutMs, Integer.MAX_VALUE),
                new BoundExponentialBackoffRetryPolicy(sessionTimeoutMs, sessionTimeoutMs, 3),
                NullStatsLogger.INSTANCE, 1, 0, false);
        this.connectString = connectString;
        this.sessionTimeoutMs = sessionTimeoutMs;
        this.watcherManager = watcher;
        this.ledgerIdToInjectFailure = ledgerIdToInjectFailure;
    }

    @Override
    protected ZooKeeper createZooKeeper() throws IOException {
        // Hand out the fault-injecting handle on (re)connect.
        return new MockZooKeeper(this.connectString, this.sessionTimeoutMs, this.watcherManager, false);
    }
}
/**
 * Verifies that ledger creation survives an injected ZK CONNECTIONLOSS:
 * the first znode create reports a connection loss (though it actually
 * succeeded), and the retry must cope with the resulting NODEEXISTS.
 */
@Test
public void testZKConnectionLossForLedgerCreation() throws Exception {
    int zkSessionTimeOut = 10000;
    AtomicLong ledgerIdToInjectFailure = new AtomicLong(INVALID_LEDGERID);
    ZooKeeperWatcherBase zooKeeperWatcherBase = new ZooKeeperWatcherBase(zkSessionTimeOut,
            NullStatsLogger.INSTANCE);
    MockZooKeeperClient zkFaultInjectionWrapper = new MockZooKeeperClient(zkUtil.getZooKeeperConnectString(),
            zkSessionTimeOut, zooKeeperWatcherBase, ledgerIdToInjectFailure);
    zkFaultInjectionWrapper.waitForConnection();
    assertEquals("zkFaultInjectionWrapper should be in connected state", States.CONNECTED,
            zkFaultInjectionWrapper.getState());
    BookKeeper bk = new BookKeeper(baseClientConf, zkFaultInjectionWrapper);
    long oldZkInstanceSessionId = zkFaultInjectionWrapper.getSessionId();
    long ledgerId = 567L;
    // Sanity: creation works before any fault is injected.
    LedgerHandle lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null);
    lh.close();
    /*
     * trigger Expired event so that MockZooKeeperClient would run
     * 'clientCreator' and create new zk handle. In this case it would
     * create MockZooKeeper.
     */
    zooKeeperWatcherBase.process(new WatchedEvent(EventType.None, KeeperState.Expired, ""));
    zkFaultInjectionWrapper.waitForConnection();
    // Poll briefly (up to ~2s) for the replacement session to reach CONNECTED.
    for (int i = 0; i < 10; i++) {
        if (zkFaultInjectionWrapper.getState() == States.CONNECTED) {
            break;
        }
        Thread.sleep(200);
    }
    assertEquals("zkFaultInjectionWrapper should be in connected state", States.CONNECTED,
            zkFaultInjectionWrapper.getState());
    assertNotEquals("Session Id of old and new ZK instance should be different", oldZkInstanceSessionId,
            zkFaultInjectionWrapper.getSessionId());
    ledgerId++;
    ledgerIdToInjectFailure.set(ledgerId);
    /**
     * ledgerIdToInjectFailure is set to 'ledgerId', so zookeeper.create
     * would return CONNECTIONLOSS error for the first time and when it is
     * retried, as expected it would return NODEEXISTS error.
     *
     * AbstractZkLedgerManager.createLedgerMetadata should deal with this
     * scenario appropriately.
     */
    lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null);
    lh.close();
    assertEquals("injectZnodeCreationNoNodeFailure should have been reset it to INVALID_LEDGERID", INVALID_LEDGERID,
            ledgerIdToInjectFailure.get());
    // The ledger created through the faulty path must still be readable...
    lh = bk.openLedger(ledgerId, DigestType.CRC32, "".getBytes());
    lh.close();
    ledgerId++;
    // ...and subsequent fault-free creations must keep working.
    lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null);
    lh.close();
    bk.close();
}
/**
 * Verifies that deleting the same ledger twice is tolerated: the second
 * delete of an already-deleted ledger id must not throw.
 */
@Test
public void testLedgerDeletionIdempotency() throws Exception {
    // try-with-resources closes the client even if create/delete fails
    // (the original leaked the BookKeeper instance on failure).
    try (BookKeeper bk = new BookKeeper(baseClientConf)) {
        long ledgerId = 789L;
        LedgerHandle lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null);
        lh.close();
        bk.deleteLedger(ledgerId);
        bk.deleteLedger(ledgerId);
    }
}
/**
 * Mock of RackawareEnsemblePlacementPolicy. Overrides areAckedBookiesAdheringToPlacementPolicy to only return true
 * when ackedBookies consists of writeQuorumSizeToUseForTesting bookies.
 */
public static class MockRackawareEnsemblePlacementPolicy extends RackawareEnsemblePlacementPolicy {
    // Number of acked bookies required before the policy reports adherence.
    private int requiredAckedBookies;
    // Counted down each time the adherence check is invoked (tests use it to
    // detect the first invocation).
    private CountDownLatch invocationLatch;

    void setWriteQuorumSizeToUseForTesting(int writeQuorumSizeToUseForTesting) {
        requiredAckedBookies = writeQuorumSizeToUseForTesting;
    }

    void setConditionFirstInvocationLatch(CountDownLatch conditionFirstInvocationLatch) {
        invocationLatch = conditionFirstInvocationLatch;
    }

    @Override
    public boolean areAckedBookiesAdheringToPlacementPolicy(Set<BookieSocketAddress> ackedBookies,
                                                            int writeQuorumSize,
                                                            int ackQuorumSize) {
        invocationLatch.countDown();
        return ackedBookies.size() == requiredAckedBookies;
    }
}
/**
 * Test to verify that PendingAddOp waits for success condition from areAckedBookiesAdheringToPlacementPolicy
 * before returning success to client. Also tests working of WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS and
 * WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS counters.
 *
 * <p>Fix over the original: all counter assertions passed (actual, expected) in
 * reversed order; JUnit's assertEquals takes the expected value first, so the
 * arguments are swapped here to make failure messages correct.
 */
@Test
public void testEnforceMinNumFaultDomainsForWrite() throws Exception {
    byte[] data = "foobar".getBytes();
    byte[] password = "testPasswd".getBytes();

    startNewBookie();
    startNewBookie();

    ClientConfiguration conf = new ClientConfiguration();
    conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    conf.setEnsemblePlacementPolicy(MockRackawareEnsemblePlacementPolicy.class);
    // Abnormal values for testing to prevent timeouts
    conf.setAddEntryTimeout(2);
    conf.setAddEntryQuorumTimeout(4);
    conf.setEnforceMinNumFaultDomainsForWrite(true);

    TestStatsProvider statsProvider = new TestStatsProvider();
    BookKeeperTestClient bk = new BookKeeperTestClient(conf, statsProvider);
    StatsLogger statsLogger = bk.getStatsLogger();

    int ensembleSize = 3;
    int writeQuorumSize = 3;
    int ackQuorumSize = 2;

    CountDownLatch countDownLatch = new CountDownLatch(1);
    MockRackawareEnsemblePlacementPolicy currPlacementPolicy =
            (MockRackawareEnsemblePlacementPolicy) bk.getPlacementPolicy();
    currPlacementPolicy.setConditionFirstInvocationLatch(countDownLatch);
    currPlacementPolicy.setWriteQuorumSizeToUseForTesting(writeQuorumSize);

    BookieSocketAddress bookieToSleep;

    try (LedgerHandle lh = bk.createLedger(ensembleSize, writeQuorumSize, ackQuorumSize, digestType, password)) {
        CountDownLatch sleepLatchCase1 = new CountDownLatch(1);
        CountDownLatch sleepLatchCase2 = new CountDownLatch(1);

        // Put all non ensemble bookies to sleep
        LOG.info("Putting all non ensemble bookies to sleep.");
        for (BookieServer bookieServer : bs) {
            try {
                if (!lh.getCurrentEnsemble().contains(bookieServer.getLocalAddress())) {
                    sleepBookie(bookieServer.getLocalAddress(), sleepLatchCase2);
                }
            } catch (UnknownHostException ignored) {}
        }

        Thread writeToLedger = new Thread(() -> {
            try {
                LOG.info("Initiating write for entry");
                long entryId = lh.addEntry(data);
                LOG.info("Wrote entry with entryId = {}", entryId);
            } catch (InterruptedException | BKException ignored) {
            }
        });

        bookieToSleep = lh.getCurrentEnsemble().get(0);
        LOG.info("Putting picked bookie to sleep");
        sleepBookie(bookieToSleep, sleepLatchCase1);

        // No delayed write recorded yet.
        assertEquals(0, statsLogger
                .getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS)
                .get()
                .longValue());

        // Trying to write entry
        writeToLedger.start();

        // Waiting and checking to make sure that write has not succeeded
        countDownLatch.await(conf.getAddEntryTimeout(), TimeUnit.SECONDS);
        assertEquals("Write succeeded but should not have", -1, lh.lastAddConfirmed);

        // Wake the bookie
        sleepLatchCase1.countDown();

        // Waiting and checking to make sure that write has succeeded
        writeToLedger.join(conf.getAddEntryTimeout() * 1000);
        assertEquals("Write did not succeed but should have", 0, lh.lastAddConfirmed);

        // The stalled-then-completed write counts as one delayed write.
        assertEquals(1, statsLogger
                .getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS)
                .get()
                .longValue());

        // AddEntry thread for second scenario
        Thread writeToLedger2 = new Thread(() -> {
            try {
                LOG.info("Initiating write for entry");
                long entryId = lh.addEntry(data);
                LOG.info("Wrote entry with entryId = {}", entryId);
            } catch (InterruptedException | BKException ignored) {
            }
        });

        bookieToSleep = lh.getCurrentEnsemble().get(1);
        LOG.info("Putting picked bookie to sleep");
        sleepBookie(bookieToSleep, sleepLatchCase2);

        // Trying to write entry
        writeToLedger2.start();

        // Waiting and checking to make sure that write has failed
        writeToLedger2.join((conf.getAddEntryQuorumTimeout() + 2) * 1000);
        assertEquals("Write succeeded but should not have", 0, lh.lastAddConfirmed);
        sleepLatchCase2.countDown();

        // Second write was delayed and ultimately timed out.
        assertEquals(2, statsLogger.getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue());
        assertEquals(1, statsLogger.getCounter(WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue());
    }
}
}
| sijie/bookkeeper | bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTest.java | Java | apache-2.0 | 49,996 |
<?php
/**
 * Minimal view/template wrapper: variables assigned to the object are
 * extracted into local scope when the template file is rendered via
 * __toString().
 */
class Template
{
    /** @var string Path to the template file. */
    protected $template;

    /** @var array Variables made available to the template. */
    protected $variables = array();

    /**
     * @param string $template Path to the template file to render.
     */
    public function __construct($template)
    {
        $this->template = $template;
    }

    /**
     * Read a template variable. Returns null instead of raising an
     * undefined-index notice (as the original did) when the variable
     * was never assigned.
     */
    public function __get($key)
    {
        return isset($this->variables[$key]) ? $this->variables[$key] : null;
    }

    /**
     * Added so isset()/empty() on magic template variables behaves
     * consistently with __get(). Backward compatible: without it,
     * isset($tpl->foo) was always false.
     */
    public function __isset($key)
    {
        return isset($this->variables[$key]);
    }

    public function __set($key, $value)
    {
        $this->variables[$key] = $value;
    }

    /**
     * Render the template: chdir into the template's directory so that
     * relative includes inside the template resolve, buffer the output,
     * and restore the previous working directory afterwards.
     */
    public function __toString()
    {
        $oldDir = getcwd();
        extract($this->variables);
        chdir(dirname($this->template));
        ob_start();
        include basename($this->template);
        chdir($oldDir);
        return ob_get_clean();
    }
}
/*!
* UI development toolkit for HTML5 (OpenUI5)
* (c) Copyright 2009-2017 SAP SE or an SAP affiliate company.
* Licensed under the Apache License, Version 2.0 - see LICENSE.txt.
*/
// Provides the Design Time Metadata for the sap.uxap.ObjectPageLayout control
// Design-time metadata module consumed by SAPUI5 runtime-adaptation tooling
// for sap.uxap.ObjectPageLayout. The module exports a plain metadata object;
// it defines no runtime behavior of the control itself.
sap.ui.define([],
    function() {
    "use strict";

    return {
        name : {
            // Display names are resolved lazily through the resource bundle so
            // that translations are only loaded when the metadata is used.
            singular : function(){
                return sap.uxap.i18nModel.getResourceBundle().getText("LAYOUT_CONTROL_NAME");
            },
            plural : function(){
                return sap.uxap.i18nModel.getResourceBundle().getText("LAYOUT_CONTROL__PLURAL");
            }
        },
        aggregations : {
            sections : {
                // DOM element that visually hosts the "sections" aggregation.
                domRef : function(oElement) {
                    return oElement.$("sectionsContainer").get(0);
                },
                childNames : {
                    singular : function(){
                        return sap.uxap.i18nModel.getResourceBundle().getText("SECTION_CONTROL_NAME");
                    },
                    plural : function(){
                        return sap.uxap.i18nModel.getResourceBundle().getText("SECTION_CONTROL_NAME_PLURAL");
                    }
                },
                actions : {
                    // Sections may be reordered via the generic moveControls change handler.
                    move : "moveControls"
                }
            }
        },
        // Scrollable regions of the layout and the aggregations they contain.
        scrollContainers : [{
            domRef : "> .sapUxAPObjectPageWrapper",
            aggregations : ["sections", "headerContent"]
        }, {
            domRef : function(oElement) {
                return oElement.$("vertSB-sb").get(0);
            }
        }],
        // When cloning the control's DOM, only the header subtree is copied.
        cloneDomRef : ":sap-domref > header"
    };
}, /* bExport= */ false);
| thbonk/electron-openui5-boilerplate | libs/openui5-runtime/resources/sap/uxap/ObjectPageLayout.designtime-dbg.js | JavaScript | apache-2.0 | 1,307 |
# frozen_string_literal: true
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "minitest/autorun"
require "gapic/grpc/service_stub"
require "google/showcase/v1beta1/echo_pb"
require "google/showcase/v1beta1/echo_services_pb"
require "google/showcase/v1beta1/echo"
# Generated unit tests for the long-running Operations client of the Showcase
# Echo service. Each test stubs the transport layer (Gapic::ServiceStub) and
# exercises every supported call style: hash, keyword args, protobuf request,
# and each of those with explicit call options.
class Google::Showcase::V1beta1::Echo::OperationsTest < Minitest::Test
  # Fake transport stub: records every RPC invocation, runs the per-test
  # verification block on the request, and returns the canned response.
  class ClientStub
    attr_accessor :call_rpc_count, :requests

    def initialize response, operation, &block
      @response = response
      @operation = operation
      @block = block
      @call_rpc_count = 0
      @requests = []
    end

    # Mimics Gapic::ServiceStub#call_rpc: counts calls, captures the request
    # (as returned by the verification block), and yields response/operation.
    def call_rpc *args
      @call_rpc_count += 1
      @requests << @block&.call(*args)
      yield @response, @operation if block_given?
      @response
    end
  end

  def test_list_operations
    # Create GRPC objects.
    grpc_response = Google::Longrunning::ListOperationsResponse.new
    grpc_operation = GRPC::ActiveCall::Operation.new nil
    grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
    grpc_options = {}

    # Create request parameters for a unary method.
    name = "hello world"
    filter = "hello world"
    page_size = 42
    page_token = "hello world"

    # NOTE: the block parameter `name` (the RPC symbol) intentionally shadows
    # the local `name` request field above.
    list_operations_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
      assert_equal :list_operations, name
      assert_kind_of Google::Longrunning::ListOperationsRequest, request
      assert_equal "hello world", request.name
      assert_equal "hello world", request.filter
      assert_equal 42, request.page_size
      assert_equal "hello world", request.page_token
      refute_nil options
    end

    Gapic::ServiceStub.stub :new, list_operations_client_stub do
      # Create client
      client = Google::Showcase::V1beta1::Echo::Operations.new do |config|
        config.credentials = grpc_channel
      end

      # Use hash object
      client.list_operations({ name: name, filter: filter, page_size: page_size, page_token: page_token }) do |response, operation|
        assert_kind_of Gapic::PagedEnumerable, response
        assert_equal grpc_response, response.response
        assert_equal grpc_operation, operation
      end

      # Use named arguments
      client.list_operations name: name, filter: filter, page_size: page_size, page_token: page_token do |response, operation|
        assert_kind_of Gapic::PagedEnumerable, response
        assert_equal grpc_response, response.response
        assert_equal grpc_operation, operation
      end

      # Use protobuf object
      client.list_operations Google::Longrunning::ListOperationsRequest.new(name: name, filter: filter, page_size: page_size, page_token: page_token) do |response, operation|
        assert_kind_of Gapic::PagedEnumerable, response
        assert_equal grpc_response, response.response
        assert_equal grpc_operation, operation
      end

      # Use hash object with options
      client.list_operations({ name: name, filter: filter, page_size: page_size, page_token: page_token }, grpc_options) do |response, operation|
        assert_kind_of Gapic::PagedEnumerable, response
        assert_equal grpc_response, response.response
        assert_equal grpc_operation, operation
      end

      # Use protobuf object with options
      client.list_operations Google::Longrunning::ListOperationsRequest.new(name: name, filter: filter, page_size: page_size, page_token: page_token), grpc_options do |response, operation|
        assert_kind_of Gapic::PagedEnumerable, response
        assert_equal grpc_response, response.response
        assert_equal grpc_operation, operation
      end

      # Verify method calls
      assert_equal 5, list_operations_client_stub.call_rpc_count
    end
  end

  def test_get_operation
    # Create GRPC objects.
    grpc_response = Google::Longrunning::Operation.new
    grpc_operation = GRPC::ActiveCall::Operation.new nil
    grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
    grpc_options = {}

    # Create request parameters for a unary method.
    name = "hello world"

    get_operation_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
      assert_equal :get_operation, name
      assert_kind_of Google::Longrunning::GetOperationRequest, request
      assert_equal "hello world", request.name
      refute_nil options
    end

    Gapic::ServiceStub.stub :new, get_operation_client_stub do
      # Create client
      client = Google::Showcase::V1beta1::Echo::Operations.new do |config|
        config.credentials = grpc_channel
      end

      # Use hash object
      client.get_operation({ name: name }) do |response, operation|
        assert_kind_of Gapic::Operation, response
        assert_equal grpc_response, response.grpc_op
        assert_equal grpc_operation, operation
      end

      # Use named arguments
      client.get_operation name: name do |response, operation|
        assert_kind_of Gapic::Operation, response
        assert_equal grpc_response, response.grpc_op
        assert_equal grpc_operation, operation
      end

      # Use protobuf object
      client.get_operation Google::Longrunning::GetOperationRequest.new(name: name) do |response, operation|
        assert_kind_of Gapic::Operation, response
        assert_equal grpc_response, response.grpc_op
        assert_equal grpc_operation, operation
      end

      # Use hash object with options
      client.get_operation({ name: name }, grpc_options) do |response, operation|
        assert_kind_of Gapic::Operation, response
        assert_equal grpc_response, response.grpc_op
        assert_equal grpc_operation, operation
      end

      # Use protobuf object with options
      client.get_operation Google::Longrunning::GetOperationRequest.new(name: name), grpc_options do |response, operation|
        assert_kind_of Gapic::Operation, response
        assert_equal grpc_response, response.grpc_op
        assert_equal grpc_operation, operation
      end

      # Verify method calls
      assert_equal 5, get_operation_client_stub.call_rpc_count
    end
  end

  def test_delete_operation
    # Create GRPC objects.
    grpc_response = Google::Protobuf::Empty.new
    grpc_operation = GRPC::ActiveCall::Operation.new nil
    grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
    grpc_options = {}

    # Create request parameters for a unary method.
    name = "hello world"

    delete_operation_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
      assert_equal :delete_operation, name
      assert_kind_of Google::Longrunning::DeleteOperationRequest, request
      assert_equal "hello world", request.name
      refute_nil options
    end

    Gapic::ServiceStub.stub :new, delete_operation_client_stub do
      # Create client
      client = Google::Showcase::V1beta1::Echo::Operations.new do |config|
        config.credentials = grpc_channel
      end

      # Use hash object
      client.delete_operation({ name: name }) do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Use named arguments
      client.delete_operation name: name do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Use protobuf object
      client.delete_operation Google::Longrunning::DeleteOperationRequest.new(name: name) do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Use hash object with options
      client.delete_operation({ name: name }, grpc_options) do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Use protobuf object with options
      client.delete_operation Google::Longrunning::DeleteOperationRequest.new(name: name), grpc_options do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Verify method calls
      assert_equal 5, delete_operation_client_stub.call_rpc_count
    end
  end

  def test_cancel_operation
    # Create GRPC objects.
    grpc_response = Google::Protobuf::Empty.new
    grpc_operation = GRPC::ActiveCall::Operation.new nil
    grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
    grpc_options = {}

    # Create request parameters for a unary method.
    name = "hello world"

    cancel_operation_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
      assert_equal :cancel_operation, name
      assert_kind_of Google::Longrunning::CancelOperationRequest, request
      assert_equal "hello world", request.name
      refute_nil options
    end

    Gapic::ServiceStub.stub :new, cancel_operation_client_stub do
      # Create client
      client = Google::Showcase::V1beta1::Echo::Operations.new do |config|
        config.credentials = grpc_channel
      end

      # Use hash object
      client.cancel_operation({ name: name }) do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Use named arguments
      client.cancel_operation name: name do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Use protobuf object
      client.cancel_operation Google::Longrunning::CancelOperationRequest.new(name: name) do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Use hash object with options
      client.cancel_operation({ name: name }, grpc_options) do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Use protobuf object with options
      client.cancel_operation Google::Longrunning::CancelOperationRequest.new(name: name), grpc_options do |response, operation|
        assert_equal grpc_response, response
        assert_equal grpc_operation, operation
      end

      # Verify method calls
      assert_equal 5, cancel_operation_client_stub.call_rpc_count
    end
  end
end
| googleapis/gapic-generator-ruby | shared/output/cloud/showcase/test/google/showcase/v1beta1/echo_operations_test.rb | Ruby | apache-2.0 | 10,911 |
package org.openqa.selenium.server.browserlaunchers;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.openqa.jetty.log.LogFactory;
import org.openqa.selenium.browserlaunchers.AsyncExecute;
import org.openqa.selenium.server.RemoteControlConfiguration;
/**
 * {@link org.openqa.selenium.server.browserlaunchers.SafariCustomProfileLauncher} integration test class.
 */
public class SafariLauncherIntegrationTest extends TestCase {

    private static final Log LOGGER = LogFactory.getLog(SafariLauncherIntegrationTest.class);
    private static final int SECONDS = 1000;
    private static final int WAIT_TIME = 15 * SECONDS;

    public void testLauncherWithDefaultConfiguration() throws Exception {
        final SafariCustomProfileLauncher launcher = new SafariCustomProfileLauncher(
                BrowserOptions.newBrowserOptions(), new RemoteControlConfiguration(), "CUST", null);
        launchWaitAndClose(launcher);
    }

    public void testLauncherWithHonorSystemProxyEnabled() throws Exception {
        final RemoteControlConfiguration configuration = new RemoteControlConfiguration();
        configuration.setHonorSystemProxy(true);
        final SafariCustomProfileLauncher launcher = new SafariCustomProfileLauncher(
                BrowserOptions.newBrowserOptions(), configuration, "CUST", null);
        launchWaitAndClose(launcher);
    }

    /**
     * Launches the browser at a known URL, waits {@link #WAIT_TIME}, then closes it.
     * Extracted to remove the copy/paste shared by both tests; the log message now
     * derives the delay from WAIT_TIME instead of a separately hard-coded 15.
     */
    private void launchWaitAndClose(SafariCustomProfileLauncher launcher) {
        launcher.launch("http://www.google.com");
        LOGGER.info("Killing browser in " + Integer.toString(WAIT_TIME / SECONDS) + " seconds");
        AsyncExecute.sleepTight(WAIT_TIME);
        launcher.close();
        LOGGER.info("He's dead now, right?");
    }
}
| akiellor/selenium | java/server/test/org/openqa/selenium/server/browserlaunchers/SafariLauncherIntegrationTest.java | Java | apache-2.0 | 1,913 |
const Long = require('long');
const User = require('./User');
const Role = require('./Role');
const Emoji = require('./Emoji');
const Presence = require('./Presence').Presence;
const GuildMember = require('./GuildMember');
const Constants = require('../util/Constants');
const Collection = require('../util/Collection');
const Util = require('../util/Util');
const Snowflake = require('../util/Snowflake');
/**
* Represents a guild (or a server) on Discord.
* <info>It's recommended to see if a guild is available before performing operations or reading data from it. You can
* check this with `guild.available`.</info>
*/
class Guild {
/**
 * @param {Client} client The client that instantiated the guild
 * @param {Object} [data] The raw guild data from the gateway; when flagged
 * `unavailable` only the ID is recorded and `available` is set to false
 */
constructor(client, data) {
  /**
   * The client that created the instance of the the guild
   * @name Guild#client
   * @type {Client}
   * @readonly
   */
  Object.defineProperty(this, 'client', { value: client });

  /**
   * A collection of members that are in this guild. The key is the member's ID, the value is the member
   * @type {Collection<Snowflake, GuildMember>}
   */
  this.members = new Collection();

  /**
   * A collection of channels that are in this guild. The key is the channel's ID, the value is the channel
   * @type {Collection<Snowflake, GuildChannel>}
   */
  this.channels = new Collection();

  /**
   * A collection of roles that are in this guild. The key is the role's ID, the value is the role
   * @type {Collection<Snowflake, Role>}
   */
  this.roles = new Collection();

  /**
   * A collection of presences in this guild
   * @type {Collection<Snowflake, Presence>}
   */
  this.presences = new Collection();

  // No data at all: leave the guild as an empty shell (caches only).
  if (!data) return;
  if (data.unavailable) {
    /**
     * Whether the guild is available to access. If it is not available, it indicates a server outage
     * @type {boolean}
     */
    this.available = false;

    /**
     * The Unique ID of the guild, useful for comparisons
     * @type {Snowflake}
     */
    this.id = data.id;
  } else {
    this.available = true;
    this.setup(data);
  }
}
/**
 * Sets up the guild.
 * @param {*} data The raw data of the guild
 * @private
 */
setup(data) {
  /**
   * The name of the guild
   * @type {string}
   */
  this.name = data.name;

  /**
   * The hash of the guild icon
   * @type {?string}
   */
  this.icon = data.icon;

  /**
   * The hash of the guild splash image (VIP only)
   * @type {?string}
   */
  this.splash = data.splash;

  /**
   * The region the guild is located in
   * @type {string}
   */
  this.region = data.region;

  /**
   * The full amount of members in this guild as of `READY`
   * @type {number}
   */
  this.memberCount = data.member_count || this.memberCount;

  /**
   * Whether the guild is "large" (has more than 250 members)
   * @type {boolean}
   */
  this.large = Boolean('large' in data ? data.large : this.large);

  /**
   * An array of guild features
   * @type {Object[]}
   */
  this.features = data.features;

  /**
   * The ID of the application that created this guild (if applicable)
   * @type {?Snowflake}
   */
  this.applicationID = data.application_id;

  /**
   * The time in seconds before a user is counted as "away from keyboard"
   * @type {?number}
   */
  this.afkTimeout = data.afk_timeout;

  /**
   * The ID of the voice channel where AFK members are moved
   * @type {?string}
   */
  this.afkChannelID = data.afk_channel_id;

  /**
   * Whether embedded images are enabled on this guild
   * @type {boolean}
   */
  this.embedEnabled = data.embed_enabled;

  /**
   * The verification level of the guild
   * @type {number}
   */
  this.verificationLevel = data.verification_level;

  /**
   * The explicit content filter level of the guild
   * @type {number}
   */
  this.explicitContentFilter = data.explicit_content_filter;

  /**
   * The timestamp the client user joined the guild at
   * @type {number}
   */
  this.joinedTimestamp = data.joined_at ? new Date(data.joined_at).getTime() : this.joinedTimestamp;

  this.id = data.id;
  this.available = !data.unavailable;
  this.features = data.features || this.features || [];

  // Rebuild the member cache from scratch when members are included.
  if (data.members) {
    this.members.clear();
    for (const guildUser of data.members) this._addMember(guildUser, false);
  }

  if (data.owner_id) {
    /**
     * The user ID of this guild's owner
     * @type {Snowflake}
     */
    this.ownerID = data.owner_id;
  }

  // Rebuild the channel cache when channels are included.
  if (data.channels) {
    this.channels.clear();
    for (const channel of data.channels) this.client.dataManager.newChannel(channel, this);
  }

  // Rebuild the role cache when roles are included.
  if (data.roles) {
    this.roles.clear();
    for (const role of data.roles) {
      const newRole = new Role(this, role);
      this.roles.set(newRole.id, newRole);
    }
  }

  if (data.presences) {
    for (const presence of data.presences) {
      this._setPresence(presence.user.id, presence);
    }
  }

  this._rawVoiceStates = new Collection();
  if (data.voice_states) {
    for (const voiceState of data.voice_states) {
      this._rawVoiceStates.set(voiceState.user_id, voiceState);
      const member = this.members.get(voiceState.user_id);
      if (member) {
        member.serverMute = voiceState.mute;
        member.serverDeaf = voiceState.deaf;
        member.selfMute = voiceState.self_mute;
        member.selfDeaf = voiceState.self_deaf;
        member.voiceSessionID = voiceState.session_id;
        member.voiceChannelID = voiceState.channel_id;
        // NOTE(review): assumes the voice state's channel is already cached;
        // .get() returning undefined here would throw — confirm upstream ordering.
        this.channels.get(voiceState.channel_id).members.set(member.user.id, member);
      }
    }
  }

  if (!this.emojis) {
    /**
     * A collection of emojis that are in this guild. The key is the emoji's ID, the value is the emoji.
     * @type {Collection<Snowflake, Emoji>}
     */
    this.emojis = new Collection();
    for (const emoji of data.emojis) this.emojis.set(emoji.id, new Emoji(this, emoji));
  } else {
    // Emoji cache already exists: diff it through the action handler so
    // update events are emitted instead of silently replacing the cache.
    this.client.actions.GuildEmojisUpdate.handle({
      guild_id: this.id,
      emojis: data.emojis,
    });
  }
}
/**
* The timestamp the guild was created at
* @type {number}
* @readonly
*/
get createdTimestamp() {
return Snowflake.deconstruct(this.id).timestamp;
}
/**
* The time the guild was created
* @type {Date}
* @readonly
*/
get createdAt() {
return new Date(this.createdTimestamp);
}
/**
* The time the client user joined the guild
* @type {Date}
* @readonly
*/
get joinedAt() {
return new Date(this.joinedTimestamp);
}
/**
* The URL to this guild's icon
* @type {?string}
* @readonly
*/
get iconURL() {
if (!this.icon) return null;
return Constants.Endpoints.Guild(this).Icon(this.client.options.http.cdn, this.icon);
}
/**
* The URL to this guild's splash
* @type {?string}
* @readonly
*/
get splashURL() {
if (!this.splash) return null;
return Constants.Endpoints.Guild(this).Splash(this.client.options.http.cdn, this.splash);
}
/**
* The owner of the guild
* @type {GuildMember}
* @readonly
*/
get owner() {
return this.members.get(this.ownerID);
}
/**
* If the client is connected to any voice channel in this guild, this will be the relevant VoiceConnection
* @type {?VoiceConnection}
* @readonly
*/
get voiceConnection() {
if (this.client.browser) return null;
return this.client.voice.connections.get(this.id) || null;
}
/**
* The `#general` TextChannel of the guild
* @type {TextChannel}
* @readonly
*/
get defaultChannel() {
return this.channels.get(this.id);
}
/**
* The position of this guild
* <warn>This is only available when using a user account.</warn>
* @type {?number}
*/
get position() {
if (this.client.user.bot) return null;
if (!this.client.user.settings.guildPositions) return null;
return this.client.user.settings.guildPositions.indexOf(this.id);
}
/**
* The `@everyone` role of the guild
* @type {Role}
* @readonly
*/
get defaultRole() {
return this.roles.get(this.id);
}
/**
* The client user as a GuildMember of this guild
* @type {?GuildMember}
* @readonly
*/
get me() {
return this.members.get(this.client.user.id);
}
/**
* Fetches a collection of roles in the current guild sorted by position
* @type {Collection<Snowflake, Role>}
* @readonly
* @private
*/
get _sortedRoles() {
return this._sortPositionWithID(this.roles);
}
/**
* Returns the GuildMember form of a User object, if the user is present in the guild.
* @param {UserResolvable} user The user that you want to obtain the GuildMember of
* @returns {?GuildMember}
* @example
* // Get the guild member of a user
* const member = guild.member(message.author);
*/
member(user) {
return this.client.resolver.resolveGuildMember(this, user);
}
/**
* Fetch a collection of banned users in this guild.
* @returns {Promise<Collection<Snowflake, User>>}
*/
fetchBans() {
return this.client.rest.methods.getGuildBans(this)
// This entire re-mapping can be removed in the next major release
.then(bans => {
const users = new Collection();
for (const ban of bans.values()) users.set(ban.user.id, ban.user);
return users;
});
}
/**
* Fetch a collection of invites to this guild. Resolves with a collection mapping invites by their codes.
* @returns {Promise<Collection<string, Invite>>}
*/
fetchInvites() {
return this.client.rest.methods.getGuildInvites(this);
}
/**
* Fetch all webhooks for the guild.
* @returns {Promise<Collection<Snowflake, Webhook>>}
*/
fetchWebhooks() {
return this.client.rest.methods.getGuildWebhooks(this);
}
/**
* Fetch available voice regions.
* @returns {Promise<Collection<string, VoiceRegion>>}
*/
fetchVoiceRegions() {
return this.client.rest.methods.fetchVoiceRegions(this.id);
}
/**
* Fetch audit logs for this guild.
* @param {Object} [options={}] Options for fetching audit logs
* @param {Snowflake|GuildAuditLogsEntry} [options.before] Limit to entries from before specified entry
* @param {Snowflake|GuildAuditLogsEntry} [options.after] Limit to entries from after specified entry
* @param {number} [options.limit] Limit number of entries
* @param {UserResolvable} [options.user] Only show entries involving this user
* @param {string|number} [options.type] Only show entries involving this action type
* @returns {Promise<GuildAuditLogs>}
*/
fetchAuditLogs(options) {
return this.client.rest.methods.getGuildAuditLogs(this, options);
}
/**
* Adds a user to the guild using OAuth2. Requires the `CREATE_INSTANT_INVITE` permission.
* @param {UserResolvable} user User to add to the guild
* @param {Object} options Options for the addition
* @param {string} options.accessToken An OAuth2 access token for the user with the `guilds.join` scope granted to the
* bot's application
* @param {string} [options.nick] Nickname to give the member (requires `MANAGE_NICKNAMES`)
* @param {Collection<Snowflake, Role>|Role[]|Snowflake[]} [options.roles] Roles to add to the member
* (requires `MANAGE_ROLES`)
* @param {boolean} [options.mute] Whether the member should be muted (requires `MUTE_MEMBERS`)
* @param {boolean} [options.deaf] Whether the member should be deafened (requires `DEAFEN_MEMBERS`)
* @returns {Promise<GuildMember>}
*/
addMember(user, options) {
if (this.members.has(user.id)) return Promise.resolve(this.members.get(user.id));
return this.client.rest.methods.putGuildMember(this, user, options);
}
/**
* Fetch a single guild member from a user.
* @param {UserResolvable} user The user to fetch the member for
* @param {boolean} [cache=true] Insert the user into the users cache
* @returns {Promise<GuildMember>}
*/
fetchMember(user, cache = true) {
user = this.client.resolver.resolveUser(user);
if (!user) return Promise.reject(new Error('User is not cached. Use Client.fetchUser first.'));
if (this.members.has(user.id)) return Promise.resolve(this.members.get(user.id));
return this.client.rest.methods.getGuildMember(this, user, cache);
}
/**
* Fetches all the members in the guild, even if they are offline. If the guild has less than 250 members,
* this should not be necessary.
* @param {string} [query=''] Limit fetch to members with similar usernames
* @param {number} [limit=0] Maximum number of members to request
* @returns {Promise<Guild>}
*/
fetchMembers(query = '', limit = 0) {
return new Promise((resolve, reject) => {
if (this.memberCount === this.members.size) {
// Uncomment in v12
// resolve(this.members)
resolve(this);
return;
}
this.client.ws.send({
op: Constants.OPCodes.REQUEST_GUILD_MEMBERS,
d: {
guild_id: this.id,
query,
limit,
},
});
const handler = (members, guild) => {
if (guild.id !== this.id) return;
if (this.memberCount === this.members.size || members.length < 1000) {
this.client.removeListener(Constants.Events.GUILD_MEMBERS_CHUNK, handler);
// Uncomment in v12
// resolve(this.members)
resolve(this);
}
};
this.client.on(Constants.Events.GUILD_MEMBERS_CHUNK, handler);
this.client.setTimeout(() => reject(new Error('Members didn\'t arrive in time.')), 120 * 1000);
});
}
/**
* Performs a search within the entire guild.
* <warn>This is only available when using a user account.</warn>
* @param {MessageSearchOptions} [options={}] Options to pass to the search
* @returns {Promise<Array<Message[]>>}
* An array containing arrays of messages. Each inner array is a search context cluster.
* The message which has triggered the result will have the `hit` property set to `true`.
* @example
* guild.search({
* content: 'discord.js',
* before: '2016-11-17'
* }).then(res => {
* const hit = res.messages[0].find(m => m.hit).content;
* console.log(`I found: **${hit}**, total results: ${res.totalResults}`);
* }).catch(console.error);
*/
search(options = {}) {
return this.client.rest.methods.search(this, options);
}
/**
* The data for editing a guild.
* @typedef {Object} GuildEditData
* @property {string} [name] The name of the guild
* @property {string} [region] The region of the guild
* @property {number} [verificationLevel] The verification level of the guild
* @property {ChannelResolvable} [afkChannel] The AFK channel of the guild
* @property {number} [afkTimeout] The AFK timeout of the guild
* @property {Base64Resolvable} [icon] The icon of the guild
* @property {GuildMemberResolvable} [owner] The owner of the guild
* @property {Base64Resolvable} [splash] The splash screen of the guild
*/
/**
* Updates the guild with new information - e.g. a new name.
* @param {GuildEditData} data The data to update the guild with
* @returns {Promise<Guild>}
* @example
* // Set the guild name and region
* guild.edit({
* name: 'Discord Guild',
* region: 'london',
* })
* .then(updated => console.log(`New guild name ${updated.name} in region ${updated.region}`))
* .catch(console.error);
*/
edit(data) {
return this.client.rest.methods.updateGuild(this, data);
}
/**
* Edit the name of the guild.
* @param {string} name The new name of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild name
* guild.setName('Discord Guild')
* .then(updated => console.log(`Updated guild name to ${guild.name}`))
* .catch(console.error);
*/
setName(name) {
return this.edit({ name });
}
/**
* Edit the region of the guild.
* @param {string} region The new region of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild region
* guild.setRegion('london')
* .then(updated => console.log(`Updated guild region to ${guild.region}`))
* .catch(console.error);
*/
setRegion(region) {
return this.edit({ region });
}
/**
* Edit the verification level of the guild.
* @param {number} verificationLevel The new verification level of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild verification level
* guild.setVerificationLevel(1)
* .then(updated => console.log(`Updated guild verification level to ${guild.verificationLevel}`))
* .catch(console.error);
*/
setVerificationLevel(verificationLevel) {
return this.edit({ verificationLevel });
}
/**
* Edit the AFK channel of the guild.
* @param {ChannelResolvable} afkChannel The new AFK channel
* @returns {Promise<Guild>}
* @example
* // Edit the guild AFK channel
* guild.setAFKChannel(channel)
* .then(updated => console.log(`Updated guild AFK channel to ${guild.afkChannel}`))
* .catch(console.error);
*/
setAFKChannel(afkChannel) {
return this.edit({ afkChannel });
}
/**
* Edit the AFK timeout of the guild.
* @param {number} afkTimeout The time in seconds that a user must be idle to be considered AFK
* @returns {Promise<Guild>}
* @example
* // Edit the guild AFK channel
* guild.setAFKTimeout(60)
* .then(updated => console.log(`Updated guild AFK timeout to ${guild.afkTimeout}`))
* .catch(console.error);
*/
setAFKTimeout(afkTimeout) {
return this.edit({ afkTimeout });
}
/**
* Set a new guild icon.
* @param {Base64Resolvable} icon The new icon of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild icon
* guild.setIcon(fs.readFileSync('./icon.png'))
* .then(updated => console.log('Updated the guild icon'))
* .catch(console.error);
*/
setIcon(icon) {
return this.edit({ icon });
}
/**
* Sets a new owner of the guild.
* @param {GuildMemberResolvable} owner The new owner of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild owner
* guild.setOwner(guild.members.first())
* .then(updated => console.log(`Updated the guild owner to ${updated.owner.username}`))
* .catch(console.error);
*/
setOwner(owner) {
return this.edit({ owner });
}
/**
* Set a new guild splash screen.
* @param {Base64Resolvable} splash The new splash screen of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild splash
* guild.setSplash(fs.readFileSync('./splash.png'))
* .then(updated => console.log('Updated the guild splash'))
* .catch(console.error);
*/
setSplash(splash) {
return this.edit({ splash });
}
/**
* @param {number} position Absolute or relative position
* @param {boolean} [relative=false] Whether to position relatively or absolutely
* @returns {Promise<Guild>}
*/
setPosition(position, relative) {
if (this.client.user.bot) {
return Promise.reject(new Error('Setting guild position is only available for user accounts'));
}
return this.client.user.settings.setGuildPosition(this, position, relative);
}
/**
* Marks all messages in this guild as read.
* <warn>This is only available when using a user account.</warn>
* @returns {Promise<Guild>} This guild
*/
acknowledge() {
return this.client.rest.methods.ackGuild(this);
}
/**
* Allow direct messages from guild members.
* @param {boolean} allow Whether to allow direct messages
* @returns {Promise<Guild>}
*/
allowDMs(allow) {
const settings = this.client.user.settings;
if (allow) return settings.removeRestrictedGuild(this);
else return settings.addRestrictedGuild(this);
}
/**
* Bans a user from the guild.
* @param {UserResolvable} user The user to ban
* @param {Object} [options] Ban options.
* @param {number} [options.days=0] Number of days of messages to delete
* @param {string} [options.reason] Reason for banning
* @returns {Promise<GuildMember|User|string>} Result object will be resolved as specifically as possible.
* If the GuildMember cannot be resolved, the User will instead be attempted to be resolved. If that also cannot
* be resolved, the user ID will be the result.
* @example
* // Ban a user by ID (or with a user/guild member object)
* guild.ban('some user ID')
* .then(user => console.log(`Banned ${user.username || user.id || user} from ${guild.name}`))
* .catch(console.error);
*/
ban(user, options = {}) {
if (typeof options === 'number') {
options = { reason: null, days: options };
} else if (typeof options === 'string') {
options = { reason: options, days: 0 };
}
return this.client.rest.methods.banGuildMember(this, user, options);
}
/**
* Unbans a user from the guild.
* @param {UserResolvable} user The user to unban
* @returns {Promise<User>}
* @example
* // Unban a user by ID (or with a user/guild member object)
* guild.unban('some user ID')
* .then(user => console.log(`Unbanned ${user.username} from ${guild.name}`))
* .catch(console.error);
*/
unban(user) {
return this.client.rest.methods.unbanGuildMember(this, user);
}
/**
* Prunes members from the guild based on how long they have been inactive.
* @param {number} days Number of days of inactivity required to kick
* @param {boolean} [dry=false] If true, will return number of users that will be kicked, without actually doing it
* @returns {Promise<number>} The number of members that were/will be kicked
* @example
* // See how many members will be pruned
* guild.pruneMembers(12, true)
* .then(pruned => console.log(`This will prune ${pruned} people!`))
* .catch(console.error);
* @example
* // Actually prune the members
* guild.pruneMembers(12)
* .then(pruned => console.log(`I just pruned ${pruned} people!`))
* .catch(console.error);
*/
pruneMembers(days, dry = false) {
if (typeof days !== 'number') throw new TypeError('Days must be a number.');
return this.client.rest.methods.pruneGuildMembers(this, days, dry);
}
/**
* Syncs this guild (already done automatically every 30 seconds).
* <warn>This is only available when using a user account.</warn>
*/
sync() {
if (!this.client.user.bot) this.client.syncGuilds([this]);
}
/**
* Creates a new channel in the guild.
* @param {string} name The name of the new channel
* @param {string} type The type of the new channel, either `text` or `voice`
* @param {Array<PermissionOverwrites|Object>} overwrites Permission overwrites to apply to the new channel
* @returns {Promise<TextChannel|VoiceChannel>}
* @example
* // Create a new text channel
* guild.createChannel('new-general', 'text')
* .then(channel => console.log(`Created new channel ${channel}`))
* .catch(console.error);
*/
createChannel(name, type, overwrites) {
return this.client.rest.methods.createChannel(this, name, type, overwrites);
}
/**
* The data needed for updating a channel's position.
* @typedef {Object} ChannelPosition
* @property {ChannelResolvable} channel Channel to update
* @property {number} position New position for the channel
*/
/**
* Batch-updates the guild's channels' positions.
* @param {ChannelPosition[]} channelPositions Channel positions to update
* @returns {Promise<Guild>}
* @example
* guild.updateChannels([{ channel: channelID, position: newChannelIndex }])
* .then(guild => console.log(`Updated channel positions for ${guild.id}`))
* .catch(console.error);
*/
setChannelPositions(channelPositions) {
return this.client.rest.methods.updateChannelPositions(this.id, channelPositions);
}
/**
* Creates a new role in the guild with given information
* @param {RoleData} [data] The data to update the role with
* @returns {Promise<Role>}
* @example
* // Create a new role
* guild.createRole()
* .then(role => console.log(`Created role ${role}`))
* .catch(console.error);
* @example
* // Create a new role with data
* guild.createRole({
* name: 'Super Cool People',
* color: 'BLUE',
* })
* .then(role => console.log(`Created role ${role}`))
* .catch(console.error)
*/
createRole(data = {}) {
return this.client.rest.methods.createGuildRole(this, data);
}
/**
* Creates a new custom emoji in the guild.
* @param {BufferResolvable|Base64Resolvable} attachment The image for the emoji
* @param {string} name The name for the emoji
* @param {Collection<Snowflake, Role>|Role[]} [roles] Roles to limit the emoji to
* @returns {Promise<Emoji>} The created emoji
* @example
* // Create a new emoji from a url
* guild.createEmoji('https://i.imgur.com/w3duR07.png', 'rip')
* .then(emoji => console.log(`Created new emoji with name ${emoji.name}!`))
* .catch(console.error);
* @example
* // Create a new emoji from a file on your computer
* guild.createEmoji('./memes/banana.png', 'banana')
* .then(emoji => console.log(`Created new emoji with name ${emoji.name}!`))
* .catch(console.error);
*/
createEmoji(attachment, name, roles) {
return new Promise(resolve => {
if (typeof attachment === 'string' && attachment.startsWith('data:')) {
resolve(this.client.rest.methods.createEmoji(this, attachment, name, roles));
} else {
this.client.resolver.resolveBuffer(attachment).then(data => {
const dataURI = this.client.resolver.resolveBase64(data);
resolve(this.client.rest.methods.createEmoji(this, dataURI, name, roles));
});
}
});
}
/**
* Delete an emoji.
* @param {Emoji|string} emoji The emoji to delete
* @returns {Promise}
*/
deleteEmoji(emoji) {
if (!(emoji instanceof Emoji)) emoji = this.emojis.get(emoji);
return this.client.rest.methods.deleteEmoji(emoji);
}
/**
* Causes the client to leave the guild.
* @returns {Promise<Guild>}
* @example
* // Leave a guild
* guild.leave()
* .then(g => console.log(`Left the guild ${g}`))
* .catch(console.error);
*/
leave() {
return this.client.rest.methods.leaveGuild(this);
}
/**
* Causes the client to delete the guild.
* @returns {Promise<Guild>}
* @example
* // Delete a guild
* guild.delete()
* .then(g => console.log(`Deleted the guild ${g}`))
* .catch(console.error);
*/
delete() {
return this.client.rest.methods.deleteGuild(this);
}
/**
* Whether this guild equals another guild. It compares all properties, so for most operations
* it is advisable to just compare `guild.id === guild2.id` as it is much faster and is often
* what most users need.
* @param {Guild} guild The guild to compare with
* @returns {boolean}
*/
equals(guild) {
let equal =
guild &&
this.id === guild.id &&
this.available === !guild.unavailable &&
this.splash === guild.splash &&
this.region === guild.region &&
this.name === guild.name &&
this.memberCount === guild.member_count &&
this.large === guild.large &&
this.icon === guild.icon &&
Util.arraysEqual(this.features, guild.features) &&
this.ownerID === guild.owner_id &&
this.verificationLevel === guild.verification_level &&
this.embedEnabled === guild.embed_enabled;
if (equal) {
if (this.embedChannel) {
if (this.embedChannel.id !== guild.embed_channel_id) equal = false;
} else if (guild.embed_channel_id) {
equal = false;
}
}
return equal;
}
/**
* When concatenated with a string, this automatically concatenates the guild's name instead of the guild object.
* @returns {string}
* @example
* // Logs: Hello from My Guild!
* console.log(`Hello from ${guild}!`);
* @example
* // Logs: Hello from My Guild!
* console.log('Hello from ' + guild + '!');
*/
toString() {
return this.name;
}
// Caches a raw gateway member payload as a GuildMember, hydrating any
// pending voice state, and optionally emits guildMemberAdd for new members.
_addMember(guildUser, emitEvent = true) {
  // Remember whether the member was already cached so guildMemberAdd is
  // only emitted for genuinely new members.
  const existing = this.members.has(guildUser.user.id);
  if (!(guildUser.user instanceof User)) guildUser.user = this.client.dataManager.newUser(guildUser.user);
  guildUser.joined_at = guildUser.joined_at || 0;
  const member = new GuildMember(this, guildUser);
  this.members.set(member.id, member);
  // Apply a raw voice state that arrived before the member did.
  if (this._rawVoiceStates && this._rawVoiceStates.has(member.user.id)) {
    const voiceState = this._rawVoiceStates.get(member.user.id);
    member.serverMute = voiceState.mute;
    member.serverDeaf = voiceState.deaf;
    member.selfMute = voiceState.self_mute;
    member.selfDeaf = voiceState.self_deaf;
    member.voiceSessionID = voiceState.session_id;
    member.voiceChannelID = voiceState.channel_id;
    if (this.client.channels.has(voiceState.channel_id)) {
      this.client.channels.get(voiceState.channel_id).members.set(member.user.id, member);
    } else {
      this.client.emit('warn', `Member ${member.id} added in guild ${this.id} with an uncached voice channel`);
    }
  }
  /**
   * Emitted whenever a user joins a guild.
   * @event Client#guildMemberAdd
   * @param {GuildMember} member The member that has joined a guild
   */
  // Emit only once the client is READY, emission was requested, and the
  // member was not already cached.
  if (this.client.ws.connection.status === Constants.Status.READY && emitEvent && !existing) {
    this.client.emit(Constants.Events.GUILD_MEMBER_ADD, member);
  }
  return member;
}
// Applies a raw member-update payload to a cached member, emitting
// guildMemberUpdate when roles or nickname actually changed.
_updateMember(member, data) {
  // Clone BEFORE mutating so listeners receive the pre-update snapshot.
  const oldMember = Util.cloneObject(member);
  if (data.roles) member._roles = data.roles;
  if (typeof data.nick !== 'undefined') member.nickname = data.nick;
  const notSame = member.nickname !== oldMember.nickname || !Util.arraysEqual(member._roles, oldMember._roles);
  if (this.client.ws.connection.status === Constants.Status.READY && notSame) {
    /**
     * Emitted whenever a guild member changes - i.e. new role, removed role, nickname.
     * @event Client#guildMemberUpdate
     * @param {GuildMember} oldMember The member before the update
     * @param {GuildMember} newMember The member after the update
     */
    this.client.emit(Constants.Events.GUILD_MEMBER_UPDATE, oldMember, member);
  }
  // Both snapshots are returned for callers that need the before/after pair.
  return {
    old: oldMember,
    mem: member,
  };
}
_removeMember(guildMember) {
this.members.delete(guildMember.id);
}
_memberSpeakUpdate(user, speaking) {
const member = this.members.get(user);
if (member && member.speaking !== speaking) {
member.speaking = speaking;
/**
* Emitted once a guild member starts/stops speaking.
* @event Client#guildMemberSpeaking
* @param {GuildMember} member The member that started/stopped speaking
* @param {boolean} speaking Whether or not the member is speaking
*/
this.client.emit(Constants.Events.GUILD_MEMBER_SPEAKING, member, speaking);
}
}
_setPresence(id, presence) {
if (this.presences.get(id)) {
this.presences.get(id).update(presence);
return;
}
this.presences.set(id, new Presence(presence));
}
/**
* Set the position of a role in this guild.
* @param {string|Role} role The role to edit, can be a role object or a role ID
* @param {number} position The new position of the role
* @param {boolean} [relative=false] Position Moves the role relative to its current position
* @returns {Promise<Guild>}
*/
setRolePosition(role, position, relative = false) {
if (typeof role === 'string') {
role = this.roles.get(role);
if (!role) return Promise.reject(new Error('Supplied role is not a role or snowflake.'));
}
position = Number(position);
if (isNaN(position)) return Promise.reject(new Error('Supplied position is not a number.'));
let updatedRoles = this._sortedRoles.array();
Util.moveElementInArray(updatedRoles, role, position, relative);
updatedRoles = updatedRoles.map((r, i) => ({ id: r.id, position: i }));
return this.client.rest.methods.setRolePositions(this.id, updatedRoles);
}
/**
* Set the position of a channel in this guild.
* @param {string|GuildChannel} channel The channel to edit, can be a channel object or a channel ID
* @param {number} position The new position of the channel
* @param {boolean} [relative=false] Position Moves the channel relative to its current position
* @returns {Promise<Guild>}
*/
setChannelPosition(channel, position, relative = false) {
if (typeof channel === 'string') {
channel = this.channels.get(channel);
if (!channel) return Promise.reject(new Error('Supplied channel is not a channel or snowflake.'));
}
position = Number(position);
if (isNaN(position)) return Promise.reject(new Error('Supplied position is not a number.'));
let updatedChannels = this._sortedChannels(channel.type).array();
Util.moveElementInArray(updatedChannels, channel, position, relative);
updatedChannels = updatedChannels.map((r, i) => ({ id: r.id, position: i }));
return this.client.rest.methods.setChannelPositions(this.id, updatedChannels);
}
/**
* Fetches a collection of channels in the current guild sorted by position.
* @param {string} type The channel type
* @returns {Collection<Snowflake, GuildChannel>}
* @private
*/
_sortedChannels(type) {
return this._sortPositionWithID(this.channels.filter(c => {
if (type === 'voice' && c.type === 'voice') return true;
else if (type !== 'voice' && c.type !== 'voice') return true;
else return type === c.type;
}));
}
/**
* Sorts a collection by object position or ID if the positions are equivalent.
* Intended to be identical to Discord's sorting method.
* @param {Collection} collection The collection to sort
* @returns {Collection}
* @private
*/
_sortPositionWithID(collection) {
return collection.sort((a, b) =>
a.position !== b.position ?
a.position - b.position :
Long.fromString(a.id).sub(Long.fromString(b.id)).toNumber()
);
}
}
module.exports = Guild;
| aemino/discord.js | src/structures/Guild.js | JavaScript | apache-2.0 | 34,442 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import six
import logging
import filecmp
import os
import re
import sys
import uuid
import json
import time
from nose.plugins.attrib import attr
from nose.tools import assert_raises, assert_equals, assert_less
import tempfile
import shutil
from mock import patch
import synapseclient
import synapseclient.client as client
import synapseclient.utils as utils
import synapseclient.__main__ as cmdline
from synapseclient.evaluation import Evaluation
import integration
from integration import schedule_for_cleanup, QUERY_TIMEOUT_SEC
if six.PY2:
from StringIO import StringIO
else:
from io import StringIO
def setup_module(module):
    # Nose module-level setup: expose shared integration fixtures as module
    # globals so the tests below can reference syn/project/parser directly.
    module.syn = integration.syn
    module.project = integration.project
    module.parser = cmdline.build_parser()
    # used for --description and --descriptionFile tests
    module.upload_filename = _create_temp_file_with_cleanup()
    module.description_text = "'some description text'"
    module.desc_filename = _create_temp_file_with_cleanup(module.description_text)
    module.update_description_text = "'SOMEBODY ONCE TOLD ME THE WORLD WAS GONNA ROLL ME I AINT THE SHARPEST TOOL IN THE SHED'"
    module.other_user = integration.other_user
def run(*command, **kwargs):
    """
    Sends the given command list to the command line client.

    ``command`` is the argv-style token list (e.g. ``'synapse', 'get', ...``);
    ``kwargs`` may supply ``syn`` to override the client instance used.

    :returns: The STDOUT output of the command.
    """
    # Capture both print() output and the client logger by swapping stdout
    # and temporarily attaching a stream handler.
    old_stdout = sys.stdout
    capturedSTDOUT = StringIO()
    syn_client = kwargs.get('syn', syn)
    stream_handler = logging.StreamHandler(capturedSTDOUT)
    try:
        sys.stdout = capturedSTDOUT
        syn_client.logger.addHandler(stream_handler)
        sys.argv = [item for item in command]
        args = parser.parse_args()
        args.debug = True
        cmdline.perform_main(args, syn_client)
    except SystemExit:
        pass  # Prevent the test from quitting prematurely
    finally:
        # Always restore stdout and detach the handler, even on failure.
        sys.stdout = old_stdout
        syn_client.logger.handlers.remove(stream_handler)
    capturedSTDOUT = capturedSTDOUT.getvalue()
    return capturedSTDOUT
def parse(regex, output):
    """Return the first capture group of ``regex`` found in ``output``.

    Falls back to the full matched text when the pattern has no capture
    groups.

    :raises Exception: if the pattern does not match ``output`` at all.
    """
    m = re.search(regex, output)
    if not m:
        raise Exception('ERROR parsing output: "' + str(output) + '"')
    # Fix: a group-less pattern previously fell through and silently
    # returned None; return the whole match instead.
    return m.group(1).strip() if m.groups() else m.group(0).strip()
def test_command_line_client():
    """End-to-end smoke test of the `synapse` CLI: create a project,
    add/get/update a file, query, move, set/get provenance, create an
    external file handle, and delete the project."""
    # Create a Project
    output = run('synapse',
                 '--skip-checks',
                 'create',
                 '-name',
                 str(uuid.uuid4()),
                 '-description',
                 'test of command line client',
                 'Project')
    project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
    schedule_for_cleanup(project_id)
    # Create a File
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse',
                 '--skip-checks',
                 'add',
                 '-name',
                 'BogusFileEntity',
                 '-description',
                 'Bogus data to test file upload',
                 '-parentid',
                 project_id,
                 filename)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    # Verify that we stored the file in Synapse
    f1 = syn.get(file_entity_id)
    fh = syn._getFileHandle(f1.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'
    # Get File from the command line
    output = run('synapse',
                 '--skip-checks',
                 'get',
                 file_entity_id)
    downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
    schedule_for_cleanup(downloaded_filename)
    assert os.path.exists(downloaded_filename)
    assert filecmp.cmp(filename, downloaded_filename)
    # Update the File
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse',
                 '--skip-checks',
                 'store',
                 '--id',
                 file_entity_id,
                 filename)
    updated_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)', output)
    # Get the File again
    output = run('synapse',
                 '--skip-checks',
                 'get',
                 file_entity_id)
    downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
    schedule_for_cleanup(downloaded_filename)
    assert os.path.exists(downloaded_filename)
    assert filecmp.cmp(filename, downloaded_filename)
    # Test query
    # Poll until the index catches up, bounded by QUERY_TIMEOUT_SEC.
    output = ""
    start_time = time.time()
    while not ('BogusFileEntity' in output and file_entity_id in output):
        assert_less(time.time() - start_time, QUERY_TIMEOUT_SEC)
        output = run('synapse',
                     '--skip-checks',
                     'query',
                     'select id, name from entity where parentId=="%s"' % project_id)
    # Move the file to new folder
    folder = syn.store(synapseclient.Folder(parentId=project_id))
    output = run('synapse',
                 'mv',
                 '--id',
                 file_entity_id,
                 '--parentid',
                 folder.id)
    downloaded_filename = parse(r'Moved\s+(.*)', output)
    movedFile = syn.get(file_entity_id, downloadFile=False)
    assert movedFile.parentId == folder.id
    # Test Provenance
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    output = run('synapse',
                 '--skip-checks',
                 'set-provenance',
                 '-id',
                 file_entity_id,
                 '-name',
                 'TestActivity',
                 '-description',
                 'A very excellent provenance',
                 '-used',
                 file_entity_id,
                 '-executed',
                 repo_url)
    activity_id = parse(r'Set provenance record (\d+) on entity syn\d+', output)
    output = run('synapse',
                 '--skip-checks',
                 'get-provenance',
                 '--id',
                 file_entity_id)
    activity = json.loads(output)
    assert activity['name'] == 'TestActivity'
    assert activity['description'] == 'A very excellent provenance'
    used = utils._find_used(activity, lambda used: 'reference' in used)
    assert used['reference']['targetId'] == file_entity_id
    used = utils._find_used(activity, lambda used: 'url' in used)
    assert used['url'] == repo_url
    assert used['wasExecuted'] == True
    # Note: Tests shouldn't have external dependencies
    # but this is a pretty picture of Singapore
    singapore_url = 'http://upload.wikimedia.org/wikipedia/commons/' \
                    'thumb/3/3e/1_singapore_city_skyline_dusk_panorama_2011.jpg' \
                    '/1280px-1_singapore_city_skyline_dusk_panorama_2011.jpg'
    # Test external file handle
    output = run('synapse',
                 '--skip-checks',
                 'add',
                 '-name',
                 'Singapore',
                 '-description',
                 'A nice picture of Singapore',
                 '-parentid',
                 project_id,
                 singapore_url)
    exteral_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    # Verify that we created an external file handle
    f2 = syn.get(exteral_entity_id)
    fh = syn._getFileHandle(f2.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle'
    output = run('synapse',
                 '--skip-checks',
                 'get',
                 exteral_entity_id)
    downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
    schedule_for_cleanup(downloaded_filename)
    assert os.path.exists(downloaded_filename)
    # Delete the Project
    output = run('synapse',
                 '--skip-checks',
                 'delete',
                 project_id)
def test_command_line_client_annotations():
    """Exercises set-annotations/get-annotations through the CLI, including
    the --replace flag and setting annotations at add/store time."""
    # Create a Project
    output = run('synapse',
                 '--skip-checks',
                 'create',
                 '-name',
                 str(uuid.uuid4()),
                 '-description',
                 'test of command line client',
                 'Project')
    project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
    schedule_for_cleanup(project_id)
    # Create a File
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse',
                 '--skip-checks',
                 'add',
                 '-name',
                 'BogusFileEntity',
                 '-description',
                 'Bogus data to test file upload',
                 '-parentid',
                 project_id,
                 filename)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    # Test setting annotations
    output = run('synapse',
                 '--skip-checks',
                 'set-annotations',
                 '--id',
                 file_entity_id,
                 '--annotations',
                 '{"foo": 1, "bar": "1", "baz": [1, 2, 3]}',
                 )
    # Test getting annotations
    # check that the three things set are correct
    # This test should be adjusted to check for equality of the
    # whole annotation dictionary once the issue of other
    # attributes (creationDate, eTag, id, uri) being returned is resolved
    # See: https://sagebionetworks.jira.com/browse/SYNPY-175
    output = run('synapse',
                 '--skip-checks',
                 'get-annotations',
                 '--id',
                 file_entity_id
                 )
    annotations = json.loads(output)
    # Note: Synapse wraps scalar annotation values in lists.
    assert annotations['foo'] == [1]
    assert annotations['bar'] == [u"1"]
    assert annotations['baz'] == [1, 2, 3]
    # Test setting annotations by replacing existing ones.
    output = run('synapse',
                 '--skip-checks',
                 'set-annotations',
                 '--id',
                 file_entity_id,
                 '--annotations',
                 '{"foo": 2}',
                 '--replace'
                 )
    # Test that the annotation was updated
    output = run('synapse',
                 '--skip-checks',
                 'get-annotations',
                 '--id',
                 file_entity_id
                 )
    annotations = json.loads(output)
    assert annotations['foo'] == [2]
    # Since this replaces the existing annotations, previous values
    # Should not be available.
    assert_raises(KeyError, lambda key: annotations[key], 'bar')
    assert_raises(KeyError, lambda key: annotations[key], 'baz')
    # Test running add command to set annotations on a new object
    filename2 = utils.make_bogus_data_file()
    schedule_for_cleanup(filename2)
    output = run('synapse',
                 '--skip-checks',
                 'add',
                 '-name',
                 'BogusData2',
                 '-description',
                 'Bogus data to test file upload with add and add annotations',
                 '-parentid',
                 project_id,
                 '--annotations',
                 '{"foo": 123}',
                 filename2)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    # Test that the annotation was updated
    output = run('synapse',
                 '--skip-checks',
                 'get-annotations',
                 '--id',
                 file_entity_id
                 )
    annotations = json.loads(output)
    assert annotations['foo'] == [123]
    # Test running store command to set annotations on a new object
    filename3 = utils.make_bogus_data_file()
    schedule_for_cleanup(filename3)
    output = run('synapse',
                 '--skip-checks',
                 'store',
                 '--name',
                 'BogusData3',
                 '--description',
                 '\"Bogus data to test file upload with store and add annotations\"',
                 '--parentid',
                 project_id,
                 '--annotations',
                 '{"foo": 456}',
                 filename3)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    # Test that the annotation was updated
    output = run('synapse',
                 '--skip-checks',
                 'get-annotations',
                 '--id',
                 file_entity_id
                 )
    annotations = json.loads(output)
    assert annotations['foo'] == [456]
def test_command_line_store_and_submit():
    """End-to-end test of 'synapse store' and 'synapse submit'.

    Creates a Project, stores local and external-URL files, and submits them
    to an Evaluation using the different submission option spellings
    (--evaluation, --evalID, --evaluationName). Relies on module-level
    helpers: run, parse, syn, schedule_for_cleanup, utils.
    """
    # Create a Project
    output = run('synapse',
                 '--skip-checks',
                 'store',
                 '--name',
                 str(uuid.uuid4()),
                 '--description',
                 'test of store command',
                 '--type',
                 'Project')
    project_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    schedule_for_cleanup(project_id)

    # Create and upload a file
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse',
                 '--skip-checks',
                 'store',
                 '--description',
                 'Bogus data to test file upload',
                 '--parentid',
                 project_id,
                 '--file',
                 filename)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)

    # Verify that we stored the file in Synapse
    f1 = syn.get(file_entity_id)
    fh = syn._getFileHandle(f1.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'

    # Test that entity is named after the file it contains
    assert f1.name == os.path.basename(filename)

    # Create an Evaluation to submit to.
    # (Renamed local from 'eval' to avoid shadowing the builtin.)
    evaluation = Evaluation(name=str(uuid.uuid4()), contentSource=project_id)
    evaluation = syn.store(evaluation)
    schedule_for_cleanup(evaluation)

    # Submit a bogus file
    output = run('synapse',
                 '--skip-checks',
                 'submit',
                 '--evaluation',
                 evaluation.id,
                 '--name',
                 'Some random name',
                 '--entity',
                 file_entity_id)
    submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)

    # Testing different command line options for submitting to an evaluation:
    # submitting to an evaluation by evaluation ID
    output = run('synapse',
                 '--skip-checks',
                 'submit',
                 '--evalID',
                 evaluation.id,
                 '--name',
                 'Some random name',
                 '--alias',
                 'My Team',
                 '--entity',
                 file_entity_id)
    submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)

    # Update the file
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse',
                 '--skip-checks',
                 'store',
                 '--id',
                 file_entity_id,
                 '--file',
                 filename)
    updated_entity_id = parse(r'Updated entity:\s+(syn\d+)', output)
    schedule_for_cleanup(updated_entity_id)

    # Submit an updated bogus file and this time by evaluation name
    output = run('synapse',
                 '--skip-checks',
                 'submit',
                 '--evaluationName',
                 evaluation.name,
                 '--entity',
                 file_entity_id)
    submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)

    # Tests shouldn't have external dependencies, but here it's required
    ducky_url = 'https://www.synapse.org/Portal/clear.cache.gif'

    # Test external file handle
    output = run('synapse',
                 '--skip-checks',
                 'store',
                 '--name',
                 'Rubber Ducky',
                 '--description',
                 'I like rubber duckies',
                 '--parentid',
                 project_id,
                 '--file',
                 ducky_url)
    # (Fixed local-variable typo: 'exteral' -> 'external'.)
    external_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    schedule_for_cleanup(external_entity_id)

    # Verify that we created an external file handle
    f2 = syn.get(external_entity_id)
    fh = syn._getFileHandle(f2.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle'

    # Submit an external file to an evaluation and use provenance
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    output = run('synapse',
                 '--skip-checks',
                 'submit',
                 '--evalID',
                 evaluation.id,
                 '--file',
                 filename,
                 '--parent',
                 project_id,
                 '--used',
                 external_entity_id,
                 '--executed',
                 repo_url
                 )
    submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)

    # Delete project
    output = run('synapse',
                 '--skip-checks',
                 'delete',
                 project_id)
def test_command_get_recursive_and_query():
    """Tests the 'synapse get -r' and 'synapse get -q' functions"""
    project_entity = project

    # Create Folders in Project
    folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()),
                                                   parent=project_entity))
    folder_entity2 = syn.store(synapseclient.Folder(name=str(uuid.uuid4()),
                                                    parent=folder_entity))

    # Create and upload two files in sub-Folder
    uploaded_paths = []
    file_entities = []
    for i in range(2):
        f = utils.make_bogus_data_file()
        uploaded_paths.append(f)
        schedule_for_cleanup(f)
        file_entity = synapseclient.File(f, parent=folder_entity2)
        file_entity = syn.store(file_entity)
        file_entities.append(file_entity)
        # (Removed a duplicate schedule_for_cleanup(f) call; f is already
        # scheduled above.)

    # Add a file in the Folder as well
    f = utils.make_bogus_data_file()
    uploaded_paths.append(f)
    schedule_for_cleanup(f)
    file_entity = synapseclient.File(f, parent=folder_entity)
    file_entity = syn.store(file_entity)
    file_entities.append(file_entity)

    # get -r uses syncFromSynapse() which uses getChildren(), which is not
    # immediately consistent, but faster than chunked queries.
    time.sleep(2)

    # Test recursive get
    output = run('synapse', '--skip-checks',
                 'get', '-r',
                 folder_entity.id)

    # Verify that we downloaded files:
    new_paths = [os.path.join('.', folder_entity2.name, os.path.basename(f)) for f in uploaded_paths[:-1]]
    new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1])))
    schedule_for_cleanup(folder_entity.name)
    for downloaded, uploaded in zip(new_paths, uploaded_paths):
        assert os.path.exists(downloaded)
        assert filecmp.cmp(downloaded, uploaded)
        schedule_for_cleanup(downloaded)

    time.sleep(3)  # get -q uses chunkedQuery which are eventually consistent

    # Test query get
    # Note: We're not querying on annotations because tests can fail if there
    # are lots of jobs queued as happens when staging is syncing
    output = run('synapse', '--skip-checks',
                 'get', '-q', "select id from file where parentId=='%s'" %
                 folder_entity2.id)

    # Verify that we downloaded files from folder_entity2
    new_paths = [os.path.join('.', os.path.basename(f)) for f in uploaded_paths[:-1]]
    for downloaded, uploaded in zip(new_paths, uploaded_paths[:-1]):
        assert os.path.exists(downloaded)
        assert filecmp.cmp(downloaded, uploaded)
        schedule_for_cleanup(downloaded)
    schedule_for_cleanup(new_paths[0])

    # Test query get using a Table with an entity column
    # This should be replaced when Table File Views are implemented in the client
    cols = []
    cols.append(synapseclient.Column(name='id', columnType='ENTITYID'))
    schema1 = syn.store(synapseclient.Schema(name='Foo Table', columns=cols, parent=project_entity))
    schedule_for_cleanup(schema1.id)

    data1 = [[x.id] for x in file_entities]
    row_reference_set1 = syn.store(synapseclient.RowSet(schema=schema1,
                                                        rows=[synapseclient.Row(r) for r in data1]))

    time.sleep(3)  # get -q uses chunkedQuery which are eventually consistent

    # Test Table/View query get
    output = run('synapse', '--skip-checks', 'get', '-q',
                 "select id from %s" % schema1.id)

    # Verify that we downloaded files:
    new_paths = [os.path.join('.', os.path.basename(f)) for f in uploaded_paths[:-1]]
    new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1])))
    schedule_for_cleanup(folder_entity.name)
    for downloaded, uploaded in zip(new_paths, uploaded_paths):
        assert os.path.exists(downloaded)
        assert filecmp.cmp(downloaded, uploaded)
        schedule_for_cleanup(downloaded)
    schedule_for_cleanup(new_paths[0])
def test_command_copy():
    """Tests the 'synapse cp' function"""
    # Create a Project
    project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)

    # Create a Folder in Project
    folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()),
                                                   parent=project_entity))
    schedule_for_cleanup(folder_entity.id)

    # Create and upload a file in Folder
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    annots = {'test': ['hello_world']}

    # Create, upload, and set annotations on a file in Folder
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    file_entity = syn.store(synapseclient.File(filename, parent=folder_entity))
    # External-URL file entity (synapseStore=False => Synapse stores only the link)
    externalURL_entity = syn.store(synapseclient.File(repo_url, name='rand', parent=folder_entity, synapseStore=False))
    syn.setAnnotations(file_entity, annots)
    syn.setAnnotations(externalURL_entity, annots)
    schedule_for_cleanup(file_entity.id)
    schedule_for_cleanup(externalURL_entity.id)

    # Test cp function: copy both the stored file and the external URL entity
    output = run('synapse', '--skip-checks',
                 'cp', file_entity.id,
                 '--destinationId', project_entity.id)
    output_URL = run('synapse', '--skip-checks',
                     'cp', externalURL_entity.id,
                     '--destinationId', project_entity.id)

    copied_id = parse(r'Copied syn\d+ to (syn\d+)', output)
    copied_URL_id = parse(r'Copied syn\d+ to (syn\d+)', output_URL)

    # Verify that our copied files are identical
    copied_ent = syn.get(copied_id)
    copied_URL_ent = syn.get(copied_URL_id, downloadFile=False)
    schedule_for_cleanup(copied_id)
    schedule_for_cleanup(copied_URL_id)
    copied_ent_annot = syn.getAnnotations(copied_id)
    copied_url_annot = syn.getAnnotations(copied_URL_id)
    # 'cp' records provenance pointing back at the source entity
    copied_prov = syn.getProvenance(copied_id)['used'][0]['reference']['targetId']
    copied_url_prov = syn.getProvenance(copied_URL_id)['used'][0]['reference']['targetId']

    # Make sure copied files are the same
    assert copied_prov == file_entity.id
    assert copied_ent_annot == annots
    assert copied_ent.properties.dataFileHandleId == file_entity.properties.dataFileHandleId

    # Make sure copied URLs are the same
    assert copied_url_prov == externalURL_entity.id
    assert copied_url_annot == annots
    assert copied_URL_ent.externalURL == repo_url
    assert copied_URL_ent.name == 'rand'
    assert copied_URL_ent.properties.dataFileHandleId == externalURL_entity.properties.dataFileHandleId

    # Verify that errors are being thrown when a
    # file is copied to a folder/project that has a file with the same filename
    assert_raises(ValueError, run, 'synapse', '--debug', '--skip-checks',
                  'cp', file_entity.id,
                  '--destinationId', project_entity.id)
def test_command_line_using_paths():
    """Test CLI commands ('show', 'get', 'set-provenance', 'add', 'associate')
    that accept local file paths instead of Synapse IDs.

    Relies on module-level helpers: run, parse, syn, schedule_for_cleanup.
    """
    # Create a Project
    project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)

    # Create a Folder in Project
    folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=project_entity))

    # Create and upload a file in Folder
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    file_entity = syn.store(synapseclient.File(filename, parent=folder_entity))

    # Verify that we can use show with a filename.
    # (Renamed local from 'id' to avoid shadowing the builtin.)
    output = run('synapse', '--skip-checks', 'show', filename)
    entity_id = parse(r'File: %s\s+\((syn\d+)\)\s+' % os.path.split(filename)[1], output)
    assert file_entity.id == entity_id

    # Verify that limitSearch works by making sure we get the file entity
    # that's inside the folder
    file_entity2 = syn.store(synapseclient.File(filename, parent=project_entity))
    output = run('synapse', '--skip-checks', 'get',
                 '--limitSearch', folder_entity.id,
                 filename)
    entity_id = parse(r'Associated file: .* with synapse ID (syn\d+)', output)
    name = parse(r'Associated file: (.*) with synapse ID syn\d+', output)
    assert_equals(file_entity.id, entity_id)
    assert utils.equal_paths(name, filename)

    # Verify that set-provenance works with filepath
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    output = run('synapse', '--skip-checks', 'set-provenance',
                 '-id', file_entity2.id,
                 '-name', 'TestActivity',
                 '-description', 'A very excellent provenance',
                 '-used', filename,
                 '-executed', repo_url,
                 '-limitSearch', folder_entity.id)
    activity_id = parse(r'Set provenance record (\d+) on entity syn\d+', output)

    output = run('synapse', '--skip-checks', 'get-provenance',
                 '-id', file_entity2.id)
    activity = json.loads(output)
    assert activity['name'] == 'TestActivity'
    assert activity['description'] == 'A very excellent provenance'

    # Verify that store works with provenance specified with filepath
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    filename2 = utils.make_bogus_data_file()
    schedule_for_cleanup(filename2)
    output = run('synapse', '--skip-checks', 'add', filename2,
                 '-parentid', project_entity.id,
                 '-used', filename,
                 '-executed', '%s %s' % (repo_url, filename))
    entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    output = run('synapse', '--skip-checks', 'get-provenance',
                 '-id', entity_id)
    activity = json.loads(output)
    # Pick out the 'used' (not executed) references.
    # (Rewrote '== False' as 'not ...' and un-shadowed the comprehension var.)
    used = [u for u in activity['used'] if not u['wasExecuted']]
    assert used[0]['reference']['targetId'] in [file_entity.id, file_entity2.id]

    # Test associate command:
    # we have two files in Synapse, filename and filename2
    path = tempfile.mkdtemp()
    schedule_for_cleanup(path)
    shutil.copy(filename, path)
    shutil.copy(filename2, path)
    output = run('synapse', '--skip-checks', 'associate', path, '-r')
    output = run('synapse', '--skip-checks', 'show', filename)
def test_table_query():
    """Test command line ability to do table query.
    """
    # Define the table schema as a list literal rather than repeated appends.
    cols = [
        synapseclient.Column(name='name', columnType='STRING', maximumSize=1000),
        synapseclient.Column(name='foo', columnType='STRING', enumValues=['foo', 'bar', 'bat']),
        synapseclient.Column(name='x', columnType='DOUBLE'),
        synapseclient.Column(name='age', columnType='INTEGER'),
        synapseclient.Column(name='cartoon', columnType='BOOLEAN'),
    ]

    project_entity = project

    schema1 = syn.store(synapseclient.Schema(name=str(uuid.uuid4()), columns=cols, parent=project_entity))
    schedule_for_cleanup(schema1.id)

    data1 = [['Chris', 'bar', 11.23, 45, False],
             ['Jen', 'bat', 14.56, 40, False],
             ['Jane', 'bat', 17.89, 6, False],
             ['Henry', 'bar', 10.12, 1, False]]
    row_reference_set1 = syn.store(synapseclient.RowSet(schema=schema1,
                                                        rows=[synapseclient.Row(r) for r in data1]))

    # Run the CLI query and split its output into rows.
    output = run('synapse', '--skip-checks', 'query',
                 'select * from %s' % schema1.id)
    output_rows = output.rstrip("\n").split("\n")

    # Check the length of the output: header row plus the four data rows.
    assert len(output_rows) == 5, "got %s rows" % (len(output_rows),)

    # Check that headers are correct.
    # Should be column names in schema plus the ROW_ID and ROW_VERSION
    my_headers_set = output_rows[0].split("\t")
    expected_headers_set = ["ROW_ID", "ROW_VERSION"] + [c.name for c in cols]
    assert my_headers_set == expected_headers_set, "%r != %r" % (my_headers_set, expected_headers_set)
def test_login():
    """Test the 'synapse login' command with mocked Synapse client calls.

    Skips when no [test-authentication] credentials are configured.
    """
    if not other_user['username']:
        raise SkipTest("Skipping test for login command: No [test-authentication] in %s" % client.CONFIG_FILE)

    alt_syn = synapseclient.Synapse()
    # Mock login/getUserProfile so no network call is made; we only verify
    # the CLI forwards the expected arguments.
    with patch.object(alt_syn, "login") as mock_login, patch.object(alt_syn, "getUserProfile", return_value={"userName": "test_user", "ownerId": "ownerId"}) as mock_get_user_profile:
        output = run('synapse', '--skip-checks', 'login',
                     '-u', other_user['username'],
                     '-p', other_user['password'],
                     '--rememberMe',
                     syn=alt_syn)
        mock_login.assert_called_once_with(other_user['username'], other_user['password'], forced=True, rememberMe=True, silent=False)
        mock_get_user_profile.assert_called_once_with()
def test_configPath():
    """Test using a user-specified configPath for Synapse configuration file.
    """
    # Copy the real config to a temp file; delete=False so the CLI can reopen
    # it by name. Schedule it for cleanup since it won't auto-delete.
    tmp_config_file = tempfile.NamedTemporaryFile(suffix='.synapseConfig', delete=False)
    shutil.copyfile(synapseclient.client.CONFIG_FILE, tmp_config_file.name)
    schedule_for_cleanup(tmp_config_file.name)

    # Create a File
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse',
                 '--skip-checks',
                 '--configPath',
                 tmp_config_file.name,
                 'add',
                 '-name',
                 'BogusFileEntityTwo',
                 '-description',
                 'Bogus data to test file upload',
                 '-parentid',
                 project.id,
                 filename)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)

    # Verify that we stored the file in Synapse
    f1 = syn.get(file_entity_id)
    fh = syn._getFileHandle(f1.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'
def _description_wiki_check(run_output, expected_description):
    # Helper: parse the entity id out of CLI output and assert that the
    # entity's wiki markdown equals the expected description text.
    entity_id = parse(r'Created.* entity:\s+(syn\d+)\s+', run_output)
    wiki = syn.getWiki(entity_id)
    assert_equals(expected_description, wiki.markdown)
def _create_temp_file_with_cleanup(specific_file_text=None):
    """Create a temporary data file scheduled for cleanup and return its path.

    :param specific_file_text: if given, the file contains exactly this text;
                               otherwise a bogus data file is generated.
    """
    if specific_file_text:
        with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as file:
            file.write(specific_file_text)
            filename = file.name
    else:
        filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    return filename
def test_create__with_description():
    # Verify 'synapse create' stores --description as the entity's wiki.
    # NOTE(review): relies on module-level 'project' and 'description_text'
    # fixtures defined elsewhere in this file.
    output = run('synapse',
                 'create',
                 'Folder',
                 '-name',
                 str(uuid.uuid4()),
                 '-parentid',
                 project.id,
                 '--description',
                 description_text
                 )
    _description_wiki_check(output, description_text)
def test_store__with_description():
    # Verify 'synapse store' stores --description as the entity's wiki.
    output = run('synapse',
                 'store',
                 upload_filename,
                 '-name',
                 str(uuid.uuid4()),
                 '-parentid',
                 project.id,
                 '--description',
                 description_text
                 )
    _description_wiki_check(output, description_text)
def test_add__with_description():
    # Verify 'synapse add' stores --description as the entity's wiki.
    output = run('synapse',
                 'add',
                 upload_filename,
                 '-name',
                 str(uuid.uuid4()),
                 '-parentid',
                 project.id,
                 '--description',
                 description_text
                 )
    _description_wiki_check(output, description_text)
def test_create__with_descriptionFile():
    # Verify 'synapse create' reads --descriptionFile into the entity's wiki.
    output = run('synapse',
                 'create',
                 'Folder',
                 '-name',
                 str(uuid.uuid4()),
                 '-parentid',
                 project.id,
                 '--descriptionFile',
                 desc_filename
                 )
    _description_wiki_check(output, description_text)
def test_store__with_descriptionFile():
    # Verify 'synapse store' reads --descriptionFile into the entity's wiki.
    output = run('synapse',
                 'store',
                 upload_filename,
                 '-name',
                 str(uuid.uuid4()),
                 '-parentid',
                 project.id,
                 '--descriptionFile',
                 desc_filename
                 )
    _description_wiki_check(output, description_text)
def test_add__with_descriptionFile():
    # Verify 'synapse add' reads --descriptionFile into the entity's wiki.
    output = run('synapse',
                 'add',
                 upload_filename,
                 '-name',
                 str(uuid.uuid4()),
                 '-parentid',
                 project.id,
                 '--descriptionFile',
                 desc_filename
                 )
    _description_wiki_check(output, description_text)
def test_create__update_description():
    # Verify a second 'synapse create' with the same name updates the wiki
    # from --descriptionFile content to new --description text.
    name = str(uuid.uuid4())
    output = run('synapse',
                 'create',
                 'Folder',
                 '-name',
                 name,
                 '-parentid',
                 project.id,
                 '--descriptionFile',
                 desc_filename
                 )
    _description_wiki_check(output, description_text)
    output = run('synapse',
                 'create',
                 'Folder',
                 '-name',
                 name,
                 '-parentid',
                 project.id,
                 '--description',
                 update_description_text
                 )
    _description_wiki_check(output, update_description_text)
def test_store__update_description():
    # Verify a second 'synapse store' with the same name updates the wiki
    # from --descriptionFile content to new --description text.
    name = str(uuid.uuid4())
    output = run('synapse',
                 'store',
                 upload_filename,
                 '-name',
                 name,
                 '-parentid',
                 project.id,
                 '--descriptionFile',
                 desc_filename
                 )
    _description_wiki_check(output, description_text)
    output = run('synapse',
                 'store',
                 upload_filename,
                 '-name',
                 name,
                 '-parentid',
                 project.id,
                 '--description',
                 update_description_text
                 )
    _description_wiki_check(output, update_description_text)
def test_add__update_description():
    # Verify a second 'synapse add' with the same name updates the wiki
    # from --descriptionFile content to new --description text.
    name = str(uuid.uuid4())
    output = run('synapse',
                 'add',
                 upload_filename,
                 '-name',
                 name,
                 '-parentid',
                 project.id,
                 '--descriptionFile',
                 desc_filename
                 )
    _description_wiki_check(output, description_text)
    output = run('synapse',
                 'add',
                 upload_filename,
                 '-name',
                 name,
                 '-parentid',
                 project.id,
                 '--description',
                 update_description_text
                 )
    _description_wiki_check(output, update_description_text)
| zimingd/synapsePythonClient | tests/integration/test_command_line_client.py | Python | apache-2.0 | 36,272 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.metadata;
import org.apache.hadoop.fs.Path;
/**
* Utility class for validation of metadata tablet files.
*/
/**
 * Utility class for validation of metadata tablet files.
 */
public class TabletFileUtil {

  private TabletFileUtil() {
    // static utility class; prevent instantiation
  }

  /**
   * Validate if string is a valid path. Return normalized string or throw exception if not valid.
   * This was added to facilitate more use of TabletFile over String but this puts the validation in
   * one location in the case where TabletFile can't be used.
   *
   * @param path the path string to validate
   * @return the normalized path string
   * @throws IllegalArgumentException if the path has no scheme
   */
  public static String validate(String path) {
    Path p = new Path(path);
    if (p.toUri().getScheme() == null) {
      throw new IllegalArgumentException("Invalid path provided, no scheme in " + path);
    }
    return p.toString();
  }

  /**
   * Validate that the given {@link Path} has a scheme.
   *
   * @param path the path to validate
   * @return the same path, unchanged
   * @throws IllegalArgumentException if the path has no scheme
   */
  public static Path validate(Path path) {
    if (path.toUri().getScheme() == null) {
      throw new IllegalArgumentException("Invalid path provided, no scheme in " + path);
    }
    return path;
  }
}
| keith-turner/accumulo | core/src/main/java/org/apache/accumulo/core/metadata/TabletFileUtil.java | Java | apache-2.0 | 1,705 |
package com.codefactoring.android.backlogtracker.sync.fetchers;
import android.util.Log;
import com.codefactoring.android.backlogapi.BacklogApiClient;
import com.codefactoring.android.backlogapi.models.User;
import com.codefactoring.android.backlogtracker.sync.models.BacklogImage;
import com.codefactoring.android.backlogtracker.sync.models.UserDto;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import okhttp3.ResponseBody;
import rx.Observable;
import rx.functions.Func1;
/**
 * Fetches Backlog users (and their avatar images) via the Backlog API and maps
 * them to local {@link UserDto} objects using blocking RxJava pipelines.
 */
public class UserDataFetcher {

    public final String LOG_TAG = UserDataFetcher.class.getSimpleName();

    private final BacklogApiClient mBacklogApiClient;

    public UserDataFetcher(BacklogApiClient backlogApiClient) {
        mBacklogApiClient = backlogApiClient;
    }

    /**
     * Fetches all users and their icons, blocking until complete.
     * On any API error the error is logged and an empty list is used,
     * so this method returns an empty result rather than throwing.
     */
    public List<UserDto> getUserList() {
        return mBacklogApiClient.getUserOperations().getUserList()
                .onErrorReturn(new Func1<Throwable, List<User>>() {
                    @Override
                    public List<User> call(Throwable throwable) {
                        // Swallow API errors; an empty user list is the fallback.
                        Log.e(LOG_TAG, "Error on getUserList", throwable);
                        return new ArrayList<>();
                    }
                })
                .flatMapIterable(new Func1<List<User>, Iterable<User>>() {
                    @Override
                    public Iterable<User> call(List<User> users) {
                        // Flatten List<User> into a stream of individual users.
                        return users;
                    }
                })
                .flatMap(new Func1<User, Observable<UserDto>>() {
                    @Override
                    public Observable<UserDto> call(User user) {
                        // Map each API user to a DTO; fetching the icon here
                        // performs a blocking network call per user.
                        final UserDto userDto = new UserDto();
                        userDto.setId(user.getId());
                        userDto.setUserId(user.getUserId());
                        userDto.setName(user.getName());
                        userDto.setImage(getBacklogImage(user.getId()));
                        return Observable.just(userDto);
                    }
                })
                .toList()
                .toBlocking()
                .first();
    }

    /**
     * Downloads the icon for the given user id, blocking until complete.
     * Returns null when the download or byte read fails.
     */
    private BacklogImage getBacklogImage(final long id) {
        return mBacklogApiClient.getUserOperations().getUserIcon(id)
                .flatMap(new Func1<ResponseBody, Observable<BacklogImage>>() {
                    @Override
                    public Observable<BacklogImage> call(ResponseBody response) {
                        // Filename is "<id>.<image subtype>", e.g. "123.png".
                        final String subtype = response.contentType().subtype();
                        final byte[] bytes;
                        try {
                            bytes = response.bytes();
                        } catch (IOException ex) {
                            Log.e(LOG_TAG, "Error on reading image", ex);
                            // NOTE(review): returning null from flatMap yields a
                            // null Observable, which RxJava typically rejects with
                            // a NullPointerException rather than emitting null —
                            // confirm whether Observable.just(null) was intended.
                            return null;
                        }
                        return Observable.just(new BacklogImage(id + "." + subtype, bytes));
                    }
                })
                .onErrorReturn(new Func1<Throwable, BacklogImage>() {
                    @Override
                    public BacklogImage call(Throwable throwable) {
                        // Any failure results in a null image for this user.
                        Log.e(LOG_TAG, "Error on get Project Icon", throwable);
                        return null;
                    }
                })
                .toBlocking()
                .first();
    }
}
| scarrupt/Capstone-Project | app/src/main/java/com/codefactoring/android/backlogtracker/sync/fetchers/UserDataFetcher.java | Java | apache-2.0 | 3,378 |
import collections
import re
import sys
import time
import traceback
from functools import partial
from multiprocessing import Process, Queue
from unittest import skipIf
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
# TODO add in requirements.txt
from enum import Enum # Remove when switching to py3
from assertions import (assert_all, assert_crc_check_chance_equal,
assert_invalid, assert_none, assert_one,
assert_unavailable)
from dtest import Tester, debug
from nose.plugins.attrib import attr
from tools import known_failure, new_node, require, since
# CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
# pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
# for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
# written.
MIGRATION_WAIT = 5
@since('3.0')
class TestMaterializedViews(Tester):
"""
Test materialized views implementation.
@jira_ticket CASSANDRA-6477
@since 3.0
"""
def prepare(self, user_table=False, rf=1, options=None, nodes=3):
    # Start a cluster of `nodes` nodes (plus an empty second DC slot),
    # create keyspace 'ks' with replication factor `rf`, and return a
    # session connected to node1.
    #
    # user_table: when True, also create the 'users' table and the
    #             'users_by_state' materialized view used by many tests.
    # options: optional cassandra.yaml configuration overrides.
    cluster = self.cluster
    cluster.populate([nodes, 0])
    if options:
        cluster.set_configuration_options(values=options)
    cluster.start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', rf)

    if user_table:
        session.execute(
            ("CREATE TABLE users (username varchar, password varchar, gender varchar, "
             "session_token varchar, state varchar, birth_year bigint, "
             "PRIMARY KEY (username));")
        )

        # create a materialized view
        session.execute(("CREATE MATERIALIZED VIEW users_by_state AS "
                         "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                         "PRIMARY KEY (state, username)"))

    return session
def _insert_data(self, session):
    # Insert four fixture rows into the 'users' table (two in state TX,
    # one in CA, one in FL) used by the read-path assertions.
    insert_stmt = "INSERT INTO users (username, password, gender, state, birth_year) VALUES "
    session.execute(insert_stmt + "('user1', 'ch@ngem3a', 'f', 'TX', 1968);")
    session.execute(insert_stmt + "('user2', 'ch@ngem3b', 'm', 'CA', 1971);")
    session.execute(insert_stmt + "('user3', 'ch@ngem3c', 'f', 'FL', 1978);")
    session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 1974);")
def _replay_batchlogs(self):
    # Force batchlog replay on every running node so that materialized-view
    # updates delivered via the batchlog become visible to queries.
    debug("Replaying batchlog on all nodes")
    for node in self.cluster.nodelist():
        if node.is_running():
            node.nodetool("replaybatchlog")
def create_test(self):
    """Test the materialized view creation"""
    session = self.prepare(user_table=True)

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    # Fixed assertion message: added the missing space after "got".
    self.assertEqual(len(result), 1, "Expecting 1 materialized view, got " + str(result))
def test_gcgs_validation(self):
    """Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
    session = self.prepare(user_table=True)

    # Shouldn't be able to alter the gc_grace_seconds of the base table to 0
    assert_invalid(session,
                   "ALTER TABLE users WITH gc_grace_seconds = 0",
                   "Cannot alter gc_grace_seconds of the base table of a materialized view "
                   "to 0, since this value is used to TTL undelivered updates. Setting "
                   "gc_grace_seconds too low might cause undelivered updates to expire "
                   "before being replayed.")
    # But can alter the gc_grace_seconds of the base table to a value != 0
    session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")

    # Shouldn't be able to alter the gc_grace_seconds of the MV to 0
    assert_invalid(session,
                   "ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
                   "Cannot alter gc_grace_seconds of a materialized view to 0, since "
                   "this value is used to TTL undelivered updates. Setting gc_grace_seconds "
                   "too low might cause undelivered updates to expire before being replayed.")

    # Now let's drop MV
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")

    # Now we should be able to set the gc_grace_seconds of the base table to 0
    session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")

    # Now we shouldn't be able to create a new MV on this table
    assert_invalid(session,
                   "CREATE MATERIALIZED VIEW users_by_state AS "
                   "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                   "PRIMARY KEY (state, username)",
                   "Cannot create materialized view 'users_by_state' for base table 'users' "
                   "with gc_grace_seconds of 0, since this value is used to TTL undelivered "
                   "updates. Setting gc_grace_seconds too low might cause undelivered updates"
                   " to expire before being replayed.")
def insert_test(self):
    """Test basic insertions"""
    session = self.prepare(user_table=True)

    self._insert_data(session)

    # 4 rows in the base table
    result = list(session.execute("SELECT * FROM users;"))
    self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))

    # View partition counts must match the per-state fixture distribution.
    result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
    self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))

    result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
    self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))

    result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
    self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
def populate_mv_after_insert_test(self):
    """Test that a view is OK when created with existing data"""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")

    # Populate the base table BEFORE the view exists (xrange: Python 2 test suite).
    for i in xrange(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))

    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id)"))

    debug("wait that all batchlogs are replayed")
    self._replay_batchlogs()

    # Every pre-existing row must have been back-filled into the view.
    for i in xrange(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
def crc_check_chance_test(self):
    """Test that crc_check_chance parameter is properly populated after mv creation and update"""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    # Create the view with an explicit crc_check_chance ...
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))

    assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)

    # ... then verify ALTER MATERIALIZED VIEW updates it.
    session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")

    assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
def prepared_statement_test(self):
    """Test basic insertions with prepared statement"""
    session = self.prepare(user_table=True)

    insertPrepared = session.prepare(
        "INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);"
    )
    selectPrepared = session.prepare(
        "SELECT state, password, session_token FROM users_by_state WHERE state=?;"
    )

    # insert data via the prepared statement (same fixture as _insert_data)
    session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968)))
    session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971)))
    session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978)))
    session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))

    result = list(session.execute("SELECT * FROM users;"))
    self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))

    # Query the view through the prepared SELECT for each state.
    result = list(session.execute(selectPrepared.bind(['TX'])))
    self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))

    result = list(session.execute(selectPrepared.bind(['CA'])))
    self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))

    result = list(session.execute(selectPrepared.bind(['MA'])))
    self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
def immutable_test(self):
    """Test that a materialized view is immutable"""
    session = self.prepare(user_table=True)

    # cannot insert
    assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
                   "Cannot directly modify a materialized view")

    # cannot update
    assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
                   "Cannot directly modify a materialized view")

    # cannot delete a row
    assert_invalid(session, "DELETE from users_by_state where state='TX';",
                   "Cannot directly modify a materialized view")

    # cannot delete a cell
    assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
                   "Cannot directly modify a materialized view")

    # cannot alter a table
    assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
                   "Cannot use ALTER TABLE on Materialized View")
def drop_mv_test(self):
    """Test that we can drop a view properly"""
    session = self.prepare(user_table=True)

    # create another materialized view
    session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
                     "SELECT * FROM users WHERE birth_year IS NOT NULL AND "
                     "username IS NOT NULL PRIMARY KEY (birth_year, username)"))

    # Two views should now be registered in system_schema.views.
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    self.assertEqual(len(result), 2, "Expecting {} materialized view, got {}".format(2, len(result)))

    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")

    # Only one view should remain after the drop.
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))
def drop_column_test(self):
"""Test that we cannot drop a column if it is used by a MV"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))
assert_invalid(
session,
"ALTER TABLE ks.users DROP state;",
"Cannot drop column state, depended on by materialized views"
)
def drop_table_test(self):
"""Test that we cannot drop a table without deleting its MVs first"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(
len(result), 1,
"Expecting {} materialized view, got {}".format(1, len(result))
)
assert_invalid(
session,
"DROP TABLE ks.users;",
"Cannot drop table when materialized views still depend on it"
)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(
len(result), 1,
"Expecting {} materialized view, got {}".format(1, len(result))
)
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
session.execute("DROP TABLE ks.users;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(
len(result), 0,
"Expecting {} materialized view, got {}".format(1, len(result))
)
    @known_failure(failure_source='test',
                   jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12225',
                   flaky=True)
    def clustering_column_test(self):
        """Test that we can use clustering columns as primary key for a materialized view"""
        session = self.prepare()
        session.default_consistency_level = ConsistencyLevel.QUORUM
        # Base table has two clustering columns (state, birth_year).
        session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
                         "session_token varchar, state varchar, birth_year bigint, "
                         "PRIMARY KEY (username, state, birth_year));"))
        # create a materialized view that use a compound key
        session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
                         "AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
                         "AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))
        # Make sure the view exists on every node before writing.
        session.cluster.control_connection.wait_for_schema_agreement()
        self._insert_data(session)
        # Query by the view's partition key alone, then with the clustering column.
        result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
        self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
        result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
        self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
    def _add_dc_after_mv_test(self, rf):
        """
        @jira_ticket CASSANDRA-10978

        Add datacenter with configurable replication.

        Shared driver: writes 1k rows through a MV, bootstraps two nodes into a
        new dc ('dc2'), then verifies reads from the new dc and fresh writes.
        """
        session = self.prepare(rf=rf)

        debug("Creating schema")
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        debug("Writing 1k to base")
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        debug("Reading 1k from view")
        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

        debug("Reading 1k from base")
        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])

        debug("Bootstrapping new node in another dc")
        node4 = new_node(self.cluster, data_center='dc2')
        node4.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

        debug("Bootstrapping new node in another dc")
        node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
        node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

        # Read the view exclusively through a node in the new datacenter.
        session2 = self.patient_exclusive_cql_connection(node4)

        debug("Verifying data from new node in view")
        for i in xrange(1000):
            assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

        debug("Inserting 100 into base")
        for i in xrange(1000, 1100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        debug("Verify 100 in view")
        for i in xrange(1000, 1100):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
    @known_failure(failure_source='test',
                   jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140',
                   flaky=True)
    def add_dc_after_mv_simple_replication_test(self):
        """
        @jira_ticket CASSANDRA-10634

        Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
        """
        # rf=1 — self.prepare is expected to use SimpleStrategy for an int rf.
        self._add_dc_after_mv_test(1)
    @known_failure(failure_source='test',
                   jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140',
                   flaky=True)
    def add_dc_after_mv_network_replication_test(self):
        """
        @jira_ticket CASSANDRA-10634

        Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
        """
        # dict rf — self.prepare is expected to use NetworkTopologyStrategy.
        self._add_dc_after_mv_test({'dc1': 1, 'dc2': 1})
    @known_failure(failure_source='test',
                   jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140',
                   flaky=True)
    @known_failure(failure_source='test',
                   jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12446',
                   flaky=True)
    def add_node_after_mv_test(self):
        """
        @jira_ticket CASSANDRA-10978

        Test that materialized views work as expected when adding a node.
        """
        session = self.prepare()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        # Seed 1k rows and verify them through the view before bootstrapping.
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

        node4 = new_node(self.cluster)
        node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

        # Read the view through the newly bootstrapped node only.
        session2 = self.patient_exclusive_cql_connection(node4)

        for i in xrange(1000):
            assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

        # New writes after bootstrap must still reach the view.
        for i in xrange(1000, 1100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        for i in xrange(1000, 1100):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
    @known_failure(failure_source='test',
                   jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140',
                   flaky=True)
    def add_write_survey_node_after_mv_test(self):
        """
        @jira_ticket CASSANDRA-10621
        @jira_ticket CASSANDRA-10978

        Test that materialized views work as expected when adding a node in write survey mode.
        """
        session = self.prepare()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

        # Bootstrap a node in write-survey mode: it receives writes but does
        # not serve reads, so the view must stay consistent via other replicas.
        node4 = new_node(self.cluster)
        node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

        for i in xrange(1000, 1100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        # Verify the full range (old and new rows) through the view.
        for i in xrange(1100):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
    def allow_filtering_test(self):
        """Test that allow filtering works as usual for a materialized view"""
        session = self.prepare()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
        session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                         "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

        # Queries restricted to each view's partition key need no filtering.
        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0])

        rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
        self.assertEqual(len(rows), 1000, "Expected 1000 rows but got {}".format(len(rows)))

        # Restricting a non-key column without ALLOW FILTERING must be rejected.
        assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
        assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")

        # The same restrictions succeed once ALLOW FILTERING is supplied.
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i),
                [i, i, 'a', 3.0]
            )
            assert_one(
                session,
                "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(i),
                ['a', i, i, 3.0]
            )
def secondary_index_test(self):
"""Test that secondary indexes cannot be created on a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
"Secondary indexes are not supported on materialized views")
    def ttl_test(self):
        """
        Test that TTL works as expected for a materialized view
        @expected_result The TTL is propagated properly between tables.
        """
        session = self.prepare()
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                         "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

        # Write rows with a 10-second TTL, then verify they reached the view.
        for i in xrange(100):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i))

        for i in xrange(100):
            assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i])

        # Sleep past the TTL (with margin); all view rows must have expired.
        time.sleep(20)

        rows = list(session.execute("SELECT * FROM t_by_v2"))
        self.assertEqual(len(rows), 0, "Expected 0 rows but got {}".format(len(rows)))
    def query_all_new_column_test(self):
        """
        Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
        @expected_result The new column is present in the view.
        """
        session = self.prepare(user_table=True)

        self._insert_data(session)

        assert_one(
            session,
            "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
            ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
        )

        # Add a column to the base table; a SELECT * view must pick it up.
        session.execute("ALTER TABLE users ADD first_name varchar;")

        results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"))
        self.assertEqual(len(results), 1)
        self.assertTrue(hasattr(results[0], 'first_name'), 'Column "first_name" not found')
        # Existing rows expose the new column with a null value.
        assert_one(
            session,
            "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
            ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
        )
    def query_new_column_test(self):
        """
        Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
        @expected_result The new column is not present in the view.
        """
        session = self.prepare(user_table=True)

        # View selects only 'username', so later base-table columns must not appear.
        session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT username FROM users "
                         "WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))

        self._insert_data(session)

        assert_one(
            session,
            "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
            ['TX', 'user1']
        )

        session.execute("ALTER TABLE users ADD first_name varchar;")

        results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
        self.assertEqual(len(results), 1)
        self.assertFalse(hasattr(results[0], 'first_name'), 'Column "first_name" found in view')
        assert_one(
            session,
            "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
            ['TX', 'user1']
        )
def lwt_test(self):
"""Test that lightweight transaction behave properly with a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
debug("Inserting initial data using IF NOT EXISTS")
for i in xrange(1000):
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
)
self._replay_batchlogs()
debug("All rows should have been inserted")
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
debug("Tyring to UpInsert data with a different value using IF NOT EXISTS")
for i in xrange(1000):
v = i * 2
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
)
self._replay_batchlogs()
debug("No rows should have changed")
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
debug("Update the 10 first rows with a different value")
for i in xrange(1000):
v = i + 2000
session.execute(
"UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
)
self._replay_batchlogs()
debug("Verify that only the 10 first rows changed.")
results = list(session.execute("SELECT * FROM t_by_v;"))
self.assertEqual(len(results), 1000)
for i in xrange(1000):
v = i + 2000 if i < 10 else i
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(v),
[v, i, 'a', 3.0]
)
debug("Deleting the first 10 rows")
for i in xrange(1000):
v = i + 2000
session.execute(
"DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
)
self._replay_batchlogs()
debug("Verify that only the 10 first rows have been deleted.")
results = list(session.execute("SELECT * FROM t_by_v;"))
self.assertEqual(len(results), 990)
for i in xrange(10, 1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
    @known_failure(failure_source='test',
                   jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11612',
                   flaky=True,
                   notes='flaps on Windows')
    def interrupt_build_process_test(self):
        """Test that an interrupted MV build process is resumed as it should"""
        session = self.prepare(options={'hinted_handoff_enabled': False})
        node1, node2, node3 = self.cluster.nodelist()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")

        debug("Inserting initial data")
        for i in xrange(10000):
            session.execute(
                "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
            )

        # Creating the view after the data exists forces a background build.
        debug("Create a MV")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        # Stopping the whole cluster interrupts the in-progress build.
        debug("Stop the cluster. Interrupt the MV build process.")
        self.cluster.stop()

        debug("Restart the cluster")
        self.cluster.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node1)
        session.execute("USE ks")

        debug("MV shouldn't be built yet.")
        assert_none(session, "SELECT * FROM t_by_v WHERE v=10000;")

        # Poll until the row count reaches 10000 (the assertNotEqual FAILS once
        # the build completes, which is the exit condition), or 2 minutes pass.
        debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
        start = time.time()
        while True:
            try:
                result = list(session.execute("SELECT count(*) FROM t_by_v;"))
                self.assertNotEqual(result[0].count, 10000)
            except AssertionError:
                debug("MV build process is finished")
                break

            elapsed = (time.time() - start) / 60
            if elapsed > 2:
                break

            time.sleep(5)

        debug("Verify all data")
        result = list(session.execute("SELECT count(*) FROM t_by_v;"))
        self.assertEqual(result[0].count, 10000)
        for i in xrange(10000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ALL
            )
    def view_tombstone_test(self):
        """
        Test that a materialized views properly tombstone

        @jira_ticket CASSANDRA-10261
        @jira_ticket CASSANDRA-10910
        """
        self.prepare(rf=3, options={'hinted_handoff_enabled': False})
        node1, node2, node3 = self.cluster.nodelist()

        session = self.patient_exclusive_cql_connection(node1)
        session.max_trace_wait = 120
        session.execute('USE ks')

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))

        session.cluster.control_connection.wait_for_schema_agreement()

        # Set initial values TS=0, verify
        session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0",
                                        consistency_level=ConsistencyLevel.ALL))
        self._replay_batchlogs()
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'a', 3.0]
        )

        # Update a non-key column at TS=1; view row (v=1) is kept, v2 updated.
        session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1",
                                        consistency_level=ConsistencyLevel.ALL))
        self._replay_batchlogs()
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'b', 3.0]
        )

        # change v's value and TS=3, tombstones v=1 and adds v=0 record
        session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1",
                                        consistency_level=ConsistencyLevel.ALL))
        self._replay_batchlogs()
        assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")

        debug('Shutdown node2')
        node2.stop(wait_other_notice=True)

        # With node2 down, flip v back to 1 at TS=4 (QUORUM still succeeds).
        session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1",
                                        consistency_level=ConsistencyLevel.QUORUM))
        self._replay_batchlogs()
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'b', 3.0]
        )

        node2.start(wait_other_notice=True, wait_for_binary_proto=True)

        # We should get a digest mismatch
        query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
                                consistency_level=ConsistencyLevel.ALL)

        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), True)

        # We should not get a digest mismatch the second time
        query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL)

        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), False)

        # Verify values one last time
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'b', 3.0],
            cl=ConsistencyLevel.ALL
        )
def check_trace_events(self, trace, expect_digest):
# we should see multiple requests get enqueued prior to index scan
# execution happening
# Look for messages like:
# Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey
regex = r"Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey"
for event in trace.events:
desc = event.description
match = re.match(regex, desc)
if match:
if expect_digest:
break
else:
self.fail("Encountered digest mismatch when we shouldn't")
else:
if expect_digest:
self.fail("Didn't find digest mismatch")
    def simple_repair_test(self):
        """
        Test that a materialized view are consistent after a simple repair.
        """
        session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
        node1, node2, node3 = self.cluster.nodelist()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        session.cluster.control_connection.wait_for_schema_agreement()

        # Write while node2 is down (no hints), so node2 misses all data.
        debug('Shutdown node2')
        node2.stop(wait_other_notice=True)

        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

        self._replay_batchlogs()

        debug('Verify the data in the MV with CL=ONE')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )

        debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
        for i in xrange(1000):
            statement = SimpleStatement(
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                consistency_level=ConsistencyLevel.ALL
            )

            assert_unavailable(
                session.execute,
                statement
            )

        # Repair must bring node2's view replica up to date.
        debug('Start node2, and repair')
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
        node1.repair()

        debug('Verify the data in the MV with CL=ONE. All should be available now.')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ONE
            )
    def base_replica_repair_test(self):
        """
        Test that a materialized view are consistent after the repair of the base replica.
        """
        self.prepare(rf=3)
        node1, node2, node3 = self.cluster.nodelist()
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        session.cluster.control_connection.wait_for_schema_agreement()

        debug('Write initial data')
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

        self._replay_batchlogs()

        debug('Verify the data in the MV with CL=ALL')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ALL
            )

        # Wipe node1 completely and restart it empty, simulating a lost replica.
        debug('Shutdown node1')
        node1.stop(wait_other_notice=True)
        debug('Delete node1 data')
        node1.clear(clear_all=True)
        debug('Restarting node1')
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)

        # Isolate node1 so reads can only be served from its (empty) replica.
        debug('Shutdown node2 and node3')
        node2.stop(wait_other_notice=True)
        node3.stop(wait_other_notice=True)

        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')

        debug('Verify that there is no data on node1')
        for i in xrange(1000):
            assert_none(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i)
            )

        debug('Restarting node2 and node3')
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)

        # Just repair the base replica
        node1.nodetool("repair ks t")

        # Repairing only the base table must also rebuild node1's view data.
        debug('Verify data with cl=ALL')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )
    @attr("resource-intensive")
    def complex_repair_test(self):
        """
        Test that a materialized view are consistent after a more complex repair.
        """
        session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
        node1, node2, node3, node4, node5 = self.cluster.nodelist()

        # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
        session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
                        "WITH gc_grace_seconds = 5")
        session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        session.cluster.control_connection.wait_for_schema_agreement()

        # Partition the cluster: write only to node1/node4/node5.
        debug('Shutdown node2 and node3')
        node2.stop()
        node3.stop(wait_other_notice=True)

        debug('Write initial data to node1 (will be replicated to node4 and node5)')
        for i in xrange(1000):
            session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

        debug('Verify the data in the MV on node1 with CL=ONE')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )

        debug('Close connection to node1')
        session.cluster.shutdown()

        # Swap the live set: now only node2/node3 are up, with none of the data.
        debug('Shutdown node1, node4 and node5')
        node1.stop()
        node4.stop()
        node5.stop()

        debug('Start nodes 2 and 3')
        node2.start()
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)

        session2 = self.patient_cql_connection(node2)

        debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
        for i in xrange(1000):
            assert_none(
                session2,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
            )

        debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
        for i in xrange(1000):
            # we write i*2 as value, instead of i
            session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))

        debug('Verify the new data in the MV on node2 with CL=ONE')
        for i in xrange(1000):
            v = i * 2
            assert_one(
                session2,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
                [v, v, 'a', 3.0]
            )

        debug('Wait for batchlogs to expire from node2 and node3')
        time.sleep(5)

        debug('Start remaining nodes')
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
        node4.start(wait_other_notice=True, wait_for_binary_proto=True)
        node5.start(wait_other_notice=True, wait_for_binary_proto=True)

        session = self.patient_cql_connection(node1)

        # Quorum still favours the first write set until repair runs.
        debug('Read data from MV at QUORUM (old data should be returned)')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.QUORUM
            )

        debug('Run global repair on node1')
        node1.repair()

        debug('Read data from MV at quorum (new data should be returned after repair)')
        for i in xrange(1000):
            v = i * 2
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
                [v, v, 'a', 3.0],
                cl=ConsistencyLevel.QUORUM
            )
    def really_complex_repair_test(self):
        """
        Test that a materialized view are consistent after a more complex repair.
        """
        session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
        node1, node2, node3, node4, node5 = self.cluster.nodelist()

        # we create the base table with gc_grace_seconds=1 so batchlog will expire quickly
        session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
                        "WITH gc_grace_seconds = 1")
        session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
                         "v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))

        session.cluster.control_connection.wait_for_schema_agreement()

        debug('Shutdown node2 and node3')
        node2.stop(wait_other_notice=True)
        node3.stop(wait_other_notice=True)

        # Writes with v2='a' then v2='b'; the 'b' writes shadow the 'a' view rows.
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
        self._replay_batchlogs()
        debug('Verify the data in the MV on node1 with CL=ONE')
        assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])

        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
        self._replay_batchlogs()
        debug('Verify the data in the MV on node1 with CL=ONE')
        assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])

        session.shutdown()

        # Swap the live set: only node2/node3 remain, with none of the data.
        debug('Shutdown node1, node4 and node5')
        node1.stop()
        node4.stop()
        node5.stop()

        debug('Start nodes 2 and 3')
        node2.start()
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)

        session2 = self.patient_cql_connection(node2)
        session2.execute('USE ks')

        debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")

        debug('Write new data in node2 that overlap those in node1')
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
        self._replay_batchlogs()
        assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])

        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
        self._replay_batchlogs()
        assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])

        debug("Composite delete of everything")
        session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
        session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
        self._replay_batchlogs()
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")

        debug('Wait for batchlogs to expire from node2 and node3')
        time.sleep(5)

        debug('Start remaining nodes')
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
        node4.start(wait_other_notice=True, wait_for_binary_proto=True)
        node5.start(wait_other_notice=True, wait_for_binary_proto=True)

        # at this point the data isn't repaired so we have an inconsistency.
        # this value should return None
        assert_all(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
            cl=ConsistencyLevel.QUORUM
        )

        debug('Run global repair on node1')
        node1.repair()

        # After repair the node2/node3 deletions win; the stale 'a' rows vanish.
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
    def complex_mv_select_statements_test(self):
        """
        Test complex MV select statements
        @jira_ticket CASSANDRA-9664

        For each candidate MV primary-key layout, builds a filtered view
        (a = 1 AND c = 1) and checks inserts/updates/deletes flow through
        correctly, then tears the schema down for the next layout.
        """
        cluster = self.cluster
        cluster.populate(3).start()
        node1 = cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)
        session.default_consistency_level = ConsistencyLevel.QUORUM

        debug("Creating keyspace")
        session.execute("CREATE KEYSPACE mvtest WITH replication = "
                        "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
        session.execute('USE mvtest')

        # Every legal permutation of (a, b, c) as the view's primary key.
        mv_primary_keys = ["((a, b), c)",
                           "((b, a), c)",
                           "(a, b, c)",
                           "(c, b, a)",
                           "((c, a), b)"]

        for mv_primary_key in mv_primary_keys:

            session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")

            insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
            update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
            delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
            delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")

            session.cluster.control_connection.wait_for_schema_agreement()

            # Seed rows; only (a=1, c=1) rows can ever appear in the view.
            rows = [(0, 0, 0, 0),
                    (0, 0, 1, 0),
                    (0, 1, 0, 0),
                    (0, 1, 1, 0),
                    (1, 0, 0, 0),
                    (1, 0, 1, 0),
                    (1, 1, -1, 0),
                    (1, 1, 0, 0),
                    (1, 1, 1, 0)]
            for row in rows:
                session.execute(insert_stmt, row)

            debug("Testing MV primary key: {}".format(mv_primary_key))

            session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
                            "a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
            time.sleep(3)

            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )

            # insert new rows that does not match the filter
            session.execute(insert_stmt, (0, 0, 1, 0))
            session.execute(insert_stmt, (1, 1, 0, 0))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )

            # insert new row that does match the filter
            session.execute(insert_stmt, (1, 2, 1, 0))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )

            # update rows that does not match the filter
            session.execute(update_stmt, (1, 1, -1, 0))
            session.execute(update_stmt, (0, 1, 1, 0))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )

            # update a row that does match the filter
            session.execute(update_stmt, (2, 1, 1, 1))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )

            # delete rows that does not match the filter
            session.execute(delete_stmt1, (1, 1, -1))
            session.execute(delete_stmt1, (2, 0, 1))
            session.execute(delete_stmt2, (0,))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )

            # delete a row that does match the filter
            session.execute(delete_stmt1, (1, 1, 1))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )

            # delete a partition that matches the filter
            session.execute(delete_stmt2, (1,))
            assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)

            # Cleanup
            session.execute("DROP MATERIALIZED VIEW mv")
            session.execute("DROP TABLE test")
# For read verification
class MutationPresence(Enum):
    """Classification of one row's presence in the view versus the base table."""
    match = 1
    extra = 2
    missing = 3
    excluded = 4
    unknown = 5
class MM(object):
    """Base record for a mutation-presence verdict; subclasses set ``mp``."""

    mp = None

    def out(self):
        # Base verdict carries no message.
        return None
class Match(MM):
    """Row present in the view with the expected value."""

    def __init__(self):
        self.mp = MutationPresence.match

    def out(self):
        # Nothing to report for a match.
        return None
class Extra(MM):
    """A view row exists but holds a different value than the base table."""
    # Class-level defaults; instances overwrite these in __init__.
    expecting = None
    value = None
    row = None
    def __init__(self, expecting, value, row):
        self.mp = MutationPresence.extra
        self.expecting = expecting
        self.value = value
        self.row = row
    def out(self):
        return "Extra. Expected {} instead of {}; row: {}".format(self.expecting, self.value, self.row)
class Missing(MM):
    """A view row was expected for this index but the read returned nothing."""
    # Class-level defaults; instances overwrite these in __init__.
    value = None
    row = None
    def __init__(self, value, row):
        self.mp = MutationPresence.missing
        self.value = value
        self.row = row
    def out(self):
        return "Missing. At {}".format(self.row)
class Excluded(MM):
    """No view row was expected and none was present."""
    def __init__(self):
        self.mp = MutationPresence.excluded
    def out(self):
        # Correct absence; nothing to report.
        return None
class Unknown(MM):
    """The result fit none of the expected cases (should never happen)."""
    def __init__(self):
        self.mp = MutationPresence.unknown
    def out(self):
        # NOTE(review): returning None keeps these out of the per-row log even
        # though the run counter reports them as "WTF" — confirm intentional.
        return None
# Consistency levels used by the write path and the verification reads below.
readConsistency = ConsistencyLevel.QUORUM
writeConsistency = ConsistencyLevel.QUORUM
SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')


def row_generate(i, num_partitions):
    """Deterministically map index ``i`` to a SimpleRow.

    ``a`` spreads rows over ``num_partitions`` partitions, ``b`` is the
    clustering column and ``c``/``d`` carry the index itself.  Floor division
    (``//``) keeps ``b`` an int on both Python 2 and Python 3 — the original
    ``/`` produced a float under Python 3, which would break the (a, b)
    dictionary keys compared against the int values read back from the DB.
    """
    return SimpleRow(a=i % num_partitions, b=(i % 400) // num_partitions, c=i, d=i)
# Create a threaded session and execute queries from a Queue
def thread_session(ip, queue, start, end, rows, num_partitions):
    """Worker-process entry point.

    Opens its own cluster connection to ``ip`` and verifies every index in
    ``[start, end)`` against the ``rows`` snapshot of the base table, pushing
    one MM-subclass result per index onto ``queue``.
    """
    def execute_query(session, select_gi, i):
        # Recompute the row index i should have produced and compare it with
        # what the materialized view actually returns for (c, a).
        row = row_generate(i, num_partitions)
        if (row.a, row.b) in rows:
            base = rows[(row.a, row.b)]
        else:
            base = -1  # no base row recorded for this (a, b) key
        gi = list(session.execute(select_gi, [row.c, row.a]))
        if base == i and len(gi) == 1:
            return Match()
        elif base != i and len(gi) == 1:
            return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3]))
        elif base == i and len(gi) == 0:
            return Missing(base, i)
        elif base != i and len(gi) == 0:
            return Excluded()
        else:
            return Unknown()
    try:
        cluster = Cluster([ip])
        session = cluster.connect()
        select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?")
        select_gi.consistency_level = readConsistency
        for i in range(start, end):
            ret = execute_query(session, select_gi, i)
            queue.put_nowait(ret)
    except Exception as e:
        # Function-call form so the module parses on Python 3 as well as 2;
        # the original `print str(e)` is a SyntaxError under Python 3.
        print(str(e))
    queue.close()
@since('3.0')
@skipIf(sys.platform == 'win32', 'Bug in python on Windows: https://bugs.python.org/issue10128')
class TestMaterializedViewsConsistency(Tester):

    def prepare(self, user_table=False):
        """Start a 3-node cluster, reset the bookkeeping counters, and return
        a session connected to node 2."""
        cluster = self.cluster
        cluster.populate(3).start()
        node2 = cluster.nodelist()[1]
        # Keep the status of async requests
        self.exception_type = collections.Counter()
        self.num_request_done = 0
        self.counts = {}
        for mp in MutationPresence:
            self.counts[mp] = 0
        self.rows = {}
        self.update_stats_every = 100
        debug("Set to talk to node 2")
        self.session = self.patient_cql_connection(node2)
        return self.session

    def _print_write_status(self, row):
        # Overwrite the current console line with write progress plus a tally
        # of each exception type seen so far.
        output = "\r{}".format(row)
        for key in self.exception_type.keys():
            output = "{} ({}: {})".format(output, key, self.exception_type[key])
        sys.stdout.write(output)
        sys.stdout.flush()

    def _print_read_status(self, row):
        # Overwrite the current console line with the read-verification tallies.
        if self.counts[MutationPresence.unknown] == 0:
            sys.stdout.write(
                "\rOn {}; match: {}; extra: {}; missing: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing])
            )
        else:
            sys.stdout.write(
                "\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing],
                    # Fixed: was `MutationPresence.unkown` (typo), which raised
                    # AttributeError whenever an Unknown result had been counted.
                    self.counts[MutationPresence.unknown])
            )
        sys.stdout.flush()

    def _do_row(self, insert_stmt, i, num_partitions):
        """Fire one asynchronous insert for index ``i`` and attach the
        success/error callbacks that keep the progress counters current."""
        # Error callback for async requests
        def handle_errors(row, exc):
            self.num_request_done += 1
            try:
                name = type(exc).__name__
                self.exception_type[name] += 1
            except Exception as e:
                # Function-call print keeps this valid on Python 3.
                print(traceback.format_exception_only(type(e), e))

        # Success callback for async requests
        def success_callback(row):
            self.num_request_done += 1
            if i % self.update_stats_every == 0:
                self._print_write_status(i)

        row = row_generate(i, num_partitions)
        # Renamed from `async`, which is a reserved keyword from Python 3.7 on.
        future = self.session.execute_async(insert_stmt, row)
        errors = partial(handle_errors, row)
        future.add_callbacks(success_callback, errors)

    def _populate_rows(self):
        """Snapshot the base table into ``self.rows`` keyed by (a, b)."""
        statement = SimpleStatement(
            "SELECT a, b, c FROM mvtest.test1",
            consistency_level=readConsistency
        )
        data = self.session.execute(statement)
        for row in data:
            self.rows[(row.a, row.b)] = row.c

    @known_failure(failure_source='cassandra',
                   jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11290',
                   flaky=True)
    @require(11290)
    def single_partition_consistent_reads_after_write_test(self):
        """
        Tests consistency of multiple writes to a single partition
        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(1)

    def multi_partition_consistent_reads_after_write_test(self):
        """
        Tests consistency of multiple writes to a multiple partitions
        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(20)

    def _consistent_reads_after_write_test(self, num_partitions):
        """Write 100k rows (10% before the view exists, 90% after), then
        verify every index through the view using worker processes."""
        session = self.prepare()
        node1, node2, node3 = self.cluster.nodelist()
        # Test config
        lower = 0
        upper = 100000
        processes = 4
        queues = [None] * processes
        # Floor division keeps the per-process slice an int under Python 3 too.
        eachProcess = (upper - lower) // processes
        debug("Creating schema")
        session.execute(
            ("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = "
             "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
        )
        session.execute(
            "CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))"
        )
        session.cluster.control_connection.wait_for_schema_agreement()
        insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)")
        insert1.consistency_level = writeConsistency
        debug("Writing data to base table")
        for i in range(upper // 10):
            self._do_row(insert1, i, num_partitions)
        debug("Creating materialized view")
        session.execute(
            ('CREATE MATERIALIZED VIEW mvtest.mv1 AS '
             'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND '
             'c IS NOT NULL PRIMARY KEY (c,a,b)')
        )
        session.cluster.control_connection.wait_for_schema_agreement()
        debug("Writing more data to base table")
        for i in range(upper // 10, upper):
            self._do_row(insert1, i, num_partitions)
        # Wait that all requests are done
        while self.num_request_done < upper:
            time.sleep(1)
        debug("Making sure all batchlogs are replayed on node1")
        node1.nodetool("replaybatchlog")
        debug("Making sure all batchlogs are replayed on node2")
        node2.nodetool("replaybatchlog")
        debug("Making sure all batchlogs are replayed on node3")
        node3.nodetool("replaybatchlog")
        debug("Finished writes, now verifying reads")
        self._populate_rows()
        # Fan the verification out over worker processes; the last worker picks
        # up any remainder of the range.
        for i in range(processes):
            start = lower + (eachProcess * i)
            if i == processes - 1:
                end = upper
            else:
                end = lower + (eachProcess * (i + 1))
            q = Queue()
            node_ip = self.get_ip_from_node(node2)
            p = Process(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions))
            p.start()
            queues[i] = q
        # Drain results in the same round-robin order the workers were assigned.
        for i in range(lower, upper):
            if i % 100 == 0:
                self._print_read_status(i)
            mm = queues[i % processes].get()
            if not mm.out() is None:
                sys.stdout.write("\r{}\n" .format(mm.out()))
            self.counts[mm.mp] += 1
        self._print_read_status(upper)
        sys.stdout.write("\n")
        sys.stdout.flush()
| thobbs/cassandra-dtest | materialized_views_test.py | Python | apache-2.0 | 61,309 |
/*
* Copyright 2010-2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.autoscaling.model.transform;
import org.w3c.dom.Node;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.util.XpathUtils;
import com.amazonaws.transform.StandardErrorUnmarshaller;
import com.amazonaws.services.autoscaling.model.LimitExceededException;
/**
 * Unmarshaller that recognizes the "LimitExceeded" error code and converts
 * the error response into a {@link LimitExceededException}.
 */
public class LimitExceededExceptionUnmarshaller extends StandardErrorUnmarshaller {

    public LimitExceededExceptionUnmarshaller() {
        super(LimitExceededException.class);
    }

    /**
     * Converts the error node into a LimitExceededException, or returns
     * {@code null} if the error code is one this unmarshaller does not handle.
     *
     * @param node the parsed error response document
     * @return the unmarshalled exception, or null when the code does not match
     * @throws Exception if the response cannot be parsed
     */
    public AmazonServiceException unmarshall(Node node) throws Exception {
        // Constant-first equals also covers a null error code.
        final String errorCode = parseErrorCode(node);
        if (!"LimitExceeded".equals(errorCode)) {
            return null;
        }
        return (LimitExceededException) super.unmarshall(node);
    }
}
| apetresc/aws-sdk-for-java-on-gae | src/main/java/com/amazonaws/services/autoscaling/model/transform/LimitExceededExceptionUnmarshaller.java | Java | apache-2.0 | 1,518 |
package edu.kit.ipd.sdq.kamp.ruledsl.runtime.rule;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import edu.kit.ipd.sdq.kamp.architecture.AbstractArchitectureVersion;
import edu.kit.ipd.sdq.kamp.propagation.AbstractChangePropagationAnalysis;
import edu.kit.ipd.sdq.kamp.ruledsl.support.ChangePropagationStepRegistry;
import edu.kit.ipd.sdq.kamp.ruledsl.support.IRule;
/**
 * This standard (helper) rule is used to measure the time of a given rule.
 *
 * @author Martin Löper
 *
 */
public class StopwatchRule implements IRule {
    /** Accumulates the wall-clock time spent inside {@link #apply}. */
    private final Stopwatch stopwatch;
    /** The rule whose execution time is measured. */
    private final IRule rule;
    /** How many times one call to {@link #apply} invokes the wrapped rule. */
    private final long iterations;

    /**
     * Creates a Stopwatch (wrapper) rule for the given {@code rule} with a single iteration.
     *
     * @param rule the rule which will be observed
     */
    public StopwatchRule(IRule rule) {
        this(rule, 1);
    }

    /**
     * Creates a Stopwatch (wrapper) rule for the given {@code rule}.
     *
     * @param rule the rule which will be observed
     * @param iterations the number of times the {@link IRule#apply(AbstractArchitectureVersion, ChangePropagationStepRegistry)}
     *        method of {@code rule} is called; must be at least 1
     * @throws NullPointerException if {@code rule} is {@code null}
     * @throws IllegalArgumentException if {@code iterations} is less than 1
     */
    public StopwatchRule(IRule rule, long iterations) {
        if (rule == null) {
            throw new NullPointerException("rule must not be null");
        }
        // Reject a non-positive count early: it would silently time nothing and
        // later make getElapsedTimePerIteration() divide by zero.
        if (iterations < 1) {
            throw new IllegalArgumentException("iterations must be >= 1 but was " + iterations);
        }
        this.stopwatch = Stopwatch.createUnstarted();
        this.rule = rule;
        this.iterations = iterations;
    }

    /**
     * Runs the wrapped rule {@code iterations} times while the stopwatch is
     * running. Repeated calls accumulate onto the same stopwatch.
     */
    @Override
    public void apply(AbstractArchitectureVersion version, ChangePropagationStepRegistry registry) {
        this.stopwatch.start();
        for (long i = 0; i < this.iterations; i++) {
            this.rule.apply(version, registry);
        }
        this.stopwatch.stop();
    }

    /**
     * Returns the elapsed time in the given time format.
     * @see Stopwatch#elapsed(TimeUnit)
     * @param timeUnit the time unit which is used to express the elapsed time
     * @return the elapsed time in the given time unit
     */
    public long getElapsedTime(TimeUnit timeUnit) {
        return this.stopwatch.elapsed(timeUnit);
    }

    /**
     * Returns the elapsed time per iteration in the given time format.
     * This essentially divides the total time by the number of iterations
     * (the constructor guarantees iterations >= 1, so this cannot divide by zero).
     * @param timeUnit the time unit which is used to express the elapsed time
     * @return the elapsed time per iteration in the given time unit
     */
    public long getElapsedTimePerIteration(TimeUnit timeUnit) {
        return this.stopwatch.elapsed(timeUnit) / this.iterations;
    }

    /**
     * Returns the elapsed time in a human readable format.
     * @see Stopwatch#toString()
     * @return the elapsed time (human readable)
     */
    public String getElapsedTimeAsString() {
        return this.stopwatch.toString();
    }
}
| MartinLoeper/KAMP-DSL | edu.kit.ipd.sdq.kamp.ruledsl/src/edu/kit/ipd/sdq/kamp/ruledsl/runtime/rule/StopwatchRule.java | Java | apache-2.0 | 2,567 |
var when = require('when');
var request = require('request');
var settings = require("../../../../settings");
var log = require("../../../log");
// view components
var view_start = require('../nodes/view/start');
var view_choice = require('../nodes/view/choice');
var view_form = require('../nodes/view/form');
var view_grid = require('../nodes/view/grid');
var view_info = require('../nodes/view/info');
var view_url = require('../nodes/view/url');
var view_zendesk_ticket = require('../nodes/view/zendesk-ticket');
var submit_call = require('../nodes/view/call');
var zopim_chat = require('../nodes/view/zopim_chat');
// action components
var action_submit_email = require('../nodes/action/submit-email');
var action_submit_zendesk_ticket = require('../nodes/action/submit-zendesk-ticket');
var PLIST_DEPLOY = settings.staticPlistSubmittingService;
var PLIST_HOST = settings.staticPlistHostingUrl || "https://designer.ubicall.com/plist/";
if (!PLIST_DEPLOY) {
throw new Error("ws.ubicall.com is abslote use new configuration i.e. config_version=20150920")
}
/**
 * Serializes a designer flow into a plain object keyed by node id.
 * The result is seeded from the flow's start node; every recognized node
 * type is converted through its dedicated factory.
 */
function extractFlow(flow) {
    return when.promise(function(resolve, reject) {
        // node.type -> factory producing the serialized representation.
        var builders = {
            "view-choice": function(node) { return view_choice.createChoice(node); },
            "view-form": function(node) { return view_form.createForm(node); },
            "view-grid": function(node) { return view_grid.createGrid(node); },
            "view-info": function(node) { return view_info.createInfo(node); },
            "view-url": function(node) { return view_url.createURL(node); },
            "view-zendesk-ticket-form": function(node) { return view_zendesk_ticket.createZendeskForm(node); },
            "view-submit-call": function(node) { return submit_call.createViewCall(node); },
            "view-zopim-chat": function(node) { return zopim_chat.createZopimChat(node); },
            // action components
            "action-submit-email": function(node) { return action_submit_email.createActionEmail(node); },
            "action-submit-zendesk-ticket": function(node) { return action_submit_zendesk_ticket.createActionZendeskTicket(node); }
        };
        // Node types that are intentionally skipped without logging.
        var silentTypes = {
            "start": true,
            "view-zendesk-help-center": true,
            "tab": true
        };
        // initialize flow with content of start node
        var result = view_start.createStart(flow);
        for (var i = 0; i < flow.Nodes.length; i++) {
            var node = flow.Nodes[i];
            var build = builders[node.type];
            if (build) {
                result[node.id] = build(node);
            } else if (!silentTypes[node.type]) {
                log.info("unknown node " + JSON.stringify(node));
            }
        }
        return resolve(result);
    });
}
/**
 * POSTs the given flow version to the plist deployment service.
 * Resolves with the response body on HTTP 200, rejects with the error or
 * the non-200 status code otherwise.
 */
function deployFlowOnline(authorization_header, version) {
    return when.promise(function(resolve, reject) {
        var options = {
            url: PLIST_DEPLOY + version,
            method: 'POST'
        };
        // Forward the caller's Authorization header verbatim when present.
        if (authorization_header) {
            options.headers = options.headers || {};
            options.headers['Authorization'] = authorization_header;
        }
        if (process.env.node_env !== "production") {
            log.warn("This info appear because you are not start with production flag");
            log.warn(JSON.stringify(options, null, 4));
        }
        request(options, function(err, response, body) {
            if (!err && response.statusCode === 200) {
                return resolve(body);
            }
            var reason = err || response.statusCode;
            log.error(reason);
            return reject(reason);
        });
    });
}
// Public API of the plist utilities module.
module.exports = {
    extractFlow: extractFlow,
    deployFlowOnline: deployFlowOnline
} | Ubicall/node-red | red/ubicall/plist/utils/index.js | JavaScript | apache-2.0 | 3,754 |
/**
* @license
* Copyright 2012 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview JavaScript for Blockly's Code demo.
* @author fraser@google.com (Neil Fraser)
*/
'use strict';
/**
 * Create a namespace for the application.
 */
var Code = {};
/**
 * Lookup for names of supported languages. Keys should be in ISO 639 format.
 * Values are the language names written in that language; they are shown
 * verbatim in the language selection menu.
 */
Code.LANGUAGE_NAME = {
  'ar': 'العربية',
  'be-tarask': 'Taraškievica',
  'br': 'Brezhoneg',
  'ca': 'Català',
  'cs': 'Česky',
  'da': 'Dansk',
  'de': 'Deutsch',
  'el': 'Ελληνικά',
  'en': 'English',
  'es': 'Español',
  'et': 'Eesti',
  'fa': 'فارسی',
  'fr': 'Français',
  'he': 'עברית',
  'hrx': 'Hunsrik',
  'hu': 'Magyar',
  'ia': 'Interlingua',
  'is': 'Íslenska',
  'it': 'Italiano',
  'ja': '日本語',
  'kab': 'Kabyle',
  'ko': '한국어',
  'mk': 'Македонски',
  'ms': 'Bahasa Melayu',
  'nb': 'Norsk Bokmål',
  'nl': 'Nederlands, Vlaams',
  'oc': 'Lenga d\'òc',
  'pl': 'Polski',
  'pms': 'Piemontèis',
  'pt-br': 'Português Brasileiro',
  'ro': 'Română',
  'ru': 'Русский',
  'sc': 'Sardu',
  'sk': 'Slovenčina',
  'sr': 'Српски',
  'sv': 'Svenska',
  'ta': 'தமிழ்',
  'th': 'ภาษาไทย',
  'tlh': 'tlhIngan Hol',
  'tr': 'Türkçe',
  'uk': 'Українська',
  'vi': 'Tiếng Việt',
  'zh-hans': '简体中文',
  'zh-hant': '正體中文'
};
/**
 * List of RTL languages.
 * NOTE(review): 'lki' has no entry in Code.LANGUAGE_NAME so it cannot be
 * selected from the menu — confirm whether it is still needed here.
 */
Code.LANGUAGE_RTL = ['ar', 'fa', 'he', 'lki'];
/**
 * Blockly's main workspace.  Assigned in Code.init().
 * @type {Blockly.WorkspaceSvg}
 */
Code.workspace = null;
/**
 * Extracts a parameter from the URL's query string.
 * @param {string} name The name of the parameter.
 * @param {string} defaultValue Value to return if the parameter is absent.
 * @return {string} The parameter value, or the default value if not found.
 */
Code.getStringParamFromUrl = function(name, defaultValue) {
  var pattern = new RegExp('[?&]' + name + '=([^&]+)');
  var match = location.search.match(pattern);
  if (!match) {
    return defaultValue;
  }
  // '+' encodes a space in query strings; normalize it before decoding.
  return decodeURIComponent(match[1].replace(/\+/g, '%20'));
};
/**
 * Get the language of this user from the URL.
 * Falls back to English for unknown or missing language codes.
 * @return {string} User's language.
 */
Code.getLang = function() {
  var lang = Code.getStringParamFromUrl('lang', '');
  return Code.LANGUAGE_NAME[lang] === undefined ? 'en' : lang;
};
/**
 * Is the current language (Code.LANG) an RTL language?
 * @return {boolean} True if RTL, false if LTR.
 */
Code.isRtl = function() {
  var rtlIndex = Code.LANGUAGE_RTL.indexOf(Code.LANG);
  return rtlIndex >= 0;
};
/**
 * Load blocks saved on App Engine Storage or in session/local storage.
 * Priority order: cloud storage (#hash), session storage (language switch),
 * caller-supplied default XML, then a deferred cloud restore.
 * @param {string} defaultXml Text representation of default blocks.
 */
Code.loadBlocks = function(defaultXml) {
  try {
    var loadOnce = window.sessionStorage.loadOnceBlocks;
  } catch(e) {
    // Firefox sometimes throws a SecurityError when accessing sessionStorage.
    // Restarting Firefox fixes this, so it looks like a bug.
    var loadOnce = null;
  }
  if ('BlocklyStorage' in window && window.location.hash.length > 1) {
    // An href with #key triggers an AJAX call to retrieve saved blocks.
    BlocklyStorage.retrieveXml(window.location.hash.substring(1));
  } else if (loadOnce) {
    // Language switching stores the blocks during the reload.
    delete window.sessionStorage.loadOnceBlocks;
    var xml = Blockly.Xml.textToDom(loadOnce);
    Blockly.Xml.domToWorkspace(xml, Code.workspace);
  } else if (defaultXml) {
    // Load the editor with default starting blocks.
    var xml = Blockly.Xml.textToDom(defaultXml);
    Blockly.Xml.domToWorkspace(xml, Code.workspace);
  } else if ('BlocklyStorage' in window) {
    // Restore saved blocks in a separate thread so that subsequent
    // initialization is not affected from a failed load.
    window.setTimeout(BlocklyStorage.restoreBlocks, 0);
  }
};
/**
 * Save the blocks and reload with a different language.
 * The workspace is serialized into sessionStorage so Code.loadBlocks() can
 * restore it after the page reload.
 */
Code.changeLanguage = function() {
  // Store the blocks for the duration of the reload.
  // MSIE 11 does not support sessionStorage on file:// URLs.
  if (window.sessionStorage) {
    var xml = Blockly.Xml.workspaceToDom(Code.workspace);
    var text = Blockly.Xml.domToText(xml);
    window.sessionStorage.loadOnceBlocks = text;
  }
  var languageMenu = document.getElementById('languageMenu');
  var newLang = encodeURIComponent(
      languageMenu.options[languageMenu.selectedIndex].value);
  var search = window.location.search;
  if (search.length <= 1) {
    // No query string yet: create one.
    search = '?lang=' + newLang;
  } else if (search.match(/[?&]lang=[^&]*/)) {
    // Replace the existing lang parameter in place.
    search = search.replace(/([?&]lang=)[^&]*/, '$1' + newLang);
  } else {
    // Query string exists but has no lang parameter: prepend it.
    search = search.replace(/\?/, '?lang=' + newLang + '&');
  }
  window.location = window.location.protocol + '//' +
      window.location.host + window.location.pathname + search;
};
/**
 * Bind a function to a button's click event.
 * On touch enabled browsers, ontouchend is treated as equivalent to onclick.
 * @param {!Element|string} el Button element or ID thereof.
 * @param {!Function} func Event handler to bind.
 */
Code.bindClick = function(el, func) {
  var element = (typeof el == 'string') ? document.getElementById(el) : el;
  element.addEventListener('click', func, true);
  element.addEventListener('touchend', func, true);
};
/**
 * Load the Prettify CSS and JavaScript by injecting its loader script tag.
 */
Code.importPrettify = function() {
  var tag = document.createElement('script');
  tag.setAttribute('src', 'https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js');
  document.head.appendChild(tag);
};
/**
 * Compute the absolute coordinates and dimensions of an HTML element.
 * Walks the offsetParent chain to accumulate page-relative x/y.
 * @param {!Element} element Element to match.
 * @return {!Object} Contains height, width, x, and y properties.
 * @private
 */
Code.getBBox_ = function(element) {
  var bounds = {
    height: element.offsetHeight,
    width: element.offsetWidth,
    x: 0,
    y: 0
  };
  var node = element;
  do {
    bounds.x += node.offsetLeft;
    bounds.y += node.offsetTop;
    node = node.offsetParent;
  } while (node);
  return bounds;
};
/**
 * User's language (e.g. "en").
 * @type {string}
 */
Code.LANG = Code.getLang();
/**
 * List of tab names.
 * @private
 */
Code.TABS_ = ['blocks', 'javascript', 'php', 'python', 'dart', 'lua', 'xml'];
// Name of the currently selected tab; always one of Code.TABS_.
Code.selected = 'blocks';
/**
 * Switch the visible pane when a tab is clicked.
 * @param {string} clickedName Name of tab clicked.
 */
Code.tabClick = function(clickedName) {
  // If the XML tab was open, save and render the content.
  if (document.getElementById('tab_xml').className == 'tabon') {
    var xmlTextarea = document.getElementById('content_xml');
    var xmlText = xmlTextarea.value;
    var xmlDom = null;
    try {
      xmlDom = Blockly.Xml.textToDom(xmlText);
    } catch (e) {
      // Invalid XML: ask whether to discard the edits or stay on the tab.
      var q =
          window.confirm(MSG['badXml'].replace('%1', e));
      if (!q) {
        // Leave the user on the XML tab.
        return;
      }
    }
    if (xmlDom) {
      Code.workspace.clear();
      Blockly.Xml.domToWorkspace(xmlDom, Code.workspace);
    }
  }
  // Hide the workspace while a non-blocks tab is shown.
  if (document.getElementById('tab_blocks').className == 'tabon') {
    Code.workspace.setVisible(false);
  }
  // Deselect all tabs and hide all panes.
  for (var i = 0; i < Code.TABS_.length; i++) {
    var name = Code.TABS_[i];
    document.getElementById('tab_' + name).className = 'taboff';
    document.getElementById('content_' + name).style.visibility = 'hidden';
  }
  // Select the active tab.
  Code.selected = clickedName;
  document.getElementById('tab_' + clickedName).className = 'tabon';
  // Show the selected pane.
  document.getElementById('content_' + clickedName).style.visibility =
      'visible';
  Code.renderContent();
  if (clickedName == 'blocks') {
    Code.workspace.setVisible(true);
  }
  Blockly.svgResize(Code.workspace);
};
/**
 * Populate the currently selected pane with content generated from the blocks.
 * Generators are referenced lazily per branch so that a missing Blockly
 * language generator only matters when its tab is actually selected.
 */
Code.renderContent = function() {
  var content = document.getElementById('content_' + Code.selected);
  // Initialize the pane.
  if (content.id == 'content_xml') {
    var xmlTextarea = document.getElementById('content_xml');
    var xmlDom = Blockly.Xml.workspaceToDom(Code.workspace);
    var xmlText = Blockly.Xml.domToPrettyText(xmlDom);
    xmlTextarea.value = xmlText;
    xmlTextarea.focus();
  } else if (content.id == 'content_javascript') {
    Code.attemptCodeGeneration(Blockly.JavaScript);
  } else if (content.id == 'content_python') {
    Code.attemptCodeGeneration(Blockly.Python);
  } else if (content.id == 'content_php') {
    Code.attemptCodeGeneration(Blockly.PHP);
  } else if (content.id == 'content_dart') {
    Code.attemptCodeGeneration(Blockly.Dart);
  } else if (content.id == 'content_lua') {
    Code.attemptCodeGeneration(Blockly.Lua);
  }
  // Re-run syntax highlighting if Prettify has finished loading.
  if (typeof PR == 'object') {
    PR.prettyPrint();
  }
};
/**
 * Attempt to generate the code and display it in the UI, pretty printed.
 * Leaves the pane empty if any block lacks a generator function.
 * @param generator {!Blockly.Generator} The generator to use.
 */
Code.attemptCodeGeneration = function(generator) {
  var content = document.getElementById('content_' + Code.selected);
  content.textContent = '';
  if (!Code.checkAllGeneratorFunctionsDefined(generator)) {
    return;
  }
  var code = generator.workspaceToCode(Code.workspace);
  content.textContent = code;
  // Remove the 'prettyprinted' class, so that Prettify will recalculate.
  content.className = content.className.replace('prettyprinted', '');
};
/**
 * Check whether all blocks in use have generator functions.
 * Alerts the user with the list of offending block types if any are missing.
 * @param generator {!Blockly.Generator} The generator to use.
 * @return {boolean} True when every block type has a generator function.
 */
Code.checkAllGeneratorFunctionsDefined = function(generator) {
  var missingBlockGenerators = [];
  var blocks = Code.workspace.getAllBlocks(false);
  for (var i = 0; i < blocks.length; i++) {
    var blockType = blocks[i].type;
    // Record each undefined generator once, preserving first-seen order.
    if (!generator[blockType] &&
        missingBlockGenerators.indexOf(blockType) == -1) {
      missingBlockGenerators.push(blockType);
    }
  }
  if (missingBlockGenerators.length) {
    var msg = 'The generator code for the following blocks not specified for ' +
        generator.name_ + ':\n - ' + missingBlockGenerators.join('\n - ');
    Blockly.alert(msg); // Assuming synchronous. No callback.
    return false;
  }
  return true;
};
/**
 * Initialize Blockly. Called on page load.
 * Sets up language strings, the resize handler, the workspace, storage
 * hooks, and the tab/button bindings.
 */
Code.init = function() {
  Code.initLanguage();
  var rtl = Code.isRtl();
  var container = document.getElementById('content_area');
  // Keep every tab pane sized to the content area.
  var onresize = function(e) {
    var bBox = Code.getBBox_(container);
    for (var i = 0; i < Code.TABS_.length; i++) {
      var el = document.getElementById('content_' + Code.TABS_[i]);
      el.style.top = bBox.y + 'px';
      el.style.left = bBox.x + 'px';
      // Height and width need to be set, read back, then set again to
      // compensate for scrollbars.
      el.style.height = bBox.height + 'px';
      el.style.height = (2 * bBox.height - el.offsetHeight) + 'px';
      el.style.width = bBox.width + 'px';
      el.style.width = (2 * bBox.width - el.offsetWidth) + 'px';
    }
    // Make the 'Blocks' tab line up with the toolbox.
    if (Code.workspace && Code.workspace.toolbox_.width) {
      document.getElementById('tab_blocks').style.minWidth =
          (Code.workspace.toolbox_.width - 38) + 'px';
          // Account for the 19 pixel margin and on each side.
    }
  };
  window.addEventListener('resize', onresize, false);
  // The toolbox XML specifies each category name using Blockly's messaging
  // format (eg. `<category name="%{BKY_CATLOGIC}">`).
  // These message keys need to be defined in `Blockly.Msg` in order to
  // be decoded by the library. Therefore, we'll use the `MSG` dictionary that's
  // been defined for each language to import each category name message
  // into `Blockly.Msg`.
  // TODO: Clean up the message files so this is done explicitly instead of
  // through this for-loop.
  for (var messageKey in MSG) {
    if (messageKey.indexOf('cat') == 0) {
      Blockly.Msg[messageKey.toUpperCase()] = MSG[messageKey];
    }
  }
  // Construct the toolbox XML, replacing translated variable names.
  var toolboxText = document.getElementById('toolbox').outerHTML;
  toolboxText = toolboxText.replace(/(^|[^%]){(\w+)}/g,
      function(m, p1, p2) {return p1 + MSG[p2];});
  var toolboxXml = Blockly.Xml.textToDom(toolboxText);
  Code.workspace = Blockly.inject('content_blocks',
      {grid:
          {spacing: 25,
           length: 3,
           colour: '#ccc',
           snap: true},
       media: '../../media/',
       rtl: rtl,
       toolbox: toolboxXml,
       zoom:
           {controls: true,
            wheel: true}
      });
  // Add to reserved word list: Local variables in execution environment (runJS)
  // and the infinite loop detection function.
  Blockly.JavaScript.addReservedWords('code,timeouts,checkTimeout');
  Code.loadBlocks('');
  if ('BlocklyStorage' in window) {
    // Hook a save function onto unload.
    BlocklyStorage.backupOnUnload(Code.workspace);
  }
  Code.tabClick(Code.selected);
  Code.bindClick('trashButton',
      function() {Code.discard(); Code.renderContent();});
  Code.bindClick('runButton', Code.runJS);
  // Disable the link button if page isn't backed by App Engine storage.
  var linkButton = document.getElementById('linkButton');
  if ('BlocklyStorage' in window) {
    BlocklyStorage['HTTPREQUEST_ERROR'] = MSG['httpRequestError'];
    BlocklyStorage['LINK_ALERT'] = MSG['linkAlert'];
    BlocklyStorage['HASH_ERROR'] = MSG['hashError'];
    BlocklyStorage['XML_ERROR'] = MSG['xmlError'];
    Code.bindClick(linkButton,
        function() {BlocklyStorage.link(Code.workspace);});
  } else if (linkButton) {
    linkButton.className = 'disabled';
  }
  // Bind each tab header to the pane switcher; the closure pins the name.
  for (var i = 0; i < Code.TABS_.length; i++) {
    var name = Code.TABS_[i];
    Code.bindClick('tab_' + name,
        function(name_) {return function() {Code.tabClick(name_);};}(name));
  }
  onresize();
  Blockly.svgResize(Code.workspace);
  // Lazy-load the syntax-highlighting.
  window.setTimeout(Code.importPrettify, 1);
};
/**
 * Initialize the page language.
 * Sets text direction, builds the sorted language menu, and injects the
 * translated UI strings.
 */
Code.initLanguage = function() {
  // Set the HTML's language and direction.
  var rtl = Code.isRtl();
  document.dir = rtl ? 'rtl' : 'ltr';
  document.head.parentElement.setAttribute('lang', Code.LANG);
  // Sort languages alphabetically.
  var languages = [];
  for (var lang in Code.LANGUAGE_NAME) {
    languages.push([Code.LANGUAGE_NAME[lang], lang]);
  }
  var comp = function(a, b) {
    // Sort based on first argument ('English', 'Русский', '简体字', etc).
    if (a[0] > b[0]) return 1;
    if (a[0] < b[0]) return -1;
    return 0;
  };
  languages.sort(comp);
  // Populate the language selection menu.
  var languageMenu = document.getElementById('languageMenu');
  languageMenu.options.length = 0;
  for (var i = 0; i < languages.length; i++) {
    var tuple = languages[i];
    var lang = tuple[tuple.length - 1];
    var option = new Option(tuple[0], lang);
    if (lang == Code.LANG) {
      option.selected = true;
    }
    languageMenu.options.add(option);
  }
  languageMenu.addEventListener('change', Code.changeLanguage, true);
  // Inject language strings.
  document.title += ' ' + MSG['title'];
  document.getElementById('title').textContent = MSG['title'];
  document.getElementById('tab_blocks').textContent = MSG['blocks'];
  document.getElementById('linkButton').title = MSG['linkTooltip'];
  document.getElementById('runButton').title = MSG['runTooltip'];
  document.getElementById('trashButton').title = MSG['trashTooltip'];
};
/**
 * Execute the user's code.
 * Just a quick and dirty eval. Catch infinite loops.
 * NOTE: the local names `timeouts` and `checkTimeout` are referenced by the
 * generated code itself (they are registered as reserved words in Code.init
 * and injected via INFINITE_LOOP_TRAP), so they must not be renamed.
 */
Code.runJS = function() {
  Blockly.JavaScript.INFINITE_LOOP_TRAP = 'checkTimeout();\n';
  var timeouts = 0;
  var checkTimeout = function() {
    // Abort runaway programs after a fixed number of loop iterations.
    if (timeouts++ > 1000000) {
      throw MSG['timeout'];
    }
  };
  var code = Blockly.JavaScript.workspaceToCode(Code.workspace);
  Blockly.JavaScript.INFINITE_LOOP_TRAP = null;
  try {
    eval(code);
  } catch (e) {
    alert(MSG['badCode'].replace('%1', e));
  }
};
/**
 * Discard all blocks from the workspace, asking for confirmation first
 * unless the workspace holds fewer than two blocks.
 */
Code.discard = function() {
  var count = Code.workspace.getAllBlocks(false).length;
  var confirmed = count < 2 ||
      window.confirm(Blockly.Msg['DELETE_ALL_BLOCKS'].replace('%1', count));
  if (!confirmed) {
    return;
  }
  Code.workspace.clear();
  if (window.location.hash) {
    window.location.hash = '';
  }
};
// Load the Code demo's language strings.
document.write('<script src="msg/' + Code.LANG + '.js"></script>\n');
// Load Blockly's language strings.
document.write('<script src="../../msg/js/' + Code.LANG + '.js"></script>\n');
// Run Code.init once the page (including the scripts above) has loaded.
window.addEventListener('load', Code.init);
| picklesrus/blockly | demos/code/code.js | JavaScript | apache-2.0 | 17,281 |
package com.bookify.web.models;
import com.bookify.core.Bill;
import com.bookify.core.BillItem;
import java.util.ArrayList;
/**
* Created by idumancic on 11/07/2017.
*/
/**
 * View model for the payment screen: the bill being paid, its line items,
 * the paying user, the chosen payment method and the computed total.
 *
 * Created by idumancic on 11/07/2017.
 */
public class PaymentViewModel {
    private Bill bill;                          // the bill being settled
    private String username;                    // user performing the payment
    private ArrayList<ItemViewModel> billItems; // line items shown on screen
    private String paymentMethod;               // payment method label (semantics defined by callers)
    private int totalCost;                      // total for the bill; presumably in minor currency units — TODO confirm
    public Bill getBill() {
        return bill;
    }
    public void setBill(Bill bill) {
        this.bill = bill;
    }
    public ArrayList<ItemViewModel> getBillItems() {
        return billItems;
    }
    public void setBillItems(ArrayList<ItemViewModel> billItems) {
        this.billItems = billItems;
    }
    public String getPaymentMethod() {
        return paymentMethod;
    }
    public void setPaymentMethod(String paymentMethod) {
        this.paymentMethod = paymentMethod;
    }
    public int getTotalCost() {
        return totalCost;
    }
    public void setTotalCost(int totalCost) {
        this.totalCost = totalCost;
    }
    public String getUsername() {
        return username;
    }
    public void setUsername(String username) {
        this.username = username;
    }
}
| idumancic/bookify | web/src/main/java/com/bookify/web/models/PaymentViewModel.java | Java | apache-2.0 | 1,184 |
/*
* Copyright 2018 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package com.netflix.spinnaker.halyard.cli.command.v1.config.deploy.ha;
import com.beust.jcommander.Parameters;
import com.netflix.spinnaker.halyard.cli.command.v1.NestableCommand;
import com.netflix.spinnaker.halyard.cli.services.v1.Daemon;
import com.netflix.spinnaker.halyard.cli.services.v1.OperationHandler;
import java.util.HashMap;
import java.util.Map;
import lombok.AccessLevel;
import lombok.Getter;
@Parameters(separators = "=")
@Parameters(separators = "=")
public abstract class AbstractHaServiceEnableDisableCommand extends AbstractHaServiceCommand {

  @Getter(AccessLevel.PROTECTED)
  private Map<String, NestableCommand> subcommands = new HashMap<>();

  /** Subclasses decide whether this command enables or disables the service. */
  protected abstract boolean isEnable();

  @Override
  public String getCommandName() {
    return isEnable() ? "enable" : "disable";
  }

  /** Past-participle form used when describing the target state. */
  private String subjunctivePerfectAction() {
    return isEnable() ? "enabled" : "disabled";
  }

  /** Past-participle form used when reporting the completed action. */
  private String indicativePastPerfectAction() {
    return isEnable() ? "enabled" : "disabled";
  }

  @Override
  public String getShortDescription() {
    return "Set the " + getServiceName() + " high availability service as "
        + subjunctivePerfectAction();
  }

  @Override
  protected void executeThis() {
    final String deployment = getCurrentDeployment();
    final String service = getServiceName();
    final boolean enable = isEnable();
    // Note: setFailureMesssage is the (misspelled) name exposed by
    // OperationHandler; kept as-is.
    new OperationHandler<Void>()
        .setSuccessMessage("Successfully " + indicativePastPerfectAction() + " " + service)
        .setFailureMesssage("Failed to " + getCommandName() + " " + service)
        .setOperation(
            Daemon.setHaServiceEnableDisable(deployment, service, !noValidate, enable))
        .get();
  }
}
| spinnaker/halyard | halyard-cli/src/main/java/com/netflix/spinnaker/halyard/cli/command/v1/config/deploy/ha/AbstractHaServiceEnableDisableCommand.java | Java | apache-2.0 | 2,298 |
/*
* Copyright (c) 2016 GitHub, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "catch.hpp"
#include "usdt.h"
#include "api/BPF.h"
/* required to insert USDT probes on this very executable --
* we're gonna be testing them live! */
#include "folly/tracing/StaticTracepoint.h"
/* Fires the libbcc_test:sample_probe_1 USDT probe with an int and a
 * pointer argument, then returns the int.  Serves as the live in-process
 * target for the probe-discovery tests below. */
static int a_probed_function() {
  int probe_int = 23 + getpid();
  void *probe_ptr = malloc(4);
  FOLLY_SDT(libbcc_test, sample_probe_1, probe_int, probe_ptr);
  free(probe_ptr);
  return probe_int;
}
/* Probe target defined in a companion shared library; referenced so the
 * shared-object probe tests have something to discover. */
extern "C" int lib_probed_function();

int call_shared_lib_func() {
  return lib_probed_function();
}
// Discover USDT probes in the current process; this binary embeds
// sample_probe_1 via FOLLY_SDT in a_probed_function above.
TEST_CASE("test finding a probe in our own process", "[usdt]") {
  USDT::Context ctx(getpid());
  REQUIRE(ctx.num_probes() >= 1);

  SECTION("our test probe") {
    auto probe = ctx.get("sample_probe_1");
    REQUIRE(probe);

    // The attribute checks below assume the probe lives in the main
    // executable; bail out if it resolved into a shared object.
    if(probe->in_shared_object(probe->bin_path()))
        return;
    REQUIRE(probe->name() == "sample_probe_1");
    REQUIRE(probe->provider() == "libbcc_test");
    REQUIRE(probe->bin_path().find("/test_libbcc") != std::string::npos);

    REQUIRE(probe->num_locations() == 1);
    REQUIRE(probe->num_arguments() == 2);
    REQUIRE(probe->need_enable() == false);

    // Fire the probe once to make sure the instrumented code still runs.
    REQUIRE(a_probed_function() != 0);
  }
}
// Constructor arguments should be reflected verbatim by the accessors.
TEST_CASE("test probe's attributes with C++ API", "[usdt]") {
  const ebpf::USDT u("/proc/self/exe", "libbcc_test", "sample_probe_1", "on_event");

  REQUIRE(u.binary_path() == "/proc/self/exe");
  REQUIRE(u.pid() == -1);
  REQUIRE(u.provider() == "libbcc_test");
  REQUIRE(u.name() == "sample_probe_1");
  REQUIRE(u.probe_func() == "on_event");
}

// Attach/detach a single probe located by binary path.
// NOTE(review): "fine" in the test name looks like a typo for "find";
// left unchanged since test names may be used as run filters.
TEST_CASE("test fine a probe in our own binary with C++ API", "[usdt]") {
  ebpf::BPF bpf;
  ebpf::USDT u("/proc/self/exe", "libbcc_test", "sample_probe_1", "on_event");

  auto res = bpf.init("int on_event() { return 0; }", {}, {u});
  REQUIRE(res.code() == 0);

  res = bpf.attach_usdt(u);
  REQUIRE(res.code() == 0);

  res = bpf.detach_usdt(u);
  REQUIRE(res.code() == 0);
}

// Same as above but via the attach_usdt_all/detach_usdt_all bulk calls.
TEST_CASE("test fine probes in our own binary with C++ API", "[usdt]") {
  ebpf::BPF bpf;
  ebpf::USDT u("/proc/self/exe", "libbcc_test", "sample_probe_1", "on_event");

  auto res = bpf.init("int on_event() { return 0; }", {}, {u});
  REQUIRE(res.ok());

  res = bpf.attach_usdt_all();
  REQUIRE(res.ok());

  res = bpf.detach_usdt_all();
  REQUIRE(res.ok());
}

// Attach/detach a probe located by pid instead of binary path.
TEST_CASE("test fine a probe in our Process with C++ API", "[usdt]") {
  ebpf::BPF bpf;
  ebpf::USDT u(::getpid(), "libbcc_test", "sample_probe_1", "on_event");

  auto res = bpf.init("int on_event() { return 0; }", {}, {u});
  REQUIRE(res.code() == 0);

  res = bpf.attach_usdt(u);
  REQUIRE(res.code() == 0);

  res = bpf.detach_usdt(u);
  REQUIRE(res.code() == 0);
}
// The probe lives in a shared library loaded by this process (see
// lib_probed_function above); init should still resolve it by pid.
TEST_CASE("test find a probe in our process' shared libs with c++ API", "[usdt]") {
  ebpf::BPF bpf;
  ebpf::USDT u(::getpid(), "libbcc_test", "sample_lib_probe_1", "on_event");

  auto res = bpf.init("int on_event() { return 0; }", {}, {u});
  REQUIRE(res.msg() == "");
  REQUIRE(res.code() == 0);
}

// A failed init_usdt with a bogus probe must not poison the BPF object:
// a later init with a valid probe (or none) should still succeed.
TEST_CASE("test usdt partial init w/ fail init_usdt", "[usdt]") {
  ebpf::BPF bpf;
  ebpf::USDT u(::getpid(), "libbcc_test", "sample_lib_probe_nonexistent", "on_event");
  ebpf::USDT p(::getpid(), "libbcc_test", "sample_lib_probe_1", "on_event");

  // We should be able to fail initialization and subsequently do bpf.init w/o USDT
  // successfully
  auto res = bpf.init_usdt(u);
  REQUIRE(res.msg() != "");
  REQUIRE(res.code() != 0);

  // Shouldn't be necessary to re-init bpf object either after failure to init w/
  // bad USDT
  res = bpf.init("int on_event() { return 0; }", {}, {u});
  REQUIRE(res.msg() != "");
  REQUIRE(res.code() != 0);

  res = bpf.init_usdt(p);
  REQUIRE(res.msg() == "");
  REQUIRE(res.code() == 0);

  res = bpf.init("int on_event() { return 0; }", {}, {});
  REQUIRE(res.msg() == "");
  REQUIRE(res.code() == 0);
}
/* RAII helper that forks/execs a child for the duration of a test and
 * SIGKILLs it on destruction.  spawned() reports whether the child was
 * successfully started and was still alive 250ms after the fork. */
class ChildProcess {
  pid_t pid_;

public:
  ChildProcess(const char *name, char *const argv[]) {
    pid_ = fork();
    if (pid_ == 0) {
      execvp(name, argv);
      // execvp only returns on failure.  Use _exit (not exit) so the
      // child does not run the parent's atexit handlers or flush its
      // duplicated stdio buffers; 127 is the conventional
      // "command not found" status rather than a misleading success.
      _exit(127);
    }
    if (spawned()) {
      // Give the child a moment to start, then verify it is still alive
      // (kill with signal 0 only checks existence).
      usleep(250000);
      if (kill(pid_, 0) < 0)
        pid_ = -1;
    }
  }

  ~ChildProcess() {
    if (spawned()) {
      int status;
      kill(pid_, SIGKILL);
      if (waitpid(pid_, &status, 0) != pid_)
        abort();
    }
  }

  bool spawned() const { return pid_ > 0; }
  pid_t pid() const { return pid_; }
};
/* Helper from the test harness: runs `cmd` and scanf-parses its stdout. */
extern int cmd_scanf(const char *cmd, const char *fmt, ...);

/* Count the probe's locations by grepping the ELF notes of `bin_path`.
 * Returns -1 if the readelf pipeline fails. */
static int probe_num_locations(const char *bin_path, const char *func_name) {
  int num_locations;
  char cmd[512];
  const char *cmdfmt = "readelf -n %s | grep -c \"Name: %s$\"";

  // snprintf guards against paths/names overflowing the fixed buffer.
  snprintf(cmd, sizeof(cmd), cmdfmt, bin_path, func_name);
  if (cmd_scanf(cmd, "%d", &num_locations) != 0) {
    return -1;
  }

  return num_locations;
}
/* Count the probe's arguments by parsing the ELF note's argument line.
 * Returns -1 if the readelf pipeline fails. */
static int probe_num_arguments(const char *bin_path, const char *func_name) {
  int num_arguments;
  char cmd[512];
  const char *cmdfmt = "readelf -n %s | grep -m 1 -A 2 \" %s$\" | " \
                       "tail -1 | cut -d \" \" -f 6- | wc -w";

  // snprintf guards against paths/names overflowing the fixed buffer.
  snprintf(cmd, sizeof(cmd), cmdfmt, bin_path, func_name);
  if (cmd_scanf(cmd, "%d", &num_arguments) != 0) {
    return -1;
  }

  return num_arguments;
}
// Unsharing pid namespace requires forking
// this uses pgrep to find the child process, by searching for a process
// that has the unshare as its parent
// Returns -1 if the pgrep pipeline fails or finds nothing.
static int unshared_child_pid(const int ppid) {
  int child_pid;
  char cmd[512];
  const char *cmdfmt = "pgrep -P %d";

  // snprintf for symmetry with the helpers above (an int cannot overflow
  // 512 bytes, but the bounded form is the safe default).
  snprintf(cmd, sizeof(cmd), cmdfmt, ppid);
  if (cmd_scanf(cmd, "%d", &child_pid) != 0) {
    return -1;
  }
  return child_pid;
}
// FIXME This seems like a legitimate bug with probing ruby where the
// ruby symbols are in libruby.so?
TEST_CASE("test listing all USDT probes in Ruby/MRI", "[usdt][!mayfail]") {
  size_t mri_probe_count = 0;

  SECTION("without a running Ruby process") {
    USDT::Context ctx("ruby");
    // Silently skip when no dtrace-enabled Ruby is installed.
    if (!ctx.loaded())
      return;

    REQUIRE(ctx.num_probes() > 10);
    mri_probe_count = ctx.num_probes();

    SECTION("GC static probe") {
      auto name = "gc__mark__begin";
      auto probe = ctx.get(name);
      REQUIRE(probe);
      REQUIRE(probe->in_shared_object(probe->bin_path()) == true);
      REQUIRE(probe->name() == name);
      REQUIRE(probe->provider() == "ruby");

      // Probe may resolve into either the ruby binary or libruby.
      auto bin_path = probe->bin_path();
      bool bin_path_match =
            (bin_path.find("/ruby") != std::string::npos) ||
            (bin_path.find("/libruby") != std::string::npos);
      REQUIRE(bin_path_match);

      // Cross-check location/argument counts against readelf's view.
      int exp_locations, exp_arguments;
      exp_locations = probe_num_locations(bin_path.c_str(), name);
      exp_arguments = probe_num_arguments(bin_path.c_str(), name);
      REQUIRE(probe->num_locations() == exp_locations);
      REQUIRE(probe->num_arguments() == exp_arguments);
      REQUIRE(probe->need_enable() == true);
    }

    SECTION("object creation probe") {
      auto name = "object__create";
      auto probe = ctx.get(name);
      REQUIRE(probe);
      REQUIRE(probe->in_shared_object(probe->bin_path()) == true);
      REQUIRE(probe->name() == name);
      REQUIRE(probe->provider() == "ruby");

      auto bin_path = probe->bin_path();
      bool bin_path_match =
            (bin_path.find("/ruby") != std::string::npos) ||
            (bin_path.find("/libruby") != std::string::npos);
      REQUIRE(bin_path_match);

      int exp_locations, exp_arguments;
      exp_locations = probe_num_locations(bin_path.c_str(), name);
      exp_arguments = probe_num_arguments(bin_path.c_str(), name);
      REQUIRE(probe->num_locations() == exp_locations);
      REQUIRE(probe->num_arguments() == exp_arguments);
      REQUIRE(probe->need_enable() == true);
    }

    SECTION("array creation probe") {
      auto name = "array__create";
      auto probe = ctx.get(name);
      REQUIRE(probe);
      REQUIRE(probe->name() == name);

      auto bin_path = probe->bin_path().c_str();
      int exp_locations, exp_arguments;
      exp_locations = probe_num_locations(bin_path, name);
      exp_arguments = probe_num_arguments(bin_path, name);
      REQUIRE(probe->num_locations() == exp_locations);
      REQUIRE(probe->num_arguments() == exp_arguments);
      REQUIRE(probe->need_enable() == true);
    }
  }

  SECTION("with a running Ruby process") {
    static char _ruby[] = "ruby";
    char *const argv[2] = {_ruby, NULL};

    ChildProcess ruby(argv[0], argv);
    if (!ruby.spawned())
      return;

    // A live process should expose at least as many probes as the
    // static scan above counted.
    USDT::Context ctx(ruby.pid());
    REQUIRE(ctx.num_probes() >= mri_probe_count);

    SECTION("get probe in running process") {
      auto name = "gc__mark__begin";
      auto probe = ctx.get(name);
      REQUIRE(probe);
      REQUIRE(probe->in_shared_object(probe->bin_path()) == true);
      REQUIRE(probe->name() == name);
      REQUIRE(probe->provider() == "ruby");

      auto bin_path = probe->bin_path();
      bool bin_path_match =
            (bin_path.find("/ruby") != std::string::npos) ||
            (bin_path.find("/libruby") != std::string::npos);
      REQUIRE(bin_path_match);

      int exp_locations, exp_arguments;
      exp_locations = probe_num_locations(bin_path.c_str(), name);
      exp_arguments = probe_num_arguments(bin_path.c_str(), name);
      REQUIRE(probe->num_locations() == exp_locations);
      REQUIRE(probe->num_arguments() == exp_arguments);
      REQUIRE(probe->need_enable() == true);
    }
  }
}
// These tests are expected to fail if there is no Ruby with dtrace probes
TEST_CASE("test probing running Ruby process in namespaces",
          "[usdt][!mayfail]") {
  SECTION("in separate mount namespace") {
    static char _unshare[] = "unshare";
    const char *const argv[4] = {_unshare, "--mount", "ruby", NULL};

    ChildProcess unshare(argv[0], (char **const)argv);
    if (!unshare.spawned())
      return;
    int ruby_pid = unshare.pid();

    ebpf::BPF bpf;
    ebpf::USDT u(ruby_pid, "ruby", "gc__mark__begin", "on_event");
    u.set_probe_matching_kludge(1); // Also required for overlayfs...

    auto res = bpf.init("int on_event() { return 0; }", {}, {u});
    REQUIRE(res.msg() == "");
    REQUIRE(res.code() == 0);

    res = bpf.attach_usdt(u, ruby_pid);
    REQUIRE(res.code() == 0);

    res = bpf.detach_usdt(u, ruby_pid);
    REQUIRE(res.code() == 0);
  }

  SECTION("in separate mount namespace and separate PID namespace") {
    static char _unshare[] = "unshare";
    const char *const argv[8] = {_unshare, "--fork", "--kill-child",
                                 "--mount", "--pid", "--mount-proc",
                                 "ruby", NULL};

    ChildProcess unshare(argv[0], (char **const)argv);
    if (!unshare.spawned())
      return;
    // unshare --fork means the ruby process is a grandchild; find it via
    // pgrep on the unshare pid.
    int ruby_pid = unshared_child_pid(unshare.pid());

    ebpf::BPF bpf;
    ebpf::USDT u(ruby_pid, "ruby", "gc__mark__begin", "on_event");
    u.set_probe_matching_kludge(1); // Also required for overlayfs...

    auto res = bpf.init("int on_event() { return 0; }", {}, {u});
    REQUIRE(res.msg() == "");
    REQUIRE(res.code() == 0);

    res = bpf.attach_usdt(u, ruby_pid);
    REQUIRE(res.code() == 0);

    res = bpf.detach_usdt(u, ruby_pid);
    REQUIRE(res.code() == 0);

    // Symbol resolution must strip the /proc/<pid>/root prefix from the
    // reported module path.
    struct bcc_symbol sym;
    std::string pid_root= "/proc/" + std::to_string(ruby_pid) + "/root/";
    std::string module = pid_root + "usr/local/bin/ruby";
    REQUIRE(bcc_resolve_symname(module.c_str(), "rb_gc_mark", 0x0, ruby_pid, nullptr, &sym) == 0);
    REQUIRE(std::string(sym.module).find(pid_root, 1) == std::string::npos);
  }
}
| tuxology/bcc | tests/cc/test_usdt_probes.cc | C++ | apache-2.0 | 12,086 |
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import eventlet
import mock
import netaddr
from oslo.config import cfg
from oslo import messaging
from testtools import matchers
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import config as l3_config
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_router
from neutron.agent.l3 import ha
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import router_info as l3router
from neutron.agent.linux import interface
from neutron.agent.linux import ra
from neutron.agent.metadata import driver as metadata_driver
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils
from neutron.i18n import _LE
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as p_const
from neutron.tests import base
# Shorthand for generating unique ids throughout the fixtures below.
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
FAKE_ID_2 = _uuid()
# NOTE(review): presumably the rule priority used for floating IPs --
# confirm against the tests that reference it.
FIP_PRI = 32768
class FakeDev(object):
    """Minimal stand-in for a network device: only carries a name."""

    def __init__(self, name):
        self.name = name
def router_append_interface(router, count=1, ip_version=4, ra_mode=None,
                            addr_mode=None):
    """Append ``count`` fake internal interfaces to ``router``.

    Addresses come from a per-IP-version pool; numbering continues after
    any interfaces of the same version already on the router.

    :param router: router dict whose INTERFACE_KEY list is extended.
    :param count: number of interfaces to append.
    :param ip_version: 4 or 6; anything else raises ValueError.
    :param ra_mode: value for the subnet's ipv6_ra_mode.
    :param addr_mode: value for the subnet's ipv6_address_mode.
    """
    if ip_version == 4:
        ip_pool = '35.4.%i.4'
        cidr_pool = '35.4.%i.0/24'
        gw_pool = '35.4.%i.1'
    elif ip_version == 6:
        ip_pool = 'fd01:%x::6'
        cidr_pool = 'fd01:%x::/64'
        gw_pool = 'fd01:%x::1'
    else:
        raise ValueError("Invalid ip_version: %s" % ip_version)

    interfaces = router[l3_constants.INTERFACE_KEY]
    # Count existing interfaces of this IP version so the new ones
    # continue the numbering instead of colliding.
    current = sum(
        [netaddr.IPNetwork(p['subnet']['cidr']).version == ip_version
         for p in interfaces])

    mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
    mac_address.dialect = netaddr.mac_unix
    for i in range(current, current + count):
        interfaces.append(
            {'id': _uuid(),
             'network_id': _uuid(),
             'admin_state_up': True,
             'fixed_ips': [{'ip_address': ip_pool % i,
                            'subnet_id': _uuid()}],
             'mac_address': str(mac_address),
             'subnet': {'cidr': cidr_pool % i,
                        'gateway_ip': gw_pool % i,
                        'ipv6_ra_mode': ra_mode,
                        'ipv6_address_mode': addr_mode}})
        # Give every appended port a distinct MAC.
        mac_address.value += 1
def prepare_router_data(ip_version=4, enable_snat=None, num_internal_ports=1,
                        enable_floating_ip=False, enable_ha=False,
                        extra_routes=False):
    """Build a fake (non-distributed) router dict for agent tests.

    The router gets a gateway port, ``num_internal_ports`` internal
    interfaces, and optionally a floating IP, HA fields, extra routes
    and an explicit enable_snat flag.
    """
    if ip_version == 4:
        ip_addr = '19.4.4.4'
        cidr = '19.4.4.0/24'
        gateway_ip = '19.4.4.1'
    elif ip_version == 6:
        ip_addr = 'fd00::4'
        cidr = 'fd00::/64'
        gateway_ip = 'fd00::1'
    else:
        raise ValueError("Invalid ip_version: %s" % ip_version)

    router_id = _uuid()
    ex_gw_port = {'id': _uuid(),
                  'mac_address': 'ca:fe:de:ad:be:ee',
                  'network_id': _uuid(),
                  'fixed_ips': [{'ip_address': ip_addr,
                                 'subnet_id': _uuid()}],
                  'subnet': {'cidr': cidr,
                             'gateway_ip': gateway_ip}}

    routes = []
    if extra_routes:
        routes = [{'destination': '8.8.8.0/24', 'nexthop': ip_addr}]

    router = {
        'id': router_id,
        'distributed': False,
        l3_constants.INTERFACE_KEY: [],
        'routes': routes,
        'gw_port': ex_gw_port}

    if enable_floating_ip:
        router[l3_constants.FLOATINGIP_KEY] = [{
            'id': _uuid(),
            'port_id': _uuid(),
            'floating_ip_address': '19.4.4.2',
            'fixed_ip_address': '10.0.0.1'}]

    router_append_interface(router, count=num_internal_ports,
                            ip_version=ip_version)
    if enable_ha:
        router['ha'] = True
        router['ha_vr_id'] = 1
        router[l3_constants.HA_INTERFACE_KEY] = get_ha_interface()

    # enable_snat=None leaves the key absent (server default) rather
    # than explicitly False.
    if enable_snat is not None:
        router['enable_snat'] = enable_snat
    return router
def _get_subnet_id(port):
return port['fixed_ips'][0]['subnet_id']
# TODO(jschwarz): This is a shared function with both the unit tests
# and the functional tests, and should be moved elsewhere (probably
# neutron/tests/common/).
def get_ha_interface(ip='169.254.192.1', mac='12:34:56:78:2b:5d'):
    """Return a fake L3 HA admin port dict for HA router fixtures."""
    return {'admin_state_up': True,
            'device_id': _uuid(),
            'device_owner': 'network:router_ha_interface',
            'fixed_ips': [{'ip_address': ip,
                           'subnet_id': _uuid()}],
            'id': _uuid(),
            'mac_address': mac,
            'name': u'L3 HA Admin port 0',
            'network_id': _uuid(),
            'status': u'ACTIVE',
            'subnet': {'cidr': '169.254.192.0/18',
                       'gateway_ip': '169.254.255.254',
                       'id': _uuid()},
            'tenant_id': '',
            'agent_id': _uuid(),
            'agent_host': 'aaa',
            'priority': 1}
class TestBasicRouterOperations(base.BaseTestCase):
    def setUp(self):
        """Register agent config options and mock every external
        dependency (ip_lib, process management, drivers, the L3 plugin
        RPC API) so tests never touch the host system."""
        super(TestBasicRouterOperations, self).setUp()
        self.conf = agent_config.setup_conf()
        self.conf.register_opts(base_config.core_opts)
        self.conf.register_cli_opts(log.common_cli_opts)
        self.conf.register_cli_opts(log.logging_cli_opts)
        self.conf.register_opts(l3_config.OPTS)
        self.conf.register_opts(ha.OPTS)
        agent_config.register_interface_driver_opts_helper(self.conf)
        agent_config.register_use_namespaces_opts_helper(self.conf)
        agent_config.register_root_helper(self.conf)
        self.conf.register_opts(interface.OPTS)
        self.conf.set_override('router_id', 'fake_id')
        self.conf.set_override('interface_driver',
                               'neutron.agent.linux.interface.NullDriver')
        self.conf.set_override('send_arp_for_ha', 1)
        self.conf.set_override('state_path', '')
        self.conf.root_helper = 'sudo'

        # Patch out anything that would shell out or inspect devices.
        self.device_exists_p = mock.patch(
            'neutron.agent.linux.ip_lib.device_exists')
        self.device_exists = self.device_exists_p.start()

        mock.patch('neutron.agent.l3.ha.AgentMixin'
                   '._init_ha_conf_path').start()
        mock.patch('neutron.agent.linux.keepalived.KeepalivedNotifierMixin'
                   '._get_full_config_file_path').start()

        self.utils_exec_p = mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()

        self.utils_replace_file_p = mock.patch(
            'neutron.agent.linux.utils.replace_file')
        self.utils_replace_file = self.utils_replace_file_p.start()

        self.external_process_p = mock.patch(
            'neutron.agent.linux.external_process.ProcessManager')
        self.external_process = self.external_process_p.start()

        self.send_arp_p = mock.patch(
            'neutron.agent.linux.ip_lib.send_gratuitous_arp')
        self.send_arp = self.send_arp_p.start()

        self.send_arp_proxyarp_p = mock.patch(
            'neutron.agent.linux.ip_lib.send_garp_for_proxyarp')
        self.send_arp_proxyarp = self.send_arp_proxyarp_p.start()

        # The interface driver the agent instantiates is replaced with a
        # MagicMock so plug/init_l3/unplug calls can be asserted on.
        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
        driver_cls = self.dvr_cls_p.start()
        self.mock_driver = mock.MagicMock()
        self.mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        driver_cls.return_value = self.mock_driver

        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
        ip_cls = self.ip_cls_p.start()
        self.mock_ip = mock.MagicMock()
        ip_cls.return_value = self.mock_ip

        ip_rule = mock.patch('neutron.agent.linux.ip_lib.IpRule').start()
        self.mock_rule = mock.MagicMock()
        ip_rule.return_value = self.mock_rule

        ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
        self.mock_ip_dev = mock.MagicMock()
        ip_dev.return_value = self.mock_ip_dev

        self.l3pluginApi_cls_p = mock.patch(
            'neutron.agent.l3.agent.L3PluginApi')
        l3pluginApi_cls = self.l3pluginApi_cls_p.start()
        self.plugin_api = mock.MagicMock()
        l3pluginApi_cls.return_value = self.plugin_api

        self.looping_call_p = mock.patch(
            'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
        self.looping_call_p.start()

        # Two fake centralized-SNAT ports used by the DVR gateway tests.
        self.snat_ports = [{'subnet': {'cidr': '152.2.0.0/16',
                                       'gateway_ip': '152.2.0.1',
                                       'id': _uuid()},
                            'network_id': _uuid(),
                            'device_owner': 'network:router_centralized_snat',
                            'ip_cidr': '152.2.0.13/16',
                            'mac_address': 'fa:16:3e:80:8d:80',
                            'fixed_ips': [{'subnet_id': _uuid(),
                                           'ip_address': '152.2.0.13'}],
                            'id': _uuid(), 'device_id': _uuid()},
                           {'subnet': {'cidr': '152.10.0.0/16',
                                       'gateway_ip': '152.10.0.1',
                                       'id': _uuid()},
                            'network_id': _uuid(),
                            'device_owner': 'network:router_centralized_snat',
                            'ip_cidr': '152.10.0.13/16',
                            'mac_address': 'fa:16:3e:80:8d:80',
                            'fixed_ips': [{'subnet_id': _uuid(),
                                           'ip_address': '152.10.0.13'}],
                            'id': _uuid(), 'device_id': _uuid()}]
def _prepare_internal_network_data(self):
port_id = _uuid()
router_id = _uuid()
network_id = _uuid()
router = prepare_router_data(num_internal_ports=2)
router_id = router['id']
ri = l3router.RouterInfo(router_id, self.conf.root_helper,
router=router)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
cidr = '99.0.1.9/24'
mac = 'ca:fe:de:ad:be:ef'
port = {'network_id': network_id,
'id': port_id, 'ip_cidr': cidr,
'mac_address': mac}
return agent, ri, port
    def test_periodic_sync_routers_task_raise_exception(self):
        """A failing router sync flags fullsync and skips namespace
        cleanup."""
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.plugin_api.get_routers.side_effect = ValueError()
        with mock.patch.object(agent, '_cleanup_namespaces') as f:
            self.assertRaises(ValueError, agent.periodic_sync_routers_task,
                              agent.context)
            self.assertTrue(agent.fullsync)
            self.assertFalse(f.called)

    def test_l3_initial_full_sync_done(self):
        """after_start() runs one periodic router sync."""
        with mock.patch.object(l3_agent.L3NATAgent,
                               'periodic_sync_routers_task') as router_sync:
            with mock.patch.object(eventlet, 'spawn_n'):
                agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
                agent.after_start()
                router_sync.assert_called_once_with(agent.context)

    def test_periodic_sync_routers_task_call_clean_stale_namespaces(self):
        """A successful sync triggers stale-namespace cleanup."""
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.plugin_api.get_routers.return_value = []
        with mock.patch.object(agent, '_cleanup_namespaces') as f:
            agent.periodic_sync_routers_task(agent.context)
            self.assertTrue(f.called)
    def test_router_info_create(self):
        """RouterInfo's namespace name ends with the router id."""
        id = _uuid()
        ns = "ns-" + id
        ri = l3router.RouterInfo(id, self.conf.root_helper, {}, ns_name=ns)
        self.assertTrue(ri.ns_name.endswith(id))

    def test_router_info_create_with_router(self):
        """RouterInfo keeps a reference to the router dict it was built
        from."""
        id = _uuid()
        ex_gw_port = {'id': _uuid(),
                      'network_id': _uuid(),
                      'fixed_ips': [{'ip_address': '19.4.4.4',
                                     'subnet_id': _uuid()}],
                      'subnet': {'cidr': '19.4.4.0/24',
                                 'gateway_ip': '19.4.4.1'}}
        router = {
            'id': _uuid(),
            'enable_snat': True,
            'routes': [],
            'gw_port': ex_gw_port}
        ns = "ns-" + id
        ri = l3router.RouterInfo(id, self.conf.root_helper, router, ns_name=ns)
        self.assertTrue(ri.ns_name.endswith(id))
        self.assertEqual(ri.router, router)

    def test_agent_create(self):
        """Agent constructs cleanly with the mocked config."""
        l3_agent.L3NATAgent(HOSTNAME, self.conf)
    def _test_internal_network_action(self, action):
        """Drive internal_network_added/removed and verify driver calls.

        :param action: 'add' or 'remove'; anything else raises.
        """
        agent, ri, port = self._prepare_internal_network_data()
        interface_name = agent.get_internal_device_name(port['id'])

        if action == 'add':
            self.device_exists.return_value = False
            agent.internal_network_added(ri, port)
            self.assertEqual(self.mock_driver.plug.call_count, 1)
            self.assertEqual(self.mock_driver.init_l3.call_count, 1)
            # Gratuitous ARP is sent for the port's fixed IP.
            self.send_arp.assert_called_once_with(ri.ns_name, interface_name,
                                                  '99.0.1.9',
                                                  mock.ANY, mock.ANY)
        elif action == 'remove':
            self.device_exists.return_value = True
            agent.internal_network_removed(ri, port)
            self.assertEqual(self.mock_driver.unplug.call_count, 1)
        else:
            raise Exception("Invalid action %s" % action)
    def _test_internal_network_action_dist(self, action):
        """DVR variant: adding an internal network on a dvr_snat host also
        wires the SNAT redirect and the centralized SNAT port."""
        agent, ri, port = self._prepare_internal_network_data()
        ri.router['distributed'] = True
        ri.router['gw_port_host'] = HOSTNAME
        agent.host = HOSTNAME
        agent.conf.agent_mode = 'dvr_snat'
        sn_port = {'fixed_ips': [{'ip_address': '20.0.0.31',
                                  'subnet_id': _uuid()}],
                   'subnet': {'gateway_ip': '20.0.0.1'},
                   'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                   'id': _uuid(),
                   'network_id': _uuid(),
                   'mac_address': 'ca:fe:de:ad:be:ef',
                   'ip_cidr': '20.0.0.31/24'}

        if action == 'add':
            self.device_exists.return_value = False

            agent._map_internal_interfaces = mock.Mock(return_value=sn_port)
            agent._snat_redirect_add = mock.Mock()
            agent._set_subnet_info = mock.Mock()
            agent._internal_network_added = mock.Mock()
            agent.internal_network_added(ri, port)
            self.assertEqual(agent._snat_redirect_add.call_count, 1)
            self.assertEqual(agent._set_subnet_info.call_count, 1)
            # Called once for the router port and once for the SNAT port.
            self.assertEqual(agent._internal_network_added.call_count, 2)
            agent._internal_network_added.assert_called_with(
                agent.get_snat_ns_name(ri.router['id']),
                sn_port['network_id'],
                sn_port['id'],
                sn_port['ip_cidr'],
                sn_port['mac_address'],
                agent.get_snat_int_device_name(sn_port['id']),
                dvr.SNAT_INT_DEV_PREFIX)
    # Thin wrappers driving _test_internal_network_action(_dist).
    def test_agent_add_internal_network(self):
        self._test_internal_network_action('add')

    def test_agent_add_internal_network_dist(self):
        self._test_internal_network_action_dist('add')

    def test_agent_remove_internal_network(self):
        self._test_internal_network_action('remove')
    def _test_external_gateway_action(self, action, router):
        """Drive external_gateway_added/removed for legacy and DVR
        routers and verify the expected driver/DVR-helper calls."""
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                                 router=router,
                                 ns_name=agent.get_ns_name(router['id']))
        # Special setup for dvr routers
        if router.get('distributed'):
            agent.conf.agent_mode = 'dvr_snat'
            agent.host = HOSTNAME
            agent._create_dvr_gateway = mock.Mock()
            agent.get_snat_interfaces = mock.Mock(return_value=self.snat_ports)

        ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
                                     'subnet_id': _uuid()}],
                      'subnet': {'gateway_ip': '20.0.0.1'},
                      'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                      'id': _uuid(),
                      'network_id': _uuid(),
                      'mac_address': 'ca:fe:de:ad:be:ef',
                      'ip_cidr': '20.0.0.30/24'}
        interface_name = agent.get_external_device_name(ex_gw_port['id'])

        if action == 'add':
            self.device_exists.return_value = False
            fake_fip = {'floatingips': [{'id': _uuid(),
                                         'floating_ip_address': '192.168.1.34',
                                         'fixed_ip_address': '192.168.0.1',
                                         'port_id': _uuid()}]}
            router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
            agent.external_gateway_added(ri, ex_gw_port, interface_name)
            if not router.get('distributed'):
                self.assertEqual(self.mock_driver.plug.call_count, 1)
                self.assertEqual(self.mock_driver.init_l3.call_count, 1)
                self.send_arp.assert_called_once_with(ri.ns_name,
                                                      interface_name,
                                                      '20.0.0.30',
                                                      mock.ANY, mock.ANY)
                # Floating IPs must be preserved on the gateway device.
                kwargs = {'preserve_ips': ['192.168.1.34/32'],
                          'namespace': 'qrouter-' + router['id'],
                          'gateway': '20.0.0.1',
                          'extra_subnets': [{'cidr': '172.16.0.0/24'}]}
                self.mock_driver.init_l3.assert_called_with(interface_name,
                                                            ['20.0.0.30/24'],
                                                            **kwargs)
            else:
                agent._create_dvr_gateway.assert_called_once_with(
                    ri, ex_gw_port, interface_name,
                    self.snat_ports)
        elif action == 'remove':
            self.device_exists.return_value = True
            agent.external_gateway_removed(ri, ex_gw_port, interface_name)
            self.assertEqual(self.mock_driver.unplug.call_count, 1)
        else:
            raise Exception("Invalid action %s" % action)
    def _prepare_ext_gw_test(self, agent):
        """Return (interface_name, ex_gw_port) for gateway-update tests;
        marks the device as already existing."""
        ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
                                     'subnet_id': _uuid()}],
                      'subnet': {'gateway_ip': '20.0.0.1'},
                      'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                      'id': _uuid(),
                      'network_id': _uuid(),
                      'mac_address': 'ca:fe:de:ad:be:ef',
                      'ip_cidr': '20.0.0.30/24'}
        interface_name = agent.get_external_device_name(ex_gw_port['id'])

        self.device_exists.return_value = True

        return interface_name, ex_gw_port
    def test_external_gateway_updated(self):
        """Updating an existing gateway re-runs init_l3 but does not
        re-plug the device."""
        router = prepare_router_data(num_internal_ports=2)
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                                 router=router,
                                 ns_name=agent.get_ns_name(router['id']))
        interface_name, ex_gw_port = self._prepare_ext_gw_test(agent)

        fake_fip = {'floatingips': [{'id': _uuid(),
                                     'floating_ip_address': '192.168.1.34',
                                     'fixed_ip_address': '192.168.0.1',
                                     'port_id': _uuid()}]}
        router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
        agent.external_gateway_updated(ri, ex_gw_port,
                                       interface_name)
        self.assertEqual(self.mock_driver.plug.call_count, 0)
        self.assertEqual(self.mock_driver.init_l3.call_count, 1)
        self.send_arp.assert_called_once_with(ri.ns_name, interface_name,
                                              '20.0.0.30', mock.ANY, mock.ANY)
        kwargs = {'preserve_ips': ['192.168.1.34/32'],
                  'namespace': 'qrouter-' + router['id'],
                  'gateway': '20.0.0.1',
                  'extra_subnets': [{'cidr': '172.16.0.0/24'}]}
        self.mock_driver.init_l3.assert_called_with(interface_name,
                                                    ['20.0.0.30/24'],
                                                    **kwargs)
    def _test_ext_gw_updated_dvr_agent_mode(self, host,
                                            agent_mode, expected_call_count):
        """Check whether a DVR gateway update adds a gateway depending on
        the agent mode and whether this host owns the gateway port."""
        router = prepare_router_data(num_internal_ports=2)
        ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                                 router=router)
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        interface_name, ex_gw_port = self._prepare_ext_gw_test(agent)
        agent._external_gateway_added = mock.Mock()

        # test agent mode = dvr (compute node)
        router['distributed'] = True
        router['gw_port_host'] = host
        agent.conf.agent_mode = agent_mode

        agent.external_gateway_updated(ri, ex_gw_port,
                                       interface_name)
        # no gateway should be added on dvr node
        self.assertEqual(expected_call_count,
                         agent._external_gateway_added.call_count)
    # Thin wrappers exercising the DVR gateway-update and gateway
    # add/remove helpers with different modes and host matches.
    def test_ext_gw_updated_dvr_agent_mode(self):
        # no gateway should be added on dvr node
        self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0)

    def test_ext_gw_updated_dvr_snat_agent_mode_no_host(self):
        # no gateway should be added on dvr_snat node without host match
        self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr_snat', 0)

    def test_ext_gw_updated_dvr_snat_agent_mode_host(self):
        # gateway should be added on dvr_snat node
        self._test_ext_gw_updated_dvr_agent_mode(self.conf.host,
                                                 'dvr_snat', 1)

    def test_agent_add_external_gateway(self):
        router = prepare_router_data(num_internal_ports=2)
        self._test_external_gateway_action('add', router)

    def test_agent_add_external_gateway_dist(self):
        router = prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        router['gw_port_host'] = HOSTNAME
        self._test_external_gateway_action('add', router)

    def test_agent_remove_external_gateway(self):
        router = prepare_router_data(num_internal_ports=2)
        self._test_external_gateway_action('remove', router)

    def test_agent_remove_external_gateway_dist(self):
        router = prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        router['gw_port_host'] = HOSTNAME
        self._test_external_gateway_action('remove', router)
    def _check_agent_method_called(self, agent, calls, namespace):
        """Assert the mocked ip-netns execute saw each command in
        ``calls`` (in any order).

        NOTE(review): ``agent`` and ``namespace`` are unused here; kept
        only so callers' signatures keep working.
        """
        self.mock_ip.netns.execute.assert_has_calls(
            [mock.call(call, check_exit_code=False) for call in calls],
            any_order=True)
    def _test_routing_table_update(self, namespace):
        """_update_routing_table must emit the matching `ip route` command
        for replace and delete, with and without namespaces."""
        if not namespace:
            self.conf.set_override('use_namespaces', False)

        router_id = _uuid()
        ri = l3router.RouterInfo(router_id, self.conf.root_helper, {})
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        fake_route1 = {'destination': '135.207.0.0/16',
                       'nexthop': '1.2.3.4'}
        fake_route2 = {'destination': '135.207.111.111/32',
                       'nexthop': '1.2.3.4'}

        agent._update_routing_table(ri, 'replace', fake_route1)
        expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(agent, expected, namespace)

        agent._update_routing_table(ri, 'delete', fake_route1)
        expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(agent, expected, namespace)

        agent._update_routing_table(ri, 'replace', fake_route2)
        expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(agent, expected, namespace)

        agent._update_routing_table(ri, 'delete', fake_route2)
        expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(agent, expected, namespace)
def test_agent_routing_table_updated(self):
self._test_routing_table_update(namespace=True)
def test_agent_routing_table_updated_no_namespace(self):
    """Routing-table updates with namespaces disabled."""
    self._test_routing_table_update(namespace=False)
def test_routes_updated(self):
    """routes_updated with namespaces enabled."""
    self._test_routes_updated(namespace=True)
def test_routes_updated_no_namespace(self):
    """routes_updated with namespaces disabled."""
    self._test_routes_updated(namespace=False)
def _test_routes_updated(self, namespace=True):
    """Verify routes_updated adds new routes and prunes removed ones.

    Starts with no routes, adds two, then shrinks the router's route
    list step by step and checks a 'delete' command is issued for each
    route that disappears.
    """
    if not namespace:
        self.conf.set_override('use_namespaces', False)
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router_id = _uuid()
    ri = l3router.RouterInfo(router_id, self.conf.root_helper, {})
    ri.router = {}
    fake_old_routes = []
    fake_new_routes = [{'destination': "110.100.31.0/24",
                        'nexthop': "10.100.10.30"},
                       {'destination': "110.100.30.0/24",
                        'nexthop': "10.100.10.30"}]
    ri.routes = fake_old_routes
    ri.router['routes'] = fake_new_routes
    # two new routes -> two 'replace' commands
    agent.routes_updated(ri)
    expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24',
                 'via', '10.100.10.30'],
                ['ip', 'route', 'replace', 'to', '110.100.31.0/24',
                 'via', '10.100.10.30']]
    self._check_agent_method_called(agent, expected, namespace)
    # one route dropped -> one 'delete' command
    fake_new_routes = [{'destination': "110.100.30.0/24",
                        'nexthop': "10.100.10.30"}]
    ri.router['routes'] = fake_new_routes
    agent.routes_updated(ri)
    expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24',
                 'via', '10.100.10.30']]
    self._check_agent_method_called(agent, expected, namespace)
    # last route dropped -> final 'delete' command
    fake_new_routes = []
    ri.router['routes'] = fake_new_routes
    agent.routes_updated(ri)
    expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24',
                 'via', '10.100.10.30']]
    self._check_agent_method_called(agent, expected, namespace)
def _verify_snat_rules(self, rules, router, negate=False):
interfaces = router[l3_constants.INTERFACE_KEY]
source_cidrs = []
for iface in interfaces:
prefix = iface['subnet']['cidr'].split('/')[1]
source_cidr = "%s/%s" % (iface['fixed_ips'][0]['ip_address'],
prefix)
source_cidrs.append(source_cidr)
source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address']
interface_name = ('qg-%s' % router['gw_port']['id'])[:14]
expected_rules = [
'! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' %
(interface_name, interface_name),
'-o %s -j SNAT --to-source %s' % (interface_name, source_nat_ip)]
for r in rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
def test__get_snat_idx_ipv4(self):
    """An IPv4 CIDR maps to the numeric value of its network address."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    # 101.12.13.00 == 0x650C0D00
    self.assertEqual(0x650C0D00, agent._get_snat_idx('101.12.13.00/24'))
def test__get_snat_idx_ipv6(self):
    """An IPv6 CIDR maps to its 30-bit xor-folded crc32 value."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    # 0x3D345705 is the 30-bit xor-folded crc32 of this CIDR string
    self.assertEqual(0x3D345705,
                     agent._get_snat_idx('2620:0:a03:e100::/64'))
def test__get_snat_idx_ipv6_below_32768(self):
    """Small IPv6 crc32 values are offset to avoid low index ranges."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    # crc32('d488::/30') == 0x1BD7; 0x1BD7 + 0x3FFFFFFF == 0x40001BD6
    self.assertEqual(0x40001BD6, agent._get_snat_idx('d488::/30'))
def test__map_internal_interfaces(self):
    """_map_internal_interfaces matches ports by subnet_id only."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(num_internal_ports=4)
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    # candidate port sharing the first internal port's subnet
    test_port = {
        'mac_address': '00:12:23:34:45:56',
        'fixed_ips': [{'subnet_id': _get_subnet_id(
            router[l3_constants.INTERFACE_KEY][0]),
            'ip_address': '101.12.13.14'}]}
    internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
    # test valid case: matching subnet_id -> port is returned
    res_port = agent._map_internal_interfaces(ri,
                                              internal_ports[0],
                                              [test_port])
    self.assertEqual(test_port, res_port)
    # test invalid case: unknown subnet_id -> no match, None returned
    test_port['fixed_ips'][0]['subnet_id'] = 1234
    res_ip = agent._map_internal_interfaces(ri,
                                            internal_ports[0],
                                            [test_port])
    self.assertNotEqual(test_port, res_ip)
    self.assertIsNone(res_ip)
def test_get_internal_port(self):
    """get_internal_port returns the port on the requested subnet."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(num_internal_ports=4)
    subnet_ids = [_get_subnet_id(port) for port in
                  router[l3_constants.INTERFACE_KEY]]
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    # Test Basic cases: each lookup returns a port whose fixed IP is on
    # the requested subnet
    port = agent.get_internal_port(ri, subnet_ids[0])
    fips = port.get('fixed_ips', [])
    subnet_id = fips[0]['subnet_id']
    self.assertEqual(subnet_ids[0], subnet_id)
    port = agent.get_internal_port(ri, subnet_ids[1])
    fips = port.get('fixed_ips', [])
    subnet_id = fips[0]['subnet_id']
    self.assertEqual(subnet_ids[1], subnet_id)
    port = agent.get_internal_port(ri, subnet_ids[3])
    fips = port.get('fixed_ips', [])
    subnet_id = fips[0]['subnet_id']
    self.assertEqual(subnet_ids[3], subnet_id)
    # Test miss cases: unknown subnet -> None; and a hit never returns
    # a port from a different subnet
    no_port = agent.get_internal_port(ri, FAKE_ID)
    self.assertIsNone(no_port)
    port = agent.get_internal_port(ri, subnet_ids[0])
    fips = port.get('fixed_ips', [])
    subnet_id = fips[0]['subnet_id']
    self.assertNotEqual(subnet_ids[3], subnet_id)
def test__set_subnet_arp_info(self):
    """ARP entries are added for DVR routers and skipped otherwise."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(num_internal_ports=2)
    router['distributed'] = True
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
    test_ports = [{'mac_address': '00:11:22:33:44:55',
                   'device_owner': 'network:dhcp',
                   'subnet_id': _get_subnet_id(ports[0]),
                   'fixed_ips': [{'ip_address': '1.2.3.4'}]}]
    self.plugin_api.get_ports_by_subnet.return_value = test_ports
    # Test basic case: distributed router -> neighbour entry added
    ports[0]['subnet']['id'] = _get_subnet_id(ports[0])
    agent._set_subnet_arp_info(ri, ports[0])
    self.mock_ip_dev.neigh.add.assert_called_once_with(
        4, '1.2.3.4', '00:11:22:33:44:55')
    # Test negative case: centralized router must not touch ARP.
    # The original called the non-existent mock method ``never_called()``,
    # which merely records a call on the mock and asserts nothing.
    # Verify instead that the call count did not grow beyond the single
    # call made above.
    router['distributed'] = False
    agent._set_subnet_arp_info(ri, ports[0])
    self.assertEqual(1, self.mock_ip_dev.neigh.add.call_count)
def test_add_arp_entry(self):
    """add_arp_entry RPC installs a neighbour entry for a known router."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(num_internal_ports=2)
    subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0])
    arp_table = {'ip_address': '1.7.23.11',
                 'mac_address': '00:11:22:33:44:55',
                 'subnet_id': subnet_id}
    payload = {'arp_table': arp_table, 'router_id': router['id']}
    # the router must be known to the agent for the entry to be applied
    agent._router_added(router['id'], router)
    agent.add_arp_entry(None, payload)
    agent.router_deleted(None, router['id'])
    self.mock_ip_dev.neigh.add.assert_called_once_with(
        4, '1.7.23.11', '00:11:22:33:44:55')
def test_add_arp_entry_no_routerinfo(self):
    """add_arp_entry for an unknown router is a no-op."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(num_internal_ports=2)
    subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0])
    arp_table = {'ip_address': '1.7.23.11',
                 'mac_address': '00:11:22:33:44:55',
                 'subnet_id': subnet_id}
    payload = {'arp_table': arp_table, 'router_id': router['id']}
    # router was never added to the agent, so no update must happen
    agent._update_arp_entry = mock.Mock()
    agent.add_arp_entry(None, payload)
    self.assertFalse(agent._update_arp_entry.called)
def test__update_arp_entry_with_no_subnet(self):
    """_update_arp_entry skips device access for an unknown subnet."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    ri = l3router.RouterInfo(
        'foo_router_id', mock.ANY,
        {'distributed': True, 'gw_port_host': HOSTNAME})
    with mock.patch.object(l3_agent.ip_lib, 'IPDevice') as f:
        agent._update_arp_entry(ri, mock.ANY, mock.ANY,
                                'foo_subnet_id', 'add')
    # no IPDevice should ever have been constructed
    self.assertFalse(f.call_count)
def test_del_arp_entry(self):
    """del_arp_entry RPC removes a previously added neighbour entry."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(num_internal_ports=2)
    subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0])
    arp_table = {'ip_address': '1.5.25.15',
                 'mac_address': '00:44:33:22:11:55',
                 'subnet_id': subnet_id}
    payload = {'arp_table': arp_table, 'router_id': router['id']}
    agent._router_added(router['id'], router)
    # first add the entry
    agent.add_arp_entry(None, payload)
    # now delete it
    agent.del_arp_entry(None, payload)
    self.mock_ip_dev.neigh.delete.assert_called_once_with(
        4, '1.5.25.15', '00:44:33:22:11:55')
    agent.router_deleted(None, router['id'])
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def _test_scan_fip_ports(self, ri, ip_list, IPDevice):
    """Run scan_fip_ports with the FIP device reporting ``ip_list``."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    self.device_exists.return_value = True
    IPDevice.return_value = device = mock.Mock()
    device.addr.list.return_value = ip_list
    agent.scan_fip_ports(ri)
def test_scan_fip_ports_restart_fips(self):
    """Existing FIP addresses on the device are counted on restart."""
    router = prepare_router_data()
    ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
                              router=router)
    ri.router['distributed'] = True
    existing_cidrs = [{'cidr': '111.2.3.4/32'},
                      {'cidr': '111.2.3.5/32'}]
    self._test_scan_fip_ports(ri, existing_cidrs)
    self.assertEqual(ri.dist_fip_count, 2)
def test_scan_fip_ports_restart_none(self):
    """No FIP addresses on the device yields a zero count."""
    router = prepare_router_data()
    ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
                              router=router)
    ri.router['distributed'] = True
    self._test_scan_fip_ports(ri, [])
    self.assertEqual(ri.dist_fip_count, 0)
def test_scan_fip_ports_restart_zero(self):
    """A pre-set zero count is preserved when the device lists nothing."""
    router = prepare_router_data()
    ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
                              router=router)
    ri.router['distributed'] = True
    ri.dist_fip_count = 0
    self._test_scan_fip_ports(ri, None)
    self.assertEqual(ri.dist_fip_count, 0)
def test_process_cent_router(self):
    """process_router on a centralized RouterInfo."""
    router_data = prepare_router_data()
    router_info = l3router.RouterInfo(
        router_data['id'], self.conf.root_helper, router=router_data)
    self._test_process_router(router_info)
def test_process_dist_router(self):
    """process_router on a distributed (DVR) router."""
    router = prepare_router_data()
    ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
                              router=router)
    subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0])
    ri.router['distributed'] = True
    # SNAT interface data the DVR code path expects from the server
    ri.router['_snat_router_interfaces'] = [{
        'fixed_ips': [{'subnet_id': subnet_id,
                       'ip_address': '1.2.3.4'}]}]
    ri.router['gw_port_host'] = None
    self._test_process_router(ri)
def _test_process_router(self, ri):
    """Full process_router lifecycle for a router instance.

    Steps through: initial processing, a floating-IP fixed-IP remap,
    a gateway-IP change (external_gateway_updated expected), floating
    IP removal, and finally full teardown once all ports are gone.
    The floating-IP helpers are mocked so only call patterns are
    checked here.
    """
    router = ri.router
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    agent.host = HOSTNAME
    fake_fip_id = 'fake_fip_id'
    agent.create_dvr_fip_interfaces = mock.Mock()
    agent.process_router_floating_ip_addresses = mock.Mock()
    agent.process_router_floating_ip_nat_rules = mock.Mock()
    agent.process_router_floating_ip_addresses.return_value = {
        fake_fip_id: 'ACTIVE'}
    agent.external_gateway_added = mock.Mock()
    agent.external_gateway_updated = mock.Mock()
    fake_floatingips1 = {'floatingips': [
        {'id': fake_fip_id,
         'floating_ip_address': '8.8.8.8',
         'fixed_ip_address': '7.7.7.7',
         'port_id': _uuid(),
         'host': HOSTNAME}]}
    # initial pass: FIP address and NAT-rule processing must be invoked
    agent.process_router(ri)
    ex_gw_port = agent._get_ex_gw_port(ri)
    agent.process_router_floating_ip_addresses.assert_called_with(
        ri, ex_gw_port)
    agent.process_router_floating_ip_addresses.reset_mock()
    agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
    agent.process_router_floating_ip_nat_rules.reset_mock()
    agent.external_gateway_added.reset_mock()
    # remap floating IP to a new fixed ip
    fake_floatingips2 = copy.deepcopy(fake_floatingips1)
    fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
    router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
    agent.process_router(ri)
    ex_gw_port = agent._get_ex_gw_port(ri)
    agent.process_router_floating_ip_addresses.assert_called_with(
        ri, ex_gw_port)
    agent.process_router_floating_ip_addresses.reset_mock()
    agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
    agent.process_router_floating_ip_nat_rules.reset_mock()
    # a remap must not re-add or update the gateway
    self.assertEqual(agent.external_gateway_added.call_count, 0)
    self.assertEqual(agent.external_gateway_updated.call_count, 0)
    agent.external_gateway_added.reset_mock()
    agent.external_gateway_updated.reset_mock()
    # change the ex_gw_port a bit to test gateway update
    new_gw_port = copy.deepcopy(ri.router['gw_port'])
    ri.router['gw_port'] = new_gw_port
    old_ip = (netaddr.IPAddress(ri.router['gw_port']
                                ['fixed_ips'][0]['ip_address']))
    ri.router['gw_port']['fixed_ips'][0]['ip_address'] = str(old_ip + 1)
    agent.process_router(ri)
    ex_gw_port = agent._get_ex_gw_port(ri)
    agent.process_router_floating_ip_addresses.reset_mock()
    agent.process_router_floating_ip_nat_rules.reset_mock()
    # a changed gateway IP triggers update, not add
    self.assertEqual(agent.external_gateway_added.call_count, 0)
    self.assertEqual(agent.external_gateway_updated.call_count, 1)
    # remove just the floating ips
    del router[l3_constants.FLOATINGIP_KEY]
    agent.process_router(ri)
    ex_gw_port = agent._get_ex_gw_port(ri)
    agent.process_router_floating_ip_addresses.assert_called_with(
        ri, ex_gw_port)
    agent.process_router_floating_ip_addresses.reset_mock()
    agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
    agent.process_router_floating_ip_nat_rules.reset_mock()
    # now no ports so state is torn down
    del router[l3_constants.INTERFACE_KEY]
    del router['gw_port']
    agent.process_router(ri)
    self.assertEqual(self.send_arp.call_count, 1)
    # after teardown the FIP helpers run only for distributed routers
    distributed = ri.router.get('distributed', False)
    self.assertEqual(agent.process_router_floating_ip_addresses.called,
                     distributed)
    self.assertEqual(agent.process_router_floating_ip_nat_rules.called,
                     distributed)
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def _test_process_router_floating_ip_addresses_add(self, ri,
                                                   agent, IPDevice):
    """A new floating IP gets its /32 added and is reported ACTIVE."""
    floating_ips = agent.get_floating_ips(ri)
    fip_id = floating_ips[0]['id']
    IPDevice.return_value = device = mock.Mock()
    # device currently has no addresses, so the FIP must be added
    device.addr.list.return_value = []
    ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
    ex_gw_port = {'id': _uuid()}
    with mock.patch.object(lla.LinkLocalAllocator, '_write'):
        if ri.router['distributed']:
            agent.create_dvr_fip_interfaces(ri, ex_gw_port)
        fip_statuses = agent.process_router_floating_ip_addresses(
            ri, ex_gw_port)
    self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
                     fip_statuses)
    device.addr.add.assert_called_once_with(4, '15.1.2.3/32', '15.1.2.3')
def test_process_router_floating_ip_nat_rules_add(self):
    """NAT rules for a floating IP are tagged and installed."""
    fip = {
        'id': _uuid(), 'port_id': _uuid(),
        'floating_ip_address': '15.1.2.3',
        'fixed_ip_address': '192.168.0.1'
    }
    ri = mock.MagicMock()
    # force the truthiness check on 'distributed' to be False (py2)
    ri.router['distributed'].__nonzero__ = lambda self: False
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    agent.get_floating_ips = mock.Mock(return_value=[fip])
    agent.process_router_floating_ip_nat_rules(ri)
    nat = ri.iptables_manager.ipv4['nat']
    # old rules are cleared first, then each forward rule re-added
    nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
    rules = agent.floating_forward_rules('15.1.2.3', '192.168.0.1')
    for chain, rule in rules:
        nat.add_rule.assert_any_call(chain, rule, tag='floating_ip')
def test_process_router_cent_floating_ip_add(self):
    """Floating-IP add on a centralized router."""
    fake_floatingips = {'floatingips': [
        {'id': _uuid(),
         'floating_ip_address': '15.1.2.3',
         'fixed_ip_address': '192.168.0.1',
         'floating_network_id': _uuid(),
         'port_id': _uuid(),
         'host': HOSTNAME}]}
    router = prepare_router_data(enable_snat=True)
    router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    self._test_process_router_floating_ip_addresses_add(ri, agent)
def test_process_router_dist_floating_ip_add(self):
    """Floating-IP add on a DVR router; only this host's FIP applies."""
    fake_floatingips = {'floatingips': [
        {'id': _uuid(),
         'host': HOSTNAME,
         'floating_ip_address': '15.1.2.3',
         'fixed_ip_address': '192.168.0.1',
         'floating_network_id': _uuid(),
         'port_id': _uuid()},
        # second FIP is bound elsewhere and must be ignored here
        {'id': _uuid(),
         'host': 'some-other-host',
         'floating_ip_address': '15.1.2.4',
         'fixed_ip_address': '192.168.0.10',
         'floating_network_id': _uuid(),
         'port_id': _uuid()}]}
    router = prepare_router_data(enable_snat=True)
    router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
    router['distributed'] = True
    ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
                              router=router)
    ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
    ri.dist_fip_count = 0
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    agent.host = HOSTNAME
    # pre-provisioned agent gateway port the DVR FIP path requires
    agent.agent_gateway_port = (
        {'fixed_ips': [{'ip_address': '20.0.0.30',
                        'subnet_id': _uuid()}],
         'subnet': {'gateway_ip': '20.0.0.1'},
         'id': _uuid(),
         'network_id': _uuid(),
         'mac_address': 'ca:fe:de:ad:be:ef',
         'ip_cidr': '20.0.0.30/24'}
    )
    self._test_process_router_floating_ip_addresses_add(ri, agent)
def test_get_router_cidrs_returns_cidrs(self):
    """For a non-HA router the device's address list is returned."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    ri = mock.MagicMock()
    ri.is_ha = False
    expected_cidrs = ['15.1.2.2/24', '15.1.2.3/32']
    device = mock.MagicMock()
    device.addr.list.return_value = [
        {'cidr': cidr} for cidr in expected_cidrs]
    self.assertEqual(set(expected_cidrs),
                     agent._get_router_cidrs(ri, device))
def test_get_router_cidrs_returns_ha_cidrs(self):
    """For an HA router the keepalived-tracked CIDRs are returned."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    ri = mock.MagicMock()
    ri.is_ha = True
    device = mock.MagicMock()
    device.name.return_value = 'eth2'
    expected_cidrs = ['15.1.2.2/24', '15.1.2.3/32']
    agent._ha_get_existing_cidrs = mock.MagicMock(
        return_value=expected_cidrs)
    self.assertEqual(set(expected_cidrs),
                     agent._get_router_cidrs(ri, device))
# TODO(mrsmith): refactor for DVR cases
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_remove(self, IPDevice):
    """A stale FIP address on the device is removed with conntrack."""
    IPDevice.return_value = device = mock.Mock()
    # device holds an address that no longer maps to any floating IP
    device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
    ri = mock.MagicMock()
    ri.router.get.return_value = []
    type(ri).is_ha = mock.PropertyMock(return_value=False)
    ri.router['distributed'].__nonzero__ = lambda self: False
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    fip_statuses = agent.process_router_floating_ip_addresses(
        ri, {'id': _uuid()})
    self.assertEqual({}, fip_statuses)
    device.addr.delete.assert_called_once_with(4, '15.1.2.3/32')
    # conntrack state for the removed address must be flushed too
    self.mock_driver.delete_conntrack_state.assert_called_once_with(
        root_helper=self.conf.root_helper,
        namespace=ri.ns_name,
        ip='15.1.2.3/32')
def test_process_router_floating_ip_nat_rules_remove(self):
    """With no floating IPs, tagged NAT rules are simply cleared."""
    ri = mock.MagicMock()
    ri.router.get.return_value = []
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    agent.process_router_floating_ip_nat_rules(ri)
    nat = ri.iptables_manager.ipv4['nat']
    # The original duplicated this lookup with a typo'd key ('nat`',
    # stray backtick).  It only worked because MagicMock.__getitem__
    # returns the same child mock for any key; the dead, misleading
    # line has been removed.
    nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_remap(self, IPDevice):
    """A FIP already on the device is left alone (no add, no delete)."""
    fip_id = _uuid()
    fip = {
        'id': fip_id, 'port_id': _uuid(),
        'floating_ip_address': '15.1.2.3',
        'fixed_ip_address': '192.168.0.2'
    }
    IPDevice.return_value = device = mock.Mock()
    # the FIP's /32 is already configured on the device
    device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
    ri = mock.MagicMock()
    ri.router['distributed'].__nonzero__ = lambda self: False
    type(ri).is_ha = mock.PropertyMock(return_value=False)
    ri.router.get.return_value = [fip]
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    fip_statuses = agent.process_router_floating_ip_addresses(
        ri, {'id': _uuid()})
    self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
                     fip_statuses)
    self.assertFalse(device.addr.add.called)
    self.assertFalse(device.addr.delete.called)
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_with_disabled_floating_ip(self, IPDevice):
    """A FIP absent from the router data yields no status entry."""
    fip_id = _uuid()
    fip = {
        'id': fip_id, 'port_id': _uuid(),
        'floating_ip_address': '15.1.2.3',
        'fixed_ip_address': '192.168.0.2'
    }
    ri = mock.MagicMock()
    # FIP is cached on the router info but the router payload has none
    ri.floating_ips = [fip]
    ri.router.get.return_value = []
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    fip_statuses = agent.process_router_floating_ip_addresses(
        ri, {'id': _uuid()})
    self.assertIsNone(fip_statuses.get(fip_id))
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_with_device_add_error(self, IPDevice):
    """A failing address add marks the floating IP as ERROR."""
    IPDevice.return_value = device = mock.Mock()
    # simulate the ip-address-add command blowing up
    device.addr.add.side_effect = RuntimeError()
    device.addr.list.return_value = []
    fip_id = _uuid()
    fip = {
        'id': fip_id, 'port_id': _uuid(),
        'floating_ip_address': '15.1.2.3',
        'fixed_ip_address': '192.168.0.2'
    }
    ri = mock.MagicMock()
    type(ri).is_ha = mock.PropertyMock(return_value=False)
    ri.router.get.return_value = [fip]
    ri.router['distributed'].__nonzero__ = lambda self: False
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    fip_statuses = agent.process_router_floating_ip_addresses(
        ri, {'id': _uuid()})
    self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR},
                     fip_statuses)
def test_process_router_snat_disabled(self):
    """Disabling SNAT removes exactly the two SNAT rules."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(enable_snat=True)
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    # Process with NAT
    agent.process_router(ri)
    orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
    # Reprocess without NAT
    router['enable_snat'] = False
    # Reassign the router object to RouterInfo
    ri.router = router
    agent.process_router(ri)
    # For some reason set logic does not work well with
    # IpTablesRule instances
    nat_rules_delta = [r for r in orig_nat_rules
                       if r not in ri.iptables_manager.ipv4['nat'].rules]
    self.assertEqual(len(nat_rules_delta), 2)
    self._verify_snat_rules(nat_rules_delta, router)
    self.assertEqual(self.send_arp.call_count, 1)
def test_process_router_snat_enabled(self):
    """Enabling SNAT adds exactly the two SNAT rules."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(enable_snat=False)
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    # Process without NAT
    agent.process_router(ri)
    orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
    # Reprocess with NAT
    router['enable_snat'] = True
    # Reassign the router object to RouterInfo
    ri.router = router
    agent.process_router(ri)
    # For some reason set logic does not work well with
    # IpTablesRule instances
    nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
                       if r not in orig_nat_rules]
    self.assertEqual(len(nat_rules_delta), 2)
    self._verify_snat_rules(nat_rules_delta, router)
    self.assertEqual(self.send_arp.call_count, 1)
def test_process_router_interface_added(self):
    """Adding an interface triggers another gratuitous-ARP send."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data()
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    # Process with NAT
    agent.process_router(ri)
    # Add an interface and reprocess
    router_append_interface(router)
    # Reassign the router object to RouterInfo
    ri.router = router
    agent.process_router(ri)
    # send_arp is called both times process_router is called
    self.assertEqual(self.send_arp.call_count, 2)
def test_process_ipv6_only_gw(self):
    """An IPv6-only gateway must not produce IPv4 NAT rules."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(ip_version=6)
    # Get NAT rules without the gw_port
    gw_port = router['gw_port']
    router['gw_port'] = None
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    agent.process_router(ri)
    orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
    # Get NAT rules with the gw_port
    router['gw_port'] = gw_port
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    with mock.patch.object(
            agent,
            'external_gateway_nat_rules') as external_gateway_nat_rules:
        agent.process_router(ri)
        new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
        # There should be no change with the NAT rules
        self.assertFalse(external_gateway_nat_rules.called)
        self.assertEqual(orig_nat_rules, new_nat_rules)
def _process_router_ipv6_interface_added(
        self, router, ra_mode=None, addr_mode=None):
    """Add an IPv6 interface to ``router`` and reprocess it.

    Returns the RouterInfo so callers can make radvd assertions.
    Also checks that the IPv4 NAT rule set is untouched.
    """
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    # Process with NAT
    agent.process_router(ri)
    orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
    # Add an IPv6 interface and reprocess
    router_append_interface(router, count=1, ip_version=6, ra_mode=ra_mode,
                            addr_mode=addr_mode)
    # Reassign the router object to RouterInfo
    ri.router = router
    agent.process_router(ri)
    # IPv4 NAT rules should not be changed by adding an IPv6 interface
    nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
                       if r not in orig_nat_rules]
    self.assertFalse(nat_rules_delta)
    return ri
def _expected_call_lookup_ri_process(self, ri, process):
    """Expected call if a process is looked up in a router instance."""
    lookup = mock.call(cfg.CONF,
                       ri.router['id'],
                       self.conf.root_helper,
                       ri.ns_name,
                       process)
    return [lookup]
def _assert_ri_process_enabled(self, ri, process):
    """Verify that process was enabled for a router instance."""
    expected = self._expected_call_lookup_ri_process(ri, process)
    expected += [mock.call().enable(mock.ANY, True)]
    self.assertEqual(expected, self.external_process.mock_calls)
def _assert_ri_process_disabled(self, ri, process):
    """Verify that process was disabled for a router instance."""
    expected = self._expected_call_lookup_ri_process(ri, process)
    expected += [mock.call().disable()]
    self.assertEqual(expected, self.external_process.mock_calls)
def test_process_router_ipv6_interface_added(self):
    """radvd is enabled when an IPv6 interface appears (no prefix)."""
    router = prepare_router_data()
    ri = self._process_router_ipv6_interface_added(router)
    self._assert_ri_process_enabled(ri, 'radvd')
    # Expect radvd configured without prefix
    self.assertNotIn('prefix',
                     self.utils_replace_file.call_args[0][1].split())
def test_process_router_ipv6_slaac_interface_added(self):
    """radvd config contains a prefix when ra_mode is SLAAC."""
    router = prepare_router_data()
    ri = self._process_router_ipv6_interface_added(
        router, ra_mode=l3_constants.IPV6_SLAAC)
    self._assert_ri_process_enabled(ri, 'radvd')
    # Expect radvd configured with prefix
    self.assertIn('prefix',
                  self.utils_replace_file.call_args[0][1].split())
def test_process_router_ipv6v4_interface_added(self):
    """radvd is enabled when IPv4 and IPv6 interfaces are added together."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data()
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    # Process with NAT
    agent.process_router(ri)
    # Add an IPv4 and IPv6 interface and reprocess
    router_append_interface(router, count=1, ip_version=4)
    router_append_interface(router, count=1, ip_version=6)
    # Reassign the router object to RouterInfo
    ri.router = router
    agent.process_router(ri)
    self._assert_ri_process_enabled(ri, 'radvd')
def test_process_router_interface_removed(self):
    """Removing an interface still triggers gratuitous ARP on reprocess."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data(num_internal_ports=2)
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    # Process with NAT
    agent.process_router(ri)
    # Remove an interface and reprocess
    del router[l3_constants.INTERFACE_KEY][1]
    # Reassign the router object to RouterInfo
    ri.router = router
    agent.process_router(ri)
    # send_arp is called both times process_router is called
    self.assertEqual(self.send_arp.call_count, 2)
def test_process_router_ipv6_interface_removed(self):
    """radvd is disabled once the last IPv6 interface is removed."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data()
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    ri.router = router
    agent.process_router(ri)
    # Add an IPv6 interface and reprocess
    router_append_interface(router, count=1, ip_version=6)
    agent.process_router(ri)
    self._assert_ri_process_enabled(ri, 'radvd')
    # Reset the calls so we can check for disable radvd
    self.external_process.reset_mock()
    # Remove the IPv6 interface and reprocess
    del router[l3_constants.INTERFACE_KEY][1]
    agent.process_router(ri)
    self._assert_ri_process_disabled(ri, 'radvd')
def test_process_router_internal_network_added_unexpected_error(self):
    """A failed interface add is retried on the next processing pass."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data()
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    with mock.patch.object(
            l3_agent.L3NATAgent,
            'internal_network_added') as internal_network_added:
        # raise RuntimeError to simulate that an unexpected exception
        # occurs
        internal_network_added.side_effect = RuntimeError
        self.assertRaises(RuntimeError, agent.process_router, ri)
        # the port must NOT be recorded as processed after the failure
        self.assertNotIn(
            router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
        # The unexpected exception has been fixed manually
        internal_network_added.side_effect = None
        # periodic_sync_routers_task finds out that _rpc_loop failed to
        # process the router last time, it will retry in the next run.
        agent.process_router(ri)
        # We were able to add the port to ri.internal_ports
        self.assertIn(
            router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_internal_network_removed_unexpected_error(self):
    """A failed interface removal is retried on the next processing pass."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    router = prepare_router_data()
    ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                             router=router)
    agent.external_gateway_added = mock.Mock()
    # add an internal port
    agent.process_router(ri)
    with mock.patch.object(
            l3_agent.L3NATAgent,
            'internal_network_removed') as internal_net_removed:
        # raise RuntimeError to simulate that an unexpected exception
        # occurs
        internal_net_removed.side_effect = RuntimeError
        ri.internal_ports[0]['admin_state_up'] = False
        # The above port is set to down state, remove it.
        self.assertRaises(RuntimeError, agent.process_router, ri)
        # the port must still be tracked after the failed removal
        self.assertIn(
            router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
        # The unexpected exception has been fixed manually
        internal_net_removed.side_effect = None
        # periodic_sync_routers_task finds out that _rpc_loop failed to
        # process the router last time, it will retry in the next run.
        agent.process_router(ri)
        # We were able to remove the port from ri.internal_ports
        self.assertNotIn(
            router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_floatingip_disabled(self):
    """FIP status moves ACTIVE -> DOWN when the FIP disappears."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    with mock.patch.object(
            agent.plugin_rpc,
            'update_floatingip_statuses') as mock_update_fip_status:
        fip_id = _uuid()
        router = prepare_router_data(num_internal_ports=1)
        router[l3_constants.FLOATINGIP_KEY] = [
            {'id': fip_id,
             'floating_ip_address': '8.8.8.8',
             'fixed_ip_address': '7.7.7.7',
             'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
        ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                                 router=router)
        agent.external_gateway_added = mock.Mock()
        agent.process_router(ri)
        # Assess the call for putting the floating IP up was performed
        mock_update_fip_status.assert_called_once_with(
            mock.ANY, ri.router_id,
            {fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE})
        mock_update_fip_status.reset_mock()
        # Process the router again, this time without floating IPs
        router[l3_constants.FLOATINGIP_KEY] = []
        ri.router = router
        agent.process_router(ri)
        # Assess the call for putting the floating IP down was performed
        mock_update_fip_status.assert_called_once_with(
            mock.ANY, ri.router_id,
            {fip_id: l3_constants.FLOATINGIP_STATUS_DOWN})
def test_process_router_floatingip_exception(self):
    """A FIP-processing failure reports FLOATINGIP_STATUS_ERROR."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    agent.process_router_floating_ip_addresses = mock.Mock()
    agent.process_router_floating_ip_addresses.side_effect = RuntimeError
    with mock.patch.object(
            agent.plugin_rpc,
            'update_floatingip_statuses') as mock_update_fip_status:
        fip_id = _uuid()
        router = prepare_router_data(num_internal_ports=1)
        router[l3_constants.FLOATINGIP_KEY] = [
            {'id': fip_id,
             'floating_ip_address': '8.8.8.8',
             'fixed_ip_address': '7.7.7.7',
             'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
        ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
                                 router=router)
        agent.external_gateway_added = mock.Mock()
        agent.process_router(ri)
        # Assess the call for putting the floating IP into Error
        # was performed
        mock_update_fip_status.assert_called_once_with(
            mock.ANY, ri.router_id,
            {fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
def test_handle_router_snat_rules_distributed_without_snat_manager(self):
    """A DVR router with no SNAT manager logs and skips rule handling."""
    ri = dvr_router.DvrRouter(
        'foo_router_id', mock.ANY, {'distributed': True})
    ri.iptables_manager = mock.Mock()
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    with mock.patch.object(l3_agent.LOG, 'debug') as log_debug:
        agent._handle_router_snat_rules(
            ri, mock.ANY, mock.ANY, mock.ANY)
    # no snat manager was ever created, nothing applied, but logged
    self.assertIsNone(ri.snat_iptables_manager)
    self.assertFalse(ri.iptables_manager.called)
    self.assertTrue(log_debug.called)
def test_handle_router_snat_rules_add_back_jump(self):
    """The float-snat jump is re-added first after emptying the chain."""
    agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    ri = mock.MagicMock()
    port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
    ri.router = {'distributed': False}
    agent._handle_router_snat_rules(ri, port, "iface", "add_rules")
    nat = ri.iptables_manager.ipv4['nat']
    nat.empty_chain.assert_any_call('snat')
    nat.add_rule.assert_any_call('snat', '-j $float-snat')
    # the FIRST add_rule recorded must be the float-snat jump; the loop
    # breaks after inspecting it
    for call in nat.mock_calls:
        name, args, kwargs = call
        if name == 'add_rule':
            self.assertEqual(args, ('snat', '-j $float-snat'))
            self.assertEqual(kwargs, {})
            break
    def test_handle_router_snat_rules_add_rules(self):
        # Verify both the float-snat jump and the SNAT rule are rendered,
        # with the jump appearing before the SNAT rule.
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(_uuid(), self.conf.root_helper, {})
        ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
        ri.router = {'distributed': False}
        agent._handle_router_snat_rules(ri, ex_gw_port,
                                        "iface", "add_rules")
        # NOTE: Python 2 map() returns a list, which .index() below relies
        # on; under Python 3 this would need wrapping in list().
        nat_rules = map(str, ri.iptables_manager.ipv4['nat'].rules)
        wrap_name = ri.iptables_manager.wrap_name
        jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name,
                                                           wrap_name)
        snat_rule = ("-A %s-snat -o iface -j SNAT --to-source %s") % (
            wrap_name, ex_gw_port['fixed_ips'][0]['ip_address'])
        self.assertIn(jump_float_rule, nat_rules)
        self.assertIn(snat_rule, nat_rules)
        self.assertThat(nat_rules.index(jump_float_rule),
                        matchers.LessThan(nat_rules.index(snat_rule)))
def test_process_router_delete_stale_internal_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [FakeDev('qr-a1b2c3d4-e5'),
FakeDev('qr-b2c3d4e5-f6')]
stale_devnames = [dev.name for dev in stale_devlist]
get_devices_return = []
get_devices_return.extend(stale_devlist)
self.mock_ip.get_devices.return_value = get_devices_return
router = prepare_router_data(enable_snat=True, num_internal_ports=1)
ri = l3router.RouterInfo(router['id'],
self.conf.root_helper,
router=router)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
self.assertEqual(len(internal_ports), 1)
internal_port = internal_ports[0]
with contextlib.nested(mock.patch.object(l3_agent.L3NATAgent,
'internal_network_removed'),
mock.patch.object(l3_agent.L3NATAgent,
'internal_network_added'),
mock.patch.object(l3_agent.L3NATAgent,
'external_gateway_removed'),
mock.patch.object(l3_agent.L3NATAgent,
'external_gateway_added')
) as (internal_network_removed,
internal_network_added,
external_gateway_removed,
external_gateway_added):
agent.process_router(ri)
self.assertEqual(external_gateway_added.call_count, 1)
self.assertFalse(external_gateway_removed.called)
self.assertFalse(internal_network_removed.called)
internal_network_added.assert_called_once_with(
ri, internal_port)
self.assertEqual(self.mock_driver.unplug.call_count,
len(stale_devnames))
calls = [mock.call(stale_devname,
namespace=ri.ns_name,
prefix=l3_agent.INTERNAL_DEV_PREFIX)
for stale_devname in stale_devnames]
self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
def test_process_router_delete_stale_external_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [FakeDev('qg-a1b2c3d4-e5')]
stale_devnames = [dev.name for dev in stale_devlist]
router = prepare_router_data(enable_snat=True, num_internal_ports=1)
del router['gw_port']
ri = l3router.RouterInfo(router['id'],
self.conf.root_helper,
router=router)
self.mock_ip.get_devices.return_value = stale_devlist
agent.process_router(ri)
self.mock_driver.unplug.assert_called_with(
stale_devnames[0],
bridge="br-ex",
namespace=ri.ns_name,
prefix=l3_agent.EXTERNAL_DEV_PREFIX)
def test_router_deleted(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_deleted(None, FAKE_ID)
self.assertEqual(1, agent._queue.add.call_count)
def test_routers_updated(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.routers_updated(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
def test_removed_from_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_removed_from_agent(None, {'router_id': FAKE_ID})
self.assertEqual(1, agent._queue.add.call_count)
def test_added_to_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_added_to_agent(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
    def test_destroy_fip_namespace(self):
        # Destroying a FIP namespace should unplug the external fg- device
        # from br-ex and delete the fpr- veth end.
        namespaces = ['qrouter-foo', 'qrouter-bar']
        self.mock_ip.get_namespaces.return_value = namespaces
        self.mock_ip.get_devices.return_value = [FakeDev('fpr-aaaa'),
                                                 FakeDev('fg-aaaa')]
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._destroy_fip_namespace(namespaces[0])
        self.mock_driver.unplug.assert_called_once_with('fg-aaaa',
                                                        bridge='br-ex',
                                                        prefix='fg-',
                                                        namespace='qrouter'
                                                        '-foo')
        self.mock_ip.del_veth.assert_called_once_with('fpr-aaaa')
    def test_destroy_namespace(self):
        # Destroying a router namespace should unplug the internal qr-
        # device and delete the rfp- veth end.
        namespace = 'qrouter-bar'
        self.mock_ip.get_namespaces.return_value = [namespace]
        self.mock_ip.get_devices.return_value = [FakeDev('qr-aaaa'),
                                                 FakeDev('rfp-aaaa')]
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._destroy_namespace(namespace)
        self.mock_driver.unplug.assert_called_once_with('qr-aaaa',
                                                        prefix='qr-',
                                                        namespace='qrouter'
                                                        '-bar')
        self.mock_ip.del_veth.assert_called_once_with('rfp-aaaa')
def test_destroy_router_namespace_skips_ns_removal(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_router_namespace("fakens")
self.assertEqual(self.mock_ip.netns.delete.call_count, 0)
def test_destroy_router_namespace_removes_ns(self):
self.conf.set_override('router_delete_namespaces', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_router_namespace("fakens")
self.mock_ip.netns.delete.assert_called_once_with("fakens")
    def _configure_metadata_proxy(self, enableflag=True):
        # Shared helper: adds then removes a router, asserting that the
        # metadata proxy is spawned/destroyed only when the
        # enable_metadata_proxy option is left at its default (True).
        if not enableflag:
            self.conf.set_override('enable_metadata_proxy', False)
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router_id = _uuid()
        router = {'id': router_id,
                  'external_gateway_info': {},
                  'routes': [],
                  'distributed': False}
        driver = metadata_driver.MetadataDriver
        with mock.patch.object(
            driver, '_destroy_metadata_proxy') as destroy_proxy:
            with mock.patch.object(
                driver, '_spawn_metadata_proxy') as spawn_proxy:
                agent._process_added_router(router)
                if enableflag:
                    spawn_proxy.assert_called_with(router_id,
                                                   mock.ANY,
                                                   mock.ANY)
                else:
                    self.assertFalse(spawn_proxy.call_count)
                agent._router_removed(router_id)
                if enableflag:
                    destroy_proxy.assert_called_with(router_id,
                                                     mock.ANY,
                                                     mock.ANY)
                else:
                    self.assertFalse(destroy_proxy.call_count)
    def test_enable_metadata_proxy(self):
        # Default config: proxy must be spawned on add, destroyed on remove.
        self._configure_metadata_proxy()
    def test_disable_metadata_proxy_spawn(self):
        # enable_metadata_proxy=False: neither spawn nor destroy may happen.
        self._configure_metadata_proxy(enableflag=False)
    def test_router_id_specified_in_conf(self):
        # Without namespaces a non-empty router_id is mandatory: an empty
        # value aborts startup, a real value is adopted and disables the
        # stale-namespace cleanup pass.
        self.conf.set_override('use_namespaces', False)
        self.conf.set_override('router_id', '')
        self.assertRaises(SystemExit, l3_agent.L3NATAgent,
                          HOSTNAME, self.conf)
        self.conf.set_override('router_id', '1234')
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.assertEqual('1234', agent.conf.router_id)
        self.assertFalse(agent._clean_stale_namespaces)
    def test_process_router_if_compatible_with_no_ext_net_in_conf(self):
        # No gateway_external_network_id configured: the agent asks the
        # plugin for the external network id and accepts the router.
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.plugin_api.get_external_network_id.return_value = 'aaa'
        router = {'id': _uuid(),
                  'routes': [],
                  'admin_state_up': True,
                  'external_gateway_info': {'network_id': 'aaa'}}
        agent._process_router_if_compatible(router)
        self.assertIn(router['id'], agent.router_info)
        self.plugin_api.get_external_network_id.assert_called_with(
            agent.context)
    def test_process_router_if_compatible_with_cached_ext_net(self):
        # When target_ex_net_id is already cached and matches, no RPC
        # round-trip to the plugin is needed.
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.plugin_api.get_external_network_id.return_value = 'aaa'
        agent.target_ex_net_id = 'aaa'
        router = {'id': _uuid(),
                  'routes': [],
                  'admin_state_up': True,
                  'external_gateway_info': {'network_id': 'aaa'}}
        agent._process_router_if_compatible(router)
        self.assertIn(router['id'], agent.router_info)
        self.assertFalse(self.plugin_api.get_external_network_id.called)
    def test_process_router_if_compatible_with_stale_cached_ext_net(self):
        # A stale cached target_ex_net_id forces a refresh from the plugin
        # before the router is accepted.
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.plugin_api.get_external_network_id.return_value = 'aaa'
        agent.target_ex_net_id = 'bbb'
        router = {'id': _uuid(),
                  'routes': [],
                  'admin_state_up': True,
                  'external_gateway_info': {'network_id': 'aaa'}}
        agent._process_router_if_compatible(router)
        self.assertIn(router['id'], agent.router_info)
        self.plugin_api.get_external_network_id.assert_called_with(
            agent.context)
    def test_process_router_if_compatible_w_no_ext_net_and_2_net_plugin(self):
        # With multiple external networks and none configured, the plugin
        # error propagates and the router is not registered.
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = {'id': _uuid(),
                  'routes': [],
                  'admin_state_up': True,
                  'external_gateway_info': {'network_id': 'aaa'}}
        agent.router_info = {}
        self.plugin_api.get_external_network_id.side_effect = (
            n_exc.TooManyExternalNetworks())
        self.assertRaises(n_exc.TooManyExternalNetworks,
                          agent._process_router_if_compatible,
                          router)
        self.assertNotIn(router['id'], agent.router_info)
    def test_process_router_if_compatible_with_ext_net_in_conf(self):
        # Router gateway network mismatching the configured
        # gateway_external_network_id must be rejected.
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.plugin_api.get_external_network_id.return_value = 'aaa'
        router = {'id': _uuid(),
                  'routes': [],
                  'admin_state_up': True,
                  'external_gateway_info': {'network_id': 'bbb'}}
        agent.router_info = {}
        self.conf.set_override('gateway_external_network_id', 'aaa')
        self.assertRaises(n_exc.RouterNotCompatibleWithAgent,
                          agent._process_router_if_compatible,
                          router)
        self.assertNotIn(router['id'], agent.router_info)
    def test_process_router_if_compatible_with_no_bridge_no_ext_net(self):
        # An empty external_network_bridge means any external network is
        # acceptable, so the router is registered.
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.plugin_api.get_external_network_id.return_value = 'aaa'
        router = {'id': _uuid(),
                  'routes': [],
                  'admin_state_up': True,
                  'external_gateway_info': {'network_id': 'aaa'}}
        agent.router_info = {}
        self.conf.set_override('external_network_bridge', '')
        agent._process_router_if_compatible(router)
        self.assertIn(router['id'], agent.router_info)
    def test_nonexistent_interface_driver(self):
        # Startup must abort with a logged error both when the interface
        # driver option is unset and when it cannot be imported.
        self.conf.set_override('interface_driver', None)
        with mock.patch.object(l3_agent, 'LOG') as log:
            self.assertRaises(SystemExit, l3_agent.L3NATAgent,
                              HOSTNAME, self.conf)
            msg = 'An interface driver must be specified'
            log.error.assert_called_once_with(msg)
            self.conf.set_override('interface_driver', 'wrong_driver')
        with mock.patch.object(l3_agent, 'LOG') as log:
            self.assertRaises(SystemExit, l3_agent.L3NATAgent,
                              HOSTNAME, self.conf)
            msg = _LE("Error importing interface driver '%s'")
            log.error.assert_called_once_with(msg, 'wrong_driver')
def _cleanup_namespace_test(self,
stale_namespace_list,
router_list,
other_namespaces):
self.conf.set_override('router_delete_namespaces', True)
good_namespace_list = [l3_agent.NS_PREFIX + r['id']
for r in router_list]
good_namespace_list += [dvr.SNAT_NS_PREFIX + r['id']
for r in router_list]
self.mock_ip.get_namespaces.return_value = (stale_namespace_list +
good_namespace_list +
other_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertTrue(agent._clean_stale_namespaces)
pm = self.external_process.return_value
pm.reset_mock()
agent._destroy_router_namespace = mock.MagicMock()
agent._destroy_snat_namespace = mock.MagicMock()
ns_list = agent._list_namespaces()
agent._cleanup_namespaces(ns_list, [r['id'] for r in router_list])
# Expect process manager to disable metadata proxy per qrouter ns
qrouters = [n for n in stale_namespace_list
if n.startswith(l3_agent.NS_PREFIX)]
self.assertEqual(agent._destroy_router_namespace.call_count,
len(qrouters))
self.assertEqual(agent._destroy_snat_namespace.call_count,
len(stale_namespace_list) - len(qrouters))
expected_args = [mock.call(ns) for ns in qrouters]
agent._destroy_router_namespace.assert_has_calls(expected_args,
any_order=True)
self.assertFalse(agent._clean_stale_namespaces)
    def test_cleanup_namespace(self):
        # No known routers: every qrouter-/snat- namespace is stale, the
        # unrelated 'unknown' namespace must be left alone.
        self.conf.set_override('router_id', None)
        stale_namespaces = [l3_agent.NS_PREFIX + 'foo',
                            l3_agent.NS_PREFIX + 'bar',
                            dvr.SNAT_NS_PREFIX + 'foo']
        other_namespaces = ['unknown']
        self._cleanup_namespace_test(stale_namespaces,
                                     [],
                                     other_namespaces)
    def test_cleanup_namespace_with_registered_router_ids(self):
        # Namespaces of registered routers survive; the listed stale ones
        # (unknown ids) and only those are destroyed.
        self.conf.set_override('router_id', None)
        stale_namespaces = [l3_agent.NS_PREFIX + 'cccc',
                            l3_agent.NS_PREFIX + 'eeeee',
                            dvr.SNAT_NS_PREFIX + 'fffff']
        router_list = [{'id': 'foo', 'distributed': False},
                       {'id': 'aaaa', 'distributed': False}]
        other_namespaces = ['qdhcp-aabbcc', 'unknown']
        self._cleanup_namespace_test(stale_namespaces,
                                     router_list,
                                     other_namespaces)
    def test_cleanup_namespace_with_conf_router_id(self):
        # NOTE: the namespace for the configured router_id is included in
        # the "stale" list handed to the helper; the helper counts it as
        # stale, exercising cleanup with a conf-pinned router present.
        self.conf.set_override('router_id', 'bbbbb')
        stale_namespaces = [l3_agent.NS_PREFIX + 'cccc',
                            l3_agent.NS_PREFIX + 'eeeee',
                            l3_agent.NS_PREFIX + self.conf.router_id]
        router_list = [{'id': 'foo', 'distributed': False},
                       {'id': 'aaaa', 'distributed': False}]
        other_namespaces = ['qdhcp-aabbcc', 'unknown']
        self._cleanup_namespace_test(stale_namespaces,
                                     router_list,
                                     other_namespaces)
def test_create_dvr_gateway(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
port_id = _uuid()
dvr_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': port_id,
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
interface_name = agent.get_snat_int_device_name(port_id)
self.device_exists.return_value = False
agent._create_dvr_gateway(ri, dvr_gw_port, interface_name,
self.snat_ports)
# check 2 internal ports are plugged
# check 1 ext-gw-port is plugged
self.assertEqual(self.mock_driver.plug.call_count, 3)
self.assertEqual(self.mock_driver.init_l3.call_count, 3)
def test_agent_gateway_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
network_id = _uuid()
port_id = _uuid()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': port_id,
'network_id': network_id,
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
fip_ns_name = (
agent.get_fip_ns_name(str(network_id)))
interface_name = (
agent.get_fip_ext_device_name(port_id))
self.device_exists.return_value = False
agent.agent_gateway_added(fip_ns_name, agent_gw_port,
interface_name)
self.assertEqual(self.mock_driver.plug.call_count, 1)
self.assertEqual(self.mock_driver.init_l3.call_count, 1)
if self.conf.use_namespaces:
self.send_arp.assert_called_once_with(fip_ns_name, interface_name,
'20.0.0.30',
mock.ANY, mock.ANY)
else:
self.utils_exec.assert_any_call(
check_exit_code=True, root_helper=self.conf.root_helper)
    def test_create_rtr_2_fip_link(self):
        # A missing rtr<->fip link should be created as a veth pair and a
        # gateway route added in the FIP routing table (16).
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = prepare_router_data()
        fip = {'id': _uuid(),
               'host': HOSTNAME,
               'floating_ip_address': '15.1.2.3',
               'fixed_ip_address': '192.168.0.1',
               'floating_network_id': _uuid(),
               'port_id': _uuid()}
        ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
                                  router=router)
        rtr_2_fip_name = agent.get_rtr_int_device_name(ri.router_id)
        fip_2_rtr_name = agent.get_fip_int_device_name(ri.router_id)
        fip_ns_name = agent.get_fip_ns_name(str(fip['floating_network_id']))
        with mock.patch.object(lla.LinkLocalAllocator, '_write'):
            self.device_exists.return_value = False
            agent.create_rtr_2_fip_link(ri, fip['floating_network_id'])
        self.mock_ip.add_veth.assert_called_with(rtr_2_fip_name,
                                                 fip_2_rtr_name, fip_ns_name)
        # TODO(mrsmith): add more asserts -
        self.mock_ip_dev.route.add_gateway.assert_called_once_with(
            '169.254.31.29', table=16)
        # TODO(mrsmith): test _create_agent_gateway_port
def test_create_rtr_2_fip_link_already_exists(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
self.device_exists.return_value = True
with mock.patch.object(lla.LinkLocalAllocator, '_write'):
agent.create_rtr_2_fip_link(ri, {})
self.assertFalse(self.mock_ip.add_veth.called)
def test_floating_ip_added_dist(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
fip = {'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid()}
agent.agent_gateway_port = agent_gw_port
ri.rtr_fip_subnet = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.dist_fip_count = 0
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
agent.floating_ip_added_dist(ri, fip, ip_cidr)
self.mock_rule.add_rule_from.assert_called_with('192.168.0.1',
16, FIP_PRI)
# TODO(mrsmith): add more asserts
@mock.patch.object(l3_agent.L3NATAgent, '_fip_ns_unsubscribe')
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_floating_ip_removed_dist(self, write, unsubscribe):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
fip_cidr = '11.22.33.44/24'
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
ri.dist_fip_count = 2
agent.fip_ns_subscribers.add(ri.router_id)
ri.floating_ips_dict['11.22.33.44'] = FIP_PRI
ri.fip_2_rtr = '11.22.33.42'
ri.rtr_2_fip = '11.22.33.40'
agent.agent_gateway_port = agent_gw_port
s = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.rtr_fip_subnet = s
agent.floating_ip_removed_dist(ri, fip_cidr)
self.mock_rule.delete_rule_priority.assert_called_with(FIP_PRI)
self.mock_ip_dev.route.delete_route.assert_called_with(fip_cidr,
str(s.ip))
self.assertFalse(unsubscribe.called, '_fip_ns_unsubscribe called!')
with mock.patch.object(agent, '_destroy_fip_namespace') as f:
ri.dist_fip_count = 1
fip_ns_name = agent.get_fip_ns_name(
str(agent._fetch_external_net_id()))
ri.rtr_fip_subnet = agent.local_subnets.allocate(ri.router_id)
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
agent.floating_ip_removed_dist(ri, fip_cidr)
self.mock_ip.del_veth.assert_called_once_with(
agent.get_fip_int_device_name(router['id']))
self.mock_ip_dev.route.delete_gateway.assert_called_once_with(
str(fip_to_rtr.ip), table=16)
f.assert_called_once_with(fip_ns_name)
unsubscribe.assert_called_once_with(ri.router_id)
    def test_get_service_plugin_list(self):
        # The plugin-reported service plugin list is cached on the agent.
        service_plugins = [p_const.L3_ROUTER_NAT]
        self.plugin_api.get_service_plugin_list.return_value = service_plugins
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.assertEqual(service_plugins, agent.neutron_service_plugins)
        self.assertTrue(self.plugin_api.get_service_plugin_list.called)
    def test_get_service_plugin_list_failed(self):
        # A RemoteError (old server without the RPC) is tolerated: the
        # agent starts with no cached plugin list.
        raise_rpc = messaging.RemoteError()
        self.plugin_api.get_service_plugin_list.side_effect = raise_rpc
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.assertIsNone(agent.neutron_service_plugins)
        self.assertTrue(self.plugin_api.get_service_plugin_list.called)
    def test_get_service_plugin_list_retried(self):
        raise_timeout = messaging.MessagingTimeout()
        # Raise a timeout the first 2 times it calls
        # get_service_plugin_list then return an empty tuple
        self.plugin_api.get_service_plugin_list.side_effect = (
            raise_timeout, raise_timeout, tuple()
        )
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.assertEqual(agent.neutron_service_plugins, tuple())
    def test_get_service_plugin_list_retried_max(self):
        raise_timeout = messaging.MessagingTimeout()
        # Raise a timeout 5 times; the retry budget is exhausted and the
        # timeout propagates out of the agent constructor.
        self.plugin_api.get_service_plugin_list.side_effect = (
            (raise_timeout, ) * 5
        )
        self.assertRaises(messaging.MessagingTimeout, l3_agent.L3NATAgent,
                          HOSTNAME, self.conf)
def test__fip_ns_subscribe_is_first_true(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
is_first = agent._fip_ns_subscribe(router_id)
self.assertTrue(is_first)
self.assertEqual(len(agent.fip_ns_subscribers), 1)
def test__fip_ns_subscribe_is_first_false(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router2_id = _uuid()
agent._fip_ns_subscribe(router_id)
is_first = agent._fip_ns_subscribe(router2_id)
self.assertFalse(is_first)
self.assertEqual(len(agent.fip_ns_subscribers), 2)
def test__fip_ns_unsubscribe_is_last_true(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
agent.fip_ns_subscribers.add(router_id)
is_last = agent._fip_ns_unsubscribe(router_id)
self.assertTrue(is_last)
self.assertEqual(len(agent.fip_ns_subscribers), 0)
def test__fip_ns_unsubscribe_is_last_false(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router2_id = _uuid()
agent.fip_ns_subscribers.add(router_id)
agent.fip_ns_subscribers.add(router2_id)
is_last = agent._fip_ns_unsubscribe(router_id)
self.assertFalse(is_last)
self.assertEqual(len(agent.fip_ns_subscribers), 1)
def test_external_gateway_removed_ext_gw_port_and_fip(self):
self.conf.set_override('state_path', '/tmp')
self.conf.set_override('router_delete_namespaces', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = 'dvr'
agent.agent_gateway_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
external_net_id = _uuid()
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
router = prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router)
vm_floating_ip = '19.4.4.2'
ri.floating_ips_dict[vm_floating_ip] = FIP_PRI
ri.dist_fip_count = 1
ri.ex_gw_port = ri.router['gw_port']
del ri.router['gw_port']
ri.rtr_fip_subnet = agent.local_subnets.allocate(ri.router_id)
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag = mock.Mock()
nat.add_rule = mock.Mock()
self.mock_ip.get_devices.return_value = [
FakeDev(agent.get_fip_ext_device_name(_uuid()))]
self.mock_ip_dev.addr.list.return_value = [
{'cidr': vm_floating_ip + '/32'},
{'cidr': '19.4.4.1/24'}]
self.device_exists.return_value = True
agent.external_gateway_removed(
ri, ri.ex_gw_port,
agent.get_external_device_name(ri.ex_gw_port['id']))
self.mock_ip.del_veth.assert_called_once_with(
agent.get_fip_int_device_name(ri.router['id']))
self.mock_ip_dev.route.delete_gateway.assert_called_once_with(
str(fip_to_rtr.ip), table=dvr.FIP_RT_TBL)
self.assertEqual(ri.dist_fip_count, 0)
self.assertEqual(len(agent.fip_ns_subscribers), 0)
self.assertEqual(self.mock_driver.unplug.call_count, 1)
self.assertIsNone(agent.agent_gateway_port)
self.mock_ip.netns.delete.assert_called_once_with(
agent.get_fip_ns_name(external_net_id))
self.assertFalse(nat.add_rule.called)
nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
    def test_spawn_radvd(self):
        # Verify the radvd daemon is exec'd with the expected config file,
        # pid file and syslog options.
        router = prepare_router_data()
        conffile = '/fake/radvd.conf'
        pidfile = '/fake/radvd.pid'
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        # we don't want the whole process manager to be mocked to be
        # able to catch execute() calls
        self.external_process_p.stop()
        self.ip_cls_p.stop()
        get_pid_file_name = ('neutron.agent.linux.external_process.'
                             'ProcessManager.get_pid_file_name')
        with mock.patch('neutron.agent.linux.utils.execute') as execute:
            with mock.patch(get_pid_file_name) as get_pid:
                get_pid.return_value = pidfile
                ra._spawn_radvd(router['id'],
                                conffile,
                                agent.get_ns_name(router['id']),
                                self.conf.root_helper)
            cmd = execute.call_args[0][0]
        self.assertIn('radvd', cmd)
        # Join args so flag/value pairs can be matched as substrings.
        _join = lambda *args: ' '.join(args)
        cmd = _join(*cmd)
        self.assertIn(_join('-C', conffile), cmd)
        self.assertIn(_join('-p', pidfile), cmd)
        self.assertIn(_join('-m', 'syslog'), cmd)
    def test_generate_radvd_conf_other_flag(self):
        # we don't check other flag for stateful since it's redundant
        # for this mode and can be ignored by clients, as per RFC4861
        # NOTE: iteritems() is Python 2 only.
        expected = {l3_constants.IPV6_SLAAC: False,
                    l3_constants.DHCPV6_STATELESS: True}
        for ra_mode, flag_set in expected.iteritems():
            router = prepare_router_data()
            ri = self._process_router_ipv6_interface_added(router,
                                                           ra_mode=ra_mode)
            ra._generate_radvd_conf(ri.router['id'],
                                    router[l3_constants.INTERFACE_KEY],
                                    mock.Mock())
            # AdvOtherConfigFlag must appear in the rendered conf only for
            # the modes that require it.
            asserter = self.assertIn if flag_set else self.assertNotIn
            asserter('AdvOtherConfigFlag on;',
                     self.utils_replace_file.call_args[0][1])
    def test__put_fips_in_error_state(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = mock.Mock()
        ri.router.get.return_value = [{'id': mock.sentinel.id1},
                                      {'id': mock.sentinel.id2}]
        statuses = agent._put_fips_in_error_state(ri)
        # NOTE(review): 'expected' is a one-element *list* wrapping the
        # status dict, and the check below is assertNotEqual — which passes
        # trivially for any dict-shaped 'statuses'. Confirm the intent;
        # assertEqual against the unwrapped dict would be a stronger test.
        expected = [{mock.sentinel.id1: l3_constants.FLOATINGIP_STATUS_ERROR,
                     mock.sentinel.id2: l3_constants.FLOATINGIP_STATUS_ERROR}]
        self.assertNotEqual(expected, statuses)
    def test__process_snat_dnat_for_fip(self):
        # Failures while installing FIP NAT rules are wrapped into
        # FloatingIpSetupException.
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent.process_router_floating_ip_nat_rules = mock.Mock(
            side_effect=Exception)
        self.assertRaises(n_exc.FloatingIpSetupException,
                          agent._process_snat_dnat_for_fip,
                          mock.sentinel.ri)
        agent.process_router_floating_ip_nat_rules.assert_called_with(
            mock.sentinel.ri)
    def test__configure_fip_addresses(self):
        # Failures while plumbing FIP addresses are wrapped into
        # FloatingIpSetupException.
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent.process_router_floating_ip_addresses = mock.Mock(
            side_effect=Exception)
        self.assertRaises(n_exc.FloatingIpSetupException,
                          agent._configure_fip_addresses,
                          mock.sentinel.ri,
                          mock.sentinel.ex_gw_port)
        agent.process_router_floating_ip_addresses.assert_called_with(
            mock.sentinel.ri,
            mock.sentinel.ex_gw_port)
| blueboxgroup/neutron | neutron/tests/unit/test_l3_agent.py | Python | apache-2.0 | 100,926 |
import { Component, ElementRef, NgZone, OnInit, ViewChild} from '@angular/core';
import { Router, ActivatedRoute } from '@angular/router';
import { EventoModel } from './../../../model/evento.model';
import { TipoIngressoModel } from './../../../model/tipoingresso.model';
import { EventoService } from './../../../service/evento.service';
import * as firebase from 'firebase';
import { } from 'googlemaps';
import { MapsAPILoader } from '@agm/core';
@Component({
  selector: 'app-evento-editar',
  templateUrl: './evento-editar.component.html'
})
// Edit screen for an existing event: loads the event and its ticket types,
// lets the user upload a new image to Firebase Storage, and resolves the
// venue address to lat/lng via the Google Maps Places autocomplete.
export class EventoEditarComponent implements OnInit {
  // Download URL of the event image currently stored in Firebase Storage.
  imagemUrl:string;
  // Id of the event being edited (taken from the route parameter).
  idevento:number;
  // Upload progress as a formatted percentage string (e.g. "42.50").
  progress:string;
  // Root reference of the Firebase Storage bucket.
  storageRef:any;
  // Input mask for "dd/MM/yyyy hh:mm" date entry.
  public dateMask = [/\d/, /\d/, '/', /\d/, /\d/, '/', /\d/, /\d/, /\d/, /\d/,' ',/\d/,/\d/,':',/\d/,/\d/];
  // Address search input wired to the Places autocomplete.
  @ViewChild("search")
  public searchElementRef: ElementRef;
  constructor(
    public actroute: ActivatedRoute,
    public eventoService:EventoService,
    public evento:EventoModel,
    public router:Router,
    public mapsAPILoader: MapsAPILoader,
    public ngZone: NgZone
  )
  {
    this.storageRef = firebase.storage().ref();
  }
  // Loads the event and its ticket types for the routed id, then sets up
  // the Google Places autocomplete on the address field.
  ngOnInit() {
    this.actroute.params.subscribe(
      (params: any) => {
        this.evento.idevento = params['id'];
        this.eventoService.getEvento(this.evento)
        .subscribe((evento)=>{
          this.evento = evento.json().data[0];
          console.log(this.evento);
          this.imagemUrl = this.evento.imagem;
        })
        this.eventoService.getAllTipoIngressos(this.evento)
        .subscribe((tipos)=>{
          this.evento.tipos = tipos;
          console.log(this.evento.tipos);
        })
      }
    );
    this.mapsAPILoader.load().then(() => {
      let autocomplete = new google.maps.places.Autocomplete(this.searchElementRef.nativeElement, {
        types: ["address"]
      });
      autocomplete.addListener("place_changed", () => {
        // Re-enter Angular's zone: the Places callback fires outside it.
        this.ngZone.run(() => {
          //get the place result
          let place: google.maps.places.PlaceResult = autocomplete.getPlace();
          console.log(autocomplete.getPlace())
          //verify result
          if (place.geometry === undefined || place.geometry === null) {
            return;
          }
          //set latitude, longitude and zoom
          this.evento.latitude = place.geometry.location.lat();
          this.evento.longitude = place.geometry.location.lng();
        });
      });
    });
  }
  // Uploads the chosen file to Firebase Storage, tracking progress and
  // storing the resulting download URL in imagemUrl.
  // NOTE(review): snapshot.downloadURL is deprecated in newer Firebase
  // SDKs (use getDownloadURL()); also the old image is never deleted
  // (deleteImgStorage call is commented out) — confirm both are intended.
  uploadImagem($event){
    //this.deleteImgStorage();
    let files = $event.target.files || $event.srcElement.files;
    let file = files[0];
    let uploadTask = this.storageRef.child(file.name).put(file);
    uploadTask.on(firebase.storage.TaskEvent.STATE_CHANGED,
    (snapshot)=>{
      let vlrPorcent = (snapshot.bytesTransferred / snapshot.totalBytes) * 100;
      this.progress = vlrPorcent.toFixed(2);
    },(error)=>{
      switch (error.code) {
        case 'storage/unauthorized':
          // User doesn't have permission to access the object
          break;
        case 'storage/canceled':
          // User canceled the upload
          break;
        case 'storage/unknown':
          // Unknown error occurred, inspect error.serverResponse
          break;
      }
    },()=>{
      this.imagemUrl = uploadTask.snapshot.downloadURL;
    });
  }
  // Persists the edited event, attaching the (possibly new) image URL.
  atualizarDados(evento){
    this.evento.imagem = this.imagemUrl;
    console.log(this.evento.data);
    this.eventoService.atualizarEvento(this.evento.idevento,evento);
  }
  // Abandons editing and returns to the event list.
  cancelar(){
    this.router.navigate(['/eventos/listar']);
  }
  // Appends a blank ticket-type row for the user to fill in.
  novotipo(){
    this.evento.tipos.push(new TipoIngressoModel());
  }
}
| Wellington-Junior/ingressos | src/app/pages/evento/evento-editar/evento-editar.component.ts | TypeScript | apache-2.0 | 3,826 |
/**
* Copyright (C) 2012 Ryan W Tenney (ryan@10e.us)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ryantenney.metrics.spring.reporter;
import java.util.Map;
import java.util.regex.Pattern;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.FactoryBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.core.convert.ConversionService;
import org.springframework.core.convert.TypeDescriptor;
import org.springframework.core.convert.support.DefaultConversionService;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
public abstract class AbstractReporterFactoryBean<T> implements FactoryBean<T>, InitializingBean, BeanFactoryAware {
protected static final String FILTER_PATTERN = "filter";
protected static final String FILTER_REF = "filter-ref";
protected static final String PREFIX = "prefix";
protected static final String PREFIX_SUPPLIER_REF = "prefix-supplier-ref";
private MetricRegistry metricRegistry;
private BeanFactory beanFactory;
private ConversionService conversionService;
private Map<String, String> properties;
private T instance;
private boolean enabled = true;
private boolean initialized = false;
@Override
public abstract Class<? extends T> getObjectType();
@Override
public boolean isSingleton() {
return true;
}
@Override
public T getObject() {
if (!this.enabled) {
return null;
}
if (!this.initialized) {
throw new IllegalStateException("Singleton instance not initialized yet");
}
return this.instance;
}
@Override
public void afterPropertiesSet() throws Exception {
this.instance = createInstance();
this.initialized = true;
}
protected abstract T createInstance() throws Exception;
@Override
public void setBeanFactory(final BeanFactory beanFactory) {
this.beanFactory = beanFactory;
if (beanFactory instanceof ConfigurableBeanFactory) {
this.conversionService = ((ConfigurableBeanFactory) beanFactory).getConversionService();
}
}
public BeanFactory getBeanFactory() {
return this.beanFactory;
}
public ConversionService getConversionService() {
if (this.conversionService == null) {
this.conversionService = new DefaultConversionService();
}
return this.conversionService;
}
public void setMetricRegistry(final MetricRegistry metricRegistry) {
this.metricRegistry = metricRegistry;
}
public MetricRegistry getMetricRegistry() {
return metricRegistry;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
public boolean isEnabled() {
return this.enabled;
}
public void setProperties(final Map<String, String> properties) {
this.properties = properties;
}
public Map<String, String> getProperties() {
return properties;
}
protected boolean hasProperty(String key) {
return getProperty(key) != null;
}
/**
 * Looks up a raw configuration property.
 *
 * @param key the property name
 * @return the configured value, or {@code null} when the property is absent
 *         or no properties map was ever injected
 */
protected String getProperty(String key) {
    final Map<String, String> props = this.properties;
    // Guard against a never-injected map so hasProperty()/getMetricFilter()
    // do not fail with a NullPointerException on property-less definitions.
    return props != null ? props.get(key) : null;
}
/**
 * Looks up a raw configuration property, returning {@code defaultValue}
 * when it is absent.
 *
 * @param key the property name
 * @param defaultValue value to use when the property is not configured
 * @return the configured value, or {@code defaultValue} when absent
 */
protected String getProperty(String key, String defaultValue) {
    // Delegate to the single-argument lookup so all overloads share one
    // access path to the properties map.
    final String value = getProperty(key);
    return value != null ? value : defaultValue;
}
// Typed lookup with no default; returns null when the property is absent.
protected <V> V getProperty(String key, Class<V> requiredType) {
    return getProperty(key, requiredType, null);
}
/**
 * Looks up a property and converts it to {@code requiredType} using the
 * {@link ConversionService}; returns {@code defaultValue} when absent.
 *
 * @param key the property name
 * @param requiredType target type of the conversion
 * @param defaultValue value to use when the property is not configured
 * @return the converted value, or {@code defaultValue} when absent
 */
@SuppressWarnings("unchecked")
protected <V> V getProperty(String key, Class<V> requiredType, V defaultValue) {
    // Delegate to the single-argument lookup so all overloads share one
    // access path to the properties map.
    final String value = getProperty(key);
    if (value == null) {
        return defaultValue;
    }
    return (V) getConversionService().convert(value, TypeDescriptor.forObject(value), TypeDescriptor.valueOf(requiredType));
}
// Untyped bean-reference lookup; returns null when the property is absent.
protected Object getPropertyRef(String key) {
    return getPropertyRef(key, null);
}
/**
 * Resolves a property value as a bean name in the owning factory.
 *
 * @param key the property whose value names the bean
 * @param requiredType expected type of the referenced bean
 * @return the referenced bean, or {@code null} when the property is absent
 */
protected <V> V getPropertyRef(String key, Class<V> requiredType) {
    final String beanName = getProperty(key);
    if (beanName == null) {
        return null;
    }
    return this.beanFactory.getBean(beanName, requiredType);
}
/**
 * Builds the {@link MetricFilter} for this reporter: a regex filter from the
 * "filter" property, a referenced bean from "filter-ref", or
 * {@link MetricFilter#ALL} when neither is configured. "filter" wins when
 * both are present.
 */
protected MetricFilter getMetricFilter() {
    if (hasProperty(FILTER_PATTERN)) {
        return metricFilterPattern(getProperty(FILTER_PATTERN));
    }
    if (hasProperty(FILTER_REF)) {
        return getPropertyRef(FILTER_REF, MetricFilter.class);
    }
    return MetricFilter.ALL;
}
/**
 * Determines the metric-name prefix: the literal "prefix" property, a value
 * obtained from a referenced {@code MetricPrefixSupplier} bean, or
 * {@code null} when neither is configured. "prefix" wins when both are
 * present.
 */
protected String getPrefix() {
    if (hasProperty(PREFIX)) {
        return getProperty(PREFIX);
    }
    if (hasProperty(PREFIX_SUPPLIER_REF)) {
        return getPropertyRef(PREFIX_SUPPLIER_REF, MetricPrefixSupplier.class).getPrefix();
    }
    return null;
}
/**
 * Creates a {@link MetricFilter} that matches metric names against the given
 * regular expression. Note this is a full match, not a substring search.
 */
protected MetricFilter metricFilterPattern(String pattern) {
    final Pattern filter = Pattern.compile(pattern);
    return new MetricFilter() {
        @Override
        public boolean matches(String name, Metric metric) {
            return filter.matcher(name).matches();
        }

        @Override
        public String toString() {
            return "[MetricFilter regex=" + filter.pattern() + "]";
        }
    };
}
}
| ryantenney/metrics-spring | src/main/java/com/ryantenney/metrics/spring/reporter/AbstractReporterFactoryBean.java | Java | apache-2.0 | 5,530 |
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.task.app.composedtaskrunner.configuration;
import org.springframework.batch.core.ExitStatus;
import org.springframework.batch.core.Step;
import org.springframework.batch.core.StepContribution;
import org.springframework.batch.core.StepExecution;
import org.springframework.batch.core.StepExecutionListener;
import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.batch.core.scope.context.ChunkContext;
import org.springframework.batch.core.step.tasklet.Tasklet;
import org.springframework.batch.repeat.RepeatStatus;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.task.app.composedtaskrunner.ComposedRunnerJobFactory;
import org.springframework.cloud.task.app.composedtaskrunner.ComposedRunnerVisitor;
import org.springframework.cloud.task.app.composedtaskrunner.properties.ComposedTaskProperties;
import org.springframework.cloud.task.configuration.EnableTask;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.TaskExecutor;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.transaction.annotation.Isolation;
import org.springframework.transaction.interceptor.DefaultTransactionAttribute;
import org.springframework.transaction.interceptor.TransactionAttribute;
/**
* @author Glenn Renfro
*/
@Configuration
@EnableBatchProcessing
@EnableTask
@EnableConfigurationProperties(ComposedTaskProperties.class)
public class ComposedRunnerVisitorConfiguration {

    // Step builder injected by @EnableBatchProcessing.
    @Autowired
    private StepBuilderFactory steps;

    // Supplies the composed-task DSL graph under test.
    @Autowired
    private ComposedTaskProperties composedTaskProperties;

    // Factory bean that assembles the batch job from the DSL graph.
    @Bean
    public ComposedRunnerJobFactory job() {
        return new ComposedRunnerJobFactory(this.composedTaskProperties.getGraph());
    }

    @Bean
    public ComposedRunnerVisitor composedRunnerStack() {
        return new ComposedRunnerVisitor();
    }

    // The bean methods below define no-op tasklet steps. Their method names
    // (AAA_0, BBB_1, LABELA, ...) deliberately break Java naming conventions
    // because they double as the step/bean names referenced by the DSL
    // graphs used in the tests — do not rename them.
    @Bean
    public Step AAA_0() {
        return createTaskletStep("AAA_0");
    }

    @Bean
    public Step AAA_1() {
        return createTaskletStep("AAA_1");
    }

    @Bean
    public Step AAA_2() {
        return createTaskletStep("AAA_2");
    }

    @Bean
    public Step BBB_0() {
        return createTaskletStep("BBB_0");
    }

    @Bean
    public Step BBB_1() {
        return createTaskletStep("BBB_1");
    }

    @Bean
    public Step CCC_0() {
        return createTaskletStep("CCC_0");
    }

    @Bean
    public Step DDD_0() {
        return createTaskletStep("DDD_0");
    }

    @Bean
    public Step EEE_0() {
        return createTaskletStep("EEE_0");
    }

    @Bean
    public Step FFF_0() {
        return createTaskletStep("FFF_0");
    }

    @Bean
    public Step LABELA() {
        return createTaskletStep("LABELA");
    }

    // Step that always ends with ExitStatus.FAILED via its listener.
    @Bean
    public Step failedStep_0() {
        return createTaskletStepWithListener("failedStep_0",
            failedStepExecutionListener());
    }

    // Step that always ends with ExitStatus.COMPLETED via its listener.
    @Bean
    public Step successStep() {
        return createTaskletStepWithListener("successStep",
            successStepExecutionListener());
    }

    // Listener that forces a FAILED exit status after the step runs.
    @Bean
    public StepExecutionListener failedStepExecutionListener() {
        return new StepExecutionListener() {

            @Override
            public void beforeStep(StepExecution stepExecution) {
            }

            @Override
            public ExitStatus afterStep(StepExecution stepExecution) {
                return ExitStatus.FAILED;
            }
        };
    }

    // Listener that forces a COMPLETED exit status after the step runs.
    @Bean
    public StepExecutionListener successStepExecutionListener() {
        return new StepExecutionListener() {

            @Override
            public void beforeStep(StepExecution stepExecution) {
            }

            @Override
            public ExitStatus afterStep(StepExecution stepExecution) {
                return ExitStatus.COMPLETED;
            }
        };
    }

    // Executor used for split (parallel) flows in the composed job.
    @Bean
    public TaskExecutor taskExecutor() {
        return new ThreadPoolTaskExecutor();
    }

    // Builds a step whose tasklet finishes immediately, attaching the given
    // listener to control the resulting exit status.
    private Step createTaskletStepWithListener(final String taskName,
        StepExecutionListener stepExecutionListener) {
        return this.steps.get(taskName)
            .tasklet(new Tasklet() {
                @Override
                public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) throws Exception {
                    return RepeatStatus.FINISHED;
                }
            })
            .transactionAttribute(getTransactionAttribute())
            .listener(stepExecutionListener)
            .build();
    }

    // Builds a step whose tasklet finishes immediately with no listener.
    private Step createTaskletStep(final String taskName) {
        return this.steps.get(taskName)
            .tasklet(new Tasklet() {
                @Override
                public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) throws Exception {
                    return RepeatStatus.FINISHED;
                }
            })
            .transactionAttribute(getTransactionAttribute())
            .build();
    }

    /**
     * Using the default transaction attribute for the job would cause the
     * TaskLauncher not to see the latest state in the database but rather
     * what is in its own transaction. By setting isolation to READ_COMMITTED
     * the task launcher can see the latest state of the db, since the changes
     * to the task execution are done by the tasks.
     * @return DefaultTransactionAttribute with isolation set to READ_COMMITTED.
     */
    private TransactionAttribute getTransactionAttribute() {
        DefaultTransactionAttribute defaultTransactionAttribute =
            new DefaultTransactionAttribute();
        defaultTransactionAttribute.setIsolationLevel(
            Isolation.READ_COMMITTED.value());
        return defaultTransactionAttribute;
    }
}
| mminella/composed-task-runner | spring-cloud-starter-task-composedtaskrunner/src/test/java/org/springframework/cloud/task/app/composedtaskrunner/configuration/ComposedRunnerVisitorConfiguration.java | Java | apache-2.0 | 6,001 |
#include "kernel.h"
#include "ilwisdata.h"
#include "datadefinition.h"
#include "columndefinition.h"
#include "table.h"
#include "visualattributemodel.h"
#include "mapinformationattributesetter.h"
REGISTER_PROPERTYEDITOR("mapinfopropertyeditor",MapInformationPropertySetter)
// Registers this editor under "mapinfopropertyeditor" with the display name
// "Mouse over Info" and the QML view MapinfoProperties.qml.
MapInformationPropertySetter::MapInformationPropertySetter(QObject *parent) :
    VisualAttributeEditor("mapinfopropertyeditor",TR("Mouse over Info"),QUrl("MapinfoProperties.qml"), parent)
{

}

MapInformationPropertySetter::~MapInformationPropertySetter()
{

}
// This editor applies only to valid coverage objects, and only for the
// pseudo-attribute that represents the layer as a whole.
bool MapInformationPropertySetter::canUse(const IIlwisObject& obj, const QString& name ) const
{
    if ( !obj.isValid() || !hasType(obj->ilwisType(), itCOVERAGE))
        return false;

    return name == VisualAttributeModel::LAYER_ONLY;
}
// Factory method used by the property-editor registry to create a fresh
// editor instance; the caller takes ownership.
VisualAttributeEditor *MapInformationPropertySetter::create()
{
    return new MapInformationPropertySetter();
}
// Current "mouse over info" state of the attached layer; defaults to true
// when no layer is attached yet.
bool MapInformationPropertySetter::showInfo() const
{
    auto *layer = attribute()->layer();
    return layer ? layer->showInfo() : true;
}
// Pushes the desired "mouse over info" state to the attached layer, if any.
void MapInformationPropertySetter::setShowInfo(bool yesno)
{
    auto *layer = attribute()->layer();
    if ( layer)
        layer->showInfo(yesno);
}
| ridoo/IlwisCore | ilwiscoreui/propertyeditors/mapinformationattributesetter.cpp | C++ | apache-2.0 | 1,233 |
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade_test
import (
"bytes"
"fmt"
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"knative.dev/pkg/test/upgrade"
)
const (
failureTestingMessage = "This error is expected to be seen. Upgrade suite should fail."
)
// newConfig builds an upgrade.Configuration wired to the given *testing.T
// whose zap logger writes to an in-memory buffer instead of stderr, and
// returns that buffer so tests can assert on the emitted log output.
func newConfig(t *testing.T) (upgrade.Configuration, fmt.Stringer) {
	var buf bytes.Buffer
	cfg := zap.NewDevelopmentConfig()
	// Strip timestamps and caller info so the log output is deterministic
	// and easy to match in assertions.
	cfg.EncoderConfig.TimeKey = ""
	cfg.EncoderConfig.CallerKey = ""
	syncedBuf := zapcore.AddSync(&buf)
	c := upgrade.Configuration{
		T: t,
		LogConfig: upgrade.LogConfig{
			Config: cfg,
			Options: []zap.Option{
				// Replace the default core so every entry goes to buf.
				zap.WrapCore(func(core zapcore.Core) zapcore.Core {
					return zapcore.NewCore(
						zapcore.NewConsoleEncoder(cfg.EncoderConfig),
						zapcore.NewMultiWriteSyncer(syncedBuf), cfg.Level)
				}),
				// Zap's own internal errors are captured in the same buffer.
				zap.ErrorOutput(syncedBuf),
			},
		},
	}
	return c, &buf
}
// createSteps decomposes a suite into the ordered execution steps performed
// by the upgrade framework: base install, pre-upgrade tests, starting
// continual tests, upgrade, post-upgrade tests, downgrade, post-downgrade
// tests, and verification of continual tests. Each step carries a
// updateSuite callback that writes (possibly mutated) operations back into a
// suite; note the continual tests' generalized form is shared by both the
// "start" and "verify" steps.
func createSteps(s upgrade.Suite) []*step {
	continualTestsGeneralized := generalizeOpsFromBg(s.Tests.Continual)
	return []*step{{
		messages: messageFormatters.baseInstall,
		ops:      generalizeOps(s.Installations.Base),
		updateSuite: func(ops operations, s *upgrade.Suite) {
			s.Installations.Base = ops.asOperations()
		},
	}, {
		messages: messageFormatters.preUpgrade,
		ops:      generalizeOps(s.Tests.PreUpgrade),
		updateSuite: func(ops operations, s *upgrade.Suite) {
			s.Tests.PreUpgrade = ops.asOperations()
		},
	}, {
		messages: messageFormatters.startContinual,
		ops:      continualTestsGeneralized,
		updateSuite: func(ops operations, s *upgrade.Suite) {
			s.Tests.Continual = ops.asBackgroundOperation()
		},
	}, {
		messages: messageFormatters.upgrade,
		ops:      generalizeOps(s.Installations.UpgradeWith),
		updateSuite: func(ops operations, s *upgrade.Suite) {
			s.Installations.UpgradeWith = ops.asOperations()
		},
	}, {
		messages: messageFormatters.postUpgrade,
		ops:      generalizeOps(s.Tests.PostUpgrade),
		updateSuite: func(ops operations, s *upgrade.Suite) {
			s.Tests.PostUpgrade = ops.asOperations()
		},
	}, {
		messages: messageFormatters.downgrade,
		ops:      generalizeOps(s.Installations.DowngradeWith),
		updateSuite: func(ops operations, s *upgrade.Suite) {
			s.Installations.DowngradeWith = ops.asOperations()
		},
	}, {
		messages: messageFormatters.postDowngrade,
		ops:      generalizeOps(s.Tests.PostDowngrade),
		updateSuite: func(ops operations, s *upgrade.Suite) {
			s.Tests.PostDowngrade = ops.asOperations()
		},
	}, {
		messages: messageFormatters.verifyContinual,
		ops:      continualTestsGeneralized,
		updateSuite: func(ops operations, s *upgrade.Suite) {
			s.Tests.Continual = ops.asBackgroundOperation()
		},
	}}
}
// expectedTexts computes the log lines expected from executing the suite,
// truncated at the configured failure point: once the failing element is
// reached, execution stops and no further messages are expected.
func expectedTexts(s upgrade.Suite, fp failurePoint) texts {
	steps := createSteps(s)
	tt := texts{elms: nil}
	for i, st := range steps {
		stepIdx := i + 1 // step/element indices in messages are 1-based
		if st.ops.length() == 0 {
			tt.append(st.skipped(stepIdx))
		} else {
			tt.append(st.starting(stepIdx, st.ops.length()))
			for j, op := range st.ops.ops {
				elemIdx := j + 1
				tt.append(st.element(stepIdx, elemIdx, op.Name()))
				if fp.step == stepIdx && fp.element == elemIdx {
					// The failure point aborts the run here.
					return tt
				}
			}
		}
	}
	return tt
}
// generalizeOps wraps foreground operations in the test-local generalized
// operation type.
func generalizeOps(ops []upgrade.Operation) operations {
	wrapped := make([]*operation, 0, len(ops))
	for _, op := range ops {
		wrapped = append(wrapped, &operation{op: op})
	}
	return operations{ops: wrapped}
}
// generalizeOpsFromBg wraps background operations in the test-local
// generalized operation type.
func generalizeOpsFromBg(ops []upgrade.BackgroundOperation) operations {
	wrapped := make([]*operation, 0, len(ops))
	for _, op := range ops {
		wrapped = append(wrapped, &operation{bg: op})
	}
	return operations{ops: wrapped}
}
// createMessages adapts printf-style format strings into message functions.
// An empty "skipped" format yields an empty string, which texts.append
// silently drops.
func createMessages(mf formats) messages {
	return messages{
		skipped: func(args ...interface{}) string {
			empty := ""
			if mf.skipped == empty {
				return empty
			}
			return fmt.Sprintf(mf.skipped, args...)
		},
		starting: func(args ...interface{}) string {
			return fmt.Sprintf(mf.starting, args...)
		},
		element: func(args ...interface{}) string {
			return fmt.Sprintf(mf.element, args...)
		},
	}
}
// append adds the given messages to the collected texts, dropping empties.
func (tt *texts) append(messages ...string) {
	for _, msg := range messages {
		if msg != "" {
			tt.elms = append(tt.elms, msg)
		}
	}
}
// completeSuiteExample builds a fully-populated example suite combining the
// serving and eventing fixture components in every phase, then injects a
// failure at the given failure point.
func completeSuiteExample(fp failurePoint) upgrade.Suite {
	serving := servingComponent()
	eventing := eventingComponent()
	suite := upgrade.Suite{
		Tests: upgrade.Tests{
			PreUpgrade: []upgrade.Operation{
				serving.tests.preUpgrade, eventing.tests.preUpgrade,
			},
			PostUpgrade: []upgrade.Operation{
				serving.tests.postUpgrade, eventing.tests.postUpgrade,
			},
			PostDowngrade: []upgrade.Operation{
				serving.tests.postDowngrade, eventing.tests.postDowngrade,
			},
			Continual: []upgrade.BackgroundOperation{
				serving.tests.continual, eventing.tests.continual,
			},
		},
		Installations: upgrade.Installations{
			Base: []upgrade.Operation{
				serving.installs.stable, eventing.installs.stable,
			},
			UpgradeWith: []upgrade.Operation{
				serving.installs.head, eventing.installs.head,
			},
			// Downgrade re-installs the stable versions.
			DowngradeWith: []upgrade.Operation{
				serving.installs.stable, eventing.installs.stable,
			},
		},
	}
	return enrichSuiteWithFailures(suite, fp)
}
// emptySuiteExample returns a suite with no installations and no tests
// (the zero value of upgrade.Suite).
func emptySuiteExample() upgrade.Suite {
	var s upgrade.Suite
	return s
}
// enrichSuiteWithFailures replaces the operation at the given failure point
// with a failing wrapper, then rebuilds the suite from the mutated steps.
// Step 3 is "start continual tests", so a failure there is raised in the
// background operation's setup phase rather than at stop time.
func enrichSuiteWithFailures(suite upgrade.Suite, fp failurePoint) upgrade.Suite {
	steps := createSteps(suite)
	for i, st := range steps {
		for j, op := range st.ops.ops {
			if fp.step == i+1 && fp.element == j+1 {
				op.fail(fp.step == 3)
			}
		}
	}
	return recreateSuite(steps)
}
// recreateSuite rebuilds an upgrade.Suite from the (possibly mutated)
// generalized steps by replaying each step's updateSuite callback.
func recreateSuite(steps []*step) upgrade.Suite {
	result := upgrade.Suite{}
	for _, st := range steps {
		st.updateSuite(st.ops, &result)
	}
	return result
}
// Name returns the wrapped operation's name, whichever of the foreground or
// background variant is populated.
func (o operation) Name() string {
	if o.op == nil {
		return o.bg.Name()
	}
	return o.op.Name()
}
// fail replaces the wrapped operation with one that still runs the original
// handler and then reports the expected testing failure. For background
// operations, setupFail chooses whether the failure is raised during the
// setup phase or later, when the stop event is received.
func (o *operation) fail(setupFail bool) {
	testName := fmt.Sprintf("FailingOf%s", o.Name())
	if o.op != nil {
		// Foreground: run the original handler, then fail the test.
		prev := o.op
		o.op = upgrade.NewOperation(testName, func(c upgrade.Context) {
			handler := prev.Handler()
			handler(c)
			c.T.Error(failureTestingMessage)
			c.Log.Error(failureTestingMessage)
		})
	} else {
		prev := o.bg
		o.bg = upgrade.NewBackgroundOperation(testName, func(c upgrade.Context) {
			// Setup phase: fail here only when requested.
			handler := prev.Setup()
			handler(c)
			if setupFail {
				c.T.Error(failureTestingMessage)
				c.Log.Error(failureTestingMessage)
			}
		}, func(bc upgrade.BackgroundContext) {
			upgrade.WaitForStopEvent(bc, upgrade.WaitForStopEventConfiguration{
				Name: testName,
				OnStop: func(event upgrade.StopEvent) {
					// Otherwise fail when the stop event arrives.
					if !setupFail {
						event.T.Error(failureTestingMessage)
						bc.Log.Error(failureTestingMessage)
					}
				},
				OnWait: func(bc upgrade.BackgroundContext, self upgrade.WaitForStopEventConfiguration) {
					bc.Log.Debugf("%s - probing functionality...", self.Name)
				},
				WaitTime: shortWait,
			})
		})
	}
}
// length reports how many generalized operations are held.
func (o operations) length() int {
	return len(o.ops)
}
// asOperations unwraps the generalized operations back into foreground
// upgrade.Operation values.
func (o operations) asOperations() []upgrade.Operation {
	result := make([]upgrade.Operation, 0, o.length())
	for _, op := range o.ops {
		result = append(result, op.op)
	}
	return result
}
// asBackgroundOperation unwraps the generalized operations back into
// background upgrade.BackgroundOperation values.
func (o operations) asBackgroundOperation() []upgrade.BackgroundOperation {
	result := make([]upgrade.BackgroundOperation, 0, o.length())
	for _, op := range o.ops {
		result = append(result, op.bg)
	}
	return result
}
| knative/pkg | test/upgrade/testing_operations_test.go | GO | apache-2.0 | 7,868 |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Immutable;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis.CodeActions;
using Microsoft.CodeAnalysis.PooledObjects;
using Microsoft.CodeAnalysis.Text;
using Microsoft.CodeAnalysis.LanguageServices;
using Microsoft.CodeAnalysis.Shared.Extensions;
using Microsoft.CodeAnalysis.Editing;
using Microsoft.CodeAnalysis.Simplification;
using Microsoft.CodeAnalysis.Formatting;
using Microsoft.CodeAnalysis.CodeRefactorings;
using System.Collections.Generic;
namespace Microsoft.CodeAnalysis.ConvertToInterpolatedString
{
/// <summary>
/// Offers a refactoring that converts a <c>string.Format("...", args)</c> call whose format
/// argument is a string literal into an interpolated string.
/// </summary>
internal abstract class AbstractConvertPlaceholderToInterpolatedStringRefactoringProvider<TInvocationExpressionSyntax, TExpressionSyntax, TArgumentSyntax, TLiteralExpressionSyntax> : CodeRefactoringProvider
    where TExpressionSyntax : SyntaxNode
    where TInvocationExpressionSyntax : TExpressionSyntax
    where TArgumentSyntax : SyntaxNode
    where TLiteralExpressionSyntax : SyntaxNode
{
    // Produces the language-specific interpolated-string syntax for the given literal text.
    protected abstract SyntaxNode GetInterpolatedString(string text);

    public override async Task ComputeRefactoringsAsync(CodeRefactoringContext context)
    {
        var semanticModel = await context.Document.GetSemanticModelAsync(context.CancellationToken).ConfigureAwait(false);
        var stringType = semanticModel.Compilation.GetSpecialType(SpecialType.System_String);
        if (stringType == null)
        {
            return;
        }

        // Collect the string.Format overloads whose first parameter is the format string.
        var formatMethods = stringType
            .GetMembers(nameof(string.Format))
            .OfType<IMethodSymbol>()
            .Where(ShouldIncludeFormatMethod)
            .ToImmutableArray();

        if (formatMethods.Length == 0)
        {
            return;
        }

        var syntaxFactsService = context.Document.GetLanguageService<ISyntaxFactsService>();
        if (syntaxFactsService == null)
        {
            return;
        }

        var root = await context.Document.GetSyntaxRootAsync(context.CancellationToken).ConfigureAwait(false);
        if (TryFindInvocation(context.Span, root, semanticModel, formatMethods, syntaxFactsService, context.CancellationToken, out var invocation, out var invocationSymbol) &&
            IsArgumentListCorrect(syntaxFactsService.GetArgumentsOfInvocationExpression(invocation), invocationSymbol, formatMethods, semanticModel, syntaxFactsService, context.CancellationToken))
        {
            context.RegisterRefactoring(
                new ConvertToInterpolatedStringCodeAction(
                    FeaturesResources.Convert_to_interpolated_string,
                    c => CreateInterpolatedString(invocation, context.Document, syntaxFactsService, c)));
        }
    }

    // Walks up from the selection looking for an enclosing invocation of one of the
    // string.Format overloads whose format argument is a string literal.
    private bool TryFindInvocation(
        TextSpan span,
        SyntaxNode root,
        SemanticModel semanticModel,
        ImmutableArray<IMethodSymbol> formatMethods,
        ISyntaxFactsService syntaxFactsService,
        CancellationToken cancellationToken,
        out TInvocationExpressionSyntax invocation,
        out ISymbol invocationSymbol)
    {
        invocationSymbol = null;
        invocation = root.FindNode(span, getInnermostNodeForTie: true)?.FirstAncestorOrSelf<TInvocationExpressionSyntax>();
        while (invocation != null)
        {
            var arguments = syntaxFactsService.GetArgumentsOfInvocationExpression(invocation);
            if (arguments.Count >= 2)
            {
                var firstArgumentExpression = syntaxFactsService.GetExpressionOfArgument(GetFormatArgument(arguments, syntaxFactsService)) as TLiteralExpressionSyntax;
                if (firstArgumentExpression != null && syntaxFactsService.IsStringLiteral(firstArgumentExpression.GetFirstToken()))
                {
                    invocationSymbol = semanticModel.GetSymbolInfo(invocation, cancellationToken).Symbol;
                    if (formatMethods.Contains(invocationSymbol))
                    {
                        break;
                    }
                }
            }

            invocation = invocation.Parent?.FirstAncestorOrSelf<TInvocationExpressionSyntax>();
        }

        return invocation != null;
    }

    private bool IsArgumentListCorrect(
        SeparatedSyntaxList<TArgumentSyntax>? nullableArguments,
        ISymbol invocationSymbol,
        ImmutableArray<IMethodSymbol> formatMethods,
        SemanticModel semanticModel,
        ISyntaxFactsService syntaxFactsService,
        CancellationToken cancellationToken)
    {
        // NOTE(review): .Value is taken without a HasValue check; callers currently always
        // pass a non-null list — confirm before relaxing that invariant.
        var arguments = nullableArguments.Value;
        var firstExpression = syntaxFactsService.GetExpressionOfArgument(GetFormatArgument(arguments, syntaxFactsService)) as TLiteralExpressionSyntax;
        if (arguments.Count >= 2 &&
            firstExpression != null &&
            syntaxFactsService.IsStringLiteral(firstExpression.GetFirstToken()))
        {
            // We do not want to substitute the expression if it is being passed to params array argument
            // Example:
            // string[] args;
            // String.Format("{0}{1}{2}", args);
            return IsArgumentListNotPassingArrayToParams(
                syntaxFactsService.GetExpressionOfArgument(GetParamsArgument(arguments, syntaxFactsService)),
                invocationSymbol,
                formatMethods,
                semanticModel,
                cancellationToken);
        }

        return false;
    }

    // Replaces the whole string.Format invocation with an equivalent interpolated string.
    private async Task<Document> CreateInterpolatedString(
        TInvocationExpressionSyntax invocation,
        Document document,
        ISyntaxFactsService syntaxFactsService,
        CancellationToken cancellationToken)
    {
        var semanticModel = await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
        var arguments = syntaxFactsService.GetArgumentsOfInvocationExpression(invocation);
        var literalExpression = syntaxFactsService.GetExpressionOfArgument(GetFormatArgument(arguments, syntaxFactsService)) as TLiteralExpressionSyntax;
        var text = literalExpression.GetFirstToken().ToString();
        var syntaxGenerator = document.Project.LanguageServices.GetService<SyntaxGenerator>();
        var expandedArguments = GetExpandedArguments(semanticModel, arguments, syntaxGenerator, syntaxFactsService);
        var interpolatedString = GetInterpolatedString(text);
        var newInterpolatedString = VisitArguments(expandedArguments, interpolatedString, syntaxFactsService);
        var root = await document.GetSyntaxRootAsync(cancellationToken).ConfigureAwait(false);
        var newRoot = root.ReplaceNode(invocation, newInterpolatedString.WithTriviaFrom(invocation));
        return document.WithSyntaxRoot(newRoot);
    }

    private string GetArgumentName(TArgumentSyntax argument, ISyntaxFactsService syntaxFactsService)
        => syntaxFactsService.GetNameForArgument(argument);

    // Finds the argument carrying the params array: by its declared name "args" when passed
    // as a named argument, otherwise positionally (index 1, right after the format string).
    // BUGFIX: this previously compared against FormatArgumentName ("format") — a copy-paste
    // from GetFormatArgument — which could resolve a named "format:" argument as the params
    // argument; the params parameter is named "args" (ArgsArgumentName).
    private SyntaxNode GetParamsArgument(SeparatedSyntaxList<TArgumentSyntax> arguments, ISyntaxFactsService syntaxFactsService)
        => arguments.FirstOrDefault(argument => string.Equals(GetArgumentName(argument, syntaxFactsService), StringFormatArguments.ArgsArgumentName, StringComparison.OrdinalIgnoreCase)) ?? arguments[1];

    // Finds the format-string argument: by name when passed as "format:", otherwise the
    // first argument positionally.
    private TArgumentSyntax GetFormatArgument(SeparatedSyntaxList<TArgumentSyntax> arguments, ISyntaxFactsService syntaxFactsService)
        => arguments.FirstOrDefault(argument => string.Equals(GetArgumentName(argument, syntaxFactsService), StringFormatArguments.FormatArgumentName, StringComparison.OrdinalIgnoreCase)) ?? arguments[0];

    // Resolves the argument at a given position, honoring named arguments (arg0/arg1/arg2)
    // for the non-params overloads; with more than 4 arguments only the params overload
    // applies, so the lookup is purely positional.
    private TArgumentSyntax GetArgument(SeparatedSyntaxList<TArgumentSyntax> arguments, int index, ISyntaxFactsService syntaxFactsService)
    {
        if (arguments.Count > 4)
        {
            return arguments[index];
        }

        return arguments.FirstOrDefault(
            argument => string.Equals(GetArgumentName(argument, syntaxFactsService), StringFormatArguments.ParamsArgumentNames[index], StringComparison.OrdinalIgnoreCase))
            ?? arguments[index];
    }

    // Produces the expressions to interpolate: each non-format argument, parenthesized, and
    // cast to its converted type (simplifiable) so behavior is preserved.
    private ImmutableArray<TExpressionSyntax> GetExpandedArguments(
        SemanticModel semanticModel,
        SeparatedSyntaxList<TArgumentSyntax> arguments,
        SyntaxGenerator syntaxGenerator,
        ISyntaxFactsService syntaxFactsService)
    {
        var builder = ArrayBuilder<TExpressionSyntax>.GetInstance();
        for (int i = 1; i < arguments.Count; i++)
        {
            var argumentExpression = syntaxFactsService.GetExpressionOfArgument(GetArgument(arguments, i, syntaxFactsService));
            var convertedType = semanticModel.GetTypeInfo(argumentExpression).ConvertedType;
            if (convertedType == null)
            {
                builder.Add(syntaxFactsService.Parenthesize(argumentExpression) as TExpressionSyntax);
            }
            else
            {
                var castExpression = syntaxGenerator.CastExpression(convertedType, syntaxFactsService.Parenthesize(argumentExpression)).WithAdditionalAnnotations(Simplifier.Annotation);
                builder.Add(castExpression as TExpressionSyntax);
            }
        }

        var expandedArguments = builder.ToImmutableAndFree();
        return expandedArguments;
    }

    // Rewrites each "{N}" interpolation placeholder into the N-th expanded argument.
    private SyntaxNode VisitArguments(
        ImmutableArray<TExpressionSyntax> expandedArguments,
        SyntaxNode interpolatedString,
        ISyntaxFactsService syntaxFactsService)
    {
        return interpolatedString.ReplaceNodes(syntaxFactsService.GetContentsOfInterpolatedString(interpolatedString), (oldNode, newNode) =>
        {
            var interpolationSyntaxNode = newNode;
            if (interpolationSyntaxNode != null)
            {
                var literalExpression = syntaxFactsService.GetExpressionOfInterpolation(interpolationSyntaxNode) as TLiteralExpressionSyntax;
                if (literalExpression != null && syntaxFactsService.IsNumericLiteralExpression(literalExpression))
                {
                    if (int.TryParse(literalExpression.GetFirstToken().ValueText, out var index))
                    {
                        // Out-of-range indices are left untouched.
                        if (index >= 0 && index < expandedArguments.Length)
                        {
                            return interpolationSyntaxNode.ReplaceNode(
                                syntaxFactsService.GetExpressionOfInterpolation(interpolationSyntaxNode),
                                syntaxFactsService.ConvertToSingleLine(expandedArguments[index], useElasticTrivia: true).WithAdditionalAnnotations(Formatter.Annotation));
                        }
                    }
                }
            }

            return newNode;
        });
    }

    // Keeps only the static Format overloads whose first parameter is named "format".
    private static bool ShouldIncludeFormatMethod(IMethodSymbol methodSymbol)
    {
        if (!methodSymbol.IsStatic)
        {
            return false;
        }

        if (methodSymbol.Parameters.Length == 0)
        {
            return false;
        }

        var firstParameter = methodSymbol.Parameters[0];
        if (firstParameter?.Name != StringFormatArguments.FormatArgumentName)
        {
            return false;
        }

        return true;
    }

    // True unless the call passes an existing array to the params parameter, in which case
    // the individual elements are unknown and conversion is not possible.
    private static bool IsArgumentListNotPassingArrayToParams(
        SyntaxNode expression,
        ISymbol invocationSymbol,
        ImmutableArray<IMethodSymbol> formatMethods,
        SemanticModel semanticModel,
        CancellationToken cancellationToken)
    {
        var formatMethodsAcceptingParamsArray = formatMethods
                .Where(x => x.Parameters.Length > 1 && x.Parameters[1].Type.Kind == SymbolKind.ArrayType);
        if (formatMethodsAcceptingParamsArray.Contains(invocationSymbol))
        {
            return semanticModel.GetTypeInfo(expression, cancellationToken).Type?.Kind != SymbolKind.ArrayType;
        }

        return true;
    }

    private class ConvertToInterpolatedStringCodeAction : CodeAction.DocumentChangeAction
    {
        public ConvertToInterpolatedStringCodeAction(string title, Func<CancellationToken, Task<Document>> createChangedDocument) :
            base(title, createChangedDocument)
        {
        }
    }

    // Parameter names of the string.Format overloads, used to resolve named arguments.
    private static class StringFormatArguments
    {
        public const string FormatArgumentName = "format";
        public const string ArgsArgumentName = "args";

        public static readonly ImmutableArray<string> ParamsArgumentNames =
            ImmutableArray.Create("", "arg0", "arg1", "arg2");
    }
}
| kelltrick/roslyn | src/Features/Core/Portable/ConvertToInterpolatedString/AbstractConvertPlaceholderToInterpolatedStringRefactoringProvider.cs | C# | apache-2.0 | 13,484 |
package cyclops.control.trytests;
import cyclops.control.Either;
import cyclops.control.Future;
import cyclops.control.Ior;
import cyclops.control.Maybe;
import cyclops.control.Option;
import cyclops.control.Trampoline;
import cyclops.control.Try;
import cyclops.function.Monoid;
import cyclops.companion.Semigroups;
import com.oath.cyclops.util.box.Mutable;
import cyclops.companion.Streams;
import org.junit.Before;
import org.junit.Test;
import java.util.Arrays;
import java.util.Optional;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.*;
public class TryTest {
// Fixtures: a successful Try and a failed Try sharing one exception instance.
Try<Integer,RuntimeException> just;
Try<Integer,RuntimeException> none;
RuntimeException exception = new RuntimeException();

@Before
public void setUp() throws Exception {
    just = Try.success(10);
    none = Try.failure(exception);
    // Smoke-call only; the result is intentionally discarded.
    just.toEither(-5000).mapLeft(x-> new Exception()).toTry(Exception.class);
}
// Regression check: repeatedly recovering into a failure must not blow up
// (e.g. by endlessly nesting causes); only the final toString is observed.
@Test
public void infinite() {
    Try<String, Exception> result = Option.<String>none().toTry(new Exception("asdf"));
    for (int i = 0; i < 10; i++) {
        result = result.recoverFlatMap(e -> Try.failure(new Exception("asdf")));
    }
    System.out.println(result.toString());
}
// Compile/behavior smoke test for the recover operators; results are
// intentionally unused (no assertions here).
@Test
public void recover(){
    final String result = Try.withCatch(() -> "takeOne", RuntimeException.class)
            .recoverFlatMap(__ -> Try.<String,RuntimeException>success("ignored"))
            .orElse("boo!");

    Try.withCatch(() -> "hello", RuntimeException.class)
            .recover(()->"world");
}
// coflatMap applies the function to the Try itself: identity-ish on success,
// and wraps the fallback value in a success for a failure.
@Test
public void coFlatMap(){
    assertThat(just.coflatMap(m-> m.isPresent()? m.toOptional().get() : 50),equalTo(just));
    assertThat(none.coflatMap(m-> m.isPresent()? m.toOptional().get() : 50),equalTo(Try.success(50)));
}
@Test
public void testToMaybe() {
    assertThat(just.toMaybe(),equalTo(Maybe.of(10)));
    assertThat(none.toMaybe(),equalTo(Maybe.nothing()));
}

// Helper kept for mapping tests.
private int add1(int i){
    return i+1;
}

// NOTE(review): despite the name, this asserts Ior equality, not Try.of().
@Test
public void testOfT() {
    assertThat(Ior.right(1),equalTo(Ior.right(1)));
}

@Test
public void testUnitT() {
    assertThat(just.unit(20),equalTo(Try.success(20)));
}

@Test
public void testisPrimary() {
    assertTrue(just.isSuccess());
    assertFalse(none.isSuccess());
}
// map transforms a success; on a failure it propagates the original error.
@Test
public void testMapFunctionOfQsuperTQextendsR() {
    assertThat(just.map(i->i+5),equalTo(Try.success(15)));
    assertThat(none.map(i->i+5).toEither(),equalTo(Either.left(exception)));
}

@Test
public void testFlatMap() {
    assertThat(just.flatMap(i->Try.success(i+5)),equalTo(Try.success(15)));
    assertThat(none.flatMap(i->Try.success(i+5)),equalTo(Try.failure(exception)));
}

// fold(successFn, fallbackSupplier)
@Test
public void testWhenFunctionOfQsuperTQextendsRSupplierOfQextendsR() {
    assertThat(just.fold(i->i+1,()->20),equalTo(11));
    assertThat(none.fold(i->i+1,()->20),equalTo(20));
}

// A failure streams as empty.
@Test
public void testStream() {
    assertThat(just.stream().toList(),equalTo(Arrays.asList(10)));
    assertThat(none.stream().toList(),equalTo(Arrays.asList()));
}
// Intentionally empty placeholder.
@Test
public void testOfSupplierOfT() {

}

@Test
public void testConvertTo() {

    Stream<Integer> toStream = just.fold(m->Stream.of(m),()->Stream.of());
    assertThat(toStream.collect(Collectors.toList()),equalTo(Arrays.asList(10)));
}

@Test
public void testConvertToAsync() {
    Future<Stream<Integer>> async = Future.of(()->just.fold(f->Stream.of((int)f),()->Stream.of()));

    assertThat(async.orElse(Stream.empty()).collect(Collectors.toList()),equalTo(Arrays.asList(10)));
}

// iterate from 10: 10+11+...+19 = 145
@Test
public void testIterate() {
    assertThat(just.asSupplier(-100).iterate(i->i+1).limit(10).sumInt(i->i),equalTo(145));
}

// generate repeats the value: 10 * 10 = 100
@Test
public void testGenerate() {
    assertThat(just.asSupplier(-100).generate().limit(10).sumInt(i->i),equalTo(100));
}
// Conversions to Either/Try/Ior: success maps to the right/success side,
// failure carries the original exception on the left/failure side, and
// swap() flips the sides.
@Test
public void testToXor() {
    assertThat(just.toEither(-5000),equalTo(Either.right(10)));

}

@Test
public void testToXorNone(){
    Either<RuntimeException,Integer> xor = none.toEither();
    assertTrue(xor.isLeft());
    assertThat(xor,equalTo(Either.left(exception)));

}

@Test
public void testToXorSecondary() {
    assertThat(just.toEither(-5000).swap(),equalTo(Either.left(10)));
}

@Test
public void testToXorSecondaryNone(){
    Either<Integer,RuntimeException> xorNone = none.toEither().swap();
    assertThat(xorNone,equalTo(Either.right(exception)));

}

@Test
public void testToTry() {
    assertTrue(none.toTry().isFailure());
    assertThat(just.toTry(),equalTo(Try.success(10)));
}

@Test
public void testToTryClassOfXArray() {
    assertTrue(none.toTry(Throwable.class).isFailure());
}

@Test
public void testToIor() {
    assertThat(just.toIor(),equalTo(Ior.right(10)));

}

@Test
public void testToIorNone(){
    Ior<RuntimeException,Integer> ior = none.toIor();
    assertTrue(ior.isLeft());
    assertThat(ior,equalTo(Ior.left(exception)));

}

@Test
public void testToIorSecondary() {
    assertThat(just.toIor().swap(),equalTo(Ior.left(10)));

}

@Test
public void testToIorSecondaryNone(){
    Ior<Integer,RuntimeException> ior = none.toIor().swap();
    assertTrue(ior.isRight());
    assertThat(ior,equalTo(Ior.right(exception)));

}
@Test
public void testMkString() {
assertThat(just.mkString(),equalTo("Success[10]"));
assertThat(none.mkString(),equalTo("Failure["+exception+"]"));
}
// get() on a success yields Option.some of the contained value.
@Test
public void testGet() {
assertThat(just.get(),equalTo(Option.some(10)));
}
// filter keeps the value only when the predicate holds; failures always filter to absent.
@Test
public void testFilter() {
assertFalse(just.filter(i->i<5).isPresent());
assertTrue(just.filter(i->i>5).isPresent());
assertFalse(none.filter(i->i<5).isPresent());
assertFalse(none.filter(i->i>5).isPresent());
}
// ofType keeps the value only when it is an instance of the given class.
@Test
public void testOfType() {
assertFalse(just.ofType(String.class).isPresent());
assertTrue(just.ofType(Integer.class).isPresent());
assertFalse(none.ofType(String.class).isPresent());
assertFalse(none.ofType(Integer.class).isPresent());
}
// filterNot is the complement of filter; failures still filter to absent.
@Test
public void testFilterNot() {
assertTrue(just.filterNot(i->i<5).isPresent());
assertFalse(just.filterNot(i->i>5).isPresent());
assertFalse(none.filterNot(i->i<5).isPresent());
assertFalse(none.filterNot(i->i>5).isPresent());
}
// notNull keeps a present non-null value and drops the failure case.
@Test
public void testNotNull() {
assertTrue(just.notNull().isPresent());
assertFalse(none.notNull().isPresent());
}
// Simple n-ary addition helpers — presumably used by apply/zip-style tests
// elsewhere in this class; TODO confirm they are still referenced.
private int add(int a, int b){
return a+b;
}
private int add3(int a, int b, int c){
return a+b+c;
}
private int add4(int a, int b, int c,int d){
return a+b+c+d;
}
private int add5(int a, int b, int c,int d,int e){
return a+b+c+d+e;
}
// Folding a success with a monoid returns the contained value combined with... the
// identity (1 * 10 = 10 under integer multiplication).
@Test
public void testFoldRightMonoidOfT() {
assertThat(just.fold(Monoid.of(1,Semigroups.intMult)),equalTo(10));
}
// Two-branch fold selects the success function for Success, the supplier for Failure.
@Test
public void testWhenFunctionOfQsuperMaybeOfTQextendsR() {
assertThat(just.fold(s->"hello", ()->"world"),equalTo("hello"));
assertThat(none.fold(s->"hello", ()->"world"),equalTo("world"));
}
// orElseGet evaluates the supplier only for the failure case.
@Test
public void testOrElseGet() {
assertThat(none.orElseGet(()->2),equalTo(2));
assertThat(just.orElseGet(()->2),equalTo(10));
}
// Optional interop: failure maps to empty, success to Optional.of(value).
@Test
public void testToOptional() {
assertFalse(none.toOptional().isPresent());
assertTrue(just.toOptional().isPresent());
assertThat(just.toOptional(),equalTo(Optional.of(10)));
}
// Stream interop: failure yields an empty stream, success a one-element stream.
@Test
public void testToStream() {
assertThat(none.stream().collect(Collectors.toList()).size(),equalTo(0));
assertThat(just.stream().collect(Collectors.toList()).size(),equalTo(1));
}
// orElse returns the fallback only for the failure case.
@Test
public void testOrElse() {
assertThat(none.orElse(20),equalTo(20));
assertThat(just.orElse(20),equalTo(10));
}
// Single-thread executor — NOTE(review): appears unused within the visible portion
// of this class and is never shut down; confirm usage before removing.
Executor exec = Executors.newFixedThreadPool(1);
// iterator() over a success produces exactly the contained value.
@Test
public void testIterator1() {
assertThat(Streams.stream(just.iterator()).collect(Collectors.toList()),
equalTo(Arrays.asList(10)));
}
// forEach runs the consumer only for the success case.
@Test
public void testForEach() {
Mutable<Integer> capture = Mutable.of(null);
none.forEach(c->capture.set(c));
assertNull(capture.get());
just.forEach(c->capture.set(c));
assertThat(capture.get(),equalTo(10));
}
// spliterator() behaves like a one-element source for a success.
@Test
public void testSpliterator() {
assertThat(StreamSupport.stream(just.spliterator(),false).collect(Collectors.toList()),
equalTo(Arrays.asList(10)));
}
// map transforms the contained value, staying a Success.
@Test
public void testMapFunctionOfQsuperTQextendsR1() {
assertThat(just.map(i->i+5),equalTo(Try.success(15)));
}
// peek observes the value without changing the Try.
@Test
public void testPeek() {
Mutable<Integer> capture = Mutable.of(null);
just = just.peek(c->capture.set(c));
assertThat(capture.get(),equalTo(10));
}
// Stack-safe recursive sum of 1..times via Trampoline.
private Trampoline<Integer> sum(int times, int sum){
return times ==0 ? Trampoline.done(sum) : Trampoline.more(()->sum(times-1,sum+times));
}
// unit lifts a plain value into a Success, even when called on a Failure.
@Test
public void testUnitT1() {
assertThat(none.unit(10),equalTo(just));
}
}
| aol/cyclops | cyclops/src/test/java/cyclops/control/trytests/TryTest.java | Java | apache-2.0 | 8,892 |
/*
* Copyright 2015 Brent Douglas and other contributors
* as indicated by the @author tags. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.machinecode.chainlink.core.jsl.fluent.task;
import io.machinecode.chainlink.core.jsl.fluent.FluentPropertyReference;
import io.machinecode.chainlink.spi.jsl.task.CheckpointAlgorithm;
/**
* @author <a href="mailto:brent.n.douglas@gmail.com">Brent Douglas</a>
* @since 1.0
*/
public class FluentCheckpointAlgorithm extends FluentPropertyReference<FluentCheckpointAlgorithm> implements CheckpointAlgorithm {

    /**
     * Produces an independent copy of this checkpoint algorithm definition,
     * delegating field population to the inherited {@code copy(target)} helper.
     *
     * @return a new {@link FluentCheckpointAlgorithm} populated from this instance.
     */
    @Override
    public FluentCheckpointAlgorithm copy() {
        final FluentCheckpointAlgorithm target = new FluentCheckpointAlgorithm();
        return copy(target);
    }
}
| BrentDouglas/chainlink | core/src/main/java/io/machinecode/chainlink/core/jsl/fluent/task/FluentCheckpointAlgorithm.java | Java | apache-2.0 | 1,211 |
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jp.classmethod.sparrow.model;
import java.util.List;
import org.springframework.data.repository.query.Param;
import jp.xet.sparwings.spring.data.repository.CreatableRepository;
/**
* Created by mochizukimasao on 2017/04/11.
*
* Calculator repository interface.
*
* @author mochizukimasao
* @since version
*/
public interface LineMessageEntityRepository extends CreatableRepository<LineMessageEntity, String> {
/**
 * Returns up to {@code limit} entities whose user ID matches {@code userId},
 * starting from the zero-based {@code offset} (i.e. skipping that many rows).
 *
 * @param userId the user ID to match
 * @param offset number of rows to skip (zero-based)
 * @param limit maximum number of rows to return
 * @return at most {@code limit} matching entities; an empty collection when none match
 */
// NOTE(review): the binding name @Param("size") does not match the Java parameter
// name "limit" — verify it matches the named parameter in the backing query.
List<LineMessageEntity> findByUser(
@Param("userId") String userId, @Param("offset") int offset, @Param("size") int limit);
}
| classmethod-sandbox/sparrow | src/main/java/jp/classmethod/sparrow/model/LineMessageEntityRepository.java | Java | apache-2.0 | 1,590 |
/*
* Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
* InstancePatchStateMarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
// NOTE: code-generated marshaller (@Generated) — do not hand-edit the bindings;
// regenerate from the service model instead.
public class InstancePatchStateMarshaller {
// One MarshallingInfo binding per InstancePatchState member, mapping the Java
// getter value to its JSON payload field name.
private static final MarshallingInfo<String> INSTANCEID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("InstanceId").build();
private static final MarshallingInfo<String> PATCHGROUP_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("PatchGroup").build();
private static final MarshallingInfo<String> BASELINEID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("BaselineId").build();
private static final MarshallingInfo<String> SNAPSHOTID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("SnapshotId").build();
private static final MarshallingInfo<String> OWNERINFORMATION_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("OwnerInformation").build();
private static final MarshallingInfo<Integer> INSTALLEDCOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("InstalledCount").build();
private static final MarshallingInfo<Integer> INSTALLEDOTHERCOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("InstalledOtherCount").build();
private static final MarshallingInfo<Integer> MISSINGCOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("MissingCount").build();
private static final MarshallingInfo<Integer> FAILEDCOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("FailedCount").build();
private static final MarshallingInfo<Integer> NOTAPPLICABLECOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("NotApplicableCount").build();
private static final MarshallingInfo<java.util.Date> OPERATIONSTARTTIME_BINDING = MarshallingInfo.builder(MarshallingType.DATE)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("OperationStartTime").build();
private static final MarshallingInfo<java.util.Date> OPERATIONENDTIME_BINDING = MarshallingInfo.builder(MarshallingType.DATE)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("OperationEndTime").build();
private static final MarshallingInfo<String> OPERATION_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
.marshallLocationName("Operation").build();
// Stateless, so a single shared instance suffices.
private static final InstancePatchStateMarshaller instance = new InstancePatchStateMarshaller();
public static InstancePatchStateMarshaller getInstance() {
return instance;
}
/**
 * Marshall the given parameter object.
 */
// Writes each member of the model through the protocol marshaller; a null model
// is a caller error, and any marshalling failure is wrapped as SdkClientException.
public void marshall(InstancePatchState instancePatchState, ProtocolMarshaller protocolMarshaller) {
if (instancePatchState == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(instancePatchState.getInstanceId(), INSTANCEID_BINDING);
protocolMarshaller.marshall(instancePatchState.getPatchGroup(), PATCHGROUP_BINDING);
protocolMarshaller.marshall(instancePatchState.getBaselineId(), BASELINEID_BINDING);
protocolMarshaller.marshall(instancePatchState.getSnapshotId(), SNAPSHOTID_BINDING);
protocolMarshaller.marshall(instancePatchState.getOwnerInformation(), OWNERINFORMATION_BINDING);
protocolMarshaller.marshall(instancePatchState.getInstalledCount(), INSTALLEDCOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getInstalledOtherCount(), INSTALLEDOTHERCOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getMissingCount(), MISSINGCOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getFailedCount(), FAILEDCOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getNotApplicableCount(), NOTAPPLICABLECOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getOperationStartTime(), OPERATIONSTARTTIME_BINDING);
protocolMarshaller.marshall(instancePatchState.getOperationEndTime(), OPERATIONENDTIME_BINDING);
protocolMarshaller.marshall(instancePatchState.getOperation(), OPERATION_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
}
| dagnir/aws-sdk-java | aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/InstancePatchStateMarshaller.java | Java | apache-2.0 | 5,993 |
/*****************************************************************************************************
*
* Authors:
*
* <b> Java SDK for CWL </b>
*
* @author Paul Grosu (pgrosu@gmail.com), Northeastern University
* @version 0.20
* @since April 28, 2016
*
* <p> Alternate SDK (via Avro):
*
* Denis Yuen (denis.yuen@gmail.com)
*
* CWL Draft:
*
* Peter Amstutz (peter.amstutz@curoverse.com), Curoverse
* Nebojsa Tijanic (nebojsa.tijanic@sbgenomics.com), Seven Bridges Genomics
*
* Contributors:
*
* Luka Stojanovic (luka.stojanovic@sbgenomics.com), Seven Bridges Genomics
* John Chilton (jmchilton@gmail.com), Galaxy Project, Pennsylvania State University
* Michael R. Crusoe (crusoe@ucdavis.edu), University of California, Davis
* Herve Menager (herve.menager@gmail.com), Institut Pasteur
* Maxim Mikheev (mikhmv@biodatomics.com), BioDatomics
* Stian Soiland-Reyes (soiland-reyes@cs.manchester.ac.uk), University of Manchester
*
*****************************************************************************************************/
package org.commonwl.lang;
/*****************************************************************************************************
*
* An output parameter for a CommandLineTool.
*/
/**
 * An output parameter for a CommandLineTool.
 *
 * <p>Holds the CWL output-parameter attributes (type, binding, id, streamable,
 * format, doc, secondaryFiles, label). Several attributes accept multiple
 * alternative types, hence the overloaded setters and {@code Object}-typed fields.
 */
public class CommandOutputParameter extends OutputParameter {

    /** Valid types of data that may be assigned to this parameter. */
    Object type = null;

    /** Describes how to handle the outputs of a process. */
    CommandOutputBinding outputBinding = null;

    /** The unique identifier for this parameter object. */
    String id = null;

    /**
     * Only valid for {@code File} (or array-of-File) types: {@code true} means the
     * file is read or written sequentially without seeking, so it may be streamed
     * through a named pipe. Default: {@code false}.
     */
    Boolean streamable = null;

    /**
     * Only valid for {@code File} (or array-of-File) types: one or more IRIs of
     * file-format concept nodes (preferably from an ontology). For inputs these are
     * the accepted formats; for outputs, the format assigned to the parameter.
     */
    Object format = null;

    /** A documentation string for this type, or an array of strings to concatenate. */
    Object doc = null;

    /**
     * Only valid for {@code File} (or array-of-File) types: files that must accompany
     * the primary file(s). An expression is evaluated with {@code self} bound to the
     * primary File; a pattern string strips one extension per leading {@code ^} and
     * appends the remainder to the primary file path.
     */
    Object secondaryFiles = null;

    /** A short, human-readable label of this object. */
    String label = null;

    public CommandOutputParameter() { super(); }

    /** @param value new {@link #type} as a CommandOutputArraySchema. */
    public void settype(CommandOutputArraySchema value) { type = value; }

    /** @param value new {@link #type} as a stderr marker. */
    public void settype(stderr value) { type = value; }

    /** @param value new {@link #type} as a String array. */
    public void settype(String[] value) { type = value; }

    /** @param value new {@link #type} as a CommandOutputArraySchema array. */
    public void settype(CommandOutputArraySchema[] value) { type = value; }

    /** @param value new {@link #type} as a CWLType array. */
    public void settype(CWLType[] value) { type = value; }

    /** @param value new {@link #type} as a CommandOutputRecordSchema array. */
    public void settype(CommandOutputRecordSchema[] value) { type = value; }

    /** @param value new {@link #type} as a CWLType. */
    public void settype(CWLType value) { type = value; }

    /** @param value new {@link #type} as a stdout marker. */
    public void settype(stdout value) { type = value; }

    /** @param value new {@link #type} as a CommandOutputEnumSchema. */
    public void settype(CommandOutputEnumSchema value) { type = value; }

    /** @param value new {@link #type} as a String. */
    public void settype(String value) { type = value; }

    /** @param value new {@link #type} as a CommandOutputRecordSchema. */
    public void settype(CommandOutputRecordSchema value) { type = value; }

    /** @param value new {@link #type} as a CommandOutputEnumSchema array. */
    public void settype(CommandOutputEnumSchema[] value) { type = value; }

    /** @return the current {@link #type} value. */
    public Object gettype() { return type; }

    /** @param value new {@link #outputBinding}. */
    public void setoutputBinding(CommandOutputBinding value) { outputBinding = value; }

    /** @return the current {@link #outputBinding}. */
    public CommandOutputBinding getoutputBinding() { return outputBinding; }

    /** @param value new {@link #id}. */
    public void setid(String value) { id = value; }

    /** @return the current {@link #id}. */
    public String getid() { return id; }

    /** @param value new {@link #streamable} flag. */
    public void setstreamable(Boolean value) { streamable = value; }

    /** @return the current {@link #streamable} flag. */
    public Boolean getstreamable() { return streamable; }

    /** @param value new {@link #format} as an Expression array. */
    public void setformat(Expression[] value) { format = value; }

    /** @param value new {@link #format} as a String. */
    public void setformat(String value) { format = value; }

    /** @param value new {@link #format} as a String array. */
    public void setformat(String[] value) { format = value; }

    /** @return the current {@link #format} value. */
    public Object getformat() { return format; }

    /** @param value new {@link #doc} as a String. */
    public void setdoc(String value) { doc = value; }

    /** @param value new {@link #doc} as a String array. */
    public void setdoc(String[] value) { doc = value; }

    /** @return the current {@link #doc} value. */
    public Object getdoc() { return doc; }

    /** @param value new {@link #secondaryFiles} as an Expression array. */
    public void setsecondaryFiles(Expression[] value) { secondaryFiles = value; }

    /** @param value new {@link #secondaryFiles} as a String. */
    public void setsecondaryFiles(String value) { secondaryFiles = value; }

    /** @param value new {@link #secondaryFiles} as a String array. */
    public void setsecondaryFiles(String[] value) { secondaryFiles = value; }

    /** @param value new {@link #secondaryFiles} as an Expression. */
    public void setsecondaryFiles(Expression value) { secondaryFiles = value; }

    /** @return the current {@link #secondaryFiles} value. */
    public Object getsecondaryFiles() { return secondaryFiles; }

    /** @param value new {@link #label}. */
    public void setlabel(String value) { label = value; }

    /** @return the current {@link #label}. */
    public String getlabel() { return label; }
} | common-workflow-language/cwljava | sdk-and-javadoc-generation/org/commonwl/lang/CommandOutputParameter.java | Java | apache-2.0 | 14,693 |
package io.quarkus.maven.it.assertions;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.io.File;
import java.io.FileInputStream;
import java.util.Optional;
import java.util.Properties;
import org.apache.maven.model.Model;
import org.apache.maven.model.Plugin;
import org.apache.maven.model.Profile;
import org.apache.maven.model.io.xpp3.MavenXpp3Reader;
import org.apache.maven.project.MavenProject;
import org.codehaus.plexus.util.xml.Xpp3Dom;
import io.quarkus.devtools.project.QuarkusProjectHelper;
import io.quarkus.devtools.testing.RegistryClientTestHelper;
import io.quarkus.maven.utilities.MojoUtils;
import io.quarkus.platform.tools.ToolsConstants;
import io.quarkus.registry.catalog.ExtensionCatalog;
// Assertion helpers for Maven-plugin integration tests: verify generated jars and
// the quarkus-maven-plugin wiring produced by the project setup mojo.
public class SetupVerifier {
// Asserts that the archive exists, is a valid jar, and contains a manifest.
public static void assertThatJarExists(File archive) throws Exception {
JarVerifier jarVerifier = new JarVerifier(archive);
jarVerifier.assertThatJarIsCreated();
jarVerifier.assertThatJarHasManifest();
}
// Asserts that the jar contains an entry at the given path.
public static void assertThatJarContainsFile(File archive, String file) throws Exception {
JarVerifier jarVerifier = new JarVerifier(archive);
jarVerifier.assertThatFileIsContained(file);
}
// Asserts that the jar does NOT contain an entry at the given path.
public static void assertThatJarDoesNotContainFile(File archive, String file) throws Exception {
JarVerifier jarVerifier = new JarVerifier(archive);
jarVerifier.assertThatFileIsNotContained(file);
}
// Asserts that the jar entry at 'path' contains each of the given lines.
public static void assertThatJarContainsFileWithContent(File archive, String path, String... lines) throws Exception {
JarVerifier jarVerifier = new JarVerifier(archive);
jarVerifier.assertThatFileContains(path, lines);
}
// Verifies the full pom.xml produced by setup: plugin present, template properties
// set, a single "build" execution, and a "native" profile with a native-image execution.
public static void verifySetup(File pomFile) throws Exception {
assertNotNull(pomFile, "Unable to find pom.xml");
// NOTE(review): the FileInputStream is never closed — consider try-with-resources.
MavenXpp3Reader xpp3Reader = new MavenXpp3Reader();
Model model = xpp3Reader.read(new FileInputStream(pomFile));
MavenProject project = new MavenProject(model);
Optional<Plugin> maybe = hasPlugin(project, ToolsConstants.IO_QUARKUS + ":" + ToolsConstants.QUARKUS_MAVEN_PLUGIN);
assertThat(maybe).isNotEmpty();
//Check if the properties have been set correctly
Properties properties = model.getProperties();
assertThat(properties.containsKey(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLATFORM_GROUP_ID_NAME)).isTrue();
assertThat(properties.containsKey(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLATFORM_ARTIFACT_ID_NAME)).isTrue();
assertThat(properties.containsKey(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLATFORM_VERSION_NAME)).isTrue();
assertThat(properties.containsKey(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLUGIN_VERSION_NAME)).isTrue();
// Check plugin is set
Plugin plugin = maybe.orElseThrow(() -> new AssertionError("Plugin expected"));
assertThat(plugin).isNotNull().satisfies(p -> {
assertThat(p.getArtifactId()).isEqualTo(ToolsConstants.QUARKUS_MAVEN_PLUGIN);
assertThat(p.getGroupId()).isEqualTo(ToolsConstants.IO_QUARKUS);
assertThat(p.getVersion()).isEqualTo(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLUGIN_VERSION_VALUE);
});
// Check build execution Configuration
assertThat(plugin.getExecutions()).hasSize(1).allSatisfy(execution -> {
assertThat(execution.getGoals()).containsExactly("build");
assertThat(execution.getConfiguration()).isNull();
});
// Check profile
assertThat(model.getProfiles()).hasSize(1);
Profile profile = model.getProfiles().get(0);
assertThat(profile.getId()).isEqualTo("native");
Plugin actual = profile.getBuild().getPluginsAsMap()
.get(ToolsConstants.IO_QUARKUS + ":" + ToolsConstants.QUARKUS_MAVEN_PLUGIN);
assertThat(actual).isNotNull();
assertThat(actual.getExecutions()).hasSize(1).allSatisfy(exec -> {
assertThat(exec.getGoals()).containsExactly("native-image");
assertThat(exec.getConfiguration()).isInstanceOf(Xpp3Dom.class)
.satisfies(o -> assertThat(o.toString()).contains("enableHttpUrlHandler"));
});
}
// Finds the plugin with the given groupId:artifactId key, first among build plugins,
// then falling back to pluginManagement.
public static Optional<Plugin> hasPlugin(MavenProject project, String pluginKey) {
Optional<Plugin> optPlugin = project.getBuildPlugins().stream()
.filter(plugin -> pluginKey.equals(plugin.getKey()))
.findFirst();
if (!optPlugin.isPresent() && project.getPluginManagement() != null) {
optPlugin = project.getPluginManagement().getPlugins().stream()
.filter(plugin -> pluginKey.equals(plugin.getKey()))
.findFirst();
}
return optPlugin;
}
// Verifies that the quarkus plugin version property matches the quarkus core version
// resolved from the extension catalog.
public static void verifySetupWithVersion(File pomFile) throws Exception {
// NOTE(review): the FileInputStream is never closed — consider try-with-resources.
MavenXpp3Reader xpp3Reader = new MavenXpp3Reader();
Model model = xpp3Reader.read(new FileInputStream(pomFile));
MavenProject project = new MavenProject(model);
Properties projectProps = project.getProperties();
assertNotNull(projectProps);
assertFalse(projectProps.isEmpty());
final String quarkusVersion = getPlatformDescriptor().getQuarkusCoreVersion();
assertEquals(quarkusVersion, projectProps.getProperty(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLUGIN_VERSION_NAME));
}
// Resolves the extension catalog under the test registry-client configuration,
// always restoring the original configuration afterwards.
private static ExtensionCatalog getPlatformDescriptor() throws Exception {
RegistryClientTestHelper.enableRegistryClientTestConfig();
try {
return QuarkusProjectHelper.getCatalogResolver().resolveExtensionCatalog();
} finally {
RegistryClientTestHelper.disableRegistryClientTestConfig();
}
}
}
| quarkusio/quarkus | test-framework/maven/src/main/java/io/quarkus/maven/it/assertions/SetupVerifier.java | Java | apache-2.0 | 5,917 |
/*
* Copyright 2021 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.migration.wfly.task.subsystem.microprofile;
import org.jboss.migration.core.jboss.JBossExtensionNames;
import org.jboss.migration.core.jboss.JBossSubsystemNames;
import org.jboss.migration.wfly10.config.management.ProfileResource;
import org.jboss.migration.wfly10.config.task.management.subsystem.AddSubsystemResourceSubtaskBuilder;
import org.jboss.migration.wfly10.config.task.management.subsystem.AddSubsystemResources;
/**
* @author emmartins
*/
// Migration task that adds the microprofile-jwt-smallrye extension/subsystem when
// migrating to WildFly 23.0, except for load-balancer-only configurations.
public class WildFly23_0AddMicroprofileJwtSmallryeSubsystem<S> extends AddSubsystemResources<S> {
public WildFly23_0AddMicroprofileJwtSmallryeSubsystem() {
super(JBossExtensionNames.MICROPROFILE_JWT_SMALLRYE, new SubtaskBuilder<>());
// do not add subsystem config to "standalone-load-balancer.xml" config
skipPolicyBuilders(getSkipPolicyBuilder(),
buildParameters -> context -> buildParameters.getServerConfiguration().getConfigurationPath().getPath().endsWith("standalone-load-balancer.xml"));
}
// Adds the subsystem resource itself; skips domain profiles named "load-balancer".
static class SubtaskBuilder<S> extends AddSubsystemResourceSubtaskBuilder<S> {
SubtaskBuilder() {
super(JBossSubsystemNames.MICROPROFILE_JWT_SMALLRYE);
// do not add subsystem config to profile "load-balancer"
skipPolicyBuilder(buildParameters -> context -> buildParameters.getResource().getResourceType() == ProfileResource.RESOURCE_TYPE && buildParameters.getResource().getResourceName().equals("load-balancer"));
}
}
}
| emmartins/wildfly-server-migration | servers/wildfly23.0/src/main/java/org/jboss/migration/wfly/task/subsystem/microprofile/WildFly23_0AddMicroprofileJwtSmallryeSubsystem.java | Java | apache-2.0 | 2,097 |
package p321
import (
"github.com/stretchr/testify/assert"
"testing"
)
// Test0 exercises maxOneArray: judging by the expected outputs, it selects the
// k digits from a single array that form the largest value while preserving
// their relative order (e.g. [5,2,3,4,1] with k=2 yields [5,4]).
func Test0(t *testing.T) {
	assert.Equal(t, []int{5, 4}, maxOneArray([]int{5, 2, 3, 4, 1}, 2))
	assert.Equal(t, []int{6}, maxOneArray([]int{2, 4, 6, 5}, 1))
	assert.Equal(t, []int{6, 5}, maxOneArray([]int{2, 4, 6, 5}, 2))
	assert.Equal(t, []int{4, 6, 5}, maxOneArray([]int{2, 4, 6, 5}, 3))
	// k equal to len(nums) must return the input unchanged.
	assert.Equal(t, []int{2, 4, 6, 5}, maxOneArray([]int{2, 4, 6, 5}, 4))
}
// Test1 exercises maxNumber, which merges digits drawn from two arrays into
// the maximum k-digit result (LeetCode 321 "Create Maximum Number" style,
// per the package name p321) — order within each source array is preserved.
func Test1(t *testing.T) {
	assert.Equal(t, []int{9, 8, 6, 5, 3}, maxNumber([]int{3, 4, 6, 5}, []int{9, 1, 2, 5, 8, 3}, 5))
}
| baishuai/leetcode | algorithms/p321/321_test.go | GO | apache-2.0 | 565 |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import webob
from nova import exception
from nova import context
from nova import test
from nova import log as logging
from nova.api.openstack.contrib import volumetypes
from nova.volume import volume_types
from nova.tests.api.openstack import fakes
LOG = logging.getLogger('nova.tests.api.openstack.test_volume_types')
last_param = {}
def stub_volume_type(id):
    """Build a fake volume-type dict with five canned extra-spec entries."""
    extra_specs = dict(("key%d" % i, "value%d" % i) for i in range(1, 6))
    return {"id": id,
            "name": "vol_type_%s" % str(id),
            "extra_specs": extra_specs}
def return_volume_types_get_all_types(context):
    """Stub for volume_types.get_all_types: three fake types keyed by name."""
    return dict(("vol_type_%d" % i, stub_volume_type(i)) for i in (1, 2, 3))
def return_empty_volume_types_get_all_types(context):
    """Stub for volume_types.get_all_types that reports no types at all."""
    return dict()
def return_volume_types_get_volume_type(context, id):
    """Stub lookup by id; the magic id "777" simulates a missing type."""
    if id != "777":
        return stub_volume_type(int(id))
    raise exception.VolumeTypeNotFound(volume_type_id=id)
def return_volume_types_destroy(context, name):
    """Stub destroy; the magic name "777" simulates a missing type."""
    if name != "777":
        return
    raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
def return_volume_types_create(context, name, specs):
    """Stub create: accepts any input and does nothing."""
    return None
def return_volume_types_get_by_name(context, name):
    """Stub lookup by name ("vol_type_<id>"); "777" simulates a miss."""
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    type_id = name.split("_")[2]
    return stub_volume_type(int(type_id))
class VolumeTypesApiTest(test.TestCase):
    """Tests for the os-volume-types extension controller.

    The volume_types DB-layer functions are replaced with the stub
    functions defined above, so only the controller/API behaviour is
    exercised; the magic id/name "777" is used to trigger not-found paths.
    """

    def setUp(self):
        super(VolumeTypesApiTest, self).setUp()
        fakes.stub_out_key_pair_funcs(self.stubs)
        self.controller = volumetypes.VolumeTypesController()

    def tearDown(self):
        # Undo all stubs so later tests see the real implementations.
        self.stubs.UnsetAll()
        super(VolumeTypesApiTest, self).tearDown()

    def test_volume_types_index(self):
        # index() should list every type together with its extra_specs.
        self.stubs.Set(volume_types, 'get_all_types',
                       return_volume_types_get_all_types)

        req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
        res_dict = self.controller.index(req)

        self.assertEqual(3, len(res_dict))
        for name in ['vol_type_1', 'vol_type_2', 'vol_type_3']:
            self.assertEqual(name, res_dict[name]['name'])
            self.assertEqual('value1', res_dict[name]['extra_specs']['key1'])

    def test_volume_types_index_no_data(self):
        # An empty type registry must yield an empty listing, not an error.
        self.stubs.Set(volume_types, 'get_all_types',
                       return_empty_volume_types_get_all_types)

        req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
        res_dict = self.controller.index(req)

        self.assertEqual(0, len(res_dict))

    def test_volume_types_show(self):
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)

        req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/1')
        res_dict = self.controller.show(req, 1)

        self.assertEqual(1, len(res_dict))
        self.assertEqual('vol_type_1', res_dict['volume_type']['name'])

    def test_volume_types_show_not_found(self):
        # VolumeTypeNotFound from the DB layer must surface as HTTP 404.
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)

        req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/777')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, '777')

    def test_volume_types_delete(self):
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        self.stubs.Set(volume_types, 'destroy',
                       return_volume_types_destroy)

        req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/1')
        self.controller.delete(req, 1)

    def test_volume_types_delete_not_found(self):
        # VolumeTypeNotFoundByName from destroy must surface as HTTP 404.
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        self.stubs.Set(volume_types, 'destroy',
                       return_volume_types_destroy)

        req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/777')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, '777')

    def test_create(self):
        self.stubs.Set(volume_types, 'create',
                       return_volume_types_create)
        self.stubs.Set(volume_types, 'get_volume_type_by_name',
                       return_volume_types_get_by_name)

        body = {"volume_type": {"name": "vol_type_1",
                                "extra_specs": {"key1": "value1"}}}
        req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
        res_dict = self.controller.create(req, body)

        self.assertEqual(1, len(res_dict))
        self.assertEqual('vol_type_1', res_dict['volume_type']['name'])

    def test_create_empty_body(self):
        # A missing/empty body is a client error (422 Unprocessable Entity).
        self.stubs.Set(volume_types, 'create',
                       return_volume_types_create)
        self.stubs.Set(volume_types, 'get_volume_type_by_name',
                       return_volume_types_get_by_name)

        req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, req, '')
| salv-orlando/MyRepo | nova/tests/api/openstack/contrib/test_volume_types.py | Python | apache-2.0 | 5,886 |
from typing import Callable, List, Sequence, Tuple
import numpy as np
import pytest
import scipy
import tensorflow as tf
from _pytest.fixtures import SubRequest
import gpflow
import gpflow.inducing_variables.multioutput as mf
import gpflow.kernels.multioutput as mk
from gpflow import set_trainable
from gpflow.base import AnyNDArray, RegressionData
from gpflow.conditionals import sample_conditional
from gpflow.conditionals.util import (
fully_correlated_conditional,
fully_correlated_conditional_repeat,
independent_interdomain_conditional,
sample_mvn,
)
from gpflow.config import default_float, default_jitter
from gpflow.inducing_variables import InducingPoints
from gpflow.kernels import SquaredExponential
from gpflow.likelihoods import Gaussian
from gpflow.models import SVGP
float_type = default_float()
rng = np.random.RandomState(99201)
# ------------------------------------------
# Helpers
# ------------------------------------------
def predict_all(
    models: Sequence[SVGP], Xnew: tf.Tensor, full_cov: bool, full_output_cov: bool
) -> Tuple[List[tf.Tensor], List[tf.Tensor]]:
    """
    Returns the mean and variance of f(Xnew) for each model in `models`.
    """
    predictions = [
        model.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
        for model in models
    ]
    means = [mean for mean, _ in predictions]
    variances = [variance for _, variance in predictions]
    return means, variances
def assert_all_array_elements_almost_equal(arr: Sequence[tf.Tensor]) -> None:
    """
    Check if consecutive elements of `arr` are almost equal.
    """
    for left, right in zip(arr[:-1], arr[1:]):
        np.testing.assert_allclose(left, right, atol=1e-5)
def check_equality_predictions(
    data: RegressionData, models: Sequence[SVGP], decimal: int = 3
) -> None:
    """
    Executes a couple of checks to compare the equality of predictions
    of different models. The models should be configured with the same
    training data (X, Y). The following checks are done:
    - check if elbo is (almost) equal for all models
    - check if predicted mean is (almost) equal
    - check if predicted variance is (almost) equal.
      All possible variances over the inputs and outputs are calculated
      and equality is checked.
    - check if variances within model are consistent. Parts of the covariance
      matrices should overlap, and this is tested.
    """
    elbos = [m.elbo(data) for m in models]

    # Check equality of log likelihood
    assert_all_array_elements_almost_equal(elbos)

    # Predict: full_cov = True and full_output_cov = True
    means_tt, vars_tt = predict_all(models, Data.Xs, full_cov=True, full_output_cov=True)
    # Predict: full_cov = True and full_output_cov = False
    means_tf, vars_tf = predict_all(models, Data.Xs, full_cov=True, full_output_cov=False)
    # Predict: full_cov = False and full_output_cov = True
    means_ft, vars_ft = predict_all(models, Data.Xs, full_cov=False, full_output_cov=True)
    # Predict: full_cov = False and full_output_cov = False
    means_ff, vars_ff = predict_all(models, Data.Xs, full_cov=False, full_output_cov=False)

    # check equality of all the means
    all_means = means_tt + means_tf + means_ft + means_ff
    assert_all_array_elements_almost_equal(all_means)

    # check equality of all the variances within a category
    # (e.g. full_cov=True and full_output_cov=False)
    for var in [vars_tt, vars_tf, vars_ft, vars_ff]:
        assert_all_array_elements_almost_equal(var)

    # Here we check that the variance in different categories are equal
    # after transforming to the right shape.
    var_tt = vars_tt[0]  # N x P x N x P
    var_tf = vars_tf[0]  # P x N x N
    var_ft = vars_ft[0]  # N x P x P
    var_ff = vars_ff[0]  # N x P

    # Diagonal over the output dimension of the full covariance must match
    # the per-output full covariance.
    np.testing.assert_almost_equal(
        np.diagonal(var_tt, axis1=1, axis2=3),
        np.transpose(var_tf, [1, 2, 0]),
        decimal=decimal,
    )
    # Diagonal over the input dimension must match the per-input output covariance.
    np.testing.assert_almost_equal(
        np.diagonal(var_tt, axis1=0, axis2=2),
        np.transpose(var_ft, [1, 2, 0]),
        decimal=decimal,
    )
    # Double diagonal reduces to the marginal variances.
    np.testing.assert_almost_equal(
        np.diagonal(np.diagonal(var_tt, axis1=0, axis2=2)), var_ff, decimal=decimal
    )
def expand_cov(q_sqrt: tf.Tensor, W: tf.Tensor) -> tf.Tensor:
    """
    Expand per-latent Cholesky factors into one big block-diagonal Cholesky.

    :param q_sqrt: cholesky of covariance matrices, L x M x M
    :param W: mixing matrix (square), L x L — accepted but not used by the
        current implementation (callers pass the identity).
    :return: cholesky of 1 x LM x LM covariance matrix
    """
    covariances = np.matmul(q_sqrt, np.transpose(q_sqrt, (0, 2, 1)))  # [L, M, M]
    block_diagonal = scipy.linalg.block_diag(*covariances)  # [LM, LM]
    cholesky = np.linalg.cholesky(block_diagonal)  # [LM, LM]
    return cholesky[np.newaxis, ...]
def create_q_sqrt(M: int, L: int) -> AnyNDArray:
    """ returns an array of L lower triangular matrices of size M x M """
    lower_triangles = [np.tril(rng.randn(M, M)) for _ in range(L)]
    return np.stack(lower_triangles, axis=0)  # [L, M, M]
# ------------------------------------------
# Data classes: storing constants
# ------------------------------------------
class Data:
    """Shared constants and synthetic data used by the tests in this module.

    NOTE: class-body statements run once at import time; X/Y involve
    tf.random draws, so the data differs between test sessions unless a
    seed is set elsewhere.
    """

    N, Ntest = 20, 5
    D = 1  # input dimension
    M = 3  # inducing points
    L = 2  # latent gps
    P = 3  # output dimension
    MAXITER = int(15e2)

    X = tf.random.normal((N,), dtype=tf.float64)[:, None] * 10 - 5
    G = np.hstack((0.5 * np.sin(3 * X) + X, 3.0 * np.cos(X) - X))
    Ptrue = np.array([[0.5, -0.3, 1.5], [-0.4, 0.43, 0.0]])  # [L, P]
    Y = tf.convert_to_tensor(G @ Ptrue)
    # Re-bind G and Ptrue as tensors (the numpy versions above were only
    # needed to compute Y).
    G = tf.convert_to_tensor(np.hstack((0.5 * np.sin(3 * X) + X, 3.0 * np.cos(X) - X)))
    Ptrue = tf.convert_to_tensor(np.array([[0.5, -0.3, 1.5], [-0.4, 0.43, 0.0]]))  # [L, P]
    # Add observation noise per output.
    Y += tf.random.normal(Y.shape, dtype=tf.float64) * [0.2, 0.2, 0.2]
    Xs = tf.convert_to_tensor(np.linspace(-6, 6, Ntest)[:, None])
    data = (X, Y)
class DataMixedKernelWithEye(Data):
    """ Note in this class L == P """

    M, L = 4, 3
    W = np.eye(L)

    G = np.hstack(
        [0.5 * np.sin(3 * Data.X) + Data.X, 3.0 * np.cos(Data.X) - Data.X, 1.0 + Data.X]
    )  # [N, P]

    mu_data = tf.random.uniform((M, L), dtype=tf.float64)  # [M, L]
    sqrt_data = create_q_sqrt(M, L)  # [L, M, M]

    # Flattened variational parameters for the "fully correlated" formulation.
    mu_data_full = tf.reshape(mu_data @ W, [-1, 1])  # [ML, 1]
    sqrt_data_full = expand_cov(sqrt_data, W)  # [1, LM, LM]

    Y = tf.convert_to_tensor(G @ W)
    G = tf.convert_to_tensor(G)
    W = tf.convert_to_tensor(W)
    sqrt_data = tf.convert_to_tensor(sqrt_data)
    sqrt_data_full = tf.convert_to_tensor(sqrt_data_full)
    Y += tf.random.normal(Y.shape, dtype=tf.float64) * tf.ones((L,), dtype=tf.float64) * 0.2
    data = (Data.X, Y)
class DataMixedKernel(Data):
    """Data for mixed-kernel tests: L latent GPs mixed into P outputs by W."""

    M = 5
    L = 2
    P = 3
    W = rng.randn(P, L)
    G = np.hstack([0.5 * np.sin(3 * Data.X) + Data.X, 3.0 * np.cos(Data.X) - Data.X])  # [N, L]

    mu_data = tf.random.normal((M, L), dtype=tf.float64)  # [M, L]
    sqrt_data = create_q_sqrt(M, L)  # [L, M, M]

    # Outputs are a linear mix of the latent functions plus noise.
    Y = tf.convert_to_tensor(G @ W.T)
    G = tf.convert_to_tensor(G)
    W = tf.convert_to_tensor(W)
    sqrt_data = tf.convert_to_tensor(sqrt_data)
    Y += tf.random.normal(Y.shape, dtype=tf.float64) * tf.ones((P,), dtype=tf.float64) * 0.1
    data = (Data.X, Y)
# ------------------------------------------
# Test sample conditional
# ------------------------------------------
def test_sample_mvn(full_cov: bool) -> None:
    """
    Draws 10,000 samples from a distribution
    with known mean and covariance. The test checks
    if the mean and covariance of the samples is
    close to the true mean and covariance.

    `full_cov` is parametrized via a fixture defined elsewhere
    (not visible in this module chunk).
    """
    N, D = 10000, 2
    means = tf.ones((N, D), dtype=float_type)
    if full_cov:
        covs = tf.eye(D, batch_shape=[N], dtype=float_type)
    else:
        # Diagonal (marginal) variances only.
        covs = tf.ones((N, D), dtype=float_type)

    samples = sample_mvn(means, covs, full_cov)
    samples_mean = np.mean(samples, axis=0)
    samples_cov = np.cov(samples, rowvar=False)

    np.testing.assert_array_almost_equal(samples_mean, [1.0, 1.0], decimal=1)
    np.testing.assert_array_almost_equal(samples_cov, [[1.0, 0.0], [0.0, 1.0]], decimal=1)
def test_sample_conditional(whiten: bool, full_cov: bool, full_output_cov: bool) -> None:
    """
    Compares `sample_conditional` called with an InducingPoints object (Path 1)
    against the same call with the raw inducing locations Z (Path 2): sample
    statistics, means and variances must agree.
    """
    if full_cov and full_output_cov:
        # This combination is not under test here.
        return

    q_mu = tf.random.uniform((Data.M, Data.P), dtype=tf.float64)  # [M, P]
    q_sqrt = tf.convert_to_tensor(
        [np.tril(tf.random.uniform((Data.M, Data.M), dtype=tf.float64)) for _ in range(Data.P)]
    )  # [P, M, M]
    Z = Data.X[: Data.M, ...]  # [M, D]
    Xs: AnyNDArray = np.ones((Data.N, Data.D), dtype=float_type)

    inducing_variable = InducingPoints(Z)
    kernel = SquaredExponential()

    # Path 1
    value_f, mean_f, var_f = sample_conditional(
        Xs,
        inducing_variable,
        kernel,
        q_mu,
        q_sqrt=q_sqrt,
        white=whiten,
        full_cov=full_cov,
        full_output_cov=full_output_cov,
        num_samples=int(1e5),
    )
    value_f = value_f.numpy().reshape((-1,) + value_f.numpy().shape[2:])

    # Path 2
    # NOTE(review): this skip happens *after* Path 1 already ran, so the
    # full_output_cov parametrization does redundant work — presumably
    # acceptable for a test; confirm before reordering.
    if full_output_cov:
        pytest.skip(
            "sample_conditional with X instead of inducing_variable does not support full_output_cov"
        )

    value_x, mean_x, var_x = sample_conditional(
        Xs,
        Z,
        kernel,
        q_mu,
        q_sqrt=q_sqrt,
        white=whiten,
        full_cov=full_cov,
        full_output_cov=full_output_cov,
        num_samples=int(1e5),
    )
    value_x = value_x.numpy().reshape((-1,) + value_x.numpy().shape[2:])

    # check if mean and covariance of samples are similar
    np.testing.assert_array_almost_equal(
        np.mean(value_x, axis=0), np.mean(value_f, axis=0), decimal=1
    )
    np.testing.assert_array_almost_equal(
        np.cov(value_x, rowvar=False), np.cov(value_f, rowvar=False), decimal=1
    )
    np.testing.assert_allclose(mean_x, mean_f)
    np.testing.assert_allclose(var_x, var_f)
def test_sample_conditional_mixedkernel() -> None:
    """
    Samples from a LinearCoregionalization kernel (mixing applied inside the
    conditional) must match samples from independent kernels mixed afterwards
    with the same W, up to Monte-Carlo error.
    """
    q_mu = tf.random.uniform((Data.M, Data.L), dtype=tf.float64)  # M x L
    q_sqrt = tf.convert_to_tensor(
        [np.tril(tf.random.uniform((Data.M, Data.M), dtype=tf.float64)) for _ in range(Data.L)]
    )  # L x M x M
    Z = Data.X[: Data.M, ...]  # M x D
    N = int(10e5)
    Xs: AnyNDArray = np.ones((N, Data.D), dtype=float_type)

    # Path 1: mixed kernel: most efficient route
    W = np.random.randn(Data.P, Data.L)
    mixed_kernel = mk.LinearCoregionalization([SquaredExponential() for _ in range(Data.L)], W)
    optimal_inducing_variable = mf.SharedIndependentInducingVariables(InducingPoints(Z))

    value, mean, var = sample_conditional(
        Xs, optimal_inducing_variable, mixed_kernel, q_mu, q_sqrt=q_sqrt, white=True
    )

    # Path 2: independent kernels, mixed later
    separate_kernel = mk.SeparateIndependent([SquaredExponential() for _ in range(Data.L)])
    fallback_inducing_variable = mf.SharedIndependentInducingVariables(InducingPoints(Z))

    value2, mean2, var2 = sample_conditional(
        Xs, fallback_inducing_variable, separate_kernel, q_mu, q_sqrt=q_sqrt, white=True
    )
    value2 = np.matmul(value2, W.T)

    # check if mean and covariance of samples are similar
    np.testing.assert_array_almost_equal(np.mean(value, axis=0), np.mean(value2, axis=0), decimal=1)
    np.testing.assert_array_almost_equal(
        np.cov(value, rowvar=False), np.cov(value2, rowvar=False), decimal=1
    )
# Factory producing an optional q_sqrt tensor given (LM, R).
QSqrtFactory = Callable[[tf.Tensor, int], tf.Tensor]


@pytest.fixture(
    name="fully_correlated_q_sqrt_factory",
    params=[lambda _, __: None, lambda LM, R: tf.eye(LM, batch_shape=(R,))],
)
def _q_sqrt_factory_fixture(request: SubRequest) -> QSqrtFactory:
    # Parametrized over two cases: "no q_sqrt" and "batched identity q_sqrt".
    return request.param
@pytest.mark.parametrize("R", [1, 2, 5])
def test_fully_correlated_conditional_repeat_shapes_fc_and_foc(
    R: int,
    fully_correlated_q_sqrt_factory: QSqrtFactory,
    full_cov: bool,
    full_output_cov: bool,
    whiten: bool,
) -> None:
    """
    Shape-only test: for every (full_cov, full_output_cov) combination the
    mean must be [R, N, P] and the variance must have the documented shape.
    """
    L, M, N, P = Data.L, Data.M, Data.N, Data.P

    Kmm = tf.ones((L * M, L * M)) + default_jitter() * tf.eye(L * M)
    Kmn = tf.ones((L * M, N, P))
    # Knn's expected shape depends on which covariances are requested.
    if full_cov and full_output_cov:
        Knn = tf.ones((N, P, N, P))
        expected_v_shape = [R, N, P, N, P]
    elif not full_cov and full_output_cov:
        Knn = tf.ones((N, P, P))
        expected_v_shape = [R, N, P, P]
    elif full_cov and not full_output_cov:
        Knn = tf.ones((P, N, N))
        expected_v_shape = [R, P, N, N]
    else:
        Knn = tf.ones((N, P))
        expected_v_shape = [R, N, P]

    f = tf.ones((L * M, R))
    q_sqrt = fully_correlated_q_sqrt_factory(L * M, R)

    m, v = fully_correlated_conditional_repeat(
        Kmn,
        Kmm,
        Knn,
        f,
        full_cov=full_cov,
        full_output_cov=full_output_cov,
        q_sqrt=q_sqrt,
        white=whiten,
    )

    assert m.shape.as_list() == [R, N, P]
    assert v.shape.as_list() == expected_v_shape
def test_fully_correlated_conditional_repeat_whiten(whiten: bool) -> None:
    """
    This test checks the effect of the `white` flag, which changes the projection matrix `A`.

    The impact of the flag on the value of `A` can be easily verified by its effect on the
    predicted mean. While the predicted covariance is also a function of `A` this test does not
    inspect that value.
    """
    N, P = Data.N, Data.P

    # Scalar (1x1) setup so the expected mean can be written in closed form.
    Lm = np.random.randn(1, 1).astype(np.float32) ** 2
    Kmm = Lm * Lm + default_jitter()
    Kmn = tf.ones((1, N, P))
    Knn = tf.ones((N, P))
    f = np.random.randn(1, 1).astype(np.float32)

    mean, _ = fully_correlated_conditional_repeat(
        Kmn,
        Kmm,
        Knn,
        f,
        white=whiten,
    )

    if whiten:
        # Whitened: A = Lm^{-1} Kmn, so mean = f Kmn / Lm.
        expected_mean = (f * Kmn) / Lm
    else:
        # Unwhitened: A = Kmm^{-1} Kmn, so mean = f Kmn / Kmm.
        expected_mean = (f * Kmn) / Kmm

    np.testing.assert_allclose(mean, expected_mean, rtol=1e-3)
def test_fully_correlated_conditional_shapes_fc_and_foc(
    fully_correlated_q_sqrt_factory: QSqrtFactory,
    full_cov: bool,
    full_output_cov: bool,
    whiten: bool,
) -> None:
    """
    Same shape checks as the `_repeat` variant above, but for the
    single-function (R == 1) `fully_correlated_conditional`; the mean
    therefore has no leading R dimension.
    """
    L, M, N, P = Data.L, Data.M, Data.N, Data.P

    Kmm = tf.ones((L * M, L * M)) + default_jitter() * tf.eye(L * M)
    Kmn = tf.ones((L * M, N, P))
    if full_cov and full_output_cov:
        Knn = tf.ones((N, P, N, P))
        expected_v_shape = [N, P, N, P]
    elif not full_cov and full_output_cov:
        Knn = tf.ones((N, P, P))
        expected_v_shape = [N, P, P]
    elif full_cov and not full_output_cov:
        Knn = tf.ones((P, N, N))
        expected_v_shape = [P, N, N]
    else:
        Knn = tf.ones((N, P))
        expected_v_shape = [N, P]

    f = tf.ones((L * M, 1))
    q_sqrt = fully_correlated_q_sqrt_factory(L * M, 1)

    m, v = fully_correlated_conditional(
        Kmn,
        Kmm,
        Knn,
        f,
        full_cov=full_cov,
        full_output_cov=full_output_cov,
        q_sqrt=q_sqrt,
        white=whiten,
    )

    assert m.shape.as_list() == [N, P]
    assert v.shape.as_list() == expected_v_shape
# ------------------------------------------
# Test Mok Output Dims
# ------------------------------------------
def test_shapes_of_mok() -> None:
    """Each multi-output kernel type must report the correct num_latent_gps."""
    data = DataMixedKernel

    kern_list = [SquaredExponential() for _ in range(data.L)]

    # LinearCoregionalization: latent count comes from the kernel list.
    k1 = mk.LinearCoregionalization(kern_list, W=data.W)
    assert k1.num_latent_gps == data.L

    k2 = mk.SeparateIndependent(kern_list)
    assert k2.num_latent_gps == data.L

    # SharedIndependent: latent count is the explicit output dimension.
    dims = 5
    k3 = mk.SharedIndependent(SquaredExponential(), dims)
    assert k3.num_latent_gps == dims
# ------------------------------------------
# Test Mixed Mok Kgg
# ------------------------------------------
def test_MixedMok_Kgg() -> None:
    """Kff of a LinearCoregionalization kernel must equal W Kgg W^T."""
    data = DataMixedKernel
    kern_list = [SquaredExponential() for _ in range(data.L)]
    kernel = mk.LinearCoregionalization(kern_list, W=data.W)

    Kgg = kernel.Kgg(Data.X, Data.X)  # L x N x N
    Kff = kernel.K(Data.X, Data.X)  # N x P x N x P

    # Kff = W @ Kgg @ W^T
    Kff_infered = np.einsum("lnm,pl,ql->npmq", Kgg, data.W, data.W)

    np.testing.assert_array_almost_equal(Kff, Kff_infered, decimal=5)
# ------------------------------------------
# Integration tests
# ------------------------------------------
def test_shared_independent_mok() -> None:
    """
    In this test we use the same kernel and the same inducing variables
    for each of the outputs. The outputs are considered to be uncorrelated.
    This is how GPflow handled multiple outputs before the multioutput framework was added.
    We compare three models here:
        1) an inefficient one, where we use a SharedIndependent kernel with InducingPoints.
           This combination uses a Kff of size N x P x N x P, Kfu of size N x P x M x P
           which is extremely inefficient as most of the elements are zero.
        2) efficient: SharedIndependent kernel and SharedIndependentInducingVariables
           This combination uses the most efficient form of matrices.
        3) the old way, efficient way: using Kernel and InducingPoints
        Model 2) and 3) follow more or less the same code path.
    """
    np.random.seed(0)
    # Model 1
    q_mu_1 = np.random.randn(Data.M * Data.P, 1)  # MP x 1
    q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...]  # 1 x MP x MP
    kernel_1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P)
    inducing_variable = InducingPoints(Data.X[: Data.M, ...])
    model_1 = SVGP(
        kernel_1,
        Gaussian(),
        inducing_variable,
        q_mu=q_mu_1,
        q_sqrt=q_sqrt_1,
        num_latent_gps=Data.Y.shape[-1],
    )
    # Only the variational covariance is trained; everything else is fixed
    # so the three models stay directly comparable.
    set_trainable(model_1, False)
    set_trainable(model_1.q_sqrt, True)

    gpflow.optimizers.Scipy().minimize(
        model_1.training_loss_closure(Data.data),
        variables=model_1.trainable_variables,
        options=dict(maxiter=500),
        method="BFGS",
        compile=True,
    )

    # Model 2
    q_mu_2 = np.reshape(q_mu_1, [Data.M, Data.P])  # M x P
    q_sqrt_2 = np.array(
        [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
    )  # P x M x M
    kernel_2 = SquaredExponential(variance=0.5, lengthscales=1.2)
    inducing_variable_2 = InducingPoints(Data.X[: Data.M, ...])
    model_2 = SVGP(
        kernel_2,
        Gaussian(),
        inducing_variable_2,
        num_latent_gps=Data.P,
        q_mu=q_mu_2,
        q_sqrt=q_sqrt_2,
    )
    set_trainable(model_2, False)
    set_trainable(model_2.q_sqrt, True)

    gpflow.optimizers.Scipy().minimize(
        model_2.training_loss_closure(Data.data),
        variables=model_2.trainable_variables,
        options=dict(maxiter=500),
        method="BFGS",
        compile=True,
    )

    # Model 3
    q_mu_3 = np.reshape(q_mu_1, [Data.M, Data.P])  # M x P
    q_sqrt_3 = np.array(
        [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
    )  # P x M x M
    kernel_3 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P)
    inducing_variable_3 = mf.SharedIndependentInducingVariables(
        InducingPoints(Data.X[: Data.M, ...])
    )
    model_3 = SVGP(
        kernel_3,
        Gaussian(),
        inducing_variable_3,
        num_latent_gps=Data.P,
        q_mu=q_mu_3,
        q_sqrt=q_sqrt_3,
    )
    set_trainable(model_3, False)
    set_trainable(model_3.q_sqrt, True)

    gpflow.optimizers.Scipy().minimize(
        model_3.training_loss_closure(Data.data),
        variables=model_3.trainable_variables,
        options=dict(maxiter=500),
        method="BFGS",
        compile=True,
    )

    check_equality_predictions(Data.data, [model_1, model_2, model_3])
def test_separate_independent_mok() -> None:
    """
    We use different independent kernels for each of the output dimensions.
    We can achieve this in two ways:
        1) efficient: SeparateIndependent kernel with Shared/SeparateIndependentInducingVariables
        2) inefficient: SeparateIndependent kernel with InducingPoints
    However, both methods should return the same conditional,
    and after optimization return the same log likelihood.
    """
    # Model 1 (Inefficient)
    q_mu_1 = np.random.randn(Data.M * Data.P, 1)
    q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...]  # 1 x MP x MP

    kern_list_1 = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
    kernel_1 = mk.SeparateIndependent(kern_list_1)
    inducing_variable_1 = InducingPoints(Data.X[: Data.M, ...])
    model_1 = SVGP(
        kernel_1,
        Gaussian(),
        inducing_variable_1,
        num_latent_gps=1,
        q_mu=q_mu_1,
        q_sqrt=q_sqrt_1,
    )
    # Train only the variational parameters so the two models are comparable.
    set_trainable(model_1, False)
    set_trainable(model_1.q_sqrt, True)
    set_trainable(model_1.q_mu, True)

    gpflow.optimizers.Scipy().minimize(
        model_1.training_loss_closure(Data.data),
        variables=model_1.trainable_variables,
        method="BFGS",
        compile=True,
    )

    # Model 2 (efficient)
    q_mu_2 = np.random.randn(Data.M, Data.P)
    q_sqrt_2 = np.array(
        [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
    )  # P x M x M

    kern_list_2 = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
    kernel_2 = mk.SeparateIndependent(kern_list_2)
    inducing_variable_2 = mf.SharedIndependentInducingVariables(
        InducingPoints(Data.X[: Data.M, ...])
    )
    model_2 = SVGP(
        kernel_2,
        Gaussian(),
        inducing_variable_2,
        num_latent_gps=Data.P,
        q_mu=q_mu_2,
        q_sqrt=q_sqrt_2,
    )
    set_trainable(model_2, False)
    set_trainable(model_2.q_sqrt, True)
    set_trainable(model_2.q_mu, True)

    gpflow.optimizers.Scipy().minimize(
        model_2.training_loss_closure(Data.data),
        variables=model_2.trainable_variables,
        method="BFGS",
        compile=True,
    )

    check_equality_predictions(Data.data, [model_1, model_2])
def test_separate_independent_mof() -> None:
    """
    Same test as above but we use different (i.e. separate) inducing variables
    for each of the output dimensions.
    """
    np.random.seed(0)

    # Model 1 (inefficient)
    q_mu_1 = np.random.randn(Data.M * Data.P, 1)
    q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...]  # 1 x MP x MP
    kernel_1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P)
    inducing_variable_1 = InducingPoints(Data.X[: Data.M, ...])
    model_1 = SVGP(kernel_1, Gaussian(), inducing_variable_1, q_mu=q_mu_1, q_sqrt=q_sqrt_1)
    # Train only the variational parameters so all models stay comparable.
    set_trainable(model_1, False)
    set_trainable(model_1.q_sqrt, True)
    set_trainable(model_1.q_mu, True)

    gpflow.optimizers.Scipy().minimize(
        model_1.training_loss_closure(Data.data),
        variables=model_1.trainable_variables,
        method="BFGS",
        compile=True,
    )

    # Model 2 (efficient)
    q_mu_2 = np.random.randn(Data.M, Data.P)
    q_sqrt_2 = np.array(
        [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
    )  # P x M x M
    kernel_2 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P)
    inducing_variable_list_2 = [InducingPoints(Data.X[: Data.M, ...]) for _ in range(Data.P)]
    inducing_variable_2 = mf.SeparateIndependentInducingVariables(inducing_variable_list_2)
    model_2 = SVGP(kernel_2, Gaussian(), inducing_variable_2, q_mu=q_mu_2, q_sqrt=q_sqrt_2)
    set_trainable(model_2, False)
    set_trainable(model_2.q_sqrt, True)
    set_trainable(model_2.q_mu, True)

    gpflow.optimizers.Scipy().minimize(
        model_2.training_loss_closure(Data.data),
        variables=model_2.trainable_variables,
        method="BFGS",
        compile=True,
    )

    # Model 3 (inefficient): an identical inducing variable is used P times,
    # and treated as a separate one.
    q_mu_3 = np.random.randn(Data.M, Data.P)
    q_sqrt_3 = np.array(
        [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
    )  # P x M x M
    kern_list = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
    kernel_3 = mk.SeparateIndependent(kern_list)
    inducing_variable_list_3 = [InducingPoints(Data.X[: Data.M, ...]) for _ in range(Data.P)]
    inducing_variable_3 = mf.SeparateIndependentInducingVariables(inducing_variable_list_3)
    model_3 = SVGP(kernel_3, Gaussian(), inducing_variable_3, q_mu=q_mu_3, q_sqrt=q_sqrt_3)
    set_trainable(model_3, False)
    set_trainable(model_3.q_sqrt, True)
    set_trainable(model_3.q_mu, True)

    gpflow.optimizers.Scipy().minimize(
        model_3.training_loss_closure(Data.data),
        variables=model_3.trainable_variables,
        method="BFGS",
        compile=True,
    )

    check_equality_predictions(Data.data, [model_1, model_2, model_3])
def test_mixed_mok_with_Id_vs_independent_mok() -> None:
    """
    A LinearCoregionalization kernel with W = I must behave exactly like a
    SharedIndependent kernel (DataMixedKernelWithEye has L == P and W = eye).

    NOTE(review): the models are built from DataMixedKernelWithEye but both
    training and the equality check use Data.data (the base-class data) —
    looks inconsistent with `data.data`; confirm this is intentional.
    """
    data = DataMixedKernelWithEye
    # Independent model
    k1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), data.L)
    f1 = InducingPoints(data.X[: data.M, ...])
    model_1 = SVGP(k1, Gaussian(), f1, q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full)
    set_trainable(model_1, False)
    set_trainable(model_1.q_sqrt, True)

    gpflow.optimizers.Scipy().minimize(
        model_1.training_loss_closure(Data.data),
        variables=model_1.trainable_variables,
        method="BFGS",
        compile=True,
    )

    # Mixed Model
    kern_list = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(data.L)]
    k2 = mk.LinearCoregionalization(kern_list, data.W)
    f2 = InducingPoints(data.X[: data.M, ...])
    model_2 = SVGP(k2, Gaussian(), f2, q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full)
    set_trainable(model_2, False)
    set_trainable(model_2.q_sqrt, True)

    gpflow.optimizers.Scipy().minimize(
        model_2.training_loss_closure(Data.data),
        variables=model_2.trainable_variables,
        method="BFGS",
        compile=True,
    )

    check_equality_predictions(Data.data, [model_1, model_2])
def test_compare_mixed_kernel() -> None:
    """
    Two identically-constructed LinearCoregionalization models must give
    identical predictions (sanity check of construction determinism).

    NOTE(review): models are built from DataMixedKernel but the equality check
    uses Data.data (base-class data); confirm this is intentional.
    """
    data = DataMixedKernel

    kern_list = [SquaredExponential() for _ in range(data.L)]
    k1 = mk.LinearCoregionalization(kern_list, W=data.W)
    f1 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...]))
    model_1 = SVGP(k1, Gaussian(), inducing_variable=f1, q_mu=data.mu_data, q_sqrt=data.sqrt_data)

    kern_list = [SquaredExponential() for _ in range(data.L)]
    k2 = mk.LinearCoregionalization(kern_list, W=data.W)
    f2 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...]))
    model_2 = SVGP(k2, Gaussian(), inducing_variable=f2, q_mu=data.mu_data, q_sqrt=data.sqrt_data)

    check_equality_predictions(Data.data, [model_1, model_2])
def test_multioutput_with_diag_q_sqrt() -> None:
    """
    A model parameterized with a diagonal q_sqrt (q_diag=True) must match one
    parameterized with the equivalent full (identity * 2) q_sqrt matrices.
    """
    data = DataMixedKernel

    q_sqrt_diag = np.ones((data.M, data.L)) * 2
    q_sqrt = np.repeat(np.eye(data.M)[None, ...], data.L, axis=0) * 2  # L x M x M

    kern_list = [SquaredExponential() for _ in range(data.L)]
    k1 = mk.LinearCoregionalization(kern_list, W=data.W)
    f1 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...]))
    model_1 = SVGP(
        k1,
        Gaussian(),
        inducing_variable=f1,
        q_mu=data.mu_data,
        q_sqrt=q_sqrt_diag,
        q_diag=True,
    )

    kern_list = [SquaredExponential() for _ in range(data.L)]
    k2 = mk.LinearCoregionalization(kern_list, W=data.W)
    f2 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...]))
    model_2 = SVGP(
        k2,
        Gaussian(),
        inducing_variable=f2,
        q_mu=data.mu_data,
        q_sqrt=q_sqrt,
        q_diag=False,
    )

    check_equality_predictions(Data.data, [model_1, model_2])
def test_MixedKernelSeparateMof() -> None:
    """
    Two identically-constructed LinearCoregionalization models using
    SeparateIndependentInducingVariables must give identical predictions.
    """
    data = DataMixedKernel

    kern_list = [SquaredExponential() for _ in range(data.L)]
    inducing_variable_list = [InducingPoints(data.X[: data.M, ...]) for _ in range(data.L)]
    k1 = mk.LinearCoregionalization(kern_list, W=data.W)
    f1 = mf.SeparateIndependentInducingVariables(inducing_variable_list)
    model_1 = SVGP(k1, Gaussian(), inducing_variable=f1, q_mu=data.mu_data, q_sqrt=data.sqrt_data)

    kern_list = [SquaredExponential() for _ in range(data.L)]
    inducing_variable_list = [InducingPoints(data.X[: data.M, ...]) for _ in range(data.L)]
    k2 = mk.LinearCoregionalization(kern_list, W=data.W)
    f2 = mf.SeparateIndependentInducingVariables(inducing_variable_list)
    model_2 = SVGP(k2, Gaussian(), inducing_variable=f2, q_mu=data.mu_data, q_sqrt=data.sqrt_data)

    check_equality_predictions(Data.data, [model_1, model_2])
def test_separate_independent_conditional_with_q_sqrt_none() -> None:
    """
    In response to bug #1523, this test checks that separate_independent_conditional
    does not fail when q_sqrt=None.
    """
    q_sqrt = None
    data = DataMixedKernel

    kern_list = [SquaredExponential() for _ in range(data.L)]
    kernel = gpflow.kernels.SeparateIndependent(kern_list)
    inducing_variable_list = [InducingPoints(data.X[: data.M, ...]) for _ in range(data.L)]
    inducing_variable = mf.SeparateIndependentInducingVariables(inducing_variable_list)

    # Only checks that the call completes without raising; the returned
    # moments are intentionally unused.
    mu_1, var_1 = gpflow.conditionals.conditional(
        data.X,
        inducing_variable,
        kernel,
        data.mu_data,
        full_cov=False,
        full_output_cov=False,
        q_sqrt=q_sqrt,
        white=True,
    )
def test_independent_interdomain_conditional_bug_regression() -> None:
    """
    Regression test for https://github.com/GPflow/GPflow/issues/818
    Not an exhaustive test: only verifies the conditional can be evaluated
    with correctly-shaped interdomain covariance tensors.
    """
    M = 31  # number of inducing points
    N = 11  # number of data points
    D_lat = 5  # dimensionality of each latent input slice
    D_inp = D_lat * 7  # total input dimensionality
    L = 2  # number of latent GPs
    P = 3  # number of outputs

    X = np.random.randn(N, D_inp)
    Zs = [np.random.randn(M, D_lat) for _ in range(L)]
    k = gpflow.kernels.SquaredExponential(lengthscales=np.ones(D_lat))

    def compute_Kmn(Z: tf.Tensor, X: tf.Tensor) -> tf.Tensor:
        # P x M x N: kernel between inducing inputs and each output's input slice.
        return tf.stack([k(Z, X[:, i * D_lat : (i + 1) * D_lat]) for i in range(P)])

    def compute_Knn(X: tf.Tensor) -> tf.Tensor:
        # P x N marginal variances, one row per output's input slice.
        return tf.stack([k(X[:, i * D_lat : (i + 1) * D_lat], full_cov=False) for i in range(P)])

    Kmm = tf.stack([k(Z) for Z in Zs])  # L x M x M
    Kmn = tf.stack([compute_Kmn(Z, X) for Z in Zs])  # L x P x M x N
    Kmn = tf.transpose(Kmn, [2, 0, 3, 1])  # -> M x L x N x P
    Knn = tf.transpose(compute_Knn(X))  # N x P
    q_mu = tf.convert_to_tensor(np.zeros((M, L)))
    q_sqrt = tf.convert_to_tensor(np.stack([np.eye(M) for _ in range(L)]))

    # Guard against silently passing wrongly-shaped tensors to the conditional.
    tf.debugging.assert_shapes(
        [
            (Kmm, ["L", "M", "M"]),
            (Kmn, ["M", "L", "N", "P"]),
            (Knn, ["N", "P"]),
            (q_mu, ["M", "L"]),
            (q_sqrt, ["L", "M", "M"]),
        ]
    )

    # The call itself is the assertion: it must not raise (see issue #818).
    _, _ = independent_interdomain_conditional(
        Kmn, Kmm, Knn, q_mu, q_sqrt=q_sqrt, full_cov=False, full_output_cov=False
    )
def test_independent_interdomain_conditional_whiten(whiten: bool) -> None:
    """
    This test checks the effect of the `white` flag, which changes the projection matrix `A`.
    The impact of the flag on the value of `A` can be easily verified by its effect on the
    predicted mean. While the predicted covariance is also a function of `A` this test does not
    inspect that value.

    NOTE(review): `whiten` is presumably supplied by a pytest parametrize
    decorator that sits outside this view — confirm against the full file.
    """
    N, P = Data.N, Data.P

    Lm = np.random.randn(1, 1, 1).astype(np.float32) ** 2  # positive scalar "Cholesky factor"
    Kmm = Lm * Lm + default_jitter()  # 1 x 1 x 1 inducing covariance

    Kmn = tf.ones((1, 1, N, P))
    Knn = tf.ones((N, P))
    f = np.random.randn(1, 1).astype(np.float32)

    mean, _ = independent_interdomain_conditional(
        Kmn,
        Kmm,
        Knn,
        f,
        white=whiten,
    )

    # Whitened: mean is projected through Lm; unwhitened: through Kmm.
    if whiten:
        expected_mean = (f * Kmn) / Lm
    else:
        expected_mean = (f * Kmn) / Kmm

    np.testing.assert_allclose(mean, expected_mean[0][0], rtol=1e-2)
| GPflow/GPflow | tests/gpflow/conditionals/test_multioutput.py | Python | apache-2.0 | 31,704 |
# frozen_string_literal: true
# encoding: utf-8
# Copyright (C) 2018-2020 MongoDB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
module Mongo
  module Operation
    class Aggregate

      # A MongoDB aggregate operation sent as an op message.
      #
      # @api private
      #
      # @since 2.5.2
      class OpMsg < OpMsgBase
        # Mixins layer on causal-consistency support, transaction error
        # labelling, and result-class selection respectively.
        include CausalConsistencySupported
        include ExecutableTransactionLabel
        include PolymorphicResult
      end
    end
  end
end
| mongodb/mongo-ruby-driver | lib/mongo/operation/aggregate/op_msg.rb | Ruby | apache-2.0 | 983 |
/*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.undertow.server;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channel;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import io.undertow.testutils.AjpIgnore;
import io.undertow.testutils.DefaultServer;
import io.undertow.testutils.SpdyIgnore;
import io.undertow.testutils.TestHttpClient;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.xnio.ChannelListener;
import org.xnio.Options;
import org.xnio.channels.StreamSinkChannel;
import org.xnio.channels.WriteTimeoutException;
/**
 * Tests write timeout with a client that is slow to read the response.
 *
 * @author Stuart Douglas
 */
@RunWith(DefaultServer.class)
@AjpIgnore
@SpdyIgnore
@Ignore("This test fails intermittently")
public class WriteTimeoutTestCase {

    /** First write-side exception observed by the server-side listener. */
    private volatile Exception exception;

    /** Released once the server side has recorded a write failure. */
    private static final CountDownLatch errorLatch = new CountDownLatch(1);

    @Test
    public void testWriteTimeout() throws IOException, InterruptedException {
        DefaultServer.setRootHandler(new HttpHandler() {
            @Override
            public void handleRequest(final HttpServerExchange exchange) throws Exception {
                final StreamSinkChannel response = exchange.getResponseChannel();
                try {
                    // Very short timeout so a slow-reading client triggers WriteTimeoutException.
                    response.setOption(Options.WRITE_TIMEOUT, 10);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
                final int capacity = 1 * 1024 * 1024; //1mb
                final ByteBuffer originalBuffer = ByteBuffer.allocateDirect(capacity);
                for (int i = 0; i < capacity; ++i) {
                    originalBuffer.put((byte) '*');
                }
                originalBuffer.flip();
                response.getWriteSetter().set(new ChannelListener<Channel>() {
                    private ByteBuffer buffer = originalBuffer.duplicate();
                    int count = 0;

                    @Override
                    public void handleEvent(final Channel channel) {
                        do {
                            try {
                                int res = response.write(buffer);
                                if (res == 0) {
                                    return;
                                }
                            } catch (IOException e) {
                                exception = e;
                                errorLatch.countDown();
                                // BUGFIX: stop writing once the channel has failed. Previously
                                // the loop kept hammering the broken channel, repeatedly
                                // overwriting the recorded exception.
                                return;
                            }
                            if (!buffer.hasRemaining()) {
                                count++;
                                buffer = originalBuffer.duplicate();
                            }
                        } while (count < 1000);
                        exchange.endExchange();
                    }
                });
                response.wakeupWrites();
            }
        });
        final TestHttpClient client = new TestHttpClient();
        try {
            HttpGet get = new HttpGet(DefaultServer.getDefaultServerURL());
            try {
                HttpResponse result = client.execute(get);
                InputStream content = result.getEntity().getContent();
                byte[] buffer = new byte[512];
                int r = 0;
                while ((r = content.read(buffer)) > 0) {
                    // Read slowly so the server-side write stalls and times out.
                    Thread.sleep(200);
                    if (exception != null) {
                        Assert.assertEquals(WriteTimeoutException.class, exception.getClass());
                        return;
                    }
                }
                Assert.fail("Write did not time out");
            } catch (IOException e) {
                // The connection may drop before the client observes the flag;
                // wait for the server side to record the failure.
                if (errorLatch.await(5, TimeUnit.SECONDS)) {
                    Assert.assertEquals(WriteTimeoutException.class, exception.getClass());
                } else {
                    Assert.fail("Write did not time out");
                }
            }
        } finally {
            client.getConnectionManager().shutdown();
        }
    }
}
| emag/codereading-undertow | core/src/test/java/io/undertow/server/WriteTimeoutTestCase.java | Java | apache-2.0 | 4,935 |
// Copyright 2018 NetApp, Inc. All Rights Reserved.
package main
import (
"os"
"github.com/netapp/trident/cli/cmd"
)
// main runs the trident CLI: it executes the root cobra command, maps any
// returned error to an exit code via SetExitCodeFromError, and terminates
// the process with that code.
func main() {
	cmd.ExitCode = cmd.ExitCodeSuccess
	if err := cmd.RootCmd.Execute(); err != nil {
		cmd.SetExitCodeFromError(err)
	}
	os.Exit(cmd.ExitCode)
}
| NetApp/trident | cli/main.go | GO | apache-2.0 | 282 |
package ru.stqa.pft.mantis.appmanager;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.ie.InternetExplorerDriver;
import org.openqa.selenium.remote.BrowserType;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
/**
* Created by Александр on 06.11.2016.
*/
public class ApplicationManager {
    /** Active WebDriver instance; created in init(), destroyed in stop(). */
    WebDriver wd;

    private final Properties properties;
    private String browser;

    /**
     * @param browser one of the Selenium {@code BrowserType} constants
     *                selecting which driver {@link #init()} starts.
     */
    public ApplicationManager(String browser) {
        this.browser = browser;
        properties = new Properties();
    }

    /**
     * Loads the target-specific properties file, starts the configured
     * browser, and navigates it to {@code web.baseUrl}.
     *
     * @throws IOException if the properties file cannot be read.
     */
    public void init() throws IOException {
        String target = System.getProperty("target", "local");
        // BUGFIX: close the FileReader — it previously leaked on every init().
        // NOTE(review): the absolute path is machine-specific; consider a
        // classpath-relative location.
        try (FileReader reader = new FileReader(new File(String.format("L:/Devel/java_pft/addressbook-web-tests/src/test/resources/%s.properties", target)))) {
            properties.load(reader);
        }
        if (Objects.equals(browser, BrowserType.FIREFOX)) {
            wd = new FirefoxDriver();
        } else if (Objects.equals(browser, BrowserType.CHROME)) {
            wd = new ChromeDriver();
        } else if (Objects.equals(browser, BrowserType.IE)) {
            wd = new InternetExplorerDriver();
        }
        wd.manage().timeouts().implicitlyWait(0, TimeUnit.SECONDS);
        wd.get(properties.getProperty("web.baseUrl"));
    }

    /** Shuts the browser down; safe to call even when init() failed or never ran. */
    public void stop() {
        if (wd != null) {
            wd.quit();
        }
    }
}
| martyanova/java_pft | mantis-tests/src/test/java/ru/stqa/pft/mantis/appmanager/ApplicationManager.java | Java | apache-2.0 | 1,475 |
class Fluentd
  module Setting
    # Settings model for Fluentd's built-in in-memory buffer plugin.
    # The memory buffer exposes no plugin-specific options, hence the
    # empty parameter and option lists below.
    class BufferMemory
      include Fluentd::Setting::Plugin

      register_plugin("buffer", "memory")

      # Default form parameters (none for the memory buffer).
      def self.initial_params
        {}
      end

      # Options shown in the basic settings UI (none).
      def common_options
        []
      end

      # Options shown in the advanced settings UI (none).
      def advanced_options
        []
      end
    end
  end
end
| fluent/fluentd-ui | app/models/fluentd/setting/buffer_memory.rb | Ruby | apache-2.0 | 302 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.elasticache.model.transform;
import java.util.ArrayList;
import javax.xml.stream.events.XMLEvent;
import javax.annotation.Generated;
import com.amazonaws.services.elasticache.model.*;
import com.amazonaws.transform.Unmarshaller;
import com.amazonaws.transform.StaxUnmarshallerContext;
import com.amazonaws.transform.SimpleTypeStaxUnmarshallers.*;
/**
 * DescribeUpdateActionsResult StAX Unmarshaller
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeUpdateActionsResultStaxUnmarshaller implements Unmarshaller<DescribeUpdateActionsResult, StaxUnmarshallerContext> {

    /**
     * Walks the StAX event stream and populates a DescribeUpdateActionsResult
     * from the elements found at the expected depth ("Marker" and
     * "UpdateActions/UpdateAction").
     */
    public DescribeUpdateActionsResult unmarshall(StaxUnmarshallerContext context) throws Exception {
        DescribeUpdateActionsResult describeUpdateActionsResult = new DescribeUpdateActionsResult();
        int originalDepth = context.getCurrentDepth();
        int targetDepth = originalDepth + 1;

        // At the start of the document the payload sits two levels deeper
        // (inside the response and result wrapper elements).
        if (context.isStartOfDocument())
            targetDepth += 2;

        while (true) {
            XMLEvent xmlEvent = context.nextEvent();
            if (xmlEvent.isEndDocument())
                return describeUpdateActionsResult;

            if (xmlEvent.isAttribute() || xmlEvent.isStartElement()) {

                if (context.testExpression("Marker", targetDepth)) {
                    describeUpdateActionsResult.setMarker(StringStaxUnmarshaller.getInstance().unmarshall(context));
                    continue;
                }

                // List container element: initialise the collection.
                if (context.testExpression("UpdateActions", targetDepth)) {
                    describeUpdateActionsResult.withUpdateActions(new ArrayList<UpdateAction>());
                    continue;
                }

                // One list entry: delegate to the member unmarshaller.
                if (context.testExpression("UpdateActions/UpdateAction", targetDepth)) {
                    describeUpdateActionsResult.withUpdateActions(UpdateActionStaxUnmarshaller.getInstance().unmarshall(context));
                    continue;
                }
            } else if (xmlEvent.isEndElement()) {
                // Popped above the element we started in: parsing is complete.
                if (context.getCurrentDepth() < originalDepth) {
                    return describeUpdateActionsResult;
                }
            }
        }
    }

    private static DescribeUpdateActionsResultStaxUnmarshaller instance;

    /**
     * Lazily-created shared instance. NOTE(review): the lazy init is not
     * thread-safe; at worst an extra stateless instance is created. This is
     * generated code, consistent with the SDK's other unmarshallers.
     */
    public static DescribeUpdateActionsResultStaxUnmarshaller getInstance() {
        if (instance == null)
            instance = new DescribeUpdateActionsResultStaxUnmarshaller();
        return instance;
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-elasticache/src/main/java/com/amazonaws/services/elasticache/model/transform/DescribeUpdateActionsResultStaxUnmarshaller.java | Java | apache-2.0 | 3,071 |
using System.ComponentModel.DataAnnotations;
namespace ChatSystem.Server.Models.Account
{
    /// <summary>
    /// Request body for attaching an external login to an account.
    /// </summary>
    public class AddExternalLoginBindingModel
    {
        /// <summary>
        /// Access token issued by the external login provider. Required.
        /// </summary>
        [Required]
        [Display(Name = "External access token")]
        public string ExternalAccessToken { get; set; }
    }
}
package mtag
import (
"github.com/mabetle/mcore"
"strings"
)
// label tag format:
// label="zh='' en=''"

// GetLabelTag returns the raw value of the "label" struct tag on fieldName
// of v, plus a bool reporting whether the tag exists. Delegates to GetTag.
func GetLabelTag(v interface{}, fieldName string) (string, bool) {
	return GetTag(v, fieldName, "label")
}
// ParseKeyValueMap parses a whitespace-separated list of key=value pairs
// (values may be wrapped in single or double quotes) into a map, e.g.
// "zh='中文' en='English'" -> {"zh": "中文", "en": "English"}.
// Rows without an '=' are skipped.
func ParseKeyValueMap(value string) map[string]string {
	result := make(map[string]string)
	// strings.Fields handles runs of spaces/tabs; the previous
	// Split(value, " ") produced empty rows for consecutive spaces.
	for _, row := range strings.Fields(value) {
		// SplitN keeps '=' characters inside the value intact; the previous
		// Split silently dropped any pair whose value contained '='.
		kv := strings.SplitN(row, "=", 2)
		if len(kv) != 2 {
			continue
		}
		k := strings.TrimSpace(kv[0])
		v := strings.TrimSpace(kv[1])
		v = strings.Trim(v, "'")
		v = strings.Trim(v, "\"")
		v = strings.TrimSpace(v)
		result[k] = v
	}
	return result
}
// GetLocaleLabel returns field label by locale.
// locale format: en en_US / zh zh_CN zh_HK etc.
// Lookup order: exact locale -> bare language -> "en" -> label derived
// from the field name via mcore.ToLabel.
func GetLocaleLabel(v interface{}, fieldName string, locale string) string {
	labelValue, e := GetLabelTag(v, fieldName)
	// No label tag at all: derive a label from the field name.
	if !e {
		return mcore.ToLabel(fieldName)
	}
	// Normalise "en-US" style locales to "en_US".
	locale = strings.Replace(locale, "-", "_", -1)
	lang := strings.Split(locale, "_")[0]
	m := ParseKeyValueMap(labelValue)
	// Exact lang_COUNTRY match.
	if v, ok := m[locale]; ok {
		return v
	}
	// Bare language match.
	if v, ok := m[lang]; ok {
		return v
	}
	// English fallback.
	if v, ok := m["en"]; ok {
		return v
	}
	// Nothing matched: derive from field name.
	return mcore.ToLabel(fieldName)
}
// GetLabelZH returns the Chinese ("zh") label for fieldName of v.
func GetLabelZH(v interface{}, fieldName string) string {
	return GetLocaleLabel(v, fieldName, "zh")
}

// GetLabelEN returns the English ("en") label for fieldName of v.
func GetLabelEN(v interface{}, fieldName string) string {
	return GetLocaleLabel(v, fieldName, "en")
}
| mabetle/mcore | mtag/tag_label.go | GO | apache-2.0 | 1,652 |
from __future__ import unicode_literals
import pygst
pygst.require('0.10')
import gst # noqa
from mopidy.audio import output
import logging
logger = logging.getLogger(__name__)
# This variable is a global that is set by the Backend
# during initialization from the extension properties
encoder = 'identity'
class RtpSink(gst.Bin):
    """GStreamer bin that encodes audio and fans it out as RTP over UDP.

    Pipeline: queue -> audiorate -> <encoder> -> rtpgstpay -> tee, where
    the tee (an ``output.AudioOutput`` bin) allows UDP listeners to be
    attached and detached while the pipeline is running.
    """

    def __init__(self):
        super(RtpSink, self).__init__()
        # These elements are 'always on' even if nobody is
        # subscribed to listen. It streamlines the process
        # of adding/removing listeners.
        queue = gst.element_factory_make('queue')
        rate = gst.element_factory_make('audiorate')
        # `encoder` is the module-level element name set by the backend.
        enc = gst.element_factory_make(encoder)
        pay = gst.element_factory_make('rtpgstpay')
        # Re-use of the audio output bin which handles
        # dynamic element addition/removal nicely
        self.tee = output.AudioOutput()
        self.add_many(queue, rate, enc, pay, self.tee)
        gst.element_link_many(queue, rate, enc, pay, self.tee)
        # Expose the internal queue's sink pad as this bin's sink pad.
        pad = queue.get_pad('sink')
        ghost_pad = gst.GhostPad('sink', pad)
        self.add_pad(ghost_pad)

    def add(self, host, port):
        """Attach a new UDP listener at host:port to the running pipeline."""
        b = gst.Bin()
        queue = gst.element_factory_make('queue')
        udpsink = gst.element_factory_make('udpsink')
        udpsink.set_property('host', host)
        udpsink.set_property('port', port)
        # Both async and sync must be true to avoid seek
        # timestamp sync problems
        udpsink.set_property('sync', True)
        udpsink.set_property('async', True)
        b.add_many(queue, udpsink)
        gst.element_link_many(queue, udpsink)
        pad = queue.get_pad('sink')
        ghost_pad = gst.GhostPad('sink', pad)
        b.add_pad(ghost_pad)
        # Sinks are keyed "port@host" so they can be removed later.
        ident = str(port) + '@' + host
        self.tee.add_sink(ident, b)

    def remove(self, host, port):
        """Detach the UDP listener previously added for host:port."""
        ident = str(port) + '@' + host
        self.tee.remove_sink(ident)
| liamw9534/mopidy-rtp | mopidy_rtp/sink.py | Python | apache-2.0 | 1,944 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.Request;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.protocol.*;
import com.amazonaws.protocol.Protocol;
import com.amazonaws.annotation.SdkInternalApi;
/**
 * DescribePatchPropertiesRequest Marshaller
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class DescribePatchPropertiesRequestProtocolMarshaller implements Marshaller<Request<DescribePatchPropertiesRequest>, DescribePatchPropertiesRequest> {

    // Static description of the wire operation: AWS-JSON protocol, POST to "/",
    // identified by the AmazonSSM.DescribePatchProperties target header.
    private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder().protocol(Protocol.AWS_JSON).requestUri("/")
            .httpMethodName(HttpMethodName.POST).hasExplicitPayloadMember(false).hasPayloadMembers(true)
            .operationIdentifier("AmazonSSM.DescribePatchProperties").serviceName("AWSSimpleSystemsManagement").build();

    private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory;

    public DescribePatchPropertiesRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) {
        this.protocolFactory = protocolFactory;
    }

    /**
     * Serialises the request into an SDK {@code Request} using the protocol
     * marshaller configured for this operation.
     *
     * @throws SdkClientException if the argument is null or marshalling fails.
     */
    public Request<DescribePatchPropertiesRequest> marshall(DescribePatchPropertiesRequest describePatchPropertiesRequest) {

        if (describePatchPropertiesRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            final ProtocolRequestMarshaller<DescribePatchPropertiesRequest> protocolMarshaller = protocolFactory.createProtocolMarshaller(
                    SDK_OPERATION_BINDING, describePatchPropertiesRequest);

            protocolMarshaller.startMarshalling();
            DescribePatchPropertiesRequestMarshaller.getInstance().marshall(describePatchPropertiesRequest, protocolMarshaller);
            return protocolMarshaller.finishMarshalling();
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/DescribePatchPropertiesRequestProtocolMarshaller.java | Java | apache-2.0 | 2,847 |
/*
#
# Copyright 2014 The Trustees of Indiana University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
*/
package edu.indiana.d2i.komadu.ingest.db;
public class TableAttributeData {

    /**
     * enum to keep data types of the value to be inserted
     * add to this list if a new type is needed
     */
    public static enum DataType {
        STRING,
        INT,
        LONG,
        FLOAT,
        DOUBLE,
        SHORT,
        DATE,
        TIME,
        TIMESTAMP
    }

    /** Column name the value belongs to. */
    private String attributeName;
    /** Value to insert; its runtime type should correspond to {@link #type}. */
    private Object value;
    /** Declared type used when binding {@link #value} to the statement. */
    private DataType type;

    public TableAttributeData(String attributeName, Object value, DataType type) {
        this.attributeName = attributeName;
        this.value = value;
        this.type = type;
    }

    public String getAttributeName() {
        return attributeName;
    }

    public void setAttributeName(String attributeName) {
        this.attributeName = attributeName;
    }

    public Object getValue() {
        return value;
    }

    public void setValue(Object value) {
        this.value = value;
    }

    public DataType getType() {
        return type;
    }

    public void setType(DataType type) {
        this.type = type;
    }
}
| Data-to-Insight-Center/komadu | service-core/src/main/java/edu/indiana/d2i/komadu/ingest/db/TableAttributeData.java | Java | apache-2.0 | 1,714 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using AC_ServiceClient.ACServiceReference;
using AC_SessionReport;
namespace AC_ServiceClient
{
    /// <summary>
    /// Forwards finished session reports to the AC web service.
    /// </summary>
    public class ACServiceSessionReportHandler : ISessionReportHandler
    {
        /// <summary>
        /// Posts the report to the service endpoint.
        /// </summary>
        public void HandleReport(SessionReport report)
        {
            // BUGFIX: the WCF client channel was never closed. Use the
            // standard Close/Abort pattern so the channel is always released,
            // even when the call or Close() itself faults.
            ACServiceClient client = new ACServiceClient();
            try
            {
                client.PostResult(report);
                client.Close();
            }
            catch
            {
                client.Abort();
                throw;
            }
        }
    }
}
| flitzi/AC_SERVER_APPS | AC_ServiceClient/ACServiceSessionReportHandler.cs | C# | apache-2.0 | 494 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.webmonitor.utils;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.handler.codec.http.router.Handler;
import io.netty.handler.codec.http.router.Router;
import io.netty.handler.ssl.SslHandler;
import io.netty.handler.stream.ChunkedWriteHandler;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.net.SSLUtils;
import org.apache.flink.runtime.webmonitor.HttpRequestHandler;
import org.apache.flink.runtime.webmonitor.PipelineErrorHandler;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import java.io.File;
import java.net.InetSocketAddress;
/**
 * This classes encapsulates the boot-strapping of netty for the web-frontend.
 */
public class WebFrontendBootstrap {
	/** Router dispatching HTTP requests to their handlers. */
	private final Router router;
	private final Logger log;
	/** Directory used for file uploads by the HttpRequestHandler. */
	private final File uploadDir;
	/** Server SSL context; null disables HTTPS. */
	private final SSLContext serverSSLContext;
	private final ServerBootstrap bootstrap;
	/** Bound server channel; used to look up the actual listening port. */
	private final Channel serverChannel;

	/**
	 * Builds the netty pipeline (optional SSL, HTTP codec, chunked writes,
	 * upload handling, routing, error handling), binds the server socket and
	 * blocks until binding completes.
	 *
	 * @param configuredAddress address to bind, or null for the wildcard address.
	 * @param configuredPort    port to bind; 0 selects an ephemeral port.
	 * @throws InterruptedException if interrupted while waiting for the bind.
	 */
	public WebFrontendBootstrap(
			Router router,
			Logger log,
			File directory,
			SSLContext sslContext,
			String configuredAddress,
			int configuredPort,
			final Configuration config) throws InterruptedException {
		this.router = Preconditions.checkNotNull(router);
		this.log = Preconditions.checkNotNull(log);
		this.uploadDir = directory;
		this.serverSSLContext = sslContext;
		ChannelInitializer<SocketChannel> initializer = new ChannelInitializer<SocketChannel>() {

			@Override
			protected void initChannel(SocketChannel ch) {
				Handler handler = new Handler(WebFrontendBootstrap.this.router);

				// SSL should be the first handler in the pipeline
				if (serverSSLContext != null) {
					SSLEngine sslEngine = serverSSLContext.createSSLEngine();
					SSLUtils.setSSLVerAndCipherSuites(sslEngine, config);
					sslEngine.setUseClientMode(false);
					ch.pipeline().addLast("ssl", new SslHandler(sslEngine));
				}

				ch.pipeline()
					.addLast(new HttpServerCodec())
					.addLast(new ChunkedWriteHandler())
					.addLast(new HttpRequestHandler(uploadDir))
					.addLast(handler.name(), handler)
					.addLast(new PipelineErrorHandler(WebFrontendBootstrap.this.log));
			}
		};

		NioEventLoopGroup bossGroup = new NioEventLoopGroup(1);
		NioEventLoopGroup workerGroup = new NioEventLoopGroup();

		this.bootstrap = new ServerBootstrap();
		this.bootstrap
			.group(bossGroup, workerGroup)
			.channel(NioServerSocketChannel.class)
			.childHandler(initializer);

		ChannelFuture ch;
		if (configuredAddress == null) {
			ch = this.bootstrap.bind(configuredPort);
		} else {
			ch = this.bootstrap.bind(configuredAddress, configuredPort);
		}
		// Block until the bind completes so the local address is available.
		this.serverChannel = ch.sync().channel();

		InetSocketAddress bindAddress = (InetSocketAddress) serverChannel.localAddress();
		String address = bindAddress.getAddress().getHostAddress();
		int port = bindAddress.getPort();

		this.log.info("Web frontend listening at {}" + ':' + "{}", address, port);
	}

	public ServerBootstrap getBootstrap() {
		return bootstrap;
	}

	/**
	 * @return the actual listening port (useful when bound to port 0),
	 *         or -1 if it cannot be determined.
	 */
	public int getServerPort() {
		Channel server = this.serverChannel;
		if (server != null) {
			try {
				return ((InetSocketAddress) server.localAddress()).getPort();
			}
			catch (Exception e) {
				log.error("Cannot access local server port", e);
			}
		}

		return -1;
	}

	/** Closes the server channel and gracefully shuts down both event loop groups. */
	public void shutdown() {
		if (this.serverChannel != null) {
			this.serverChannel.close().awaitUninterruptibly();
		}
		if (bootstrap != null) {
			if (bootstrap.group() != null) {
				bootstrap.group().shutdownGracefully();
			}
			if (bootstrap.childGroup() != null) {
				bootstrap.childGroup().shutdownGracefully();
			}
		}
	}
}
| oscarceballos/flink-1.3.2 | flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/utils/WebFrontendBootstrap.java | Java | apache-2.0 | 4,828 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// ** This file is automatically generated by gapic-generator-typescript. **
// ** https://github.com/googleapis/gapic-generator-typescript **
// ** All changes to this file may be overwritten. **
'use strict';
function main() {
  // [START cloudkms_v1_generated_KeyManagementService_GenerateRandomBytes_async]
  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  /**
   * The project-specific location in which to generate random bytes.
   * For example, "projects/my-project/locations/us-central1".
   */
  // const location = 'abc123'
  /**
   * The length in bytes of the amount of randomness to retrieve. Minimum 8
   * bytes, maximum 1024 bytes.
   */
  // const lengthBytes = 1234
  /**
   * The ProtectionLevel google.cloud.kms.v1.ProtectionLevel to use when
   * generating the random data. Currently, only
   * HSM google.cloud.kms.v1.ProtectionLevel.HSM protection level is
   * supported.
   */
  // const protectionLevel = {}

  // Imports the Kms library
  const {KeyManagementServiceClient} = require('@google-cloud/kms').v1;

  // Instantiates a client
  const kmsClient = new KeyManagementServiceClient();

  async function callGenerateRandomBytes() {
    // Construct request
    // NOTE(review): the generated request object is empty; the fields
    // described above (location, lengthBytes, protectionLevel) presumably
    // need to be added here for a real call — confirm against the API docs.
    const request = {
    };

    // Run request
    const response = await kmsClient.generateRandomBytes(request);
    console.log(response);
  }

  callGenerateRandomBytes();
  // [END cloudkms_v1_generated_KeyManagementService_GenerateRandomBytes_async]
}

// Surface unhandled promise rejections as a non-zero exit code.
process.on('unhandledRejection', err => {
  console.error(err.message);
  process.exitCode = 1;
});
main(...process.argv.slice(2));
| googleapis/nodejs-kms | samples/generated/v1/key_management_service.generate_random_bytes.js | JavaScript | apache-2.0 | 2,228 |
package ru.job4j.toDoList.model.dao;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.Transaction;
import org.hibernate.cfg.Configuration;
import ru.job4j.toDoList.model.entity.Item;
import java.sql.Timestamp;
import java.util.Date;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
public class HiberStorage implements Storage {

    private static final Logger LOGGER = LogManager.getLogger(HiberStorage.class);

    // Source of item ids. NOTE(review): starts at -1, so the first item gets
    // id -1 — confirm this is intentional before changing.
    private static AtomicInteger NEXT_ID = new AtomicInteger(-1);

    private static final HiberStorage INSTANCE = new HiberStorage();

    private final SessionFactory factory;

    private HiberStorage() {
        factory = new Configuration().configure().buildSessionFactory();
    }

    /** @return the shared singleton instance. */
    public static HiberStorage getInstance() {
        return INSTANCE;
    }

    /**
     * Runs the given command inside a transaction: commits on success,
     * rolls back on failure, and always closes the session.
     *
     * BUGFIX: the original committed in a {@code finally} block even after a
     * rollback; committing a rolled-back transaction throws and masks the
     * original exception.
     */
    private <T> T tx(final Function<Session, T> command) {
        final Session session = factory.openSession();
        final Transaction tx = session.beginTransaction();
        try {
            final T result = command.apply(session);
            tx.commit();
            return result;
        } catch (final Exception e) {
            tx.rollback();
            throw e;
        } finally {
            session.close();
        }
    }

    /** Assigns the item an id and creation timestamp, persists it, and returns it. */
    @Override
    public Item add(Item item) {
        item.setId(NEXT_ID.getAndIncrement());
        item.setCreated(new Timestamp(new Date().getTime()));
        return tx(session -> {
            session.save(item);
            return item;
        });
    }

    /** Updates an existing item and returns it. */
    @Override
    public Item update(Item item) {
        return tx(session -> {
            session.update(item);
            return item;
        });
    }

    /** Deletes the given item. */
    @Override
    public void delete(Item item) {
        tx(session -> {
            session.delete(item);
            return null;
        });
    }

    /** @return all items ordered by id. */
    @Override
    public List findAll() {
        return tx(session -> session.createQuery("FROM Item i ORDER BY i.id").list());
    }

    /**
     * Marks the item with the given id as done. The change is flushed by
     * Hibernate's dirty checking when the transaction commits.
     */
    @Override
    public void doneItem(int id) {
        tx(session -> {
            Item item = session.get(Item.class, id);
            item.setDone(true);
            return null;
        });
    }
}
| Mrsananabos/ashveytser | chapter_009/src/main/java/ru/job4j/toDoList/model/dao/HiberStorage.java | Java | apache-2.0 | 2,289 |
import { AbstractSimpleComponent } from './AbstractSimpleComponent';
import { AllowedTokens } from './AllowedTokens';
/**
* Scalar component used to represent a categorical value as a simple token
* identifying a term in a code space
*/
export class SweCategory extends AbstractSimpleComponent {
    /**
     * Value is optional, to enable structure to act as a schema for values
     * provided using other encodings. Holds the token identifying a term
     * in the code space.
     */
    value: string;
    /**
     * Name of the dictionary where the possible values for this component are
     * listed and defined.
     */
    codeSpace: string;
    /** Constraint restricting the value to an enumerated set of tokens. */
    constraint: AllowedTokens;
}
| autermann/smle | src/app/model/swe/SweCategory.ts | TypeScript | apache-2.0 | 604 |
// Copyright 2016-2020 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"fmt"
"strings"
"time"
"github.com/cilium/cilium/api/v1/models"
. "github.com/cilium/cilium/api/v1/server/restapi/daemon"
"github.com/cilium/cilium/pkg/backoff"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/datapath"
"github.com/cilium/cilium/pkg/k8s"
k8smetrics "github.com/cilium/cilium/pkg/k8s/metrics"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/maps/eppolicymap"
"github.com/cilium/cilium/pkg/maps/eventsmap"
ipcachemap "github.com/cilium/cilium/pkg/maps/ipcache"
ipmasqmap "github.com/cilium/cilium/pkg/maps/ipmasq"
"github.com/cilium/cilium/pkg/maps/lbmap"
"github.com/cilium/cilium/pkg/maps/lxcmap"
"github.com/cilium/cilium/pkg/maps/metricsmap"
"github.com/cilium/cilium/pkg/maps/signalmap"
"github.com/cilium/cilium/pkg/maps/sockmap"
tunnelmap "github.com/cilium/cilium/pkg/maps/tunnel"
nodeTypes "github.com/cilium/cilium/pkg/node/types"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/rand"
"github.com/cilium/cilium/pkg/status"
"github.com/sirupsen/logrus"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/strfmt"
versionapi "k8s.io/apimachinery/pkg/version"
)
const (
	// k8sVersionCheckInterval is the interval in which the Kubernetes
	// version is verified even if connectivity is given
	k8sVersionCheckInterval = 15 * time.Minute
	// k8sMinimumEventHearbeat is the time interval in which any received
	// event will be considered proof that the apiserver connectivity is
	// healthy (identifier keeps the historical "Hearbeat" spelling to
	// avoid touching all users).
	k8sMinimumEventHearbeat = time.Minute
)

// randGen generates the per-client IDs handed out by the cluster-nodes
// streaming API; seeded once at startup.
var randGen = rand.NewSafeRand(time.Now().UnixNano())

// k8sVersion caches the apiserver version string so the status collector
// does not have to hit the Discovery API on every probe.
type k8sVersion struct {
	// version is the formatted apiserver version string, "" if unknown.
	version string
	// lastVersionCheck is when version was last refreshed.
	lastVersionCheck time.Time
	// lock protects version and lastVersionCheck.
	lock lock.Mutex
}
// cachedVersion returns the cached apiserver version string together with a
// flag indicating whether the cache may be used. The cache is only valid if
// a Kubernetes event was received recently (proof of live connectivity) and
// the version was checked within k8sVersionCheckInterval.
func (k *k8sVersion) cachedVersion() (string, bool) {
	k.lock.Lock()
	defer k.lock.Unlock()

	recentEvent := time.Since(k8smetrics.LastInteraction.Time()) <= k8sMinimumEventHearbeat
	fresh := k.version != "" && time.Since(k.lastVersionCheck) <= k8sVersionCheckInterval
	if !recentEvent || !fresh {
		return "", false
	}
	return k.version, true
}
// update refreshes the cached apiserver version from the given version info
// and records the time of the check. It returns the formatted version string.
func (k *k8sVersion) update(version *versionapi.Info) string {
	k.lock.Lock()
	defer k.lock.Unlock()

	k.lastVersionCheck = time.Now()
	k.version = fmt.Sprintf("%s.%s (%s) [%s]",
		version.Major, version.Minor, version.GitVersion, version.Platform)
	return k.version
}

// k8sVersionCache is the process-wide apiserver version cache.
var k8sVersionCache k8sVersion
// getK8sStatus returns the Kubernetes connectivity status for the status
// API. The apiserver version is served from the cache when possible and
// re-queried through the Discovery API otherwise.
func (d *Daemon) getK8sStatus() *models.K8sStatus {
	if !k8s.IsEnabled() {
		return &models.K8sStatus{State: models.StatusStateDisabled}
	}

	version, valid := k8sVersionCache.cachedVersion()
	if !valid {
		sv, err := k8s.Client().Discovery().ServerVersion()
		if err != nil {
			return &models.K8sStatus{State: models.StatusStateFailure, Msg: err.Error()}
		}
		version = k8sVersionCache.update(sv)
	}

	return &models.K8sStatus{
		State:          models.StatusStateOk,
		Msg:            version,
		K8sAPIVersions: d.k8sWatcher.GetAPIGroups(),
	}
}
// getMasqueradingStatus reports whether masquerading is enabled and, if so,
// in which mode (BPF or iptables) it operates.
func (d *Daemon) getMasqueradingStatus() *models.Masquerading {
	s := &models.Masquerading{Enabled: option.Config.Masquerade}
	if !s.Enabled {
		return s
	}

	if option.Config.EnableIPv4 {
		// The remote-node SNAT exclusion CIDR only applies to IPv4.
		s.SnatExclusionCidr = datapath.RemoteSNATDstAddrExclusionCIDR().String()
	}

	if !option.Config.EnableBPFMasquerade {
		s.Mode = models.MasqueradingModeIptables
		return s
	}

	s.Mode = models.MasqueradingModeBPF
	s.IPMasqAgent = option.Config.EnableIPMasqAgent
	return s
}
// getBandwidthManagerStatus reports whether the bandwidth manager is enabled
// and, if so, on which network devices it operates.
func (d *Daemon) getBandwidthManagerStatus() *models.BandwidthManager {
	s := &models.BandwidthManager{
		Enabled: option.Config.EnableBandwidthManager,
	}
	if !option.Config.EnableBandwidthManager {
		return s
	}
	// Defensive copy (via the builtin copy instead of a manual loop) so API
	// consumers cannot mutate option.Config.Devices.
	s.Devices = make([]string, len(option.Config.Devices))
	copy(s.Devices, option.Config.Devices)
	return s
}
// getKubeProxyReplacementStatus reports the kube-proxy replacement mode and
// the state of each of its features (NodePort, HostPort, externalIPs,
// host-reachable services and session affinity).
func (d *Daemon) getKubeProxyReplacementStatus() *models.KubeProxyReplacement {
	if !k8s.IsEnabled() {
		return &models.KubeProxyReplacement{Mode: models.KubeProxyReplacementModeDisabled}
	}

	var mode string
	switch option.Config.KubeProxyReplacement {
	case option.KubeProxyReplacementStrict:
		mode = models.KubeProxyReplacementModeStrict
	case option.KubeProxyReplacementPartial:
		mode = models.KubeProxyReplacementModePartial
	case option.KubeProxyReplacementProbe:
		mode = models.KubeProxyReplacementModeProbe
	case option.KubeProxyReplacementDisabled:
		mode = models.KubeProxyReplacementModeDisabled
	}

	// Defensive copy so API consumers cannot mutate option.Config.Devices.
	devices := make([]string, len(option.Config.Devices))
	copy(devices, option.Config.Devices)

	features := &models.KubeProxyReplacementFeatures{
		NodePort:              &models.KubeProxyReplacementFeaturesNodePort{},
		HostPort:              &models.KubeProxyReplacementFeaturesHostPort{},
		ExternalIPs:           &models.KubeProxyReplacementFeaturesExternalIPs{},
		HostReachableServices: &models.KubeProxyReplacementFeaturesHostReachableServices{},
		SessionAffinity:       &models.KubeProxyReplacementFeaturesSessionAffinity{},
	}
	if option.Config.EnableNodePort {
		features.NodePort.Enabled = true
		features.NodePort.Mode = strings.ToUpper(option.Config.NodePortMode)
		features.NodePort.Algorithm = models.KubeProxyReplacementFeaturesNodePortAlgorithmRandom
		if option.Config.NodePortAlg == option.NodePortAlgMaglev {
			features.NodePort.Algorithm = models.KubeProxyReplacementFeaturesNodePortAlgorithmMaglev
			features.NodePort.LutSize = int64(option.Config.MaglevTableSize)
		}
		if option.Config.NodePortAcceleration == option.NodePortAccelerationGeneric {
			features.NodePort.Acceleration = models.KubeProxyReplacementFeaturesNodePortAccelerationGeneric
		} else {
			features.NodePort.Acceleration = strings.ToTitle(option.Config.NodePortAcceleration)
		}
		features.NodePort.PortMin = int64(option.Config.NodePortMin)
		features.NodePort.PortMax = int64(option.Config.NodePortMax)
	}
	if option.Config.EnableHostPort {
		features.HostPort.Enabled = true
	}
	if option.Config.EnableExternalIPs {
		features.ExternalIPs.Enabled = true
	}
	// BUGFIX: this block was previously gated on EnableHostServicesTCP only,
	// so a UDP-only configuration reported host-reachable services as
	// disabled. Enter it when either protocol is enabled.
	if option.Config.EnableHostServicesTCP || option.Config.EnableHostServicesUDP {
		features.HostReachableServices.Enabled = true
		protocols := []string{}
		if option.Config.EnableHostServicesTCP {
			protocols = append(protocols, "TCP")
		}
		if option.Config.EnableHostServicesUDP {
			protocols = append(protocols, "UDP")
		}
		features.HostReachableServices.Protocols = protocols
	}
	if option.Config.EnableSessionAffinity {
		features.SessionAffinity.Enabled = true
	}

	return &models.KubeProxyReplacement{
		Mode:                mode,
		Devices:             devices,
		DirectRoutingDevice: option.Config.DirectRoutingDevice,
		Features:            features,
	}
}
// getBPFMapStatus returns the sizing of all BPF maps managed by the agent,
// as exposed through the status API ("cilium status --verbose").
func (d *Daemon) getBPFMapStatus() *models.BPFMapStatus {
	// prop cuts down on the literal boilerplate of the map list below.
	prop := func(name string, size int64) *models.BPFMapProperties {
		return &models.BPFMapProperties{Name: name, Size: size}
	}
	return &models.BPFMapStatus{
		DynamicSizeRatio: option.Config.BPFMapsDynamicSizeRatio,
		Maps: []*models.BPFMapProperties{
			prop("Non-TCP connection tracking", int64(option.Config.CTMapEntriesGlobalAny)),
			prop("TCP connection tracking", int64(option.Config.CTMapEntriesGlobalTCP)),
			prop("Endpoint policy", int64(lxcmap.MaxEntries)),
			prop("Events", int64(eventsmap.MaxEntries)),
			prop("IP cache", int64(ipcachemap.MaxEntries)),
			prop("IP masquerading agent", int64(ipmasqmap.MaxEntries)),
			prop("IPv4 fragmentation", int64(option.Config.FragmentsMapEntries)),
			prop("IPv4 service", int64(lbmap.MaxEntries)),             // cilium_lb4_services_v2
			prop("IPv6 service", int64(lbmap.MaxEntries)),             // cilium_lb6_services_v2
			prop("IPv4 service backend", int64(lbmap.MaxEntries)),     // cilium_lb4_backends
			prop("IPv6 service backend", int64(lbmap.MaxEntries)),     // cilium_lb6_backends
			prop("IPv4 service reverse NAT", int64(lbmap.MaxEntries)), // cilium_lb4_reverse_nat
			prop("IPv6 service reverse NAT", int64(lbmap.MaxEntries)), // cilium_lb6_reverse_nat
			prop("Metrics", int64(metricsmap.MaxEntries)),
			prop("NAT", int64(option.Config.NATMapEntriesGlobal)),
			prop("Neighbor table", int64(option.Config.NeighMapEntriesGlobal)),
			prop("Global policy", int64(option.Config.PolicyMapEntries)),
			prop("Per endpoint policy", int64(eppolicymap.MaxEntries)),
			prop("Session affinity", int64(lbmap.MaxEntries)),
			prop("Signal", int64(signalmap.MaxEntries)),
			prop("Sockmap", int64(sockmap.MaxEntries)),
			prop("Sock reverse NAT", int64(option.Config.SockRevNatEntries)),
			prop("Tunnel", int64(tunnelmap.MaxEntries)),
		},
	}
}
// getHealthz implements the GET /healthz API handler.
type getHealthz struct {
	daemon *Daemon
}

// NewGetHealthzHandler returns a new GET /healthz handler backed by the
// given daemon.
func NewGetHealthzHandler(d *Daemon) GetHealthzHandler {
	return &getHealthz{daemon: d}
}
// getNodeStatus returns the name of the local node together with the models
// of all nodes currently known to the node manager.
func (d *Daemon) getNodeStatus() *models.ClusterStatus {
	cs := &models.ClusterStatus{
		Self: d.nodeDiscovery.LocalNode.Fullname(),
	}
	for _, node := range d.nodeDiscovery.Manager.GetNodes() {
		cs.Nodes = append(cs.Nodes, node.GetModel())
	}
	return cs
}
// Handle implements the GET /healthz API: it returns the collected daemon
// status, in brief form when the Brief query parameter is set.
func (h *getHealthz) Handle(params GetHealthzParams) middleware.Responder {
	wantBrief := params.Brief != nil && *params.Brief
	status := h.daemon.getStatus(wantBrief)
	return NewGetHealthzOK().WithPayload(&status)
}
// getNodes implements the GET /cluster/nodes API handler. It keeps per-client
// state so that a returning client only receives the delta of nodes
// added/removed since its previous call.
type getNodes struct {
	d *Daemon
	// mutex to protect the clients map against concurrent access
	lock.RWMutex
	// clients maps a client ID to a clusterNodesClient
	clients map[int64]*clusterNodesClient
}

// NewGetClusterNodesHandler returns a new GET /cluster/nodes handler backed
// by the given daemon.
func NewGetClusterNodesHandler(d *Daemon) GetClusterNodesHandler {
	return &getNodes{
		d:       d,
		clients: map[int64]*clusterNodesClient{},
	}
}
// clientGCTimeout is the time for which the clients are kept. After timeout
// is reached, clients will be cleaned up.
const clientGCTimeout = 15 * time.Minute

// clusterNodesClient accumulates node add/remove events on behalf of a single
// API client until that client polls again. It is subscribed to the node
// manager as a node event handler.
type clusterNodesClient struct {
	// mutex to protect the client against concurrent access
	lock.RWMutex
	// lastSync is when the client last polled; clients idle for longer than
	// clientGCTimeout are garbage collected.
	lastSync time.Time
	*models.ClusterNodeStatus
}
// NodeAdd records a newly discovered node so it is reported to the client on
// its next poll. Part of the node manager's event handler interface.
func (c *clusterNodesClient) NodeAdd(newNode nodeTypes.Node) error {
	c.Lock()
	defer c.Unlock()
	c.NodesAdded = append(c.NodesAdded, newNode.GetModel())
	return nil
}
// NodeUpdate records a node modification. If the node is still pending in
// the added list it is refreshed in place; otherwise the old model is queued
// as removed and the new one as added.
func (c *clusterNodesClient) NodeUpdate(oldNode, newNode nodeTypes.Node) error {
	c.Lock()
	defer c.Unlock()

	name := newNode.Fullname()
	for i := range c.NodesAdded {
		if c.NodesAdded[i].Name == name {
			// Node was added but never reported to the client yet: just
			// replace the pending model.
			c.NodesAdded[i] = newNode.GetModel()
			return nil
		}
	}

	c.NodesAdded = append(c.NodesAdded, newNode.GetModel())
	c.NodesRemoved = append(c.NodesRemoved, oldNode.GetModel())
	return nil
}
// NodeDelete records a node removal. A node that was added/updated but never
// reported to the client is simply dropped from the added list instead of
// being reported as removed.
func (c *clusterNodesClient) NodeDelete(node nodeTypes.Node) error {
	c.Lock()
	defer c.Unlock()

	name := node.Fullname()
	idx := -1
	for i, added := range c.NodesAdded {
		if added.Name == name {
			idx = i
		}
	}
	if idx == -1 {
		c.NodesRemoved = append(c.NodesRemoved, node.GetModel())
		return nil
	}
	c.NodesAdded = append(c.NodesAdded[:idx], c.NodesAdded[idx+1:]...)
	return nil
}
// NodeValidateImplementation is part of the node event handler interface;
// datapath validation is not relevant for API clients.
func (c *clusterNodesClient) NodeValidateImplementation(node nodeTypes.Node) error {
	// no-op
	return nil
}

// NodeConfigurationChanged is part of the node event handler interface;
// local node configuration changes are not relevant for API clients.
func (c *clusterNodesClient) NodeConfigurationChanged(config datapath.LocalNodeConfiguration) error {
	// no-op
	return nil
}
// cleanupClients unsubscribes and removes all clients that have not polled
// within clientGCTimeout.
// NOTE(review): the only visible caller (Handle) invokes this while holding
// h's write lock; confirm that invariant before calling from elsewhere.
func (h *getNodes) cleanupClients() {
	past := time.Now().Add(-clientGCTimeout)
	// Deleting from a map while ranging over it is safe in Go.
	for k, v := range h.clients {
		if v.lastSync.Before(past) {
			h.d.nodeDiscovery.Manager.Unsubscribe(v)
			delete(h.clients, k)
		}
	}
}
// Handle implements the GET /cluster/nodes API. Without a client ID the full
// set of known nodes is returned. With a client ID, the response contains
// only the delta (nodes added/removed) accumulated since that client's
// previous call; unknown IDs get a freshly allocated ID plus a full dump.
func (h *getNodes) Handle(params GetClusterNodesParams) middleware.Responder {
	var cns *models.ClusterNodeStatus
	// If ClientID is not set then we send all nodes, otherwise we will store
	// the client ID in the list of clients and we subscribe this new client
	// to the list of clients.
	if params.ClientID == nil {
		ns := h.d.getNodeStatus()
		cns = &models.ClusterNodeStatus{
			Self:       ns.Self,
			NodesAdded: ns.Nodes,
		}
		return NewGetClusterNodesOK().WithPayload(cns)
	}
	h.Lock()
	defer h.Unlock()
	var clientID int64
	c, exists := h.clients[*params.ClientID]
	if exists {
		clientID = *params.ClientID
	} else {
		clientID = randGen.Int63()
		// make sure we haven't allocated an existing client ID nor the
		// randomizer has allocated ID 0, if we have then we will return
		// clientID 0.
		_, exists := h.clients[clientID]
		if exists || clientID == 0 {
			// ID collision (or reserved ID 0): return a full dump with
			// client ID 0 so the caller can retry for a fresh ID.
			ns := h.d.getNodeStatus()
			cns = &models.ClusterNodeStatus{
				ClientID:   0,
				Self:       ns.Self,
				NodesAdded: ns.Nodes,
			}
			return NewGetClusterNodesOK().WithPayload(cns)
		}
		c = &clusterNodesClient{
			lastSync: time.Now(),
			ClusterNodeStatus: &models.ClusterNodeStatus{
				ClientID: clientID,
				Self:     h.d.nodeDiscovery.LocalNode.Fullname(),
			},
		}
		h.d.nodeDiscovery.Manager.Subscribe(c)
		// Clean up other clients before adding a new one
		h.cleanupClients()
		h.clients[clientID] = c
	}
	c.Lock()
	// Copy the ClusterNodeStatus to the response
	cns = c.ClusterNodeStatus
	// Store a new ClusterNodeStatus to reset the list of nodes
	// added / removed.
	c.ClusterNodeStatus = &models.ClusterNodeStatus{
		ClientID: clientID,
		Self:     h.d.nodeDiscovery.LocalNode.Fullname(),
	}
	c.lastSync = time.Now()
	c.Unlock()
	return NewGetClusterNodesOK().WithPayload(cns)
}
// getStatus returns the daemon status. If brief is provided a minimal version
// of the StatusResponse is provided, containing only the cluster health and
// at most one failing controller. The overall "cilium" state is derived from
// the individual subsystem states, in decreasing order of severity.
func (d *Daemon) getStatus(brief bool) models.StatusResponse {
	staleProbes := d.statusCollector.GetStaleProbes()
	stale := make(map[string]strfmt.DateTime, len(staleProbes))
	for probe, startTime := range staleProbes {
		stale[probe] = strfmt.DateTime(startTime)
	}
	d.statusCollectMutex.RLock()
	defer d.statusCollectMutex.RUnlock()
	var sr models.StatusResponse
	if brief {
		csCopy := new(models.ClusterStatus)
		if d.statusResponse.Cluster != nil && d.statusResponse.Cluster.CiliumHealth != nil {
			// Deep-copy only the CiliumHealth sub-status.
			in, out := &d.statusResponse.Cluster.CiliumHealth, &csCopy.CiliumHealth
			*out = new(models.Status)
			**out = **in
		}
		var minimalControllers models.ControllerStatuses
		if d.statusResponse.Controllers != nil {
			for _, c := range d.statusResponse.Controllers {
				if c.Status == nil {
					continue
				}
				// With brief, the client should only care if a single controller
				// is failing and its status so we don't need to continue
				// checking for failure messages for the remaining controllers.
				if c.Status.LastFailureMsg != "" {
					minimalControllers = append(minimalControllers, c.DeepCopy())
					break
				}
			}
		}
		sr = models.StatusResponse{
			Cluster:     csCopy,
			Controllers: minimalControllers,
		}
	} else {
		// d.statusResponse contains references, so we do a deep copy to be able to
		// safely use sr after the method has returned
		sr = *d.statusResponse.DeepCopy()
	}
	sr.Stale = stale
	// Derive the aggregate daemon state, most severe condition first.
	switch {
	case len(sr.Stale) > 0:
		sr.Cilium = &models.Status{
			State: models.StatusStateWarning,
			Msg:   "Stale status data",
		}
	case d.statusResponse.Kvstore != nil && d.statusResponse.Kvstore.State != models.StatusStateOk:
		sr.Cilium = &models.Status{
			State: d.statusResponse.Kvstore.State,
			Msg:   "Kvstore service is not ready",
		}
	case d.statusResponse.ContainerRuntime != nil && d.statusResponse.ContainerRuntime.State != models.StatusStateOk:
		msg := "Container runtime is not ready"
		if d.statusResponse.ContainerRuntime.State == models.StatusStateDisabled {
			msg = "Container runtime is disabled"
		}
		sr.Cilium = &models.Status{
			State: d.statusResponse.ContainerRuntime.State,
			Msg:   msg,
		}
	case k8s.IsEnabled() && d.statusResponse.Kubernetes != nil && d.statusResponse.Kubernetes.State != models.StatusStateOk:
		sr.Cilium = &models.Status{
			State: d.statusResponse.Kubernetes.State,
			Msg:   "Kubernetes service is not ready",
		}
	default:
		sr.Cilium = &models.Status{State: models.StatusStateOk, Msg: "OK"}
	}
	return sr
}
// startStatusCollector registers all status probes (kvstore, kubernetes,
// ipam, node monitor, cluster, cilium-health, l7 proxy, controllers,
// clustermesh, hubble) with a new status collector and fills in the static
// parts of the status response. Each probe's OnStatusUpdate callback writes
// into d.statusResponse under d.statusCollectMutex.
func (d *Daemon) startStatusCollector() {
	probes := []status.Probe{
		{
			Name: "check-locks",
			Probe: func(ctx context.Context) (interface{}, error) {
				// Try to acquire a couple of global locks to have the status API fail
				// in case of a deadlock on these locks
				option.Config.ConfigPatchMutex.Lock()
				option.Config.ConfigPatchMutex.Unlock()
				return nil, nil
			},
			OnStatusUpdate: func(status status.Status) {
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				// FIXME we have no field for the lock status
			},
		},
		{
			Name: "kvstore",
			Probe: func(ctx context.Context) (interface{}, error) {
				if option.Config.KVStore == "" {
					return models.StatusStateDisabled, nil
				} else {
					return kvstore.Client().Status()
				}
			},
			OnStatusUpdate: func(status status.Status) {
				var msg string
				state := models.StatusStateOk
				info, ok := status.Data.(string)
				// Combine the probe error (if any) with the client's info string.
				switch {
				case ok && status.Err != nil:
					state = models.StatusStateFailure
					msg = fmt.Sprintf("Err: %s - %s", status.Err, info)
				case status.Err != nil:
					state = models.StatusStateFailure
					msg = fmt.Sprintf("Err: %s", status.Err)
				case ok:
					msg = fmt.Sprintf("%s", info)
				}
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				d.statusResponse.Kvstore = &models.Status{
					State: state,
					Msg:   msg,
				}
			},
		},
		{
			Name: "kubernetes",
			Interval: func(failures int) time.Duration {
				if failures > 0 {
					// While failing, we want an initial
					// quick retry with exponential backoff
					// to avoid continuous load on the
					// apiserver
					return backoff.CalculateDuration(5*time.Second, 2*time.Minute, 2.0, false, failures)
				}
				// The base interval is dependant on the
				// cluster size. One status interval does not
				// automatically translate to an apiserver
				// interaction as any regular apiserver
				// interaction is also used as an indication of
				// successful connectivity so we can continue
				// to be fairly aggressive.
				//
				// 1 | 7s
				// 2 | 12s
				// 4 | 15s
				// 64 | 42s
				// 512 | 1m02s
				// 2048 | 1m15s
				// 8192 | 1m30s
				// 16384 | 1m32s
				return d.nodeDiscovery.Manager.ClusterSizeDependantInterval(10 * time.Second)
			},
			Probe: func(ctx context.Context) (interface{}, error) {
				return d.getK8sStatus(), nil
			},
			OnStatusUpdate: func(status status.Status) {
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				if status.Err != nil {
					d.statusResponse.Kubernetes = &models.K8sStatus{
						State: models.StatusStateFailure,
						Msg:   status.Err.Error(),
					}
					return
				}
				if s, ok := status.Data.(*models.K8sStatus); ok {
					d.statusResponse.Kubernetes = s
				}
			},
		},
		{
			Name: "ipam",
			Probe: func(ctx context.Context) (interface{}, error) {
				return d.DumpIPAM(), nil
			},
			OnStatusUpdate: func(status status.Status) {
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				// IPAMStatus has no way to show errors
				if status.Err == nil {
					if s, ok := status.Data.(*models.IPAMStatus); ok {
						d.statusResponse.Ipam = s
					}
				}
			},
		},
		{
			Name: "node-monitor",
			Probe: func(ctx context.Context) (interface{}, error) {
				return d.monitorAgent.State(), nil
			},
			OnStatusUpdate: func(status status.Status) {
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				// NodeMonitor has no way to show errors
				if status.Err == nil {
					if s, ok := status.Data.(*models.MonitorStatus); ok {
						d.statusResponse.NodeMonitor = s
					}
				}
			},
		},
		{
			Name: "cluster",
			Probe: func(ctx context.Context) (interface{}, error) {
				clusterStatus := &models.ClusterStatus{
					Self: d.nodeDiscovery.LocalNode.Fullname(),
				}
				return clusterStatus, nil
			},
			OnStatusUpdate: func(status status.Status) {
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				// ClusterStatus has no way to report errors
				if status.Err == nil {
					if s, ok := status.Data.(*models.ClusterStatus); ok {
						if d.statusResponse.Cluster != nil {
							// NB: CiliumHealth is set concurrently by the
							// "cilium-health" probe, so do not override it
							s.CiliumHealth = d.statusResponse.Cluster.CiliumHealth
						}
						d.statusResponse.Cluster = s
					}
				}
			},
		},
		{
			Name: "cilium-health",
			Probe: func(ctx context.Context) (interface{}, error) {
				if d.ciliumHealth == nil {
					return nil, nil
				}
				return d.ciliumHealth.GetStatus(), nil
			},
			OnStatusUpdate: func(status status.Status) {
				if d.ciliumHealth == nil {
					return
				}
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				if d.statusResponse.Cluster == nil {
					d.statusResponse.Cluster = &models.ClusterStatus{}
				}
				if status.Err != nil {
					d.statusResponse.Cluster.CiliumHealth = &models.Status{
						State: models.StatusStateFailure,
						Msg:   status.Err.Error(),
					}
					return
				}
				if s, ok := status.Data.(*models.Status); ok {
					d.statusResponse.Cluster.CiliumHealth = s
				}
			},
		},
		{
			Name: "l7-proxy",
			Probe: func(ctx context.Context) (interface{}, error) {
				if d.l7Proxy == nil {
					return nil, nil
				}
				return d.l7Proxy.GetStatusModel(), nil
			},
			OnStatusUpdate: func(status status.Status) {
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				// ProxyStatus has no way to report errors
				if status.Err == nil {
					if s, ok := status.Data.(*models.ProxyStatus); ok {
						d.statusResponse.Proxy = s
					}
				}
			},
		},
		{
			Name: "controllers",
			Probe: func(ctx context.Context) (interface{}, error) {
				return controller.GetGlobalStatus(), nil
			},
			OnStatusUpdate: func(status status.Status) {
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				// ControllerStatuses has no way to report errors
				if status.Err == nil {
					if s, ok := status.Data.(models.ControllerStatuses); ok {
						d.statusResponse.Controllers = s
					}
				}
			},
		},
		{
			Name: "clustermesh",
			Probe: func(ctx context.Context) (interface{}, error) {
				if d.clustermesh == nil {
					return nil, nil
				}
				return d.clustermesh.Status(), nil
			},
			OnStatusUpdate: func(status status.Status) {
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				if status.Err == nil {
					if s, ok := status.Data.(*models.ClusterMeshStatus); ok {
						d.statusResponse.ClusterMesh = s
					}
				}
			},
		},
		{
			Name: "hubble",
			Probe: func(ctx context.Context) (interface{}, error) {
				return d.getHubbleStatus(ctx), nil
			},
			OnStatusUpdate: func(status status.Status) {
				d.statusCollectMutex.Lock()
				defer d.statusCollectMutex.Unlock()
				if status.Err == nil {
					if s, ok := status.Data.(*models.HubbleStatus); ok {
						d.statusResponse.Hubble = s
					}
				}
			},
		},
	}
	if k8s.IsEnabled() {
		// kube-proxy replacement configuration does not change after
		// initKubeProxyReplacementOptions() has been executed, so it's fine to
		// statically set the field here.
		d.statusResponse.KubeProxyReplacement = d.getKubeProxyReplacementStatus()
	}
	// These are also static for the lifetime of the agent.
	d.statusResponse.Masquerading = d.getMasqueradingStatus()
	d.statusResponse.BandwidthManager = d.getBandwidthManagerStatus()
	d.statusResponse.BpfMaps = d.getBPFMapStatus()
	d.statusCollector = status.NewCollector(probes, status.Config{})
	// Set up a signal handler function which prints out logs related to daemon status.
	cleaner.cleanupFuncs.Add(func() {
		// If the KVstore state is not OK, print help for user.
		if d.statusResponse.Kvstore != nil &&
			d.statusResponse.Kvstore.State != models.StatusStateOk {
			helpMsg := "cilium-agent depends on the availability of cilium-operator/etcd-cluster. " +
				"Check if the cilium-operator pod and etcd-cluster are running and do not have any " +
				"warnings or error messages."
			log.WithFields(logrus.Fields{
				"status":              d.statusResponse.Kvstore.Msg,
				logfields.HelpMessage: helpMsg,
			}).Error("KVStore state not OK")
		}
	})
	return
}
| cilium-team/cilium | daemon/cmd/status.go | GO | apache-2.0 | 25,264 |
package cn.com.warlock.wisp.core.plugin.processor.support.filter;
import cn.com.warlock.wisp.core.dto.MysqlEntryWrap;
import cn.com.warlock.wisp.core.exception.WispProcessorException;
/**
 * A chain of filters applied to a wrapped MySQL entry during processing.
 * Implementations pass the entry along to the next filter in the chain.
 */
public interface IEntryFilterChain {

    /**
     * Processes the given entry with the next filter in the chain.
     *
     * @param entry the wrapped MySQL entry to process
     * @throws WispProcessorException if a filter fails to process the entry
     */
    void doFilter(MysqlEntryWrap entry) throws WispProcessorException;
}
| warlock-china/wisp | wisp-core/src/main/java/cn/com/warlock/wisp/core/plugin/processor/support/filter/IEntryFilterChain.java | Java | apache-2.0 | 298 |
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.codenergic.theskeleton.post;
import org.codenergic.theskeleton.core.data.AuditingEntityRepository;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.jpa.repository.Query;
import org.springframework.stereotype.Repository;
/**
 * Spring Data JPA repository for {@link PostEntity}.
 */
@Repository
public interface PostRepository extends AuditingEntityRepository<PostEntity> {

    /** Finds posts created by the given poster. */
    Page<PostEntity> findByPosterId(String posterId, Pageable pageable);

    /** Finds posts that are responses to the given post. */
    Page<PostEntity> findByResponseToId(String postId, Pageable pageable);

    /**
     * Finds posts whose content contains the given text, newest first.
     * NOTE(review): the parameter is matched against {@code content} even
     * though it is named {@code title} — confirm the intended field name.
     */
    @Query("from PostEntity p where p.content like %?1% order by p.createdDate desc")
    Page<PostEntity> findByContentContaining(String title, Pageable pageable);
}
| codenergic/theskeleton | src/main/java/org/codenergic/theskeleton/post/PostRepository.java | Java | apache-2.0 | 1,323 |
/*
* Copyright 2016 Bear Giles <bgiles@coyotesong.com>
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.coyotesong.demo.cxf;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.annotation.ComponentScan;
@SpringBootApplication
@ComponentScan("com.coyotesong.demo.cxf")
public class ApacheCxfWss4jApplication implements ApplicationContextAware {
private static ApplicationContext ctx;
public void setApplicationContext(ApplicationContext ctx) {
this.ctx = ctx;
}
@SuppressWarnings("unchecked")
public static <T> T getBean(String name) {
return (T) ctx.getBean(name);
}
public static void main(String[] args) {
SpringApplication.run(ApacheCxfWss4jApplication.class, args);
}
}
| beargiles/cheat-sheet | webservices/Apache-CXF/apache-cxf-wss4j-interceptors/src/main/java/com/coyotesong/demo/cxf/ApacheCxfWss4jApplication.java | Java | apache-2.0 | 1,721 |
/*
* Copyright 2016 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.server;
import static com.linecorp.armeria.common.HttpStatus.OK;
import static com.linecorp.armeria.server.RoutingContextTest.virtualHost;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import java.util.List;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
import org.slf4j.LoggerFactory;
import com.linecorp.armeria.common.HttpMethod;
import com.linecorp.armeria.common.HttpResponse;
import com.linecorp.armeria.common.RequestHeaders;
class VirtualHostBuilderTest {
private static final VirtualHostBuilder template = Server.builder().virtualHostTemplate;
@Test
void defaultVirtualHost() {
final ServerBuilder sb = Server.builder();
final Server server = sb.defaultVirtualHost()
.service("/test", (ctx, req) -> HttpResponse.of(OK))
.and().build();
final VirtualHost virtualHost = server.config().defaultVirtualHost();
assertThat(virtualHost.hostnamePattern()).isEqualTo("*");
assertThat(virtualHost.defaultHostname()).isNotEqualTo("*");
}
@Test
void withDefaultVirtualHost() {
final ServerBuilder sb = Server.builder();
final Server server = sb.withDefaultVirtualHost(builder -> {
builder.defaultHostname("foo")
.service("/test", (ctx, req) -> HttpResponse.of(OK));
}).build();
final VirtualHost virtualHost = server.config().defaultVirtualHost();
assertThat(virtualHost.hostnamePattern()).isEqualTo("*");
assertThat(virtualHost.defaultHostname()).isEqualTo("foo");
}
@Test
void defaultVirtualHostSetDefaultHostname() {
final ServerBuilder sb = Server.builder();
sb.defaultHostname("foo");
final Server server = sb.defaultVirtualHost()
.service("/test", (ctx, req) -> HttpResponse.of(OK))
.and().build();
final VirtualHost virtualHost = server.config().defaultVirtualHost();
assertThat(virtualHost.hostnamePattern()).isEqualTo("*");
assertThat(virtualHost.defaultHostname()).isEqualTo("foo");
}
@Test
void defaultVirtualHostWithImplicitStyle() {
final ServerBuilder sb = Server.builder();
final Server server = sb.service("/test", (ctx, req) -> HttpResponse.of(OK)).build();
final VirtualHost virtualHost = server.config().defaultVirtualHost();
assertThat(virtualHost.hostnamePattern()).isEqualTo("*");
}
@Test
void virtualHostWithHostnamePattern() {
final ServerBuilder sb = Server.builder();
final Server server = sb.virtualHost("*.foo.com")
.service("/test", (ctx, req) -> HttpResponse.of(OK))
.and()
.build();
final List<VirtualHost> virtualHosts = server.config().virtualHosts();
assertThat(virtualHosts.size()).isEqualTo(2);
final VirtualHost virtualHost = virtualHosts.get(0);
assertThat(virtualHost.hostnamePattern()).isEqualTo("*.foo.com");
assertThat(virtualHost.defaultHostname()).isEqualTo("foo.com");
final VirtualHost defaultVirtualHost = virtualHosts.get(1);
assertThat(defaultVirtualHost).isEqualTo(server.config().defaultVirtualHost());
}
@ParameterizedTest
@CsvSource({ "foo, foo", "bar, *.bar", "a.baz, *.baz" })
void virtualHostWithDefaultHostnameAndHostnamePattern(String defaultHostname, String hostnamePattern) {
final ServerBuilder sb = Server.builder();
final Server server = sb.virtualHost(defaultHostname, hostnamePattern)
.service("/test", (ctx, req) -> HttpResponse.of(OK))
.and()
.build();
final List<VirtualHost> virtualHosts = server.config().virtualHosts();
assertThat(virtualHosts.size()).isEqualTo(2);
final VirtualHost virtualHost = virtualHosts.get(0);
assertThat(virtualHost.hostnamePattern()).isEqualTo(hostnamePattern);
assertThat(virtualHost.defaultHostname()).isEqualTo(defaultHostname);
final VirtualHost defaultVirtualHost = virtualHosts.get(1);
assertThat(defaultVirtualHost).isEqualTo(server.config().defaultVirtualHost());
}
@Test
void withVirtualHost() {
final ServerBuilder sb = Server.builder();
final Server server = sb.withVirtualHost(builder -> {
builder.defaultHostname("foo")
.service("/test", (ctx, req) -> HttpResponse.of(OK));
}).build();
final List<VirtualHost> virtualHosts = server.config().virtualHosts();
assertThat(virtualHosts.size()).isEqualTo(2);
final VirtualHost virtualHost = virtualHosts.get(0);
assertThat(virtualHost.hostnamePattern()).isEqualTo("*.foo");
assertThat(virtualHost.defaultHostname()).isEqualTo("foo");
}
@Test
void defaultVirtualHostMixedStyle() {
final ServerBuilder sb = Server.builder();
sb.service("/test", (ctx, req) -> HttpResponse.of(OK))
.defaultVirtualHost().service("/test2", (ctx, req) -> HttpResponse.of(OK));
final Server server = sb.build();
final List<ServiceConfig> serviceConfigs = server.config().defaultVirtualHost().serviceConfigs();
assertThat(serviceConfigs.size()).isEqualTo(2);
}
@Test
void virtualHostWithoutPattern() {
final VirtualHost h = new VirtualHostBuilder(Server.builder(), false)
.defaultHostname("foo.com")
.hostnamePattern("foo.com")
.build(template);
assertThat(h.hostnamePattern()).isEqualTo("foo.com");
assertThat(h.defaultHostname()).isEqualTo("foo.com");
}
@Test
void virtualHostWithPattern() {
final VirtualHost h = new VirtualHostBuilder(Server.builder(), false)
.defaultHostname("bar.foo.com")
.hostnamePattern("*.foo.com")
.build(template);
assertThat(h.hostnamePattern()).isEqualTo("*.foo.com");
assertThat(h.defaultHostname()).isEqualTo("bar.foo.com");
}
@Test
void accessLoggerCustomization() {
final VirtualHost h1 = new VirtualHostBuilder(Server.builder(), false)
.defaultHostname("bar.foo.com")
.hostnamePattern("*.foo.com")
.accessLogger(host -> LoggerFactory.getLogger("customize.test"))
.build(template);
assertThat(h1.accessLogger().getName()).isEqualTo("customize.test");
final VirtualHost h2 = new VirtualHostBuilder(Server.builder(), false)
.defaultHostname("bar.foo.com")
.hostnamePattern("*.foo.com")
.accessLogger(LoggerFactory.getLogger("com.foo.test"))
.build(template);
assertThat(h2.accessLogger().getName()).isEqualTo("com.foo.test");
}
@Test
void hostnamePatternCannotBeSetForDefaultBuilder() {
final ServerBuilder sb = Server.builder();
assertThatThrownBy(() -> sb.defaultVirtualHost().hostnamePattern("CannotSet"))
.isInstanceOf(UnsupportedOperationException.class);
}
@Test
void hostnamePatternCannotBeSetForDefaultBuilder2() {
final ServerBuilder sb = Server.builder();
assertThatThrownBy(() -> sb.withDefaultVirtualHost(builder -> builder.hostnamePattern("CannotSet")))
.isInstanceOf(UnsupportedOperationException.class);
}
@Test
void virtualHostWithNull2() {
final ServerBuilder sb = Server.builder();
assertThatThrownBy(() -> sb.virtualHost(null, "foo.com")).isInstanceOf(NullPointerException.class);
}
@Test
void virtualHostWithNull3() {
final ServerBuilder sb = Server.builder();
assertThatThrownBy(() -> sb.virtualHost(null, null)).isInstanceOf(NullPointerException.class);
}
@Test
void virtualHostWithMismatch() {
assertThatThrownBy(() -> {
new VirtualHostBuilder(Server.builder(), false)
.defaultHostname("bar.com")
.hostnamePattern("foo.com")
.build(template);
}).isInstanceOf(IllegalArgumentException.class);
}
@Test
void virtualHostWithMismatch2() {
assertThatThrownBy(() -> {
new VirtualHostBuilder(Server.builder(), false)
.defaultHostname("bar.com")
.hostnamePattern("*.foo.com")
.build(template);
}).isInstanceOf(IllegalArgumentException.class);
}
    @Test
    void precedenceOfDuplicateRoute() {
        // Two services are registered on the identical route; registration
        // order decides which one wins the lookup.
        final Route routeA = Route.builder().path("/").build();
        final Route routeB = Route.builder().path("/").build();
        final VirtualHost virtualHost = new VirtualHostBuilder(Server.builder(), true)
                .service(routeA, (ctx, req) -> HttpResponse.of(OK))
                .service(routeB, (ctx, req) -> HttpResponse.of(OK))
                .build(template);
        // Both configs are retained even though the routes collide.
        assertThat(virtualHost.serviceConfigs().size()).isEqualTo(2);
        // NOTE(review): virtualHost() is presumably a helper defined elsewhere
        // in this test class — confirm it supplies the host used for routing.
        final RoutingContext routingContext = new DefaultRoutingContext(virtualHost(), "example.com",
                RequestHeaders.of(HttpMethod.GET, "/"),
                "/", null, false);
        final Routed<ServiceConfig> serviceConfig = virtualHost.findServiceConfig(routingContext);
        final Route route = serviceConfig.route();
        // The earlier registration (routeA) takes precedence.
        assertThat(route).isSameAs(routeA);
    }
}
| minwoox/armeria | core/src/test/java/com/linecorp/armeria/server/VirtualHostBuilderTest.java | Java | apache-2.0 | 10,501 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.activemq;
import org.apache.camel.test.spring.CamelSpringTestSupport;
import org.junit.Test;
import org.springframework.context.support.AbstractApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
public class ActiveMQComponentFactoryUserNamePasswordTest extends CamelSpringTestSupport {

    @Override
    protected AbstractApplicationContext createApplicationContext() {
        // The Spring XML declares the ActiveMQ component with explicit credentials.
        return new ClassPathXmlApplicationContext(
                "org/apache/camel/component/activemq/ActiveMQComponentFactoryUserNamePassword.xml");
    }

    @Test
    public void testActiveMQ() throws Exception {
        // Verify the credentials configured in XML made it into the component...
        final ActiveMQComponent component = context.getComponent("activemq", ActiveMQComponent.class);
        assertNotNull(component);
        final ActiveMQConfiguration configuration = (ActiveMQConfiguration) component.getConfiguration();
        assertNotNull(configuration);
        assertEquals("admin2", configuration.getUserName());
        assertEquals("secret2", configuration.getPassword());

        // ...and that a message actually flows through the broker.
        getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
        template.sendBody("activemq:queue:bar", "Hello World");
        assertMockEndpointsSatisfied();
    }
}
| punkhorn/camel-upstream | components/camel-activemq/src/test/java/org/apache/camel/component/activemq/ActiveMQComponentFactoryUserNamePasswordTest.java | Java | apache-2.0 | 2,000 |
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2021 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.model;
/**
* Virtual object.
* Object which doesn't exist in database but exist on client side.
* Virtual schemas can be created for some drivers (e.g. Phoenix)
*/
public interface DBPVirtualObject
{
    /**
     * Returns {@code true} if this object exists only on the client side and
     * has no counterpart in the underlying database.
     */
    boolean isVirtual();
}
| Sargul/dbeaver | plugins/org.jkiss.dbeaver.model/src/org/jkiss/dbeaver/model/DBPVirtualObject.java | Java | apache-2.0 | 917 |
// Stager bootstrap: runs once per payload delivery. When this instance
// carries a real job key it reports host info in and forks a worker, then
// exits; when the key is the literal "stage" it enters the work-polling
// loop appropriate for the host (HTA gets the timeout variant).
try
{
    if (Koadic.JOBKEY != "stage")
    {
        if (Koadic.isHTA())
        {
            // Raise IE's script-statement ceiling so long-running jobs are
            // not killed by the "slow script" guard.
            // NOTE(review): the original comment referenced ...\Internet
            // Explorer\Style but the code writes to ...\Styles — confirm
            // which registry key is intended.
            //HKCU\SOFTWARE\Microsoft\Internet Explorer\Style\MaxScriptStatements = 0xFFFFFFFF
            var path = "SOFTWARE\\Microsoft\\Internet Explorer\\Styles";
            var key = "MaxScriptStatements";
            Koadic.registry.write(Koadic.registry.HKCU, path, key, 0xFFFFFFFF, Koadic.registry.DWORD);
        }
        // Report host/user info to the server for this job.
        Koadic.work.report(Koadic.user.info());
        try {
            // Fork a fresh stage worker; errors are reported, not fatal here.
            Koadic.work.fork("");
        } catch (e) {
            Koadic.work.error(e)
        }
        Koadic.exit();
    }
    else
    {
        if (Koadic.isHTA())
            DoWorkTimeout();
        else
            DoWorkLoop();
    }
}
catch (e)
{
    // todo: critical error reporting
    Koadic.work.error(e);
}
// Polls the server once for work. Returns false when the payload has
// expired, the server sends a kill/unknown status, or the poll fails;
// returns true when polling should continue.
function DoWork()
{
    // Stop polling entirely once the payload's expiry time has passed.
    var epoch = new Date().getTime();
    var expire = parseInt(Koadic.EXPIRE);
    if (epoch > expire)
    {
        return false;
    }
    try
    {
        var work = Koadic.work.get();
        // 501 = run job (x64 or x86), 502 = force x86.
        // NOTE(review): the original comments said 201/202, which does not
        // match the status codes checked below — confirm the server's codes.
        if (work.status == 501 || work.status == 502)
        {
            if (work.responseText.length > 0) {
                // Response body carries the job key for the forked worker.
                var jobkey = work.responseText;
                Koadic.work.fork(jobkey, work.status == 502);
            }
        }
        else // if (work.status == 500) // kill code
        {
            return false;
        }
    }
    catch (e)
    {
        // Network/COM failure: treat as a signal to stop polling.
        return false;
    }
    return true;
}
// Keeps polling for work until DoWork() reports expiry/failure, then exits.
function DoWorkLoop()
{
    for (;;)
    {
        if (!DoWork())
        {
            break;
        }
    }
    Koadic.exit();
}
// HTA variant of the polling loop: performs a bounded burst of 10 polls,
// then forks a fresh stage worker and exits so the HTA process does not
// run (and hang the UI) indefinitely.
function DoWorkTimeout()
{
    for (var i = 0; i < 10; ++i)
    {
        if (!DoWork())
        {
            Koadic.exit();
            return;
        }
    }
    //window.setTimeout(DoWorkTimeoutCallback, 0);
    Koadic.work.fork("");
    Koadic.exit();
}
| zerosum0x0/koadic | data/stager/js/stage.js | JavaScript | apache-2.0 | 1,812 |
package profitbricks
import (
"context"
"github.com/ionos-cloud/sdk-go/v5"
"net/http"
)
// Lan represents a single LAN resource, mirroring the compat REST shape.
type Lan struct {
	ID string `json:"id,omitempty"`
	PBType string `json:"type,omitempty"`
	Href string `json:"href,omitempty"`
	Metadata *Metadata `json:"metadata,omitempty"`
	Properties LanProperties `json:"properties,omitempty"`
	Entities *LanEntities `json:"entities,omitempty"`
	Response string `json:"Response,omitempty"`
	Headers *http.Header `json:"headers,omitempty"`
	StatusCode int `json:"statuscode,omitempty"`
}

// LanProperties holds the mutable attributes of a LAN.
type LanProperties struct {
	Name string `json:"name,omitempty"`
	Public bool `json:"public,omitempty"`
	IPFailover *[]IPFailover `json:"ipFailover,omitempty"`
	PCC string `json:"pcc,omitempty"`
}

// LanEntities groups the sub-resources attached to a LAN.
type LanEntities struct {
	Nics *LanNics `json:"nics,omitempty"`
}

// IPFailover maps a failover IP to the NIC that currently owns it.
type IPFailover struct {
	NicUUID string `json:"nicUuid,omitempty"`
	IP string `json:"ip,omitempty"`
}

// LanNics is the collection of NICs connected to a LAN.
type LanNics struct {
	ID string `json:"id,omitempty"`
	PBType string `json:"type,omitempty"`
	Href string `json:"href,omitempty"`
	Items []Nic `json:"items,omitempty"`
}

// Lans is a collection of Lan resources plus response bookkeeping.
type Lans struct {
	ID string `json:"id,omitempty"`
	PBType string `json:"type,omitempty"`
	Href string `json:"href,omitempty"`
	Items []Lan `json:"items,omitempty"`
	Response string `json:"Response,omitempty"`
	Headers *http.Header `json:"headers,omitempty"`
	StatusCode int `json:"statuscode,omitempty"`
}
// ListLans returns the collection of LANs in the given datacenter.
// The returned err is the SDK call's error; conversion failures are
// returned on their own. (Dead commented-out legacy implementation removed.)
func (c *Client) ListLans(dcid string) (*Lans, error) {
	ctx, cancel := c.GetContext()
	if cancel != nil {
		defer cancel()
	}
	rsp, apiResponse, err := c.CoreSdk.LanApi.DatacentersLansGet(ctx, dcid).Execute()
	ret := Lans{}
	if errConvert := convertToCompat(&rsp, &ret); errConvert != nil {
		return nil, errConvert
	}
	fillInResponse(&ret, apiResponse)
	return &ret, err
}
// CreateLan creates a LAN in the given datacenter from the supplied compat
// Lan struct and returns the created resource with request-tracking headers.
// (Previous doc claimed a "jason []byte" input; the parameter is a Lan
// struct. Dead commented-out legacy implementation removed.)
func (c *Client) CreateLan(dcid string, request Lan) (*Lan, error) {
	input := ionoscloud.LanPost{}
	if errConvert := convertToCore(&request, &input); errConvert != nil {
		return nil, errConvert
	}
	ctx, cancel := c.GetContext()
	if cancel != nil {
		defer cancel()
	}
	rsp, apiResponse, err := c.CoreSdk.LanApi.DatacentersLansPost(ctx, dcid).Lan(input).Execute()
	ret := Lan{}
	if errConvert := convertToCompat(&rsp, &ret); errConvert != nil {
		return nil, errConvert
	}
	fillInResponse(&ret, apiResponse)
	return &ret, err
}
// CreateLanAndWait creates a LAN, waits for the provisioning request to
// finish and returns a refreshed copy of the LAN.
// Note that an error does not necessarily mean the resource was not created:
// if both the returned LAN and error are non-nil, a resource with that ID
// exists, but waiting for the request or refreshing the resource failed.
func (c *Client) CreateLanAndWait(ctx context.Context, dcid string, request Lan) (*Lan, error) {
	created, err := c.CreateLan(dcid, request)
	if err != nil {
		return created, err
	}
	if err = c.WaitTillProvisionedOrCanceled(ctx, created.Headers.Get("location")); err != nil {
		return created, err
	}
	refreshed, err := c.GetLan(dcid, created.ID)
	if err != nil {
		// Return the originally created resource so the caller still has the ID.
		return created, err
	}
	return refreshed, nil
}
// GetLan fetches the LAN identified by lanid in the given datacenter.
// (Dead commented-out legacy implementation removed.)
func (c *Client) GetLan(dcid, lanid string) (*Lan, error) {
	ctx, cancel := c.GetContext()
	if cancel != nil {
		defer cancel()
	}
	rsp, apiResponse, err := c.CoreSdk.LanApi.DatacentersLansFindById(ctx, dcid, lanid).Execute()
	ret := Lan{}
	if errConvert := convertToCompat(&rsp, &ret); errConvert != nil {
		return nil, errConvert
	}
	fillInResponse(&ret, apiResponse)
	return &ret, err
}
// UpdateLan performs a partial (PATCH) update of the LAN's properties and
// returns the updated resource. (Previous doc claimed a "json []byte" input;
// the parameter is a LanProperties struct. Dead commented-out legacy
// implementation removed.)
func (c *Client) UpdateLan(dcid string, lanid string, obj LanProperties) (*Lan, error) {
	input := ionoscloud.LanProperties{}
	if errConvert := convertToCore(&obj, &input); errConvert != nil {
		return nil, errConvert
	}
	ctx, cancel := c.GetContext()
	if cancel != nil {
		defer cancel()
	}
	rsp, apiResponse, err := c.CoreSdk.LanApi.DatacentersLansPatch(ctx, dcid, lanid).Lan(input).Execute()
	ret := Lan{}
	if errConvert := convertToCompat(&rsp, &ret); errConvert != nil {
		return nil, errConvert
	}
	fillInResponse(&ret, apiResponse)
	return &ret, err
}
// UpdateLanAndWait applies the given property changes, waits for the request
// to finish and returns a refreshed copy of the LAN.
// Note that an error does not necessarily mean the resource was not updated:
// if both the returned LAN and error are non-nil, a resource with that ID
// exists, but waiting for the request or refreshing the resource failed.
func (c *Client) UpdateLanAndWait(ctx context.Context, dcid, lanid string, props LanProperties) (*Lan, error) {
	updated, err := c.UpdateLan(dcid, lanid, props)
	if err != nil {
		return updated, err
	}
	if err = c.WaitTillProvisionedOrCanceled(ctx, updated.Headers.Get("location")); err != nil {
		return updated, err
	}
	refreshed, err := c.GetLan(dcid, updated.ID)
	if err != nil {
		// Return the updated resource so the caller still has the ID.
		return updated, err
	}
	return refreshed, nil
}
// DeleteLan deletes the LAN identified by lanid and returns the response
// headers (which carry the request-tracking "location"). The headers are
// nil when the SDK produced no API response. (Dead commented-out legacy
// implementation removed.)
func (c *Client) DeleteLan(dcid, lanid string) (*http.Header, error) {
	ctx, cancel := c.GetContext()
	if cancel != nil {
		defer cancel()
	}
	_, apiResponse, err := c.CoreSdk.LanApi.DatacentersLansDelete(ctx, dcid, lanid).Execute()
	if apiResponse != nil {
		return &apiResponse.Header, err
	}
	return nil, err
}
// DeleteLanAndWait deletes the given LAN and blocks until the deletion
// request has finished (or the context is canceled).
// NOTE(review): DeleteLan returns a nil header when the SDK produced no API
// response; rsp.Get would then panic. Confirm the SDK always returns a
// response when err is nil.
func (c *Client) DeleteLanAndWait(ctx context.Context, dcid, lanid string) error {
	rsp, err := c.DeleteLan(dcid, lanid)
	if err != nil {
		return err
	}
	return c.WaitTillProvisionedOrCanceled(ctx, rsp.Get("location"))
}
| profitbricks/profitbricks-sdk-go | lan.go | GO | apache-2.0 | 6,599 |
package me.wangkang.spring.messaging;
/**
* Hello world!
*
*/
/**
 * Minimal application entry point that prints a greeting to standard output.
 */
public class App {

    /**
     * Program entry point.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        System.out.println( "Hello World!" );
    }
}
| iwangkang/spring | spring-framework-example/spring-messaging-example/src/main/java/me/wangkang/spring/messaging/App.java | Java | apache-2.0 | 191 |
package com.gdn.venice.facade;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import javax.ejb.EJBException;
import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import javax.persistence.PersistenceContextType;
import javax.persistence.Query;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.XMLConfiguration;
import org.apache.log4j.Logger;
import com.gdn.venice.facade.callback.SessionCallback;
import com.gdn.venice.facade.finder.FinderReturn;
import com.gdn.venice.persistence.VenOrderItemHistory;
import com.djarum.raf.utilities.JPQLAdvancedQueryCriteria;
import com.djarum.raf.utilities.JPQLQueryStringBuilder;
import com.djarum.raf.utilities.Log4jLoggerFactory;
/**
* Session Bean implementation class VenOrderItemHistorySessionEJBBean
*
* <p>
* <b>author:</b> <a href="mailto:david@pwsindonesia.com">David Forden</a>
* <p>
* <b>version:</b> 1.0
* <p>
* <b>since:</b> 2011
*
*/
@Stateless(mappedName = "VenOrderItemHistorySessionEJBBean")
public class VenOrderItemHistorySessionEJBBean implements VenOrderItemHistorySessionEJBRemote,
VenOrderItemHistorySessionEJBLocal {
/*
* Implements an IOC model for pre/post callbacks to persist, merge, and
* remove operations. The onPrePersist, onPostPersist, onPreMerge,
* onPostMerge, onPreRemove and OnPostRemove operations must be implemented
* by the callback class.
*/
private String _sessionCallbackClassName = null;
// A reference to the callback object that has been instantiated
private SessionCallback _callback = null;
protected static Logger _log = null;
// The configuration file to use
private String _configFile = System.getenv("VENICE_HOME")
+ "/conf/module-config.xml";
//The binding array used when binding variables into a JPQL query
private Object[] bindingArray = null;
@PersistenceContext(unitName = "GDN-Venice-Persistence", type = PersistenceContextType.TRANSACTION)
protected EntityManager em;
	/**
	 * Default constructor. Initializes the class logger, reads the module
	 * configuration file and, if configuration succeeds, instantiates the
	 * optional session callback handler.
	 */
	public VenOrderItemHistorySessionEJBBean() {
		super();
		Log4jLoggerFactory loggerFactory = new Log4jLoggerFactory();
		_log = loggerFactory
				.getLog4JLogger("com.gdn.venice.facade.VenOrderItemHistorySessionEJBBean");
		// If the configuration is successful then instantiate the callback
		if (this.configure())
			this.instantiateTriggerCallback();
	}
	/**
	 * Reads the venice configuration file ($VENICE_HOME/conf/module-config.xml)
	 * and resolves this bean's session callback class name from the
	 * sessionBeanConfig/callback entry whose name attribute matches this
	 * bean's simple class name.
	 *
	 * @return Boolean.TRUE when the file was parsed successfully,
	 *         Boolean.FALSE on a ConfigurationException
	 */
	private Boolean configure() {
		_log.debug("Venice Configuration File:" + _configFile);
		try {
			XMLConfiguration config = new XMLConfiguration(_configFile);
			/*
			 * Get the index entry for the adapter configuration from the
			 * configuration file - there will be multiple adapter
			 * configurations
			 */
			@SuppressWarnings({ "rawtypes" })
			List callbacks = config
					.getList("sessionBeanConfig.callback.[@name]");
			// Defaults to MAX_VALUE; when no entry matches, getString() below
			// is evaluated with that index and yields null.
			// NOTE(review): confirm a missing entry is meant to leave
			// _sessionCallbackClassName null rather than fail fast.
			Integer beanConfigIndex = new Integer(Integer.MAX_VALUE);
			@SuppressWarnings("rawtypes")
			Iterator i = callbacks.iterator();
			while (i.hasNext()) {
				String beanName = (String) i.next();
				if (this.getClass().getSimpleName().equals(beanName)) {
					beanConfigIndex = callbacks.indexOf(beanName);
					_log.debug("Bean configuration for " + beanName
							+ " found at " + beanConfigIndex);
				}
			}
			this._sessionCallbackClassName = config
					.getString("sessionBeanConfig.callback(" + beanConfigIndex + ").[@class]");
			_log.debug("Loaded configuration for _sessionCallbackClassName:"
					+ _sessionCallbackClassName);
		} catch (ConfigurationException e) {
			_log.error("A ConfigurationException occured when processing the configuration file"
					+ e.getMessage());
			e.printStackTrace();
			return Boolean.FALSE;
		}
		return Boolean.TRUE;
	}
/**
* Instantiates the trigger callback handler class
*
* @return
*/
Boolean instantiateTriggerCallback() {
if (_sessionCallbackClassName != null
&& !_sessionCallbackClassName.isEmpty())
try {
Class<?> c = Class.forName(_sessionCallbackClassName);
_callback = (SessionCallback) c.newInstance();
} catch (ClassNotFoundException e) {
_log.error("A ClassNotFoundException occured when trying to instantiate:"
+ this._sessionCallbackClassName);
e.printStackTrace();
return Boolean.FALSE;
} catch (InstantiationException e) {
_log.error("A InstantiationException occured when trying to instantiate:"
+ this._sessionCallbackClassName);
e.printStackTrace();
return Boolean.FALSE;
} catch (IllegalAccessException e) {
_log.error("A IllegalAccessException occured when trying to instantiate:"
+ this._sessionCallbackClassName);
e.printStackTrace();
return Boolean.FALSE;
}
return Boolean.TRUE;
}
/*
* (non-Javadoc)
*
* @see
* com.gdn.venice.facade.VenOrderItemHistorySessionEJBRemote#queryByRange(java.lang
* .String, int, int)
*/
@Override
@SuppressWarnings({ "unchecked" })
public List<VenOrderItemHistory> queryByRange(String jpqlStmt, int firstResult,
int maxResults) {
Long startTime = System.currentTimeMillis();
_log.debug("queryByRange()");
Query query = null;
try {
query = em.createQuery(jpqlStmt);
if(this.bindingArray != null){
for(int i = 0; i < bindingArray.length; ++i){
if(bindingArray[i] != null){
query.setParameter(i+1, bindingArray[i]);
}
}
}
} catch (Exception e) {
_log.error("An exception occured when calling em.createQuery():"
+ e.getMessage());
throw new EJBException(e);
}
try {
if (firstResult > 0) {
query = query.setFirstResult(firstResult);
}
if (maxResults > 0) {
query = query.setMaxResults(maxResults);
}
} catch (Exception e) {
_log.error("An exception occured when accessing the result set of a query:"
+ e.getMessage());
throw new EJBException(e);
}
List<VenOrderItemHistory> returnList = (List<VenOrderItemHistory>)query.getResultList();
this.bindingArray = null;
Long endTime = System.currentTimeMillis();
Long duration = startTime - endTime;
_log.debug("queryByRange() duration:" + duration + "ms");
return returnList;
}
/*
* (non-Javadoc)
*
* @see
* com.gdn.venice.facade.VenOrderItemHistorySessionEJBRemote#persistVenOrderItemHistory(com
* .gdn.venice.persistence.VenOrderItemHistory)
*/
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public VenOrderItemHistory persistVenOrderItemHistory(VenOrderItemHistory venOrderItemHistory) {
Long startTime = System.currentTimeMillis();
_log.debug("persistVenOrderItemHistory()");
// Call the onPrePersist() callback and throw an exception if it fails
if (this._callback != null) {
if (!this._callback.onPrePersist(venOrderItemHistory)) {
_log.error("An onPrePersist callback operation failed for:"
+ this._sessionCallbackClassName);
throw new EJBException(
"An onPrePersist callback operation failed for:"
+ this._sessionCallbackClassName);
}
}
VenOrderItemHistory existingVenOrderItemHistory = null;
if (venOrderItemHistory != null && venOrderItemHistory.getOrderItemHistoryId() != null) {
_log.debug("persistVenOrderItemHistory:em.find()");
try {
existingVenOrderItemHistory = em.find(VenOrderItemHistory.class,
venOrderItemHistory.getOrderItemHistoryId());
} catch (Exception e) {
_log.error("An exception occured when calling em.find():"
+ e.getMessage());
throw new EJBException(e);
}
}
if (existingVenOrderItemHistory == null) {
_log.debug("persistVenOrderItemHistory:em.persist()");
try {
em.persist(venOrderItemHistory);
} catch (Exception e) {
_log.error("An exception occured when calling em.persist():"
+ e.getMessage());
throw new EJBException(e);
}
_log.debug("persistVenOrderItemHistory:em.flush()");
try {
em.flush();
em.clear();
} catch (Exception e) {
_log.error("An exception occured when calling em.flush():"
+ e.getMessage());
throw new EJBException(e);
}
// Call the onPostPersist() callback and throw an exception if it fails
if (this._callback != null) {
if (!this._callback.onPostPersist(venOrderItemHistory)) {
_log.error("An onPostPersist callback operation failed for:"
+ this._sessionCallbackClassName);
throw new EJBException(
"An onPostPersist callback operation failed for:"
+ this._sessionCallbackClassName);
}
}
Long endTime = System.currentTimeMillis();
Long duration = startTime - endTime;
_log.debug("persistVenOrderItemHistory() duration:" + duration + "ms");
return venOrderItemHistory;
} else {
throw new EJBException("VenOrderItemHistory exists!. VenOrderItemHistory = "
+ venOrderItemHistory.getOrderItemHistoryId());
}
}
/*
* (non-Javadoc)
*
* @see
* com.gdn.venice.facade.VenOrderItemHistorySessionEJBRemote#persistVenOrderItemHistoryList
* (java.util.List)
*/
@Override
@SuppressWarnings("rawtypes")
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public ArrayList<VenOrderItemHistory> persistVenOrderItemHistoryList(
List<VenOrderItemHistory> venOrderItemHistoryList) {
_log.debug("persistVenOrderItemHistoryList()");
Iterator i = venOrderItemHistoryList.iterator();
while (i.hasNext()) {
this.persistVenOrderItemHistory((VenOrderItemHistory) i.next());
}
return (ArrayList<VenOrderItemHistory>)venOrderItemHistoryList;
}
/*
* (non-Javadoc)
*
* @see
* com.gdn.venice.facade.VenOrderItemHistorySessionEJBRemote#mergeVenOrderItemHistory(com.
* gdn.venice.persistence.VenOrderItemHistory)
*/
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public VenOrderItemHistory mergeVenOrderItemHistory(VenOrderItemHistory venOrderItemHistory) {
Long startTime = System.currentTimeMillis();
_log.debug("mergeVenOrderItemHistory()");
// Call the onPreMerge() callback and throw an exception if it fails
if (this._callback != null) {
if (!this._callback.onPreMerge(venOrderItemHistory)) {
_log.error("An onPreMerge callback operation failed for:"
+ this._sessionCallbackClassName);
throw new EJBException(
"An onPreMerge callback operation failed for:"
+ this._sessionCallbackClassName);
}
}
VenOrderItemHistory existing = null;
if (venOrderItemHistory.getOrderItemHistoryId() != null){
_log.debug("mergeVenOrderItemHistory:em.find()");
existing = em.find(VenOrderItemHistory.class, venOrderItemHistory.getOrderItemHistoryId());
}
if (existing == null) {
return this.persistVenOrderItemHistory(venOrderItemHistory);
} else {
_log.debug("mergeVenOrderItemHistory:em.merge()");
try {
em.merge(venOrderItemHistory);
} catch (Exception e) {
_log.error("An exception occured when calling em.merge():"
+ e.getMessage());
throw new EJBException(e);
}
_log.debug("mergeVenOrderItemHistory:em.flush()");
try {
em.flush();
em.clear();
} catch (Exception e) {
_log.error("An exception occured when calling em.flush():"
+ e.getMessage());
throw new EJBException(e);
}
VenOrderItemHistory newobject = em.find(VenOrderItemHistory.class,
venOrderItemHistory.getOrderItemHistoryId());
_log.debug("mergeVenOrderItemHistory():em.refresh");
try {
em.refresh(newobject);
} catch (Exception e) {
_log.error("An exception occured when calling em.refresh():"
+ e.getMessage());
throw new EJBException(e);
}
// Call the onPostMerge() callback and throw an exception if it fails
if (this._callback != null) {
if (!this._callback.onPostMerge(newobject)) {
_log.error("An onPostMerge callback operation failed for:"
+ this._sessionCallbackClassName);
throw new EJBException(
"An onPostMerge callback operation failed for:"
+ this._sessionCallbackClassName);
}
}
Long endTime = System.currentTimeMillis();
Long duration = startTime - endTime;
_log.debug("mergeVenOrderItemHistory() duration:" + duration + "ms");
return newobject;
}
}
/*
* (non-Javadoc)
*
* @see
* com.gdn.venice.facade.VenOrderItemHistorySessionEJBRemote#mergeVenOrderItemHistoryList(
* java.util.List)
*/
@Override
@SuppressWarnings("rawtypes")
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public ArrayList<VenOrderItemHistory> mergeVenOrderItemHistoryList(
List<VenOrderItemHistory> venOrderItemHistoryList) {
_log.debug("mergeVenOrderItemHistoryList()");
Iterator i = venOrderItemHistoryList.iterator();
while (i.hasNext()) {
this.mergeVenOrderItemHistory((VenOrderItemHistory) i.next());
}
return (ArrayList<VenOrderItemHistory>)venOrderItemHistoryList;
}
/*
* (non-Javadoc)
*
* @see
* com.gdn.venice.facade.VenOrderItemHistorySessionEJBRemote#removeVenOrderItemHistory(com.
* gdn.venice.persistence.VenOrderItemHistory)
*/
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public void removeVenOrderItemHistory(VenOrderItemHistory venOrderItemHistory) {
Long startTime = System.currentTimeMillis();
_log.debug("removeVenOrderItemHistory()");
// Call the onPreRemove() callback and throw an exception if it fails
if (this._callback != null) {
if (!this._callback.onPreRemove(venOrderItemHistory)) {
_log.error("An onPreRemove callback operation failed for:"
+ this._sessionCallbackClassName);
throw new EJBException(
"An onPreRemove callback operation failed for:"
+ this._sessionCallbackClassName);
}
}
_log.debug("removeVenOrderItemHistory:em.find()");
venOrderItemHistory = em.find(VenOrderItemHistory.class, venOrderItemHistory.getOrderItemHistoryId());
try {
_log.debug("removeVenOrderItemHistory:em.remove()");
em.remove(venOrderItemHistory);
} catch (Exception e) {
_log.error("An exception occured when calling em.remove():"
+ e.getMessage());
throw new EJBException(e);
}
// Call the onPostRemove() callback and throw an exception if it fails
if (this._callback != null) {
if (!this._callback.onPostRemove(venOrderItemHistory)) {
_log.error("An onPostRemove callback operation failed for:"
+ this._sessionCallbackClassName);
throw new EJBException(
"An onPostRemove callback operation failed for:"
+ this._sessionCallbackClassName);
}
}
_log.debug("removeVenOrderItemHistory:em.flush()");
em.flush();
em.clear();
Long endTime = System.currentTimeMillis();
Long duration = startTime - endTime;
_log.debug("removeVenOrderItemHistory() duration:" + duration + "ms");
}
/*
* (non-Javadoc)
*
* @see
* com.gdn.venice.facade.VenOrderItemHistorySessionEJBRemote#removeVenOrderItemHistoryList(
* java.util.List)
*/
@Override
@SuppressWarnings("rawtypes")
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public void removeVenOrderItemHistoryList(List<VenOrderItemHistory> venOrderItemHistoryList) {
_log.debug("removeVenOrderItemHistoryList()");
Iterator i = venOrderItemHistoryList.iterator();
while (i.hasNext()) {
this.removeVenOrderItemHistory((VenOrderItemHistory) i.next());
}
}
/*
* (non-Javadoc)
*
* @see
* com.gdn.venice.facade.VenOrderItemHistorySessionEJBRemote#findByVenOrderItemHistoryLike(
* com.gdn.venice.persistence.VenOrderItemHistory, int, int)
*/
@Override
@SuppressWarnings({ "rawtypes", "unchecked" })
public List<VenOrderItemHistory> findByVenOrderItemHistoryLike(VenOrderItemHistory venOrderItemHistory,
JPQLAdvancedQueryCriteria criteria, int firstResult, int maxResults) {
Long startTime = System.currentTimeMillis();
_log.debug("findByVenOrderItemHistoryLike()");
JPQLQueryStringBuilder qb = new JPQLQueryStringBuilder(venOrderItemHistory);
HashMap complexTypeBindings = new HashMap();
String stmt = qb.buildQueryString(complexTypeBindings, criteria);
if(criteria != null){
/*
* Get the binding array from the query builder and make
* it available to the queryByRange method
*/
this.bindingArray = qb.getBindingArray();
for(int i = 0; i < qb.getBindingArray().length; i++){
System.out.println("Bindings:" + i + ":" + qb.getBindingArray()[i]);
}
List<VenOrderItemHistory> venOrderItemHistoryList = this.queryByRange(stmt, firstResult, maxResults);
Long endTime = System.currentTimeMillis();
Long duration = startTime - endTime;
_log.debug("findByVenOrderItemHistoryLike() duration:" + duration + "ms");
return venOrderItemHistoryList;
}else{
String errMsg = "A query has been initiated with null criteria.";
_log.error(errMsg);
throw new EJBException(errMsg);
}
}
/*
* (non-Javadoc)
*
* @see
* com.gdn.venice.facade.VenOrderItemHistorySessionEJBRemote#findByVenOrderItemHistoryLikeFR(
* com.gdn.venice.persistence.VenOrderItemHistory, int, int)
*/
@Override
@SuppressWarnings({ "rawtypes", "unchecked" })
public FinderReturn findByVenOrderItemHistoryLikeFR(VenOrderItemHistory venOrderItemHistory,
JPQLAdvancedQueryCriteria criteria, int firstResult, int maxResults) {
Long startTime = System.currentTimeMillis();
_log.debug("findByVenOrderItemHistoryLikeFR()");
JPQLQueryStringBuilder qb = new JPQLQueryStringBuilder(venOrderItemHistory);
HashMap complexTypeBindings = new HashMap();
String stmt = qb.buildQueryString(complexTypeBindings, criteria);
if(criteria != null){
/*
* Get the binding array from the query builder and make
* it available to the queryByRange method
*/
this.bindingArray = qb.getBindingArray();
for(int i = 0; i < qb.getBindingArray().length; i++){
System.out.println("Bindings:" + i + ":" + qb.getBindingArray()[i]);
}
//Set the finder return object with the count of the total query rows
FinderReturn fr = new FinderReturn();
String countStmt = "select count(o) " + stmt.substring(stmt.indexOf("from"));
Query query = null;
try {
query = em.createQuery(countStmt);
if(this.bindingArray != null){
for(int i = 0; i < bindingArray.length; ++i){
if(bindingArray[i] != null){
query.setParameter(i+1, bindingArray[i]);
}
}
}
Long totalRows = (Long)query.getSingleResult();
fr.setNumQueryRows(totalRows);
} catch (Exception e) {
_log.error("An exception occured when calling em.createQuery():"
+ e.getMessage());
throw new EJBException(e);
}
//Set the finder return object with the query list
fr.setResultList(this.queryByRange(stmt, firstResult, maxResults));
Long endTime = System.currentTimeMillis();
Long duration = startTime - endTime;
_log.debug("findByVenOrderItemHistoryLike() duration:" + duration + "ms");
return fr;
}else{
String errMsg = "A query has been initiated with null criteria.";
_log.error(errMsg);
throw new EJBException(errMsg);
}
}
}
| yauritux/venice-legacy | Venice/Venice-Service/src/main/java/com/gdn/venice/facade/VenOrderItemHistorySessionEJBBean.java | Java | apache-2.0 | 19,139 |
package com.hiwhitley.chapter01;
/**
* Created by hiwhitley on 2016/10/27.
*/
/**
 * Simple immutable value object describing a pet by its type (e.g. "cat").
 */
public class Pet {

    /** Kind of animal this pet is; fixed at construction time. */
    private final String type;

    /**
     * Creates a pet of the given type.
     *
     * @param type the kind of animal
     */
    public Pet(String type) {
        this.type = type;
    }

    /**
     * @return the kind of animal this pet is
     */
    public String getType() {
        return type;
    }
}
| hiwhitley/CodingEveryDay | src/com/hiwhitley/chapter01/Pet.java | Java | apache-2.0 | 248 |