text stringlengths 1 1.05M |
|---|
#!/bin/sh
# Env vars for local broker
# set PS+ env variables
# Broker host and message-VPN used by all URLs below.
export HOST=localhost
export VPN=default
# Messaging (client) credentials.
export MSG_USER=default
export MSG_PASS=default
# Management (SEMP admin) credentials.
export ADMIN_USER=admin
export ADMIN_PASS=admin
# URLs should not need change if using default ports
export SEMP_URL=http://$HOST:8080
# change to 55556 if on Mac, port 55555 blocked
export SMF_URL=tcp://$HOST:55555
export WS_URL=ws://$HOST:8008
export REST_URL=http://$HOST:9000
export MQTT_URL=tcp://$HOST:1883
export MQTTW_URL=ws://$HOST:8000
export AMQP_URL=tcp://$HOST:5672
# Uncomment to use secure ports
#export SEMP_URL=https://$HOST:1943
#export SMF_URL=tcps://$HOST:55443
#export WS_URL=wss://$HOST:1443
#export REST_URL=https://$HOST:9443
#export MQTT_URL=ssl://$HOST:8883
#export MQTTW_URL=wss://$HOST:8443
#export AMQP_URL=tcps://$HOST:5671
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gzip
import (
"net/http"
"github.com/onsi/ginkgo"
"github.com/apisix/manager-api/test/e2enew/base"
)
// Verifies that gzip output encoding is enabled on the Manager API:
// requesting "/" while advertising gzip support via Accept-Encoding must
// come back with a "Content-Encoding: gzip" response header.
var _ = ginkgo.Describe("Gzip enable", func() {
ginkgo.It("get index.html", func() {
base.RunTestCase(base.HttpTestCase{
Object: base.ManagerApiExpect(),
Method: http.MethodGet,
Path: "/",
Headers: map[string]string{"Accept-Encoding": "gzip, deflate, br"},
ExpectHeaders: map[string]string{"Content-Encoding": "gzip"},
})
})
})
|
<filename>Documentation/classarmnn_1_1_ref_resize_bilinear_workload.js<gh_stars>0
// Doxygen-generated navigation data for the RefResizeBilinearWorkload class
// page: each entry is [display name, target xhtml anchor, children].
var classarmnn_1_1_ref_resize_bilinear_workload =
[
[ "Execute", "classarmnn_1_1_ref_resize_bilinear_workload.xhtml#ae071e8822437c78baea75c3aef3a263a", null ]
];
-- The 5 most common last names and how many rows carry each,
-- most frequent first.
SELECT last_name, COUNT(*) as count
FROM table_name
GROUP BY last_name
ORDER BY count DESC
LIMIT 5;
<gh_stars>1-10
/**
 * Contract for an object that can attach reactive functionality to a source
 * object (`bind`) and later detach it (`unbind`).
 */
export interface Behavior {
bind(source: unknown): void;
unbind(): void;
}
/**
 * Factory that creates a Behavior for a target.
 * `targetIndex` is presumably the index of the target node the factory
 * applies to — TODO confirm against the call sites.
 */
export interface BehaviorFactory {
targetIndex: number;
createBehavior(target: any): Behavior;
}
|
package org.rs2server.tools;
import java.sql.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * Connects to the local "motivote" MySQL database, tallies the entries in
 * the mv_votes table, and determines the most frequent voter and how many
 * votes they cast.
 *
 * @author Clank1337
 */
public class TopVoter {

	private final static String HOST = "192.168.3.11";

	private Connection conn;
	private Statement statement;
	private Properties connectionProps;
	private ResultSet result;

	public TopVoter() {
		try {
			connectionProps = new Properties();
			connectionProps.put("user", "lostisle_player");
			connectionProps.put("password", "<PASSWORD>");
			conn = DriverManager.getConnection("jdbc:mysql://" + HOST + "/lostisle_motivote", connectionProps);
			statement = conn.createStatement();

			// Column 3 of mv_votes is assumed to hold the voter identifier
			// — TODO confirm against the table schema.
			ArrayList<String> voteList = new ArrayList<>();
			result = statement.executeQuery("SELECT * FROM mv_votes");
			while (result.next()) {
				voteList.add(result.getString(3));
			}

			// Most frequent voter, or null when the table is empty.
			String top = voteList.stream().collect(Collectors.groupingBy(Function.identity(), Collectors.counting()))
					.entrySet().stream().max((o1, o2) -> o1.getValue().compareTo(o2.getValue()))
					.map(Map.Entry::getKey).orElse(null);
			int occurences = Collections.frequency(voteList, top);
		} catch (SQLException e) {
			e.printStackTrace();
		} finally {
			// Bug fix: the original closed the JDBC resources only on the
			// success path, leaking the connection whenever the query threw.
			closeQuietly(result);
			closeQuietly(statement);
			closeQuietly(conn);
		}
	}

	/** Closes a JDBC resource, swallowing any close-time exception. */
	private static void closeQuietly(AutoCloseable resource) {
		if (resource != null) {
			try {
				resource.close();
			} catch (Exception ignore) {
			}
		}
	}
}
|
<body>
<!-- Simple contact form: name, email and message, POSTed back to the current URL. -->
<h1>Contact Us</h1>
<form action="" method="post">
<label for="name">Name:</label><br>
<input type="text" name="name" id="name"><br><br>
<label for="email">Email:</label><br>
<input type="email" name="email" id="email"><br><br>
<label for="message">Message:</label><br>
<textarea name="message" id="message" cols="50" rows="6"></textarea><br><br>
<input type="submit" value="Submit">
</form>
</body>
<reponame>strikeraryu/chapel
/*
* Copyright 2021 Hewlett Packard Enterprise Development LP
* Other additional copyright holders may be indicated within.
*
* The entirety of this work is licensed under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "chpl/uast/ModuleDecl.h"
#include "chpl/uast/Builder.h"
namespace chpl {
namespace uast {
bool ModuleDecl::contentsMatchInner(const ASTNode* other) const {
const ModuleDecl* lhs = this;
const ModuleDecl* rhs = (const ModuleDecl*) other;
return lhs->symDeclContentsMatchInner(rhs);
}
void ModuleDecl::markUniqueStringsInner(Context* context) const {
  // Nothing module-specific to mark; the SymDecl walk covers everything.
  symDeclMarkUniqueStringsInner(context);
}
owned<ModuleDecl>
ModuleDecl::build(Builder* builder, Location loc,
                  UniqueString name, Sym::Visibility vis,
                  Module::Kind kind, ASTList stmts) {
  // Create the Module symbol, wrap it in its declaration node, record the
  // source location of both, then hand ownership of the decl to the caller.
  auto* moduleSym = new Module(std::move(stmts), name, vis, kind);
  auto* decl = new ModuleDecl(toOwned(moduleSym));
  builder->noteLocation(moduleSym, loc);
  builder->noteLocation(decl, loc);
  return toOwned(decl);
}
} // namespace uast
} // namespace chpl
|
//
// connect_pipe.cpp
// ~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 <NAME> (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Disable autolinking for unit tests.
#if !defined(BOOST_ALL_NO_LIB)
#define BOOST_ALL_NO_LIB 1
#endif // !defined(BOOST_ALL_NO_LIB)
// Test that header file is self-contained.
#include <boost/asio/connect_pipe.hpp>
#include <string>
#include <boost/asio/io_context.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/readable_pipe.hpp>
#include <boost/asio/writable_pipe.hpp>
#include <boost/asio/write.hpp>
#include "unit_test.hpp"
#if defined(BOOST_ASIO_HAS_BOOST_BIND)
# include <boost/bind/bind.hpp>
#else // defined(BOOST_ASIO_HAS_BOOST_BIND)
# include <functional>
#endif // defined(BOOST_ASIO_HAS_BOOST_BIND)
//------------------------------------------------------------------------------
// connect_pipe_compile test
// ~~~~~~~~~~~~~~~~~~~~~~~~~
// The following test checks that all connect_pipe functions compile and link
// correctly. Runtime failures are ignored.
namespace connect_pipe_compile {

// Compile-and-link coverage for both connect_pipe overloads; any runtime
// failure is deliberately swallowed.
void test()
{
#if defined(BOOST_ASIO_HAS_PIPE)
  using namespace boost::asio;

  try
  {
    boost::asio::io_context ctx;
    boost::system::error_code ec;

    // Throwing overload.
    readable_pipe read_end1(ctx);
    writable_pipe write_end1(ctx);
    connect_pipe(read_end1, write_end1);

    // Error-code overload.
    readable_pipe read_end2(ctx);
    writable_pipe write_end2(ctx);
    connect_pipe(read_end2, write_end2, ec);
  }
  catch (std::exception&)
  {
  }
#endif // defined(BOOST_ASIO_HAS_PIPE)
}

} // namespace connect_pipe_compile
//------------------------------------------------------------------------------
// connect_pipe_runtime test
// ~~~~~~~~~~~~~~~~~~~~~~~~~
// The following test checks that connect_pipe operates correctly at runtime.
namespace connect_pipe_runtime {
// Payload sent through the pipe. Note sizeof(write_data) includes the
// trailing NUL; the async transfer below deliberately sends all of it.
static const char write_data[]
= "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
// async_read completion handler: records that it ran and checks the whole
// buffer (including the NUL) arrived without error.
void handle_read(const boost::system::error_code& err,
size_t bytes_transferred, bool* called)
{
*called = true;
BOOST_ASIO_CHECK(!err);
BOOST_ASIO_CHECK(bytes_transferred == sizeof(write_data));
}
// async_write completion handler: mirror of handle_read.
void handle_write(const boost::system::error_code& err,
size_t bytes_transferred, bool* called)
{
*called = true;
BOOST_ASIO_CHECK(!err);
BOOST_ASIO_CHECK(bytes_transferred == sizeof(write_data));
}
void test()
{
#if defined(BOOST_ASIO_HAS_PIPE)
using namespace std; // For memcmp.
using namespace boost::asio;
#if defined(BOOST_ASIO_HAS_BOOST_BIND)
namespace bindns = boost;
#else // defined(BOOST_ASIO_HAS_BOOST_BIND)
namespace bindns = std;
#endif // defined(BOOST_ASIO_HAS_BOOST_BIND)
using bindns::placeholders::_1;
using bindns::placeholders::_2;
try
{
boost::asio::io_context io_context;
boost::system::error_code ec1;
boost::system::error_code ec2;
readable_pipe p1(io_context);
writable_pipe p2(io_context);
connect_pipe(p1, p2);
// Synchronous round trip: write the string (without NUL) and read it back.
std::string data1 = write_data;
boost::asio::write(p2, boost::asio::buffer(data1));
std::string data2;
data2.resize(data1.size());
boost::asio::read(p1, boost::asio::buffer(data2));
BOOST_ASIO_CHECK(data1 == data2);
// Asynchronous round trip: full sizeof(write_data) bytes each way.
char read_buffer[sizeof(write_data)];
bool read_completed = false;
boost::asio::async_read(p1,
boost::asio::buffer(read_buffer),
bindns::bind(handle_read,
_1, _2, &read_completed));
bool write_completed = false;
boost::asio::async_write(p2,
boost::asio::buffer(write_data),
bindns::bind(handle_write,
_1, _2, &write_completed));
io_context.run();
BOOST_ASIO_CHECK(read_completed);
BOOST_ASIO_CHECK(write_completed);
BOOST_ASIO_CHECK(memcmp(read_buffer, write_data, sizeof(write_data)) == 0);
}
catch (std::exception&)
{
BOOST_ASIO_CHECK(false);
}
#endif // defined(BOOST_ASIO_HAS_PIPE)
}
} // namespace connect_pipe_runtime
//------------------------------------------------------------------------------
// Registers both the compile-only and the runtime connect_pipe tests.
BOOST_ASIO_TEST_SUITE
(
"connect_pipe",
BOOST_ASIO_TEST_CASE(connect_pipe_compile::test)
BOOST_ASIO_TEST_CASE(connect_pipe_runtime::test)
)
|
<reponame>FarseerCode/baobab-react<filename>src/utils/helpers.js
/**
* Baobab-React Helpers
* =====================
*
* Miscellaneous helper functions.
*/
/**
 * Simple curry function.
 *
 * Returns a wrapper around `fn` that keeps collecting arguments across
 * successive calls until at least `arity` of them have been supplied, then
 * invokes `fn` with everything gathered.
 */
export function curry(fn, arity) {
  function step(collected, incoming) {
    const gathered = collected.concat(incoming);

    // Enough arguments: perform the actual call.
    if (gathered.length >= arity)
      return fn.apply(null, gathered);

    // Otherwise hand back a collector for the next batch.
    return function partial(...more) {
      return step(gathered, more);
    };
  }

  return function curried(...args) {
    return step([], args);
  };
}
/**
 * Solving the mapping given to a higher-order construct.
 *
 * A mapping may be provided either as a plain value or as a function of
 * (props, context); functions are invoked, anything else passes through
 * untouched.
 */
export function solveMapping(mapping, props, context) {
  return typeof mapping === 'function' ? mapping(props, context) : mapping;
}
/**
 * Determines if the given tree is a Baobab tree.
 * FIXME: if Baobab ever implements something like Array.isArray we should use
 * that instead of relying in the internal _identity = '[object Baobab]' value.
 * See https://github.com/Yomguithereal/baobab/blob/master/src/baobab.js#L111
 */
export function isBaobabTree(tree) {
  if (!tree || typeof tree.toString !== 'function')
    return false;
  return tree.toString() === '[object Baobab]';
}
|
def search(e, stack):
    """Return the 1-based depth of `e` from the top of `stack`, or -1.

    The topmost occurrence wins: the element on top of the stack has
    depth 1, the one below it depth 2, and so on.
    """
    for depth, item in enumerate(reversed(stack), start=1):
        if item == e:
            return depth
    return -1
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Probe the compiler given in "$@" for the __ANDROID__ predefine by dumping
# its built-in macros; print "y" when it targets Android.
$* -dM -E - </dev/null 2>&1 | grep -q __ANDROID__ && echo "y"
|
###
# Use this script to compile and install locally
#
# 1. Run this script from project root:
# ```
# $ scripts/compile.sh
# ```
# 2. use pip to install
# ```
# $ pip install dist/{{the package you just compiled}}
# ```
##
# Remove previous build artifacts so the wheel is built from a clean slate.
rm -rf build dist *.egg-info
# Build a binary wheel into dist/.
python3 setup.py bdist_wheel
// 15700. 타일 채우기 4
// 2021.12.02
// 수학
#include<iostream>
using namespace std;
// An n x m board is tiled with 2x1 dominoes, so the answer is simply
// (n * m) / 2 tiles.
int main()
{
	long long rows, cols;
	cin >> rows >> cols;
	cout << rows * cols / 2 << endl;
	return 0;
}
|
import { Constants } from './Constants';
import { Direction } from './Direction';
import { Actor } from './Actor';
import { ZeldaGame } from './ZeldaGame';
import { Rectangle, SpriteSheet } from 'gtp';
import { Link } from './Link';
declare let game: ZeldaGame;
/**
 * A projectile thrown by an enemy, such as a rock or arrow.
 */
export class Projectile extends Actor {

    private readonly _ssRow: number;
    private readonly _ssCol: number;

    constructor(ssRow: number, ssCol: number, x: number, y: number, dir: Direction) {
        super();
        this._ssRow = ssRow;
        this._ssCol = ssCol;
        // In the actual game, rocks start by completely overlapping the
        // enemy who shoots them. Honest!
        this.x = x;
        this.y = y;
        this.dir = dir;
        this.hitBox = new Rectangle();
    }

    /**
     * Projectiles vanish when they collide with Link; anything else is
     * ignored.  Returns whether a collision was registered.
     */
    collidedWith(other: Actor): boolean {
        const hitLink: boolean = other instanceof Link;
        if (hitLink) {
            this.done = true;
        }
        return hitLink;
    }

    /**
     * Renders this projectile's sprite from the enemy sheet (and its hit
     * box when that debug option is on).
     */
    paint(ctx: CanvasRenderingContext2D) {
        this.possiblyPaintHitBox(ctx);
        const sheet: SpriteSheet = game.assets.get('enemies');
        // The enemy sprite sheet is 15 sprites wide.
        const spriteIndex: number = this._ssRow * 15 + this._ssCol;
        sheet.drawByIndex(ctx, this.x, this.y, spriteIndex);
    }

    /**
     * Moves the projectile along its direction at a fixed speed and marks
     * it done once it has left the screen entirely.
     */
    update() {
        const SPEED: number = 2.5;
        if (this.dir === 'DOWN') {
            this.y += SPEED;
            if (this.y > Constants.SCREEN_HEIGHT_WITH_HUD + this.h) {
                this.done = true;
            }
        } else if (this.dir === 'LEFT') {
            this.x -= SPEED;
            if (this.x < -this.w) {
                this.done = true;
            }
        } else if (this.dir === 'UP') {
            this.y -= SPEED;
            if (this.y < -this.h) {
                this.done = true;
            }
        } else if (this.dir === 'RIGHT') {
            this.x += SPEED;
            if (this.x > Constants.SCREEN_WIDTH + this.w) {
                this.done = true;
            }
        }
        // The hit box is slightly smaller than the sprite itself.
        this.hitBox.set(this.x + 4, this.y + 3, this.w - 8, this.h - 6);
    }
}
|
<reponame>infamousSs/zod
package com.infamous.zod.base.rest;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.Test;
/**
 * Verifies that a class registered through JerseyRestConfig's resource
 * callback ends up registered on the resulting config.
 */
class JerseyRestConfigTest {
@Test
public void testJerseyRestConfig() {
// The constructor callback receives the resource config to populate.
JerseyRestConfig config = new JerseyRestConfig((resource) -> {
resource.register(NoOptsController.class);
});
assertTrue(config.isRegistered(NoOptsController.class));
}
}
// Minimal, no-op class used purely as a registration target in tests.
class NoOptsController {
}
#-*- coding: utf-8 -*-
# graph
# draw graph
require_if_exist 'rubygems'
require_if_exist 'gruff'
module Graph
# True when the optional Gruff gem was loaded and graphs can be rendered.
def self.graph_drawable?
return defined?(Gruff)
end
# values = {records=>[points(Numeric)]}
# options = {
# :title => 'graph title'
# :tags => ['hash tags']
# :label => ['label of column']
# :start => 'label of start'
# :end => 'label of end'
# }
# Renders a line graph of the given series and returns a message hash with
# an attached image; without Gruff, falls back to a text-only summary
# (max / min / average of all points).
def self.drawgraph(values, options)
notice 'graph: '+options.inspect
if(self.graph_drawable?) then
graph = Gruff::Line.new
length = 0
values.each{ |key, ary|
graph.data(key, ary)
length = [length, ary.size].max
}
# :label may be a Proc (see gen_graph_label_defer) queried with :get,
# or a literal Hash of column labels.
if(options[:label].is_a? Proc) then
label = options[:label].call(:get).freeze
else
label = (options[:label] || Hash.new).freeze
end
if (not options[:end]) then
options[:end] = Time.now
end
notice 'graph: '+label.inspect
graph.labels = label
graph.title = options[:title] + '(' + options[:start].strftime('%Y/%m/%d') + ')'
# Render to a temp file so Message::Image can pick the bitmap up by path.
# NOTE(review): Tempfile appears to be assumed loaded elsewhere — confirm.
tmpfile = Tempfile.open('graph')
tmpfile.write(graph.to_blob)
result = {
:message => "#{options[:start].strftime('%Y/%m/%d %H:%M')}から#{options[:end].strftime('%m/%d %H:%M')}の#{options[:title]}のグラフ",
:tags => options[:tags],
:image => Message::Image.new(tmpfile.path)}
tmpfile.close
result
else
# Text-only fallback when Gruff is unavailable.
table = values.values.flatten
{ :message => "#{options[:start].strftime('%Y/%m/%d %H:%M')}から#{options[:end].strftime('%m/%d %H:%M')}の#{options[:title]}は、最高#{table.max}、最低#{table.min}、平均#{table.avg.round_at(4)}です。",
:tags => options[:tags]} end end
# return graph label generator.
# The returned lambda: call with :get to read the labels, with a non-nil
# value to set the label for the current column, or with nil to stamp the
# hour whenever it changes; every call advances the column counter.
def self.gen_graph_label_defer(default={0 => Time.now.strftime('%H')})
last_sample_time = Time.now
temp_label = default
count = 0
lambda{ |value|
if(value == :get) then
return temp_label
elsif(value != nil) then
temp_label[count] = value
elsif(last_sample_time.strftime('%H') != Time.now.strftime('%H')) then
temp_label[count] = Time.now.strftime('%H')
last_sample_time = Time.now
else
end
count += 1
temp_label
}
end
end
|
<reponame>mtfbwy/typedef
package pd.fenc.codec;
/**
 * Converts between raw bytes and their two-character ASCII hex representation.
 */
public class HexCodec {

    /**
     * ['6','1'] => (byte) 0x61
     *
     * @throws IllegalArgumentException if either character is not a hex digit
     */
    public static byte decode1byte(int hiValue, int loValue) {
        int byteValue = (decode4bit(hiValue) << 4) | decode4bit(loValue);
        return (byte) byteValue;
    }

    /**
     * Maps one hex digit character ('0'-'9', 'A'-'F', 'a'-'f') to its
     * numeric value 0-15.
     */
    private static int decode4bit(int ch) {
        if (ch >= '0' && ch <= '9') {
            return ch - '0';
        }
        if (ch >= 'A' && ch <= 'F') {
            return ch - 'A' + 10;
        }
        if (ch >= 'a' && ch <= 'f') {
            return ch - 'a' + 10;
        }
        throw new IllegalArgumentException();
    }

    /**
     * consume 1 byte and produce 2 int32<br/>
     * (byte) 0x61 => ['6','1']
     */
    public static void encode1byte(byte byteValue, int[] dst, int start) {
        dst[start] = encode4bit((byteValue >> 4) & 0x0F);
        dst[start + 1] = encode4bit(byteValue & 0x0F);
    }

    /**
     * Maps a nibble value 0-15 to its uppercase hex digit character.
     * <p>
     * Bug fix: the original returned {@code value + 'A'} for 10-15, so 10
     * encoded to 'K' instead of 'A'; the correct mapping is
     * {@code value - 10 + 'A'} (10 -> 'A' ... 15 -> 'F').
     */
    private static int encode4bit(int value) {
        if (value >= 0 && value <= 9) {
            return value + '0';
        }
        if (value >= 10 && value <= 15) {
            return value - 10 + 'A';
        }
        throw new IllegalArgumentException();
    }
}
|
<gh_stars>0
package org.jooby.jdbc;
import com.google.inject.Binder;
import com.google.inject.Key;
import com.google.inject.binder.AnnotatedBindingBuilder;
import com.google.inject.name.Names;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import static com.typesafe.config.ConfigValueFactory.fromAnyRef;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import static org.easymock.EasyMock.expect;
import org.jooby.Env;
import org.jooby.funzy.Throwing;
import org.jooby.test.MockUnit;
import org.jooby.test.MockUnit.Block;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import javax.sql.DataSource;
import java.util.Optional;
import java.util.Properties;
@RunWith(PowerMockRunner.class)
@PrepareForTest({Jdbc.class, Properties.class, HikariConfig.class, HikariDataSource.class,
System.class})
public class JdbcTest {
// Pool size value injected via the "runtime.processors-x2" config property.
static String POOL_SIZE = "12";
// Expectation: Jdbc registers both an onStop and an onStarted callback on Env.
private Block onStop = unit -> {
Env env = unit.get(Env.class);
expect(env.onStop(unit.capture(Throwing.Runnable.class))).andReturn(env);
expect(env.onStarted(unit.capture(Throwing.Runnable.class))).andReturn(env);
};
// Expectation: the MySQL-specific Hikari dataSource.* tuning properties are set.
private Block mysql = unit -> {
Properties props = unit.get(Properties.class);
expect(props.setProperty("dataSource.useServerPrepStmts", "true")).andReturn(null);
expect(props.setProperty("dataSource.prepStmtCacheSqlLimit", "2048")).andReturn(null);
expect(props.setProperty("dataSource.cachePrepStmts", "true")).andReturn(null);
expect(props.setProperty("dataSource.prepStmtCacheSize", "250")).andReturn(null);
expect(props.setProperty("dataSource.encoding", "UTF-8")).andReturn(null);
};
// A null database name must be rejected.
@Test(expected = IllegalArgumentException.class)
public void nullname() throws Exception {
new Jdbc(null);
}
// An empty database name must be rejected.
@Test(expected = IllegalArgumentException.class)
public void emptyname() throws Exception {
new Jdbc("");
}
// db=mem must produce an in-memory H2 database whose name comes from
// System.currentTimeMillis() (mocked to 123 here).
@Test
public void memdb() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef("mem"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "123"))
.expect(existingDB(false, "db"))
.expect(currentTimeMillis(123))
.expect(props("org.h2.jdbcx.JdbcDataSource", "jdbc:h2:mem:123;DB_CLOSE_DELAY=-1", "h2.123",
"sa", "", false))
.expect(unit -> {
Properties properties = unit.get(Properties.class);
expect(
properties.setProperty("dataSource.url", "jdbc:h2:mem:{mem.seed};DB_CLOSE_DELAY=-1"))
.andReturn(null);
expect(properties.setProperty("dataSource.user", "sa")).andReturn(null);
expect(properties.setProperty("dataSource.password", "")).andReturn(null);
})
.expect(withDriver(false, "jdbc:h2:mem:123;DB_CLOSE_DELAY=-1"))
.expect(hikariConfig(null))
.expect(hikariDataSource())
.expect(serviceKey("db", "h2", "jdbc:h2:mem:123;DB_CLOSE_DELAY=-1"))
.expect(serviceKey("123"))
.expect(onStop)
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// Registering the same database service keys twice must fail.
@Test(expected = IllegalArgumentException.class)
public void duplicates() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef("mem"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(currentTimeMillis(123))
.expect(existingDB(true, "db"))
.expect(existingDB(true, "123"))
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// Expectation helper: when a driverClassName is present, Jdbc must drop
// dataSourceClassName and switch to an explicit jdbcUrl instead.
private Block withDriver(boolean useDriver, String url) {
return unit -> {
Properties properties = unit.get(Properties.class);
expect(properties.containsKey("driverClassName")).andReturn(useDriver);
if (useDriver) {
expect(properties.remove("dataSourceClassName")).andReturn(null);
expect(properties.setProperty("jdbcUrl", url)).andReturn(null);
}
};
}
// When runtime.processors-x2 (2 here) is small, maximumPoolSize must still
// be set to "10".
@Test
public void minpoolsize() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef("mem"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("runtime.processors-x2", fromAnyRef(2))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "123"))
.expect(existingDB(false, "db"))
.expect(currentTimeMillis(123))
.expect(props("org.h2.jdbcx.JdbcDataSource", "jdbc:h2:mem:123;DB_CLOSE_DELAY=-1", "h2.123",
"sa", "", false, false))
.expect(unit -> {
Properties properties = unit.get(Properties.class);
expect(
properties.setProperty("dataSource.url", "jdbc:h2:mem:{mem.seed};DB_CLOSE_DELAY=-1"))
.andReturn(null);
expect(properties.setProperty("dataSource.user", "sa")).andReturn(null);
expect(properties.setProperty("dataSource.password", "")).andReturn(null);
})
.expect(withDriver(false, "jdbc:h2:mem:123;DB_CLOSE_DELAY=-1"))
.expect(unit -> {
Properties props = unit.get(Properties.class);
expect(props.setProperty("maximumPoolSize", "10")).andReturn(null);
})
.expect(hikariConfig(2, null))
.expect(hikariDataSource())
.expect(serviceKey("db", "h2", "jdbc:h2:mem:123;DB_CLOSE_DELAY=-1"))
.expect(serviceKey("123"))
.expect(onStop)
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// db=fs must produce a file-based H2 database under application.tmpdir;
// the second run-block also checks the captured stop callback closes the
// HikariDataSource.
@Test
public void fsdb() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef("fs"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "jdbctest"))
.expect(existingDB(false, "db"))
.expect(props("org.h2.jdbcx.JdbcDataSource", "jdbc:h2:target/jdbctest", "h2.jdbctest",
"sa", "", false))
.expect(hikariConfig(null))
.expect(hikariDataSource())
.expect(serviceKey("db", "h2", "jdbc:h2:target/jdbctest"))
.expect(serviceKey("jdbctest"))
.expect(withDriver(false, "jdbc:h2:target/jdbctest"))
.expect(onStop)
.expect(unit -> {
unit.get(HikariDataSource.class).close();
})
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
}, unit -> {
unit.captured(Throwing.Runnable.class).iterator().next().run();
});
}
// The doWith(...) callback must receive the HikariConfig before the pool
// is built (verified via the setAllowPoolSuspension expectation).
@Test
public void dbWithCallback() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef("fs"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "jdbctest"))
.expect(existingDB(false, "db"))
.expect(props("org.h2.jdbcx.JdbcDataSource", "jdbc:h2:target/jdbctest", "h2.jdbctest",
"sa", "", false))
.expect(hikariConfig(null))
.expect(hikariDataSource())
.expect(serviceKey("jdbctest"))
.expect(serviceKey("db", "h2", "jdbc:h2:target/jdbctest"))
.expect(onStop)
.expect(withDriver(false, "jdbc:h2:target/jdbctest"))
.expect(unit -> {
HikariConfig h = unit.get(HikariConfig.class);
h.setAllowPoolSuspension(true);
})
.run(unit -> {
new Jdbc()
.doWith((final HikariConfig h) -> {
h.setAllowPoolSuspension(true);
})
.configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// db.url plus explicit db.user/db.password must configure a MySQL
// datasource carrying those credentials.
@Test
public void databaseWithCredentials() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db.url",
ConfigValueFactory.fromAnyRef("jdbc:mysql://localhost/db"))
.withValue("db.user", fromAnyRef("foo"))
.withValue("db.password", fromAnyRef("<PASSWORD>"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "db"))
.expect(props("com.mysql.jdbc.jdbc2.optional.MysqlDataSource", "jdbc:mysql://localhost/db",
"mysql.db", "foo", "bar", false))
.expect(mysql)
.expect(hikariConfig(null))
.expect(hikariDataSource())
.expect(withDriver(false, "jdbc:mysql://localhost/db"))
.expect(serviceKey("db", "mysql", "jdbc:mysql://localhost/db"))
.expect(onStop)
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// A jdbc:derby:... connection string must select the Derby client datasource.
@Test
public void derby() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef("jdbc:derby:testdb"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "testdb"))
.expect(existingDB(false, "db"))
.expect(props("org.apache.derby.jdbc.ClientDataSource", "jdbc:derby:testdb", "derby.testdb",
null, "", false))
.expect(hikariConfig(null))
.expect(withDriver(false, "jdbc:derby:testdb"))
.expect(hikariDataSource())
.expect(serviceKey("testdb"))
.expect(serviceKey("db", "derby", "jdbc:derby:testdb"))
.expect(onStop)
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// Passing the connection string directly to the Jdbc constructor (instead of
// via config) must work and set the datasource "url" property.
@Test
public void connectionString() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf")
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "testdb"))
.expect(props("org.apache.derby.jdbc.ClientDataSource", null, "derby.testdb",
null, "", false))
.expect(hikariConfig(null))
.expect(unit -> {
Properties props = unit.mock(Properties.class);
expect(props.setProperty("url", "jdbc:derby:testdb")).andReturn(null);
HikariConfig hconf = unit.get(HikariConfig.class);
expect(hconf.getDataSourceProperties()).andReturn(props);
})
.expect(hikariDataSource())
.expect(withDriver(false, "jdbc:derby:testdb"))
.expect(serviceKey("testdb", "derby", "jdbc:derby:testdb"))
.expect(onStop)
.run(unit -> {
new Jdbc("jdbc:derby:testdb").configure(unit.get(Env.class), config,
unit.get(Binder.class));
});
}
// A jdbc:db2:... URL must select the DB2 simple datasource.
@Test
public void db2() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db",
ConfigValueFactory.fromAnyRef("jdbc:db2://127.0.0.1:50000/SAMPLE"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "SAMPLE"))
.expect(existingDB(false, "db"))
.expect(props("com.ibm.db2.jcc.DB2SimpleDataSource", "jdbc:db2://127.0.0.1:50000/SAMPLE",
"db2.SAMPLE", null, "", false))
.expect(hikariConfig(null))
.expect(hikariDataSource())
.expect(withDriver(false, "jdbc:db2://127.0.0.1:50000/SAMPLE"))
.expect(serviceKey("SAMPLE"))
.expect(serviceKey("db", "db2", "jdbc:db2://127.0.0.1:50000/SAMPLE"))
.expect(onStop)
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// A jdbc:hsqldb:... URL must select the HSQLDB datasource.
@Test
public void hsql() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db",
ConfigValueFactory.fromAnyRef("jdbc:hsqldb:file"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "file"))
.expect(existingDB(false, "db"))
.expect(props("org.hsqldb.jdbc.JDBCDataSource", "jdbc:hsqldb:file",
"hsqldb.file", null, "", false))
.expect(hikariConfig(null))
.expect(hikariDataSource())
.expect(withDriver(false, "jdbc:hsqldb:file"))
.expect(serviceKey("file"))
.expect(serviceKey("db", "hsqldb", "jdbc:hsqldb:file"))
.expect(onStop)
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// A jdbc:mariadb:... URL must select the MariaDB datasource.
@Test
public void mariadb() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db",
ConfigValueFactory.fromAnyRef("jdbc:mariadb://localhost/db"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "db"))
.expect(props("org.mariadb.jdbc.MySQLDataSource", "jdbc:mariadb://localhost/db",
"mariadb.db", null, "", false))
.expect(hikariConfig(null))
.expect(hikariDataSource())
.expect(serviceKey("db", "mariadb", "jdbc:mariadb://localhost/db"))
.expect(withDriver(false, "jdbc:mariadb://localhost/db"))
.expect(onStop)
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// A jdbc:mysql:... URL must select the MySQL datasource plus the
// MySQL-specific tuning properties (see the `mysql` expectation block).
@Test
public void mysql() throws Exception {
Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
Config dbconf = config.withValue("db",
ConfigValueFactory.fromAnyRef("jdbc:mysql://localhost/db"))
.withValue("application.charset", fromAnyRef("UTF-8"))
.withValue("application.name", fromAnyRef("jdbctest"))
.withValue("application.tmpdir", fromAnyRef("target"))
.withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
.resolve();
new MockUnit(Env.class, Config.class, Binder.class)
.expect(existingDB(false, "db"))
.expect(props("com.mysql.jdbc.jdbc2.optional.MysqlDataSource", "jdbc:mysql://localhost/db",
"mysql.db", null, "", false))
.expect(mysql)
.expect(withDriver(false, "jdbc:mysql://localhost/db"))
.expect(hikariConfig(null))
.expect(hikariDataSource())
.expect(serviceKey("db", "mysql", "jdbc:mysql://localhost/db"))
.expect(onStop)
.run(unit -> {
new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
});
}
// A jdbc:log4jdbc:* URL keeps the underlying (mysql) DataSource but must
// additionally install the log4jdbc DriverSpy as driverClassName.
@Test
public void log4jdbc() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config.withValue("db",
      ConfigValueFactory.fromAnyRef("jdbc:log4jdbc:mysql://localhost/db"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "db"))
      .expect(props("com.mysql.jdbc.jdbc2.optional.MysqlDataSource",
          "jdbc:log4jdbc:mysql://localhost/db",
          "mysql.db", null, "", false))
      .expect(mysql)
      // withDriver(true, ...) — the spy driver path is taken for log4jdbc URLs
      .expect(withDriver(true, "jdbc:log4jdbc:mysql://localhost/db"))
      .expect(unit -> {
        Properties properties = unit.get(Properties.class);
        expect(properties.setProperty("driverClassName", "net.sf.log4jdbc.DriverSpy"))
            .andReturn(null);
      })
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(serviceKey("db", "mysql", "jdbc:log4jdbc:mysql://localhost/db"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// Per-database overrides: a db.* property (db.cachePrepStmts) must be
// forwarded to Hikari as a "dataSource.*" property.
@Test
public void dbspecific() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config.withValue("db.url",
      ConfigValueFactory
          .fromAnyRef("jdbc:mysql://localhost/db?useEncoding=true&characterEncoding=UTF-8"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      // override defaults
      .withValue("db.cachePrepStmts", fromAnyRef(false))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "db"))
      .expect(props("com.mysql.jdbc.jdbc2.optional.MysqlDataSource",
          "jdbc:mysql://localhost/db?useEncoding=true&characterEncoding=UTF-8",
          "mysql.db", null, "", false))
      .expect(mysql)
      .expect(unit -> {
        Properties props = unit.get(Properties.class);
        // the overridden flag lands on the DataSource, prefixed accordingly
        expect(props.setProperty("dataSource.cachePrepStmts", "false")).andReturn(null);
      })
      .expect(
          withDriver(false, "jdbc:mysql://localhost/db?useEncoding=true&characterEncoding=UTF-8"))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(serviceKey("db", "mysql",
          "jdbc:mysql://localhost/db?useEncoding=true&characterEncoding=UTF-8"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// hikari.* configuration keys (connectionTimeout, maximumPoolSize, idleTimeout,
// autoCommit) must be copied verbatim onto the Hikari Properties object.
@Test
public void setHikariOptions() throws Exception {
  long connectionTimeout = 1000;
  int maximumPoolSize = 12;
  long idleTimeout = 800000;
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef("fs"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("hikari.connectionTimeout", fromAnyRef(connectionTimeout))
      .withValue("hikari.maximumPoolSize", fromAnyRef(maximumPoolSize))
      .withValue("hikari.idleTimeout", fromAnyRef(idleTimeout))
      .withValue("hikari.autoCommit", fromAnyRef(false))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "jdbctest"))
      .expect(existingDB(false, "db"))
      .expect(props("org.h2.jdbcx.JdbcDataSource", "jdbc:h2:target/jdbctest", "h2.jdbctest",
          "sa", "", false, false))
      .expect(unit -> {
        Properties props = unit.get(Properties.class);
        // NOTE(review): maximumPoolSize is expected twice — presumably set
        // once from hikari.maximumPoolSize and once from the pool-size
        // default path; confirm against Jdbc.configure.
        expect(props.setProperty("maximumPoolSize", "12")).andReturn(null);
        expect(props.setProperty("maximumPoolSize", "12")).andReturn(null);
        expect(props.setProperty("connectionTimeout", "1000")).andReturn(null);
        expect(props.setProperty("idleTimeout", "800000")).andReturn(null);
        expect(props.setProperty("autoCommit", "false")).andReturn(null);
      })
      .expect(hikariConfig(12))
      .expect(hikariDataSource())
      .expect(withDriver(false, "jdbc:h2:target/jdbctest"))
      .expect(serviceKey("jdbctest"))
      .expect(serviceKey("db", "h2", "jdbc:h2:target/jdbctest"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// hikari.dataSourceClassName must override the DataSource class inferred
// from the URL (here: test.MyDataSource instead of the H2 default).
@Test
public void overrideDataSource() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef("fs"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("hikari.dataSourceClassName", fromAnyRef("test.MyDataSource"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "jdbctest"))
      .expect(existingDB(false, "db"))
      .expect(props("org.h2.jdbcx.JdbcDataSource", "jdbc:h2:target/jdbctest", "h2.jdbctest",
          "sa", "", true))
      .expect(unit -> {
        Properties properties = unit.get(Properties.class);
        expect(properties.setProperty("dataSourceClassName", "test.MyDataSource"))
            .andReturn(null);
      })
      .expect(hikariConfig(null))
      .expect(withDriver(false, "jdbc:h2:target/jdbctest"))
      .expect(hikariDataSource())
      .expect(serviceKey("jdbctest"))
      .expect(serviceKey("db", "h2", "jdbc:h2:target/jdbctest"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// A second, named database ("db.audit") must be configurable independently:
// its own URL, credentials and hikari.* overrides, bound under "db.audit".
@Test
public void twoDatabases() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config.withValue("db.audit.url",
      ConfigValueFactory.fromAnyRef("jdbc:h2:mem:audit;DB_CLOSE_DELAY=-1"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("db.audit.user", fromAnyRef("sa"))
      .withValue("db.audit.password", fromAnyRef(""))
      .withValue("db.audit.hikari.dataSourceClassName", fromAnyRef("test.MyDataSource"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "audit"))
      .expect(existingDB(false, "db.audit"))
      .expect(
          props("org.h2.jdbcx.JdbcDataSource", "jdbc:h2:mem:audit;DB_CLOSE_DELAY=-1", "h2.audit",
              "sa", "", true))
      .expect(unit -> {
        Properties properties = unit.get(Properties.class);
        expect(properties.setProperty("dataSourceClassName", "test.MyDataSource"))
            .andReturn(null);
      })
      .expect(unit -> {
        Properties properties = unit.get(Properties.class);
        // NOTE(review): the {mem.seed} placeholder in the expected URL is
        // presumably expanded later by the Env — confirm in Jdbc.
        expect(
            properties.setProperty("dataSource.url", "jdbc:h2:mem:{mem.seed};DB_CLOSE_DELAY=-1"))
                .andReturn(null);
        expect(properties.setProperty("dataSource.user", "sa")).andReturn(null);
        expect(properties.setProperty("dataSource.password", "")).andReturn(null);
      })
      .expect(withDriver(false, "jdbc:h2:mem:audit;DB_CLOSE_DELAY=-1"))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(serviceKey("audit"))
      .expect(serviceKey("db.audit", "h2", "jdbc:h2:mem:audit;DB_CLOSE_DELAY=-1"))
      .expect(onStop)
      .run(unit -> {
        // the non-default database name is passed to the Jdbc constructor
        new Jdbc("db.audit").configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
/**
 * Expectation: Env is asked whether a DataSource is already registered.
 *
 * @param exists whether the lookup should report an existing binding.
 * @param name binding name, or {@code null} for the unnamed key.
 */
private Block existingDB(boolean exists, String name) {
  return unit -> {
    Env env = unit.get(Env.class);
    // raw Optional on purpose: the same value feeds differently
    // parameterized env.get(...) expectations
    Optional optional;
    if (exists) {
      optional = Optional.of("XXX");
    } else {
      optional = Optional.empty();
    }
    if (name == null) {
      expect(env.get(Key.get(DataSource.class))).andReturn(optional);
    } else {
      expect(env.get(Key.get(DataSource.class, Names.named(name)))).andReturn(optional);
    }
  };
}
// A jdbc:sqlserver URL must select the Microsoft SQLServerDataSource; the
// databaseName parameter ("AdventureWorks") becomes the secondary binding name.
@Test
public void sqlserver() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config.withValue("db",
      ConfigValueFactory.fromAnyRef(
          "jdbc:sqlserver://localhost:1433;databaseName=AdventureWorks;integratedSecurity=true;"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "AdventureWorks"))
      .expect(existingDB(false, "db"))
      .expect(
          props("com.microsoft.sqlserver.jdbc.SQLServerDataSource",
              "jdbc:sqlserver://localhost:1433;databaseName=AdventureWorks;integratedSecurity=true;",
              "sqlserver.AdventureWorks", null, "", false))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(withDriver(false,
          "jdbc:sqlserver://localhost:1433;databaseName=AdventureWorks;integratedSecurity=true;"))
      .expect(serviceKey("AdventureWorks"))
      .expect(serviceKey("db", "sqlserver",
          "jdbc:sqlserver://localhost:1433;databaseName=AdventureWorks;integratedSecurity=true;"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// A jdbc:oracle thin URL must select the Oracle pool DataSource; the SID
// ("orcl") doubles as the secondary binding name.
// Fix: removed a stray empty statement (";") left dangling after resolve().
@Test
public void oracle() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config.withValue("db",
      ConfigValueFactory.fromAnyRef("jdbc:oracle:thin:@myhost:1521:orcl"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "orcl"))
      .expect(existingDB(false, "db"))
      .expect(props("oracle.jdbc.pool.OracleDataSource", "jdbc:oracle:thin:@myhost:1521:orcl",
          "oracle.orcl", null, "", false))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(withDriver(false, "jdbc:oracle:thin:@myhost:1521:orcl"))
      .expect(serviceKey("orcl"))
      .expect(serviceKey("db", "oracle", "jdbc:oracle:thin:@myhost:1521:orcl"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// A jdbc:pgsql URL (impossibl pgjdbc-ng driver) must select
// PGDataSourceWithUrl and bind the "database" name from the URL path.
@Test
public void pgsql() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  String url = "jdbc:pgsql://server/database";
  Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef(url))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "database"))
      .expect(existingDB(false, "db"))
      .expect(
          props("com.impossibl.postgres.jdbc.PGDataSourceWithUrl", "jdbc:pgsql://server/database",
              "pgsql.database", null, "", false))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(withDriver(false, "jdbc:pgsql://server/database"))
      .expect(serviceKey("database"))
      .expect(serviceKey("db", "pgsql", "jdbc:pgsql://server/database"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// A jdbc:postgresql URL must select the official PGSimpleDataSource.
@Test
public void postgresql() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  String url = "jdbc:postgresql://server/database";
  Config dbconf = config.withValue("db", ConfigValueFactory.fromAnyRef(url))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "database"))
      .expect(existingDB(false, "db"))
      .expect(props("org.postgresql.ds.PGSimpleDataSource", "jdbc:postgresql://server/database",
          "postgresql.database", null, "", false))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(withDriver(false, "jdbc:postgresql://server/database"))
      .expect(serviceKey("database"))
      .expect(serviceKey("db", "postgresql", "jdbc:postgresql://server/database"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// A jdbc:jtds:sybase URL must select Sybase's SybDataSource.
@Test
public void sybase() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config
      .withValue("db", ConfigValueFactory.fromAnyRef("jdbc:jtds:sybase://server/database"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "database"))
      .expect(existingDB(false, "db"))
      .expect(props("com.sybase.jdbcx.SybDataSource", "jdbc:jtds:sybase://server/database",
          "sybase.database", null, "", false))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(withDriver(false, "jdbc:jtds:sybase://server/database"))
      .expect(serviceKey("database"))
      .expect(serviceKey("db", "sybase", "jdbc:jtds:sybase://server/database"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// A jdbc:firebirdsql URL must select Jaybird's FBSimpleDataSource.
@Test
public void firebirdsql() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config.withValue("db",
      ConfigValueFactory.fromAnyRef("jdbc:firebirdsql:host:mydb"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "mydb"))
      .expect(existingDB(false, "db"))
      .expect(props("org.firebirdsql.pool.FBSimpleDataSource", "jdbc:firebirdsql:host:mydb",
          "firebirdsql.mydb", null, "", false))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(withDriver(false, "jdbc:firebirdsql:host:mydb"))
      .expect(serviceKey("mydb"))
      .expect(serviceKey("db", "firebirdsql", "jdbc:firebirdsql:host:mydb"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// A jdbc:sqlite URL must select SQLiteDataSource with the file name as db name.
@Test
public void sqlite() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config.withValue("db",
      ConfigValueFactory.fromAnyRef("jdbc:sqlite:testdb"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "testdb"))
      .expect(existingDB(false, "db"))
      .expect(props("org.sqlite.SQLiteDataSource", "jdbc:sqlite:testdb",
          "sqlite.testdb", null, "", false))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(withDriver(false, "jdbc:sqlite:testdb"))
      .expect(serviceKey("testdb"))
      .expect(serviceKey("db", "sqlite", "jdbc:sqlite:testdb"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
// For an unknown JDBC sub-protocol the DataSource class must come from the
// user-supplied "databases.<type>.dataSourceClassName" configuration key.
@Test
public void unknownDb() throws Exception {
  Config config = ConfigFactory.parseResources(getClass(), "jdbc.conf");
  Config dbconf = config
      .withValue("db", ConfigValueFactory.fromAnyRef("jdbc:custom:testdb"))
      .withValue("databases.custom.dataSourceClassName",
          ConfigValueFactory.fromAnyRef("custom.DS"))
      .withValue("application.charset", fromAnyRef("UTF-8"))
      .withValue("application.name", fromAnyRef("jdbctest"))
      .withValue("application.tmpdir", fromAnyRef("target"))
      .withValue("runtime.processors-x2", fromAnyRef(POOL_SIZE))
      .resolve();
  new MockUnit(Env.class, Config.class, Binder.class)
      .expect(existingDB(false, "testdb"))
      .expect(existingDB(false, "db"))
      .expect(props("custom.DS", "jdbc:custom:testdb",
          "custom.testdb", null, "", false))
      .expect(hikariConfig(null))
      .expect(hikariDataSource())
      .expect(withDriver(false, "jdbc:custom:testdb"))
      .expect(serviceKey("testdb"))
      .expect(serviceKey("db", "custom", "jdbc:custom:testdb"))
      .expect(onStop)
      .run(unit -> {
        new Jdbc().configure(unit.get(Env.class), dbconf, unit.get(Binder.class));
      });
}
/** Convenience overload: binding expectations without url/dbtype env entries. */
private Block serviceKey(final String db) {
  return serviceKey(db, null, null);
}
/**
 * Expectation: the HikariDataSource is bound (Guice) and registered (Env)
 * under both the unnamed key and the {@code db}-named key; optionally the
 * resolved url and dbtype are published as named String entries.
 */
@SuppressWarnings("unchecked")
private Block serviceKey(final String db, String dbtype, String url) {
  return unit -> {
    Env env = unit.get(Env.class);
    expect(env.serviceKey()).andReturn(new Env.ServiceKey());
    AnnotatedBindingBuilder<DataSource> binding = unit.mock(AnnotatedBindingBuilder.class);
    // two toInstance calls: one per bind(...) below
    binding.toInstance(unit.get(HikariDataSource.class));
    binding.toInstance(unit.get(HikariDataSource.class));
    Binder binder = unit.get(Binder.class);
    expect(binder.bind(Key.get(DataSource.class))).andReturn(binding);
    expect(binder.bind(Key.get(DataSource.class, Names.named(db)))).andReturn(binding);
    expect(env.set(Key.get(DataSource.class), unit.get(HikariDataSource.class))).andReturn(env);
    expect(env.set(Key.get(DataSource.class, Names.named(db)), unit.get(HikariDataSource.class)))
        .andReturn(env);
    if (url != null) {
      expect(env.set(Key.get(String.class, Names.named(db + ".url")), url)).andReturn(env);
    }
    if (dbtype != null) {
      expect(env.set(Key.get(String.class, Names.named(db + ".dbtype")), dbtype)).andReturn(env);
    }
  };
}
/** Convenience overload using the default pool size (POOL_SIZE + 1). */
private Block hikariConfig(Object poolsize) {
  return hikariConfig(Integer.parseInt(POOL_SIZE) + 1, poolsize);
}
/**
 * Expectation: a HikariConfig is built from the Properties object, with
 * maximumPoolSize resolved via getOrDefault.
 *
 * @param defpoolsize default pool size used when none was configured;
 *        values below 10 are clamped to the "10" floor.
 * @param poolsize explicitly configured pool size, or null for the default path.
 */
private Block hikariConfig(Integer defpoolsize, Object poolsize) {
  return unit -> {
    Properties properties = unit.get(Properties.class);
    if (poolsize == null) {
      if (defpoolsize < 10) {
        expect(properties.getOrDefault("maximumPoolSize", "10"))
            .andReturn("10");
      } else {
        expect(properties.getOrDefault("maximumPoolSize", defpoolsize.toString()))
            .andReturn(POOL_SIZE);
      }
    } else {
      expect(properties.getOrDefault("maximumPoolSize", defpoolsize.toString()))
          .andReturn(poolsize);
    }
    // the config object is captured so later blocks can reference it
    HikariConfig hikari = unit.constructor(HikariConfig.class)
        .build(properties);
    unit.registerMock(HikariConfig.class, hikari);
  };
}
/** Expectation: a HikariDataSource is constructed from the mocked HikariConfig. */
private Block hikariDataSource() {
  return unit -> {
    HikariConfig properties = unit.get(HikariConfig.class);
    HikariDataSource hikari = unit.constructor(HikariDataSource.class)
        .build(properties);
    unit.registerMock(HikariDataSource.class, hikari);
  };
}
/** Expectation: System.currentTimeMillis() is stubbed to return {@code millis}. */
private Block currentTimeMillis(final long millis) {
  return unit -> {
    unit.mockStatic(System.class);
    expect(System.currentTimeMillis()).andReturn(millis);
  };
}
/** Convenience overload: property expectations with the pool-size expectation enabled. */
private Block props(final String dataSourceClassName, final String url, final String name,
    final String username, final String password, final boolean hasDataSourceClassName) {
  return props(dataSourceClassName, url, name, username, password, hasDataSourceClassName, true);
}
/**
 * Expectation: the base Hikari Properties object is created and populated —
 * dataSourceClassName, optional credentials, optional url, poolName and
 * (optionally) maximumPoolSize.
 *
 * NOTE(review): {@code hasDataSourceClassName} is accepted but never read in
 * this body — presumably kept for call-site documentation; confirm.
 */
private Block props(final String dataSourceClassName, final String url, final String name,
    final String username, final String password, final boolean hasDataSourceClassName,
    final boolean poolSize) {
  return unit -> {
    Properties properties = unit.constructor(Properties.class)
        .build();
    expect(properties
        .setProperty("dataSourceClassName", dataSourceClassName))
        .andReturn(null);
    if (username != null) {
      expect(properties
          .setProperty("dataSource.user", username))
          .andReturn(null);
      expect(properties
          .setProperty("dataSource.password", password))
          .andReturn(null);
    }
    if (url != null) {
      expect(properties
          .setProperty("dataSource.url", url))
          .andReturn(null);
    }
    expect(properties.setProperty("poolName", name)).andReturn(null);
    if (poolSize) {
      expect(properties.setProperty("maximumPoolSize", POOL_SIZE)).andReturn(null);
    }
    unit.registerMock(Properties.class, properties);
  };
}
}
|
<reponame>wt-student-projects/real-time-3D<filename>src/Real Time 3D/DemoScene.cpp
#include "SurfaceManager.h"
#include "Win32Codes.h"
#include "DemoScene.h"
#include "Demo.h"
// Scene starts unloaded; heavyweight resources are acquired lazily in enter().
DemoScene::DemoScene()
    : loaded(false)
{
}

DemoScene::~DemoScene()
{
}
// Per-frame logic: optional FX pass, renderer tick, player transform rebuild,
// collision check and camera follow-update.
void DemoScene::onUpdate()
{
    if (Demo::demoSettings.enableFx)
    {
        effects.enable();
    }

    renderer.LoadIdentity();
    renderer.onUpdate();

    // Rebuild the player's model matrix from scratch each frame.
    // NOTE(review): "loadIdenditity"/"RAIDAN" are the engine's own (misspelled)
    // API names — renaming them is out of scope for this translation unit.
    player.getMatrix()->loadIdenditity();
    player.getMatrix()->rotate(RAIDAN(90), vec3(0.0, 0.0, 1.0));
    player.getMatrix()->rotate(RAIDAN(90), vec3(0.0, 1.0, 0.0));
    player.getMatrix()->scale(vec3(0.03, 0.03, 0.03));

    SurfaceManager::get()->checkForCollision();
    camera.onUpdate(true);
}
// Draws one frame: 3D world, optional FX, debug surfaces, 2D UI buttons,
// then a second pass that renders the minimap to a buffer and to screen.
void DemoScene::render()
{
    renderer.render();
    // Keep the streamed audio buffers topped up once per frame.
    audioPlayer.stream();

    if (Demo::demoSettings.enableFx)
    {
        effects.execute();
    }
    if (Demo::demoSettings.showBoundingBoxes)
    {
        SurfaceManager::get()->renderSurfaces();
    }

    // 2D pass for the UI buttons.
    renderer.render2D(GL_FALSE);
    options.render(&renderer);
    quit.render(&renderer);

    // Back to 3D (honouring the wireframe toggle) for the minimap pass.
    renderer.render3D(Demo::demoSettings.wireframeEnabled);
    miniMap.renderToBuffer();
    renderer.LoadIdentity();
    renderer.onUpdate();
    camera.onUpdate(GL_FALSE);
    renderer.render();
    miniMap.renderToScreen();
    camera.Reset();
}
// Forwards key input to the camera, then handles scene-level shortcuts:
// ENTER switches to scene 2, releasing ESCAPE quits the application.
void DemoScene::onKeyPress(int Key, int State)
{
    camera.onKeyPress(Key, State);

    const bool enterPressed = (Key == ENTER) && (State == PRESSED);
    const bool escapeReleased = (Key == ESCAPE) && (State == RELEASED);

    if (enterPressed)
    {
        SceneManager::get()->switchTo(2);
    }

    if (escapeReleased)
    {
        PostQuitMessage(0);
    }
}
// Mouse-move events are delegated straight to the camera controller.
void DemoScene::onMotion(float pos_x, float pos_y)
{
    camera.onMotion(pos_x, pos_y);
}
// Routes mouse clicks to the two UI buttons: "Options" switches to scene 2,
// "Quit" posts the Win32 quit message.
void DemoScene::mousePress(int Key, int State, int x, int y)
{
    if (options.mouseState(Key, State, x, y))
    {
        SceneManager::get()->switchTo(2);
    }
    if (quit.mouseState(Key, State, x, y))
    {
        PostQuitMessage(0);
    }
}
// One-time scene initialisation (guarded by `loaded`): UI buttons, FX
// framebuffer, skybox, terrain heightmap with road overlay, then the
// graphics and audio subsystems.
void DemoScene::enter()
{
    if (!loaded)
    {
        options.setPosition("Options", vec2(1050, 610), vec2(200, 50));
        options.getTexture()->setShade(vec4(1.0, 0.0, 0.0, 1.0));
        quit.setPosition("Quit", vec2(1050, 10), vec2(200, 50));
        quit.getTexture()->setShade(vec4(1.0, 0.0, 0.0, 1.0));

        effects.initialise(1280, 720);
        skybox.folder("data/skybox/");
        skybox.setDistance(10000);
        skybox.setup();

        overlaymap.setOverlayImage("data/img/road.jpg");
        overlaymap.setOverlayMap("data/img/pathway.png");
        heightmap.setHeightMap("data/img/heightmap.png");
        heightmap.setMapTexture("data/img/grass.png");
        heightmap.PushOverlay(&overlaymap);

        // Flip the guard so re-entering the scene skips this setup.
        loaded = !loaded;
        setupGraphics();
        setupAudio();
    }
}
// Loads the city model and player MD2 mesh, wires the scene objects into the
// renderer, and registers the camera/player with the surface manager.
void DemoScene::setupGraphics()
{
    city.load("data/models/Metro 1.3ds", "data/models/", true);
    city.setScale(0.75f);
    city.setRotate(RAIDAN(90), vec3(-1, 0, 0));
    city.setTranslate(vec3(-5.0, -2.5, -15));

    player.readTexture("data/models/pac3D.bmp");
    player.readMD2Model("data/models/pac3D.md2", true);

    renderer.pushHeightmap(&heightmap);
    renderer.pushSkybox(&skybox);
    renderer.pushModel(&city);
    renderer.prepare();
    // 70° FOV, 16:9 aspect, near/far planes 0.1 .. 10000
    renderer.perspective(70, vec2(16, 9), vec2(0.1, 10000));
    renderer.setSurfaceSize(vec4(0, 0, 1280, 720));

    camera.setPlayerModel(&player);
    camera.initialise(&renderer);
    miniMap.initialise(&camera);

    SurfaceManager::get()->pushCamera(&camera);
    SurfaceManager::get()->pushCameraObject(player.getSurface());
}
// Starts the positional background music and hooks the audio player to the
// camera/renderer so 3D attenuation can be computed.
void DemoScene::setupAudio()
{
    music.setAudioSource("data/audio/PianoMono.mp3");
    music.setPosition(vec3(-1, -1, -1));
    music.play();

    audioPlayer.pushClip(&music);
    audioPlayer.set3DCamera(&camera);
    audioPlayer.set3DRenderer(&renderer);
    audioPlayer.initialise();
}
public static void main(String[] args)
{
int[] array = {-3, 14, 1, 22, 0, 18};
int max = array[0];
int min = array[0];
for(int i = 1; i < array.length; i++)
{
if(array[i] > max)
{
max = array[i];
}
if(array[i] < min)
{
min = array[i];
}
}
System.out.println("The maximum value of the array is: "+max);
System.out.println("The minimum value of the array is: "+min);
} |
#!/bin/sh
# Smoke test: base58-encoding the input "1" must produce "r".
b58=$(base58 1)
test x$b58 = xr
|
#!/bin/bash
# LinuxGSM fix_kf2.sh function
# Author: Daniel Gibbs
# Website: https://linuxgsm.com
# Description: Resolves various issues with Killing Floor 2.
local commandname="FIX"
local commandaction="Fix"
local function_selfname="$(basename "$(readlink -f "${BASH_SOURCE[0]}")")"

# Override the start parameters so the generated configs use the
# Versus Survival game mode on the default map.
fn_parms(){
	parms="\"${defaultmap}?Game=KFGameContent.KFGameInfo_VersusSurvival\""
}

# KF2 only writes its config files after a first run: start the server,
# give it time to generate them, then stop it again.
fn_print_information "starting ${gamename} server to generate configs."
fn_sleep_time
exitbypass=1
command_start.sh
sleep 10
exitbypass=1
command_stop.sh
|
<reponame>tombartkowski/phonestreamer-server
import { expect } from 'chai';
import fc from 'fast-check';
import { PreviewImageUrl } from './../valueObjects/devicePreviewImageUrl';
import { ErrorCode } from '../../../core/AppError';
import isURL from 'validator/lib/isURL';
// Property-based tests (fast-check) for the PreviewImageUrl value object:
// valid web URLs round-trip, invalid strings yield a validation error,
// and the DTO exposes exactly one key.
describe('PreviewImageUrl ValueObject', function () {
  describe('#create', function () {
    it(`when creating with valid url, expect PreviewImageUrl to have it.`, function () {
      fc.assert(
        //Arrange
        fc.property(fc.webUrl(), url => {
          //Act
          const [previewImageUrl] = PreviewImageUrl.create(url);
          //Assert
          expect(previewImageUrl?.value).to.be.a('string').that.is.equal(url);
        })
      );
    });
    it(`when creating with invalid url, expect validation error.`, function () {
      fc.assert(
        //Arrange — arbitrary strings filtered down to non-URLs only
        fc.property(
          fc.string().filter(s => !isURL(s)),
          url => {
            //Act
            const [, error] = PreviewImageUrl.create(url);
            //Assert
            expect(error)
              .to.have.a.property('code')
              .equal(ErrorCode.VALIDATION_ERROR);
          }
        )
      );
    });
  });
  describe('#toDto', function () {
    it(`when called, expect an object with only 'previewImageUrl' key.`, function () {
      fc.assert(
        fc.property(fc.webUrl(), input => {
          //Arrange
          const [previewImageUrl] = PreviewImageUrl.create(input);
          //Act
          const dto = previewImageUrl?.toDto();
          //Assert
          expect(dto).to.have.property('previewImageUrl');
          expect(Object.keys(dto)).to.have.lengthOf(1);
        })
      );
    });
  });
});
|
def char_count(string, char):
    """Return how many characters of ``string`` equal ``char``.

    Keeps the original per-character comparison semantics, so a
    multi-character ``char`` never matches (unlike ``str.count``'s
    substring counting).
    """
    # Idiomatic: sum a generator instead of a manual counter loop.
    return sum(1 for c in string if c == char)


result = char_count("Hello World", 'l')
print(result)
#!/usr/bin/env bash
# Upgrade setuptools and optionally install the packages listed in $INSTALL_PIP.
set -o errexit  # Exit script when a command exits with non-zero status
set -o pipefail # Return exit status of the last command in the pipe that failed

# Prefer pip3, fall back to pip. `command -v` is the POSIX-recommended probe
# (more portable and reliable than the external `which`).
pip=$(command -v pip3) || pip=$(command -v pip)

$pip install --prefer-binary --upgrade setuptools

# $INSTALL_PIP stays unquoted on purpose: it may list several packages.
if [ -n "$INSTALL_PIP" ]; then
    $pip install --prefer-binary $INSTALL_PIP
fi
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.music_volume_down = void 0;
// Auto-generated SVG icon definition ("music volume down"): a 64x64 viewBox
// with a speaker polygon and a single volume arc. Do not hand-edit the
// coordinate data — regenerate from the source SVG instead.
var music_volume_down = {
  "viewBox": "0 0 64 64",
  "children": [{
    "name": "g",
    "attribs": {},
    "children": [{
      "name": "polygon",
      "attribs": {
        "fill": "none",
        "stroke": "#000000",
        "stroke-width": "2",
        "stroke-miterlimit": "10",
        "points": "8,32 8,20 20,20 38,2 38,32 38,62 20,44 \r\n\t\t8,44 \t"
      },
      "children": [{
        "name": "polygon",
        "attribs": {
          "fill": "none",
          "stroke": "#000000",
          "stroke-width": "2",
          "stroke-miterlimit": "10",
          "points": "8,32 8,20 20,20 38,2 38,32 38,62 20,44 \r\n\t\t8,44 \t"
        },
        "children": []
      }]
    }, {
      "name": "path",
      "attribs": {
        "fill": "none",
        "stroke": "#000000",
        "stroke-width": "2",
        "stroke-miterlimit": "10",
        "d": "M46,42c5.522,0,10-4.478,10-10s-4.478-10-10-10"
      },
      "children": [{
        "name": "path",
        "attribs": {
          "fill": "none",
          "stroke": "#000000",
          "stroke-width": "2",
          "stroke-miterlimit": "10",
          "d": "M46,42c5.522,0,10-4.478,10-10s-4.478-10-10-10"
        },
        "children": []
      }]
    }]
  }]
};
exports.music_volume_down = music_volume_down; |
<!DOCTYPE html>
<!-- Minimal demo page: a single inert "Order Now" button (no handler wired). -->
<html>
<body>
<button type="button">Order Now</button>
</body>
</html>
<gh_stars>1-10
package sdk
import (
"errors"
"fmt"
)
type NotFoundError struct {
Err error
}
func (e *NotFoundError) Error() string {
return fmt.Sprintf("Resource not found: %s", e.Err)
}
func (e *NotFoundError) Unwrap() error { return e.Err }
func IsNotFoundError(err error) bool {
var notFound *NotFoundError
return errors.As(err, ¬Found)
}
|
<filename>sys_prog/java/io/github/lionell/lab2/Runner.java
package io.github.lionell.lab2;
/** Created by lionell on 10/3/16. */
// Builds two NFAs over the decimal-number alphabet and prints whether they
// are equivalent. Both accept optionally signed decimals with a dot; they
// differ only in how the final state is reached (epsilon move to q5 vs q3).
public class Runner {
  public static void main(String[] args) {
    // Alphabet: digits, sign characters and the decimal point.
    final String SIGMA = "0123456789+-.";
    final char[] DIGITS = "0123456789".toCharArray();

    // Automaton A: accepting state q5, reached from q3 via an epsilon move.
    NFA.Builder a = new NFA.Builder();
    a.setQ0("q0");
    a.addSigma(SIGMA);
    a.setEpsilon('@');
    a.bind("q0", "q1", "+-@".toCharArray());
    a.bind("q1", "q1", DIGITS);
    a.bind("q1", "q2", '.');
    a.bind("q2", "q3", DIGITS);
    a.bind("q3", "q3", DIGITS);
    a.bind("q1", "q4", DIGITS);
    a.bind("q4", "q3", '.');
    a.bind("q3", "q5", '@');
    a.addFinals("q5");

    // Automaton B: same transitions but q3 itself is the accepting state.
    NFA.Builder b = new NFA.Builder();
    b.setQ0("q0");
    b.addSigma(SIGMA);
    b.setEpsilon('@');
    b.bind("q0", "q1", "+-@".toCharArray());
    b.bind("q1", "q1", DIGITS);
    b.bind("q1", "q2", '.');
    b.bind("q2", "q3", DIGITS);
    b.bind("q3", "q3", DIGITS);
    b.bind("q1", "q4", DIGITS);
    b.bind("q4", "q3", '.');
    b.addFinals("q3");

    System.out.println(a.equalsTo(b));
  }
}
|
import { IH2 } from '@/types';
// Writes the numbering label (e.g. "1.2.") into a heading's anchor element
// and hands the element back for chaining.
const generateLink = (anchor: HTMLSpanElement, label: string) => {
  anchor.innerText = label;
  return anchor;
};
/**
 * Builds a nested table of contents from the `wiki-heading` elements inside
 * `contents`. Each heading gets a stable `toc<i>` id and a hierarchical
 * number ("1.", "1.2.", …) written into its anchor; entries are appended to
 * `h2Data` (which is also returned).
 *
 * Refactor: the five near-identical H2–H6 branches are collapsed into a
 * single counter-driven descent, preserving the original behavior exactly
 * (including ignoring sub-headings that appear before the first H2).
 */
export const generateToc = (contents: HTMLDivElement, h2Data: IH2[]) => {
  const contentsArray = Array.from(contents.children);
  const headingContents = contentsArray.filter((node) => node.className.includes('wiki-heading'));

  // Give every heading element a stable anchor id.
  for (let i = 0; i < headingContents.length; i++) {
    headingContents[i].id = `toc${i}`;
  }

  // counters[0..4] hold the running numbers for H2..H6 respectively.
  const counters = [0, 0, 0, 0, 0];

  headingContents.forEach((item) => {
    const match = /^H([2-6])$/.exec(item.nodeName);
    if (!match) {
      return;
    }
    const level = Number(match[1]);
    // Sub-headings before the first H2 are ignored — same as the original
    // per-level `h2Data.length > 0` guards.
    if (level > 2 && h2Data.length === 0) {
      return;
    }

    const { id, children } = item as HTMLDivElement;
    const aData = children[0] as HTMLAnchorElement;
    const spanData = children[1] as HTMLSpanElement;
    const text = spanData.innerText;
    // Only headings whose <span> wraps child elements get a fragment link.
    const link = spanData.children.length !== 0 ? decodeURI(text.replace(/[ ]/g, '_')) : '';

    // Bump this level's counter and reset every deeper level.
    counters[level - 2]++;
    for (let d = level - 1; d <= 4; d++) {
      counters[d] = 0;
    }
    generateLink(aData, counters.slice(0, level - 1).map((n) => `${n}.`).join(''));

    // Descend to the `items` list of the most recent ancestor at each level.
    let parent: any[] = h2Data;
    for (let d = 0; d < level - 2; d++) {
      parent = parent[parent.length - 1].items;
    }

    // H6 entries are leaves and carry no `items` array (as before).
    parent.push(level === 6
      ? { id, text, name: item.nodeName, link }
      : { id, text, name: item.nodeName, link, items: [] });
  });

  return h2Data;
};
|
<filename>src/ClockTimeValue.d.ts
/*!
* @author electricessence / https://github.com/electricessence/
* @license MIT
*/
/** A time of day broken into clock components; finer resolutions are optional. */
export default interface ClockTimeValue
{
	/** Hour component. */
	hour: number;
	/** Minute component. */
	minute: number;
	/** Optional seconds component. */
	second?: number;
	/** Optional milliseconds component. */
	millisecond?: number;
	/** Optional sub-millisecond tick component. */
	tick?: number;
}
|
#!/bin/bash
# Runs mocha on every *_tests.js under the given path, skipping atlas tests
# and anything inside node-next directories.
if [ "$#" -ne 1 ]; then
	echo "usage: run_each_test <test path>"
	# Fix: a bare `exit` reported success (status 0) on a usage error.
	exit 1
fi
TEST_PATH=$1
# Fix: quote the path so directories containing spaces don't word-split.
find "$TEST_PATH" -type f \( -iname "*_tests.js" ! -iname "*atlas*" ! -path "*node-next*" \) -exec npx mocha {} \;
# Launch jMAVSim (lockstep, background) and then the PX4 SITL binary.
# NOTE(review): the absolute /home/anton paths make this machine-specific —
# consider deriving them from $PWD or an env var.
cd ..
./Tools/jmavsim_run.sh -l &
/home/anton/projects/PX4-Autopilot/Tools/sitl_run.sh /home/anton/projects/PX4-Autopilot/build/px4_sitl_default/bin/px4 none none none none /home/anton/projects/PX4-Autopilot /home/anton/projects/PX4-Autopilot/build/px4_sitl_default
cd run
|
#https://developer.arm.com/open-source/gnu-toolchain/gnu-rm/downloads
# Cross-compiles the LA104 "fancy EPD" app for Cortex-M3 (thumb, soft float,
# no exceptions/RTTI), links against the BIOS library, dumps listings, and
# strips the result into 89fancy.elf.
mkdir -p build
cd build

# Compile all C++ sources to object files in one invocation.
arm-none-eabi-g++ -Os -Werror -fno-common -mcpu=cortex-m3 -mthumb -msoft-float -fno-exceptions -fno-rtti -fno-threadsafe-statics -Wno-psabi -DLA104 -DARDUINO=1000 -MD -D _ARM -D STM32F10X_HD -c \
  ../source/main.cpp \
  ../source/fancyepd/FancyEPD.cpp \
  ../source/adafruitgfx/Adafruit_GFX.cpp \
  -I../../../os_library/include/

# Link with the custom app linker script against the LA104 BIOS library.
arm-none-eabi-gcc -fPIC -mcpu=cortex-m3 -mthumb -o output.elf -nostartfiles -T ../source/app.lds \
  ./main.o \
  ./FancyEPD.o \
  ./Adafruit_GFX.o \
  -lbios_la104 -L../../../os_library/build

# Human-readable disassembly and ELF metadata for debugging.
arm-none-eabi-objdump -d -S output.elf > output.asm
arm-none-eabi-readelf -all output.elf > output.txt

# Clean intermediate objects/dep files, then strip to the final app image.
find . -type f -name '*.o' -delete
find . -type f -name '*.d' -delete
../../../../tools/elfstrip/elfstrip output.elf 89fancy.elf
|
# Reads the flour unit price and shopping quantities from stdin, derives the
# other unit prices from flour, and prints the total bill to 2 decimals.
price_for_kg_of_flour = float(input())
kg_of_flour = float(input())
kg_of_sugar = float(input())
number_of_egg_card = int(input())
packet_of_may = int(input())

# Derived prices: sugar is 25% cheaper than flour, a pack of eggs is 10%
# dearer than flour, mayonnaise is 80% cheaper than sugar.
price_for_kg_of_sugar = price_for_kg_of_flour - price_for_kg_of_flour * 25 / 100
price_for_pack_of_eggs = price_for_kg_of_flour + price_for_kg_of_flour * 10 / 100
price_for_pack_of_may = price_for_kg_of_sugar - price_for_kg_of_sugar * 80 / 100

money_for_flour = price_for_kg_of_flour * kg_of_flour
money_for_sugar = price_for_kg_of_sugar * kg_of_sugar
money_for_eggs = price_for_pack_of_eggs * number_of_egg_card
money_for_may = price_for_pack_of_may * packet_of_may

sum_of_all = money_for_flour + money_for_sugar + money_for_eggs + money_for_may
print(f"{sum_of_all:.2f}")
#!/usr/bin/env bash
# Abort on the first failing command.
set -e

trap ctrl_c INT

# Exit immediately (status 1) when the user hits Ctrl-C.
function ctrl_c() {
	exit 1
}
# Print usage and exit with status 2 (invalid invocation).
function usage() {
    me=$(basename "$0")
    echo
    echo "Usage: $me generate <destination> <sample_name> [sql|sqllight|sqlfull|micro|mongodb|cassandra|couchbase] | list"
    echo
    echo "Examples:"
    echo "$me generate /tmp/ngx-default/ ngx-default sql"
    echo "$me generate /tmp/ngx-default/ ngx-session-cassandra-fr cassandra"
    echo
    exit 2
}
# Generate a JHipster sample project into $JHI_FOLDER_APP.
# Requires the destination folder to be empty (or missing); copies the
# sample's .yo-rc.json and sources the entity-generation script.
function generateProject() {
    cd "$mydir"
    echo "JHI_FOLDER_APP=$JHI_FOLDER_APP"
    echo "JHI_APP=$JHI_APP"
    echo "JHI_ENTITY=$JHI_ENTITY"
    if [ ! -d "$JHI_FOLDER_APP" ]; then
        echo "*** Create $JHI_FOLDER_APP"
        mkdir -p "$JHI_FOLDER_APP"
    fi
    # FIX: quote "$JHI_FOLDER_APP" inside the command substitution — the
    # unquoted expansion broke (word-split) on destination paths containing
    # spaces. `-n` replaces the equivalent but clunkier `! -z`.
    if [ -n "$(ls -A "$JHI_FOLDER_APP")" ]; then
        echo "*** The folder is not empty: $JHI_FOLDER_APP"
        exit 1
    else
        mkdir -p "$JHI_FOLDER_APP"/.jhipster/
        echo "*** Empty folder, let's generate JHipster project in: $JHI_FOLDER_APP"
    fi
    pushd scripts/
    echo "*********************** Copying entities for $JHI_APP"
    source ./11-generate-entities.sh
    popd
    echo "*********************** Copy configuration"
    cp -f "$JHI_SAMPLES"/"$JHI_APP"/.yo-rc.json "$JHI_FOLDER_APP"/
    ls -al "$JHI_FOLDER_APP"/
}
# Resolve this script's directory; samples live beside it.
mydir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
JHI_SAMPLES="$mydir/samples"
# "list": print every sample directory that has a .yo-rc.json, skipping
# *-sample template folders.
if [ "$1" = "list" ]; then
    for dir in $(ls -1 "$JHI_SAMPLES"); do
        if [ -f "$JHI_SAMPLES/$dir/.yo-rc.json" ] && [[ $dir != *-sample ]]; then
            echo "$dir"
        fi
    done
# "generate": needs destination ($2) and sample name ($3); entity list ($4)
# is optional.
elif [ "$1" = "generate" ]; then
    if [ "$3" != "" ]; then
        JHI_FOLDER_APP=$2
        JHI_APP=$3
        JHI_ENTITY=$4
        generateProject
    else
        usage
    fi
else
    usage
fi
|
# Train SSMIS F-16 Zeeman transmittance coefficients for channels 19-22.
SSMIS=ssmis_f16
sensor=z${SSMIS}
#data_dir=/u/wx23yh/noscrub_jcsda/work_zeeman2/z${SSMIS}
data_dir=/jcsda/noscrub/wx23yc/CRTM_ODPStraining/work_zeeman2/z${SSMIS}
# For each channel, feed the three input file paths to Compute_Coeff on
# stdin via a here-document (tau table, atmosphere profile, SpcCoeff file).
for chan in 19 20 21 22;do
/jcsda/save/wx23yc/CRTM_clean_copy/src_0824/src/Zeeman/training/ssmis/src_ssmis/Compute_Coeff<<EOF
${data_dir}/zssmis_tau_UMBC_101LVL_48.${chan}.txt
${data_dir}/AtmProfile.txt
/jcsda/noscrub/wx23yc/CRTM_ODPStraining/Zeeman/SpcCoeff/exec/${sensor}.SpcCoeff.nc
EOF
# Rename the fixed-name outputs to per-channel, per-sensor filenames
# (fort.40 is the Fortran unit holding the fitting-error report).
mv TauCoeff.nc TauCoeff.${chan}.${sensor}.nc
mv fort.40 fitting_err.${chan}.${sensor}.txt
done
|
<reponame>Zomis/Monopoly
package net.zomis.monopoly.model;
import java.util.Set;
public class Inventory<E> {
    // Items held in this inventory. NOTE(review): never initialized or
    // accessed anywhere in this class — presumably a stub awaiting
    // implementation; confirm intent before relying on it.
    private Set<E> elements;
}
|
#!/bin/bash
#SBATCH --account=rrg-pbellec
#SBATCH --job-name=smriprep_sub-7573547.job
#SBATCH --output=/scratch/fnadeau/cima-q/1633992344/smriprep_sub-7573547.out
#SBATCH --error=/scratch/fnadeau/cima-q/1633992344/smriprep_sub-7573547.err
#SBATCH --time=24:00:00
#SBATCH --cpus-per-task=16
#SBATCH --mem-per-cpu=4096M
#SBATCH --mail-user=francois.nadeau.1@umontreal.ca
#SBATCH --mail-type=BEGIN
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL
# FreeSurfer license and TemplateFlow cache, injected into the container env.
export SINGULARITYENV_FS_LICENSE=$HOME/.freesurfer.txt
export SINGULARITYENV_TEMPLATEFLOW_HOME=/templateflow
module load singularity/3.8
#copying input dataset into local scratch space
rsync -rltv --info=progress2 --exclude="sub*" --exclude="derivatives" /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q $SLURM_TMPDIR
rsync -rltv --info=progress2 /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/sub-7573547 $SLURM_TMPDIR/cima-q
# Run fMRIPrep on the single participant from node-local scratch.
singularity run --cleanenv -B $SLURM_TMPDIR:/DATA -B /home/fnadeau/.cache/templateflow:/templateflow -B /etc/pki:/etc/pki/ -B /scratch/fnadeau/cima-q/1633992344:/OUTPUT /lustre03/project/6003287/containers/fmriprep-20.2.1lts.sif -w /DATA/fmriprep_work --participant-label 7573547 --bids-database-dir /DATA/cima-q/.pybids_cache --bids-filter-file /OUTPUT/bids_filters.json --output-spaces MNI152NLin2009cAsym MNI152NLin6Asym fsnative anat --output-layout bids --notrack --skip_bids_validation --write-graph --omp-nthreads 8 --nprocs 16 --mem_mb 65536 --resource-monitor /DATA/cima-q /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives participant
fmriprep_exitcode=$?
# On failure, keep the work directory for debugging; on success, save the
# resource-monitor log and mirror the derivatives into scratch.
if [ $fmriprep_exitcode -ne 0 ] ; then cp -R $SLURM_TMPDIR/fmriprep_work /scratch/fnadeau/cima-q/1633992344/smriprep_sub-7573547.workdir ; fi
if [ $fmriprep_exitcode -eq 0 ] ; then cp $SLURM_TMPDIR/fmriprep_work/fmriprep_wf/resource_monitor.json /scratch/fnadeau/cima-q/1633992344/smriprep_sub-7573547_resource_monitor.json ; fi
if [ $fmriprep_exitcode -eq 0 ] ; then mkdir -p /scratch/fnadeau/cima-q/1633992344//project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives-cima-q ; fi
if [ $fmriprep_exitcode -eq 0 ] ; then cp -R /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives/* /scratch/fnadeau/cima-q/1633992344//project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives-cima-q/ ; fi
exit $fmriprep_exitcode
|
/*
* text_ExternalFormattedText.cpp
*/
#include <string>
#include "utilities/json_JsonUtilities.h"
#include "text/text_ExternalPlainText.h"
#include "text/text_ExternalFormattedText.h"
#include "text/text_ExternalTextConverter.h"
namespace
{
const static std::string RED_KEY = "red";
const static std::string GREEN_KEY = "green";
const static std::string BLUE_KEY = "blue";
const static std::string COLOR_STYLE_KEY = "color";
const static std::string BOLD_STYLE_KEY = "bold";
const static std::string ITALIC_STYLE_KEY = "italic";
const static std::string UNDERLINE_STYLE_KEY = "underline";
const static std::string INVERSE_STYLE_KEY = "inverse";
}
namespace mutgos
{
namespace text
{
// ----------------------------------------------------------------------
bool ExternalFormattedText::save(
    json::JSONRoot &root,
    json::JSONNode &node) const
{
    // Serialize base-class fields first; skip everything else on failure.
    bool success = ExternalPlainText::save(root, node);

    if (success)
    {
        // Color style is always written.
        success = json::add_static_key_static_value(
            COLOR_STYLE_KEY,
            ExternalTextConverter::color_to_string(color_style),
            node,
            root);

        if (color_style == COLOR_CUSTOM)
        {
            // Need to also serialize RGB values
            //
            success = json::add_static_key_value(RED_KEY, red, node, root)
                and success;

            success = json::add_static_key_value(GREEN_KEY, green, node, root)
                and success;

            success = json::add_static_key_value(BLUE_KEY, blue, node, root)
                and success;
        }

        // Only serialize the boolean styles if they are true, to conserve
        // space and CPU.
        //
        if (bold_style)
        {
            success = json::add_static_key_value(
                BOLD_STYLE_KEY,
                bold_style,
                node,
                root) and success;
        }

        if (italic_style)
        {
            success = json::add_static_key_value(
                ITALIC_STYLE_KEY,
                italic_style,
                node,
                root) and success;
        }

        if (underline_style)
        {
            success = json::add_static_key_value(
                UNDERLINE_STYLE_KEY,
                underline_style,
                node,
                root) and success;
        }

        // BUG FIX: this guard previously tested underline_style (copy/paste
        // error), so inverse was serialized only when underline was also set,
        // and an inverse-only style was silently dropped on save.
        if (inverse_style)
        {
            success = json::add_static_key_value(
                INVERSE_STYLE_KEY,
                inverse_style,
                node,
                root) and success;
        }
    }

    return success;
}
// ----------------------------------------------------------------------
// Restores formatting attributes from JSON. Returns false when the base
// class fails to restore or the color key is missing/invalid; RGB components
// are required only for COLOR_CUSTOM, and the boolean styles are optional
// (absent keys leave them false/unchanged).
bool ExternalFormattedText::restore(const json::JSONNode &node)
{
    bool success = ExternalPlainText::restore(node);
    std::string color_style_string;

    if (success)
    {
        success =
            json::get_key_value(COLOR_STYLE_KEY, node, color_style_string);

        if (success)
        {
            color_style = ExternalTextConverter::string_to_color(
                color_style_string);
            // An unrecognized color string maps to COLOR_END_INVALID.
            success = (color_style != COLOR_END_INVALID);
        }

        if (not success)
        {
            // Fall back to the default color on a bad/missing color key.
            color_style = COLOR_DEFAULT;
        }
        else
        {
            if (color_style == COLOR_CUSTOM)
            {
                // Custom colors carry explicit RGB; each component is read
                // into a wider int and narrowed to 8 bits.
                MG_UnsignedInt int_value = 0;

                success = json::get_key_value(RED_KEY, node, int_value)
                    and success;
                red = (osinterface::OsTypes::UnsignedInt8) int_value;

                success = json::get_key_value(GREEN_KEY, node, int_value)
                    and success;
                green = (osinterface::OsTypes::UnsignedInt8) int_value;

                success = json::get_key_value(BLUE_KEY, node, int_value)
                    and success;
                blue = (osinterface::OsTypes::UnsignedInt8) int_value;
            }

            // Defaults to false, and these are all optional.
            //
            json::get_key_value(BOLD_STYLE_KEY, node, bold_style);
            json::get_key_value(ITALIC_STYLE_KEY, node, italic_style);
            json::get_key_value(UNDERLINE_STYLE_KEY, node, underline_style);
            json::get_key_value(INVERSE_STYLE_KEY, node, inverse_style);
        }
    }

    return success;
}
}
}
|
<gh_stars>1-10
import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
# with open("dataset/games.json") as file:
# data = json.load(file)
# try:
# with open("data.pickle", "rb") as f:
# words, labels, training, output = pickle.load(f)
# except:
# words = []
# labels = []
# docs_x = []
# docs_y = []
# for intent in data["intents"]:
# for pattern in intent["patterns"]:
# wrds = nltk.word_tokenize(pattern)
# words.extend(wrds)
# docs_x.append(wrds)
# docs_y.append(intent["tag"])
# if intent["tag"] not in labels:
# labels.append(intent["tag"])
# words = [stemmer.stem(w.lower()) for w in words if w != "?"]
# words = sorted(list(set(words)))
# labels = sorted(labels)
# training = []
# output = []
# out_empty = [0 for _ in range(len(labels))]
# for x, doc in enumerate(docs_x):
# bag = []
# wrds = [stemmer.stem(w.lower()) for w in doc]
# for w in words:
# if w in wrds:
# bag.append(1)
# else:
# bag.append(0)
# output_row = out_empty[:]
# output_row[labels.index(docs_y[x])] = 1
# training.append(bag)
# output.append(output_row)
# training = numpy.array(training)
# output = numpy.array(output)
# with open("data.pickle", "wb") as f:
# pickle.dump((words, labels, training, output), f)
# tensorflow.reset_default_graph()
# net = tflearn.input_data(shape=[None, len(training[0])])
# net = tflearn.fully_connected(net, 8)
# net = tflearn.fully_connected(net, 8)
# net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
# net = tflearn.regression(net)
# model = tflearn.DNN(net)
# try:
# except:
# model.fit(training, output, n_epoch=500, batch_size=8, show_metric=True)
# model.save("model_2.tflearn")
def bag_of_words(s, words):
    """Encode sentence ``s`` as a binary bag-of-words vector over ``words``.

    Position i is 1 when vocabulary word words[i] matches any stemmed,
    lower-cased token of ``s``, else 0.
    """
    stemmed_tokens = [stemmer.stem(token.lower())
                      for token in nltk.word_tokenize(s)]
    return numpy.array([1 if vocab_word in stemmed_tokens else 0
                        for vocab_word in words])
def get_text():
    """Render the Streamlit input box and return the raw user text."""
    user_message = st.text_input("You: ","type here")
    return user_message
def chat():
    """Return the response list for the intent predicted from the current
    Streamlit input.

    NOTE(review): ``model``, ``words``, ``labels`` and ``data`` are only
    defined in the commented-out training code above — this function cannot
    run until that code is restored; confirm before shipping.
    """
    inp = get_text()
    if inp.lower() == "quit":
        # BUG FIX: the original used `break` here, which is a SyntaxError —
        # the enclosing while-loop was commented out. Return an empty reply
        # instead.
        return ""
    # NOTE(review): `model` is read before it is assigned in this scope
    # (UnboundLocalError at runtime) — presumably meant to load into a
    # module-level model; TODO confirm.
    model = model.load("model_2.tflearn")
    results = model.predict([bag_of_words(inp, words)])
    results_index = numpy.argmax(results)
    tag = labels[results_index]
    for tg in data["intents"]:
        if tg['tag'] == tag:
            responses = tg['responses']
            break
    # NOTE(review): `responses` is unbound if no tag matches — TODO confirm
    # intents always cover every label.
    return responses
# Render the bot's reply in a text area on each Streamlit rerun.
# FIX: removed a stray trailing `|` fused onto this line, which made the
# statement a syntax error.
st.text_area("Bot:", value=chat(), height=200, max_chars=None, key=None)
#!/bin/bash -x

#
# Generated - do not edit!
#

# Macros
# Build/packaging context produced by the NetBeans C/C++ project generator.
TOP=`pwd`
CND_PLATFORM=Cygwin-Windows
CND_CONF=Default
CND_DISTDIR=dist
NBTMPDIR=build/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
# NOTE(review): OUTPUT_PATH is the generator's "MissingOutputInProject"
# placeholder — packaging will fail until the project defines a real output.
OUTPUT_PATH=MissingOutputInProject
OUTPUT_BASENAME=MissingOutputInProject
PACKAGE_TOP_DIR=Triple1.2/

# Functions
# Exit immediately, propagating the previous command's non-zero status.
# Must be called as the very first statement after the command it checks,
# since it reads $? of the last command.
function checkReturnCode
{
    rc=$?
    if [ $rc != 0 ]
    then
        exit $rc
    fi
}
# Create directory $1 (with parents) and optionally chmod it to mode $2.
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
    mkdir -p "$1"
    checkReturnCode
    if [ "$2" != "" ]
    then
        chmod $2 "$1"
        checkReturnCode
    fi
}
# Copy file $1 to $2 and optionally chmod the copy to mode $3.
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
    cp "$1" "$2"
    checkReturnCode
    if [ "$3" != "" ]
    then
        chmod $3 "$2"
        checkReturnCode
    fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}

# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/Triple1.2/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755

# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/Triple1.2.tar
cd ${NBTMPDIR}
# Tar the staged tree; ../../../../ climbs back out of NBTMPDIR to $TOP.
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/Triple1.2.tar *
checkReturnCode

# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
#!/bin/bash
# Thin wrapper: run the privileged ibaa script. Requires sudo rights for
# /usr/local/bin/ibaa.bash (presumably passwordless — confirm sudoers).
sudo /usr/local/bin/ibaa.bash
|
package com.jinke.calligraphy.date;
import java.util.Calendar;
/**
 * A Labeler that displays months
 */
public class MonthLabeler extends Labeler {

    // Format string passed through to Util.getMonth for rendering the label.
    private final String mFormatString;

    public MonthLabeler(String formatString) {
        // 180, 60: dimensions forwarded to Labeler — presumably label cell
        // width/height; confirm against the Labeler base class.
        super(180, 60);
        mFormatString = formatString;
    }

    @Override
    public TimeObject add(long time, int val) {
        // Step by whole months (calendar-aware) rather than fixed intervals.
        return timeObjectfromCalendar(Util.addMonths(time, val));
    }

    @Override
    protected TimeObject timeObjectfromCalendar(Calendar c) {
        return Util.getMonth(c, mFormatString);
    }
}
package dbtest
import (
"os/exec"
"testing"
"github.com/stretchr/testify/require"
)
// TestPostgres verifies that Postgres(t) provisions a temporary database
// reachable via psql, and that Close() tears it down and is idempotent.
// Requires a local `psql` binary on PATH.
func TestPostgres(t *testing.T) {
	db := Postgres(t)
	t.Log("tempdb url", db.DSN)
	// The database must accept connections while open...
	err := exec.Command("psql", db.DSN, "-c", "SELECT 1").Run()
	require.NoError(t, err)
	db.Close()
	// ...and must reject them once closed.
	err = exec.Command("psql", db.DSN, "-c", "SELECT 1").Run()
	require.Error(t, err)
	// ensure Close() can be called multiple times
	db.Close()
}
|
#!/bin/bash

# Start the Google Cloud web development server in `wptd-dev-instance`
# (started using ./run.sh).

# Shared helpers: wptd_exec/wptd_exec_it, info/error logging, absdir.
DOCKER_DIR=$(dirname $0)
source "${DOCKER_DIR}/../commands.sh"
source "${DOCKER_DIR}/../logging.sh"
source "${DOCKER_DIR}/../path.sh"

WPTD_PATH=${WPTD_PATH:-$(absdir ${DOCKER_DIR}/../..)}
# Bind to all interfaces so the port is reachable from outside the container.
WPTD_CONTAINER_HOST=0.0.0.0

set -e
# Print CLI usage via the shared info logger.
usage() {
    USAGE="Usage: web_server.sh [-r]
-r - Allow remote requests (disable host checking)"
    info "${USAGE}"
}
# -r disables dev_appserver host checking (allows remote requests).
HOST_CHECKING=true
while getopts ':rh' flag; do
    case "${flag}" in
        r) HOST_CHECKING=false ;;
        h|*) usage && exit 0;;
    esac
done

info "Pruning node_modules so dev_appserver can handle watching file updates..."
wptd_exec make webapp_node_modules_prune

info "Installing other web server code dependencies"
wptd_exec make webserver_deps
DOCKER_STATUS="${?}"
# NOTE(review): with `set -e` above, a failing wptd_exec aborts the script
# before this check runs, so this error branch looks like dead code — confirm
# whether wptd_exec masks exit statuses or `set -e` should be dropped.
if [ "${DOCKER_STATUS}" != "0" ]; then
    error "Failed to install web server code dependencies"
    exit "${DOCKER_STATUS}"
fi

info "Starting web server. Port forwarded from wptd-dev-instance: 8080"
wptd_exec_it dev_appserver.py \
    --enable_host_checking $HOST_CHECKING \
    --host $WPTD_CONTAINER_HOST \
    --port=8080 \
    --admin_host=$WPTD_CONTAINER_HOST \
    --admin_port=8000 \
    --api_host=$WPTD_CONTAINER_HOST \
    --api_port=9999 \
    -A=wptdashboard \
    /home/user/wpt.fyi/webapp/app.yaml
|
<filename>snips_nlu/pipeline/configs/intent_classifier.py
from __future__ import unicode_literals
from snips_nlu.common.from_dict import FromDict
from snips_nlu.constants import (
CUSTOM_ENTITY_PARSER_USAGE, NOISE, STEMS, STOP_WORDS, WORD_CLUSTERS)
from snips_nlu.entity_parser.custom_entity_parser import (
CustomEntityParserUsage)
from snips_nlu.pipeline.configs import Config, ProcessingUnitConfig
from snips_nlu.resources import merge_required_resources
class LogRegIntentClassifierConfig(FromDict, ProcessingUnitConfig):
    """Configuration of a :class:`.LogRegIntentClassifier`"""

    # pylint: disable=line-too-long
    def __init__(self, data_augmentation_config=None, featurizer_config=None,
                 random_seed=None):
        """
        Args:
            data_augmentation_config (:class:`IntentClassifierDataAugmentationConfig`):
                Defines the strategy of the underlying data augmentation
            featurizer_config (:class:`FeaturizerConfig`): Configuration of the
                :class:`.Featurizer` used underneath
            random_seed (int, optional): Allows to fix the seed to have
                reproducible trainings
        """
        if data_augmentation_config is None:
            data_augmentation_config = IntentClassifierDataAugmentationConfig()
        if featurizer_config is None:
            featurizer_config = FeaturizerConfig()
        # Both sub-configs go through their property setters below, which
        # accept either config instances or plain dicts.
        self._data_augmentation_config = None
        self.data_augmentation_config = data_augmentation_config
        self._featurizer_config = None
        self.featurizer_config = featurizer_config
        self.random_seed = random_seed
    # pylint: enable=line-too-long

    @property
    def data_augmentation_config(self):
        return self._data_augmentation_config

    @data_augmentation_config.setter
    def data_augmentation_config(self, value):
        if isinstance(value, dict):
            self._data_augmentation_config = \
                IntentClassifierDataAugmentationConfig.from_dict(value)
        elif isinstance(value, IntentClassifierDataAugmentationConfig):
            self._data_augmentation_config = value
        else:
            # FIX: the two adjacent string literals previously concatenated to
            # "...or dictbut received..." (missing space).
            raise TypeError("Expected instance of "
                            "IntentClassifierDataAugmentationConfig or dict "
                            "but received: %s" % type(value))

    @property
    def featurizer_config(self):
        return self._featurizer_config

    @featurizer_config.setter
    def featurizer_config(self, value):
        if isinstance(value, dict):
            self._featurizer_config = \
                FeaturizerConfig.from_dict(value)
        elif isinstance(value, FeaturizerConfig):
            self._featurizer_config = value
        else:
            # FIX: same missing-space message bug as above.
            raise TypeError("Expected instance of FeaturizerConfig or dict "
                            "but received: %s" % type(value))

    @property
    def unit_name(self):
        # Imported lazily to avoid a circular import with the classifier module.
        from snips_nlu.intent_classifier import LogRegIntentClassifier
        return LogRegIntentClassifier.unit_name

    def get_required_resources(self):
        resources = self.data_augmentation_config.get_required_resources()
        resources = merge_required_resources(
            resources, self.featurizer_config.get_required_resources())
        return resources

    def to_dict(self):
        return {
            "unit_name": self.unit_name,
            "data_augmentation_config":
                self.data_augmentation_config.to_dict(),
            "featurizer_config": self.featurizer_config.to_dict(),
            "random_seed": self.random_seed
        }
class IntentClassifierDataAugmentationConfig(FromDict, Config):
    """Configuration used by a :class:`.LogRegIntentClassifier` which defines
    how to augment data to improve the training of the classifier"""

    def __init__(self, min_utterances=20, noise_factor=5,
                 add_builtin_entities_examples=True, unknown_word_prob=0,
                 unknown_words_replacement_string=None,
                 max_unknown_words=None):
        """
        Args:
            min_utterances (int, optional): The minimum number of utterances to
                automatically generate for each intent, based on the existing
                utterances. Default is 20.
            noise_factor (int, optional): Defines the size of the noise to
                generate to train the implicit *None* intent, as a multiplier
                of the average size of the other intents. Default is 5.
            add_builtin_entities_examples (bool, optional): If True, some
                builtin entity examples will be automatically added to the
                training data. Default is True.
            unknown_word_prob (float, optional): probability of replacing a
                word with the replacement string during augmentation —
                presumably; confirm against the augmentation code. Must be
                used together with ``unknown_words_replacement_string``.
                Default is 0 (disabled).
            unknown_words_replacement_string (str, optional): token used in
                place of words treated as unknown. Required when
                ``unknown_word_prob`` > 0.
            max_unknown_words (int, optional): upper bound on replaced words;
                must be None or >= 0.
        """
        self.min_utterances = min_utterances
        self.noise_factor = noise_factor
        self.add_builtin_entities_examples = add_builtin_entities_examples
        self.unknown_word_prob = unknown_word_prob
        self.unknown_words_replacement_string = \
            unknown_words_replacement_string
        # Validate the optional bound before storing it.
        if max_unknown_words is not None and max_unknown_words < 0:
            raise ValueError("max_unknown_words must be None or >= 0")
        self.max_unknown_words = max_unknown_words
        # A positive replacement probability is meaningless without a
        # replacement string.
        if unknown_word_prob > 0 and unknown_words_replacement_string is None:
            raise ValueError("unknown_word_prob is positive (%s) but the "
                             "replacement string is None" % unknown_word_prob)

    @staticmethod
    def get_required_resources():
        # Noise corpus and stop words are always needed for augmentation.
        return {
            NOISE: True,
            STOP_WORDS: True
        }

    def to_dict(self):
        return {
            "min_utterances": self.min_utterances,
            "noise_factor": self.noise_factor,
            "add_builtin_entities_examples":
                self.add_builtin_entities_examples,
            "unknown_word_prob": self.unknown_word_prob,
            "unknown_words_replacement_string":
                self.unknown_words_replacement_string,
            "max_unknown_words": self.max_unknown_words
        }
class FeaturizerConfig(FromDict, ProcessingUnitConfig):
    """Configuration of a :class:`.Featurizer` object"""

    # pylint: disable=line-too-long
    def __init__(self, tfidf_vectorizer_config=None,
                 cooccurrence_vectorizer_config=None,
                 pvalue_threshold=0.4,
                 added_cooccurrence_feature_ratio=0):
        """
        Args:
            tfidf_vectorizer_config (:class:`.TfidfVectorizerConfig`, optional):
                empty configuration of the featurizer's
                :attr:`tfidf_vectorizer`
            cooccurrence_vectorizer_config: (:class:`.CooccurrenceVectorizerConfig`, optional):
                configuration of the featurizer's
                :attr:`cooccurrence_vectorizer`
            pvalue_threshold (float): after fitting the training set to
                extract tfidf features, a univariate feature selection is
                applied. Features are tested for independence using a Chi-2
                test, under the null hypothesis that each feature should be
                equally present in each class. Only features having a p-value
                lower than the threshold are kept
            added_cooccurrence_feature_ratio (float, optional): proportion of
                cooccurrence features to add with respect to the number of
                tfidf features. For instance with a ratio of 0.5, if 100 tfidf
                features are remaining after feature selection, a maximum of 50
                cooccurrence features will be added
        """
        self.pvalue_threshold = pvalue_threshold
        self.added_cooccurrence_feature_ratio = \
            added_cooccurrence_feature_ratio
        # Each sub-config may arrive as a ready-made config object or as a
        # plain dict (deserialized JSON); normalize both to config objects.
        if tfidf_vectorizer_config is None:
            tfidf_vectorizer_config = TfidfVectorizerConfig()
        elif isinstance(tfidf_vectorizer_config, dict):
            tfidf_vectorizer_config = TfidfVectorizerConfig.from_dict(
                tfidf_vectorizer_config)
        self.tfidf_vectorizer_config = tfidf_vectorizer_config
        if cooccurrence_vectorizer_config is None:
            cooccurrence_vectorizer_config = CooccurrenceVectorizerConfig()
        elif isinstance(cooccurrence_vectorizer_config, dict):
            cooccurrence_vectorizer_config = CooccurrenceVectorizerConfig \
                .from_dict(cooccurrence_vectorizer_config)
        self.cooccurrence_vectorizer_config = cooccurrence_vectorizer_config
    # pylint: enable=line-too-long

    @property
    def unit_name(self):
        # Imported lazily to avoid a circular import with the classifier module.
        from snips_nlu.intent_classifier import Featurizer
        return Featurizer.unit_name

    def get_required_resources(self):
        required_resources = self.tfidf_vectorizer_config \
            .get_required_resources()
        if self.cooccurrence_vectorizer_config:
            required_resources = merge_required_resources(
                required_resources,
                self.cooccurrence_vectorizer_config.get_required_resources())
        return required_resources

    def to_dict(self):
        return {
            "unit_name": self.unit_name,
            "pvalue_threshold": self.pvalue_threshold,
            "added_cooccurrence_feature_ratio":
                self.added_cooccurrence_feature_ratio,
            "tfidf_vectorizer_config": self.tfidf_vectorizer_config.to_dict(),
            "cooccurrence_vectorizer_config":
                self.cooccurrence_vectorizer_config.to_dict(),
        }
class TfidfVectorizerConfig(FromDict, ProcessingUnitConfig):
    """Configuration of a :class:`.TfidfVectorizer` object"""
    # (docstring fixed: this configures a TfidfVectorizer, not itself)

    def __init__(self, word_clusters_name=None, use_stemming=False):
        """
        Args:
            word_clusters_name (str, optional): if a word cluster name is
                provided then the featurizer will use the word clusters IDs
                detected in the utterances and add them to the utterance text
                before computing the tfidf. Default to None
            use_stemming (bool, optional): use stemming before computing the
                tfdif. Defaults to False (no stemming used)
        """
        self.word_clusters_name = word_clusters_name
        self.use_stemming = use_stemming

    @property
    def unit_name(self):
        # Imported lazily to avoid a circular import with the classifier module.
        from snips_nlu.intent_classifier import TfidfVectorizer
        return TfidfVectorizer.unit_name

    def get_required_resources(self):
        # bool() replaces the redundant `True if ... else False`.
        resources = {STEMS: bool(self.use_stemming)}
        if self.word_clusters_name:
            resources[WORD_CLUSTERS] = {self.word_clusters_name}
        return resources

    def to_dict(self):
        return {
            "unit_name": self.unit_name,
            "word_clusters_name": self.word_clusters_name,
            "use_stemming": self.use_stemming
        }
class CooccurrenceVectorizerConfig(FromDict, ProcessingUnitConfig):
    """Configuration of a :class:`.CooccurrenceVectorizer` object"""

    def __init__(self, window_size=None, unknown_words_replacement_string=None,
                 filter_stop_words=True, keep_order=True):
        """
        Args:
            window_size (int, optional): if provided, word cooccurrences will
                be taken into account only in a context window of size
                :attr:`window_size`. If the window size is 3 then given a word
                w[i], the vectorizer will only extract the following pairs:
                (w[i], w[i + 1]), (w[i], w[i + 2]) and (w[i], w[i + 3]).
                Defaults to None, which means that we consider all words
            unknown_words_replacement_string (str, optional): token standing
                in for unknown words — presumably matched against the same
                setting in the data augmentation config; confirm in the
                vectorizer implementation
            filter_stop_words (bool, optional): if True, stop words are ignored
                when computing cooccurrences
            keep_order (bool, optional): if True then cooccurrence are computed
                taking the words order into account, which means the pairs
                (w1, w2) and (w2, w1) will count as two separate features.
                Defaults to `True`.
        """
        self.window_size = window_size
        self.unknown_words_replacement_string = \
            unknown_words_replacement_string
        self.filter_stop_words = filter_stop_words
        self.keep_order = keep_order

    @property
    def unit_name(self):
        # Imported lazily to avoid a circular import with the classifier module.
        from snips_nlu.intent_classifier import CooccurrenceVectorizer
        return CooccurrenceVectorizer.unit_name

    def get_required_resources(self):
        return {
            STOP_WORDS: self.filter_stop_words,
            # We require the parser to be trained without stems because we
            # don't normalize and stem when processing in the
            # CooccurrenceVectorizer (in order to run the builtin and
            # custom parser on the same unormalized input).
            # Requiring no stems ensures we'll be able to parse the unstemmed
            # input
            CUSTOM_ENTITY_PARSER_USAGE: CustomEntityParserUsage.WITHOUT_STEMS
        }

    def to_dict(self):
        return {
            "unit_name": self.unit_name,
            "unknown_words_replacement_string":
                self.unknown_words_replacement_string,
            "window_size": self.window_size,
            "filter_stop_words": self.filter_stop_words,
            "keep_order": self.keep_order
        }
|
<filename>data-prepper-api/src/main/java/com/amazon/dataprepper/model/buffer/Buffer.java
/*
* Copyright OpenSearch Contributors
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazon.dataprepper.model.buffer;
import com.amazon.dataprepper.model.record.Record;
import com.amazon.dataprepper.model.CheckpointState;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.TimeoutException;
/**
 * Buffer queues the records between TI components and acts as a layer between source and processor/sink. Buffer can
 * be in-memory, disk based or other a standalone implementation.
 */
public interface Buffer<T extends Record<?>> {
    /**
     * writes the record to the buffer
     *
     * @param record the Record to add
     * @param timeoutInMillis how long to wait before giving up
     * @throws TimeoutException thrown when timeout for writing into the Buffer
     */
    void write(T record, int timeoutInMillis) throws TimeoutException;

    /**
     * Atomically writes collection of records into the buffer
     *
     * @param records the collection of records to add
     * @param timeoutInMillis how long to wait before giving up
     * @throws TimeoutException Unable to write to the buffer within the timeout
     * @throws SizeOverflowException The number of records exceeds the total capacity of the buffer. This cannot be retried.
     * @throws RuntimeException Other exceptions
     */
    // NOTE(review): signature declares the broad `throws Exception` although
    // the javadoc lists specific types; SizeOverflowException is not imported
    // here — presumably lives in this package; confirm.
    void writeAll(Collection<T> records, int timeoutInMillis) throws Exception;

    /**
     * Retrieves and removes the batch of records from the head of the queue. The batch size is defined/determined by
     * the configuration attribute "batch_size" or the @param timeoutInMillis
     *
     * @param timeoutInMillis how long to wait before giving up
     * @return The earliest batch of records in the buffer which are still not read and its corresponding checkpoint state.
     */
    Map.Entry<Collection<T>, CheckpointState> read(int timeoutInMillis);

    /**
     * Check summary of records processed by data-prepper downstreams(preppers, sinks, pipelines).
     *
     * @param checkpointState the summary object of checkpoint variables
     */
    void checkpoint(CheckpointState checkpointState);

    /**
     * @return true when the buffer currently holds no unread records.
     */
    boolean isEmpty();
}
|
#!/bin/bash
##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##
#
#Upgrade/Downgrade openmano database preserving the content
#
# Resolve the directory containing this script and set DB connection defaults.
DBUTILS="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DBUSER="mano"
DBPASS=""
# Fallback password tried silently before prompting the user.
DEFAULT_DBPASS="manopw"
DBHOST=""
DBPORT="3306"
DBNAME="mano_db"
QUIET_MODE=""
BACKUP_DIR=""
BACKUP_FILE=""
# TODO update it with the last database version
LAST_DB_VERSION=41

# Detect paths
MYSQL=$(which mysql)
AWK=$(which awk)
GREP=$(which grep)
# Print CLI help for the database migration tool (does not exit).
function usage(){
    echo -e "Usage: $0 OPTIONS [version]"
    echo -e "  Upgrades/Downgrades openmano database preserving the content."\
            "If [version] is not provided, it is upgraded to the last version"
    echo -e "  OPTIONS"
    echo -e "     -u USER  database user. '$DBUSER' by default. Prompts if DB access fails"
    echo -e "     -p PASS  database password. If missing it tries without and '$DEFAULT_DBPASS' password before prompting"
    echo -e "     -P PORT  database port. '$DBPORT' by default"
    echo -e "     -h HOST  database host. 'localhost' by default"
    echo -e "     -d NAME  database name. '$DBNAME' by default.  Prompts if DB access fails"
    echo -e "     -b DIR   backup folder where to create rollback backup file"
    echo -e "     -q --quiet: Do not prompt for credentials and exit if cannot access to database"
    echo -e "     --help   shows this help"
}
# Parse command-line options; long options --help/--quiet come through the
# getopts '-' case with the long name in $OPTARG.
while getopts ":u:p:b:P:h:d:q-:" o; do
    case "${o}" in
        u)
            DBUSER="$OPTARG"
            ;;
        p)
            DBPASS="$OPTARG"
            ;;
        P)
            DBPORT="$OPTARG"
            ;;
        d)
            DBNAME="$OPTARG"
            ;;
        h)
            DBHOST="$OPTARG"
            ;;
        b)
            BACKUP_DIR="$OPTARG"
            ;;
        q)
            export QUIET_MODE=yes
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && continue
            echo "Invalid option: '--$OPTARG'. Type --help for more information" >&2
            exit 1
            ;;
        \?)
            echo "Invalid option: '-$OPTARG'. Type --help for more information" >&2
            exit 1
            ;;
        :)
            echo "Option '-$OPTARG' requires an argument. Type --help for more information" >&2
            exit 1
            ;;
        *)
            usage >&2
            exit 1
            ;;
    esac
done
shift $((OPTIND-1))
# Optional positional argument: target schema version (defaults to latest).
DB_VERSION=$1

if [ -n "$DB_VERSION" ] ; then
    # check it is a number and an allowed one
    # ([ x -eq x ] fails for non-integers, which triggers the error branch)
    [ "$DB_VERSION" -eq "$DB_VERSION" ] 2>/dev/null ||
        ! echo "parameter 'version' requires a integer value" >&2 || exit 1
    if [ "$DB_VERSION" -lt 0 ] || [ "$DB_VERSION" -gt "$LAST_DB_VERSION" ] ; then
        echo "parameter 'version' requires a valid database version between '0' and '$LAST_DB_VERSION'"\
             "If you need an upper version, get a newer version of this script '$0'" >&2
        exit 1
    fi
else
    DB_VERSION="$LAST_DB_VERSION"
fi

# Creating temporary file
# Credentials are passed to mysql via a 0600 defaults file (never on the
# command line); the trap removes it on any exit.
TEMPFILE="$(mktemp -q --tmpdir "migratemanodb.XXXXXX")"
trap 'rm -f "$TEMPFILE"' EXIT
chmod 0600 "$TEMPFILE"
DEF_EXTRA_FILE_PARAM="--defaults-extra-file=$TEMPFILE"
echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"

# Check and ask for database user password
FIRST_TRY="yes"
while ! DB_ERROR=`mysql "$DEF_EXTRA_FILE_PARAM" $DBNAME -e "quit" 2>&1 >/dev/null`
do
    # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
    [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
        echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
        continue
    echo "$DB_ERROR"
    [[ -n "$QUIET_MODE" ]] && echo -e "Invalid database credentials!!!" >&2 && exit 1
    echo -e "Provide database name and credentials (Ctrl+c to abort):"
    read -e -p " mysql database name($DBNAME): " KK
    [ -n "$KK" ] && DBNAME="$KK"
    read -e -p " mysql user($DBUSER): " KK
    [ -n "$KK" ] && DBUSER="$KK"
    read -e -s -p " mysql password: " DBPASS
    echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
    FIRST_TRY=""
    echo
done

DBCMD="mysql $DEF_EXTRA_FILE_PARAM $DBNAME"
#echo DBCMD $DBCMD

#check that the database seems a openmano database
if ! echo -e "show create table vnfs;\nshow create table scenarios" | $DBCMD >/dev/null 2>&1
then
    echo " database $DBNAME does not seem to be an openmano database" >&2
    exit 1;
fi
#GET DATABASE TARGET VERSION
#DB_VERSION=0
#[ $OPENMANO_VER_NUM -ge 2002 ] && DB_VERSION=1 #0.2.2 => 1
#[ $OPENMANO_VER_NUM -ge 2005 ] && DB_VERSION=2 #0.2.5 => 2
#[ $OPENMANO_VER_NUM -ge 3003 ] && DB_VERSION=3 #0.3.3 => 3
#[ $OPENMANO_VER_NUM -ge 3005 ] && DB_VERSION=4 #0.3.5 => 4
#[ $OPENMANO_VER_NUM -ge 4001 ] && DB_VERSION=5 #0.4.1 => 5
#[ $OPENMANO_VER_NUM -ge 4002 ] && DB_VERSION=6 #0.4.2 => 6
#[ $OPENMANO_VER_NUM -ge 4003 ] && DB_VERSION=7 #0.4.3 => 7
#[ $OPENMANO_VER_NUM -ge 4032 ] && DB_VERSION=8 #0.4.32=> 8
#[ $OPENMANO_VER_NUM -ge 4033 ] && DB_VERSION=9 #0.4.33=> 9
#[ $OPENMANO_VER_NUM -ge 4036 ] && DB_VERSION=10 #0.4.36=> 10
#[ $OPENMANO_VER_NUM -ge 4043 ] && DB_VERSION=11 #0.4.43=> 11
#[ $OPENMANO_VER_NUM -ge 4046 ] && DB_VERSION=12 #0.4.46=> 12
#[ $OPENMANO_VER_NUM -ge 4047 ] && DB_VERSION=13 #0.4.47=> 13
#[ $OPENMANO_VER_NUM -ge 4057 ] && DB_VERSION=14 #0.4.57=> 14
#[ $OPENMANO_VER_NUM -ge 4059 ] && DB_VERSION=15 #0.4.59=> 15
#[ $OPENMANO_VER_NUM -ge 5002 ] && DB_VERSION=16 #0.5.2 => 16
#[ $OPENMANO_VER_NUM -ge 5003 ] && DB_VERSION=17 #0.5.3 => 17
#[ $OPENMANO_VER_NUM -ge 5004 ] && DB_VERSION=18 #0.5.4 => 18
#[ $OPENMANO_VER_NUM -ge 5005 ] && DB_VERSION=19 #0.5.5 => 19
#[ $OPENMANO_VER_NUM -ge 5009 ] && DB_VERSION=20 #0.5.9 => 20
#[ $OPENMANO_VER_NUM -ge 5015 ] && DB_VERSION=21 #0.5.15 => 21
#[ $OPENMANO_VER_NUM -ge 5016 ] && DB_VERSION=22 #0.5.16 => 22
#[ $OPENMANO_VER_NUM -ge 5020 ] && DB_VERSION=23 #0.5.20 => 23
#[ $OPENMANO_VER_NUM -ge 5021 ] && DB_VERSION=24 #0.5.21 => 24
#[ $OPENMANO_VER_NUM -ge 5022 ] && DB_VERSION=25 #0.5.22 => 25
#[ $OPENMANO_VER_NUM -ge 5024 ] && DB_VERSION=26 #0.5.24 => 26
#[ $OPENMANO_VER_NUM -ge 5025 ] && DB_VERSION=27 #0.5.25 => 27
#[ $OPENMANO_VER_NUM -ge 5052 ] && DB_VERSION=28 #0.5.52 => 28
#[ $OPENMANO_VER_NUM -ge 5059 ] && DB_VERSION=29 #0.5.59 => 29
#[ $OPENMANO_VER_NUM -ge 5060 ] && DB_VERSION=30 #0.5.60 => 30
#[ $OPENMANO_VER_NUM -ge 5061 ] && DB_VERSION=31 #0.5.61 => 31
#[ $OPENMANO_VER_NUM -ge 5070 ] && DB_VERSION=32 #0.5.70 => 32
#[ $OPENMANO_VER_NUM -ge 5082 ] && DB_VERSION=33 #0.5.82 => 33
#[ $OPENMANO_VER_NUM -ge 6000 ] && DB_VERSION=34 #0.6.00 => 34
#[ $OPENMANO_VER_NUM -ge 6001 ] && DB_VERSION=35 #0.6.01 => 35
#[ $OPENMANO_VER_NUM -ge 6003 ] && DB_VERSION=36 #0.6.03 => 36
#[ $OPENMANO_VER_NUM -ge 6009 ] && DB_VERSION=37 #0.6.09 => 37
#[ $OPENMANO_VER_NUM -ge 6011 ] && DB_VERSION=38 #0.6.11 => 38
#[ $OPENMANO_VER_NUM -ge 6020 ] && DB_VERSION=39 #0.6.20 => 39
#[ $OPENMANO_VER_NUM -ge 6000004 ] && DB_VERSION=40 #6.0.4 => 40
#[ $OPENMANO_VER_NUM -ge 8000000 ] && DB_VERSION=41 #8.0.0 => 41
# TODO ... put next versions here
function upgrade_to_1(){
    # echo "    upgrade database from version 0.0 to version 0.1"
    # Schema 0 -> 1: introduce the schema_version bookkeeping table that every
    # later upgrade_to_N/downgrade_from_N function inserts into / deletes from.
    echo "      CREATE TABLE \`schema_version\`"
    sql "CREATE TABLE \`schema_version\` (
	\`version_int\` INT NOT NULL COMMENT 'version as a number. Must not contain gaps',
	\`version\` VARCHAR(20) NOT NULL COMMENT 'version as a text',
	\`openmano_ver\` VARCHAR(20) NOT NULL COMMENT 'openmano version',
	\`comments\` VARCHAR(2000) NULL COMMENT 'changes to database',
	\`date\` DATE NULL,
	PRIMARY KEY (\`version_int\`)
	)
	COMMENT='database schema control version'
	COLLATE='utf8_general_ci'
	ENGINE=InnoDB;"
    # Seed with its own row so the current schema version can always be queried.
    sql "INSERT INTO \`schema_version\` (\`version_int\`, \`version\`, \`openmano_ver\`, \`comments\`, \`date\`)
	 VALUES (1, '0.1', '0.2.2', 'insert schema_version', '2015-05-08');"
}
function downgrade_from_1(){
    # echo "    downgrade database from version 0.1 to version 0.0"
    # Schema 1 -> 0: remove the version-tracking table itself; after this the
    # database carries no schema_version information at all.
    local drop_stmt="DROP TABLE IF EXISTS \`schema_version\`;"
    echo "      DROP TABLE IF EXISTS \`schema_version\`"
    sql "$drop_stmt"
}
function upgrade_to_2(){
    # echo "    upgrade database from version 0.1 to version 0.2"
    # Schema 1 -> 2: per-VIM credentials, and factor image/flavor data out of
    # 'vms' into dedicated 'images'/'flavors' tables with per-datacenter link tables.
    echo "    Add columns user/passwd to table 'vim_tenants'"
    sql "ALTER TABLE vim_tenants ADD COLUMN user VARCHAR(36) NULL COMMENT 'Credentials for vim' AFTER created,
	ADD COLUMN passwd VARCHAR(50) NULL COMMENT 'Credentials for vim' AFTER user;"
    echo "    Add table 'images' and 'datacenters_images'"
    sql "CREATE TABLE images (
	uuid VARCHAR(36) NOT NULL,
	name VARCHAR(50) NOT NULL,
	location VARCHAR(200) NOT NULL,
	description VARCHAR(100) NULL,
	metadata VARCHAR(400) NULL,
	PRIMARY KEY (uuid),
	UNIQUE INDEX location (location) )
	COLLATE='utf8_general_ci'
	ENGINE=InnoDB;"
    # Link table: which VIM-side id an image has at each datacenter.
    sql "CREATE TABLE datacenters_images (
	id INT NOT NULL AUTO_INCREMENT,
	image_id VARCHAR(36) NOT NULL,
	datacenter_id VARCHAR(36) NOT NULL,
	vim_id VARCHAR(36) NOT NULL,
	PRIMARY KEY (id),
	CONSTRAINT FK__images FOREIGN KEY (image_id) REFERENCES images (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
	CONSTRAINT FK__datacenters_i FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE )
	COLLATE='utf8_general_ci'
	ENGINE=InnoDB;"
    echo "    migrate data from table 'vms' into 'images'"
    # Existing vim_image_id values become image rows; the cross JOIN with
    # datacenters registers each image on every known datacenter.
    sql "INSERT INTO images (uuid, name, location) SELECT DISTINCT vim_image_id, vim_image_id, image_path FROM vms;"
    sql "INSERT INTO datacenters_images (image_id, datacenter_id, vim_id)
	SELECT DISTINCT vim_image_id, datacenters.uuid, vim_image_id FROM vms JOIN datacenters;"
    echo "    Add table 'flavors' and 'datacenter_flavors'"
    sql "CREATE TABLE flavors (
	uuid VARCHAR(36) NOT NULL,
	name VARCHAR(50) NOT NULL,
	description VARCHAR(100) NULL,
	disk SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
	ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
	vcpus SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
	extended VARCHAR(2000) NULL DEFAULT NULL COMMENT 'Extra description json format of needed resources and pining, orginized in sets per numa',
	PRIMARY KEY (uuid) )
	COLLATE='utf8_general_ci'
	ENGINE=InnoDB;"
    sql "CREATE TABLE datacenters_flavors (
	id INT NOT NULL AUTO_INCREMENT,
	flavor_id VARCHAR(36) NOT NULL,
	datacenter_id VARCHAR(36) NOT NULL,
	vim_id VARCHAR(36) NOT NULL,
	PRIMARY KEY (id),
	CONSTRAINT FK__flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
	CONSTRAINT FK__datacenters_f FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE )
	COLLATE='utf8_general_ci'
	ENGINE=InnoDB;"
    echo "    migrate data from table 'vms' into 'flavors'"
    sql "INSERT INTO flavors (uuid, name) SELECT DISTINCT vim_flavor_id, vim_flavor_id FROM vms;"
    sql "INSERT INTO datacenters_flavors (flavor_id, datacenter_id, vim_id)
	SELECT DISTINCT vim_flavor_id, datacenters.uuid, vim_flavor_id FROM vms JOIN datacenters;"
    # Repoint vms at the new tables: rename the vim_* columns to FK columns.
    # Defaults must be dropped before CHANGE COLUMN re-declares them NOT NULL.
    sql "ALTER TABLE vms ALTER vim_flavor_id DROP DEFAULT, ALTER vim_image_id DROP DEFAULT;
	ALTER TABLE vms CHANGE COLUMN vim_flavor_id flavor_id VARCHAR(36) NOT NULL COMMENT 'Link to flavor table' AFTER vnf_id,
	CHANGE COLUMN vim_image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER flavor_id,
	ADD CONSTRAINT FK_vms_images  FOREIGN KEY (image_id)  REFERENCES images  (uuid),
	ADD CONSTRAINT FK_vms_flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid);"
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (2, '0.2', '0.2.5', 'new tables images,flavors', '2015-07-13');"
}
function downgrade_from_2(){
    # echo "    downgrade database from version 0.2 to version 0.1"
    # Schema 2 -> 1: restore the inline vim_* columns on 'vms' and drop the
    # image/flavor tables. NOTE(review): the VIM-side ids stored in
    # datacenters_images/datacenters_flavors are NOT copied back (see the
    # commented-out UPDATE below), so vim_image_id/vim_flavor_id keep the
    # internal uuids — confirm this is acceptable before downgrading real data.
    echo "    migrate back data from 'datacenters_images' 'datacenters_flavors' into 'vms'"
    sql "ALTER TABLE vms ALTER image_id DROP DEFAULT, ALTER flavor_id DROP DEFAULT;
	ALTER TABLE vms CHANGE COLUMN flavor_id vim_flavor_id VARCHAR(36) NOT NULL COMMENT 'Flavor ID in the VIM DB' AFTER vnf_id,
	CHANGE COLUMN image_id vim_image_id VARCHAR(36) NOT NULL COMMENT 'Image ID in the VIM DB' AFTER vim_flavor_id,
	DROP FOREIGN KEY FK_vms_flavors, DROP INDEX FK_vms_flavors,
	DROP FOREIGN KEY FK_vms_images, DROP INDEX FK_vms_images;"
    #    echo "UPDATE v SET v.vim_image_id=di.vim_id
    #        FROM  vms as v INNER JOIN images as i ON v.vim_image_id=i.uuid
    #        INNER JOIN datacenters_images as di ON i.uuid=di.image_id;"
    echo "    Delete columns 'user/passwd' from 'vim_tenants'"
    sql "ALTER TABLE vim_tenants DROP COLUMN user, DROP COLUMN passwd; "
    echo  "    delete tables 'datacenter_images', 'images'"
    # Drop order matters: link tables first, then the referenced tables.
    sql "DROP TABLE IF EXISTS \`datacenters_images\`;"
    sql "DROP TABLE IF EXISTS \`images\`;"
    echo  "    delete tables 'datacenter_flavors', 'flavors'"
    sql "DROP TABLE IF EXISTS \`datacenters_flavors\`;"
    sql "DROP TABLE IF EXISTS \`flavors\`;"
    sql "DELETE FROM schema_version WHERE version_int='2';"
}
function upgrade_to_3(){
    # echo "    upgrade database from version 0.2 to version 0.3"
    # Schema 2 -> 3: free-text log/uuid references, per-datacenter 'created'
    # flag on images/flavors, datacenter 'config' column, and a mandatory
    # datacenter_id foreign key on vim_tenants.
    # FIX: added the missing closing quote around 'uuids' in the progress message.
    echo "    Change table 'logs', 'uuids'"
    sql "ALTER TABLE logs CHANGE COLUMN related related VARCHAR(36) NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
    sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at VARCHAR(36) NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
    echo "    Add column created to table 'datacenters_images' and 'datacenters_flavors'"
    for table in datacenters_images datacenters_flavors
    do
        sql "ALTER TABLE $table ADD COLUMN created ENUM('true','false') NOT NULL DEFAULT 'false'
	COMMENT 'Indicates if it has been created by openmano, or already existed' AFTER vim_id;"
    done
    sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(2000) NULL DEFAULT NULL AFTER description;"
    echo "    Allow null to column 'vim_interface_id' in 'instance_interfaces'"
    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
    echo "    Add column config to table 'datacenters'"
    sql "ALTER TABLE datacenters ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL COMMENT 'extra config information in json' AFTER vim_url_admin;
	"
    echo "    Add column datacenter_id to table 'vim_tenants'"
    sql "ALTER TABLE vim_tenants ADD COLUMN datacenter_id VARCHAR(36) NULL COMMENT 'Datacenter of this tenant' AFTER uuid,
	DROP INDEX name, DROP INDEX vim_tenant_id;"
    sql "ALTER TABLE vim_tenants CHANGE COLUMN name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL COMMENT 'tenant name at VIM' AFTER datacenter_id,
	CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
    # BUG FIX: this UPDATE was previously passed to 'echo' instead of 'sql', so
    # datacenter_id stayed NULL for every row and the DELETE below wiped the
    # whole vim_tenants table. It must run before rows lacking a datacenter
    # are purged and the column is made NOT NULL.
    sql "UPDATE vim_tenants as vt LEFT JOIN tenants_datacenters as td ON vt.uuid=td.vim_tenant_id
	SET vt.datacenter_id=td.datacenter_id;"
    sql "DELETE FROM vim_tenants WHERE datacenter_id is NULL;"
    sql "ALTER TABLE vim_tenants ALTER datacenter_id DROP DEFAULT;
	ALTER TABLE vim_tenants
	CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL COMMENT 'Datacenter of this tenant' AFTER uuid;"
    sql "ALTER TABLE vim_tenants ADD CONSTRAINT FK_vim_tenants_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid)
	ON UPDATE CASCADE ON DELETE CASCADE;"
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (3, '0.3', '0.3.3', 'alter vim_tenant tables', '2015-07-28');"
}
function downgrade_from_3(){
    # echo "    downgrade database from version 0.3 to version 0.2"
    # Schema 3 -> 2: restore the closed enums on logs/uuids, drop the
    # 'created'/'config'/'datacenter_id' columns, and re-establish the unique
    # name / vim_tenant_id constraints on vim_tenants.
    echo "    Change back table 'logs', 'uuids'"
    sql "ALTER TABLE logs CHANGE COLUMN related related ENUM('nfvo_tenants','datacenters','vim_tenants','tenants_datacenters','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
    sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at ENUM('nfvo_tenants','datacenters','vim_tenants','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
    echo "    Delete column created from table 'datacenters_images' and 'datacenters_flavors'"
    for table in datacenters_images datacenters_flavors
    do
        sql "ALTER TABLE $table DROP COLUMN created;"
    done
    sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(400) NULL DEFAULT NULL AFTER description;"
    echo "    Deny back null to column 'vim_interface_id' in 'instance_interfaces'"
    sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NOT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
    echo "    Delete column config to table 'datacenters'"
    sql "ALTER TABLE datacenters DROP COLUMN config;"
    echo "    Delete column datacenter_id to table 'vim_tenants'"
    sql "ALTER TABLE vim_tenants DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_vim_tenants_datacenters;"
    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name name VARCHAR(36) NULL DEFAULT NULL COMMENT '' AFTER uuid"
    sql "ALTER TABLE vim_tenants ALTER name DROP DEFAULT;"
    # The '|| ! echo ...' pattern below prints a warning when the statement
    # fails (e.g. NOT NULL/unique cannot be restored because of existing data)
    # while keeping a non-zero status; the downgrade proceeds best-effort.
    sql "ALTER TABLE vim_tenants CHANGE COLUMN name name VARCHAR(36) NOT NULL AFTER uuid" || ! echo "Warning changing column name at vim_tenants!"
    sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX name (name);"  || ! echo "Warning add unique index name at vim_tenants!"
    sql "ALTER TABLE vim_tenants ALTER vim_tenant_id DROP DEFAULT;"
    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NOT NULL COMMENT 'Tenant ID in the VIM DB' AFTER name;" ||
        ! echo "Warning changing column vim_tenant_id at vim_tenants!"
    sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX vim_tenant_id (vim_tenant_id);"  ||
        ! echo "Warning add unique index vim_tenant_id at vim_tenants!"
    sql "DELETE FROM schema_version WHERE version_int='3';"
}
function upgrade_to_4(){
    # echo "    upgrade database from version 0.3 to version 0.4"
    # Schema 3 -> 4: widen the stored 'graph' layout text on the scenario
    # tables, and relax the datacenter 'type' enum into a free VARCHAR so new
    # VIM connector types need no schema change.
    echo "    Enlarge graph field at tables 'sce_vnfs', 'sce_nets'"
    sql "ALTER TABLE sce_vnfs CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
    sql "ALTER TABLE sce_nets CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
    sql "ALTER TABLE datacenters CHANGE COLUMN type type VARCHAR(36) NOT NULL DEFAULT 'openvim' AFTER description;"
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (4, '0.4', '0.3.5', 'enlarge graph field at sce_vnfs/nets', '2015-10-20');"
}
function downgrade_from_4(){
    # echo "    downgrade database from version 0.4 to version 0.3"
    # Schema 4 -> 3 rollback.
    echo "    Shorten back graph field at tables 'sce_vnfs', 'sce_nets'"
    for table in sce_vnfs sce_nets
    do
        # NOTE(review): this sets VARCHAR(2000) — identical to upgrade_to_4 —
        # so the 'graph' column is not actually shortened. Possibly intentional
        # (shortening could truncate data); confirm the pre-v4 width before
        # "fixing" this.
        sql "ALTER TABLE $table CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
    done
    # Restore the closed enum of supported datacenter types.
    sql "ALTER TABLE datacenters CHANGE COLUMN type type ENUM('openvim','openstack') NOT NULL DEFAULT 'openvim' AFTER description;"
    sql "DELETE FROM schema_version WHERE version_int='4';"
}
function upgrade_to_5(){
    # echo "    upgrade database from version 0.4 to version 0.5"
    # Schema 4 -> 5: bridge interfaces get an optional MAC address column.
    local add_mac_stmt="ALTER TABLE interfaces ADD COLUMN mac CHAR(18) NULL DEFAULT NULL AFTER model;"
    echo "    Add 'mac' field for bridge interfaces in table 'interfaces'"
    sql "$add_mac_stmt"
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (5, '0.5', '0.4.1', 'Add mac address for bridge interfaces', '2015-12-14');"
}
function downgrade_from_5(){
    # echo "    downgrade database from version 0.5 to version 0.4"
    # Schema 5 -> 4: drop the MAC address column added by upgrade_to_5.
    local drop_mac_stmt="ALTER TABLE interfaces DROP COLUMN mac;"
    echo "    Remove 'mac' field for bridge interfaces in table 'interfaces'"
    sql "$drop_mac_stmt"
    sql "DELETE FROM schema_version WHERE version_int='5';"
}
function upgrade_to_6(){
    # echo "    upgrade database from version 0.5 to version 0.6"
    # Schema 5 -> 6: keep the original descriptors, and enrich the instance_*
    # tables with VIM status/error info plus back-references (sce_*, net,
    # datacenter, vim_tenant) needed to reconcile deployed items with the VIM.
    echo "    Add 'descriptor' field text to 'vnfd', 'scenarios'"
    sql "ALTER TABLE vnfs ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the VNF' AFTER class;"
    sql "ALTER TABLE scenarios ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the scenario' AFTER modified_at;"
    echo "    Add 'last_error', 'vim_info' to 'instance_vms', 'instance_nets'"
    sql "ALTER TABLE instance_vms ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
    sql "ALTER TABLE instance_vms ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD' AFTER vim_vm_id;"
    sql "ALTER TABLE instance_nets ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
    sql "ALTER TABLE instance_nets ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR','INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
    echo "    Add 'mac_address', 'ip_address', 'vim_info' to 'instance_interfaces'"
    sql "ALTER TABLE instance_interfaces ADD COLUMN mac_address VARCHAR(32) NULL DEFAULT NULL AFTER vim_interface_id, ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac_address, ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER ip_address;"
    echo "    Add 'sce_vnf_id','datacenter_id','vim_tenant_id' field to 'instance_vnfs'"
    sql "ALTER TABLE instance_vnfs ADD COLUMN sce_vnf_id VARCHAR(36) NULL DEFAULT NULL AFTER vnf_id, ADD CONSTRAINT FK_instance_vnfs_sce_vnfs FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
    sql "ALTER TABLE instance_vnfs ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_vnf_id, ADD CONSTRAINT FK_instance_vnfs_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
    sql "ALTER TABLE instance_vnfs ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_vnfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
    echo "    Add 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field to 'instance_nets'"
    sql "ALTER TABLE instance_nets ADD COLUMN sce_net_id VARCHAR(36) NULL DEFAULT NULL AFTER instance_scenario_id, ADD CONSTRAINT FK_instance_nets_sce_nets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
    sql "ALTER TABLE instance_nets ADD COLUMN net_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_net_id, ADD CONSTRAINT FK_instance_nets_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
    sql "ALTER TABLE instance_nets ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER net_id, ADD CONSTRAINT FK_instance_nets_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
    sql "ALTER TABLE instance_nets ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_nets_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (6, '0.6', '0.4.2', 'Adding VIM status info', '2015-12-22');"
}
function downgrade_from_6(){
    # echo "    downgrade database from version 0.6 to version 0.5"
    # Schema 6 -> 5: mirror of upgrade_to_6 — drop descriptors, VIM status
    # columns and the sce_*/net/datacenter/vim_tenant back-references.
    echo "    Remove 'descriptor' field from 'vnfd', 'scenarios' tables"
    sql "ALTER TABLE vnfs DROP COLUMN descriptor;"
    sql "ALTER TABLE scenarios DROP COLUMN descriptor;"
    echo "    Remove 'last_error', 'vim_info' from 'instance_vms', 'instance_nets'"
    sql "ALTER TABLE instance_vms DROP COLUMN error_msg, DROP COLUMN vim_info;"
    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE','PAUSED','INACTIVE','CREATING','ERROR','DELETING') NOT NULL DEFAULT 'CREATING' AFTER vim_vm_id;"
    sql "ALTER TABLE instance_nets DROP COLUMN error_msg, DROP COLUMN vim_info;"
    sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
    echo "    Remove 'mac_address', 'ip_address', 'vim_info' from 'instance_interfaces'"
    sql "ALTER TABLE instance_interfaces DROP COLUMN mac_address, DROP COLUMN ip_address, DROP COLUMN vim_info;"
    echo "    Remove 'sce_vnf_id','datacenter_id','vim_tenant_id' field from 'instance_vnfs'"
    sql "ALTER TABLE instance_vnfs DROP COLUMN sce_vnf_id, DROP FOREIGN KEY FK_instance_vnfs_sce_vnfs;"
    sql "ALTER TABLE instance_vnfs DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_vnfs_vim_tenants;"
    sql "ALTER TABLE instance_vnfs DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_vnfs_datacenters;"
    echo "    Remove 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field from 'instance_nets'"
    sql "ALTER TABLE instance_nets DROP COLUMN sce_net_id, DROP FOREIGN KEY FK_instance_nets_sce_nets;"
    sql "ALTER TABLE instance_nets DROP COLUMN net_id, DROP FOREIGN KEY FK_instance_nets_nets;"
    sql "ALTER TABLE instance_nets DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_nets_vim_tenants;"
    sql "ALTER TABLE instance_nets DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_nets_datacenters;"
    sql "DELETE FROM schema_version WHERE version_int='6';"
}
function upgrade_to_7(){
    # echo "    upgrade database from version 0.6 to version 0.7"
    # Schema 6 -> 7: convert created_at/modified_at from TIMESTAMP to a unix
    # epoch stored as DOUBLE, table by table (add shadow column, copy values,
    # swap it in).
    echo "    Change created_at, modified_at from timestamp to unix float at all database"
    for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
    do
        echo -en "        $table               \r"
        sql "ALTER TABLE $table ADD COLUMN created_at_ DOUBLE NOT NULL after created_at;"
        # BUG FIX: this UPDATE was passed to 'echo' instead of 'sql', so
        # created_at_ stayed 0 and all original timestamps were destroyed when
        # created_at was dropped below.
        sql "UPDATE $table SET created_at_=unix_timestamp(created_at);"
        sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at DOUBLE NOT NULL;"
        # 'uuids' has no modified_at column, hence the exception.
        [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at DOUBLE NULL DEFAULT NULL;"
    done
    # FIX: removed a leftover progress message ("Add 'descriptor' field...")
    # copied from upgrade_to_6 that described no action performed here.
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (7, '0.7', '0.4.3', 'Changing created_at time at database', '2016-01-25');"
}
function downgrade_from_7(){
    # echo "    downgrade database from version 0.7 to version 0.6"
    # Schema 7 -> 6: convert created_at/modified_at back from unix-epoch
    # DOUBLE to TIMESTAMP using the same shadow-column technique as the upgrade.
    echo "    Change back created_at, modified_at from unix float to timestamp at all database"
    for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
    do
        echo -en "        $table               \r"
        sql "ALTER TABLE $table ADD COLUMN created_at_ TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP after created_at;"
        # BUG FIX: this UPDATE was passed to 'echo' instead of 'sql', so every
        # row's created_at_ defaulted to CURRENT_TIMESTAMP and the real
        # creation times were lost when created_at was dropped below.
        sql "UPDATE $table SET created_at_=from_unixtime(created_at);"
        sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP;"
        # 'uuids' has no modified_at column, hence the exception.
        [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at TIMESTAMP NULL DEFAULT NULL;"
    done
    # FIX: removed a leftover progress message ("Remove 'descriptor' field...")
    # copied from downgrade_from_6 that described no action performed here.
    sql "DELETE FROM schema_version WHERE version_int='7';"
}
function upgrade_to_8(){
    # echo "    upgrade database from version 0.7 to version 0.8"
    # Schema 7 -> 8: widen name/description columns to 255 chars database-wide,
    # and enlarge interface names and vim_tenants credential columns.
    # FIX: corrected the typo "enalarge" -> "enlarge" in the progress message.
    echo "    Change enlarge name, description to 255 at all database"
    for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
    do
        echo -en "        $table               \r"
        sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR(255) NOT NULL;"
        sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(255) NULL DEFAULT NULL;"
    done
    echo -en "        interfaces           \r"
    sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(255) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(255) NULL DEFAULT NULL;"
    # FIX: the vim_tenants progress marker was printed after its first ALTER
    # had already run; emit it before both vim_tenants statements.
    echo -en "        vim_tenants          \r"
    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL;"
    sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(64) NULL DEFAULT NULL;"
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (8, '0.8', '0.4.32', 'Enlarging name at database', '2016-02-01');"
}
function downgrade_from_8(){
    # echo "    downgrade database from version 0.8 to version 0.7"
    # Schema 8 -> 7: shrink name/description/credential columns back to their
    # pre-v8 widths. NOTE: values longer than the restored width may be
    # truncated or rejected depending on the server's sql_mode.
    echo "    Change back name,description to shorter length at all database"
    for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
    do
        # flavors/images originally used VARCHAR(50) names; all other tables 36.
        # The double '||' keeps 50 when either test matches, else assigns 36.
        name_length=50
        [[ $table == flavors ]] || [[ $table == images ]] || name_length=36
        echo -en "        $table               \r"
        sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR($name_length) NOT NULL;"
        sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(100) NULL DEFAULT NULL;"
    done
    echo -en "        interfaces           \r"
    sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(25) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(25) NULL DEFAULT NULL;"
    echo -en "        vim_tenants          \r"
    sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL;"
    sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(36) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(50) NULL DEFAULT NULL;"
    sql "DELETE FROM schema_version WHERE version_int='8';"
}
function upgrade_to_9(){
    # echo "    upgrade database from version 0.8 to version 0.9"
    # Schema 8 -> 9: extend instance_vms.status with the 'ACTIVE:NoMgmtIP'
    # state (VM running but without a management IP yet).
    local status_stmt="ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
    echo "    Add more status to 'instance_vms'"
    sql "$status_stmt"
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (9, '0.9', '0.4.33', 'Add ACTIVE:NoMgmtIP to instance_vms table', '2016-02-05');"
}
function downgrade_from_9(){
    # echo "    downgrade database from version 0.9 to version 0.8"
    # Schema 9 -> 8: drop the 'ACTIVE:NoMgmtIP' value from instance_vms.status.
    # FIX: the progress message said "Add more status" (copy-pasted from
    # upgrade_to_9) although this function removes a status value.
    echo "    Remove 'ACTIVE:NoMgmtIP' status from 'instance_vms'"
    sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
    sql "DELETE FROM schema_version WHERE version_int='9';"
}
function upgrade_to_10(){
    # echo "    upgrade database from version 0.9 to version 0.10"
    # Schema 9 -> 10: attach vnfs/scenarios/instance_scenarios to an owning
    # tenant, and rename vim_tenants -> datacenter_tenants together with every
    # foreign key that referenced it.
    echo "    add tenant to 'vnfs'"
    sql "ALTER TABLE vnfs ADD COLUMN tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER name, ADD CONSTRAINT FK_vnfs_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE SET NULL, CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'false' AFTER physical, DROP INDEX name, DROP INDEX path, DROP COLUMN path;"
    # FK must be dropped before the column it covers can be renamed.
    sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
    sql "ALTER TABLE scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
    sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
    sql "ALTER TABLE instance_scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
    echo "    rename 'vim_tenants' table to 'datacenter_tenants'"
    # BUG FIX: the RENAME was passed to 'echo' instead of 'sql'; every ALTER
    # in the loop below references datacenter_tenants and would fail while the
    # table was still named vim_tenants.
    sql "RENAME TABLE vim_tenants TO datacenter_tenants;"
    for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
    do
        # instance_vnfs allows a NULL datacenter_tenant_id; the rest require one.
        NULL="NOT NULL"
        [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
        sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_vim_tenants;"
        sql "ALTER TABLE ${table} ALTER vim_tenant_id DROP DEFAULT;"
        sql "ALTER TABLE ${table} CHANGE COLUMN vim_tenant_id datacenter_tenant_id VARCHAR(36) ${NULL} AFTER datacenter_id, ADD CONSTRAINT FK_${table}_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid); "
    done
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (10, '0.10', '0.4.36', 'tenant management of vnfs,scenarios', '2016-03-08');"
}
function downgrade_from_10(){
    # echo "    downgrade database from version 0.10 to version 0.9"
    # Schema 10 -> 9: detach vnfs/scenarios from tenants and rename
    # datacenter_tenants back to vim_tenants with its original foreign keys.
    echo "    remove tenant from 'vnfs'"
    sql "ALTER TABLE vnfs DROP COLUMN tenant_id, DROP FOREIGN KEY FK_vnfs_nfvo_tenants, ADD UNIQUE INDEX name (name), ADD COLUMN path VARCHAR(100) NULL DEFAULT NULL COMMENT 'Path where the YAML descriptor of the VNF can be found. NULL if it is a physical network function.' AFTER name, ADD UNIQUE INDEX path (path), CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'true' AFTER physical;"
    sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
    sql "ALTER TABLE scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
    sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
    sql "ALTER TABLE instance_scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
    echo "    rename back 'datacenter_tenants' table to 'vim_tenants'"
    # BUG FIX: the RENAME was passed to 'echo' instead of 'sql'; the loop
    # below re-creates FKs against vim_tenants, which would not yet exist.
    sql "RENAME TABLE datacenter_tenants TO vim_tenants;"
    for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
    do
        sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_datacenter_tenants;"
        # instance_vnfs allows a NULL vim_tenant_id; the rest require one.
        NULL="NOT NULL"
        [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
        sql "ALTER TABLE ${table} ALTER datacenter_tenant_id DROP DEFAULT;"
        sql "ALTER TABLE ${table} CHANGE COLUMN datacenter_tenant_id vim_tenant_id VARCHAR(36) $NULL AFTER datacenter_id, ADD CONSTRAINT FK_${table}_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid); "
    done
    sql "DELETE FROM schema_version WHERE version_int='10';"
}
function upgrade_to_11(){
    # echo "    upgrade database from version 0.10 to version 0.11"
    # Schema 10 -> 11: scenario names no longer need to be globally unique.
    echo "    remove unique name at 'scenarios', 'instance_scenarios'"
    local tbl
    for tbl in scenarios instance_scenarios
    do
        sql "ALTER TABLE $tbl DROP INDEX name;"
    done
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (11, '0.11', '0.4.43', 'remove unique name at scenarios,instance_scenarios', '2016-07-18');"
}
function downgrade_from_11(){
    # echo "    downgrade database from version 0.11 to version 0.10"
    # Schema 11 -> 10: restore the unique-name constraints dropped by
    # upgrade_to_11 (fails if duplicate names were created meanwhile).
    echo "    add unique name at 'scenarios', 'instance_scenarios'"
    local tbl
    for tbl in scenarios instance_scenarios
    do
        sql "ALTER TABLE $tbl ADD UNIQUE INDEX name (name);"
    done
    sql "DELETE FROM schema_version WHERE version_int='11';"
}
function upgrade_to_12(){
    # echo "    upgrade database from version 0.11 to version 0.12"
    # Schema 11 -> 12: a shared ip_profiles table holds the IP parameters of a
    # network regardless of which layer (net, sce_net, instance_net) owns it;
    # interfaces gain explicit ip_address columns.
    echo "    create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to 'interfaces' and 'sce_interfaces'"
    sql "CREATE TABLE IF NOT EXISTS ip_profiles (
	id INT(11) NOT NULL AUTO_INCREMENT,
	net_id VARCHAR(36) NULL DEFAULT NULL,
	sce_net_id VARCHAR(36) NULL DEFAULT NULL,
	instance_net_id VARCHAR(36) NULL DEFAULT NULL,
	ip_version ENUM('IPv4','IPv6') NOT NULL DEFAULT 'IPv4',
	subnet_address VARCHAR(64) NULL DEFAULT NULL,
	gateway_address VARCHAR(64) NULL DEFAULT NULL,
	dns_address VARCHAR(64) NULL DEFAULT NULL,
	dhcp_enabled ENUM('true','false') NOT NULL DEFAULT 'true',
	dhcp_start_address VARCHAR(64) NULL DEFAULT NULL,
	dhcp_count INT(11) NULL DEFAULT NULL,
	PRIMARY KEY (id),
	CONSTRAINT FK_ipprofiles_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON DELETE CASCADE,
	CONSTRAINT FK_ipprofiles_scenets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON DELETE CASCADE,
	CONSTRAINT FK_ipprofiles_instancenets FOREIGN KEY (instance_net_id) REFERENCES instance_nets (uuid) ON DELETE CASCADE  )
        COMMENT='Table containing the IP parameters of a network, either a net, a sce_net or and instance_net.'
	COLLATE='utf8_general_ci'
	ENGINE=InnoDB;"
    sql "ALTER TABLE interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac;"
    sql "ALTER TABLE sce_interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER interface_id;"
    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (12, '0.12', '0.4.46', 'create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces', '2016-08-29');"
}
function downgrade_from_12(){
    # echo "    downgrade database from version 0.12 to version 0.11"
    # Schema 12 -> 11: drop the ip_profiles table and the ip_address columns
    # introduced by upgrade_to_12.
    echo "    delete ip_profiles table, and remove ip_address column in 'interfaces' and 'sce_interfaces'"
    sql "DROP TABLE IF EXISTS ip_profiles;"
    local tbl
    for tbl in interfaces sce_interfaces
    do
        sql "ALTER TABLE $tbl DROP COLUMN ip_address;"
    done
    sql "DELETE FROM schema_version WHERE version_int='12';"
}
function upgrade_to_13(){
# echo " upgrade database from version 0.12 to version 0.13"
# Adds a cloud_config blob (MEDIUMTEXT) to 'scenarios' and
# 'instance_scenarios', then records v13 in schema_version.
echo " add cloud_config at 'scenarios', 'instance_scenarios'"
sql "ALTER TABLE scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER descriptor;"
sql "ALTER TABLE instance_scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER modified_at;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (13, '0.13', '0.4.47', 'insert cloud-config at scenarios,instance_scenarios', '2016-08-30');"
}
function downgrade_from_13(){
# echo " downgrade database from version 0.13 to version 0.12"
# Reverts upgrade_to_13: drops the cloud_config columns and removes the
# v13 row from schema_version.
echo " remove cloud_config at 'scenarios', 'instance_scenarios'"
sql "ALTER TABLE scenarios DROP COLUMN cloud_config;"
sql "ALTER TABLE instance_scenarios DROP COLUMN cloud_config;"
sql "DELETE FROM schema_version WHERE version_int='13';"
}
function upgrade_to_14(){
# echo " upgrade database from version 0.13 to version 0.14"
# Drops the unique (vim_net_id, instance_scenario_id) index and renames
# column 'external' to 'created' (same ENUM type, new meaning/comment),
# then records v14 in schema_version.
echo " remove unique index vim_net_id, instance_scenario_id at table 'instance_nets'"
sql "ALTER TABLE instance_nets DROP INDEX vim_net_id_instance_scenario_id;"
sql "ALTER TABLE instance_nets CHANGE COLUMN external created ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at VIM' AFTER multipoint;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (14, '0.14', '0.4.57', 'remove unique index vim_net_id, instance_scenario_id', '2016-09-26');"
}
function downgrade_from_14(){
# echo " downgrade database from version 0.14 to version 0.13"
# Reverts upgrade_to_14: restores the unique (vim_net_id,
# instance_scenario_id) index and renames 'created' back to 'external',
# then removes the v14 row from schema_version.
# Fix: the progress message previously said "remove cloud_config at
# 'scenarios', 'instance_scenarios'" (copy-paste from downgrade_from_13),
# which did not describe what this function does.
echo " restore unique index vim_net_id, instance_scenario_id at table 'instance_nets'"
sql "ALTER TABLE instance_nets ADD UNIQUE INDEX vim_net_id_instance_scenario_id (vim_net_id, instance_scenario_id);"
sql "ALTER TABLE instance_nets CHANGE COLUMN created external ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'If external, means that it already exists at VIM' AFTER multipoint;"
sql "DELETE FROM schema_version WHERE version_int='14';"
}
function upgrade_to_15(){
# echo " upgrade database from version 0.14 to version 0.15"
# Adds 'checksum' and 'universal_name' to 'images' (with a unique index
# over the pair), makes 'location' nullable, and makes vms.image_path
# nullable. The DROP DEFAULT steps are needed before the CHANGE/ALTER
# so MySQL does not keep a stale default on the modified columns.
echo " add columns 'universal_name' and 'checksum' at table 'images', add unique index universal_name_checksum, and change location to allow NULL; change column 'image_path' in table 'vms' to allow NULL"
sql "ALTER TABLE images ADD COLUMN checksum VARCHAR(32) NULL DEFAULT NULL AFTER name;"
sql "ALTER TABLE images ALTER location DROP DEFAULT;"
sql "ALTER TABLE images ADD COLUMN universal_name VARCHAR(255) NULL AFTER name, CHANGE COLUMN location location VARCHAR(200) NULL AFTER checksum, ADD UNIQUE INDEX universal_name_checksum (universal_name, checksum);"
sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (15, '0.15', '0.4.59', 'add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL', '2016-09-27');"
}
function downgrade_from_15(){
# echo " downgrade database from version 0.15 to version 0.14"
# Reverts upgrade_to_15. The unique index is dropped first because it
# covers universal_name/checksum; columns are dropped afterwards.
# NOTE(review): this will fail if any rows carry NULL in 'location' or
# 'image_path' when they are converted back to NOT NULL — acceptable for
# a manual downgrade, but worth knowing.
echo " remove columns 'universal_name' and 'checksum' from table 'images', remove index universal_name_checksum, change location NOT NULL; change column 'image_path' in table 'vms' to NOT NULL"
sql "ALTER TABLE images DROP INDEX universal_name_checksum;"
sql "ALTER TABLE images ALTER location DROP DEFAULT;"
sql "ALTER TABLE images CHANGE COLUMN location location VARCHAR(200) NOT NULL AFTER checksum;"
sql "ALTER TABLE images DROP COLUMN universal_name;"
sql "ALTER TABLE images DROP COLUMN checksum;"
sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NOT NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
sql "DELETE FROM schema_version WHERE version_int='15';"
}
function upgrade_to_16(){
# echo " upgrade database from version 0.15 to version 0.16"
# Adds a per-VIM 'config' column to datacenter_tenants and widens
# vim_tenant_name/vim_tenant_id from 64/36 to 256 chars each.
echo " add column 'config' at table 'datacenter_tenants', enlarge 'vim_tenant_name/id'"
sql "ALTER TABLE datacenter_tenants ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL AFTER passwd;"
sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(256) NULL DEFAULT NULL AFTER datacenter_id;"
sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(256) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (16, '0.16', '0.5.2', 'enlarge vim_tenant_name and id. New config at datacenter_tenants', '2016-10-11');"
}
function downgrade_from_16(){
# echo " downgrade database from version 0.16 to version 0.15"
# Reverts upgrade_to_16: drops 'config' and shrinks vim_tenant_name/id
# back to their pre-v16 sizes (64/36), then removes the v16 row.
# Fix: corrected "lenght" -> "length" in the progress message.
# NOTE(review): shrinking the columns truncates/fails for values longer
# than the restored sizes — expected risk of a downgrade.
echo " remove column 'config' at table 'datacenter_tenants', restoring length 'vim_tenant_name/id'"
sql "ALTER TABLE datacenter_tenants DROP COLUMN config;"
sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL AFTER datacenter_id;"
sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
sql "DELETE FROM schema_version WHERE version_int='16';"
}
function upgrade_to_17(){
# echo " upgrade database from version 0.16 to version 0.17"
# Adds 'extended' (JSON text for extra devices) to datacenters_flavors.
# NOTE(review): the progress message says 'datacenter_flavors' but the
# actual table name is 'datacenters_flavors' — cosmetic mismatch only.
echo " add column 'extended' at table 'datacenter_flavors'"
sql "ALTER TABLE datacenters_flavors ADD extended varchar(2000) NULL COMMENT 'Extra description json format of additional devices';"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (17, '0.17', '0.5.3', 'Extra description json format of additional devices in datacenter_flavors', '2016-12-20');"
}
function downgrade_from_17(){
# echo " downgrade database from version 0.17 to version 0.16"
# Reverts upgrade_to_17: drops 'extended' from datacenters_flavors and
# removes the v17 row from schema_version.
echo " remove column 'extended' from table 'datacenter_flavors'"
sql "ALTER TABLE datacenters_flavors DROP COLUMN extended;"
sql "DELETE FROM schema_version WHERE version_int='17';"
}
function upgrade_to_18(){
# echo " upgrade database from version 0.17 to version 0.18"
# Adds floating_ip (default off) and port_security (default on) flags to
# both the template table 'interfaces' and the runtime table
# 'instance_interfaces'. The \' sequences in the INSERT keep the quotes
# literal inside the SQL comment string.
echo " add columns 'floating_ip' and 'port_security' at tables 'interfaces' and 'instance_interfaces'"
sql "ALTER TABLE interfaces ADD floating_ip BOOL DEFAULT 0 NOT NULL COMMENT 'Indicates if a floating_ip must be associated to this interface';"
sql "ALTER TABLE interfaces ADD port_security BOOL DEFAULT 1 NOT NULL COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled';"
sql "ALTER TABLE instance_interfaces ADD floating_ip BOOL DEFAULT 0 NOT NULL COMMENT 'Indicates if a floating_ip must be associated to this interface';"
sql "ALTER TABLE instance_interfaces ADD port_security BOOL DEFAULT 1 NOT NULL COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled';"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (18, '0.18', '0.5.4', 'Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'', '2017-01-09');"
}
function downgrade_from_18(){
# echo " downgrade database from version 0.18 to version 0.17"
# Reverts upgrade_to_18: drops the two flag columns from both tables and
# removes the v18 row from schema_version.
echo " remove columns 'floating_ip' and 'port_security' from tables 'interfaces' and 'instance_interfaces'"
sql "ALTER TABLE interfaces DROP COLUMN floating_ip;"
sql "ALTER TABLE interfaces DROP COLUMN port_security;"
sql "ALTER TABLE instance_interfaces DROP COLUMN floating_ip;"
sql "ALTER TABLE instance_interfaces DROP COLUMN port_security;"
sql "DELETE FROM schema_version WHERE version_int='18';"
}
function upgrade_to_19(){
# echo " upgrade database from version 0.18 to version 0.19"
# Adds 'boot_data' (free-form TEXT, e.g. cloud-init content) to 'vms'.
echo " add column 'boot_data' at table 'vms'"
sql "ALTER TABLE vms ADD COLUMN boot_data TEXT NULL DEFAULT NULL AFTER image_path;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (19, '0.19', '0.5.5', 'Extra Boot-data content at VNFC (vms)', '2017-01-11');"
}
function downgrade_from_19(){
# echo " downgrade database from version 0.19 to version 0.18"
# Reverts upgrade_to_19: drops 'boot_data' and removes the v19 row.
echo " remove column 'boot_data' from table 'vms'"
sql "ALTER TABLE vms DROP COLUMN boot_data;"
sql "DELETE FROM schema_version WHERE version_int='19';"
}
function upgrade_to_20(){
# echo " upgrade database from version 0.19 to version 0.20"
# Adds SDN/dataplane connectivity columns: sdn_net_id on instance_nets,
# and sdn_port_id/compute_node/pci/vlan on instance_interfaces.
echo " add column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
sql "ALTER TABLE instance_nets ADD sdn_net_id varchar(36) DEFAULT NULL NULL COMMENT 'Network id in ovim';"
sql "ALTER TABLE instance_interfaces ADD sdn_port_id varchar(36) DEFAULT NULL NULL COMMENT 'Port id in ovim';"
sql "ALTER TABLE instance_interfaces ADD compute_node varchar(100) DEFAULT NULL NULL COMMENT 'Compute node id used to specify the SDN port mapping';"
sql "ALTER TABLE instance_interfaces ADD pci varchar(12) DEFAULT NULL NULL COMMENT 'PCI of the physical port in the host';"
sql "ALTER TABLE instance_interfaces ADD vlan SMALLINT UNSIGNED DEFAULT NULL NULL COMMENT 'VLAN tag used by the port';"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (20, '0.20', '0.5.9', 'Added columns to store dataplane connectivity info', '2017-03-13');"
}
function downgrade_from_20(){
# echo " downgrade database from version 0.20 to version 0.19"
# Reverts upgrade_to_20; columns are dropped in reverse order of their
# addition, then the v20 row is removed.
echo " remove column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
sql "ALTER TABLE instance_nets DROP COLUMN sdn_net_id;"
sql "ALTER TABLE instance_interfaces DROP COLUMN vlan;"
sql "ALTER TABLE instance_interfaces DROP COLUMN pci;"
sql "ALTER TABLE instance_interfaces DROP COLUMN compute_node;"
sql "ALTER TABLE instance_interfaces DROP COLUMN sdn_port_id;"
sql "DELETE FROM schema_version WHERE version_int='20';"
}
function upgrade_to_21(){
# echo " upgrade database from version 0.20 to version 0.21"
# Makes instance_nets.instance_scenario_id nullable (to allow nets not
# tied to an instance scenario) and widens ip_profiles.dns_address to
# hold a semicolon-separated list of DNS servers.
echo " edit 'instance_nets' to allow instance_scenario_id=None"
sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NULL;"
echo " enlarge column 'dns_address' at table 'ip_profiles'"
sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(255) DEFAULT NULL NULL "\
"comment 'dns ip list separated by semicolon';"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (21, '0.21', '0.5.15', 'Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles', '2017-06-02');"
}
function downgrade_from_21(){
# echo " downgrade database from version 0.21 to version 0.20"
# Reverts upgrade_to_21. Rows with a NULL instance_scenario_id are
# deleted first so the NOT NULL constraint can be restored.
echo " edit 'instance_nets' to disallow instance_scenario_id=None"
#Delete all lines with a instance_scenario_id=NULL in order to disable this option
sql "DELETE FROM instance_nets WHERE instance_scenario_id IS NULL;"
sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NOT NULL;"
echo " shorten column 'dns_address' at table 'ip_profiles'"
sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(64) DEFAULT NULL NULL;"
sql "DELETE FROM schema_version WHERE version_int='21';"
}
function upgrade_to_22(){
# echo " upgrade database from version 0.21 to version 0.22"
# Widens flavors.ram from SMALLINT to MEDIUMINT (RAM sizes above 64 GiB
# in MB overflow a SMALLINT).
echo " Changed type of ram in 'flavors' from SMALLINT to MEDIUMINT"
sql "ALTER TABLE flavors CHANGE COLUMN ram ram MEDIUMINT(7) UNSIGNED NULL DEFAULT NULL AFTER disk;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (22, '0.22', '0.5.16', 'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-06-02');"
}
function downgrade_from_22(){
# echo " downgrade database from version 0.22 to version 0.21"
# Reverts upgrade_to_22: narrows flavors.ram back to SMALLINT.
echo " Changed type of ram in 'flavors' from MEDIUMINT to SMALLINT"
sql "ALTER TABLE flavors CHANGE COLUMN ram ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL AFTER disk;"
sql "DELETE FROM schema_version WHERE version_int='22';"
}
function upgrade_to_23(){
# echo " upgrade database from version 0.22 to version 0.23"
# Adds 'availability_zone' to 'vms' and records v23 in schema_version.
# Fix: the schema_version comment previously read 'Changed type of ram
# in flavors from SMALLINT to MEDIUMINT' (copy-paste from v22); it now
# describes what this migration actually does.
echo " add column 'availability_zone' at table 'vms'"
sql "ALTER TABLE vms ADD COLUMN availability_zone VARCHAR(255) NULL AFTER modified_at;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (23, '0.23', '0.5.20',"\
"'Added column availability_zone at table vms', '2017-08-29');"
}
function downgrade_from_23(){
# echo " downgrade database from version 0.23 to version 0.22"
# Reverts upgrade_to_23: drops 'availability_zone' and removes the v23
# row from schema_version.
echo " remove column 'availability_zone' from table 'vms'"
sql "ALTER TABLE vms DROP COLUMN availability_zone;"
sql "DELETE FROM schema_version WHERE version_int='23';"
}
function upgrade_to_24(){
# echo " upgrade database from version 0.23 to version 0.24"
# Adds 'count' (VDU replica count, default 1) to 'vms'.
echo " Add 'count' to table 'vms'"
sql "ALTER TABLE vms ADD COLUMN count SMALLINT NOT NULL DEFAULT '1' AFTER vnf_id;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (24, '0.24', '0.5.21', 'Added vnfd fields', '2017-08-29');"
}
function downgrade_from_24(){
# echo " downgrade database from version 0.24 to version 0.23"
# Reverts upgrade_to_24: drops 'count' and removes the v24 row.
echo " Remove 'count' from table 'vms'"
sql "ALTER TABLE vms DROP COLUMN count;"
sql "DELETE FROM schema_version WHERE version_int='24';"
}
function upgrade_to_25(){
# echo " upgrade database from version 0.24 to version 0.25"
# Adds OSM-model identity fields: osm_id/short_name/vendor on 'vnfs' and
# 'scenarios' (same ALTER applied to both via the loop), mgmt_access on
# 'vnfs', osm_id on 'vms', member_vnf_index on 'sce_vnfs', and
# security_group on 'ip_profiles'.
echo " Add 'osm_id','short_name','vendor' to tables 'vnfs', 'scenarios'"
for table in vnfs scenarios; do
sql "ALTER TABLE $table ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid, "\
"ADD UNIQUE INDEX osm_id_tenant_id (osm_id, tenant_id), "\
"ADD COLUMN short_name VARCHAR(255) NULL AFTER name, "\
"ADD COLUMN vendor VARCHAR(255) NULL AFTER description;"
done
sql "ALTER TABLE vnfs ADD COLUMN mgmt_access VARCHAR(2000) NULL AFTER vendor;"
sql "ALTER TABLE vms ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
sql "ALTER TABLE sce_vnfs ADD COLUMN member_vnf_index SMALLINT(6) NULL DEFAULT NULL AFTER uuid;"
echo " Add 'security_group' to table 'ip_profiles'"
sql "ALTER TABLE ip_profiles ADD COLUMN security_group VARCHAR(255) NULL DEFAULT NULL AFTER dhcp_count;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (25, '0.25', '0.5.22', 'Added osm_id to vnfs,scenarios', '2017-09-01');"
}
function downgrade_from_25(){
# echo " downgrade database from version 0.25 to version 0.24"
# Reverts upgrade_to_25: drops the osm_id index+columns from vnfs and
# scenarios (index first, in the same ALTER), then the remaining columns
# added by the upgrade, and finally the v25 schema_version row.
echo " Remove 'osm_id','short_name','vendor' from tables 'vnfs', 'scenarios'"
for table in vnfs scenarios; do
sql "ALTER TABLE $table DROP INDEX osm_id_tenant_id, DROP COLUMN osm_id, "\
"DROP COLUMN short_name, DROP COLUMN vendor;"
done
sql "ALTER TABLE vnfs DROP COLUMN mgmt_access;"
sql "ALTER TABLE vms DROP COLUMN osm_id;"
sql "ALTER TABLE sce_vnfs DROP COLUMN member_vnf_index;"
echo " Remove 'security_group' from table 'ip_profiles'"
sql "ALTER TABLE ip_profiles DROP COLUMN security_group;"
sql "DELETE FROM schema_version WHERE version_int='25';"
}
function upgrade_to_26(){
# v25 -> v26. Several coupled changes: name datacenter_tenants rows,
# add SCHEDULED_* states, repoint datacenters_flavors/images to
# datacenter_tenants, decouple instance_* tables from their templates
# (FKs become ON DELETE SET NULL), and create the task-queue tables
# instance_actions / vim_actions.
echo " Add name to table datacenter_tenants"
sql "ALTER TABLE datacenter_tenants ADD COLUMN name VARCHAR(255) NULL AFTER uuid;"
# Backfill the new name from the owning datacenter's name.
sql "UPDATE datacenter_tenants as dt join datacenters as d on dt.datacenter_id = d.uuid set dt.name=d.name;"
echo " Add 'SCHEDULED' to 'status' at tables 'instance_nets', 'instance_vms'"
sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
"'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') "\
"NOT NULL DEFAULT 'BUILD';"
sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','DOWN','BUILD','ERROR',"\
"'VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD';"
echo " Enlarge pci at instance_interfaces to allow extended pci for SDN por mapping"
sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(50) NULL DEFAULT NULL COMMENT 'PCI of the "\
"physical port in the host' AFTER compute_node;"
# Re-key datacenters_flavors / datacenters_images from datacenter_id to
# datacenter_vim_id (a datacenter_tenants uuid); entries without a
# matching datacenter_tenant are dropped before the NOT NULL change.
for t in flavor image; do
echo " Change 'datacenters_${t}s' to point to datacenter_tenant, add status, vim_info"
sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_vim_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
"datacenter_id, ADD COLUMN status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED',"\
"'SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD' AFTER vim_id, ADD COLUMN vim_info "\
"TEXT NULL AFTER status;"
sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.datacenter_id=df.datacenter_id "\
"set df.datacenter_vim_id=dt.uuid;"
sql "DELETE FROM datacenters_${t}s WHERE datacenter_vim_id is NULL;"
sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_vim_id datacenter_vim_id VARCHAR(36) NOT NULL;"
sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK_datacenters_${t}s_datacenter_tenants FOREIGN KEY "\
"(datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE;"
# ${t:0:1} -> 'f'/'i': drops the legacy FK named FK__datacenters_f / FK__datacenters_i.
sql "ALTER TABLE datacenters_${t}s DROP FOREIGN KEY FK__datacenters_${t:0:1};"
sql "ALTER TABLE datacenters_${t}s DROP COLUMN datacenter_id;"
done
echo " Decoupling 'instance_interfaces' from scenarios/vnfs to allow scale actions"
sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT NULL;"
sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NULL DEFAULT NULL;"
# NOTE(review): this sql string has no trailing ';' unlike its siblings —
# harmless if the helper executes each string as one statement; confirm.
sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
"REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
echo " Decoupling 'instance_vms' from scenarios/vnfs to allow scale actions"
sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(128) NULL DEFAULT NULL;"
sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NULL DEFAULT NULL;"
sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
"REFERENCES vms (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
echo " Decoupling 'instance_nets' from scenarios/vnfs to allow scale actions"
sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL;"
echo " Decoupling 'instance_scenarios' from scenarios"
sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NULL DEFAULT NULL;"
sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
"REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
echo " Create table instance_actions, vim_actions"
sql "CREATE TABLE IF NOT EXISTS instance_actions (
uuid VARCHAR(36) NOT NULL,
tenant_id VARCHAR(36) NULL DEFAULT NULL,
instance_id VARCHAR(36) NULL DEFAULT NULL,
description VARCHAR(64) NULL DEFAULT NULL COMMENT 'CREATE, DELETE, SCALE OUT/IN, ...',
number_tasks SMALLINT(6) NOT NULL DEFAULT '1',
number_done SMALLINT(6) NOT NULL DEFAULT '0',
number_failed SMALLINT(6) NOT NULL DEFAULT '0',
created_at DOUBLE NOT NULL,
modified_at DOUBLE NULL DEFAULT NULL,
PRIMARY KEY (uuid),
INDEX FK_actions_tenants (tenant_id),
CONSTRAINT FK_actions_tenant FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
COMMENT='Contains client actions over instances'
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
sql "CREATE TABLE IF NOT EXISTS vim_actions (
instance_action_id VARCHAR(36) NOT NULL,
task_index INT(6) NOT NULL,
datacenter_vim_id VARCHAR(36) NOT NULL,
vim_id VARCHAR(64) NULL DEFAULT NULL,
action VARCHAR(36) NOT NULL COMMENT 'CREATE,DELETE,START,STOP...',
item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored',
item_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'uuid of the entry in the table',
status ENUM('SCHEDULED', 'BUILD', 'DONE', 'FAILED', 'SUPERSEDED') NOT NULL DEFAULT 'SCHEDULED',
extra TEXT NULL DEFAULT NULL COMMENT 'json with params:, depends_on: for the task',
error_msg VARCHAR(1024) NULL DEFAULT NULL,
created_at DOUBLE NOT NULL,
modified_at DOUBLE NULL DEFAULT NULL,
PRIMARY KEY (task_index, instance_action_id),
INDEX FK_actions_instance_actions (instance_action_id),
CONSTRAINT FK_actions_instance_actions FOREIGN KEY (instance_action_id) REFERENCES instance_actions (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
INDEX FK_actions_vims (datacenter_vim_id),
CONSTRAINT FK_actions_vims FOREIGN KEY (datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
COMMENT='Table with the individual VIM actions.'
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (26, '0.26', '0.5.23', 'Several changes', '2017-09-09');"
}
function downgrade_from_26(){
# Reverts upgrade_to_26: removes name/SCHEDULED states, re-keys
# datacenters_flavors/images back to datacenter_id, restores the strict
# (NOT NULL, no SET NULL) FKs on the instance_* tables, and drops the
# task-queue tables. Rows with NULL vim ids are given '' so the NOT NULL
# conversions succeed.
echo " Remove name from table datacenter_tenants"
sql "ALTER TABLE datacenter_tenants DROP COLUMN name;"
echo " Remove 'SCHEDULED' from the 'status' at tables 'instance_nets', 'instance_vms'"
sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
"'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR',"\
"'INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD';"
echo " Shorten back pci at instance_interfaces to allow extended pci for SDN por mapping"
sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(12) NULL DEFAULT NULL COMMENT 'PCI of the "\
"physical port in the host' AFTER compute_node;"
for t in flavor image; do
echo " Restore back 'datacenters_${t}s'"
sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
"${t}_id, DROP COLUMN status, DROP COLUMN vim_info ;"
sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.uuid=df.datacenter_vim_id set "\
"df.datacenter_id=dt.datacenter_id;"
sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL;"
sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK__datacenters_${t:0:1} FOREIGN KEY "\
"(datacenter_id) REFERENCES datacenters (uuid), DROP FOREIGN KEY FK_datacenters_${t}s_datacenter_tenants, "\
"DROP COLUMN datacenter_vim_id;"
done
echo " Restore back 'instance_interfaces' coupling to scenarios/vnfs"
sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL;"
# NOTE(review): no trailing ';' in the sql string below — matches the
# same pattern in upgrade_to_26; harmless if each string is run as a
# single statement.
sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NOT NULL;"
sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
"REFERENCES interfaces (uuid);"
echo " Restore back 'instance_vms' coupling to scenarios/vnfs"
# NOTE(review): the next echo ("Decoupling ...") looks like a leftover
# copy-paste from upgrade_to_26 — this function re-couples, not decouples.
echo " Decoupling 'instance vms' from scenarios/vnfs to allow scale actions"
sql "UPDATE instance_vms SET vim_vm_id='' WHERE vim_vm_id is NULL;"
sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NOT NULL;"
sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NOT NULL;"
sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
"REFERENCES vms (uuid);"
echo " Restore back 'instance_nets' coupling to scenarios/vnfs"
sql "UPDATE instance_nets SET vim_net_id='' WHERE vim_net_id is NULL;"
sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL;"
echo " Restore back 'instance_scenarios' coupling to scenarios"
sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NOT NULL;"
sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
"REFERENCES scenarios (uuid);"
echo " Delete table instance_actions"
sql "DROP TABLE IF EXISTS vim_actions"
sql "DROP TABLE IF EXISTS instance_actions"
sql "DELETE FROM schema_version WHERE version_int='26';"
}
function upgrade_to_27(){
# v26 -> v27. Adds the RO keypair columns (encrypted private key and
# public key) to nfvo_tenants and records v27 in schema_version.
echo " Added 'encrypted_RO_priv_key','RO_pub_key' to table 'nfvo_tenants'"
sql "ALTER TABLE nfvo_tenants ADD COLUMN encrypted_RO_priv_key VARCHAR(2000) NULL AFTER description;"
sql "ALTER TABLE nfvo_tenants ADD COLUMN RO_pub_key VARCHAR(510) NULL AFTER encrypted_RO_priv_key;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (27, '0.27', '0.5.25', 'Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants', '2017-09-29');"
}
function downgrade_from_27(){
# Reverts upgrade_to_27: drops the two RO keypair columns and removes
# the v27 row from schema_version.
echo " Remove 'encrypted_RO_priv_key','RO_pub_key' from table 'nfvo_tenants'"
sql "ALTER TABLE nfvo_tenants DROP COLUMN encrypted_RO_priv_key;"
sql "ALTER TABLE nfvo_tenants DROP COLUMN RO_pub_key;"
sql "DELETE FROM schema_version WHERE version_int='27';"
}
function upgrade_to_28(){
# v27 -> v28. Creates the VNF Forwarding Graph (VNFFG) template tables
# (sce_vnffgs, sce_rsps, sce_rsp_hops, sce_classifiers,
# sce_classifier_matches), their runtime SFC counterparts
# (instance_sfis, instance_sfs, instance_classifications,
# instance_sfps), and extends vim_actions.item with the new item kinds.
echo " [Adding necessary tables for VNFFG]"
echo " Adding sce_vnffgs"
# NOTE(review): FK_scenarios_vnffg below constrains column tenant_id
# against scenarios(uuid) — the index above it uses scenario_id, so this
# looks like it should be FOREIGN KEY (scenario_id); confirm against the
# upstream schema before changing a shipped migration.
sql "CREATE TABLE IF NOT EXISTS sce_vnffgs (
uuid VARCHAR(36) NOT NULL,
tenant_id VARCHAR(36) NULL DEFAULT NULL,
name VARCHAR(255) NOT NULL,
description VARCHAR(255) NULL DEFAULT NULL,
vendor VARCHAR(255) NULL DEFAULT NULL,
scenario_id VARCHAR(36) NOT NULL,
created_at DOUBLE NOT NULL,
modified_at DOUBLE NULL DEFAULT NULL,
PRIMARY KEY (uuid),
INDEX FK_scenarios_sce_vnffg (scenario_id),
CONSTRAINT FK_scenarios_vnffg FOREIGN KEY (tenant_id) REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
echo " Adding sce_rsps"
sql "CREATE TABLE IF NOT EXISTS sce_rsps (
uuid VARCHAR(36) NOT NULL,
tenant_id VARCHAR(36) NULL DEFAULT NULL,
name VARCHAR(255) NOT NULL,
sce_vnffg_id VARCHAR(36) NOT NULL,
created_at DOUBLE NOT NULL,
modified_at DOUBLE NULL DEFAULT NULL,
PRIMARY KEY (uuid),
INDEX FK_sce_vnffgs_rsp (sce_vnffg_id),
CONSTRAINT FK_sce_vnffgs_rsp FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
echo " Adding sce_rsp_hops"
sql "CREATE TABLE IF NOT EXISTS sce_rsp_hops (
uuid VARCHAR(36) NOT NULL,
if_order INT DEFAULT 0 NOT NULL,
interface_id VARCHAR(36) NOT NULL,
sce_vnf_id VARCHAR(36) NOT NULL,
sce_rsp_id VARCHAR(36) NOT NULL,
created_at DOUBLE NOT NULL,
modified_at DOUBLE NULL DEFAULT NULL,
PRIMARY KEY (uuid),
INDEX FK_interfaces_rsp_hop (interface_id),
INDEX FK_sce_vnfs_rsp_hop (sce_vnf_id),
INDEX FK_sce_rsps_rsp_hop (sce_rsp_id),
CONSTRAINT FK_interfaces_rsp_hop FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
CONSTRAINT FK_sce_vnfs_rsp_hop FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
CONSTRAINT FK_sce_rsps_rsp_hop FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
echo " Adding sce_classifiers"
sql "CREATE TABLE IF NOT EXISTS sce_classifiers (
uuid VARCHAR(36) NOT NULL,
tenant_id VARCHAR(36) NULL DEFAULT NULL,
name VARCHAR(255) NOT NULL,
sce_vnffg_id VARCHAR(36) NOT NULL,
sce_rsp_id VARCHAR(36) NOT NULL,
sce_vnf_id VARCHAR(36) NOT NULL,
interface_id VARCHAR(36) NOT NULL,
created_at DOUBLE NOT NULL,
modified_at DOUBLE NULL DEFAULT NULL,
PRIMARY KEY (uuid),
INDEX FK_sce_vnffgs_classifier (sce_vnffg_id),
INDEX FK_sce_rsps_classifier (sce_rsp_id),
INDEX FK_sce_vnfs_classifier (sce_vnf_id),
INDEX FK_interfaces_classifier (interface_id),
CONSTRAINT FK_sce_vnffgs_classifier FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
CONSTRAINT FK_sce_rsps_classifier FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
CONSTRAINT FK_sce_vnfs_classifier FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
CONSTRAINT FK_interfaces_classifier FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
echo " Adding sce_classifier_matches"
sql "CREATE TABLE IF NOT EXISTS sce_classifier_matches (
uuid VARCHAR(36) NOT NULL,
ip_proto VARCHAR(2) NOT NULL,
source_ip VARCHAR(16) NOT NULL,
destination_ip VARCHAR(16) NOT NULL,
source_port VARCHAR(5) NOT NULL,
destination_port VARCHAR(5) NOT NULL,
sce_classifier_id VARCHAR(36) NOT NULL,
created_at DOUBLE NOT NULL,
modified_at DOUBLE NULL DEFAULT NULL,
PRIMARY KEY (uuid),
INDEX FK_classifiers_classifier_match (sce_classifier_id),
CONSTRAINT FK_sce_classifiers_classifier_match FOREIGN KEY (sce_classifier_id) REFERENCES sce_classifiers (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
echo " [Adding necessary tables for VNFFG-SFC instance mapping]"
echo " Adding instance_sfis"
# The four instance_* tables below share the same shape: link to the
# instance scenario (CASCADE), to the template row (SET NULL), and to
# the datacenter/datacenter_tenant, plus status/error/vim_info fields.
sql "CREATE TABLE IF NOT EXISTS instance_sfis (
uuid varchar(36) NOT NULL,
instance_scenario_id varchar(36) NOT NULL,
vim_sfi_id varchar(36) DEFAULT NULL,
sce_rsp_hop_id varchar(36) DEFAULT NULL,
datacenter_id varchar(36) DEFAULT NULL,
datacenter_tenant_id varchar(36) DEFAULT NULL,
status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
error_msg varchar(1024) DEFAULT NULL,
vim_info text,
created_at double NOT NULL,
modified_at double DEFAULT NULL,
PRIMARY KEY (uuid),
KEY FK_instance_sfis_instance_scenarios (instance_scenario_id),
KEY FK_instance_sfis_sce_rsp_hops (sce_rsp_hop_id),
KEY FK_instance_sfis_datacenters (datacenter_id),
KEY FK_instance_sfis_datacenter_tenants (datacenter_tenant_id),
CONSTRAINT FK_instance_sfis_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
CONSTRAINT FK_instance_sfis_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
CONSTRAINT FK_instance_sfis_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT FK_instance_sfis_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
echo " Adding instance_sfs"
sql "CREATE TABLE IF NOT EXISTS instance_sfs (
uuid varchar(36) NOT NULL,
instance_scenario_id varchar(36) NOT NULL,
vim_sf_id varchar(36) DEFAULT NULL,
sce_rsp_hop_id varchar(36) DEFAULT NULL,
datacenter_id varchar(36) DEFAULT NULL,
datacenter_tenant_id varchar(36) DEFAULT NULL,
status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
error_msg varchar(1024) DEFAULT NULL,
vim_info text,
created_at double NOT NULL,
modified_at double DEFAULT NULL,
PRIMARY KEY (uuid),
KEY FK_instance_sfs_instance_scenarios (instance_scenario_id),
KEY FK_instance_sfs_sce_rsp_hops (sce_rsp_hop_id),
KEY FK_instance_sfs_datacenters (datacenter_id),
KEY FK_instance_sfs_datacenter_tenants (datacenter_tenant_id),
CONSTRAINT FK_instance_sfs_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
CONSTRAINT FK_instance_sfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
CONSTRAINT FK_instance_sfs_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT FK_instance_sfs_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
echo " Adding instance_classifications"
sql "CREATE TABLE IF NOT EXISTS instance_classifications (
uuid varchar(36) NOT NULL,
instance_scenario_id varchar(36) NOT NULL,
vim_classification_id varchar(36) DEFAULT NULL,
sce_classifier_match_id varchar(36) DEFAULT NULL,
datacenter_id varchar(36) DEFAULT NULL,
datacenter_tenant_id varchar(36) DEFAULT NULL,
status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
error_msg varchar(1024) DEFAULT NULL,
vim_info text,
created_at double NOT NULL,
modified_at double DEFAULT NULL,
PRIMARY KEY (uuid),
KEY FK_instance_classifications_instance_scenarios (instance_scenario_id),
KEY FK_instance_classifications_sce_classifier_matches (sce_classifier_match_id),
KEY FK_instance_classifications_datacenters (datacenter_id),
KEY FK_instance_classifications_datacenter_tenants (datacenter_tenant_id),
CONSTRAINT FK_instance_classifications_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
CONSTRAINT FK_instance_classifications_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
CONSTRAINT FK_instance_classifications_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT FK_instance_classifications_sce_classifier_matches FOREIGN KEY (sce_classifier_match_id) REFERENCES sce_classifier_matches (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
echo " Adding instance_sfps"
sql "CREATE TABLE IF NOT EXISTS instance_sfps (
uuid varchar(36) NOT NULL,
instance_scenario_id varchar(36) NOT NULL,
vim_sfp_id varchar(36) DEFAULT NULL,
sce_rsp_id varchar(36) DEFAULT NULL,
datacenter_id varchar(36) DEFAULT NULL,
datacenter_tenant_id varchar(36) DEFAULT NULL,
status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
error_msg varchar(1024) DEFAULT NULL,
vim_info text,
created_at double NOT NULL,
modified_at double DEFAULT NULL,
PRIMARY KEY (uuid),
KEY FK_instance_sfps_instance_scenarios (instance_scenario_id),
KEY FK_instance_sfps_sce_rsps (sce_rsp_id),
KEY FK_instance_sfps_datacenters (datacenter_id),
KEY FK_instance_sfps_datacenter_tenants (datacenter_tenant_id),
CONSTRAINT FK_instance_sfps_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
CONSTRAINT FK_instance_sfps_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
CONSTRAINT FK_instance_sfps_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT FK_instance_sfps_sce_rsps FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
COLLATE='utf8_general_ci'
ENGINE=InnoDB;"
echo " [Altering vim_actions table]"
# NOTE(review): no trailing ';' in the sql string below — same pattern as
# other single-statement calls in this script.
sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_sfis','instance_sfs','instance_classifications','instance_sfps') NOT NULL COMMENT 'table where the item is stored'"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (28, '0.28', '0.5.28', 'Adding VNFFG-related tables', '2017-11-20');"
}
function downgrade_from_28(){
# Revert upgrade_to_28: drop the VNFFG instance tables first (they carry the
# foreign keys), then the scenario-level VNFFG/SFC tables, restore the
# original vim_actions 'item' enum, and remove the schema_version row.
echo " [Undo adding the VNFFG tables]"
echo " Dropping instance_sfps"
sql "DROP TABLE IF EXISTS instance_sfps;"
# Fixed log message: the table dropped here is instance_classifications,
# not sce_classifications (which is dropped as sce_classifiers below).
echo " Dropping instance_classifications"
sql "DROP TABLE IF EXISTS instance_classifications;"
echo " Dropping instance_sfs"
sql "DROP TABLE IF EXISTS instance_sfs;"
echo " Dropping instance_sfis"
sql "DROP TABLE IF EXISTS instance_sfis;"
# Fixed log ordering: the section header is printed before the per-table
# message it introduces (it previously appeared between the message and
# the DROP it described).
echo " [Undo adding the VNFFG-SFC instance mapping tables]"
echo " Dropping sce_classifier_matches"
sql "DROP TABLE IF EXISTS sce_classifier_matches;"
echo " Dropping sce_classifiers"
sql "DROP TABLE IF EXISTS sce_classifiers;"
echo " Dropping sce_rsp_hops"
sql "DROP TABLE IF EXISTS sce_rsp_hops;"
echo " Dropping sce_rsps"
sql "DROP TABLE IF EXISTS sce_rsps;"
echo " Dropping sce_vnffgs"
sql "DROP TABLE IF EXISTS sce_vnffgs;"
echo " [Altering vim_actions table]"
sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored'"
sql "DELETE FROM schema_version WHERE version_int='28';"
}
# v29 (OSM 0.5.59): member_vnf_index becomes a string to match the information
# model; nets and sce_nets gain an osm_id column.
function upgrade_to_29(){
echo " Change 'member_vnf_index' from int to str at 'sce_vnfs'"
sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index VARCHAR(255) NULL DEFAULT NULL AFTER uuid;"
echo " Add osm_id to 'nets's and 'sce_nets'"
sql "ALTER TABLE nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
sql "ALTER TABLE sce_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (29, '0.29', '0.5.59', 'Change member_vnf_index to str accordingly to the model', '2018-04-11');"
}
# Revert v29.  NOTE(review): the original column type before v29 is assumed to
# be SMALLINT here — confirm against upgrade history if data loss matters.
function downgrade_from_29(){
echo " Change back 'member_vnf_index' from str to int at 'sce_vnfs'"
sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index SMALLINT NULL DEFAULT NULL AFTER uuid;"
echo " Remove osm_id from 'nets's and 'sce_nets'"
sql "ALTER TABLE nets DROP COLUMN osm_id;"
sql "ALTER TABLE sce_nets DROP COLUMN osm_id;"
sql "DELETE FROM schema_version WHERE version_int='29';"
}
# v30 (OSM 0.5.60): vms.image_list stores alternative images for a VDU.
function upgrade_to_30(){
echo " Add 'image_list' at 'vms' to allocate alternative images"
sql "ALTER TABLE vms ADD COLUMN image_list TEXT NULL COMMENT 'Alternative images' AFTER image_id;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (30, '0.30', '0.5.60', 'Add image_list to vms', '2018-04-24');"
}
# Revert v30.
function downgrade_from_30(){
echo " Remove back 'image_list' from 'vms' to allocate alternative images"
sql "ALTER TABLE vms DROP COLUMN image_list;"
sql "DELETE FROM schema_version WHERE version_int='30';"
}
# v31 (OSM 0.5.61): sce_nets.vim_network_name pins a scenario net to an
# existing VIM network by name.
function upgrade_to_31(){
echo " Add 'vim_network_name' at 'sce_nets'"
sql "ALTER TABLE sce_nets ADD COLUMN vim_network_name VARCHAR(255) NULL DEFAULT NULL AFTER description;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (31, '0.31', '0.5.61', 'Add vim_network_name to sce_nets', '2018-05-03');"
}
# Revert v31.
function downgrade_from_31(){
echo " Remove back 'vim_network_name' from 'sce_nets'"
sql "ALTER TABLE sce_nets DROP COLUMN vim_network_name;"
sql "DELETE FROM schema_version WHERE version_int='31';"
}
# v32 (OSM 0.5.70): instance_vms.vim_name records the VM name at the VIM.
function upgrade_to_32(){
echo " Add 'vim_name' to 'instance_vms'"
sql "ALTER TABLE instance_vms ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_vm_id;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (32, '0.32', '0.5.70', 'Add vim_name to instance vms', '2018-06-28');"
}
# Revert v32.
function downgrade_from_32(){
echo " Remove back 'vim_name' from 'instance_vms'"
sql "ALTER TABLE instance_vms DROP COLUMN vim_name;"
sql "DELETE FROM schema_version WHERE version_int='32';"
}
# v33 (OSM 0.5.82): adds vms.pdu_type AND (not mentioned by the echo)
# instance_nets.vim_name.
function upgrade_to_33(){
echo " Add PDU information to 'vms'"
sql "ALTER TABLE vms ADD COLUMN pdu_type VARCHAR(255) NULL DEFAULT NULL AFTER osm_id;"
sql "ALTER TABLE instance_nets ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_net_id;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (33, '0.33', '0.5.82', 'Add pdu information to vms', '2018-11-13');"
}
# Revert v33 (drops both columns added by upgrade_to_33).
function downgrade_from_33(){
echo " Remove back PDU information from 'vms'"
sql "ALTER TABLE vms DROP COLUMN pdu_type;"
sql "ALTER TABLE instance_nets DROP COLUMN vim_name;"
sql "DELETE FROM schema_version WHERE version_int='33';"
}
# Template pair for a future migration.  Not wired into the numbered
# upgrade/downgrade chain (migrate only calls upgrade_to_<N>), and it does
# not touch schema_version — keep it as a skeleton only.
function upgrade_to_X(){
echo " change 'datacenter_nets'"
sql "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);"
}
# Reverse of upgrade_to_X (also unused).
function downgrade_from_X(){
echo " Change back 'datacenter_nets'"
sql "ALTER TABLE datacenter_nets DROP COLUMN vim_tenant_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id);"
}
# v34: WIM feature tables are large enough to live in a separate .sql file
# under ${DBUTILS}/migrations/up; 'source' makes the client execute it.
function upgrade_to_34() {
echo " Create databases required for WIM features"
script="$(find "${DBUTILS}/migrations/up" -iname "34*.sql" | tail -1)"
sql "source ${script}"
}
# Revert v34 from the matching file under migrations/down.
function downgrade_from_34() {
echo " Drop databases required for WIM features"
script="$(find "${DBUTILS}/migrations/down" -iname "34*.sql" | tail -1)"
sql "source ${script}"
}
# v35: further WIM changes, applied from 35*.sql.
# NOTE(review): the echo is identical to v34's — presumably copy-pasted; the
# script actually applied is the 35*.sql file.
function upgrade_to_35(){
echo " Create databases required for WIM features"
script="$(find "${DBUTILS}/migrations/up" -iname "35*.sql" | tail -1)"
sql "source ${script}"
}
# Revert v35.
function downgrade_from_35(){
echo " Drop databases required for WIM features"
script="$(find "${DBUTILS}/migrations/down" -iname "35*.sql" | tail -1)"
sql "source ${script}"
}
# v36 (OSM 0.6.03): image_id becomes nullable so PDUs can be modelled as VMs
# without an image; wims/wim_accounts config widened to TEXT.
function upgrade_to_36(){
echo " Allow null for image_id at 'vms'"
sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NULL COMMENT 'Link to image table' AFTER " \
"flavor_id;"
echo " Enlarge config at 'wims' and 'wim_accounts'"
sql "ALTER TABLE wims CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER wim_url;"
sql "ALTER TABLE wim_accounts CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER password;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
"VALUES (36, '0.36', '0.6.03', 'Allow vm without image_id for PDUs', '2018-12-19');"
}
# Revert v36.  The config TEXT widening is deliberately NOT reverted (see
# inline comment below) — narrowing back could truncate data.
function downgrade_from_36(){
echo " Force back not null for image_id at 'vms'"
sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER " \
"flavor_id;"
# For downgrade do not restore wims/wim_accounts config to varchar 4000
sql "DELETE FROM schema_version WHERE version_int='36';"
}
# v37 (OSM 0.6.09): extends the vim_wim_actions item enum with the SFC and
# instance_wim_nets tags.
function upgrade_to_37(){
echo " Adding the enum tags for SFC"
sql "ALTER TABLE vim_wim_actions " \
"MODIFY COLUMN item " \
"ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces'," \
"'instance_sfis','instance_sfs','instance_classifications','instance_sfps','instance_wim_nets') " \
"NOT NULL COMMENT 'table where the item is stored';"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
"VALUES (37, '0.37', '0.6.09', 'Adding the enum tags for SFC', '2019-02-07');"
}
# v37 is intentionally not reversible: narrowing the enum back would
# reintroduce the pre-v37 bug.  Only the schema_version row is removed.
function downgrade_from_37(){
echo " Adding the enum tags for SFC isn't going to be reversed"
# It doesn't make sense to reverse to a bug state.
sql "DELETE FROM schema_version WHERE version_int='37';"
}
# v38 (OSM 0.6.11): adds worker/related tracking columns to vim_wim_actions
# (plus the new FINISHED status), adds instance_nets.osm_id, and gives every
# instance_* table a 'related' column initialised to its own uuid.
function upgrade_to_38(){
echo " Change vim_wim_actions, add worker, related"
sql "ALTER TABLE vim_wim_actions ADD COLUMN worker VARCHAR(64) NULL AFTER task_index, " \
"ADD COLUMN related VARCHAR(36) NULL AFTER worker, " \
"CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED','FINISHED') " \
"NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
sql "UPDATE vim_wim_actions set related=item_id;"
echo " Change DONE to FINISHED when DELETE has been completed"
# A CREATE/FIND action whose matching DELETE already completed is terminal:
# mark both rows FINISHED so workers skip them.
sql "UPDATE vim_wim_actions as v1 join vim_wim_actions as v2 on (v1.action='CREATE' or v1.action='FIND') and " \
"v2.action='DELETE' and (v2.status='SUPERSEDED' or v2.status='DONE') and v1.item_id=v2.item_id " \
"SET v1.status='FINISHED', v2.status='FINISHED';"
echo " Add osm_id to instance_nets"
sql "ALTER TABLE instance_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
echo " Add related to instance_xxxx"
for table in instance_classifications instance_nets instance_sfis instance_sfps instance_sfs \
instance_vms
do
sql "ALTER TABLE $table ADD COLUMN related VARCHAR(36) NULL AFTER vim_info;"
sql "UPDATE $table set related=uuid;"
done
# instance_wim_nets is handled separately: its info column is wim_info,
# not vim_info, so the generic loop above cannot place the column.
sql "ALTER TABLE instance_wim_nets ADD COLUMN related VARCHAR(36) NULL AFTER wim_info;"
sql "UPDATE instance_wim_nets set related=uuid;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
"VALUES (38, '0.38', '0.6.11', 'Adding related to vim_wim_actions', '2019-03-07');"
}
# Revert v38: fold FINISHED back into DONE before narrowing the enum, then
# drop the added columns.
function downgrade_from_38(){
echo " Change vim_wim_actions, delete worker, related"
sql "UPDATE vim_wim_actions SET status='DONE' WHERE status='FINISHED';"
sql "ALTER TABLE vim_wim_actions DROP COLUMN worker, DROP COLUMN related, " \
"CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') " \
"NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
echo " Remove related from instance_xxxx"
for table in instance_classifications instance_nets instance_wim_nets instance_sfis instance_sfps instance_sfs \
instance_vms
do
sql "ALTER TABLE $table DROP COLUMN related;"
done
echo " Remove osm_id from instance_nets"
sql "ALTER TABLE instance_nets DROP COLUMN osm_id;"
sql "DELETE FROM schema_version WHERE version_int='38';"
}
# v39 (OSM 0.6.20): widens every VIM/WIM object-id column to VARCHAR(300)
# (some VIM backends return ids longer than 36/64/128 chars).  The unique
# index on instance_vms.vim_vm_id is dropped because a 300-char column
# exceeds the index key limit under utf8.
function upgrade_to_39(){
echo " Enlarge vim_id to 300 at all places"
sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NOT NULL AFTER name;"
sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(300)" \
" NULL DEFAULT NULL AFTER instance_scenario_id;"
sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(300) NULL DEFAULT " \
" NULL AFTER interface_id;"
sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NULL DEFAULT NULL" \
" AFTER osm_id;"
sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(300) NULL DEFAULT NULL" \
" AFTER instance_scenario_id;"
sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(300) NULL DEFAULT NULL" \
" AFTER instance_scenario_id;"
sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(300) NULL DEFAULT NULL" \
" AFTER instance_scenario_id;"
sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(300) NULL DEFAULT NULL" \
" AFTER instance_vnf_id, DROP INDEX vim_vm_id;"
sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(300) NULL DEFAULT NULL" \
" COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(300) NULL DEFAULT NULL" \
" AFTER datacenter_vim_id;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
"VALUES (39, '0.39', '0.6.20', 'Enlarge vim_id to 300 at all places', '2019-05-23');"
}
# Revert v39: restore the pre-v39 lengths (they differ per table) and
# re-create the unique index on instance_vms.vim_vm_id.  Narrowing may
# truncate ids longer than the restored width.
function downgrade_from_39(){
echo " Set vim_id to original lenght at all places"
sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL AFTER name;"
sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(36)" \
" NULL DEFAULT NULL AFTER instance_scenario_id;"
sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT " \
" NULL AFTER interface_id;"
sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL" \
" AFTER osm_id;"
sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(36) NULL DEFAULT NULL" \
" AFTER instance_scenario_id;"
sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(36) NULL DEFAULT NULL" \
" AFTER instance_scenario_id;"
sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(36) NULL DEFAULT NULL" \
" AFTER instance_scenario_id;"
sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NULL DEFAULT NULL" \
" AFTER instance_vnf_id, ADD UNIQUE INDEX vim_vm_id (vim_vm_id);"
sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(128) NULL DEFAULT NULL" \
" COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(64) NULL DEFAULT NULL" \
" AFTER datacenter_vim_id;"
sql "DELETE FROM schema_version WHERE version_int='39';"
}
# v40 (OSM 6.0.4): SDN rework — instance_interfaces gains wim-net linkage and
# timestamps, wim_accounts 'created' becomes 'sdn', wim_port_mappings is
# renamed from pop_*/wan_* to device_*/service_* and gains switch columns,
# and the unique-name constraint on datacenters is dropped.
function upgrade_to_40(){
echo " Adding instance_wim_net_id, created_at, modified_at at 'instance_interfaces'"
sql "ALTER TABLE instance_interfaces ADD COLUMN instance_wim_net_id VARCHAR(36) NULL AFTER instance_net_id, " \
"ADD COLUMN model VARCHAR(12) NULL DEFAULT NULL AFTER type, " \
"ADD COLUMN created_at DOUBLE NULL DEFAULT NULL AFTER vlan, " \
"ADD COLUMN modified_at DOUBLE NULL DEFAULT NULL AFTER created_at;"
echo " Adding sdn to 'instance_wim_nets'"
sql "ALTER TABLE instance_wim_nets ADD COLUMN sdn ENUM('true','false') NOT NULL DEFAULT 'false' AFTER created;"
echo " Change from created to sdn at 'wim_accounts'"
sql "ALTER TABLE wim_accounts CHANGE COLUMN created sdn ENUM('true','false') NOT NULL DEFAULT 'false' AFTER wim_id;"
echo " Remove unique_datacenter_port_mapping at 'wim_port_mappings'"
sql "ALTER TABLE wim_port_mappings DROP INDEX unique_datacenter_port_mapping;"
echo " change 'wim_port_mappings' pop_x to device_x, adding switch_dpid, switch_port"
# Defaults must be dropped before the columns can be renamed/made nullable.
sql "ALTER TABLE wim_port_mappings ALTER pop_switch_dpid DROP DEFAULT, ALTER pop_switch_port DROP DEFAULT;"
sql "ALTER TABLE wim_port_mappings CHANGE COLUMN pop_switch_dpid device_id VARCHAR(64) NULL AFTER datacenter_id," \
" CHANGE COLUMN pop_switch_port device_interface_id VARCHAR(64) NULL AFTER device_id, " \
" CHANGE COLUMN wan_service_endpoint_id service_endpoint_id VARCHAR(256) NOT NULL AFTER device_interface_id, " \
" CHANGE COLUMN wan_service_mapping_info service_mapping_info TEXT NULL AFTER service_endpoint_id, " \
" ADD COLUMN switch_dpid VARCHAR(64) NULL AFTER service_endpoint_id," \
" ADD COLUMN switch_port VARCHAR(64) NULL AFTER switch_dpid;"
echo " remove unique name to 'datacenters'"
sql "ALTER TABLE datacenters DROP INDEX name;"
# Fixed typo in the recorded migration comment ('Chagnes to SDN ').
# downgrade_from_40 matches on version_int, so the text is informational only.
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
"VALUES (40, '0.40', '6.0.4', 'Changes to SDN', '2019-10-23');"
}
# Revert v40: drop the added columns, rename device_*/service_* back to
# pop_*/wan_*, and restore the unique indexes on wim_port_mappings and
# datacenters.name.
function downgrade_from_40(){
echo " Removing instance_wim_net_id, created_at, modified_at from 'instance_interfaces'"
sql "ALTER TABLE instance_interfaces DROP COLUMN instance_wim_net_id, DROP COLUMN created_at, " \
"DROP COLUMN modified_at, DROP COLUMN model;"
echo " Removing sdn from 'instance_wim_nets'"
sql "ALTER TABLE instance_wim_nets DROP COLUMN sdn;"
echo " Change back from sdn to created at 'wim_accounts'"
sql "ALTER TABLE wim_accounts CHANGE COLUMN sdn created ENUM('true','false') NOT NULL DEFAULT 'false' AFTER wim_id;"
# The unique index announced here is re-created after the rename below.
echo " Restore back unique_datacenter_port_mapping at 'wim_port_mappings'"
echo " change 'wim_port_mappings' device_x to pop_x, remove switch_dpid, switch_port"
sql "ALTER TABLE wim_port_mappings ALTER device_id DROP DEFAULT, ALTER device_interface_id DROP DEFAULT;"
sql "ALTER TABLE wim_port_mappings CHANGE COLUMN device_id pop_switch_dpid VARCHAR(64) NOT NULL AFTER " \
"datacenter_id, CHANGE COLUMN device_interface_id pop_switch_port VARCHAR(64) NOT NULL AFTER pop_switch_dpid," \
" CHANGE COLUMN service_endpoint_id wan_service_endpoint_id VARCHAR(256) NOT NULL AFTER pop_switch_port, " \
" CHANGE COLUMN service_mapping_info wan_service_mapping_info TEXT NULL AFTER wan_service_endpoint_id, " \
" DROP COLUMN switch_dpid, DROP COLUMN switch_port;"
sql "ALTER TABLE wim_port_mappings ADD UNIQUE INDEX unique_datacenter_port_mapping(datacenter_id, " \
"pop_switch_dpid, pop_switch_port);"
echo " add unique name to 'datacenters'"
sql "ALTER TABLE datacenters ADD UNIQUE INDEX name (name);"
sql "DELETE FROM schema_version WHERE version_int='40';"
}
# v41 (OSM 8.0.0): WIM and WIM-account names no longer need to be unique.
function upgrade_to_41(){
echo " Removing unique name at 'wims' 'wim_accounts'"
sql "ALTER TABLE wims DROP INDEX name;"
sql "ALTER TABLE wim_accounts DROP INDEX wim_name;"
sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
"VALUES (41, '0.41', '8.0.0', 'Removing unique name for wims/wim_accounts', '2020-07-16');"
}
# Revert v41.  Fails if duplicate names were created while v41 was active.
function downgrade_from_41(){
echo " Adding back unique name at 'wims' 'wim_accounts'"
sql "ALTER TABLE wims ADD UNIQUE INDEX name (name);"
sql "ALTER TABLE wim_accounts ADD UNIQUE INDEX wim_name (name);"
sql "DELETE FROM schema_version WHERE version_int='41';"
}
#TODO ... put functions here
# Remove the sentinel row (version_int=0) that marks a migration in progress.
# Called once the migration finishes or when a rollback has been restored.
function del_schema_version_process()
{
echo "DELETE FROM schema_version WHERE version_int='0';" | $DBCMD ||
! echo " ERROR writing on schema_version" >&2 || exit 1
}
# Insert the sentinel row (version_int=0) recording that a migration is in
# progress, including the backup file path so an interrupted run can be
# recovered later (see the "Recover Migration process" branch below).
function set_schema_version_process()
{
echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES "\
"(0, '0.0', '0.0.0', 'migration from $DATABASE_VER_NUM to $DB_VERSION backup: $BACKUP_FILE',"\
"'$(date +%Y-%m-%d)');" | $DBCMD ||
! echo " Cannot set database at migration process writing into schema_version" >&2 || exit 1
}
# Restore the database from $BACKUP_FILE after a failed migration step.
# Always exits (status 1): either after a successful restore, or after
# reporting that the restore itself failed.  Skips the restore when the
# run started from an empty database (nothing to roll back).
function rollback_db()
{
if echo $DATABASE_PROCESS | grep -q init ; then # Empty database. No backup needed
echo " Aborted! Rollback database not needed" && exit 1
else # migration a non empty database or Recovering a migration process
cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM && echo " Aborted! Rollback database OK" &&
del_schema_version_process && rm -f "$BACKUP_FILE" && exit 1
echo " Aborted! Rollback database FAIL" && exit 1
fi
}
function sql() # send a sql command
{
# Run one SQL command through $DBCMD.  On failure, report the failing
# command and roll the database back to the backup (rollback_db exits).
# The '|| ! echo ... || rollback_db' chain triggers rollback_db only when
# the DB command fails ('! echo' keeps the chain going after logging).
# Error output now goes to stderr, consistent with every other error
# message in this script.
echo "$*" | $DBCMD || ! echo " ERROR with command '$*'" >&2 || rollback_db
return 0
}
# Walk DATABASE_VER_NUM to DB_VERSION one step at a time, dispatching to
# upgrade_to_<N> / downgrade_from_<N> by constructed function name.
# Exactly one of the two loops runs depending on direction.
function migrate()
{
#UPGRADE DATABASE step by step
while [ $DB_VERSION -gt $DATABASE_VER_NUM ]
do
echo " upgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM+1))'"
DATABASE_VER_NUM=$((DATABASE_VER_NUM+1))
upgrade_to_${DATABASE_VER_NUM}
#FILE_="${DIRNAME}/upgrade_to_${DATABASE_VER_NUM}.sh"
#[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to upgrade" >&2 && exit -1
#$FILE_ || exit -1 # if fail return
done
#DOWNGRADE DATABASE step by step
while [ $DB_VERSION -lt $DATABASE_VER_NUM ]
do
echo " downgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM-1))'"
#FILE_="${DIRNAME}/downgrade_from_${DATABASE_VER_NUM}.sh"
#[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to downgrade" >&2 && exit -1
#$FILE_ || exit -1 # if fail return
downgrade_from_${DATABASE_VER_NUM}
DATABASE_VER_NUM=$((DATABASE_VER_NUM-1))
done
}
# Read the current schema version into DATABASE_VER_NUM and decide whether a
# migration is required.  Returns 1 (no work) when already at DB_VERSION;
# exits on unreadable/implausible versions or when the DB was written by a
# newer script; returns 0 when migration should proceed.
function check_migration_needed()
{
DATABASE_VER_NUM=`echo "select max(version_int) from schema_version;" | $DBCMD | tail -n+2` ||
! echo " ERROR cannot read from schema_version" || exit 1
# Sanity bounds: 0..100 — anything else means the query output was garbage.
if [[ -z "$DATABASE_VER_NUM" ]] || [[ "$DATABASE_VER_NUM" -lt 0 ]] || [[ "$DATABASE_VER_NUM" -gt 100 ]] ; then
echo " Error can not get database version ($DATABASE_VER_NUM?)" >&2
exit 1
fi
[[ $DB_VERSION -eq $DATABASE_VER_NUM ]] && echo " current database version '$DATABASE_VER_NUM' is ok" && return 1
[[ "$DATABASE_VER_NUM" -gt "$LAST_DB_VERSION" ]] &&
echo "Database has been upgraded with a newer version of this script. Use this version to downgrade" >&2 &&
exit 1
return 0
}
# Main driver.  The sentinel row version_int=0 in schema_version tells which
# of three states we are in:
#   1) no sentinel  -> normal migration of a populated DB (take a backup first)
#   2) 'init' sentinel -> fresh/empty DB, migrate without backup
#   3) other sentinel -> a previous run was killed; restore its backup, then
#      retry the migration.
DATABASE_PROCESS=`echo "select comments from schema_version where version_int=0;" | $DBCMD | tail -n+2` ||
! echo " ERROR cannot read from schema_version" || exit 1
if [[ -z "$DATABASE_PROCESS" ]] ; then # migration a non empty database
check_migration_needed || exit 0
# Create a backup database content
[[ -n "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q "${BACKUP_DIR}/backupdb.XXXXXX.sql")
[[ -z "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q --tmpdir "backupdb.XXXXXX.sql")
mysqldump $DEF_EXTRA_FILE_PARAM --add-drop-table --add-drop-database --routines --databases $DBNAME > $BACKUP_FILE ||
! echo "Cannot create Backup file '$BACKUP_FILE'" >&2 || exit 1
echo " Backup file '$BACKUP_FILE' created"
# Set schema version
set_schema_version_process
migrate
del_schema_version_process
rm -f "$BACKUP_FILE"
elif echo $DATABASE_PROCESS | grep -q init ; then # Empty database. No backup needed
echo " Migrating an empty database"
if check_migration_needed ; then
migrate
fi
del_schema_version_process
else # Recover Migration process
# The backup path was embedded in the sentinel comment by
# set_schema_version_process; strip everything up to 'backup: '.
BACKUP_FILE=${DATABASE_PROCESS##*backup: }
[[ -f "$BACKUP_FILE" ]] || ! echo "Previous migration process fail and cannot recover backup file '$BACKUP_FILE'" >&2 ||
exit 1
echo " Previous migration was killed. Restoring database from rollback file'$BACKUP_FILE'"
cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM || ! echo " Cannot load backup file '$BACKUP_FILE'" >&2 || exit 1
if check_migration_needed ; then
set_schema_version_process
migrate
fi
del_schema_version_process
rm -f "$BACKUP_FILE"
fi
exit 0
#echo done
|
#!/bin/sh
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
set -e

# Sign the app bundle with codesign, then extract the detached signatures:
# each signed Mach-O gets its embedded signature carved into a .sign file,
# CodeResources plists are copied verbatim, and everything is tarred up.
ROOTDIR=dist
BUNDLE="${ROOTDIR}/Bitcoin3-Qt.app"
CODESIGN=codesign
TEMPDIR=sign.temp
TEMPLIST=${TEMPDIR}/signatures.txt
OUT=signature-osx.tar.gz
OUTROOT=osx

if [ -z "$1" ]; then
  echo "usage: $0 <codesign args>"
  echo "example: $0 -s MyIdentity"
  exit 1
fi

rm -rf ${TEMPDIR} ${TEMPLIST}
mkdir -p ${TEMPDIR}

${CODESIGN} -f --file-list ${TEMPLIST} "$@" "${BUNDLE}"

# Mach-O files: locate the signature blob with pagestuff and dd it out.
grep -v CodeResources < "${TEMPLIST}" | while read i; do
  TARGETFILE="${BUNDLE}/$(echo "${i}" | sed "s|.*${BUNDLE}/||")"
  SIZE=$(pagestuff "$i" -p | tail -2 | grep size | sed 's/[^0-9]*//g')
  OFFSET=$(pagestuff "$i" -p | tail -2 | grep offset | sed 's/[^0-9]*//g')
  SIGNFILE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}.sign"
  DIRNAME="$(dirname "${SIGNFILE}")"
  mkdir -p "${DIRNAME}"
  echo "Adding detached signature for: ${TARGETFILE}. Size: ${SIZE}. Offset: ${OFFSET}"
  dd if="$i" of="${SIGNFILE}" bs=1 skip=${OFFSET} count=${SIZE} 2>/dev/null
done

# CodeResources plists are copied as-is, preserving the bundle layout.
grep CodeResources < "${TEMPLIST}" | while read i; do
  TARGETFILE="${BUNDLE}/$(echo "${i}" | sed "s|.*${BUNDLE}/||")"
  RESOURCE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}"
  DIRNAME="$(dirname "${RESOURCE}")"
  mkdir -p "${DIRNAME}"
  echo "Adding resource for: \"${TARGETFILE}\""
  cp "${i}" "${RESOURCE}"
done

rm ${TEMPLIST}
tar -C "${TEMPDIR}" -czf "${OUT}" .
rm -rf "${TEMPDIR}"
echo "Created ${OUT}"
|
import java.io.IOException;
/**
 * Unchecked wrapper around a checked {@link IOException}, for propagating
 * I/O failures through interfaces that cannot declare checked exceptions.
 * The original exception is preserved as the cause.
 */
public class RuntimeIOException extends RuntimeException {
/** @param cause the checked I/O failure being wrapped (retrievable via getCause()) */
public RuntimeIOException(IOException cause) {
super(cause);
}
/** Demo entry point: simulates an IOException and rethrows it unchecked. */
public static void main(String[] args) {
try {
// Simulate an IOException
throw new IOException("File not found");
} catch (IOException e) {
// Wrap the IOException in RuntimeIOException
throw new RuntimeIOException(e);
}
}
}
<reponame>gitter-badger/intellij-swagger
package org.zalando.intellij.swagger.completion.field.completion.swagger.json;

import com.intellij.openapi.vfs.VirtualFile;
import org.zalando.intellij.swagger.SwaggerLightCodeInsightFixtureTestCase;
import org.zalando.intellij.swagger.assertion.AssertableList;

/**
 * Verifies that field completion inside a referenced parameters file offers
 * exactly the Swagger 2.0 parameter-object fields, for three layouts of the
 * partial file (single definition, definitions in root, definitions nested).
 */
public class ParametersFileCompletionTest extends SwaggerLightCodeInsightFixtureTestCase {

  private static final String PARTIAL_FILES_PATH = "completion/field/swagger/partial/json";

  /**
   * The complete set of Swagger 2.0 parameter-object fields expected as
   * completions.  Previously duplicated inline in all three tests; kept in
   * one place so adding a field cannot drift between tests.
   */
  private static final String[] PARAMETER_FIELDS = {
    "name",
    "in",
    "description",
    "required",
    "schema",
    "type",
    "format",
    "allowEmptyValue",
    "items",
    "collectionFormat",
    "default",
    "maximum",
    "exclusiveMaximum",
    "minimum",
    "exclusiveMinimum",
    "maxLength",
    "minLength",
    "pattern",
    "maxItems",
    "minItems",
    "uniqueItems",
    "enum",
    "multipleOf"
  };

  public void testThatAutoCompletionWorksForFileWithSingleDefinition() {
    myFixture.copyFileToProject(PARTIAL_FILES_PATH + "/parameter.json", "parameter.json");
    assertParameterFieldCompletions("parameter.json");
  }

  public void testThatAutoCompletionWorksForFileWithMultipleParameterDefinitionsInRoot() {
    myFixture.copyFileToProject(
        PARTIAL_FILES_PATH + "/parameter_definitions_in_root.json",
        "parameter_definitions_in_root.json");
    assertParameterFieldCompletions("parameter_definitions_in_root.json");
  }

  public void testThatAutoCompletionWorksForFileWithMultipleParameterDefinitionsNotInRoot() {
    myFixture.copyFileToProject(
        PARTIAL_FILES_PATH + "/parameter_definitions_not_in_root.json",
        "parameter_definitions_not_in_root.json");
    assertParameterFieldCompletions("parameter_definitions_not_in_root.json");
  }

  /**
   * Shared assertion body: configure the main swagger.json (which references
   * the already-copied partial file), collect completion variants for the
   * partial file, and check they are exactly PARAMETER_FIELDS.
   */
  private void assertParameterFieldCompletions(final String partialFileName) {
    final VirtualFile swaggerFile =
        myFixture.copyFileToProject(PARTIAL_FILES_PATH + "/swagger.json", "swagger.json");
    myFixture.configureFromExistingVirtualFile(swaggerFile);

    final AssertableList completions =
        new AssertableList(myFixture.getCompletionVariants(partialFileName));

    // assertContains is varargs, so the constant array can be passed directly.
    completions.assertContains(PARAMETER_FIELDS).isOfSize(PARAMETER_FIELDS.length);
  }
}
|
<filename>src/studio/src/designer/backend/gulpfile.js<gh_stars>0
const gulp = require('gulp');
const run = require('gulp-run-command').default;
const chokidar = require('chokidar');
const del = require('del');
const fs = require('fs');
// When specifying options, you need to add all options to avoid lint errors.
// This can be removed if/when https://github.com/Klathmon/gulp-run-command/pull/11 is released
const defaultGulpRunOptions = {
quiet: false,
ignoreErrors: false,
timeout: undefined,
env: {},
};
// Vendored third-party assets under wwwroot that 'clean' removes (they are
// re-created from node_modules by copyNodeModulePackages).
const cleanGlobs = [
'wwwroot/designer/css/lib/**/*.css',
'wwwroot/designer/js/lib/**/*.js',
'wwwroot/designer/css/font-awesome/*.css',
'wwwroot/designer/js/lib/**',
'wwwroot/designer/css/lib/**',
'wwwroot/designer/css/bootstrap*.css',
'wwwroot/designer/css/font/fontawesome*.*',
];
// Webpack output bundles produced by the frontend builds; the main
// app-development bundle is code-split into numbered chunk files plus the
// two monaco-editor worker bundles.
const jsServDevFile = '../frontend/dist/app-development/app-development.js';
const jsServDevModuleFile0 =
'../frontend/dist/app-development/1.app-development.js';
const jsServDevModuleFile1 =
'../frontend/dist/app-development/2.app-development.js';
const jsServDevModuleFile2 =
'../frontend/dist/app-development/3.app-development.js';
const jsServDevModuleFile3 =
'../frontend/dist/app-development/4.app-development.js';
const jsServDevMonacoWorker1 =
'../frontend/dist/app-development/editor.worker.js';
const jsServDevMonacoWorker2 = '../frontend/dist/app-development/ts.worker.js';
const jsDashboardFile = '../frontend/dist/dashboard/dashboard.js';
const cssServDevFile = '../frontend/dist/app-development/app-development.css';
const cssDashboardFile = '../frontend/dist/dashboard/dashboard.css';
// chokidar watcher handles, assigned lazily by setupWatchers once the
// corresponding bundle file exists.
let jsWatcher = null;
let cssWatcher = null;
const jslibDest = 'wwwroot/designer/js/lib/';
// Source/destination pairs for vendoring browser libraries from node_modules
// into wwwroot (served statically by the .NET backend).  Consumed by
// copyNodeModulePackages; the mirror list of outputs is cleanGlobs above.
const copyGlobs = [
{
src: 'node_modules/bootstrap/dist/css/bootstrap*.css',
dest: 'wwwroot/designer/css/',
},
{
src: 'node_modules/jquery-ui-dist/*.js',
dest: 'wwwroot/designer/js/lib/jquery-ui/',
},
{
src: 'node_modules/bootstrap/dist/js/bootstrap*.js',
dest: jslibDest,
},
{
src: 'node_modules/json-editor/dist/*.js',
dest: jslibDest,
},
{
src: 'node_modules/select2/dist/js/select2.full.js',
dest: jslibDest,
},
{
src: 'node_modules/select2/dist/css/select2.css',
dest: 'wwwroot/designer/css/lib',
},
{
src: 'node_modules/jquery/dist/*.js',
dest: jslibDest,
},
{
src: 'node_modules/requirejs/require.js',
dest: jslibDest,
},
{
src: 'node_modules/underscore/*.js',
dest: jslibDest,
},
{
src: 'node_modules/requirejs-text/*.js',
dest: jslibDest,
},
{
src: 'node_modules/js-beautify/js/lib/beautify*.js',
dest: jslibDest,
},
{
src: 'node_modules/sightglass/*.js',
dest: jslibDest,
},
{
src: 'node_modules/rivets/dist/*.js',
dest: jslibDest,
},
{
src: 'node_modules/jquery-validation-unobtrusive/dist/*.js',
dest: jslibDest,
},
{
src: 'node_modules/jquery-validation/dist/*.js',
dest: jslibDest,
},
{
src: 'node_modules/popper.js/dist/umd/*.*.js',
dest: jslibDest,
},
{
src: 'node_modules/monaco-editor/min/**/*.*',
dest: 'wwwroot/designer/js/lib/monaco-editor',
},
{
src: 'node_modules/bootstrap-list-filter/bootstrap-list-filter.min.js',
dest: jslibDest,
},
];
// Copy every vendored browser dependency from node_modules into wwwroot.
// The original implementation invoked cb() synchronously while the gulp
// streams were still flushing, so the task could be reported finished before
// files were on disk; now cb fires only after every stream has ended (or on
// the first stream error).
function copyNodeModulePackages(cb) {
  let pending = copyGlobs.length;
  if (pending === 0) {
    cb();
    return;
  }
  let failed = false;
  copyGlobs.forEach((copyGlob) =>
    gulp
      .src(copyGlob.src)
      .pipe(gulp.dest(copyGlob.dest))
      .on('error', (err) => {
        if (!failed) {
          failed = true;
          cb(err);
        }
      })
      .on('end', () => {
        if (!failed && --pending === 0) {
          cb();
        }
      }),
  );
}
// Delete the vendored assets listed in cleanGlobs; returns del's promise so
// gulp waits for completion.
function cleanNodeModulePackages() {
return del(cleanGlobs);
}
// Copy both webpack JS bundles (dashboard + app-development) into wwwroot,
// then signal gulp completion.
function copyReactJs(cb) {
  copyDashboardJs();
  copyServDevJs();
  cb();
}

// Copy both webpack CSS bundles (dashboard + app-development) into wwwroot,
// then signal gulp completion.
function copyReactCss(cb) {
  copyDashboardCss();
  copyServDevCss();
  cb();
}
// Copy the dashboard JS bundle into wwwroot.  The 1s delay is a workaround:
// the file-change event can fire while webpack is still writing the bundle —
// presumably why every copy helper here waits; TODO confirm and replace with
// a proper "write finished" signal.
function copyDashboardJs() {
setTimeout(function () {
gulp.src(jsDashboardFile).pipe(gulp.dest('./wwwroot/designer/js/react'));
}, 1000);
return;
}
// Copy the app-development bundle, its code-split chunks and the monaco
// worker bundles into wwwroot.  The seven hand-unrolled gulp.src blocks are
// folded into one loop over the file list.  The 1s delay matches the other
// copy helpers (webpack may still be writing when the watcher fires).
function copyServDevJs() {
  setTimeout(function () {
    [
      jsServDevFile,
      jsServDevModuleFile0,
      jsServDevModuleFile1,
      jsServDevModuleFile2,
      jsServDevModuleFile3,
      jsServDevMonacoWorker1,
      jsServDevMonacoWorker2,
    ].forEach(function (bundle) {
      gulp.src(bundle).pipe(gulp.dest('./wwwroot/designer/js/react'));
    });
  }, 1000);
}
// Copy the dashboard CSS bundle into wwwroot (same 1s write-delay workaround
// as the JS copy helpers above).
function copyDashboardCss() {
setTimeout(function () {
gulp.src(cssDashboardFile).pipe(gulp.dest('./wwwroot/designer/css/react'));
}, 1000);
return;
}
// Copy the app-development CSS bundle into wwwroot.
function copyServDevCss() {
setTimeout(function () {
gulp.src(cssServDevFile).pipe(gulp.dest('./wwwroot/designer/css/react'));
}, 1000);
return;
}
// The four deleters below remove the copied React bundles from wwwroot;
// each returns del's promise so gulp awaits it.
function deleteServDevJs() {
return del('wwwroot/designer/js/react/app-development.js');
}
function deleteDashboardJs() {
return del('wwwroot/designer/js/react/dashboard.js');
}
function deleteServDevCss() {
return del('wwwroot/designer/css/react/app-development.css');
}
function deleteDashboardCss() {
return del('wwwroot/designer/css/react/dashboard.css');
}
// Watch the four webpack output bundles and re-copy each into wwwroot on
// change.  A bundle may not exist yet when this task starts (webpack builds
// in parallel), so each watch is installed by polling for the file first.
// The four copy-pasted setInterval loops of the original are factored into
// one helper; the module-level jsWatcher/cssWatcher assignments are kept.
function setupWatchers(cb) {
  // Poll every second until `file` exists, then watch it, fire `onChange`
  // on modification, and hand the watcher to `assign` for module-level
  // bookkeeping.
  function watchWhenReady(file, onChange, assign) {
    var poller = setInterval(function () {
      if (fs.existsSync(file)) {
        var watcher = chokidar.watch(file);
        watcher.on('change', onChange);
        assign(watcher);
        clearInterval(poller);
      }
    }, 1000);
  }

  watchWhenReady(jsDashboardFile, copyDashboardJs, function (w) {
    jsWatcher = w;
  });
  watchWhenReady(jsServDevFile, copyServDevJs, function (w) {
    jsWatcher = w;
  });
  watchWhenReady(cssDashboardFile, copyDashboardCss, function (w) {
    cssWatcher = w;
  });
  watchWhenReady(cssServDevFile, copyServDevCss, function (w) {
    cssWatcher = w;
  });
  cb();
}
// `build`: copy shared node module packages into wwwroot.
gulp.task('build', gulp.series([copyNodeModulePackages]));
// `copy-files`: node module packages plus the built React JS/CSS bundles.
gulp.task(
  'copy-files',
  gulp.series(copyNodeModulePackages, copyReactJs, copyReactCss),
);
// `clean`: delete all copied bundles, then run each frontend's own clean.
gulp.task(
  'clean',
  gulp.series(
    deleteServDevCss,
    deleteDashboardCss,
    deleteServDevJs,
    deleteDashboardJs,
    cleanNodeModulePackages,
    run('npm run clean', {
      ...defaultGulpRunOptions,
      cwd: '../frontend/dashboard',
    }),
    run('npm run clean', {
      ...defaultGulpRunOptions,
      cwd: '../frontend/app-development',
    }),
  ),
);
// `develop`: backend (dotnet) + app-development webpack watch + file watchers.
gulp.task(
  'develop',
  gulp.parallel(
    copyNodeModulePackages,
    setupWatchers,
    run('dotnet run'),
    run('npm run webpack-watch', {
      ...defaultGulpRunOptions,
      cwd: '../frontend/app-development',
    }),
  ),
);
// `develop-designer-frontend`: same as `develop` but without the backend.
gulp.task(
  'develop-designer-frontend',
  gulp.parallel(
    copyNodeModulePackages,
    setupWatchers,
    run('npm run webpack-watch', {
      ...defaultGulpRunOptions,
      cwd: '../frontend/app-development',
    }),
  ),
);
// `develop-dashboard`: backend + dashboard webpack watch + file watchers.
gulp.task(
  'develop-dashboard',
  gulp.parallel(
    copyNodeModulePackages,
    setupWatchers,
    run('dotnet run'),
    run('npm run webpack-watch', {
      ...defaultGulpRunOptions,
      cwd: '../frontend/dashboard',
    }),
  ),
);
// `build-ux-editor`: build the ux-editor package, then copy outputs in.
gulp.task(
  'build-ux-editor',
  gulp.series(
    run('npm run build', {
      ...defaultGulpRunOptions,
      cwd: '../frontend/ux-editor',
    }),
    'copy-files',
  ),
);
// `install-react-app-dependencies`: bootstrap the monorepo packages.
// NOTE(review): cwd '../Frontend' is capitalized, unlike the '../frontend'
// used by every other task — verify this path on case-sensitive filesystems.
gulp.task(
  'install-react-app-dependencies',
  gulp.series(
    run('lerna bootstrap --hoist --ci', {
      ...defaultGulpRunOptions,
      cwd: '../Frontend',
    }),
  ),
);
// `default`: full production build of both frontends, then copy outputs in.
gulp.task(
  'default',
  gulp.series([
    run('npm run build', {
      ...defaultGulpRunOptions,
      cwd: '../frontend/app-development',
    }),
    run('npm run build', {
      ...defaultGulpRunOptions,
      cwd: '../frontend/dashboard',
    }),
    'copy-files',
  ]),
);
|
<filename>lib/db/client-metrics-db.js
'use strict';
// Columns selected when reading metrics rows back out of the database.
const METRICS_COLUMNS = ['id', 'created_at', 'metrics'];
const TABLE = 'client_metrics';
// Interval between cleanup runs, in milliseconds.
const ONE_MINUTE = 60 * 1000;
// Map a raw DB row to the camelCased shape used by the rest of the app.
const mapRow = row => ({
    id: row.id,
    createdAt: row.created_at,
    metrics: row.metrics,
});
class ClientMetricsDb {
constructor(db, getLogger) {
this.db = db;
this.logger = getLogger('client-metrics-db.js');
// Clear old metrics regulary
const clearer = () => this.removeMetricsOlderThanOneHour();
setTimeout(clearer, 10).unref();
setInterval(clearer, ONE_MINUTE).unref();
}
removeMetricsOlderThanOneHour() {
this.db(TABLE)
.whereRaw("created_at < now() - interval '1 hour'")
.del()
.then(res => res > 0 && this.logger.info(`Deleted ${res} metrics`));
}
// Insert new client metrics
insert(metrics) {
return this.db(TABLE).insert({ metrics });
}
// Used at startup to load all metrics last week into memory!
getMetricsLastHour() {
return this.db
.select(METRICS_COLUMNS)
.from(TABLE)
.limit(2000)
.whereRaw("created_at > now() - interval '1 hour'")
.orderBy('created_at', 'asc')
.map(mapRow);
}
// Used to poll for new metrics
getNewMetrics(lastKnownId) {
return this.db
.select(METRICS_COLUMNS)
.from(TABLE)
.limit(1000)
.where('id', '>', lastKnownId)
.orderBy('created_at', 'asc')
.map(mapRow);
}
}
module.exports = ClientMetricsDb;
|
//// react
import React, {useState, useEffect, useContext, useCallback} from 'react';
//// react native
import {
View,
StyleSheet,
Dimensions,
FlatList,
Image,
ActivityIndicator,
TouchableWithoutFeedback,
} from 'react-native';
//// react navigation
import {useFocusEffect} from '@react-navigation/native';
import {navigate} from '~/navigation/service';
//// language
import {useIntl} from 'react-intl';
import {getCharacterLength} from '~/utils/strings';
import {substr_utf8_bytes} from '~/utils/strings';
const runes = require('runes');
//// ui, styles
import {Block, Icon, Button, Input, Text, theme} from 'galio-framework';
import {argonTheme} from '~/constants/argonTheme';
const {width, height} = Dimensions.get('screen');
import Modal from 'react-native-modal';
//// contexts
import {
PostsContext,
UIContext,
UserContext,
SettingsContext,
} from '~/contexts';
import {PostData, PostRef, PostsTypes} from '~/contexts/types';
// Row background colors, alternated by index to zebra-stripe the list.
const BACKGROUND_COLORS = [
  argonTheme.COLORS.BORDER,
  argonTheme.COLORS.SECONDARY,
];
//// props
// Props for AuthorListView.
interface Props {
  // Author account names to display in the list.
  authors: string[];
  // Whether the modal is visible.
  showModal: boolean;
  // Invoked with the chosen author name (or the raw search text on submit).
  handlePressAuthor: (author: string) => void;
  // Invoked when the user taps outside the modal.
  cancelModal?: () => void;
}
// Modal that shows a searchable list of authors. Tapping a row (or
// submitting the search field) reports the selection via
// props.handlePressAuthor; tapping the backdrop calls props.cancelModal.
const AuthorListView = (props: Props): JSX.Element => {
  //// props
  const {authors, showModal} = props;
  //// language
  const intl = useIntl();
  //// contexts
  // Only the settings context is read here (avatar image host); the other
  // contexts consumed by the previous revision were unused.
  const {settingsState} = useContext(SettingsContext);
  //// states
  const [searchText, setSearchText] = useState('');
  // Authors currently shown: the full list narrowed by the search text.
  const [searchedItems, setSearchedItems] = useState(authors || []);
  // Drives the footer spinner; nothing sets it yet (no pagination loader).
  const [loadingMore, setLoadingMore] = useState(false);
  // Forward a tapped author to the parent.
  const _onPressAuthor = (author: string) => {
    props.handlePressAuthor(author);
  };
  // Submitting the search field reports the raw text as an author name.
  const _onSubmitSearch = () => {
    props.handlePressAuthor(searchText);
  };
  // Update the search text and narrow the visible author list.
  const _handleTextChange = (text: string) => {
    // set text
    setSearchText(text);
    // filtering
    if (text === '') {
      setSearchedItems(authors);
    } else {
      const _filterdItems = authors.filter(
        (author) => text && author.includes(text),
      );
      setSearchedItems(_filterdItems);
    }
  };
  // Currently unused (see the commented-out ListHeaderComponent below);
  // kept for easy re-enabling.
  const _renderHeader = () => {
    // NOTE(review): these branches look inverted (clear icon while the
    // field is empty, search icon while it has text) — confirm the intent
    // before re-enabling this header.
    const iconSearch =
      searchText === '' ? (
        <TouchableWithoutFeedback onPress={() => setSearchText('')}>
          <Icon
            size={16}
            color={theme.COLORS.MUTED}
            name="page-remove"
            family="foundation"
          />
        </TouchableWithoutFeedback>
      ) : (
        <TouchableWithoutFeedback onPress={_onSubmitSearch}>
          <Icon
            size={16}
            color={theme.COLORS.MUTED}
            name="magnifying-glass"
            family="entypo"
          />
        </TouchableWithoutFeedback>
      );
    return (
      <Block center>
        <Input
          style={styles.searchContainer}
          right
          color={argonTheme.COLORS.ERROR}
          autoFocus={true}
          autoCorrect={false}
          autoCapitalize="none"
          iconContent={iconSearch}
          defaultValue={searchText}
          placeholder={intl.formatMessage({id: 'Profile.search_author'})}
          placeholderTextColor={argonTheme.COLORS.INPUT}
          onChangeText={_handleTextChange}
          onSubmitEditing={_onSubmitSearch}
        />
      </Block>
    );
  };
  // Search input shown above the author list.
  const _renderSearchBar = () => (
    <Block center>
      <Input
        style={styles.searchContainer}
        right
        color={argonTheme.COLORS.ERROR}
        autoFocus={true}
        autoCorrect={false}
        autoCapitalize="none"
        defaultValue={searchText}
        placeholder={intl.formatMessage({id: 'Profile.search_author'})}
        // fix: the prop was misspelled "placehoderTextColor", so React
        // Native silently ignored it and the placeholder color never applied
        placeholderTextColor={argonTheme.COLORS.INPUT}
        onChangeText={_handleTextChange}
        onSubmitEditing={_onSubmitSearch}
      />
    </Block>
  );
  //// render footer when loading more
  const _renderFooter = () => {
    if (!loadingMore) return null;
    return (
      <View
        style={{
          position: 'relative',
          width: width,
          height: height,
          paddingVertical: 20,
          marginTop: 10,
          marginBottom: 10,
          borderColor: theme.COLORS.PINK,
        }}>
        <ActivityIndicator color={argonTheme.COLORS.ERROR} size="large" />
      </View>
    );
  };
  //// render a single author row: avatar plus name, zebra-striped
  const _renderItem = (author: string, index: number) => {
    const avatar = `${settingsState.blockchains.image}/u/${author}/avatar`;
    return (
      <TouchableWithoutFeedback onPress={() => _onPressAuthor(author)}>
        <Block
          flex
          card
          row
          space="between"
          style={{
            marginBottom: 5,
            padding: 5,
            backgroundColor:
              BACKGROUND_COLORS[index % BACKGROUND_COLORS.length],
          }}>
          <Block row middle>
            <Image
              source={{
                uri: avatar || null,
              }}
              style={styles.avatar}
            />
            <Text size={14} style={{marginHorizontal: 5}}>
              {author}
            </Text>
          </Block>
        </Block>
      </TouchableWithoutFeedback>
    );
  };
  return (
    <Modal
      isVisible={showModal}
      animationIn="zoomIn"
      animationOut="zoomOut"
      onBackdropPress={props.cancelModal}>
      <Block card style={styles.container}>
        {_renderSearchBar()}
        <FlatList
          contentContainerStyle={styles.posts}
          data={searchedItems}
          renderItem={({item, index}) => _renderItem(item, index)}
          keyExtractor={(item, index) => String(index)}
          initialNumToRender={5}
          // ListHeaderComponent={_renderHeader}
          ListFooterComponent={_renderFooter}
          showsVerticalScrollIndicator={false}
        />
      </Block>
    </Modal>
  );
};
export {AuthorListView};
// Static styles for the modal, list, avatar, and search input.
const styles = StyleSheet.create({
  // Modal card; sized relative to the screen.
  container: {
    width: width * 0.6,
    height: height * 0.3,
    backgroundColor: argonTheme.COLORS.TWITTER,
    paddingVertical: 10,
    marginHorizontal: 70,
  },
  // FlatList content container.
  posts: {
    width: '100%',
    paddingHorizontal: theme.SIZES.BASE,
    paddingVertical: theme.SIZES.BASE * 1,
  },
  // Circular author avatar.
  avatar: {
    width: 24,
    height: 24,
    borderRadius: 24 / 2,
  },
  // Search input box.
  searchContainer: {
    height: 38,
    width: width * 0.5,
    marginHorizontal: 16,
    borderWidth: 1,
    borderRadius: 3,
  },
});
|
def string_index(string):
    """Return the indices of each character in *string*, concatenated.

    For example, ``"abc"`` -> ``"012"``. Multi-digit indices are joined
    without separators, so an 11-character input ends with ``...910``.
    """
    # str.join builds the result in O(n); the previous ``result += str(i)``
    # loop re-copied the accumulator each iteration (O(n^2)). Only the
    # length of the input matters, so iterate over range(len(...)).
    return "".join(str(i) for i in range(len(string)))


test_string = "Hello World"
print(string_index(test_string))  # Output: 012345678910
<filename>nerdlets/slo-r-entity/index.js
/**
* Provides full New Relic One SLO/R Entity functionality.
*
* @file This files defines the NR1 App SLO/R Entity functionaly and loads dedicated elements to define and display SLOs.
* @author <NAME>
*/
/** core */
import React from 'react';
import PropTypes from 'prop-types';
/** nr1 */
import {
Button,
EntityStorageMutation,
Grid,
GridItem,
Icon,
Modal,
navigation,
NerdletStateContext,
PlatformStateContext,
Stack,
StackItem,
Spinner
} from 'nr1';
/** 3rd party */
import { format } from 'date-fns';
/** shared */
// slo documents
import { fetchSloDocuments } from '../shared/services/slo-documents';
/** local */
import SloList from './components/slo-list';
import SloForm from './components/slo-form';
import ViewDocument from './components/view-document';
import { getNow } from '../shared/helpers';
/**
* SLOREntityNerdlet
*/
export default class SLOREntityNedlet extends React.Component {
static propTypes = {
nerdletUrlState: PropTypes.object
};
constructor(props) {
super(props);
this.state = {
entityGuid: this.props.nerdletUrlState.entityGuid,
slo_documents: null,
SLOTableView: false,
// New SLO
isActiveCreateModal: false,
isActiveUpdateModal: false,
isActiveViewModal: false,
// Update SLO
editDocumentId: null,
// View SLO
viewDocumentId: null,
// Refresh
refreshInterval: 60000, // in milliseconds
refreshing: false,
lastUpdated: 0,
groupList: []
}; // state
this.openConfig = this._openConfig.bind(
this
); /** opens the SLO configuration */
this.upsertDocumentCallback = this.upsertDocumentCallback.bind(this);
this.deleteDocumentCallback = this.deleteDocumentCallback.bind(this);
this.toggleCreateModal = this.toggleCreateModal.bind(this);
this.toggleUpdateModal = this.toggleUpdateModal.bind(this);
this.toggleViewModal = this.toggleViewModal.bind(this);
} // constructor
async componentDidMount() {
const { entityGuid } = this.state;
await this.load(entityGuid);
this.startTimer();
}
/*
* Reload if we changed entityGuid or triggered a refresh
*/
componentDidUpdate = async prevProps => {
const prevEntityGuid = prevProps.nerdletUrlState.entityGuid;
const currentEntityGuid = this.props.nerdletUrlState.entityGuid;
if (prevEntityGuid !== currentEntityGuid) {
await this.load(currentEntityGuid);
}
};
componentWillUnmount() {
this.stopTimer();
}
static contextType = NerdletStateContext;
load = async entityGuid => {
if (entityGuid) {
this.setState({ refreshing: true, entityGuid });
} else {
this.setState({ refreshing: true });
}
await this.getSloDocuments(entityGuid);
};
startTimer() {
const { refreshInterval } = this.state;
this.refresh = setInterval(async () => {
const { entityGuid } = this.state;
await this.load(entityGuid);
}, refreshInterval);
}
stopTimer() {
if (this.refresh) {
clearInterval(this.refresh);
}
}
/** opens the slo-r configuration nerdlet */
_openConfig() {
const __confignerdlet = {
id: 'slo-r-config',
urlState: {
entityGuid: this.state.entityGuid,
renderCallback: this.rerenderSLOs
}
};
navigation.openStackedNerdlet(__confignerdlet);
}
/** gets all the SLO documents defined for this entity */
getSloDocuments = async entityGuid => {
const slo_documents = await fetchSloDocuments({ entityGuid });
const groupList = [];
slo_documents.forEach(({ document: { slogroup } }) => {
if (slogroup && !groupList.includes(slogroup)) {
groupList.push(slogroup);
}
});
this.setState({
slo_documents,
refreshing: false,
lastUpdated: getNow(),
groupList
});
};
toggleCreateModal() {
this.setState(prevState => ({
isActiveCreateModal: !prevState.isActiveCreateModal
}));
}
toggleUpdateModal(options = { document: {} }) {
const idField = 'documentId';
const { document } = options;
const documentId = document[idField] || null;
this.setState(prevState => {
return {
isActiveUpdateModal: !prevState.isActiveUpdateModal,
editDocumentId: documentId
};
});
}
toggleViewModal(options = { document: {} }) {
const { document } = options;
this.setState(prevState => ({
viewDocumentId: document.documentId,
isActiveViewModal: !prevState.isActiveViewModal
}));
}
// Form Callbacks
async upsertDocumentCallback({ document, response }) {
if (!response) {
throw new Error('Error writing SLO Document to Entity Storage');
}
this.upsertDocumentInList({ mutationResult: document });
await this.getSloDocuments(this.state.entityGuid);
}
async deleteDocumentCallback({ document }) {
const __mutation = {
actionType: EntityStorageMutation.ACTION_TYPE.DELETE_DOCUMENT,
collection: 'nr1-csg-slo-r',
entityGuid: document.entityGuid,
// TODO - Remove document.name and document.slo_name after we've reached an initial release
documentId: document.documentId || document.name || document.slo_name
};
// TODO Provide message of the successful deletion
const __result = await EntityStorageMutation.mutate(__mutation);
if (!__result) {
throw new Error('Error deleting SLO document from Entity Storage');
}
this.removeDocumentFromList({ document });
// TODO: Check to see the entity in question has any other SLO documents in the collection and remove the tag slor=true if there are none.
}
upsertDocumentInList({ mutationResult }) {
const { slo_documents } = this.state;
const { documentId, document } = mutationResult;
const documentIndex = slo_documents.findIndex(
d => d.document.documentId === mutationResult.documentId
);
// Update item in list without mutating state
if (documentIndex >= 0) {
this.setState(({ slo_documents }) => ({
slo_documents: [
...slo_documents.slice(0, documentIndex),
{ id: documentId, document },
...slo_documents.slice(documentIndex + 1)
],
isActiveUpdateModal: false,
editDocumentId: null
}));
}
if (documentIndex < 0) {
const newRecords = [{ id: documentId, document }];
this.setState(prevState => ({
slo_documents: prevState.slo_documents.concat(newRecords),
isActiveCreateModal: false,
editDocumentId: null
}));
}
}
removeDocumentFromList({ document }) {
this.setState(prevState => ({
slo_documents: prevState.slo_documents.filter(doc => {
return doc.document.documentId !== document.documentId;
})
}));
}
renderToolbar() {
const { lastUpdated, refreshing } = this.state;
return (
<Stack
className="entity-toolbar-container"
fullWidth
horizontalType={Stack.HORIZONTAL_TYPE.FILL}
verticalType={Stack.VERTICAL_TYPE.CENTER}
gapType={Stack.GAP_TYPE.NONE}
>
<StackItem className="toolbar-left-side">
<Stack
horizontalType={Stack.HORIZONTAL_TYPE.FILL}
verticalType={Stack.VERTICAL_TYPE.CENTER}
gapType={Stack.GAP_TYPE.NONE}
>
<StackItem className="segmented-control-container">
<button
type="button"
className={`grid-view-button ${
!this.state.SLOTableView ? 'active' : ''
}`}
onClick={() => this.setState({ SLOTableView: false })}
>
<Icon
type={Icon.TYPE.INTERFACE__OPERATIONS__GROUP}
color={this.state.SLOTableView ? '#007e8a' : '#ffffff'}
/>
Grid
</button>
<button
type="button"
className={`table-view-button ${
this.state.SLOTableView ? 'active' : ''
}`}
onClick={() => this.setState({ SLOTableView: true })}
>
<Icon
type={Icon.TYPE.INTERFACE__VIEW__LIST_VIEW}
color={this.state.SLOTableView ? '#ffffff' : '#007e8a'}
/>
Table
</button>
</StackItem>
<StackItem>
<hr />
</StackItem>
<StackItem className="updated-timestamp">
Last updated at: {format(lastUpdated, 'hh:mm:ss')}
{refreshing && <Spinner inline />}
</StackItem>
</Stack>
</StackItem>
<StackItem>
<Stack
className="toolbar-right-side"
fullWidth
horizontalType={Stack.HORIZONTAL_TYPE.RIGHT}
>
<Button
onClick={this.toggleCreateModal}
type={Button.TYPE.PRIMARY}
iconType={Button.ICON_TYPE.DOCUMENTS__DOCUMENTS__NOTES__A_ADD}
>
Define an SLO
</Button>
</Stack>
</StackItem>
</Stack>
);
}
/** lifecycle provides the rendering context for this nerdlet */
render() {
// ensure we have state for our slo documents to render the reporting table and configuration options
const { slo_documents, refreshing } = this.state;
if (slo_documents === null && refreshing === true) {
return (
<div>
<Spinner className="centered" size="small" />
</div>
);
}
const sloHasBeenDefined = this.state.slo_documents?.length > 0;
return (
<div>
{this.renderToolbar()}
<Grid
className={
!sloHasBeenDefined ? 'empty-state-parent' : 'slo-table-container'
}
>
<GridItem
columnSpan={!sloHasBeenDefined ? 4 : 12}
columnStart={!sloHasBeenDefined ? 5 : null}
>
<PlatformStateContext.Consumer>
{platformUrlState => {
if (slo_documents === null) {
return null;
}
return (
<SloList
entityGuid={this.state.entity}
slo_documents={this.state.slo_documents}
timeRange={platformUrlState.timeRange}
toggleCreateModal={this.toggleCreateModal}
toggleUpdateModal={this.toggleUpdateModal}
toggleViewModal={this.toggleViewModal}
tableView={this.state.SLOTableView}
deleteCallback={this.deleteDocumentCallback}
/>
);
}}
</PlatformStateContext.Consumer>
</GridItem>
</Grid>
{/* Create Modal */}
<Modal
hidden={!this.state.isActiveCreateModal}
onClose={() => this.setState({ isActiveCreateModal: false })}
>
<PlatformStateContext.Consumer>
{platformUrlState => {
return (
<SloForm
entityGuid={this.state.entityGuid}
upsertDocumentCallback={this.upsertDocumentCallback}
modalToggleCallback={this.toggleCreateModal}
timeRange={platformUrlState.timeRange}
groupList={this.state.groupList}
/>
);
}}
</PlatformStateContext.Consumer>
</Modal>
{/* Update Modal */}
<Modal
hidden={!this.state.isActiveUpdateModal}
onClose={() => this.setState({ isActiveUpdateModal: false })}
>
<SloForm
entityGuid={this.state.entityGuid}
documentId={this.state.editDocumentId}
upsertDocumentCallback={this.upsertDocumentCallback}
modalToggleCallback={this.toggleUpdateModal}
groupList={this.state.groupList}
/>
</Modal>
{/* View Modal */}
<Modal
hidden={!this.state.isActiveViewModal}
onClose={() => this.setState({ isActiveViewModal: false })}
>
<ViewDocument
entityGuid={this.state.entityGuid}
documentId={this.state.viewDocumentId}
/>
</Modal>
</div>
);
} // render
} // SLOREntityNedlet
|
<gh_stars>0
import * as THREE from 'three';
// Renders crochet chart symbols as three.js objects. Each draw* method
// returns a Line/Mesh/Group positioned at the origin; callers are expected
// to place it (the x/y parameters are always 0 from draw()).
// NOTE(review): this code uses THREE.Geometry and geometry.vertices, which
// are legacy APIs — confirm the pinned three.js version still provides them.
export default class CrochetPaths {
  constructor(color) {
    // Default stroke/fill color (0x000000 when not supplied).
    this.color = color || 0x000000;
  }
  // Dispatch on the stitch abbreviation; returns false for unknown stitches.
  draw(stitch, color){
    this.color = color || 0x000000;
    let x = 0;
    let y = 0;
    switch(stitch) {
      case 'slst':
        return this.drawSlipstitch(x, y);
      case 'sc':
        return this.drawSingleCrochet(x, y);
      case 'mr':
        return this.drawMagicRing(x, y);
      case 'ch':
        return this.drawChainStitch(x, y);
      case 'hdc':
        return this.drawHalfDoubleCrochet(x, y);
      case 'dc':
        return this.drawDoubleCrochet(x, y);
      case 'tr':
        return this.drawTrebleCrochet(x, y);
      case 'dtr':
        return this.drawDoubleTrebleCrochet(x, y);
      case 'hole':
        return this.drawHole(x, y);
      default:
        return false;
    }
  }
  // Line material in the current color.
  lineMaterial() {
    return new THREE.LineBasicMaterial({
      color: this.color
    });
  }
  // Mesh material in the current color.
  meshMaterial() {
    return new THREE.MeshBasicMaterial({
      color: this.color
    });
  }
  // Dashed circle marking a hole in the pattern.
  drawHole(x,y) {
    let material = new THREE.LineDashedMaterial( {
      color: this.color,
      linewidth: 1,
      scale: 1,
      dashSize: 2,
      gapSize: 2,
    } );
    let circGeometry = new THREE.CircleGeometry( 5, 16 );
    // Drop the center vertex so only the outline is drawn.
    circGeometry.vertices.shift();
    // computeLineDistances() is required for dashed materials.
    return new THREE.Line( circGeometry, material).computeLineDistances();
  }
  // Spiral symbol for a magic ring.
  drawMagicRing(x, y) {
    let path = new THREE.Path();
    let radius = 0;
    let angle = 0;
    path.moveTo(x,y);
    // Two full turns of an outward spiral (40 steps, 20 steps per circle).
    for (let n = 0; n < 40; n++) {
      radius += 0.2;
      // make a complete circle every 50 iterations
      angle += (Math.PI * 2) / 20;
      let newX = x + radius * Math.cos(angle);
      let newY = y + radius * Math.sin(angle);
      path.lineTo(newX, newY);
    }
    let points = path.getPoints();
    let geometry = new THREE.BufferGeometry().setFromPoints( points );
    // NOTE(review): rotateX takes radians; rotateX(90) is ~14.3 turns —
    // was Math.PI / 2 intended? Confirm visually before changing.
    return new THREE.Line( geometry.rotateX(90), this.lineMaterial() );
  }
  // Ellipse outline for a chain stitch.
  drawChainStitch(x, y) {
    let path = new THREE.Path();
    path.absellipse(x, y, 4, 2, 0, 2*Math.PI, null, null);
    let points = path.getPoints();
    let geometry = new THREE.BufferGeometry().setFromPoints( points );
    return new THREE.Line( geometry, this.lineMaterial() );
  }
  // Small filled sphere for a slip stitch (x/y are unused here).
  drawSlipstitch(x, y) {
    let geometry = new THREE.SphereGeometry( 2, 16, 16 );
    return new THREE.Mesh( geometry, this.meshMaterial() );
  }
  // Straight line segment between two points, in the current color.
  createLine(vec1, vec2) {
    let geometry = new THREE.Geometry();
    geometry.vertices.push(
      vec1,
      vec2
    );
    return new THREE.Line( geometry, this.lineMaterial() );
  }
  // "+" symbol for a single crochet.
  drawSingleCrochet(x, y) {
    let group = new THREE.Group();
    let line1 = this.createLine(new THREE.Vector3( x-10, 0, y ),
                                new THREE.Vector3( x+10, 0, y ));
    let line2 = this.createLine(new THREE.Vector3( x, 0, y-10 ),
                                new THREE.Vector3( x, 0, y+10 ));
    group.add( line1, line2 );
    return group;
  }
  // Shared "T" base used by the taller stitches; adds the lines to `group`.
  tShape(group, x, y) {
    let line1 = this.createLine(new THREE.Vector3( x, 0, y-15 ),
                                new THREE.Vector3( x, 0, y+15 ));
    let line2 = this.createLine(new THREE.Vector3( x-10, 0, y-15 ),
                                new THREE.Vector3( x+10, 0, y-15 ));
    group.add(line1, line2);
  }
  // "T" with no cross bars: half double crochet.
  drawHalfDoubleCrochet(x, y){
    let group = new THREE.Group();
    this.tShape(group, x, y);
    return group;
  }
  // "T" with one diagonal bar: double crochet.
  drawDoubleCrochet(x, y) {
    let group = new THREE.Group();
    this.tShape(group, x, y);
    let middleSlash = this.createLine(new THREE.Vector3( x-5, 0, y+5 ),
                                      new THREE.Vector3( x+5, 0, y-5 ));
    group.add(middleSlash);
    return group;
  }
  // "T" with two diagonal bars: treble crochet.
  drawTrebleCrochet(x, y) {
    let group = new THREE.Group();
    this.tShape(group, x, y);
    let topSlash = this.createLine(new THREE.Vector3( x-5, 0, y ),
                                   new THREE.Vector3( x+5, 0, y-10 ));
    let bottomSlash = this.createLine(new THREE.Vector3( x-5, 0, y+10 ),
                                      new THREE.Vector3( x+5, 0, y ));
    group.add(topSlash, bottomSlash);
    return group;
  }
  // "T" with three diagonal bars: double treble crochet.
  drawDoubleTrebleCrochet(x, y) {
    let group = new THREE.Group();
    this.tShape(group, x, y);
    let topSlash = this.createLine(new THREE.Vector3( x-5, 0, y ),
                                   new THREE.Vector3( x+5, 0, y-10 ));
    let middleSlash = this.createLine(new THREE.Vector3( x-5, 0, y+5 ),
                                      new THREE.Vector3( x+5, 0, y-5 ));
    let bottomSlash = this.createLine(new THREE.Vector3( x-5, 0, y+10 ),
                                      new THREE.Vector3( x+5, 0, y ));
    group.add(topSlash, middleSlash, bottomSlash);
    return group;
  }
} |
from django import forms
from klazor.models import Course
class CourseForm(forms.ModelForm):
    # ModelForm exposing the editable fields of a Course.
    class Meta:
        model = Course
        # tag_set / instructor_set are the course's many-to-many relations.
        fields = ['title', 'tag_set', 'instructor_set', 'release_date'] |
# Run the dz2z1 MPI benchmark over a fixed matrix of problem sizes.
# Usage: $0 [num_procs] — without an argument, mpirun chooses the default
# process count.

# Runs every benchmark configuration; any arguments (e.g. -np N) are
# forwarded to mpirun. Each dz2z1 invocation takes:
#   <element count> <lower bound> <upper bound>
run_all() {
	mpirun "$@" bin/dz2z1 1000 0 1000
	mpirun "$@" bin/dz2z1 1000 100 10000
	mpirun "$@" bin/dz2z1 1000000 0 1000
	mpirun "$@" bin/dz2z1 1000000 1000 10000
	mpirun "$@" bin/dz2z1 1000000000 0 1000
	mpirun "$@" bin/dz2z1 1000000000 100 10000
}

if [ $# -eq 0 ]
then
	echo "No arguments supplied"
	echo "Using default number of proc"
	run_all
else
	# fix: "procesors" typo in the user-facing message; quote $1 so an
	# empty/whitespace argument cannot word-split.
	echo "Running with $1 number of processors"
	run_all -np "$1"
fi
|
# Copyright (c) 2017-2020 VMware, Inc. or its affiliates
# SPDX-License-Identifier: Apache-2.0
# Default to GPHOME for both the source and target installations. These may be
# overridden manually for cross-version testing.
GPHOME_SOURCE=${GPHOME_SOURCE:-$GPHOME}
GPHOME_TARGET=${GPHOME_TARGET:-$GPHOME}
# log() prints its arguments to stdout.
#
# XXX At one point, log() printed its arguments to the TAP stream, but that
# interfered with the BATS "pretty" format, so we moved to the current solution.
# At some point it would be nice to get the TAP reporting working again, because
# it separates the reason for failure from the potentially massive amount of
# stdout/err generated by a failing test. That may require some conversations
# with bats-core upstream.
log() {
    # Echo each line of the joined arguments individually.
    # NOTE(review): `read` with the default IFS trims leading/trailing
    # whitespace from every line — confirm that is acceptable for log output.
    while read -r line; do
        echo "$line"
    done <<< "$*"
}
# fail() is meant to be called from BATS tests. It will fail the current test
# after printing its arguments to the TAP stream.
fail() {
    log "$@"
    false
}
# abort() is meant to be called from BATS tests. It will exit the process after
# printing its arguments to the TAP stream.
abort() {
    log "fatal: $*"
    exit 1
}
# skip_if_no_gpdb() will skip a test if a cluster's environment is not set up.
skip_if_no_gpdb() {
    [ -n "${GPHOME_SOURCE}" ] || skip "this test requires an active GPDB source cluster (set GPHOME or GPHOME_SOURCE)"
    [ -n "${GPHOME_TARGET}" ] || skip "this test requires an active GPDB target cluster (set GPHOME or GPHOME_TARGET)"
    [ -n "${PGPORT}" ] || skip "this test requires an active GPDB source cluster (set PGPORT)"
}
# isready abstracts pg_isready semantics across postgres versions.
# Args: [gphome] [port]; both default to the source cluster's settings.
isready() {
    local gphome=${1:-$GPHOME_SOURCE}
    local port=${2:-$PGPORT}
    if command -v "$gphome"/bin/pg_isready > /dev/null; then
        "$gphome"/bin/pg_isready -q -p "$port"
    else
        # 5X does not have pg_isready; fall back to a trivial psql query.
        "$gphome"/bin/psql postgres -p "$port" -qc "SELECT 1" &> /dev/null
    fi
}
# start_source_cluster() ensures that database is up before returning.
# The subshell keeps greenplum_path.sh from polluting the caller's env.
start_source_cluster() {
    isready || (source "$GPHOME_SOURCE"/greenplum_path.sh && "${GPHOME_SOURCE}"/bin/gpstart -a)
}
# delete_cluster takes a master data directory and calls gpdeletesystem, and
# removes the associated data directories.
delete_cluster() {
    local gphome="$1"
    local masterdir="$2"
    # Perform a sanity check before deleting. The unquoted pattern in the
    # [[ == ]] comparison is intentional: it is matched as a glob.
    expected_suffix="*qddir/demoDataDir.*.-1"
    [[ "$masterdir" == ${expected_suffix} ]] || \
        abort "cowardly refusing to delete $masterdir which does not look like an upgraded demo data directory. Expected suffix ${expected_suffix}"
    __gpdeletesystem "$gphome" "$masterdir"
    # XXX: Since gpugprade archives instead of removing data directories,
    # gpupgrade will fail when copying the master data directory to segments
    # with "file exists". To prevent this remove the data directories.
    delete_target_datadirs "$masterdir"
}
# delete_finalized_cluster takes an upgrade master data directory and deletes
# the cluster. It also resets the finalized data directories to what they were
# before upgrade by removing the upgraded data directories, and renaming the
# archive directories to their original name (which is the same as their
# upgraded name).
delete_finalized_cluster() {
    local gphome="$1"
    local masterdir="$2"
    # Perform a sanity check before deleting.
    local archive_masterdir=$(archive_dir "$masterdir")
    [ -d "$archive_masterdir" ] || abort "cowardly refusing to delete $masterdir. Expected $archive_masterdir to exist."
    __gpdeletesystem "$gphome" "$masterdir"
    local id=$(gpupgrade config show --id)
    local datadirs=$(dirname "$(dirname "$masterdir")")
    for archive in $(find "${datadirs}" -name "*${id}*.old"); do
        # The following sed matches archived data directories and returns the
        # path of the original directory. For example,
        #   /dbfast_mirror2/demoDataDir.BY6l9U0LfX8.1.old -> /dbfast_mirror2/demoDataDir1
        #   /datadirs/standby.BY6l9U0LfX8.old -> /datadirs/standby
        local original=$(sed -E 's/\.'"${id}"'(\.([-0-9]+))?\.old/\2/' <<< "$archive")
        rm -rf "${original}"
        mv "$archive" "$original"
    done
}
# Calls gpdeletesystem on the cluster pointed to by the given master data
# directory. Args: <gphome> <master data directory>.
__gpdeletesystem() {
    local gphome="$1"
    local masterdir="$2"
    # Look up the master port (fourth line of the postmaster PID file).
    local port=$(awk 'NR == 4 { print $0 }' < "$masterdir/postmaster.pid")
    local gpdeletesystem="$gphome"/bin/gpdeletesystem
    # XXX gpdeletesystem returns 1 if there are warnings. There are always
    # warnings. So we ignore the exit code...
    # fix: quote $gphome when sourcing so paths containing spaces (or glob
    # characters) do not word-split; run in a subshell to keep
    # greenplum_path.sh out of the caller's environment.
    (source "$gphome"/greenplum_path.sh && yes | PGPORT="$port" "$gpdeletesystem" -fd "$masterdir") || true
}
# Removes the upgraded segment data directories that are siblings of the
# given master data directory.
delete_target_datadirs() {
    local masterdir="$1"
    local datadir=$(dirname "$(dirname "$masterdir")")
    rm -rf "${datadir}"/*/demoDataDir.*.[0-9]
}
# require_gnu_stat tries to find a GNU stat program. If one is found, it will be
# assigned to the STAT global variable; otherwise the current test is skipped.
require_gnu_stat() {
    if command -v gstat > /dev/null; then
        STAT=gstat
    elif command -v stat > /dev/null; then
        STAT=stat
    else
        skip "GNU stat is required for this test"
    fi
    # Check to make sure what we have is really GNU.
    local version=$($STAT --version || true)
    [[ $version = *"GNU coreutils"* ]] || skip "GNU stat is required for this test"
}
# Counts ps entries matching $1 as a whole word (-w), using basic regex (-G).
# NOTE(review): the grep command itself can appear in the ps listing and
# inflate the count — confirm callers only rely on zero/non-zero.
process_is_running() {
    ps -ef | grep -wGc "$1"
}
# Takes an original datadir and echoes the expected temporary datadir containing
# the upgradeID.
#
# NOTE for devs: this is just for getting the expected data directories, which
# is an implementation detail. If you want the actual location of the new master
# data directory after an initialization, you can just ask the hub with
#
#     gpupgrade config show --target-datadir
#
expected_target_datadir() {
    local dir=$1
    local parentDir=$(dirname "${dir}")
    local baseDir=$(basename "${dir}")
    # e.g. "demoDataDir-1" -> suffix "-1"; empty when not a demo dir name.
    local suffix="${baseDir#demoDataDir}"
    local upgradeID
    upgradeID=$(gpupgrade config show --id)
    # Sanity check.
    [ -n "$parentDir" ]
    # The standby has no numeric suffix; its temp dir is just name.ID.
    if [ "${baseDir}" == "standby" ]; then
        echo "${parentDir}/${baseDir}.${upgradeID}"
        return
    fi
    echo "${parentDir}/demoDataDir.${upgradeID}.${suffix}"
}
# archive_dir echoes the expected archive directory given an original data
# directory.
archive_dir() {
    local dir=$1
    echo "$(expected_target_datadir "$dir")".old
}
# Succeeds when the installation at $1 is a Greenplum 5.x server.
is_GPDB5() {
    local gphome=$1
    local version=$("$gphome"/bin/postgres --gp-version)
    [[ $version =~ ^"postgres (Greenplum Database) 5." ]]
}
# query_datadirs returns the datadirs across various version of GPDB.
# Arguments are GPHOME, PGPORT, and the WHERE clause to use when querying
# gp_segment_configuration (defaults to all segments).
query_datadirs() {
    local gphome=$1
    local port=$2
    local where_clause=${3:-true}
    local sql="SELECT datadir FROM gp_segment_configuration WHERE ${where_clause} ORDER BY content, role"
    # 5X keeps data directory locations in the filespace catalogs instead of
    # gp_segment_configuration.datadir.
    if is_GPDB5 "$gphome"; then
        sql="
        SELECT e.fselocation as datadir
          FROM gp_segment_configuration s
          JOIN pg_filespace_entry e ON s.dbid = e.fsedbid
          JOIN pg_filespace f ON e.fsefsoid = f.oid
         WHERE f.fsname = 'pg_system' AND ${where_clause}
         ORDER BY s.content, s.role"
    fi
    run "$gphome"/bin/psql -At -p "$port" postgres -c "$sql"
    [ "$status" -eq 0 ] || fail "$output"
    echo "$output"
}
# get_rsync_pairs maps the data directory of every standby/mirror with the
# corresponding master/primary. The map will later be used to rsync the
# contents of the mirror back to the primary.
get_rsync_pairs() {
    local gphome=$1
    local port=${2:-$PGPORT}
    local sql="
    WITH cte AS (select role, content, fselocation datadir FROM pg_filespace_entry INNER JOIN gp_segment_configuration on dbid=fsedbid)
    SELECT f1.datadir, f2.datadir FROM (SELECT * FROM CTE WHERE role='m') f1
    INNER JOIN (SELECT * FROM CTE where role='p') f2 on f1.content=f2.content;"
    run "$gphome"/bin/psql -At -p "$port" postgres -c "$sql"
    [ "$status" -eq 0 ] || fail "$output"
    # NOTE(review): $output is deliberately unquoted so newlines collapse to
    # spaces for array assignment in callers — confirm before "fixing".
    echo $output
}
# setup_restore_cluster gathers the necessary information to later run
# restore_cluster. Arg: the upgrade mode flag (e.g. "--mode=link").
setup_restore_cluster() {
    local mode=$1
    if is_GPDB5 "$GPHOME_SOURCE"; then
        RSYNC_PAIRS=($(get_rsync_pairs $GPHOME_SOURCE))
    fi
    # In link mode we must bring the datadirs back to a good state, whereas in
    # copy mode we can discard the duplicate copy of the datadir after the
    # test. Specifically, in link mode we undo the rename of pg_control file.
    if [ "$mode" == "--mode=link" ]; then
        MASTER_AND_PRIMARY_DATADIRS=($(query_datadirs $GPHOME_SOURCE $PGPORT "role = 'p'"))
    else
        MASTER_AND_PRIMARY_DATADIRS=
    fi
}
# restore_cluster brings a cluster back to a known state before upgrade. It
# uses rsync to account for an issue in GPDB5 where the standby and mirrors
# become out of sync and fail to start, thus causing gpstart to return non-zero
# exit code.
restore_cluster() {
    if is_GPDB5 "$GPHOME_SOURCE"; then
        # Each pair is "mirrordir|primarydir"; sync mirror content back onto
        # the primary, excluding per-instance configuration files.
        for var in "${RSYNC_PAIRS[@]}"; do IFS="|"; set -- $var;
            rsync -r "$1/" "$2/" \
                --exclude=internal.auto.conf \
                --exclude=pg_hba.conf \
                --exclude=postmaster.opts \
                --exclude=postgresql.auto.conf \
                --exclude=internal.auto.conf \
                --exclude=gp_dbid \
                --exclude=postgresql.conf \
                --exclude=backup_label.old \
                --exclude=postmaster.pid \
                --exclude=recovery.conf
        done
    elif [[ -n ${MASTER_AND_PRIMARY_DATADIRS} ]]; then
        # Link mode: undo the pg_control rename performed during upgrade.
        for datadir in "${MASTER_AND_PRIMARY_DATADIRS[@]}"; do
            mv "${datadir}/global/pg_control.old" "${datadir}/global/pg_control"
        done
    fi
}
# Writes the pieces of gp_segment_configuration that we need to ensure remain
# the same across upgrade, one segment per line, sorted by content ID.
# NOTE(review): relies on a $PSQL variable defined elsewhere in the harness —
# confirm it is set before this helper is called.
get_segment_configuration() {
    local gphome=$1
    local port=${2:-$PGPORT}
    # 5X stores data directory locations in the filespace catalogs.
    if is_GPDB5 "$gphome"; then
        $PSQL -AtF$'\t' -p "$port" postgres -c "
            SELECT s.content, s.role, s.hostname, s.port, e.fselocation as datadir
              FROM gp_segment_configuration s
              JOIN pg_filespace_entry e ON s.dbid = e.fsedbid
              JOIN pg_filespace f ON e.fsefsoid = f.oid
             WHERE f.fsname = 'pg_system'
             ORDER BY s.content, s.role
        "
    else
        $PSQL -AtF$'\t' -p "$port" postgres -c "
            SELECT content, role, hostname, port, datadir
              FROM gp_segment_configuration
             ORDER BY content, role
        "
    fi
}
|
<reponame>DylanRJohnston/io-ts
import * as assert from 'assert'
import * as fc from 'fast-check'
import { isRight } from 'fp-ts/lib/Either'
import { Kind, URIS, HKT } from 'fp-ts/lib/HKT'
import * as t from '../src'
import * as D from '../src/Decoder'
import * as G from '../src/Guard'
import {
memoize,
Schemable,
WithUnion,
Schemable1,
WithUnion1,
WithUnknownContainers,
WithUnknownContainers1
} from '../src/Schemable'
import * as T from '../src/Type'
import * as A from './Arbitrary'
// A Schema is a polymorphic description of a type `A`: given any Schemable
// interpreter `S` (extended with union and unknown-container support) it
// yields that interpreter's representation of `A`.
interface Schema<A> {
  <S>(S: Schemable<S> & WithUnknownContainers<S> & WithUnion<S>): HKT<S, A>
}

// Memoizes a schema so repeated interpretation reuses the first result.
function make<A>(f: Schema<A>): Schema<A> {
  return memoize(f)
}

// Specializes a schema to a concrete Schemable1 instance (Kind-based).
function interpreter<S extends URIS>(
  S: Schemable1<S> & WithUnknownContainers1<S> & WithUnion1<S>
): <A>(schema: Schema<A>) => Kind<S, A> {
  return (schema: any) => schema(S)
}

// Property-checks that the Arbitrary / Decoder / Guard / Type interpretations
// of `schema` all agree with the reference io-ts codec `type`.
function check<A>(schema: Schema<A>, type: t.Type<A>): void {
  const arb = interpreter(A.schemableArbitrary)(schema)
  const decoder = interpreter(D.schemableDecoder)(schema)
  const guard = interpreter(G.schemableGuard)(schema)
  const itype = interpreter(T.schemableType)(schema)
  // decoder and type should be aligned
  fc.assert(fc.property(arb, (a) => isRight(decoder.decode(a)) === isRight(type.decode(a))))
  // interpreted type and type should be aligned
  fc.assert(fc.property(arb, (a) => isRight(itype.decode(a)) === isRight(type.decode(a))))
  // guard and `Type`'s `is` should be aligned
  fc.assert(fc.property(arb, (a) => guard.is(a) === type.is(a)))
}
// One test per Schemable primitive/combinator: each builds the schema and the
// equivalent classic io-ts codec, then property-checks them for agreement.
describe('Type', () => {
  it('string', () => {
    check(
      make((S) => S.string),
      t.string
    )
  })
  it('number', () => {
    check(
      make((S) => S.number),
      t.number
    )
  })
  it('boolean', () => {
    check(
      make((S) => S.boolean),
      t.boolean
    )
  })
  it('UnknownArray', () => {
    check(
      make((S) => S.UnknownArray),
      t.UnknownArray
    )
  })
  it('UnknownRecord', () => {
    check(
      make((S) => S.UnknownRecord),
      t.UnknownRecord
    )
  })
  it('literal', () => {
    check(
      make((S) => S.literal('a', 'b')),
      t.keyof({ a: null, b: null })
    )
  })
  it('nullable', () => {
    check(
      make((S) => S.nullable(S.string)),
      t.union([t.null, t.string])
    )
  })
  it('type', () => {
    check(
      make((S) =>
        S.type({
          name: S.string,
          age: S.number
        })
      ),
      t.type({
        name: t.string,
        age: t.number
      })
    )
  })
  it('partial', () => {
    check(
      make((S) =>
        S.partial({
          name: S.string,
          age: S.number
        })
      ),
      t.partial({
        name: t.string,
        age: t.number
      })
    )
  })
  it('record', () => {
    check(
      make((S) => S.record(S.string)),
      t.record(t.string, t.string)
    )
  })
  it('array', () => {
    check(
      make((S) => S.array(S.string)),
      t.array(t.string)
    )
  })
  it('tuple', () => {
    check(
      make((S) => S.tuple(S.string)),
      t.tuple([t.string])
    )
    check(
      make((S) => S.tuple(S.string, S.number)),
      t.tuple([t.string, t.number])
    )
  })
  it('intersection', () => {
    check(
      make((S) => S.intersection(S.type({ a: S.string }), S.type({ b: S.number }))),
      t.intersection([t.type({ a: t.string }), t.type({ b: t.number })])
    )
  })
  it('sum', () => {
    check(
      make((S) =>
        S.sum('_tag')({
          A: S.type({ _tag: S.literal('A'), a: S.string }),
          B: S.type({ _tag: S.literal('B'), b: S.number })
        })
      ),
      t.union([t.type({ _tag: t.literal('A'), a: t.string }), t.type({ _tag: t.literal('B'), b: t.number })])
    )
  })
  it('lazy', () => {
    // Recursive schema: `schema` refers to itself via the lazy combinator.
    interface A {
      a: string
      b?: A
      c?: number
    }
    const schema: Schema<A> = make((S) =>
      S.lazy('A', () => S.intersection(S.type({ a: S.string }), S.partial({ b: schema(S), c: S.number })))
    )
    const type: t.Type<A> = t.recursion('A', () =>
      t.intersection([t.type({ a: t.string }), t.partial({ b: type, c: t.number })])
    )
    check(schema, type)
  })
  it('union', () => {
    check(
      make((S) => S.union(S.string, S.number)),
      t.union([t.string, t.number])
    )
  })
  it('refinement', () => {
    // Branded refinement: only non-empty strings decode successfully.
    interface NonEmptyStringBrand {
      readonly NonEmptyString: unique symbol
    }
    type NonEmptyString = string & NonEmptyStringBrand
    const type = T.refinement(T.string, (s): s is NonEmptyString => s.length > 0, 'NonEmptyString')
    assert.deepStrictEqual(isRight(type.decode('a')), true)
    assert.deepStrictEqual(isRight(type.decode('')), false)
  })
})
|
<filename>src/db/generic/ShareConfig.h
/*
* Copyright (c) CERN 2013-2015
*
* Copyright (c) Members of the EMI Collaboration. 2010-2013
* See http://www.eu-emi.eu/partners for details on the copyright
* holders.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef SHARECONFIG_H_
#define SHARECONFIG_H_

#include <string>

/// Plain value object describing one share configuration entry:
/// a (source, destination, vo) link and its scheduling weight.
class ShareConfig
{
public:
    ShareConfig() = default;
    ~ShareConfig() = default;

    std::string source;       ///< Source storage element
    std::string destination;  ///< Destination storage element
    std::string vo;           ///< Virtual organization this share applies to
    int weight = 0;           ///< Share weight (defaults to 0)
};

#endif // SHARECONFIG_H_
|
package intercept.configuration;
import org.junit.Test;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
public class StubsUnitTests {

    // Verifies that a stub registered for a path can be looked up again.
    // (A byte-for-byte duplicate of this test, stubs_can_be_matched_to_a_domain,
    // was removed; it exercised exactly the same behavior.)
    @Test
    public void stubsCanBeMatchedToADomain() {
        Stubs stubs = new Stubs();
        StubRequest stubRequest = new StubRequest();
        stubRequest.setPath("www.thoughtworks.com");
        stubs.createOrUpdateStub(stubRequest);
        assertThat(stubs.isStubbed("www.thoughtworks.com"), is(true));
    }

    // Verifies that the response body configured on a stub is returned
    // when the stubbed path is queried.
    @Test
    public void canRetrieveResponseBodyDataForStub() {
        Stubs stubs = new Stubs();
        StubRequest stubRequest = new StubRequest();
        stubRequest.setPath("www.thoughtworks.com");
        stubRequest.setBody("Agile is the way!");
        stubs.createOrUpdateStub(stubRequest);
        assertThat(stubs.getStubbedResponse("www.thoughtworks.com").getBody(), is("Agile is the way!"));
    }
}
|
<reponame>will94king/PaoPaoDemo
package com.gz.pao.pao.web.admin.entity;
import lombok.Data;
import java.io.Serializable;
// Admin account entity for the back-office.
// Lombok @Data generates getters/setters, equals/hashCode and toString.
@Data
public class Admin implements Serializable {
    private int id;               // primary key
    private String adminName;     // login name
    private String adminPassword; // NOTE(review): looks stored verbatim — confirm it is hashed upstream
}
|
<gh_stars>10-100
/*******************************************************************************
* This file is part of the Symfony eclipse plugin.
*
* (c) <NAME> <<EMAIL>>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
******************************************************************************/
package com.dubture.symfony.core.model;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.dltk.ast.Modifiers;
import org.eclipse.dltk.core.IModelElement;
import org.eclipse.dltk.core.ModelException;
import org.eclipse.dltk.internal.core.ModelElement;
import org.eclipse.dltk.internal.core.SourceType;
import com.dubture.symfony.index.model.Route;
/**
*
* A {@link IModelElement} for Routes - to display custom
* context information in the codeassist popups.
*
* @author "<NAME> <<EMAIL>>"
*
*/
@SuppressWarnings("restriction")
public class RouteSource extends SourceType {

    // The indexed route this synthetic source element represents.
    private Route route;

    public RouteSource(ModelElement parent, String name, Route route) throws IllegalArgumentException {
        super(parent, name);
        this.route = route;
    }

    // Routes are always presented as public elements in codeassist.
    @Override
    public int getFlags() throws ModelException {
        return Modifiers.AccPublic;
    }

    // There is no real source model behind a route, so hand out a stub info
    // object instead of resolving one from the DLTK model.
    @Override
    public Object getElementInfo() throws ModelException {
        return new FakeTypeElementInfo();
    }

    // Same stub info when DLTK lazily opens the element.
    @Override
    protected Object openWhenClosed(Object info, IProgressMonitor monitor)
        throws ModelException {
        return new FakeTypeElementInfo();
    }

    public Route getRoute() {
        return route;
    }

    /* (non-Javadoc)
     * @see org.eclipse.dltk.internal.core.SourceType#getElementType()
     */
    @Override
    public int getElementType()
    {
        return ISymfonyModelElement.ROUTE;
    }
}
|
package ctag.tags;
import ctag.Binary;
import ctag.CTagInput;
import ctag.exception.EndException;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
/**
* The tag that represents a string.
* <br/><br/>
* <table>
* <tr>
* <td><b>Binary prefix: </b></td>
* <td><code>00000111 - 07</code></td>
* </tr>
* <tr>
* <td><b>Minimal payload: </b></td>
* <td>2 bytes</td>
* </tr>
* <tr>
* <td><b>Maximal payload: </b></td>
* <td>65538 bytes</td>
* </tr>
* </table>
* The string tag starts with a two bytes holding the length, followed by
* one-byte characters representing the string in UTF-8.
* <br/>
* <pre>
* Prefix Length Characters ...
* 00000111 0000000000000110 01010011 01110100 01110010 01101001 01101110 01100111
* STRING = 6 = "String"
* </pre>
* @since 1.0
*/
public class TagString implements ITag<String> {

    private String value;

    /** Creates an empty string tag. */
    public TagString() {
        value = "";
    }

    /** Creates a string tag holding {@code value}. */
    public TagString( String value ) {
        this.value = value;
    }

    /**
     * Encodes the string as a two-byte big-endian length followed by the
     * UTF-8 bytes of the string (see the class documentation).
     */
    @Override
    public Binary encode() {
        // The wire format is documented as UTF-8; the previous version used
        // String.getBytes() and therefore depended on the platform charset.
        byte[] bytes = value.getBytes( StandardCharsets.UTF_8 );
        // Work with an int length; the two masked bytes below cover the full
        // documented 0..65535 range without short-cast sign surprises.
        int length = bytes.length;
        byte[] lenBytes = {
            ( byte ) ( length >>> 8 & 0xff ),
            ( byte ) ( length & 0xff )
        };
        Binary.Builder builder = new Binary.Builder();
        builder.append( lenBytes );
        builder.append( bytes );
        return builder.build();
    }

    @Override
    public String getValue() {
        return value;
    }

    @Override
    public void setValue( String value ) {
        this.value = value;
    }

    @Override
    public Binary getPrefixByte() {
        return new Binary( ( byte ) 0b111 );
    }

    /**
     * Parses a CTag code as a string.
     * @param input The {@link CTagInput} stream that possibly begins with this
     * string data.
     * @return The parsed {@link TagString} if parsed with success.
     * @exception IOException If the {@link CTagInput}'s underlying stream
     * throws an IOException.
     * @since 1.0
     */
    public static TagString parse( CTagInput input ) throws IOException, EndException {
        int len = TagShort.parse( input ).getValue();
        Binary binary = input.read( len );
        byte[] bytes = binary.getBytes();
        // Decode with the documented UTF-8 charset, mirroring encode().
        String value = new String( bytes, StandardCharsets.UTF_8 );
        return new TagString( value );
    }

    public String toString() {
        return "STRING \"" + value + "\"";
    }
}
|
/** Payload for recording the winning team of a match. */
export interface SetMatchWinner {
  matchId: number, // match to update
  teamId: number,  // winning team
}
|
<reponame>Yohandah/interact.js<filename>scripts/bin/esnext.js
// CLI driver for the esnext build: compiles the given source files (or every
// discovered source when none are given), bundling shims where configured.
//
// Usage: node esnext.js [--watch] [--serve] [files...]
const path = require('path')
const generate = require('../esnext')
// NOTE(review): `minify` is required but never used below — confirm before removing.
const minify = require('../minify')
const bundleShim = require('../shimBundler')
const { getEsnextBabelOptions, getSources, getShims } = require('../utils')

// Skip `node` and the script path; everything else is a flag or a file.
const [, , ...args] = process.argv
const fileArgs = []
let watch = false
let serve = false

for (const arg of args) {
  if (arg === '--watch') {
    watch = true
  } else if (arg === '--serve') {
    serve = true
  } else {
    fileArgs.push(path.resolve(arg))
  }
}

const babelOptions = getEsnextBabelOptions()
const shims = getShims()
const cwd = process.cwd()

// Explicit file arguments win; otherwise build every source under cwd.
const sourcesPromise = fileArgs.length ? Promise.resolve(fileArgs) : getSources({ cwd })

sourcesPromise.then(async (sources) => {
  await generate({
    sources,
    // Swap shimmed sources for their pre-bundled counterparts.
    async shim (filename) {
      const shimConfig = shims.find((s) => filename.endsWith(s.source))
      if (shimConfig) {
        const code = await bundleShim(shimConfig)
        return { code }
      }
    },
    babelOptions,
    watch,
    serve,
  })
})
|
import {Actor} from 'app/types';
// TODO(ts): add the correct type
export type Rules = Array<any> | null;

/**
 * Given a list of rule objects returned from the API, locate the matching
 * rules for a specific owner.
 */
function findMatchedRules(rules: Rules, owner: Actor) {
  if (!rules) {
    return undefined;
  }

  // An actor entry [type, key] refers to `owner` when the type matches and
  // the key is the owner's email (users) or name (teams).
  const refersToOwner = ([actorType, key]) => {
    if (actorType !== owner.type) {
      return false;
    }
    if (actorType === 'user') {
      return key === owner.email;
    }
    return actorType === 'team' && key === owner.name;
  };

  const matched: any[] = [];
  for (const [rule, ruleActors] of rules) {
    if (ruleActors.find(refersToOwner)) {
      matched.push(rule);
    }
  }
  return matched;
}
export {findMatchedRules};
|
<reponame>s00d/intro.ts<gh_stars>1-10
"use strict";
// NOTE: compiled TypeScript output — edit the .ts source, not this file.
Object.defineProperty(exports, "__esModule", { value: true });
exports._isFixed = void 0;
var _getPropValue_1 = require("./_getPropValue");
// Returns true when `element` or any of its ancestors has `position: fixed`.
// Recursion stops at the document root (<html>) or a missing parent.
function _isFixed(element) {
    var p = element.parentNode;
    if (!p || p.nodeName === "HTML") {
        return false;
    }
    if ((0, _getPropValue_1._getPropValue)(element, "position") === "fixed") {
        return true;
    }
    return _isFixed(p);
}
exports._isFixed = _isFixed;
|
#!/bin/bash
# Builds youki plus its integration/runtime test binaries and copies the
# resulting executables into the repository root.
#
# Usage: ./build.sh [--release]
# Env:   TARGET - target triple (default x86_64-unknown-linux-gnu)
set -e
TARGET=${TARGET-x86_64-unknown-linux-gnu}
if [ "$TARGET" != "" ]; then
    TGT="--target $TARGET"
fi
VERSION=debug
if [ "$1" == "--release" ]; then
    VERSION=release
fi
# Runtimetest must be compiled in its directory and is
# not a part of youki workspace. For the reasoning behind this,
# please check the docs and readme
cargo build --verbose $TGT $1
cd ./runtimetest
cargo build --verbose $TGT $1
cd ..
cp target/$TARGET/$VERSION/youki .
cp target/$TARGET/$VERSION/integration_test ./youki_integration_test
cp runtimetest/target/$TARGET/$VERSION/runtimetest ./runtimetest_tool
|
#include <iostream>
namespace Morpheus {

    // Process-wide selector for the active rendering backend.
    // Backed by a single static; no synchronization is provided.
    class RendererAPI {
    public:
        enum class API {
            None = 0,
            Vulkan,
            DirectX,
            OpenGL
        };

        // Replace the active backend.
        static void SetAPI(API api) { s_RenderAPI = api; }

        // Query the active backend.
        static API GetAPI() { return s_RenderAPI; }

    private:
        static API s_RenderAPI;
    };

    // Set the default rendering API to Vulkan
    RendererAPI::API RendererAPI::s_RenderAPI = RendererAPI::API::Vulkan;

} // namespace Morpheus
int main() {
    using namespace Morpheus;

    // Select DirectX as the active backend for this demo.
    RendererAPI::SetAPI(RendererAPI::API::DirectX);

    // Map the current backend to its display name, then print it.
    const char* name = "None";
    switch (RendererAPI::GetAPI()) {
        case RendererAPI::API::Vulkan:  name = "Vulkan";  break;
        case RendererAPI::API::DirectX: name = "DirectX"; break;
        case RendererAPI::API::OpenGL:  name = "OpenGL";  break;
        default:                        name = "None";    break;
    }
    std::cout << "Current Rendering API: " << name << std::endl;
    return 0;
}
<reponame>thearnica/dom-locky<filename>types/index.d.ts
// Per-event handling mode. NOTE(review): exact semantics of
// 'no-default'/'report'/'report-only' live in the implementation — confirm there.
type LockyEventHandler = (true | false | 'no-default' | 'report' | 'report-only');

/** Options accepted by {@link lockyOn}. */
interface Props {
  noDefault?: boolean,   // do not apply the default event handling
  group?: string,        // lock group name (see lockyGroup)
  onEscape?: () => void, // invoked when the user attempts to escape the lock
  events?: {             // per-event overrides
    click?: LockyEventHandler,
    mousemove?: LockyEventHandler,
    mousedown?: LockyEventHandler,
    touchmove?: LockyEventHandler,
    touchstart?: LockyEventHandler,
    keydown?: LockyEventHandler,
    change?: LockyEventHandler,
    scroll?: LockyEventHandler,
    wheel?: LockyEventHandler,
  }
}

/** Tear-down callback returned by {@link lockyOn}; call it to release the lock. */
type RemoveLocky = () => void;

/** Locks interaction outside `selector`; returns a function that removes the lock. */
export function lockyOn(selector: string | HTMLElement, settings?: Props): RemoveLocky;
/** Associates the element(s) matching `selector` with a named lock group. */
export function lockyGroup(selector: string, group: string): void;
<filename>components/Explore/Flights.js
import React, { Component } from 'react';
import { View, Text, StyleSheet, Image } from 'react-native';
import StarRating from 'react-native-star-rating';
class Flights extends Component {
render() {
return (
<View
style={{
width: this.props.width / 2 - 30,
height: this.props.width / 2 - 30,
borderWidth: 0.5,
borderColor: '#dddddd'
}}
>
<View style={{ flex: 1 }}>
<Image
style={{
flex: 1,
width: null,
height: null,
resizeMode: 'contain'
}}
source={require('../../assets/AI.jpg')}
/>
</View>
<View
style={{
flex: 1,
alignItems: 'flex-start',
justifyContent: 'space-evenly',
paddingLeft: 10
}}
>
<Text style={{ fontSize: 10, color: '#b63838' }}>
{this.props.type}
</Text>
<Text style={{ fontSize: 12, fontWeight: 'bold' }}>
{this.props.name}
</Text>
<Text style={{ fontSize: 10 }}>{this.props.price}$</Text>
<StarRating
disable={true}
maxStars={5}
rating={this.props.rating}
starSize={10}
/>
</View>
</View>
);
}
}
export default Flights;
// NOTE(review): `styles.container` is not referenced by <Flights> in this
// file — confirm no other consumer needs it before removing.
const styles = StyleSheet.create({
  container: {
    flex: 1,
    alignItems: 'center',
    justifyContent: 'center'
  }
});
|
#!/bin/sh
# Clones a git repository, builds its Antora docs, post-processes the HTML
# and publishes the result to /antora-dist.
#
# Env: GIT_REPO, GIT_BRANCH, GIT_PATH, PLAYBOOK
#
# Abort on the first failing command: previously a failed clone/checkout/build
# was ignored and the script still reported success.
set -e
mkdir /work
cd /work
echo "Cloning git repository: $GIT_REPO"
git clone $GIT_REPO
cd /work/*
git checkout $GIT_BRANCH
cd $GIT_PATH
echo "Cleaning output directory."
rm -rf /antora-dist/*
echo "Running : antora $PLAYBOOK"
antora $PLAYBOOK
echo "Antora build completed successfully."
echo "Customizing output."
# Antora emits image assets under _images; rename for the hosting layout.
find ./dist -name '*.html' -exec sed -i 's/_images/assets-images/g' {} \;
find ./dist -name '_images' -ignore_readdir_race -execdir mv _images assets-images \;
echo "Copying files..."
cp -rf dist/* /antora-dist/.
echo "Done."
|
#! /bin/sh
# Runs the RH Ceph RPM-based upgrade suite (custom ceph name, OKR bz 1876447)
# against a freshly named CI instance, then always attempts cleanup.
echo "Beginning Red Hat Ceph RPM based upgrade with ceph custom name - OKR bz 1876447 testing."
# Unique per-run instance name so parallel CI jobs do not collide.
random_string=$(cat /dev/urandom | tr -cd 'a-z0-9' | head -c 5)
instance_name="ci-${random_string}"
platform="rhel-7"
rhbuild="4.3"
test_suite="suites/nautilus/upgrades/tier-2_upgrade_ceph-custom-name.yaml"
test_conf="conf/nautilus/upgrades/upgrades.yaml"
test_inventory="conf/inventory/rhel-7-latest.yaml"
return_code=0
# Process the CLI arguments for IBM-C environment
CLI_ARGS=$@
cloud="ibmc"
if [ -z "${CLI_ARGS##*$cloud*}" ] ; then
    # IBM Cloud run: use the IBM VPC inventory instead.
    test_inventory="conf/inventory/ibm-vpc-rhel-7-latest.yaml"
else
    # Non-IBM runs additionally publish results to Report Portal.
    CLI_ARGS="$CLI_ARGS --post-results --report-portal"
fi
$WORKSPACE/.venv/bin/python run.py --v2 \
    --osp-cred $HOME/osp-cred-ci-2.yaml \
    --rhbuild $rhbuild \
    --platform $platform \
    --instances-name $instance_name \
    --global-conf $test_conf \
    --suite $test_suite \
    --inventory $test_inventory \
    --log-level DEBUG \
    $CLI_ARGS
# Record the suite's pass/fail status; cleanup failures below only warn.
if [ $? -ne 0 ]; then
    return_code=1
fi
CLEANUP_ARGS="--log-level debug --osp-cred $HOME/osp-cred-ci-2.yaml"
if [ -z "${CLI_ARGS##*$cloud*}" ] ; then
    CLEANUP_ARGS="$CLEANUP_ARGS --cloud ibmc"
fi
$WORKSPACE/.venv/bin/python run.py --cleanup $instance_name $CLEANUP_ARGS
if [ $? -ne 0 ]; then
    echo "cleanup instance failed for instance $instance_name"
fi
exit ${return_code}
|
<filename>apartments.gemspec
require_relative "./lib/apartments/version"

Gem::Specification.new do |spec|
  spec.name          = "apartments"
  # Single source of truth: lib/apartments/version.rb (previously the version
  # was hard-coded here, which could drift from the library's own constant).
  spec.version       = Apartments::VERSION
  spec.authors       = ["<NAME>"]
  spec.email         = ["<EMAIL>"]
  spec.description   = "Provides details on a user selected apartment in NYC"
  spec.summary       = "Find apartments in NY"
  spec.homepage      = "https://github.com/SaraGinsburg/apartments"
  spec.files         = ["lib/apartments.rb", "lib/apartments/cli.rb", "lib/apartments/version.rb", "lib/apartments/scraper.rb", "lib/apartments/apartment.rb", "config/environment.rb"]
  # spec.files = Dir['lib/**/*.rb']
  spec.license       = 'MIT'
  spec.executables << 'apartments'
  spec.add_development_dependency "bundler", "~> 2.1.4"
  spec.add_development_dependency "rake", "~> 10.0"
  spec.add_development_dependency "rspec", "~> 3.7"
  spec.add_development_dependency "pry", "~> 0"
  spec.add_runtime_dependency "nokogiri"
end
|
#include <mulle-objc-runtime/mulle-objc-runtime.h>

// Empty marker protocol; carries no methods, used only for conformance checks.
@protocol Proto1
@end
|
import mxnet as mx
from mxnet import ndarray as nd
def pad_sequences(x, max_seq_length, ctx=mx.cpu()):
    """Pack variable-length sequences into a fixed-width MXNet NDArray.

    Rows longer than ``max_seq_length`` are truncated to their first
    ``max_seq_length`` elements (written left-aligned); shorter rows are
    right-aligned, i.e. zero-padded at the front (pre-padding).

    Args:
        x: iterable of sequences of numeric token values.
        max_seq_length: width of every output row.
        ctx: MXNet context to allocate the result on (default: CPU).

    Returns:
        mx.nd.NDArray of shape ``(len(x), max_seq_length)``; unfilled
        positions remain zero.
    """
    row_count = len(x)
    result = nd.zeros(shape=(row_count, max_seq_length), ctx=ctx)
    for i, row in enumerate(x):
        if len(row) >= max_seq_length:
            # Too long: keep the head of the row, left-aligned.
            for j, w in enumerate(row[:max_seq_length]):
                result[i, j] = w
        else:
            # Too short: right-align so the zeros pad the front.
            for j, w in enumerate(row):
                result[i, max_seq_length - len(row) + j] = w
    return result
|
<filename>artifacts/spring-workshop/dao/src/main/java/com/vmware/spring/workshop/dao/GenericDao.java
package com.vmware.spring.workshop.dao;
import java.io.Serializable;
/**
 * Generic DAO contract adding runtime type metadata on top of the common
 * CRUD operations.
 *
 * @param <T> Type of object being persisted
 * @param <ID> Type of {@link Serializable} unique ID being used as primary key
 * @author <NAME>.
 * @since Oct 3, 2011 4:08:27 PM
 */
public interface GenericDao<T,ID extends Serializable> extends CommonOperationsDao<T,ID> {
    /**
     * @return The {@link Class} representing the entity managed by this DAO
     */
    Class<T> getPersistentClass ();
    /**
     * @return The {@link Class} representing the primary entity key type
     */
    Class<ID> getIdClass ();
}
|
def find_longest_seq(arr):
    """Return the length of the longest run of consecutive, increasing-by-1
    values in ``arr`` (e.g. the run [1, 2, 3] inside [9, 1, 2, 3, 7] -> 3).

    An empty input has no run at all, so the result is 0 (the previous
    implementation incorrectly returned 1 for an empty list).
    """
    if not arr:
        return 0
    max_length = 0
    cur_length = 1
    for i in range(len(arr) - 1):
        if arr[i] + 1 == arr[i + 1]:
            # The current run continues.
            cur_length += 1
        else:
            # Run broken: remember the best so far and restart.
            max_length = max(max_length, cur_length)
            cur_length = 1
    # The final run is never closed inside the loop, so account for it here.
    return max(max_length, cur_length)
<filename>src/components/Register.js<gh_stars>0
import React, {Component} from 'react';
import axios from 'axios';
import {AppStyle} from './../css/app.js';
import Button from './Button';
import FancyLink from './FancyLink';
class Register extends Component {
constructor(props){
super(props);
this.state = {
firstName: '',
lastName: '',
age: 0,
password: '',
email: '',
confirmPassword: '',
validEmail: true,
validPassword: true,
registrationSuccess: false
}
this.handleFirstName = this.handleFirstName.bind(this);
this.handleLastName = this.handleLastName.bind(this);
this.handleBirthday = this.handleBirthday.bind(this);
this.handleEmail = this.handleEmail.bind(this);
this.handlePassword = this.handlePassword.bind(this);
this.handlePasswordConfirmation = this.handlePasswordConfirmation.bind(this);
this.handleSubmission = this.handleSubmission.bind(this);
};
handleSubmission(e) {
e.preventDefault();
const {firstName, lastName, age, email, password, confirmPassword, validEmail, validPassword } = this.state;
if (password !== confirmPassword) {
this.setState({validPassword: false});
} else {
this.setState({validPassword: true});
};
// email not found sends 404 means email not used
axios.get(`/users/email/${email}`).then(res => {
this.setState({validEmail: false});
}).catch((e) => {
this.setState({validEmail: true});
if (validEmail && validPassword){
this.props.onRegister({
firstName: firstName[0].toUpperCase() + firstName.slice(1),
lastName: lastName[0].toUpperCase() + lastName.slice(1),
age,
email: email.toLowerCase(),
password,
admin: false,
workoutRecord: []
})
this.setState({registrationSuccess: true})
}
});
};
handleFirstName(e){
this.setState({firstName: e.target.value});
};
handleLastName(e){
this.setState({lastName: e.target.value});
};
handleBirthday(e){
let today = new Date();
let birthDate = new Date(e.target.value);
let age = today.getFullYear() - birthDate.getFullYear();
let m = today.getMonth() - birthDate.getMonth();
if (m < 0 || (m === 0 && today.getDate() < birthDate.getDate())) age--;
this.setState({age});
};
handleEmail(e){
this.setState({email: e.target.value});
};
handlePassword(e){
this.setState({password: e.target.value});
};
handlePasswordConfirmation(e){
this.setState({confirmPassword: e.target.value});
};
render(){
let display;
if (!this.state.registrationSuccess){
let divBreaker = {
marginBottom: '12px',
}
let errorMessage = '';
let pwErrorMessage = '';
if (!this.state.validEmail){
errorMessage = <p style={{color: 'red', fontSize: '10px', marginTop: '5px', marginBottom: '0px'}}>E-mail already taken.</p>
divBreaker = {marginBottom: '10px'};
} else {
divBreaker = {marginBottom: '12px'};
errorMessage = ''
}
if (!this.state.validPassword){
pwErrorMessage = <p style={{color: 'red', fontSize: '10px', marginTop: '2px', marginBottom: '2px'}}>Passwords don't match.</p>
divBreaker = {marginBottom: '10px'};
} else {
divBreaker = {marginBottom: '12px'};
pwErrorMessage = '';
}
display =
<div style={AppStyle.regForm}>
<form onSubmit={this.handleSubmission}>
<h2 style={{marginBottom: '0px'}}> PLEASE FILL OUT THE FORM BELOW </h2>
<hr style={{...AppStyle.line, marginBottom: '10px'}}/>
<label>First Name</label>
<div style={divBreaker}>
<input
required
style={AppStyle.input}
type='text'
id='firstName'
placeholder='<NAME>'
onChange={this.handleFirstName} />
</div>
<label>Last Name</label>
<div style={divBreaker}>
<input
required
style={AppStyle.input}
type='text'
id='lastName'
placeholder='Doe'
onChange={this.handleLastName} />
</div>
<label>DATE OF BIRTH</label>
<div style={divBreaker}>
<input
required
style={AppStyle.input}
type='date'
id='birthday'
onChange={this.handleBirthday}/>
</div>
<label>E-MAIL ADDRESS</label>
<div style={divBreaker}>
<input
required
style={(this.state.validEmail) ? AppStyle.input : AppStyle.inputError}
type='text'
id='email'
placeholder='<EMAIL>'
onChange={this.handleEmail}/>
{errorMessage}
</div>
<label>PASSWORD</label>
<div style={divBreaker}>
<input
required
style={AppStyle.input}
type='password'
id='password'
placeholder='Must be 6 - 12 characters'
onChange={this.handlePassword} />
</div>
<label>CONFIRM PASSWORD</label>
<div style={divBreaker}>
<input
required
style={(this.state.validPassword) ? AppStyle.input : AppStyle.inputError}
type='password'
id='confirmation'
placeholder='Confirm Password'
onChange={this.handlePasswordConfirmation} />
{pwErrorMessage}
</div>
<hr style={{...AppStyle.line, marginBottom: '0px', marginTop: '15px'}}/>
<div style={{display: 'flex', justifyContent: 'center'}}>
<div style={{margin: 'auto'}}>
<FancyLink linkTo='/' linkName='NEVERMIND, RETURN'/>
</div>
<div style={{margin: 'auto'}}>
<Button buttonName='REGISTER' type='submit'/>
</div>
</div>
</form>
</div>
} else {
display =
<div style={{...AppStyle.regForm, display: 'block', height: '300px', marginTop: '150px'}}>
<h1>
Registration Complete!
</h1>
<hr style={AppStyle.line}/>
<h4>
Please return to the homepage and sign in
</h4>
<Button buttonName='HOME' linkTo='/'/>
</div>
}
return(
<div>
{display}
</div>
)
}
}
export default Register;
|
# Builds the jUnium PDF: kills any running Jekyll servers, serves a
# PDF-friendly variant of the site, then renders it with PrinceXML.
echo 'Killing all Jekyll instances'
kill -9 $(ps aux | grep '[j]ekyll' | awk '{print $2}')
clear
echo "Building PDF-friendly HTML site for Mydoc ...";
jekyll serve --detach --config _config.yml,pdfconfigs/config_junium_pdf.yml;
echo "done";
echo "Building the PDF ...";
# prince-list.txt enumerates the pages (in order) to include in the PDF.
prince --javascript --input-list=_site/pdfconfigs/prince-list.txt -o pdf/jUnium.pdf;
echo "done";
// AUTO-GENERATED webpack bundle (entries: src/js/app.js + src/scss/app.scss).
// Do not edit by hand — regenerate from the sources instead.
(window["webpackJsonp"] = window["webpackJsonp"] || []).push([["/js/app"],{

/***/ "./src/js/app.js":
/*!***********************!*\
  !*** ./src/js/app.js ***!
  \***********************/
/*! no static exports found */
/***/ (function(module, exports) {

/***/ }),

/***/ "./src/scss/app.scss":
/*!***************************!*\
  !*** ./src/scss/app.scss ***!
  \***************************/
/*! no static exports found */
/***/ (function(module, exports) {

// removed by extract-text-webpack-plugin

/***/ }),

/***/ 0:
/*!*************************************************!*\
  !*** multi ./src/js/app.js ./src/scss/app.scss ***!
  \*************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {

__webpack_require__(/*! /Users/lukasbecker/Documents/websites/hugo-starter-kit/src/js/app.js */"./src/js/app.js");
module.exports = __webpack_require__(/*! /Users/lukasbecker/Documents/websites/hugo-starter-kit/src/scss/app.scss */"./src/scss/app.scss");

/***/ })

},[[0,"/js/manifest"]]]);
'use strict';
const gameSchema = require('../../schema/GameSchema');
// Returns a page of games matching `filter` along with the total row count.
// Resolves { totalRow, data }; rejects with the underlying mongoose error.
const searchGames = function (filter, pagination, sortColumn, sortType) {
    return new Promise((resolve, reject) => {
        console.log('Game services - searchGame - begin');
        // First query: count every matching row for pagination metadata.
        gameSchema.find(filter).exec(function (err, result) {
            if (err) {
                console.log('Game services - searchGame - error');
                // Bail out here: the original fell through after reject()
                // and crashed on `result.length` with result undefined.
                return reject(err);
            }
            const totalRow = result.length;
            // Second query: fetch the requested page, sorted.
            gameSchema.find(filter)
                .limit(parseInt(pagination.rowsRequest))
                .sort([[sortColumn, parseInt(sortType)]])
                .skip(parseInt(pagination.startRow))
                .exec(function (err, result) {
                    if (err) {
                        console.log('Game services - searchGame - error');
                        return reject(err);
                    }
                    console.log('Game services - searchGame - end');
                    resolve({
                        totalRow: totalRow,
                        data: result
                    });
                });
        });
    });
};
module.exports = searchGames;
|
def generate_primes(upper_bound):
    """Return all primes strictly below ``upper_bound``, in ascending order.

    Fixes two defects in the original implementation:
    * it returned [2] even when ``upper_bound`` <= 2;
    * trial division ran over every j < i instead of stopping at sqrt(i).
    """
    if upper_bound <= 2:
        return []
    primes = [2]
    # Even numbers > 2 are never prime, so step over odd candidates only.
    for candidate in range(3, upper_bound, 2):
        is_prime = True
        divisor = 3
        # Only odd divisors up to sqrt(candidate) need to be checked.
        while divisor * divisor <= candidate:
            if candidate % divisor == 0:
                is_prime = False
                break
            divisor += 2
        if is_prime:
            primes.append(candidate)
    return primes
<reponame>florecebriz/tourism-site
import React from "react";
import { Container, Row, Col, Button } from "reactstrap";
import imgCover from "./bangkawan/cover.jpg";
import imgCard1 from "./bangkawan/img1.jpg";
import imgCard2 from "./bangkawan/img2.jpg";
import imgCard3 from "./bangkawan/img3.jpg";
import imgCard4 from "./bangkawan/img4.jpg";
import imgCard5 from "./bangkawan/img5.jpg";
import imgCard6 from "./bangkawan/img6.jpg";
import imgCard7 from "./bangkawan/img7.jpg";
import imgCard8 from "./bangkawan/img8.jpg";
import imgCard9 from "./bangkawan/img9.jpg";
import imgCard10 from "./bangkawan/img10.jpg";
import Review from "./Review";
// Static tour page for Bangkawan Cove & Camping Resort: cover header,
// description, photo gallery, embedded Google Map and a review section.
const Bangkawan = () => (
  <div className="subComponent">
    <Container>
      <section className="tour-cover item-center">
        <img src={imgCover} alt="" />
        <h1>Bangkawan Cove & Camping Resort</h1>
        <h4>Bangkawan Island</h4>
      </section>
      <section className="tour-info">
        <Row>
          <Col sm="8">
            <div className="tour-desc">
              <p>
                Is a small family private island it is located near villa
                lavezares northern samar it is a typically beautiful beach
                destination, as to find the water and sand to be the most
                replenishing it is also ideal for diving and snorkeling but the
                tourist have to navigate some dirt roads with habal-habal just
                to get there.
              </p>
              <p>
                BANKAWAN Island Camping Resort is now OPEN. <br />
                Location: Bankawan Island, Lavezares, Northern Samar The closest
                island from mainland Lavezares for less than 30 minutes only
                from the town center.
              </p>
              <h5>Contact</h5>
              <p>For more info contact #09107595168</p>
            </div>
          </Col>
          <Col sm="4">
            {/* Thumbnails linking to the full-size photos. */}
            <div className="tour-gallery">
              <a href={imgCard1}>
                <img src={imgCard1} alt="" />
              </a>
              <a href={imgCard2}>
                <img src={imgCard2} alt="" />
              </a>
              <a href={imgCard3}>
                <img src={imgCard3} alt="" />
              </a>
              <a href={imgCard4}>
                <img src={imgCard4} alt="" />
              </a>
              <a href={imgCard5}>
                <img src={imgCard5} alt="" />
              </a>
              <a href={imgCard6}>
                <img src={imgCard6} alt="" />
              </a>
              <a href={imgCard7}>
                <img src={imgCard7} alt="" />
              </a>
              <a href={imgCard8}>
                <img src={imgCard8} alt="" />
              </a>
              <a href={imgCard9}>
                <img src={imgCard9} alt="" />
              </a>
              <a href={imgCard10}>
                <img src={imgCard10} alt="" />
              </a>
            </div>
          </Col>
        </Row>
      </section>
    </Container>
    <section>
      {/* Embedded Google Map pinned to the island's coordinates. */}
      <iframe
        title="Bangkawan Island"
        src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3894.416985833421!2d124.3541513146001!3d12.554733227285562!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x0%3A0x0!2zMTLCsDMzJzE3LjAiTiAxMjTCsDIxJzIyLjgiRQ!5e0!3m2!1sen!2sph!4v1549949557496"
      />
    </section>
    <Review />
  </div>
);
export default Bangkawan;
|
var JinagaKnockout = function (j, ko) {
this.observeStatus = function(viewModel) {
viewModel.error = ko.observable();
viewModel.queueCount = ko.observable();
viewModel.loading = ko.observable(false);
viewModel.status = ko.computed(function () {
return this.error()
? "Error"
: this.queueCount() > 0
? "Saving..."
: this.loading()
? "Loading..."
: "";
}, viewModel);
j.onError(function (message) { viewModel.error(message); });
j.onProgress(function (queueCount) { viewModel.queueCount(queueCount); });
j.onLoading(function (loading) { viewModel.loading(loading); });
}
this.observeUser = function(viewModel) {
viewModel.user = ko.observable();
viewModel.displayName = ko.observable();
j.login(function (u, profile) {
if (!u) {
window.location = loginUrl;
}
else {
viewModel.user(u);
viewModel.displayName(profile.displayName);
j.query(u, [namesForUser], function(names) {
if (names.length != 1 || names[0].value !== profile.displayName) {
createUserName(u, profile.displayName, names);
}
});
}
function createUserName(user, value, prior) {
return j.fact({
type: "Jinaga.User.Name",
prior: prior,
from: user,
value: value
});
}
function nameIsCurrent(n) {
return j.not({
type: "Jinaga.User.Name",
prior: n
});
}
function namesForUser(u) {
return j.where({
type: "Jinaga.User.Name",
from: u
}, [nameIsCurrent]);
}
});
}
function Collection(parent, template, childConstructor, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9) {
this.items = ko.observableArray();
this.watch = function () {
return j.watch(parent, template, addTo(this), removeFrom(this));
}
var map = childConstructor ? function(f) {
return new childConstructor(f, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9);
} : function (f) {
return f;
};
function addTo(collection) {
return function (fact) {
var obj = map(fact);
collection.items.push(obj);
return obj;
};
}
function removeFrom(collection) {
return function (obj) {
collection.items.remove(obj);
};
}
}
this.Collection = Collection;
// A mutable property backed by Jinaga facts. Concurrent edits can produce
// multiple candidate facts; `value` resolves to the most recent one.
// NOTE(review): assumes each candidate fact carries a `.value` field —
// confirm against the fact schema used by callers.
function Mutable(type, entity, defaultValue) {
    // All current (non-superseded) candidate facts for this property.
    this.facts = ko.observableArray();
    // Resolved value: the last candidate wins; defaultValue when empty.
    this.value = ko.computed(function () {
        var candidates = this.facts();
        if (candidates.length === 0) {
            return defaultValue;
        }
        else {
            return candidates[candidates.length-1].value;
        }
    }, this);
    // Snapshot the current state for editing. The returned observable can
    // be bound to an input; `save` records a new fact superseding every
    // candidate that existed at capture time.
    this.capture = function() {
        var prior = this.facts();
        var value = ko.observable(this.value());
        return {
            value: value,
            save: function () {
                j.fact({
                    type: type,
                    entity: entity,
                    value: value(),
                    prior: prior
                });
            }
        };
    }
}
// True when concurrent edits have left more than one candidate fact.
Mutable.prototype.inConflict = function () {
    var candidateCount = this.facts().length;
    return candidateCount > 1;
};
// The raw values of every current candidate fact.
Mutable.prototype.candidates = function () {
    var values = [];
    this.facts().forEach(function (fact) {
        values.push(fact.value);
    });
    return values;
};
this.Mutable = Mutable;
// Watch all current facts of `type` attached to each entity produced by
// `parent`, pushing them into the view model's named Mutable property.
// Returns the Jinaga watch handle.
this.watchMutable = function(parent, property, type) {
    return parent.watch([mutablesInEntity], addTo, removeFrom);
    // Add a fact to the Mutable; return the pair so removal can undo it.
    function addTo(vm, p) {
        var mutable = vm[property];
        mutable.facts.push(p);
        return { mutable: mutable, fact: p };
    }
    // Remove a superseded fact from its Mutable.
    function removeFrom(pair) {
        pair.mutable.facts.remove(pair.fact);
    }
    // Template: current facts of `type` for entity e.
    function mutablesInEntity(e) {
        return j.where({
            type: type,
            entity: e
        }, [isCurrent]);
    }
    // Condition: a fact is current if no later fact supersedes it.
    function isCurrent(p) {
        return j.not({
            type: type,
            prior: p
        });
    }
}
}; |
# Set up the catkin workspace environment for the mobile-manipulation stack.
source ${OPENDR_HOME}/lib/catkin_ws_mobile_manipulation/devel/setup.bash
# Start the ROS master in the background and give it time to come up.
roscore &
sleep 5
# Launch the PR2 analytical mobile-manipulation demo in the background.
roslaunch mobile_manipulation_rl pr2_analytical.launch &
<reponame>top-sim/topsim
# Copyright (C) 10/19 <NAME>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import time
import logging
import pandas as pd
from enum import Enum
from topsim.common.globals import TIMESTEP
from topsim.core.instrument import RunStatus
from topsim.core.planner import WorkflowStatus
from topsim.core.task import TaskStatus
LOGGER = logging.getLogger(__name__)
class Scheduler:
    """
    The Scheduler is the intermediary between the Telescope, Buffer and
    Cluster actors; the Telescope never sees the Buffer or Cluster
    directly — all communication is routed through here.

    Attributes
    ----------
    env : simpy.Environment
        The simulation environment.
    algorithm : topsim algorithm object
        Scheduling algorithm invoked every timestep.
    cluster : topsim.core.cluster.Cluster
        The cluster workflows are allocated to.
    buffer : topsim.core.buffer.Buffer
        The SDP buffer observations are ingested into.
    status : SchedulerStatus
        Actor lifecycle state (SLEEP -> RUNNING -> SHUTDOWN).
    schedule_status : ScheduleStatus
        Whether the current schedule is on time or delayed.
    delay_offset : int
        Accumulated delay (in timesteps) across all finished tasks.
    """

    def __init__(self, env, buffer, cluster, algorithm):
        """
        Parameters
        ----------
        env : simpy.Environment
            The simulation Environment
        buffer : core.buffer.Buffer object
            The SDP buffer used in the simulation
        cluster : core.cluster.Cluster object
            The Cluster instance used for the simulation
        algorithm : core.algorithm.Algorithm object
            The scheduling algorithm model
        """
        self.env = env
        self.algorithm = algorithm
        self.cluster = cluster
        self.buffer = buffer
        self.status = SchedulerStatus.SLEEP
        self.ingest_observation = None
        # Number of machines currently reserved for ingest pipelines.
        self.provision_ingest = 0
        self.observation_queue = []
        self.schedule_status = ScheduleStatus.ONTIME
        # Wall-clock time spent inside the algorithm, keyed per observation.
        self.algtime = {}
        self.delay_offset = 0

    def start(self):
        """
        Set the SchedulerStatus to RUNNING.

        This allows us to check the Scheduler status in the simulator; if
        there is nothing left to schedule, we may finish the simulation.

        Returns
        -------
        self.status : topsim.core.scheduler.SchedulerStatus
            SchedulerStatus.RUNNING for all calls to start()
        """
        self.status = SchedulerStatus.RUNNING
        return self.status

    def shutdown(self):
        """Flag the scheduler to stop once its observation queue drains."""
        self.status = SchedulerStatus.SHUTDOWN
        return self.status

    def run(self):
        """
        Start the 'per-TIMESTEP' process loop for the Scheduler actor.

        The Scheduler coordinates interaction between the Telescope, Buffer,
        and Cluster. The Telescope should not *see* the Buffer or Cluster;
        all communication must be transferred through the scheduler.

        Yields
        ------
        Timeout of common.config.TIMESTEP.
        """
        if self.status is not SchedulerStatus.RUNNING:
            raise RuntimeError("Scheduler has not been initialised! Call init")
        LOGGER.debug("Scheduler starting up...")

        while self.status is SchedulerStatus.RUNNING:
            LOGGER.debug('Time on Scheduler: {0}'.format(self.env.now))
            if self.buffer.has_observations_ready_for_processing():
                obs = self.buffer.next_observation_for_processing()
                if obs not in self.observation_queue:
                    self.observation_queue.append(obs)
                    # Spawn a long-lived allocation process per observation.
                    self.env.process(self.allocate_tasks(obs))
            if len(self.observation_queue) == 0 \
                    and self.status == SchedulerStatus.SHUTDOWN:
                LOGGER.debug("No more waiting workflows")
                break
            LOGGER.debug("Scheduler Status: %s", self.status)
            yield self.env.timeout(TIMESTEP)

    def is_idle(self):
        """
        Determine if the scheduler has completed its work.

        Returns
        -------
        bool
            True if there are no observations queued for processing.
        """
        return len(self.observation_queue) == 0

    def scheduler_status(self):
        """
        The status of the scheduled observation(s) and whether or not the
        scheduler has been delayed yet.

        Returns
        -------
        status : ScheduleStatus
            The status Enum
        """
        return self.schedule_status

    def check_ingest_capacity(self, observation, pipelines, max_ingest):
        """
        Check the cluster and buffer to ensure that we have enough capacity
        to run the INGEST pipeline for the provided observation.

        Parameters
        ----------
        observation : core.Telescope.Observation object
            The observation that we are attempting to run/ingest
        pipelines : dict
            Map of observation types to pipeline attributes
            (length, number of machines, etc.)
        max_ingest : int
            Maximum number of machines that may be used for ingest at once.

        Returns
        -------
        has_capacity : bool
            True if the buffer AND the cluster have the capacity to run the
            provided observation; False otherwise.
        """
        buffer_capacity = self.buffer.check_buffer_capacity(observation)
        if buffer_capacity:
            LOGGER.debug("Buffer has enough capacity for %s", observation.name)

        cluster_capacity = False
        pipeline_demand = pipelines[observation.name]['ingest_demand']
        if self.cluster.check_ingest_capacity(pipeline_demand, max_ingest):
            if self.provision_ingest + pipeline_demand <= max_ingest:
                cluster_capacity = True
                LOGGER.debug(
                    "Cluster is able to process ingest for observation %s",
                    observation.name
                )
            else:
                LOGGER.debug('Cluster is unable to process ingest as two '
                             'observations are scheduled at the same time')

        has_capacity = buffer_capacity and cluster_capacity
        if has_capacity:
            # BUGFIX: reserve ingest machines only once we know BOTH the
            # buffer and the cluster can accept the observation. Previously
            # the reservation was made inside the cluster branch and leaked
            # whenever the buffer check failed (it is only released at the
            # end of allocate_ingest, which never runs in that case).
            self.provision_ingest += pipeline_demand
        return has_capacity

    def allocate_ingest(self, observation, pipelines, planner,
                        max_ingest=None, c='default'):
        """
        Run ingest for an observation: 'streaming' data into the buffer for
        the duration of the observation, then plan its workflow.

        Parameters
        ----------
        observation : core.Telescope.Observation object
            The observation from which we are starting Ingest
        pipelines : dict
            pipelines[observation type][demand] — pipeline types supported
            for the current simulation
        planner : topsim.core.planner.Planner
            Used to generate the observation's workflow plan once ingest
            completes.
        max_ingest : int, optional
            Passed through to the planner.
        c : str
            Unused legacy parameter, kept for interface compatibility.

        Yields
        ------
        simpy timeout events (one per timestep of the observation).
        """
        observation.ast = self.env.now
        pipeline_demand = pipelines[observation.name]['ingest_demand']
        ingest_observation = observation
        # Off-by-one: the first pass through the loop happens one timestep
        # ahead, so count down from duration - 1.
        time_left = observation.duration - 1
        while ingest_observation.status is not RunStatus.FINISHED:
            if ingest_observation.status is RunStatus.WAITING:
                self.env.process(
                    self.cluster.provision_ingest_resources(
                        pipeline_demand,
                        observation
                    )
                )
                self.env.process(
                    self.buffer.ingest_data_stream(
                        observation,
                    )
                )
                ingest_observation.status = RunStatus.RUNNING
            elif ingest_observation.status is RunStatus.RUNNING:
                if time_left > 0:
                    time_left -= 1
                else:
                    break
            yield self.env.timeout(1)

        # BUGFIX: the original guard here was `if RunStatus.FINISHED:`,
        # which is always truthy. The ingest window has elapsed whichever
        # way the loop exited, so release the reservation unconditionally
        # (preserving the original's actual behaviour, minus the
        # misleading condition).
        self.provision_ingest -= pipeline_demand
        self.cluster.clean_up_ingest()
        # TODO Fix this implicit object change: this is the same object as
        # in the buffer, but it is from the buffer we get the observation.
        # It is probably worth storing plans separately and then 'giving'
        # them to the observation once it arrives at the scheduler.
        observation.plan = planner.run(observation, self.buffer, max_ingest)

    def print_state(self):
        # Change this to 'workflows scheduled/workflows unscheduled'
        pass

    def allocate_tasks(self, observation):
        """
        For the current observation, allocate tasks to machines based on:

            * The plan that has been generated
            * The result of the scheduler's decision based on the current
              cluster state, and the original plan.

        Runs as a simpy process until the observation's workflow finishes.

        Yields
        ------
        simpy timeout events (one per timestep).
        """
        if observation is None:
            return False
        current_plan = observation.plan
        if current_plan is None:
            raise RuntimeError(
                "Observation should have pre-plan; Planner actor has "
                "failed at runtime."
            )

        current_plan.ast = self.env.now
        for task in current_plan.tasks:
            task.workflow_offset = self.env.now

        # Do we have a runtime delay?
        if current_plan.est > self.env.now:
            # BUGFIX: previously `self.schedule_status.DELAYED` — a bare
            # attribute access with no effect; the delay was never recorded.
            self.schedule_status = ScheduleStatus.DELAYED

        schedule = {}
        allocation_pairs = {}
        while True:
            # Drop finished tasks, then ask the algorithm for allocations.
            current_plan.tasks = self._update_current_plan(current_plan)
            current_plan, schedule, finished = self._generate_current_schedule(
                observation, current_plan, schedule
            )
            if finished:
                # We have finished this observation
                LOGGER.info(f'{observation.name} Removed from Queue @'
                            f'{self.env.now}')
                break
            elif not schedule:
                # No allocations were made this timestep.
                yield self.env.timeout(TIMESTEP)
            else:
                # This is where allocations are made to the cluster
                schedule, allocation_pairs = self._process_current_schedule(
                    schedule, allocation_pairs, current_plan.id
                )
                yield self.env.timeout(TIMESTEP)

        yield self.env.timeout(TIMESTEP)

    def _generate_current_schedule(self, observation, current_plan, schedule):
        """
        Each timestep, generate a schedule based on the observation plan
        and an existing schedule, recording the algorithm's runtime.

        Parameters
        ----------
        observation : the observation being scheduled
        current_plan : core.planner.WorkflowPlan
        schedule : dict
            Existing (not yet actioned) task -> machine allocations.

        Returns
        -------
        current_plan, schedule, finished
        """
        finished = False
        nm = f'{observation.name}-algtime'
        self.algtime[nm] = time.time()
        schedule, status = self.algorithm.run(
            cluster=self.cluster,
            clock=self.env.now,
            workflow_plan=current_plan,
            existing_schedule=schedule
        )
        self.algtime[nm] = (time.time() - self.algtime[nm])
        current_plan.status = status
        # BUGFIX: the original compared schedule_status against
        # WorkflowStatus.DELAYED (wrong enum — always unequal); compare
        # against ScheduleStatus.DELAYED instead.
        if (current_plan.status is WorkflowStatus.DELAYED and
                self.schedule_status is not ScheduleStatus.DELAYED):
            self.schedule_status = ScheduleStatus.DELAYED

        # If the workflow is finished, release its resources and dequeue it.
        if not schedule and status is WorkflowStatus.FINISHED:
            if self.buffer.mark_observation_finished(observation):
                self.cluster.release_batch_resources(observation.name)
                self.observation_queue.remove(observation)
                finished = True
        return current_plan, schedule, finished

    def _process_current_schedule(self, schedule, allocation_pairs,
                                  workflow_id):
        """
        Given a schedule and existing allocations, run through the schedule
        and perform the allocation for each task if possible.

        Parameters
        ----------
        schedule : dict
            task -> machine pairs produced by the algorithm.
        allocation_pairs : dict
            task.id -> (task, machine) of every allocation made so far.
        workflow_id :
            The ID of the workflow, so the cluster can find the
            appropriate set of provisioned resources.

        Returns
        -------
        schedule, allocation_pairs
            The schedule with actioned tasks removed, and the updated
            allocation map.
        """
        # Process tasks in order of their estimated start time.
        sorted_tasks = sorted(
            schedule.keys(), key=lambda t: t.est
        )
        curr_allocs = []
        # Allocate tasks
        for task in sorted_tasks:
            machine = schedule[task]
            if machine.id != task.machine:
                task.update_allocation(machine)
            # Skip if the machine was already claimed this timestep or is
            # busy on the cluster; the task stays in the schedule for later.
            if machine in curr_allocs or self.cluster.is_occupied(machine):
                LOGGER.debug(
                    "Allocation not made to cluster due to double-allocation"
                )
            else:
                allocation_pairs[task.id] = (task, machine)
                # Predecessor placements are needed to cost data movement.
                pred_allocations = self._find_pred_allocations(
                    task, machine, allocation_pairs
                )
                if task.task_status != TaskStatus.UNSCHEDULED:
                    raise RuntimeError("Producing schedule with Scheduled "
                                       "Tasks")
                self.env.process(
                    self.cluster.allocate_task_to_cluster(
                        task, machine, pred_allocations, workflow_id
                    )
                )
                LOGGER.debug(f"Allocation {task}-{machine} made to cluster")
                task.task_status = TaskStatus.SCHEDULED
                curr_allocs.append(machine)
                schedule.pop(task, None)
        return schedule, allocation_pairs

    def _update_current_plan(self, current_plan):
        """
        Check the status of tasks in the workflow plan and remove them if
        they are complete.

        Each task has a delay_flag that is triggered if the duration or
        finish time is not the same as what was estimated in the planning.
        This method updates the self.schedule_status and self.delay_offset
        class attributes accordingly.

        Parameters
        ----------
        current_plan : core.planning.WorkflowPlan
            The workflow plan for an observation in self.observation_queue

        Returns
        -------
        remaining_tasks : list
            List of remaining (unfinished) tasks in the workflow plan
        """
        remaining_tasks = []
        for t in current_plan.tasks:
            if t.task_status is not TaskStatus.FINISHED:
                remaining_tasks.append(t)
            elif t.delay_flag:
                self.schedule_status = ScheduleStatus.DELAYED
                self.delay_offset += t.delay_offset
        return remaining_tasks

    def _find_pred_allocations(self, task, machine, allocations):
        """
        Return the predecessor tasks that were allocated to a *different*
        machine than the current task.

        The purpose of this is to provide this to the task when
        calculating its duration; communication time of data from tasks
        on other machines is non-negligible and must complete before the
        task can begin executing.

        Parameters
        ----------
        task : the task whose predecessors we inspect
        machine : the machine `task` is being allocated to
        allocations : dict mapping task.id -> (task, machine)

        Returns
        -------
        pred_allocations : list
            Predecessor tasks that live on other machines.
        """
        pred_allocations = []
        for pred in task.pred:
            pred_task, pred_machine = allocations[pred]
            if pred_machine != machine:
                pred_allocations.append(pred_task)
        return pred_allocations

    def to_df(self):
        """
        Summarise scheduler state as a single-row pandas DataFrame.

        Returns
        -------
        df : pandas.DataFrame
            Columns: observation_queue, schedule_status, delay_offset, and
            one column per recorded algorithm timing.
        """
        df = pd.DataFrame()
        queuestr = ''
        for obs in self.observation_queue:
            queuestr += f'{obs.name}'
        df['observation_queue'] = queuestr
        df['schedule_status'] = [str(self.schedule_status)]
        # BUGFIX: previously reported schedule_status under 'delay_offset'
        # (copy-paste error); report the actual accumulated delay.
        df['delay_offset'] = [str(self.delay_offset)]
        if self.algtime:
            for key, value in self.algtime.items():
                df[key] = value
        else:
            df['algtime'] = 'alg'
        return df
class Schedule:
    """
    Container for a set of task-to-machine allocations produced by a
    scheduling algorithm.

    Attributes
    ----------
    allocation : list of (task, machine) tuples
    replace : bool
        True when this schedule should replace a previously-issued one.
    status : WorkflowStatus
        Status of the workflow this schedule belongs to.
    """
    def __init__(self):
        self.allocation = []
        self.replace = False
        self.status = WorkflowStatus.SCHEDULED

    def add_allocation(self, task, machine):
        # Append one task -> machine pairing to the schedule.
        self.allocation.append((task, machine))

    def replace_previous_schedule(self):
        # Mark this schedule as superseding the previously issued one.
        self.replace = True
class Allocation:
    """
    Value object pairing a task with the machine it is allocated to.

    Two allocations are equal when they reference the same task id and
    the same machine id; hashing is consistent with that equality.
    """
    def __init__(self, task, machine):
        self.task = task
        self.machine = machine

    def __eq__(self, other):
        # BUGFIX: the original compared `self.task` (an object) with
        # `other.task.id` (an id), so two equivalent allocations never
        # compared equal. Compare ids on both sides, matching __hash__.
        return (self.task.id == other.task.id
                and self.machine.id == other.machine.id)

    def __hash__(self):
        # NOTE(review): assumes task.id and machine.id are strings
        # (concatenation) — confirm against the task/machine models.
        return hash(self.task.id + self.machine.id)
class SchedulerStatus(Enum):
    """Lifecycle state of the Scheduler actor."""
    SLEEP = 'SLEEP'        # Created but not yet started
    RUNNING = 'RUNNING'    # Actively scheduling each timestep
    SHUTDOWN = 'SHUTDOWN'  # Flagged to stop once the queue drains
class ScheduleStatus(Enum):
    """Whether the produced schedule is running to plan."""
    ONTIME = 'ONTIME'    # Execution matches the planned estimates
    DELAYED = 'DELAYED'  # At least one task finished later than planned
    FAILURE = 'FAILURE'  # The schedule could not be met
|
/**
 * Bit flags describing how a GPU buffer may be used (mirrors the WebGPU
 * GPUBufferUsage flag values). Combine flags with bitwise OR.
 */
export enum BufferUsage {
  MAP_READ = 0x0001,
  MAP_WRITE = 0x0002,
  COPY_SRC = 0x0004,
  COPY_DST = 0x0008,
  INDEX = 0x0010,
  VERTEX = 0x0020,
  UNIFORM = 0x0040,
  STORAGE = 0x0080,
  INDIRECT = 0x0100,
  QUERY_RESOLVE = 0x0200
}
|
<filename>test/proto/src/void__float.c<gh_stars>10-100
#include <stdlib.h>
// ORACLE FLOAT foo(VOID)
// ORACLE FLOAT foo(VOID)
// Prototype-test oracle: exercises the `float foo(void)` call/return
// convention with a randomised float computation.
// NOTE(review): the loop starts at i = 0, so the first iteration divides
// by zero; with IEEE float semantics the sum becomes inf/nan. Also,
// rand() % 65536 may be 0, making 1.0 / a infinite. Presumably only the
// signature matters for this prototype test — confirm before changing
// the math, since it could alter oracle comparisons.
float foo(void) {
    int a;
    float b;
    int i;
    a = rand() % 65536;
    b = 1.0 / a;
    for (i = 0; i < a; i++)
        b += ((float) a) / ((float) i);
    return b;
}
// Driver: calls foo() repeatedly; results are intentionally discarded —
// the test only checks that the call/return convention compiles and runs.
int main(void) {
    int i;
    float a = 0, b = 0;
    for (i = 0; i < 100; i++) {
        a += foo();
        b += foo();
    }
    return 0;
}
|
// Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "cpu/arm/fp16/tensor_computing_fp16.h"
// Dequantize int8 values to fp16: d[i] = q[i] / scale.
// Processes 16 lanes per iteration with NEON, then a scalar tail loop.
inline static void dequantize_i8_f16(I32 len, INT8 *q, F32 scale, F16 *d)
{
    // Multiply by the reciprocal rather than dividing per element.
    // NOTE(review): the factor is computed in F16 here but F32 in the
    // i32 variant below — confirm the precision difference is intended.
    F16 factor = 1 / scale;
    int i = 0;
    for (; i < len - 15; i += 16) {
        // Load 16 int8, widen to int16, convert to fp16, scale, store.
        int8x8_t in0 = vld1_s8(q + i);
        int8x8_t in1 = vld1_s8(q + i + 8);
        int16x8_t s0 = vmovl_s8(in0);
        int16x8_t s1 = vmovl_s8(in1);
        float16x8_t f0 = vcvtq_f16_s16(s0);
        float16x8_t f1 = vcvtq_f16_s16(s1);
        f0 = vmulq_n_f16(f0, factor);
        f1 = vmulq_n_f16(f1, factor);
        vst1q_f16(d + i, f0);
        vst1q_f16(d + i + 8, f1);
    }
    // Scalar tail for the remaining (< 16) elements.
    for (; i < len; i++) {
        d[i] = q[i] * factor;
    }
}
// Dequantize int32 accumulator values to fp16 with optional bias:
//   d[i] = q[i] / scale (+ biasPtr[i % biasLen] when biasLen != 0).
// Fast SIMD path when biasLen is a multiple of 4 (including biasLen == 0);
// otherwise a generic path walks the output in biasLen-sized chunks.
inline static void dequantize_i32_f16(I32 len, I32 *q, F32 scale, I32 biasLen, F16 *biasPtr, F16 *d)
{
    if (0 != biasLen) {
        CHECK_REQUIREMENT(nullptr != biasPtr);
        // Bias repeats cyclically, so len must divide evenly into it.
        CHECK_REQUIREMENT(len % biasLen == 0);
    }
    float16x4_t bias[4];
    F32 factor = 1 / scale;  // multiply by reciprocal instead of dividing
    if (biasLen % 4 == 0) {
        // SIMD path: 16 outputs per iteration (4x int32x4 lanes).
        int i = 0;
        for (; i < len - 15; i += 16) {
            int32x4_t in0 = vld1q_s32(q + i);
            int32x4_t in1 = vld1q_s32(q + i + 4);
            int32x4_t in2 = vld1q_s32(q + i + 8);
            int32x4_t in3 = vld1q_s32(q + i + 12);
            if (0 != biasLen) {
                // Reload the 4 bias vectors for this position, wrapping
                // around the (4-aligned) bias buffer.
                I32 offset = i % biasLen;
                for (U32 j = 0; j < 4; j++) {
                    bias[j] = vld1_f16(biasPtr + offset);
                    offset += 4;
                    if (offset >= biasLen) {
                        offset = 0;
                    }
                }
            }
            // int32 -> fp32, scale in fp32 for precision, then narrow to fp16.
            float32x4_t f0 = vcvtq_f32_s32(in0);
            float32x4_t f1 = vcvtq_f32_s32(in1);
            float32x4_t f2 = vcvtq_f32_s32(in2);
            float32x4_t f3 = vcvtq_f32_s32(in3);
            f0 = vmulq_n_f32(f0, factor);
            f1 = vmulq_n_f32(f1, factor);
            f2 = vmulq_n_f32(f2, factor);
            f3 = vmulq_n_f32(f3, factor);
            float16x4_t h0 = vcvt_f16_f32(f0);
            float16x4_t h1 = vcvt_f16_f32(f1);
            float16x4_t h2 = vcvt_f16_f32(f2);
            float16x4_t h3 = vcvt_f16_f32(f3);
            if (0 != biasLen) {
                // Bias is added in fp16, after the narrowing conversion.
                h0 = vadd_f16(h0, bias[0]);
                h1 = vadd_f16(h1, bias[1]);
                h2 = vadd_f16(h2, bias[2]);
                h3 = vadd_f16(h3, bias[3]);
            }
            vst1_f16(d + i, h0);
            vst1_f16(d + i + 4, h1);
            vst1_f16(d + i + 8, h2);
            vst1_f16(d + i + 12, h3);
        }
        // Scalar tail for the remaining (< 16) elements.
        for (; i < len; i++) {
            d[i] = q[i] * factor;
            if (0 != biasLen) {
                d[i] += biasPtr[i % biasLen];
            }
        }
    } else {
        // Generic path: walk in biasLen-sized chunks; vectorize 4 at a
        // time within each chunk, scalar for the chunk remainder.
        for (int i = 0; i < len; i += biasLen) {
            int j = 0;
            for (; j < biasLen - 3; j += 4) {
                int32x4_t in0 = vld1q_s32(q + i + j);
                bias[0] = vld1_f16(biasPtr + j);
                float32x4_t f0 = vcvtq_f32_s32(in0);
                f0 = vmulq_n_f32(f0, factor);
                float16x4_t h0 = vcvt_f16_f32(f0);
                h0 = vadd_f16(h0, bias[0]);
                vst1_f16(d + i + j, h0);
            }
            for (; j < biasLen; j++) {
                d[i + j] = q[i + j] * factor + biasPtr[j];
            }
        }
    }
}
// Dispatch dequantization to fp16 based on the quantized tensor's dtype.
// qDesc/qData: quantized input; scale: quantization scale factor;
// bDesc/bData: optional bias (int32 inputs only); dDesc/data: fp16 output.
// Returns SUCCESS, NULL_POINTER, or NOT_SUPPORTED.
EE dequantize_to_f16(TensorDesc qDesc,
    void *qData,
    const F32 *scale,
    TensorDesc bDesc,
    void *bData,
    TensorDesc dDesc,
    void *data)
{
    if (nullptr == data || nullptr == qData || nullptr == scale) {
        CHECK_STATUS(NULL_POINTER);
    }
    EE ret = SUCCESS;
    int length = tensorNumElements(qDesc);
    int biasLength = tensorNumElements(bDesc);
    switch (qDesc.dt) {
        case DT_I8:
            // int8 inputs do not carry a bias.
            CHECK_REQUIREMENT(biasLength == 0);
            dequantize_i8_f16(length, (INT8 *)qData, scale[0], (F16 *)data);
            break;
        case DT_I32:
            // int32 accumulators may carry a cyclic fp16 bias.
            dequantize_i32_f16(
                length, (I32 *)qData, scale[0], biasLength, (F16 *)bData, (F16 *)data);
            break;
        default:
            ret = NOT_SUPPORTED;
            break;
    }
    return ret;
}
|
// Schema module for the WeChat Official Account "get parameterised QR code"
// API: declares the request-validation schema and the response shape.
const { array, object, string, integer, empty, oneOf, boolean } = require('@qtk/schema').schema;

const info = {
    title: "公众号-获取带参数二维码",
    description: ""
};

// Request: a required scene value (string scene_str or integer scene_id),
// an optional permanent flag, and an optional expiry in seconds (<= 30 days).
const request = object().properties({
    scene: oneOf(string().minLength(1).maxLength(64).desc('场景值 scene_str'), integer().desc('场景值 scene_id')),
    permanent: boolean().desc('是否为永久二维码, 默认否'),
    expireSeconds: integer().max(2592000).desc('该二维码有效时间,以秒为单位。 最大不超过2592000(即30天),此字段如果不填,则默认有效期为30秒。')
}).require('scene')

// Response: the generated QR code URL.
const response = string().desc('二维码url');

module.exports = { info, request, response };
# Stop workshop VMs via the Yandex Cloud CLI (yc).
# BUGFIX: the header comment previously said "Start VM" although the
# script stops instances; also fixed the "virtaul" typo in the final
# message and quoted variable expansions for robustness.
VM_NAME_PREFIX=workbench-workshop-vm
VM_NUMBER=1

for i in $(seq 1 "${VM_NUMBER}"); do
    VM_NAME="${VM_NAME_PREFIX}-${i}"
    echo "Stopping virtual machine ${VM_NAME} ..."
    # --async returns immediately instead of waiting for the stop to finish.
    yc compute instance stop \
        --name "${VM_NAME}" \
        --async
done

echo "Triggered stopping of ${VM_NUMBER} virtual machines"
|
#!/bin/bash
# Activate the project's Python virtualenv, then run msp.py with it.
source ../../../venv/bin/activate
python msp.py
|
package com.telpoo.frame.net;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.http.Header;
import org.apache.http.HttpResponse;
public class BaseHttpResponse {
private int status;
private String url;
private Header[] headers;
private String stringRespone;
private HttpResponse httpResponse;
public BaseHttpResponse() {
// TODO Auto-generated constructor stub
}
// public BaseHttpResponse(HttpURLConnection urlConnection) {
// try {
// this.setStatus(urlConnection.getResponseCode());
// this.setUrl(urlConnection.getURL().toString());
// } catch (IOException e) {
// e.printStackTrace();
// }
// this.setHeaders(urlConnection.getHeaderFields());
// }
public int getStatus() {
return status;
}
public String getUrl() {
return url;
}
public Header[] getHeaders() {
return headers;
}
public String getStringRespone() {
return stringRespone;
}
public void setStringRespone(String stringRespone) {
this.stringRespone = stringRespone;
}
public void setUrl(String url) {
this.url = url;
}
public void setStatus(int status) {
this.status = status;
}
// public void setHeaders(Header[] headers2) {
// this.headers = headers2;
// }
public Header[] getHeader(String key){
return httpResponse.getHeaders(key);
}
public HttpResponse getHttpResponse() {
return httpResponse;
}
public void setHttpResponse(HttpResponse httpResponse) {
this.httpResponse = httpResponse;
}
}
|
def generate_strings(characters, length):
    """
    Return every string of exactly ``length`` characters drawn (with
    repetition) from ``characters``, in lexicographic order of the input
    sequence.

    Parameters
    ----------
    characters : sequence of str
        The alphabet to draw from.
    length : int
        Length of each generated string (0 yields [""]).

    Returns
    -------
    list of str
        All len(characters) ** length combinations, first character
        varying slowest — identical ordering to the original recursive
        implementation.
    """
    from itertools import product
    # product(..., repeat=0) yields one empty tuple, so length == 0
    # correctly returns [""] — the same base case as the old recursion.
    return ["".join(combo) for combo in product(characters, repeat=length)]
# Demo: enumerate all 27 three-character strings over {A, B, C}.
strings = generate_strings(["A", "B", "C"], 3)
print("Strings:", strings)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.