text
stringlengths 1
1.05M
|
|---|
//===--- EditorAdapter.h ----------------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This class wraps a clang::edit::Commit, taking Swift source locations and
// ranges, transforming them to Clang source locations and ranges, and pushes
// them into the textual editing infrastructure. This is a temporary measure
// while lib/Syntax bringup is happening.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_MIGRATOR_EDITORADAPTER_H
#define SWIFT_MIGRATOR_EDITORADAPTER_H
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Edit/Commit.h"
#include "llvm/ADT/DenseMap.h"
namespace swift {
class SourceLoc;
class SourceRange;
class CharSourceRange;
class SourceManager;
namespace migrator {
class EditorAdapter {
  swift::SourceManager &SwiftSrcMgr;
  clang::SourceManager &ClangSrcMgr;

  /// This holds a mapping of identical buffers, one that exist in the
  /// SwiftSrcMgr and one that exists in the ClangSrcMgr.
  ///
  /// This is marked mutable because it is lazily populated internally
  /// in the `getClangFileIDForSwiftBufferID` method below.
  mutable llvm::SmallDenseMap<unsigned, clang::FileID> SwiftToClangBufferMap;

  /// A running transactional collection of basic edit operations.
  /// Clang uses this transaction concept to cancel a batch of edits due to
  /// incompatibilities, such as those due to macro expansions, but we don't
  /// have macros in Swift. However, as a temporary adapter API, we use this
  /// to keep things simple.
  clang::edit::Commit Edits;

  /// Translate a Swift SourceLoc using the SwiftSrcMgr to a
  /// clang::SourceLocation using the ClangSrcMgr.
  clang::SourceLocation translateSourceLoc(SourceLoc SwiftLoc) const;

  /// Translate a Swift SourceRange using the SwiftSrcMgr to a
  /// clang::SourceRange using the ClangSrcMgr.
  clang::SourceRange
  translateSourceRange(SourceRange SwiftSourceRange) const;

  /// Translate a Swift CharSourceRange using the SwiftSrcMgr to a
  /// clang::CharSourceRange using the ClangSrcMgr.
  clang::CharSourceRange
  translateCharSourceRange(CharSourceRange SwiftSourceSourceRange) const;

public:
  EditorAdapter(swift::SourceManager &SwiftSrcMgr,
                clang::SourceManager &ClangSrcMgr)
    : SwiftSrcMgr(SwiftSrcMgr), ClangSrcMgr(ClangSrcMgr),
      Edits(clang::edit::Commit(ClangSrcMgr, clang::LangOptions())) {}

  /// Lookup the BufferID in the SwiftToClangBufferMap. If it doesn't exist,
  /// copy the corresponding buffer into the ClangSrcMgr.
  clang::FileID getClangFileIDForSwiftBufferID(unsigned BufferID) const;

  /// Insert `Text` at `Loc`. The bool return mirrors the underlying
  /// clang::edit::Commit operation (NOTE(review): confirm whether true means
  /// accepted or failed against the Commit API before relying on it).
  bool insert(SourceLoc Loc, StringRef Text, bool AfterToken = false,
              bool BeforePreviousInsertions = false);

  /// Insert `Text` immediately after the token at `Loc`.
  bool insertAfterToken(SourceLoc Loc, StringRef Text,
                        bool BeforePreviousInsertions = false) {
    return insert(Loc, Text, /*AfterToken=*/true, BeforePreviousInsertions);
  }

  /// Insert `Text` at `Loc`, ahead of any insertions previously made there.
  bool insertBefore(SourceLoc Loc, StringRef Text) {
    return insert(Loc, Text, /*AfterToken=*/false,
                  /*BeforePreviousInsertions=*/true);
  }

  /// Insert the text covered by the character range `Range` at `Loc`.
  bool insertFromRange(SourceLoc Loc, CharSourceRange Range,
                       bool AfterToken = false,
                       bool BeforePreviousInsertions = false);

  /// Surround the character range `Range` with `before` and `after`.
  bool insertWrap(StringRef before, CharSourceRange Range, StringRef after);

  /// Remove the text covered by the character range `Range`.
  bool remove(CharSourceRange Range);

  /// Replace the text covered by the character range `Range` with `Text`.
  bool replace(CharSourceRange Range, StringRef Text);

  /// Replace `Range` with the text currently covered by `innerRange`.
  bool replaceWithInner(CharSourceRange Range, CharSourceRange innerRange);

  /// Replace an occurrence of `Text` at `Loc` with `replacementText`.
  bool replaceText(SourceLoc Loc, StringRef Text,
                   StringRef replacementText);

  /// Token-range overloads of the character-range operations above; the
  /// token range is converted internally before being applied.
  bool insertFromRange(SourceLoc Loc, SourceRange TokenRange,
                       bool AfterToken = false,
                       bool BeforePreviousInsertions = false);
  bool insertWrap(StringRef before, SourceRange TokenRange, StringRef after);

  /// Remove the single token starting at `TokenLoc`.
  bool remove(SourceLoc TokenLoc);
  bool remove(SourceRange TokenRange);
  bool replace(SourceRange TokenRange, StringRef Text);

  /// Replace the single token at `TokenLoc` with `Text`.
  bool replaceToken(SourceLoc TokenLoc, StringRef Text);
  bool replaceWithInner(SourceRange TokenRange, SourceRange TokenInnerRange);

  /// Return the batched edits encountered so far.
  const clang::edit::Commit &getEdits() const {
    return Edits;
  }
};
} // end namespace migrator
} // end namespace swift
#endif // SWIFT_MIGRATOR_EDITORADAPTER_H
|
import logging
import logging.handlers
import os
from datetime import datetime
import sys
import errno
# Logging Levels
# https://docs.python.org/3/library/logging.html#logging-levels
# CRITICAL 50
# ERROR 40
# WARNING 30
# INFO 20
# DEBUG 10
# NOTSET 0
def set_up_logging(name=None):
    """Configure file-based logging under <project root>/logs and return a logger.

    The log file is named ``zybots_YYYYMMDD.log``. The project root is taken to
    be the parent of this module's directory.

    :param name: optional logger name; defaults to this module's ``__name__``.
    :return: the configured :class:`logging.Logger`.
    """
    module_file = sys.modules[__name__].__file__
    project_root = os.path.dirname(os.path.dirname(module_file))
    log_dir = project_root + '/logs/'
    if not os.path.exists(log_dir):
        try:
            os.makedirs(log_dir, exist_ok=True)
        except TypeError:
            # Python 2 has no exist_ok; fall back and tolerate a creation race.
            try:
                os.makedirs(log_dir)
            except OSError as exc:  # Python >2.5
                if exc.errno == errno.EEXIST and os.path.isdir(log_dir):
                    pass
                else:
                    raise
    date_stamp = datetime.now().strftime("%Y%m%d")
    log_path = log_dir + 'zybots_' + date_stamp + '.log'
    # Touch the file so it exists before basicConfig opens it for appending.
    with open(log_path, 'a+'):
        pass
    logger = logging.getLogger(name) if name else logging.getLogger(__name__)
    fmt = '[%(asctime)s] [%(levelname)s] [%(message)s] [%(pathname)s %(name)s [%(process)d]:]'
    # Root handler writes (appends) every DEBUG-and-above record to the file.
    logging.basicConfig(format=fmt, filemode='a+', filename=log_path, level=logging.DEBUG)
    return logger
|
<reponame>NajibAdan/kitsu-server
# rubocop:disable Metrics/LineLength
# == Schema Information
#
# Table name: group_invites
#
# id :integer not null, primary key
# accepted_at :datetime
# declined_at :datetime
# revoked_at :datetime
# created_at :datetime not null
# updated_at :datetime not null
# group_id :integer not null, indexed
# sender_id :integer not null, indexed
# user_id :integer not null, indexed
#
# Indexes
#
# index_group_invites_on_group_id (group_id)
# index_group_invites_on_sender_id (sender_id)
# index_group_invites_on_user_id (user_id)
#
# Foreign Keys
#
# fk_rails_62774fb6d2 (sender_id => users.id)
# fk_rails_7255dc4343 (group_id => groups.id)
# fk_rails_d969f0761c (user_id => users.id)
#
# rubocop:enable Metrics/LineLength
require 'rails_helper'
RSpec.describe GroupInvite, type: :model do
  # Association and presence contracts.
  it { is_expected.to belong_to(:group) }
  it { is_expected.to validate_presence_of(:group) }
  it { is_expected.to belong_to(:user) }
  it { is_expected.to validate_presence_of(:user) }
  it { is_expected.to belong_to(:sender).class_name('User') }
  it { is_expected.to validate_presence_of(:sender) }

  it 'should limit to one active invite per user per group' do
    existing = create(:group_invite)
    duplicate = build(:group_invite, group: existing.group, user: existing.user)
    expect(duplicate).to be_invalid
    expect(duplicate.errors[:user]).not_to be_empty
  end

  it 'should prevent you from inviting yourself' do
    inviter = build(:user)
    self_invite = build(:group_invite, user: inviter, sender: inviter)
    expect(self_invite).to be_invalid
    expect(self_invite.errors[:user]).not_to be_empty
  end
end
|
<filename>src/main/java/com/algaworks/pedidovenda/controller/CadastroClienteBean.java
package com.algaworks.pedidovenda.controller;
import java.io.Serializable;
import java.util.Date;
import javax.faces.bean.ViewScoped;
import javax.inject.Inject;
import javax.inject.Named;
import com.algaworks.pedidovenda.model.Cliente;
import com.algaworks.pedidovenda.model.TipoPessoa;
import com.algaworks.pedidovenda.service.ClienteService;
@Named
@ViewScoped
public class CadastroClienteBean implements Serializable {

    private static final long serialVersionUID = 1L;

    // Client currently bound to the registration form.
    private Cliente cliente;

    @Inject
    private ClienteService clienteService;

    /** Starts the bean with a fresh, empty client form. */
    public CadastroClienteBean() {
        limpar();
    }

    /** Resets the form by replacing the edited client with a new instance. */
    public void limpar() {
        cliente = new Cliente();
    }

    /** Persists the current client and clears the form for the next entry. */
    public void salvar() {
        this.cliente = this.clienteService.salvar(cliente);
        limpar();
    }

    /**
     * Loads the given client into the form and navigates to the edit page.
     *
     * @param cliente the client to edit
     * @return the JSF navigation outcome with a redirect
     */
    public String editar(Cliente cliente) {
        this.cliente = cliente;
        // Fixed: the redirect flag was misspelled ("faces-redirect=tre"),
        // so JSF ignored it and performed a plain forward instead of a
        // redirect, breaking the POST-redirect-GET flow.
        return "/clientes/CadastroCliente.xhtml?faces-redirect=true";
    }

    /** @return true when the form is editing an already-persisted client. */
    public boolean verificaEdicao() {
        return this.cliente.getId() != null;
    }

    /** @return every person type, for the form's select options. */
    public TipoPessoa[] getTipoPessoaFisica() {
        return TipoPessoa.values();
    }

    /** @return the current date, used as a default value in the form. */
    public Date dataHoje() {
        return new Date();
    }

    public Cliente getCliente() {
        return cliente;
    }

    public void setCliente(Cliente cliente) {
        this.cliente = cliente;
    }
}
|
#!/bin/bash
set -e

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
. "${DIR}/defaults.sh"

# Print the help text. The exit status is decided by the caller so that
# an explicit --help can exit 0 while a usage error exits 1.
usage() {
cat <<EOF
Receive the motion JPEG video data from the port.
This script uses the 'rtpbin' plugin.
Command:
gst-launch-1.0 -v udpsrc port=$PORT \
caps="application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)JPEG,payload=(int)26" \
! .recv_rtp_sink_0 rtpbin ! rtpjpegdepay ! jpegdec ! autovideosink
Usage:
$(basename $0) [OPTION]
Options:
-p | --port <PORT> Port to receive video data [$PORT]
-h | --help this help
EOF
}

# handle commandline options
while [[ ${1:-} ]]; do
    case "$1" in
        -p | --port )
            shift
            PORT=$1; shift
            ;;
        -h | --help )
            # Fixed: asking for help is not an error — exit 0, not 1.
            usage
            exit 0
            ;;
        * )
            # Unknown option: show the help text and fail.
            usage
            exit 1
            ;;
    esac
done

gst-launch-1.0 -v udpsrc port="$PORT" \
caps="application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)JPEG,payload=(int)26" \
! .recv_rtp_sink_0 rtpbin ! rtpjpegdepay ! jpegdec ! autovideosink
|
public class CreditCardCheckDigit {

    /**
     * Computes the Luhn check digit for a payload of digits (the card number
     * WITHOUT its final check digit).
     *
     * @param creditCardNumber the payload digits, most significant first
     * @return the check digit (0-9) to append to the payload
     * @throws NumberFormatException if the string contains a non-digit
     */
    public static int getCheckDigit(String creditCardNumber) {
        int length = creditCardNumber.length();
        int sum = 0;
        for (int i = 0; i < length; i++) {
            int num = Integer.parseInt(Character.toString(creditCardNumber.charAt(i)));
            // Fixed: double every second digit counting from the RIGHT (the
            // rightmost payload digit sits next to the appended check digit
            // and must be doubled). The old `i % 2 == 0` test counted from
            // the left and was only correct for odd-length payloads.
            if ((length - i) % 2 == 1) {
                num *= 2;
                // A doubled two-digit value contributes the sum of its digits,
                // which equals num - 9 (e.g. 16 -> 1 + 6 = 7).
                if (num > 9) {
                    num -= 9;
                }
            }
            sum += num;
        }
        // The digit that brings the total to a multiple of 10.
        return (sum * 9) % 10;
    }

    public static void main(String[] args) {
        String creditCardNumber = "1234123412341234";
        int checkDigit = getCheckDigit(creditCardNumber);
        System.out.println("The check digit is " + checkDigit);
    }
}
|
# Script to prepare the peddar.param file with the parameters for running the
# PEDDA_ROW python program.
# PEDDA_ROW = program that converts raw Illumina output --> ped and map files.
# Runs over every immediate subdirectory of $GENOTIPDIR.
PATH_PR=/home/janao/Genotipi/Genotipi_CODES/SNPchimpRepo/source_codes/PEDDA_ROW
PASMA="Rjava"
GENOTIPDIR=/home/janao/Downloads/VsiGeno
GCDIR=/home/janao/Genotipi/Genotipi_DATA/Rjava_TEMP/
#mkdir $GCDIR/GCpeds
for i in $(find $GENOTIPDIR -maxdepth 1 -type d);
do
    cp $PATH_PR/pedda_row.py $i #copy python script to each subdirectory
    cp $PATH_PR/peddar.param $i #copy parameter file into each subdirectory
    cd $i
    # unzip *FinalReport.zip
    # unzip SNP_Map.zip
    _FINREP=$(find *FinalReport.txt -printf '%f\n') #find the FinalReport file
    SERNUM=${_FINREP%_FinalReport.txt}_forward #strip the suffix to get the genotyping package number
    sed -i 's/SI /SI/g' $_FINREP #remove the blank space after "SI"
    sed -i "s/test_FinalReport.txt/"${_FINREP}"/g" peddar.param #change finrep parameter in place
    sed -i "s/test_outputfile/"${SERNUM}"/g" peddar.param #change output prefix parameter in place
    sed -i "s/TEST/$PASMA/g" peddar.param #change brdcode (breed) parameter in place
    python pedda_row.py
    cp $SERNUM.ped $GCDIR/GCpeds
    cp $SERNUM.map $GCDIR/GCpeds
done
|
class User:
    """An account holder with an in-memory credential and login registry."""

    # Shared registry of every user that has logged in.
    user_list = []

    def __init__(self, username):
        """Create a user with the given name and no password set yet."""
        self.username = username
        self.password = None

    def create_pw(self, password):
        """Store the given password on this user."""
        self.password = password

    def confirm_pw(self, password):
        """Return True when the candidate matches the stored password."""
        return password == self.password

    def login_user(self):
        """Record this user in the shared ``user_list`` registry."""
        User.user_list.append(self)
|
#!/bin/sh
# Archive name stamped with date and 12-hour time, e.g. BACKUP-2021-05-01-09-30-AM.7z.
BACKUP_NAME="BACKUP-$(date +%F-%I-%M-%p).7z"
# Directory containing the Java (Paper) Minecraft server to back up.
SERVER_DIR="minecraft.jmoore.dev.paper"
echo Backing up Java server...
# a = add to archive, -mmt = multithreaded, -mx9 = maximum compression, -t7z = 7z format.
7za a -mmt -mx9 -t7z $BACKUP_NAME $SERVER_DIR
|
<filename>test/models/fe/responsiblepeople/PositionWithinBusinessSpec.scala
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.fe.responsiblepeople
import models.des.responsiblepeople.{CorpBodyOrUnInCorpBodyOrLlp, Partnership, PositionInBusiness, RPExtra, ResponsiblePersons, SoleProprietor => DesSoleProprietor}
import org.joda.time.LocalDate
import org.scalatestplus.mockito.MockitoSugar
import org.scalatestplus.play.PlaySpec
import play.api.libs.json._
class PositionWithinBusinessSpec extends PlaySpec with MockitoSugar {
  // Reference date reused as the expected "start date" in the conversion tests.
  private val today: LocalDate = new LocalDate()

  "JSON validation" must {
    // Reads: each two-digit string code maps to its position case object.
    "successfully validate given a BeneficialOwner value" in {
      Json.fromJson[PositionWithinBusiness](JsString("01")) must
        be(JsSuccess(BeneficialOwner))
    }
    "successfully validate given a Director value" in {
      Json.fromJson[PositionWithinBusiness](JsString("02")) must
        be(JsSuccess(Director))
    }
    "successfully validate given a InternalAccountant value" in {
      Json.fromJson[PositionWithinBusiness](JsString("03")) must
        be(JsSuccess(InternalAccountant))
    }
    "successfully validate given a NominatedOfficer value" in {
      Json.fromJson[PositionWithinBusiness](JsString("04")) must
        be(JsSuccess(NominatedOfficer))
    }
    "successfully validate given a Partner value" in {
      Json.fromJson[PositionWithinBusiness](JsString("05")) must
        be(JsSuccess(Partner))
    }
    "successfully validate given a SoleProprietor value" in {
      Json.fromJson[PositionWithinBusiness](JsString("06")) must
        be(JsSuccess(SoleProprietor))
    }
    // Free-text positions are carried in an {"other": ...} object.
    "successfully validate given an Other value" in {
      Json.fromJson[PositionWithinBusiness](Json.obj("other" -> "some other role")) mustBe JsSuccess(Other("some other role"))
    }
    "fail to validate when given an empty value" in {
      Json.fromJson[PositionWithinBusiness](JsString("")) must
        be(JsError((JsPath \ "positions") -> JsonValidationError("error.invalid")))
    }
    // Writes: the inverse mapping, case object back to its string code.
    "write the correct value for BeneficialOwner" in {
      Json.toJson(BeneficialOwner: PositionWithinBusiness) must be(JsString("01"))
    }
    "write the correct value for Director" in {
      Json.toJson(Director: PositionWithinBusiness) must be(JsString("02"))
    }
    "write the correct value for InternalAccountant" in {
      Json.toJson(InternalAccountant: PositionWithinBusiness) must be(JsString("03"))
    }
    "write the correct value for NominatedOfficer" in {
      Json.toJson(NominatedOfficer: PositionWithinBusiness) must be(JsString("04"))
    }
    "write the correct value for Partner" in {
      Json.toJson(Partner: PositionWithinBusiness) must be(JsString("05"))
    }
    "write the correct value for SoleProprietor" in {
      Json.toJson(SoleProprietor: PositionWithinBusiness) must be(JsString("06"))
    }
    "write the correct value for Other" in {
      Json.toJson(Other("some other role"): PositionWithinBusiness) mustBe Json.obj("other" -> "some other role")
    }
    // DES -> frontend conversion: flags set on the DES position records become
    // the corresponding set of frontend position values.
    "convert des model to frontend model successfully" in {
      val position = Some(PositionInBusiness(
        Some(DesSoleProprietor(true, true, Some(false), Some("texty text text"))),
        Some(Partnership(true, true)),
        Some(CorpBodyOrUnInCorpBodyOrLlp(true, true, true, Some(true)))
      ))
      val desModel = ResponsiblePersons(None,None,None,None,None,None,None,None,None,position,None,false,None,false,None,Some(today.toString()),None,None,extra = RPExtra())
      Positions.conv(desModel) must be(Some(Positions(Set(Partner, SoleProprietor, NominatedOfficer, Director, BeneficialOwner, DesignatedMember), Some(today))))
    }
    // "Other" free-text roles on each DES record surface as Other(...) values.
    "convert des model to frontend model successfully with other details" in {
      val positions = Seq(
        PositionInBusiness(Some(DesSoleProprietor(true, false, Some(true), Some("another sp role"))), None, None),
        PositionInBusiness(None, Some(Partnership(true, false, Some(true), Some("another partnership role"))), None),
        PositionInBusiness(None, None, Some(CorpBodyOrUnInCorpBodyOrLlp(false, true, false, None, Some(true), Some("another corp role"))))
      )
      val expectedResults = Seq(
        Positions(Set(SoleProprietor, Other("another sp role")), Some(today)),
        Positions(Set(Partner, Other("another partnership role")), Some(today)),
        Positions(Set(BeneficialOwner, Other("another corp role")), Some(today))
      )
      positions.zip(expectedResults) foreach {
        case (pos, result) =>
          //noinspection ScalaStyle
          val desModel = ResponsiblePersons(None,None,None,None,None,None,None,None,None,Some(pos),None,false,None,false,None,Some(today.toString()),None,None,extra = RPExtra())
          Positions.conv(desModel) mustBe Some(result)
      }
    }
    // All-false flags convert to no Positions at all.
    "convert des model to frontend model successfully when user has no data selected" in {
      val position = Some(PositionInBusiness(
        Some(DesSoleProprietor(false, false)),
        Some(Partnership(false, false)),
        Some(CorpBodyOrUnInCorpBodyOrLlp(false, false, false))
      ))
      val desModel = ResponsiblePersons(None,None,None,None,None,None,None,None,None,position,None,false,None,false,None,None,None,None,extra = RPExtra())
      Positions.conv(desModel) must be(None)
    }
    "convert des model to frontend model successfully when input is none" in {
      Positions.conv(None) must be(None)
    }
  }
}
|
package com.hedera.sdk.common;
public class Entity {

    // Identity and classification of the entity.
    private String entityId;
    private String entityName;
    private String entityType;

    /**
     * Creates an entity from its id, display name and type.
     */
    public Entity(String id, String name, String type) {
        entityId = id;
        entityName = name;
        entityType = type;
    }

    public String getEntityId() {
        return entityId;
    }

    public void setEntityId(String id) {
        entityId = id;
    }

    public String getEntityName() {
        return entityName;
    }

    public void setEntityName(String name) {
        entityName = name;
    }

    public String getEntityType() {
        return entityType;
    }

    public void setEntityType(String type) {
        entityType = type;
    }

    /** File-support hook; the body is intentionally empty (not yet implemented). */
    public void uploadFile(String filePath) {
        // Implementation to upload file to blockchain
    }

    /** Transaction-support hook; the body is intentionally empty (not yet implemented). */
    public void initiateTransaction(double amount, String recipient) {
        // Implementation to initiate transaction on blockchain
    }

    /** Smart-contract hook; the body is intentionally empty (not yet implemented). */
    public void deploySmartContract(String contractCode) {
        // Implementation to deploy smart contract on blockchain
    }
}
|
import React from 'react';
import PropTypes from 'prop-types';
const DEFAULT_LENGTH = 50;
const DEFAULT_SUFFIX = '…';

// Truncates `text` to at most `length` characters, replacing the tail with
// `suffix` when a cut is needed.
// Fixed: the old version always took substr(0, length - 1), so a string of
// EXACTLY `length` characters lost its final character without gaining the
// suffix. Text that already fits is now returned unchanged.
export const truncate = (text, length = DEFAULT_LENGTH, suffix = DEFAULT_SUFFIX) =>
  (text.length > length ? text.substr(0, length - 1) + suffix : text);
// Renders its text children truncated to `length` characters inside a <span>,
// forwarding any remaining props (className, style, ...) to the span element.
const Truncate = ({
  children, length, suffix, ...rest
}) => (
  <span {...rest}>
    {truncate(children, length, suffix)}
  </span>
);

Truncate.propTypes = {
  // Content to truncate; truncate() reads .length/.substr, so string children
  // are expected in practice — TODO confirm non-string nodes are never passed.
  children: PropTypes.node.isRequired,
  length: PropTypes.number,
  suffix: PropTypes.string,
};

// NOTE(review): defaultProps on function components is deprecated in newer
// React versions; truncate() itself also carries the same defaults.
Truncate.defaultProps = {
  length: DEFAULT_LENGTH,
  suffix: DEFAULT_SUFFIX,
};

export default Truncate;
|
#!/bin/bash
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Generate the C++ extension source from the Cython wrapper; the emitted
# .cpp file is what the package build then compiles.
cython --cplus sasl/saslwrapper.pyx
|
#!/bin/sh
# Download, configure and start an Elasticsearch snapshot build for testing.
# Requires curl, jq and unzip; ES_VERSION selects the branch to fetch.
# NOTE(review): $ES_VERSION is unquoted in the test below — confirm it can
# never contain whitespace before relying on this check.
if [ -z $ES_VERSION ]; then
echo "No ES_VERSION specified";
exit 1;
fi;
# Stop any Java process left over from a previous run (ignore failures).
killall java 2>/dev/null
echo "Downloading Elasticsearch v${ES_VERSION}-SNAPSHOT..."
# Resolve the snapshot zip URL for the requested branch from the esvm index.
ES_URL=$(curl -sS "https://esvm-props.kibana.rocks/builds" | jq -r ".branches[\"$ES_VERSION\"].zip")
curl -L -o elasticsearch-latest-SNAPSHOT.zip $ES_URL
unzip "elasticsearch-latest-SNAPSHOT.zip"
echo "Adding repo to config..."
# Allow URL-based snapshot repositories and /tmp as a filesystem repo path.
find . -name "elasticsearch.yml" | while read TXT ; do echo 'repositories.url.allowed_urls: ["http://*"]' >> $TXT ; done
find . -name "elasticsearch.yml" | while read TXT ; do echo 'path.repo: ["/tmp"]' >> $TXT ; done
echo "Starting Elasticsearch v${ES_VERSION}"
# Start a single local node on port 9200, daemonized (-d).
./elasticsearch-*/bin/elasticsearch \
-Des.network.host=localhost \
-Des.discovery.zen.ping.multicast.enabled=false \
-Des.discovery.zen.ping_timeout=1s \
-Des.http.port=9200 \
-Des.node.testattr=test \
-d
# Give the node a moment to come up before callers hit it.
sleep 3
|
// Print the integers 0 through N inclusive, one per line.
const N = 10;
let current = 0;
while (current <= N) {
  console.log(current);
  current += 1;
}
|
<filename>android/src/main/java/plugin/album/view/alivideo/AliControlView.java
package plugin.album.view.alivideo;
import android.content.Context;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.view.LayoutInflater;
import android.view.View;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.RelativeLayout;
import android.widget.SeekBar;
import android.widget.TextView;
import com.aliyun.player.nativeclass.MediaInfo;
import java.lang.ref.WeakReference;
import plugin.album.R;
import plugin.album.view.ViewClickListener;
/**
 * Bottom control bar for {@link AliVideoView}: play/pause state, seek bar with
 * buffered progress, elapsed/total time, and close/more buttons. The bar
 * auto-hides a few seconds after it is shown.
 */
public class AliControlView extends RelativeLayout implements View.OnClickListener{
    private LinearLayout mBottomContainer;
    private ImageView mStateIcon;
    private TextView mCurrTime;
    private TextView mTotalTime;
    private SeekBar mSeekBar;
    private ImageView mCloseBtn;
    private ImageView mMoreBtn;
    private ImageView mPauseIv;
    // Weak reference to the owning video view to avoid a retain cycle.
    private WeakReference<AliVideoView> weakReference;
    private MediaInfo mMediaInfo;
    // minutes:seconds, zero padded.
    private String mTimeFormat = "%02d:%02d";
    private ViewClickListener mListener;
    // Message id used to schedule the delayed auto-hide of the control bar.
    private final int DELAY_HIDDEN_CONTROL = 101;
    private int mVideoBufferPosition = 0;
    private int mVideoPosition = 0;
    // In "simple" mode the bottom bar stays hidden (see setSimpleModel).
    private boolean mSimpleModel = false;
    private Handler mHandler = new Handler(Looper.getMainLooper()) {
        @Override
        public void handleMessage(Message msg) {
            super.handleMessage(msg);
            switch (msg.what) {
                case DELAY_HIDDEN_CONTROL:
                    // Auto-hide fired: collapse the control bar.
                    showBottomControl(false);
                    break;
            }
        }
    };
    private SeekBar.OnSeekBarChangeListener onSeekBarChangeListener =
            new SeekBar.OnSeekBarChangeListener() {
                @Override
                public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
                    if (fromUser) {
                        setCurrTime(progress);
                    }
                }
                @Override
                public void onStartTrackingTouch(SeekBar seekBar) {
                    mHandler.removeMessages(DELAY_HIDDEN_CONTROL); // keep controls visible while dragging the seek bar
                }
                @Override
                public void onStopTrackingTouch(SeekBar seekBar) {
                    showBottomControl(true); // show again and re-arm the auto-hide timeout
                    AliVideoView aliVideoView = weakReference.get();
                    if (aliVideoView != null) {
                        aliVideoView.onSeekEnd(seekBar.getProgress());
                    }
                }
            };

    public AliControlView(Context context, AliVideoView aliVideoView) {
        super(context);
        weakReference = new WeakReference<>(aliVideoView);
        initView();
    }

    /** Inflates the layout, binds subviews and wires up listeners. */
    private void initView() {
        LayoutInflater.from(getContext()).inflate(R.layout.layout_alicontrol_view, this);
        mBottomContainer = findViewById(R.id.cover_player_controller_bottom_container);
        mStateIcon = findViewById(R.id.alivideo_controller_play_state);
        mPauseIv = findViewById(R.id.iv_pause);
        mCurrTime = findViewById(R.id.cover_player_controller_text_view_curr_time);
        mTotalTime = findViewById(R.id.cover_player_controller_text_view_total_time);
        mSeekBar = findViewById(R.id.cover_player_controller_seek_bar);
        mMoreBtn = findViewById(R.id.cover_player_controller_more);
        mCloseBtn = findViewById(R.id.cover_player_controller_close);
        mSeekBar.setOnSeekBarChangeListener(onSeekBarChangeListener);
        mStateIcon.setOnClickListener(this);
        mPauseIv.setOnClickListener(this);
        mMoreBtn.setOnClickListener(this);
        mCloseBtn.setOnClickListener(this);
        showBottomControl(true);
    }

    public void setListener(ViewClickListener listener) {
        mListener = listener;
    }

    @Override
    public void onClick(View view) {
        int id = view.getId();
        if (id == R.id.alivideo_controller_play_state || id == R.id.iv_pause) {
            onClickPlayBtn();
        } else if (id == R.id.cover_player_controller_close) {
            if (mListener != null)
                mListener.onCloseClick();
        } else if (id == R.id.cover_player_controller_more) {
            if (mListener != null)
                mListener.onMoreClick();
        }
        showBottomControl(true); // any tap re-shows the bar and re-arms auto-hide
    }

    /** Toggles between playing and paused based on the state icon's selection. */
    public void onClickPlayBtn() {
        boolean isStop = mStateIcon.isSelected();
        AliVideoView aliVideoView = weakReference.get();
        if (aliVideoView != null) {
            if (isStop) {
                aliVideoView.start();
            } else {
                aliVideoView.pause();
            }
        }
        setPlayState(!isStop);
        if (mSimpleModel && mListener != null) mListener.onClickPlay(!isStop);
    }

    /** Updates the play-state icon and the centered pause overlay. */
    public void setPlayState(boolean isStop) {
        mStateIcon.setSelected(isStop);
        mPauseIv.setVisibility(isStop ? VISIBLE : GONE);
    }

    /** In simple mode the bottom control bar is kept hidden. */
    public void setSimpleModel(boolean simpleModel) {
        mSimpleModel = simpleModel;
        showBottomControl(!simpleModel);
    }

    /** Shows the control bar if hidden, hides it if visible. */
    public void toggleController() {
        if (mBottomContainer.getVisibility() == View.VISIBLE) {
            showBottomControl(false);
        } else {
            showBottomControl(true);
        }
    }

    /**
     * Shows or hides the bottom bar. Showing (outside simple mode) also
     * schedules an automatic hide after 4 seconds.
     */
    public void showBottomControl(boolean show) {
        if (show && !mSimpleModel) {
            mHandler.removeMessages(DELAY_HIDDEN_CONTROL);
            mHandler.sendEmptyMessageDelayed(DELAY_HIDDEN_CONTROL, 4000);
            mBottomContainer.setVisibility(View.VISIBLE);
        } else {
            mHandler.removeMessages(DELAY_HIDDEN_CONTROL);
            mBottomContainer.setVisibility(View.GONE);
        }
    }

    /** Binds media info; duration drives the total-time label and seek range. */
    public void setMediaInfo(MediaInfo aliMediaInfo) {
        mMediaInfo = aliMediaInfo;
        if (mMediaInfo == null) return;
        int totalDuration = mMediaInfo.getDuration();
        setTotalTime(totalDuration);
        mSeekBar.setMax(totalDuration);
    }

    /** Updates the seek bar and elapsed-time label to the playback position. */
    public void setVideoPosition(int position) {
        mVideoPosition = position;
        mSeekBar.setProgress(mVideoPosition);
        setCurrTime(mVideoPosition);
    }

    /** Updates the secondary (buffered) progress on the seek bar. */
    public void setVideoBufferPosition(int videoBufferPosition) {
        mVideoBufferPosition = videoBufferPosition;
        mSeekBar.setSecondaryProgress(mVideoBufferPosition);
    }

    private void setTotalTime(int duration) {
        mTotalTime.setText(getTime(mTimeFormat, duration));
    }

    private void setCurrTime(int curr) {
        mCurrTime.setText(getTime(mTimeFormat, curr));
    }

    /**
     * Formats a duration in milliseconds as minutes/seconds using the given
     * format string; negative input is clamped to zero. Note: minutes are not
     * wrapped at 60, so an hour-long value renders as e.g. "75:30".
     */
    public static String getTime(String format, long time){
        if(time <= 0) time = 0;
        int totalSeconds = (int) (time / 1000);
        int seconds = totalSeconds % 60;
        int minutes = totalSeconds / 60;
        return String.format(format, minutes, seconds);
    }
}
|
<reponame>NajibAdan/kitsu-server
# rubocop:disable Metrics/LineLength
# == Schema Information
#
# Table name: groups
#
# id :integer not null, primary key
# about :text default(""), not null
# avatar_content_type :string(255)
# avatar_file_name :string(255)
# avatar_file_size :integer
# avatar_meta :text
# avatar_processing :boolean default(FALSE), not null
# avatar_updated_at :datetime
# cover_image_content_type :string(255)
# cover_image_file_name :string(255)
# cover_image_file_size :integer
# cover_image_meta :text
# cover_image_updated_at :datetime
# featured :boolean default(FALSE), not null
# last_activity_at :datetime
# leaders_count :integer default(0), not null
# locale :string
# members_count :integer default(0)
# name :string(255) not null
# neighbors_count :integer default(0), not null
# nsfw :boolean default(FALSE), not null
# privacy :integer default(0), not null
# rules :text
# rules_formatted :text
# slug :string(255) not null, indexed
# tagline :string(60)
# tags :string default([]), not null, is an Array
# created_at :datetime not null
# updated_at :datetime not null
# category_id :integer not null, indexed
# pinned_post_id :integer
#
# Indexes
#
# index_groups_on_category_id (category_id)
# index_groups_on_slug (slug) UNIQUE
#
# Foreign Keys
#
# fk_rails_a61500b09c (category_id => group_categories.id)
# fk_rails_ae0dbbc874 (pinned_post_id => posts.id)
#
# rubocop:enable Metrics/LineLength
FactoryBot.define do
  factory :group do
    # Random, human-readable group name.
    name { Faker::University.name }
    # Build (don't persist) the category so each spec controls persistence.
    association :category, factory: :group_category, strategy: :build
  end
end
|
// Static portfolio data: one entry per project card.
// `image` paths are relative to the consuming component's asset root.
// NOTE(review): "Project b" is lowercase unlike its siblings — confirm whether
// that casing is intentional before normalizing.
export const Projects = [
  {
    name: "Random member A",
    title: "Project A",
    image: "./utils/images/projects/project_a.jpg",
  },
  {
    name: "Random member B",
    title: "Project b",
    image: "./utils/images/projects/project_b.png",
  },
  {
    name: "Random member C",
    title: "Project C",
    image: "./utils/images/projects/project_c.jpg",
  },
];
|
import random
def random_list(n):
    """Return a list of ``n`` pseudo-random integers, each in [0, n-1]."""
    # One randint call per element, in order — same draw sequence as a loop.
    return [random.randint(0, n - 1) for _ in range(n)]
# Driver Code
# Example run: print 6 pseudo-random values drawn from [0, 5].
n = 6
print(random_list(n))
|
public class Vector
{
    // Backing storage for the two components.
    private double _x;
    private double _y;

    /// <summary>Horizontal component.</summary>
    public double X
    {
        get { return _x; }
        set { _x = value; }
    }

    /// <summary>Vertical component.</summary>
    public double Y
    {
        get { return _y; }
        set { _y = value; }
    }

    /// <summary>Creates a vector from its two components.</summary>
    public Vector(double x, double y)
    {
        _x = x;
        _y = y;
    }
}
|
/*
Info: JavaScript for JavaScript Basics Lesson 2, JavaScript Syntax, Task 13, Digital Soothsayer
Author: Removed for reasons of anonymity
Successfully checked as valid in JSLint Validator at: http://www.jslint.com/ and JSHint Validator at: http://www.jshint.com/
*/
'use strict';
function soothsayer(args) {
    // Draw one uniformly random element from a pool. Draws happen in the
    // fixed order year, language, city, car (one Math.random call each).
    var pick = function (pool) {
        return pool[Math.floor(Math.random() * pool.length)];
    };
    return [
        pick(args[0]),
        pick(args[1]),
        pick(args[2]),
        pick(args[3])
    ];
}
// Browser entry point: draws a fortune from the hard-coded pools and appends
// it as a <p> to the element with id "output" (assumes it exists in the DOM).
function soothsayerByInput() {
    var output = document.getElementById("output");
    var someVariables = [
        [1, 8, 3, 5, 2, 7, 4, 6, 9],
        ['Java', 'Python', 'C#', 'JavaScript', 'Ruby', 'PHP', 'C++', 'Database', 'Mobile Applications', 'Web Applications'],
        ['Silicon Valley', 'London', 'Las Vegas', 'Paris', 'Sofia', 'New York', 'Berlin', 'Bern', 'Madrid', 'Athens', 'Otawa'],
        ['BMW', 'Audi', 'Lada', 'Skoda', 'Opel', 'Mercedes', 'Ferrari', 'WV', 'Porshe', 'Bike', 'Harley-Davidson']
    ];
    var elementP = document.createElement("p");
    var result = soothsayer(someVariables);
    // Compose the fortune sentence from the four picks and render it.
    elementP.innerHTML = "You will work " + result[0] + " years on " + result[1] + ". You will live in " + result[2] + " and drive " + result[3] + ".";
    output.appendChild(elementP);
}
/* For node.js result */
// Smaller sample pools used when running under node (no DOM available).
var variables = [
    [3, 5, 2, 7, 9],
    ['Java', 'Python', 'C#', 'JavaScript', 'Ruby'],
    ['Silicon Valley', 'London', 'Las Vegas', 'Paris', 'Sofia'],
    ['BMW', 'Audi', 'Lada', 'Skoda', 'Opel']
];
var outResult;
var sooth = "";
outResult = soothsayer(variables);
// Compose the fortune sentence from the four picks and print it to stdout.
sooth = "You will work " + outResult[0] + " years on " + outResult[1] + ". You will live in " + outResult[2] + " and drive " + outResult[3] + ".";
console.log(sooth);
|
# Build conda packages locally; never auto-upload results to anaconda.org.
conda config --set anaconda_upload no
conda build flann
# Build pyflann for every configured Python version.
conda build --py all pyflann
# Build pyamg and megaman against specific Python/NumPy combinations.
conda build --python 2.7 --python 3.4 --python 3.5 --numpy 1.9 --numpy 1.10 pyamg
conda build --python 2.7 --python 3.4 --python 3.5 --numpy 1.10 megaman
|
<reponame>m-nakagawa/sample
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.permissions.query.rewriter;
import java.util.ArrayList;
import java.util.List;
import org.apache.jena.graph.Node;
import org.apache.jena.graph.NodeFactory;
import org.apache.jena.graph.Triple;
import org.apache.jena.permissions.SecuredItem;
import org.apache.jena.permissions.SecurityEvaluator;
import org.apache.jena.permissions.SecurityEvaluator.Action;
import org.apache.jena.shared.AuthenticationRequiredException;
import org.apache.jena.shared.ReadDeniedException;
import org.apache.jena.sparql.algebra.Op;
import org.apache.jena.sparql.algebra.OpVisitor;
import org.apache.jena.sparql.algebra.op.*;
import org.apache.jena.sparql.core.BasicPattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class rewrites the query by examining each operation in the algebra
* returned by the Jena SPARQL parser.
* <p>
* This implementation inserts security evaluator checks where necessary.
* </p>
*/
public class OpRewriter implements OpVisitor {
    // The logger is a shared constant; declared final (was mutable before).
    private static final Logger LOG = LoggerFactory.getLogger(OpRewriter.class);
    private OpSequence result;
    private final Node graphIRI;
    private final SecurityEvaluator securityEvaluator;
    // if true the restricted data are silently ignored.
    // default false
    private final boolean silentFail;

    /**
     * Constructor
     *
     * @param securityEvaluator The security evaluator to use
     * @param graphIRI The IRI for the default graph.
     */
    public OpRewriter(final SecurityEvaluator securityEvaluator, final Node graphIRI) {
        this.securityEvaluator = securityEvaluator;
        this.graphIRI = graphIRI;
        this.silentFail = false;
        reset();
    }

    /**
     * Constructor
     *
     * @param securityEvaluator The security evaluator to use
     * @param graphIRI The IRI for the default graph.
     */
    public OpRewriter(final SecurityEvaluator securityEvaluator, final String graphIRI) {
        this(securityEvaluator, NodeFactory.createURI(graphIRI));
    }

    /**
     * Debug-logs entry into a visit method. Factored out of every visit()
     * override, which previously repeated the isDebugEnabled guard inline.
     *
     * @param opName the (original, sometimes irregular) operation label.
     */
    private static void logVisit(final String opName) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Starting visiting {}", opName);
        }
    }

    /**
     * Add the operation to the result.
     *
     * @param op the operation to add.
     */
    private void addOp(final Op op) {
        result.add(op);
    }

    /**
     * Get the result of the rewrite.
     *
     * @return the resulting operator
     */
    public Op getResult() {
        if (result.size() == 0) {
            return OpNull.create();
        }
        if (result.size() == 1) {
            return result.get(0);
        }
        return result;
    }

    /**
     * Register variables.
     *
     * Registers n as a variable if it is one.
     *
     * @param n the node to check
     * @param variables the list of variable nodes
     * @return n for chaining.
     */
    private Node registerVariables(final Node n, final List<Node> variables) {
        if (n.isVariable() && !variables.contains(n)) {
            variables.add(n);
        }
        return n;
    }

    /**
     * Reset the rewriter to the initial state.
     *
     * @return this rewriter for chaining.
     */
    public OpRewriter reset() {
        result = OpSequence.create();
        return this;
    }

    /**
     * Register all the variables in the triple.
     *
     * @param t the triple to register.
     * @param variables The list of variables.
     * @return t for chaining
     */
    private Triple registerBGPTriple(final Triple t, final List<Node> variables) {
        registerVariables(t.getSubject(), variables);
        registerVariables(t.getPredicate(), variables);
        registerVariables(t.getObject(), variables);
        return t;
    }

    /**
     * Rewrites the subop of op1 and returns the result.
     *
     * @param op1
     * @return the rewritten op.
     */
    private Op rewriteOp1(final Op1 op1) {
        final OpRewriter rewriter = new OpRewriter(securityEvaluator, graphIRI);
        op1.getSubOp().visit(rewriter);
        return rewriter.getResult();
    }

    /**
     * rewrites the left and right parts of the op2 the left part is returned
     * the right part is placed in the rewriter
     *
     * @param op2
     * @param rewriter
     * @return the rewritten op.
     */
    private Op rewriteOp2(final Op2 op2, final OpRewriter rewriter) {
        op2.getLeft().visit(rewriter.reset());
        final Op left = rewriter.getResult();
        op2.getRight().visit(rewriter.reset());
        return left;
    }

    /**
     * rewrite source to dest and returns dest
     *
     * @param source
     * @param dest
     * @return the rewritten op.
     */
    private OpN rewriteOpN(final OpN source, final OpN dest) {
        final OpRewriter rewriter = new OpRewriter(securityEvaluator, graphIRI);
        for (final Op o : source.getElements()) {
            o.visit(rewriter.reset());
            dest.add(rewriter.getResult());
        }
        return dest;
    }

    /**
     * rewrites the subop of assign.
     */
    @Override
    public void visit(final OpAssign opAssign) {
        logVisit("OpAssign");
        addOp(OpAssign.assign(rewriteOp1(opAssign), opAssign.getVarExprList()));
    }

    /**
     * Checks read permission on the graph; either passes the BGP through
     * (when any triple may be read) or wraps it in a security filter.
     */
    @Override
    public void visit(final OpBGP opBGP) throws ReadDeniedException,
            AuthenticationRequiredException {
        logVisit("OpBGP");
        Object principal = securityEvaluator.getPrincipal();
        if (!securityEvaluator.evaluate(principal, Action.Read, graphIRI)) {
            if (silentFail) {
                return;
            } else {
                throw new ReadDeniedException(
                        SecuredItem.Util.modelPermissionMsg(graphIRI));
            }
        }
        // if the user can read any triple just add the opBGP
        if (securityEvaluator.evaluate(principal, Action.Read, graphIRI,
                Triple.ANY)) {
            addOp(opBGP);
        } else {
            // add security filtering to the resulting triples
            final List<Triple> newBGP = new ArrayList<Triple>();
            final List<Node> variables = new ArrayList<Node>();
            // register all variables
            for (final Triple t : opBGP.getPattern().getList()) {
                newBGP.add(registerBGPTriple(t, variables));
            }
            // create the security function.
            final SecuredFunction secFunc = new SecuredFunction(graphIRI,
                    securityEvaluator, variables, newBGP);
            // create the filter
            Op filter = OpFilter.filter(secFunc,
                    new OpBGP(BasicPattern.wrap(newBGP)));
            // add the filter
            addOp(filter);
        }
    }

    /**
     * Rewrite left and right
     */
    @Override
    public void visit(final OpConditional opCondition) {
        logVisit("OpConditional");
        final OpRewriter rewriter = new OpRewriter(securityEvaluator, graphIRI);
        addOp(new OpConditional(rewriteOp2(opCondition, rewriter),
                rewriter.getResult()));
    }

    /**
     * returns the dsNames
     */
    @Override
    public void visit(final OpDatasetNames dsNames) {
        logVisit("OpDatasetName");
        addOp(dsNames);
    }

    /**
     * Rewrite left and right
     */
    @Override
    public void visit(final OpDiff opDiff) {
        logVisit("OpDiff");
        final OpRewriter rewriter = new OpRewriter(securityEvaluator, graphIRI);
        addOp(OpDiff.create(rewriteOp2(opDiff, rewriter), rewriter.getResult()));
    }

    /**
     * Rewrite sequence elements
     */
    @Override
    public void visit(final OpDisjunction opDisjunction) {
        logVisit("OpDisjunction");
        addOp(rewriteOpN(opDisjunction, OpDisjunction.create()));
    }

    /**
     * rewrites the subop of distinct
     */
    @Override
    public void visit(final OpDistinct opDistinct) {
        logVisit("OpDistinct");
        addOp(new OpDistinct(rewriteOp1(opDistinct)));
    }

    /**
     * Returns the Ext
     */
    @Override
    public void visit(final OpExt opExt) {
        logVisit("OpExt");
        addOp(opExt);
    }

    /**
     * rewrites the subop of extend.
     */
    @Override
    public void visit(final OpExtend opExtend) {
        logVisit("OpExtend");
        addOp(OpExtend.extend(rewriteOp1(opExtend), opExtend.getVarExprList()));
    }

    /**
     * rewrites the subop of filter.
     */
    @Override
    public void visit(final OpFilter opFilter) {
        logVisit("OpFilter");
        addOp(OpFilter.filter(opFilter.getExprs(), rewriteOp1(opFilter)));
    }

    /**
     * rewrites the subop of graph; the named graph becomes the graph IRI
     * for the nested rewriter.
     */
    @Override
    public void visit(final OpGraph opGraph) {
        logVisit("OpGraph");
        final OpRewriter rewriter = new OpRewriter(securityEvaluator,
                opGraph.getNode());
        opGraph.getSubOp().visit(rewriter);
        addOp(new OpGraph(opGraph.getNode(), rewriter.getResult()));
    }

    /**
     * rewrites the subop of group.
     */
    @Override
    public void visit(final OpGroup opGroup) {
        logVisit("OpGroup");
        addOp(new OpGroup(rewriteOp1(opGroup), opGroup.getGroupVars(),
                opGroup.getAggregators()));
    }

    /**
     * Parses the joins and recursively calls the left and right parts
     */
    @Override
    public void visit(final OpJoin opJoin) {
        logVisit("OpJoin");
        final OpRewriter rewriter = new OpRewriter(securityEvaluator, graphIRI);
        addOp(OpJoin.create(rewriteOp2(opJoin, rewriter), rewriter.getResult()));
    }

    /**
     * returns the label
     */
    @Override
    public void visit(final OpLabel opLabel) {
        logVisit("OpLabel");
        addOp(opLabel);
    }

    /**
     * Parses the joins and recursively calls the left and right parts
     */
    @Override
    public void visit(final OpLeftJoin opLeftJoin) {
        logVisit("OpLeftJoin");
        final OpRewriter rewriter = new OpRewriter(securityEvaluator, graphIRI);
        addOp(OpLeftJoin.create(rewriteOp2(opLeftJoin, rewriter),
                rewriter.getResult(), opLeftJoin.getExprs()));
    }

    /**
     * rewrites the subop of list.
     */
    @Override
    public void visit(final OpList opList) {
        logVisit("OpList");
        addOp(new OpList(rewriteOp1(opList)));
    }

    /**
     * Rewrite left and right
     */
    @Override
    public void visit(final OpMinus opMinus) {
        logVisit("OpMinus");
        final OpRewriter rewriter = new OpRewriter(securityEvaluator, graphIRI);
        addOp(OpMinus.create(rewriteOp2(opMinus, rewriter),
                rewriter.getResult()));
    }

    /**
     * returns the null
     */
    @Override
    public void visit(final OpNull opNull) {
        logVisit("OpNull");
        addOp(opNull);
    }

    /**
     * rewrites the subop of order.
     */
    @Override
    public void visit(final OpOrder opOrder) {
        logVisit("OpOrder");
        addOp(new OpOrder(rewriteOp1(opOrder), opOrder.getConditions()));
    }

    /**
     * Returns the path
     */
    @Override
    public void visit(final OpPath opPath) {
        logVisit("OpPath");
        addOp(opPath);
    }

    /**
     * rewrites the subop of proc.
     */
    @Override
    public void visit(final OpProcedure opProc) {
        logVisit("OpProc");
        if (opProc.getProcId() != null) {
            addOp(new OpProcedure(opProc.getProcId(), opProc.getArgs(),
                    rewriteOp1(opProc)));
        } else {
            addOp(new OpProcedure(opProc.getURI(), opProc.getArgs(),
                    rewriteOp1(opProc)));
        }
    }

    /**
     * rewrites the subop of project.
     */
    @Override
    public void visit(final OpProject opProject) {
        logVisit("OpProject");
        addOp(new OpProject(rewriteOp1(opProject), opProject.getVars()));
    }

    /**
     * rewrites the subop of propFunc.
     */
    @Override
    public void visit(final OpPropFunc opPropFunc) {
        logVisit("OpPropFunc");
        addOp(new OpPropFunc(opPropFunc.getProperty(),
                opPropFunc.getSubjectArgs(), opPropFunc.getObjectArgs(),
                rewriteOp1(opPropFunc)));
    }

    /**
     * Returns the quad
     */
    @Override
    public void visit(final OpQuad opQuad) {
        logVisit("OpQuad");
        addOp(opQuad);
    }

    /**
     * Returns the quadpattern
     */
    @Override
    public void visit(final OpQuadPattern quadPattern) {
        logVisit("OpQuadPattern");
        addOp(quadPattern);
    }

    /**
     * rewrites the subop of reduced.
     */
    @Override
    public void visit(final OpReduced opReduced) {
        logVisit("OpReduced");
        addOp(OpReduced.create(rewriteOp1(opReduced)));
    }

    /**
     * Rewrite sequence elements
     */
    @Override
    public void visit(final OpSequence opSequence) {
        logVisit("OpSequence");
        addOp(rewriteOpN(opSequence, OpSequence.create()));
    }

    /**
     * returns the service
     */
    @Override
    public void visit(final OpService opService) {
        logVisit("opService");
        addOp(opService);
    }

    /**
     * rewrites the subop of slice
     *
     * This also handles the limit case
     */
    @Override
    public void visit(final OpSlice opSlice) {
        logVisit("OpSlice");
        addOp(opSlice);
    }

    /**
     * returns the table
     */
    @Override
    public void visit(final OpTable opTable) {
        logVisit("OpTable");
        addOp(opTable);
    }

    /**
     * rewrites the subop of top.
     */
    @Override
    public void visit(final OpTopN opTop) {
        logVisit("OpTop");
        addOp(new OpTopN(rewriteOp1(opTop), opTop.getLimit(),
                opTop.getConditions()));
    }

    /**
     * Converts to BGP
     */
    @Override
    public void visit(final OpTriple opTriple) {
        logVisit("OpTriple");
        visit(opTriple.asBGP());
    }

    /**
     * Rewrite left and right
     */
    @Override
    public void visit(final OpUnion opUnion) {
        logVisit("OpUnion");
        final OpRewriter rewriter = new OpRewriter(securityEvaluator, graphIRI);
        addOp(OpUnion.create(rewriteOp2(opUnion, rewriter),
                rewriter.getResult()));
    }

    /**
     * returns the quad block unchanged.
     */
    @Override
    public void visit(OpQuadBlock quadBlock) {
        logVisit("OpQuadBlock");
        addOp(quadBlock);
    }
}
|
#!/usr/bin/env bash
# Pipeline driver: preprocess the dataset, then train, validate and test
# the model with the same script in its three modes.
python3 preprocess\ data.py
python3 train_model.py --action train
python3 train_model.py --action val
python3 train_model.py --action test
|
<reponame>smagill/opensphere-desktop<gh_stars>10-100
package io.opensphere.wps.envoy;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
import io.opensphere.core.Toolbox;
import io.opensphere.core.cache.matcher.PropertyMatcher;
import io.opensphere.core.data.QueryException;
import io.opensphere.mantle.data.DataGroupInfo;
import io.opensphere.server.services.ServerConnectionParams;
import io.opensphere.wps.util.ServerException;
import io.opensphere.wps.util.WpsServerConnectionHelper;
import net.opengis.wps._100.WPSCapabilitiesType;
/**
 * An envoy implementation used to execute a get capabilities request against a single server.
 */
public class WpsGetCapabilitiesEnvoy extends AbstractWpsDataRegistryEnvoy<WPSCapabilitiesType>
{
    /**
     * The data group for the server.
     */
    private final DataGroupInfo myServerDataGroup;

    /**
     * Creates a new envoy through which a WPS Get Capabilities request is executed for a single server.
     *
     * @param pToolbox the toolbox through which application interaction occurs.
     * @param pServer the configuration of the server to which the query will be made.
     * @param pServerDataGroup The data group for the server.
     */
    public WpsGetCapabilitiesEnvoy(Toolbox pToolbox, ServerConnectionParams pServer, DataGroupInfo pServerDataGroup)
    {
        super(pToolbox, pServer, WpsRequestType.GET_CAPABLITIES, WpsPropertyDescriptors.WPS_GET_CAPABILITIES);
        myServerDataGroup = pServerDataGroup;
    }

    /**
     * Requests a capabilities document from a remote server, but does so when the server has not yet been connected. This occurs
     * primarily when the user has added an entry for the server, but not yet connected to it, and clicks "validate".
     *
     * @return a {@link WPSCapabilitiesType} instance retrieved from the remote server.
     * @throws QueryException if the remote server cannot be accessed.
     */
    public WPSCapabilitiesType getCapabilities() throws QueryException
    {
        return executeQuery(
                new WpsServerConnectionHelper(WpsUrlHelper.buildGetCapabilitiesUrl(getServer().getWpsUrl()), getToolbox()),
                getParameterMap());
    }

    /**
     * {@inheritDoc}
     *
     * @see io.opensphere.wps.envoy.AbstractWpsEnvoy#executeQuery(io.opensphere.wps.util.WpsServerConnectionHelper,
     *      java.util.Map)
     */
    @Override
    protected WPSCapabilitiesType executeQuery(WpsServerConnectionHelper pHelper, Map<String, String> pParameters)
        throws QueryException
    {
        WPSCapabilitiesType responseObject;
        // try-with-resources guarantees the HTTP stream is closed; parse and
        // transport failures are mapped to distinct QueryException messages.
        try (InputStream stream = executeStreamQuery(pHelper, pParameters))
        {
            responseObject = pHelper.parseStream(WPSCapabilitiesType.class, stream);
        }
        catch (ServerException e)
        {
            throw new QueryException(
                    "Unable to parse WPS get capabilities response from server '" + getServer().getWpsUrl() + "'", e);
        }
        catch (IOException e)
        {
            throw new QueryException("Unable to query remote server '" + getServer().getWpsUrl() + "' for WPS Get Capabilities",
                    e);
        }
        return responseObject;
    }

    /**
     * {@inheritDoc}
     *
     * @see io.opensphere.wps.envoy.AbstractWpsEnvoy#executeStreamQuery(io.opensphere.wps.util.WpsServerConnectionHelper,
     *      java.util.Map)
     */
    @Override
    protected InputStream executeStreamQuery(WpsServerConnectionHelper pHelper, Map<String, String> pParameters)
        throws QueryException
    {
        try
        {
            return pHelper.requestStream();
        }
        catch (ServerException e)
        {
            throw new QueryException("Unable to query remote server '" + getServer().getWpsUrl() + "' for WPS Get Capabilities",
                    e);
        }
    }

    /**
     * {@inheritDoc}
     *
     * @see io.opensphere.wps.envoy.AbstractWpsDataRegistryEnvoy#getParameterMap(java.util.List)
     */
    @Override
    protected Map<String, String> getParameterMap(List<? extends PropertyMatcher<?>> pParameters)
    {
        // NOTE(review): deliberately returns null -- a Get Capabilities
        // request takes no extra parameters; confirm callers tolerate null.
        return null;
    }

    /**
     * {@inheritDoc}
     *
     * @see io.opensphere.wps.envoy.AbstractWpsEnvoy#fireStateEvent(java.lang.String)
     */
    @Override
    protected void fireStateEvent(String errorMessage)
    {
        // A non-null errorMessage marks the event as an error state.
        fireStateEvent(getServer(), errorMessage != null, errorMessage, getToolbox().getEventManager(), myServerDataGroup);
    }
}
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 19:11:55 2019
@author: DHANUSH
"""
import sys
class PDA:
    """Pushdown automaton for simple arithmetic expressions.

    Each alphabet class is stored as a plain string; membership of an input
    character in a class is tested with ``in``.  ``pda_state_dic`` maps a
    state number to a dict keyed by those class strings, whose values are
    the successor states.
    """

    def __init__(self):
        # Language definition: identifier characters, digits, delimiters,
        # and the arithmetic operators.
        self.C = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
        self.N = "0123456789"
        self.start = "$"
        self.lparen = "("
        self.rparen = ")"
        self.op = "+-*/%"
        # Runtime stack used by the simulation driver.
        self.stack = []
        # Transition table: state -> {character-class -> next state}.
        self.pda_state_dic = {
            1: {self.start: 2},
            2: {self.lparen: 2, self.C: 4, self.N: 3},
            3: {self.op: 2, self.N: 3, self.rparen: 5, self.start: 6},
            4: {self.C: 4, self.op: 2, self.start: 6, self.N: 4, self.rparen: 5},
            5: {self.rparen: 5, self.start: 6, self.op: 2},
        }
def main():
    """Interactive driver: repeatedly read strings and simulate the PDA.

    Exits via sys.exit(0) when the user declines to enter another string.
    """
    print("Project 2 for CS 341")
    print("Semester: Spring 2019")
    print("Written By: <NAME>, ds676")
    print("Instructor: <NAME>, <EMAIL>")
    print()
    while True:
        user_input = input("Do you want to enter a string y/n")
        if user_input == "y":
            string = input("Please Enter A String")
        else:
            # print("OKAY BYE")
            sys.exit(0)
        pda = PDA()
        pda_dic = pda.pda_state_dic
        state = 1
        stack = pda.stack
        # Bug fix: initialise before the loop so that an empty input string
        # no longer raises NameError at the acceptance check below.
        not_accept = False
        for i in string:
            next_state = False
            # prints current state
            print("current state is ", state)
            for key in pda_dic[state]:
                if i in key:
                    # going to next_states
                    state = pda_dic[state][key]
                    next_state = True
                    break
            if not next_state:
                print("Not Accepted")
                not_accept = True
                break
            print("character is ", i)
            # Push/Pop from the stack
            # NOTE(review): stack.pop() below raises IndexError on an
            # unmatched ')' or '$' with an empty stack -- confirm intended.
            if i == pda.start and state == 2:
                stack.append("$")
                print("Symbol: '$' was pushed to stack")
            elif i == pda.lparen:
                stack.append(pda.lparen)
                print("Symbol: '", pda.lparen, "' was pushed to stack")
            elif i == pda.rparen:
                if stack.pop() != pda.lparen:
                    print("Not accepted")
                    not_accept = True
                    break
                print("Symbol: '", pda.lparen, "' was popped from stack")
            elif i == pda.start:
                if stack.pop() != pda.start:
                    print("Not accepted")
                    not_accept = True
                    break
                print("Symbol: '$' was popped from stack")
        # check if final state and stack is empty
        if state == 6 and len(stack) == 0:
            print("state is: 6, this is final state and an accepting state")
            print("Accepted")
        elif not not_accept:
            print("Not accepted")
# Run the interactive PDA driver only when executed as a script.
if __name__ == "__main__":
    main()
|
import sys  # NOTE(review): appears unused in the visible code -- confirm before removing

from computer import Computer

# Interactive loop around the Computer VM: run it, print its output buffer
# as ASCII text, read a command from the user, feed the command back as
# character codes, and repeat until the user types "exit".
c = Computer('program.txt')
c.run()
while True:
    # Drain and display the machine's output as text.
    print(''.join([chr(n) for n in c.output()]))
    cmd = input()
    if cmd == 'exit':
        break
    # The VM expects newline-terminated ASCII input.
    cmd += '\n'
    c.input([ord(n) for n in cmd])
    c.run()
# take mutex, asterisk, space law space brochure, and food ration
|
package com.common.biz.help;

/**
 * ARouter path constants for the help module.
 *
 * @author Administrator
 */
public class HelpArouterConstant {
    // Route group shared by all help-module paths.
    private static final String BIZ_GROUP = "/help/";

    // Route for the help detail page: "/help/help_detail".
    public static final String HELP_DETAIL = BIZ_GROUP + "help_detail";
}
|
import React from 'react'
import { Route } from 'react-router'
import RedirectRoute from 'router/RedirectRoute'
// Route wrapper that gates `component` behind authentication state.
// Decision order matters: non-auth fallback, signup redirect, return-URL
// capture (side effect), login redirect, then the authenticated render.
export default function AuthRoute ({
  component,
  nonAuthComponent,
  requireAuth,
  isLoggedIn,
  currentUser,
  returnToOnAuth,
  setReturnToURL,
  location,
  ...rest
}) {
  // Logged-out visitors get the dedicated non-auth component when provided.
  if (!isLoggedIn && nonAuthComponent) {
    return <Route {...rest} render={props => React.createElement(nonAuthComponent, props)} />
  }
  // Already-authenticated users skip the signup form and resume onboarding.
  if (isLoggedIn && location.pathname === '/signup') {
    return <RedirectRoute to={'/signup/upload-photo'} />
  }
  // On mobile we want to only store the intended URL and forward to the
  // download app modal (which is currently on the Login component/page)
  // Specifically we don't want any components to do any work but this,
  // namely JoinCommunity which utilizes returnToOnAuth) and may attempt
  // to auth the user with a token and send them into sign-up.
  if (
    (!isLoggedIn && (requireAuth || returnToOnAuth))
  ) {
    // Side effect: must run before the login redirect below so the
    // post-login flow can return the user to the page they requested.
    setReturnToURL(location.pathname + location.search)
  }
  if (
    (!isLoggedIn && requireAuth)
  ) {
    return <RedirectRoute to={'/login'} />
  }
  // Authenticated (or auth-optional) path: render the real component.
  return <Route {...rest} render={props => React.createElement(component, props)} />
}
|
<reponame>tpolecat/metals
package tests.jsonrpc
import java.nio.ByteBuffer
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import com.typesafe.scalalogging.LazyLogging
import io.circe.syntax._
import monix.eval.Task
import monix.execution.schedulers.TestScheduler
import monix.reactive.Observable
import org.langmeta.jsonrpc._
import tests.MegaSuite
object BaseProtocolMessageTest extends MegaSuite with LazyLogging {
  // A single JSON-RPC request serialized once; every test below re-parses
  // copies or slices of these bytes.
  val request = Request("method", Some("params".asJson), RequestId(1))
  val message = BaseProtocolMessage(request)
  val byteArray = MessageWriter.write(message).array()
  val byteArray2 = byteArray ++ byteArray
  // Fresh read-only view of the serialized message for each call.
  def bytes: ByteBuffer = ByteBuffer.wrap(byteArray)
  test("toString") {
    assertNoDiff(
      message.toString,
      """|Content-Length: 62
         |
         |{"method":"method","params":"params","id":"1","jsonrpc":"2.0"}""".stripMargin
    )
  }
  val s = TestScheduler()
  // Runs the task on the deterministic TestScheduler, manually ticking
  // until the queue drains, then awaits the result.
  def await[T](f: Task[T]): T = {
    val a = f.runAsync(s)
    while (s.tickOne()) ()
    Await.result(a, Duration("5s"))
  }
  // Parses a stream of byte buffers into the messages they contain.
  def parse(buffers: List[ByteBuffer]): List[BaseProtocolMessage] = {
    val buf = List.newBuilder[BaseProtocolMessage]
    val t = BaseProtocolMessage
      .fromByteBuffers(Observable(buffers: _*), logger)
      // NOTE(olafur) toListL will not work as expected here, it will send onComplete
      // for the first onNext, even when a single ByteBuffer can contain multiple
      // messages
      .foreachL(buf += _)
    await(t)
    buf.result()
  }
  def pairs(n: Int): List[(ByteBuffer, BaseProtocolMessage)] =
    1.to(n).toList.map(_ => bytes -> message)
  // parse-0 .. parse-4: n whole messages arriving in n separate buffers.
  0.to(4).foreach { i =>
    test(s"parse-$i") {
      val (buffers, messages) = pairs(i).unzip
      assertEquals(parse(buffers), messages)
    }
  }
  // Asserts that the given buffer arrangement parses into exactly two
  // copies of `message`.
  def checkTwoMessages(name: String, buffers: List[ByteBuffer]): Unit = {
    test(name) {
      val obtained = parse(buffers)
      val expected = List(message, message)
      assertEquals(obtained, expected)
    }
  }
  def array: ByteBuffer = ByteBuffer.wrap(byteArray)
  def take(n: Int): ByteBuffer = ByteBuffer.wrap(byteArray.take(n))
  def drop(n: Int): ByteBuffer = ByteBuffer.wrap(byteArray.drop(n))
  // Two messages fused into a single buffer.
  checkTwoMessages(
    "combined",
    ByteBuffer.wrap(byteArray2) ::
      Nil
  )
  // First message split mid-header, followed by a whole one.
  checkTwoMessages(
    "chunked",
    take(10) ::
      drop(10) ::
      array ::
      Nil
  )
  // A middle chunk straddling the boundary between the two messages.
  checkTwoMessages(
    "chunked2",
    take(10) ::
      ByteBuffer.wrap(drop(10).array() ++ take(10).array()) ::
      drop(10) ::
      Nil
  )
  test("chunked-property") {
    // Exhaustively split the two-message stream at every byte offset.
    0.to(byteArray2.length).foreach { i =>
      val buffers =
        ByteBuffer.wrap(byteArray2.take(i)) ::
          ByteBuffer.wrap(byteArray2.drop(i)) ::
          Nil
      val obtained = parse(buffers)
      val expected = List(message, message)
      assertEquals(obtained, expected, hint = s"chunked, i=$i")
    }
  }
}
|
const axios = require("axios");

/**
 * aoi.js function: resolves to true when the given URL serves an image
 * (based on the Content-Type response header), false otherwise.
 */
module.exports = async (d) => {
  const data = d.util.aoiFunc(d);
  if (data.err) return d.error(data.err);

  const [link] = data.inside.splits;

  let response = false;
  try {
    // Bug fix: use the destructured first argument. The previous code read
    // `data.inside.inside`, which is undefined on the parsed data, so the
    // request always threw and the result was unconditionally false.
    response = await axios
      .get(link.addBrackets())
      .then((res) => res.headers["content-type"].startsWith("image"));
  } catch (e) {
    console.error(e);
    // Network errors, non-image responses without the header, or bad URLs
    // all resolve to false.
    response = false;
  }

  data.result = response;
  return {
    code: d.util.setCode(data),
  };
};
|
/**
*/
package PhotosMetaModel.impl;
import PhotosMetaModel.PhotosMetaModelPackage;
import PhotosMetaModel.Render;
import org.eclipse.emf.ecore.EClass;
/**
* <!-- begin-user-doc -->
* An implementation of the model object '<em><b>Render</b></em>'.
* <!-- end-user-doc -->
*
* @generated
*/
// EMF-generated implementation class; regeneration overwrites hand edits
// unless the @generated tags are changed to @generated NOT.
public class RenderImpl extends ReactFunctionsImpl implements Render {
    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    protected RenderImpl() {
        super();
    }

    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    protected EClass eStaticClass() {
        return PhotosMetaModelPackage.Literals.RENDER;
    }

} //RenderImpl
|
<filename>admin/js/common.js
/**
 * Shared helper: POSTs `data` to the given AJAX endpoint and injects the
 * returned HTML fragment into the #tableContent element. Factored out of
 * five previously duplicated functions.
 */
function xhrFillTableContent(url, data) {
    $.ajax({
        url: url,
        type: "post",
        data: data,
        success: function (result) {
            $('#tableContent').html(result);
        }
    });
}

// Refreshes the books table, filtered by the optional search text.
function fillTableLivros(txt='') {
    xhrFillTableContent("AJAXFillLivros.php", { txt: txt });
}

// Refreshes the authors table, filtered by the optional search text.
function fillTableAutores(txt='') {
    xhrFillTableContent("AJAXFillAutores.php", { txt: txt });
}

// Refreshes the users table, filtered by the optional search text.
function fillTableUtilizadores(txt='') {
    xhrFillTableContent("AJAXFillUtilizadores.php", { txt: txt });
}

// Loads the author picker content, filtered by the optional search text.
function fillAutores(txt='') {
    xhrFillTableContent("AJAXAutores.php", { txt: txt });
}

// Loads the author picker in edit mode for the given record id.
function fillAutoresEdita(txt='', id=-1) {
    xhrFillTableContent("AJAXAutoresEdita.php", { txt: txt, id: id });
}
|
import pyro
import pyro.distributions as dist
import torch
def joint_probability(w_p, u_p):
    """Draw one Bernoulli sample each for "w" and "u" and return their product.

    NOTE(review): despite the name, this returns the product of two 0/1
    *samples* (1.0 only when both draws came up 1), not the analytic joint
    probability w_p * u_p -- confirm which was intended.

    :param w_p: success probability for the "w" Bernoulli draw
    :param u_p: success probability for the "u" Bernoulli draw
    :return: float, w_sample * u_sample (0.0 or 1.0)
    """
    w_sample = pyro.sample("w", dist.Bernoulli(w_p))
    u_sample = pyro.sample("u", dist.Bernoulli(u_p))
    joint_prob = (w_sample.item() * u_sample.item())  # product of the two samples
    return joint_prob
|
def is_power_of_two(n):
    """Return True if n is a positive power of two greater than 1.

    O(1) bit trick replacing the original O(log n) loop of repeated
    exponentiations: n & (n - 1) clears the lowest set bit, so the result
    is zero exactly when n has a single bit set.

    NOTE(review): like the original loop (which started at 2**1), this
    returns False for n == 1 even though 1 == 2**0 -- confirm that
    exclusion is intended.
    """
    return n > 1 and (n & (n - 1)) == 0
|
# frozen_string_literal: true
module ActiveWebhook
  # Minimal stdout-backed logger exposing the conventional severity API
  # (info/debug/error/warn). Every level simply echoes the message.
  class Logger
    %i[info debug error warn].each do |severity|
      define_method(severity) { |msg| puts msg }
    end
  end
end
|
load helpers

# Creates three nested stacker build contexts exercising prerequisite
# ordering: sub2 depends on sub1; sub3 depends on both sub1 and sub2.
# NOTE(review): YAML indentation inside the heredocs was lost in extraction
# and has been reconstructed conventionally -- verify against the repo.
function setup() {
    cleanup
    rm -rf ocibuilds || true
    mkdir -p ocibuilds/sub1
    touch ocibuilds/sub1/import1
    cat > ocibuilds/sub1/stacker.yaml <<EOF
layer1_1:
  from:
    type: docker
    url: docker://centos:latest
  import:
    - import1
  run: |
    cp /stacker/import1 /root/import1
layer1_2:
  from:
    type: docker
    url: docker://centos:latest
  run:
    touch /root/import0
EOF
    mkdir -p ocibuilds/sub2
    touch ocibuilds/sub2/import2
    cat > ocibuilds/sub2/stacker.yaml <<EOF
config:
  prerequisites:
    - ../sub1/stacker.yaml
layer2:
  from:
    type: built
    tag: layer1_1
  import:
    - import2
  run: |
    cp /stacker/import2 /root/import2
    cp /root/import1 /root/import1_copied
EOF
    mkdir -p ocibuilds/sub3
    cat > ocibuilds/sub3/stacker.yaml <<EOF
config:
  prerequisites:
    - ../sub1/stacker.yaml
    - ../sub2/stacker.yaml
layer3_1:
  from:
    type: built
    tag: layer2
  run: |
    cp /root/import2 /root/import2_copied
layer3_2:
  from:
    type: built
    tag: layer1_2
  run: |
    cp /root/import0 /root/import0_copied
EOF
}
# Removes the build contexts created by setup() after each test.
function teardown() {
    cleanup
    rm -rf ocibuilds || true
}
# Verifies that --order-only prints the dependency-derived build order:
# sub1 first, then sub2, then sub3.
@test "order prerequisites" {
    # Bug fix: ${lines[...]} is only populated by bats' `run`; invoking
    # stacker directly left the array unset/stale for the checks below.
    run stacker build -f ocibuilds/sub3/stacker.yaml --order-only
    [ "$status" -eq 0 ]
    [[ "${lines[-1]}" =~ ^(2 build .*ocibuilds\/sub3\/stacker\.yaml: requires: \[.*\/sub1\/stacker\.yaml .*\/sub2\/stacker\.yaml\])$ ]]
    [[ "${lines[-2]}" =~ ^(1 build .*ocibuilds\/sub2\/stacker\.yaml: requires: \[.*\/sub1\/stacker\.yaml\])$ ]]
    [[ "${lines[-3]}" =~ ^(0 build .*ocibuilds\/sub1\/stacker\.yaml: requires: \[\])$ ]]
}
# Builds the full prerequisite chain, then unpacks both sub3 layers and
# checks the files copied through each layer actually exist.
@test "build layers and prerequisites" {
    stacker build -f ocibuilds/sub3/stacker.yaml
    mkdir dest
    # Bug fix: $status is only set by bats' `run`; the original invoked
    # umoci directly, so the [ "$status" -eq 0 ] checks tested an
    # unset/stale value.
    run umoci unpack --image oci:layer3_1 dest/layer3_1
    [ "$status" -eq 0 ]
    [ -f dest/layer3_1/rootfs/root/import2_copied ]
    [ -f dest/layer3_1/rootfs/root/import2 ]
    [ -f dest/layer3_1/rootfs/root/import1_copied ]
    [ -f dest/layer3_1/rootfs/root/import1 ]
    run umoci unpack --image oci:layer3_2 dest/layer3_2
    [ "$status" -eq 0 ]
    [ -f dest/layer3_2/rootfs/root/import0_copied ]
    [ -f dest/layer3_2/rootfs/root/import0 ]
}
|
-- For each user, the timestamp of their most recent post. LEFT JOIN keeps
-- users with no posts (latest_posting_date will be NULL for them).
-- NOTE(review): u.name is selected without being grouped; this relies on
-- u.id being the primary key (functional-dependency grouping) and is not
-- portable to all SQL engines.
SELECT u.id, u.name, max(p.created_at) AS latest_posting_date
FROM users u
LEFT JOIN posts p
ON u.id = p.user_id
GROUP BY u.id
|
#SBATCH -t 00:15:00
#SBATCH --nodes=1
#SBATCH --tasks-per-node=1
#SBATCH --cpus-per-task=24
#SBATCH -A p_readex
#SBATCH --mem-per-cpu=2500M
# Resolve the working directory both under a batch scheduler and when run
# interactively.
# NOTE(review): PBS variables are checked inside a SLURM (#SBATCH) script;
# confirm which scheduler is actually in use.
if [ "$PBS_ENVIRONMENT" == "PBS_BATCH" ]; then
    export FM_DIR=$PBS_O_WORKDIR
else
    export FM_DIR=$(pwd)
fi
# Load the READEX and machine-specific build environment.
source readex_env/set_env_rdd.source
source scripts_$READEX_MACHINE/environment.sh
cd $FM_DIR/OpenFOAM-v1612+/
#edit make rules - wmake/rules/linux64Gcc/c++
# Compiler wrapper: instrument the build with Score-P (manual MERIC regions).
export FOAM_CC="scorep --online-access --user --mpp=mpi --thread=none --nomemory --nocompiler g++ -std=c++11 -m64 -I$MERIC_ROOT/include -DUSE_SCOREP_MANUAL"
source $FM_DIR/OpenFOAM-v1612+/etc/bashrc
export WM_NCOMPPROCS=24
export BOOST_ARCH_PATH=$BOOST_ROOT
# Rebuild the simpleFoam solver with the instrumented toolchain.
cd applications/solvers/incompressible/simpleFoam/
wclean
wmake
|
# Shell aliases for a macOS zsh setup (pbcopy/dscacheutil are macOS-only).
# Shortcuts
alias copyssh="pbcopy < $HOME/.ssh/id_ed25519.pub"
alias reloadshell="source $HOME/.zshrc"
alias reloaddns="dscacheutil -flushcache && sudo killall -HUP mDNSResponder"
alias ll="/usr/local/opt/coreutils/libexec/gnubin/ls -AhlFo --color --group-directories-first"
alias phpstorm='open -a /Applications/PhpStorm.app "`pwd`"'
alias shrug="echo '¯\_(ツ)_/¯' | pbcopy"
alias c="clear"
# Directories
alias dotfiles="cd $DOTFILES"
alias library="cd $HOME/Library"
# Laravel
alias art="php artisan"
alias fresh="php artisan migrate:fresh --seed"
alias seed="php artisan db:seed"
# PHP
alias cfresh="rm -rf vendor/ composer.lock && composer i"
# alias php74="docker run -it -w /data -v ${PWD}:/data:delegated --entrypoint php --rm registry.gitlab.com/grahamcampbell/php:7.4"
# alias php8="docker run -it -w /data -v ${PWD}:/data:delegated --entrypoint php --rm registry.gitlab.com/grahamcampbell/php:8.0"
alias composer="php -d memory_limit=-1 /usr/local/bin/composer"
# JS
alias nfresh="rm -rf node_modules/ package-lock.json && npm install"
alias watch="npm run watch"
# Docker
alias docker-composer="docker-compose"
alias dc="docker-compose"
# Git
alias gst="git status"
alias gb="git branch"
alias gc="git checkout"
alias gl="git log --oneline --decorate --color"
alias amend="git add . && git commit --amend --no-edit"
alias commit="git add . && git commit -m"
alias diff="git diff"
alias force="git push --force"
alias nuke="git clean -df && git reset --hard"
alias pop="git stash pop"
alias pull="git pull"
alias push="git push"
alias resolve="git add . && git commit --no-edit"
alias stash="git stash -u"
alias unstage="git restore --staged ."
# "wip" expands through the "commit" alias above: stages everything and
# commits with the message "wip".
alias wip="commit wip"
|
#!/bin/sh
# Mounts a 256MB tmpfs working area at ./tmp, runs the AeroMixer app from
# bin/, then unmounts the tmpfs on the way out.
echo 'sh: mount ./tmp'
sudo mount -t tmpfs -o size=256m /dev/shm ./tmp
echo 'sh: start'
cd bin
./AeroMixer.py
cd ..
echo 'sh: umount ./tmp'
sudo umount ./tmp
|
#!/bin/bash
# Regenerates protobuf bindings for the tree. Skipped entirely when
# PROTO_OPTIONAL=1; otherwise requires protoc 3.0.x plus the repo's
# generator binaries.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
if [[ "${PROTO_OPTIONAL:-}" == "1" ]]; then
  os::log::warning "Skipping protobuf generation as \$PROTO_OPTIONAL is set."
  exit 0
fi
os::util::ensure::system_binary_exists 'protoc'
# Hard requirement: only the 3.0.x series produces compatible output.
if [[ "$(protoc --version)" != "libprotoc 3.0."* ]]; then
  os::log::fatal "Generating protobuf requires protoc 3.0.x. Please download and
install the platform appropriate Protobuf package for your OS:
https://github.com/google/protobuf/releases
To skip protobuf generation, set \$PROTO_OPTIONAL."
fi
os::util::ensure::gopath_binary_exists 'goimports'
os::build::setup_env
os::util::ensure::built_binary_exists 'genprotobuf'
os::util::ensure::built_binary_exists 'protoc-gen-gogo' vendor/k8s.io/kube-gen/cmd/go-to-protobuf/protoc-gen-gogo
# Emit generated files into GOPATH, forwarding any extra flags.
genprotobuf --output-base="${GOPATH}/src" "$@"
|
#!/bin/bash -e
# Install ODBC libraries for SOCI at travis-ci.org
#
# Copyright (c) 2013 Mateusz Loskot <mateusz@loskot.net>
#
source ${TRAVIS_BUILD_DIR}/bin/ci/common.sh
# Install unixODBC headers plus the MySQL and PostgreSQL ODBC drivers.
sudo apt-get install -qq \
    tar bzip2 \
    unixodbc-dev \
    libmyodbc odbc-postgresql
# Register the MySQL ODBC driver with unixODBC.
sudo odbcinst -i -d -f /usr/share/libmyodbc/odbcinst.ini
|
package no.mnemonic.commons.utilities.lambda;
import java.util.Collection;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;
import java.util.function.Consumer;
import java.util.function.Supplier;
import java.util.stream.Stream;
public class LambdaUtils {
  // Utility class: private constructor prevents instantiation.
  private LambdaUtils() {
  }
/**
* Simple helper to wait for a predicate to return true. Will test predicate every 100ms until true or timeout.
*
* @param booleanSupplier the predicate
* @param waitTime how long to wait before giving up
* @param timeUnit time unit of waitTime
* @return true if predicate returns true within timeout, false if timed out without getting true value
* @throws InterruptedException if interrupted during sleep.
*/
public static boolean waitFor(BooleanSupplier booleanSupplier, long waitTime, TimeUnit timeUnit) throws InterruptedException {
long timeout = System.currentTimeMillis() + timeUnit.toMillis(waitTime);
while (System.currentTimeMillis() < timeout) {
if (booleanSupplier.getAsBoolean()) return true;
Thread.sleep(100);
}
return false;
}
/**
* Call provided task, ignore any exception thrown.
* Convenience method to call a method/lambda without having to wrap with try/catch
*
* @param callable task to call
* @return true if task was successful, false if exception was caught
*/
public static boolean tryTo(ExceptionalTask callable) {
return tryTo(callable, e->{});
}
/**
* Call provided task, ignore any exception thrown, instead passing it to a provided exception handler.
* Convenience method to call a method/lambda without having to wrap with try/catch
*
* <code>
* LambdaUtils.tryTo(()->doSomethingThatMightThrowAnException(), error->LOGGER.warn("Error doing something", error));
* </code>
*
* @param callable task to call
* @param onException consumer to provide any exception caught
* @return true if task was successful, false if exception was caught
*/
public static boolean tryTo(ExceptionalTask callable, Consumer<Throwable> onException) {
if (callable == null) return false;
try {
callable.call();
return true;
} catch (Exception e) {
notifyException(onException, e);
return false;
}
}
/**
* Try to perform operation on multiple values, ignoring any exception thrown.
* Convenience method to call forEach on a collection of values without having to use try/catch in lambda
*
* @param values values
* @param consumer consumer to handle value, which may throw exception
* @param <T> value type
*/
public static <T> void forEachTry(Collection<T> values, ExceptionalConsumer<T, ? extends Exception> consumer) {
forEachTry(values, consumer, e->{});
}
/**
* Try to perform operation on multiple values, ignoring any exception thrown, instead pass any error to exception consumer.
* Convenience method to call forEach on a collection of values without having to use try/catch in lambda
*
* @param values values
* @param consumer consumer to handle value, which may throw exception
* @param onException exception handler to pass any exception to. Might be called once for every invocation of consumer.
* @param <T> value type
*/
public static <T, E extends Exception> void forEachTry(Collection<T> values, ExceptionalConsumer<T, E> consumer, Consumer<Throwable> onException) {
if (values == null) return;
if (consumer == null) return;
values.forEach(v->{
try {
consumer.accept(v);
} catch (Throwable t) {
notifyException(onException, t);
}
});
}
/**
* Invoke callable to fetch result and return it. If exception is thrown, return default value instead.
* Use this method to avoid try/catch blocks where the expected exception should result in a default value.
*
* @param supplier callable to fetch result from
* @param defaultValue value to return if callable fails
* @param <T> value type
* @return the value from the callable, or defaultValue on exception
*/
public static <T> T tryResult(Callable<T> supplier, T defaultValue) {
return tryResult(supplier, ()->defaultValue, e->{});
}
/**
* Invoke callable to fetch result and return it. If exception is thrown, return value from defaultValueSupplier instead.
* Notify onException of any exception caught.
* Use this method to avoid try/catch blocks where the expected exception should result in a default value.
*
* @param supplier callable to fetch result from
* @param defaultValueSupplier supplier to fetch default value from
* @param onException exception consumer to notify on exception
* @param <T> value type
* @return the value from the callable, or defaultValue on exception
*
* @see #tryResult(Callable, Object)
*/
public static <T> T tryResult(Callable<T> supplier, Supplier<T> defaultValueSupplier, Consumer<Throwable> onException) {
if (supplier == null) throw new IllegalArgumentException("supplier not set");
if (defaultValueSupplier == null) throw new IllegalArgumentException("defaultValueSupplier not set");
if (onException == null) throw new IllegalArgumentException("onException not set");
try {
return supplier.call();
} catch (Exception e) {
notifyException(onException, e);
return defaultValueSupplier.get();
}
}
/**
* Wrap stream into a TryStream.
* A TryStream allows using map/filter lambdas which throws checked exceptions.
* Any exception thrown will be caught and rethrown by this method, this is just a convenience method
* to avoid having to use try/catch inside your lambdas.
*
* @param stream any stream
* @param <T> stream type
* @param <E> checked exception
* @return a TryStream wrapping the stream
* @see TryStream
*/
public static <T, E extends Exception> TryStream<T, E> tryStream(Stream<T> stream) {
if (stream == null) return null;
return new TryStreamImpl<>(stream);
}
public static <T, E extends Exception> TryStream<T, E> tryStream(Collection<T> collection) {
if (collection == null) return null;
return tryStream(collection.stream());
}
//helpers
private static void notifyException(Consumer<Throwable> onException, Throwable t) {
if (onException == null) return;
try {
onException.accept(t);
} catch (Throwable ignored) {
//ignore
}
}
}
|
class BankAccount:
    """A simple account that enforces a minimum balance on withdrawals."""

    def __init__(self, initial_balance, minimum_balance):
        # Current funds, and the floor the balance may never drop below.
        self.balance = initial_balance
        self.minimum_balance = minimum_balance

    def deposit(self, amount):
        """Add `amount` to the balance."""
        self.balance += amount

    def withdraw(self, amount):
        """Deduct `amount` only if the remaining balance stays at or above the minimum."""
        remaining = self.balance - amount
        if remaining < self.minimum_balance:
            print("Withdrawal denied: Insufficient funds to maintain minimum balance")
            return
        self.balance = remaining

    def check_balance(self):
        """Return the current balance."""
        return self.balance
|
#!/bin/sh
# Delete all image files (jpeg/jpg/png in either case) from a directory.
# Usage: $0 [directory]   (defaults to pub/media/import/)
if [ $# = 0 ]; then
    DIRECTORY="pub/media/import/"
else
    DIRECTORY="$1"
fi
# BUG FIX: if cd fails (missing dir, bad argument) the rm below would run in
# the CURRENT directory and delete the wrong files. Abort instead.
cd "$DIRECTORY" || exit 1
rm -f *.jpeg *.JPEG *.jpg *.JPG *.png *.PNG
exit 0
|
#!/bin/bash
# Entry point: wire up the bash library path, logging and module imports,
# create users, run the REPL, and guarantee user teardown on exit.
THIS_DIR="$(cd "$(dirname "$(readlink -f "$0")")" && pwd)"
LIB_DIR="$(cd "$THIS_DIR/lib" && pwd)"
# shellcheck disable=SC2034
BASH_LIBRARY_PATH="$LIB_DIR"
# shellcheck disable=SC1090
source "$LIB_DIR/libimport.bash"
bash_import libloglevel.bash
# Default to log level 6 unless the caller sets LOGLEVEL.
[[ -z $LOGLEVEL ]] && LOGLEVEL=6
set_loglevel "$LOGLEVEL"
bash_import libassert.bash
bash_source ./mod/repl.bash # repl
bash_source ./mod/user.bash # init_users, fini_users
main() {
    repl
}
init_users
# EXIT trap ensures fini_users runs even if main exits abnormally.
trap 'fini_users' EXIT
main "$@"
|
import cv2
import numpy as np
def count_faces(image: np.ndarray, haar_cascade: cv2.CascadeClassifier) -> int:
    """Return the number of faces the given Haar cascade detects in `image` (BGR)."""
    # Haar cascades operate on single-channel input, so drop the color first.
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Same tuning as before: 10% image-pyramid steps, and at least 4
    # overlapping neighbor detections required to accept a face.
    detections = haar_cascade.detectMultiScale(grayscale, scaleFactor=1.1, minNeighbors=4)
    return len(detections)
|
#!/bin/sh
# Run release-time database migrations, but only when explicitly requested via
# DB_MIGRATE=true and the release binary exists.
# BUG FIX: the original used bash-only `[[ ... ]]` under #!/bin/sh, which fails
# on POSIX shells (dash, busybox ash). POSIX `[ ... ]` with `=` works everywhere.
if [ "${DB_MIGRATE}" = "true" ] && [ -f "./bin/otp_verification_api" ]; then
  echo "[WARNING] Migrating database!"
  ./bin/otp_verification_api command "Elixir.Core.ReleaseTasks" migrate
fi
|
// Barrel file: re-export the loai-sach controller, module and service so
// consumers can import them from this directory in one statement.
export * from "./loai-sach.controller"
export * from "./loai-sach.module"
export * from "./loai-sach.service"
|
package io.opensphere.osh.results.video;
import java.awt.Color;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import io.opensphere.controlpanels.animation.event.ShowTimelineEvent;
import io.opensphere.controlpanels.animation.model.ViewPreference;
import io.opensphere.core.Toolbox;
import io.opensphere.core.order.impl.DefaultOrderCategory;
import io.opensphere.core.order.impl.DefaultOrderParticipantKey;
import io.opensphere.core.util.ChangeListener;
import io.opensphere.core.util.collections.New;
import io.opensphere.core.util.io.CancellableInputStream;
import io.opensphere.core.util.lang.ThreadUtilities;
import io.opensphere.core.util.taskactivity.CancellableTaskActivity;
import io.opensphere.mantle.data.ActivationState;
import io.opensphere.mantle.data.DataTypeInfo;
import io.opensphere.mantle.data.LoadsTo;
import io.opensphere.mantle.data.MapVisualizationType;
import io.opensphere.mantle.data.PlayState;
import io.opensphere.mantle.data.impl.DefaultBasicVisualizationInfo;
import io.opensphere.mantle.data.impl.DefaultMapFeatureVisualizationInfo;
import io.opensphere.osh.model.BinaryEncoding;
import io.opensphere.osh.model.OSHDataTypeInfo;
import io.opensphere.osh.model.Offering;
import io.opensphere.osh.model.Output;
import io.opensphere.osh.results.ResultHandler;
import io.opensphere.osh.util.AnimationPlayer;
import io.opensphere.osh.util.OSHQuerier;
/**
 * Handles video and series of images.
 */
public class VideoResultHandler implements ResultHandler
{
    /** The toolbox. */
    private final Toolbox myToolbox;

    /** The data registry querier. */
    private final OSHQuerier myQuerier;

    /** The video frame controller. */
    private final VideoFrameController myVideoFrameController;

    /** The play state listeners, to prevent garbage collection. */
    private final List<ChangeListener<PlayState>> myPlayStateListeners = New.list();

    /** Whether we're handling the first image. */
    private boolean myFirstTime = true;

    /**
     * Constructor.
     *
     * @param toolbox The toolbox
     * @param querier The data registry querier
     */
    public VideoResultHandler(Toolbox toolbox, OSHQuerier querier)
    {
        myToolbox = toolbox;
        myQuerier = querier;
        // The frame controller is opened for the lifetime of this handler.
        myVideoFrameController = new VideoFrameController(toolbox, querier);
        myVideoFrameController.open();
    }

    @Override
    public List<Output> canHandle(List<Output> outputs)
    {
        // Only outputs advertising the SensorML VideoFrame property are handled here.
        List<Output> canHandles = New.list();
        for (Output output : outputs)
        {
            if (output.getProperties().contains("http://sensorml.com/ont/swe/property/VideoFrame"))
            {
                canHandles.add(output);
            }
        }
        return canHandles;
    }

    @Override
    public void initializeType(OSHDataTypeInfo dataType)
    {
        // Motion-imagery layer that loads to the timeline only, rendered white.
        dataType.setBasicVisualizationInfo(new DefaultBasicVisualizationInfo(LoadsTo.TIMELINE,
                DefaultBasicVisualizationInfo.LOADS_TO_TIMELINE_ONLY, Color.WHITE, false));
        dataType.setMapVisualizationInfo(new DefaultMapFeatureVisualizationInfo(MapVisualizationType.MOTION_IMAGERY));
        dataType.setOrderKey(new DefaultOrderParticipantKey(DefaultOrderCategory.DEFAULT_IMAGE_LAYER_FAMILY,
                DefaultOrderCategory.IMAGE_OVERLAY_CATEGORY, dataType.getTypeKey()));
        dataType.getStreamingSupport().setStreamingEnabled(true);
        // Keep a strong reference to the listener; the streaming support holds it weakly
        // (see myPlayStateListeners doc above).
        ChangeListener<PlayState> playStateListener = (obs, o, n) -> handlePlayStateChange(dataType, n);
        myPlayStateListeners.add(playStateListener);
        dataType.getStreamingSupport().getPlayState().addListener(playStateListener);
        myToolbox.getUIRegistry().getTimelineRegistry().addLayer(dataType.getOrderKey(), dataType.getDisplayName(),
                dataType.getBasicVisualizationInfo().getTypeColor(), dataType.isVisible());
    }

    @Override
    public String getQueryProperty(Offering offering, Output output)
    {
        return "http://sensorml.com/ont/swe/property/VideoFrame";
    }

    @Override
    public void handleResults(OSHDataTypeInfo dataType, List<Output> outputs, List<CancellableInputStream> streams)
        throws IOException
    {
        myVideoFrameController.addDataType(dataType);
        try (CancellableTaskActivity ta = CancellableTaskActivity.createActive("Querying OpenSensorHub results"))
        {
            myToolbox.getUIRegistry().getMenuBarRegistry().addTaskActivity(ta);
            // streams is parallel to outputs: stream at `index` carries the data
            // for the output at the same position.
            int index = 0;
            for (Output output : outputs)
            {
                CancellableInputStream stream = streams.get(index);
                Output resultTemplate = dataType.getResultTemplate(output);
                // An H264 field anywhere in the template means a video stream;
                // otherwise the output is treated as a series of still images.
                boolean isVideo = resultTemplate.getFields().stream().anyMatch(f -> "H264".equals(f.getDataType()));
                dataType.setVideo(isVideo);
                VideoProcessor processor = isVideo ? new VideoVideoProcessor(myToolbox, myQuerier)
                        : new ImageVideoProcessor(myToolbox, myQuerier);
                List<String> dataTypes = ((BinaryEncoding)resultTemplate.getEncoding()).getDataTypes();
                List<VideoFieldHandler> fieldHandlers = getFieldHandlers(dataTypes);
                processor.processData(dataType, stream, ta, fieldHandlers);
                index++;
            }
        }
    }

    @Override
    public void handleGroupActivation(DataTypeInfo dataType, ActivationState state)
    {
        if (state == ActivationState.INACTIVE)
        {
            myVideoFrameController.hideWindow(dataType);
        }
    }

    /**
     * Gets the handlers for the data types.
     *
     * @param dataTypes the data types
     * @return the fields (one entry per data type; entries for unrecognized
     *         data types are null placeholders to keep positions aligned)
     */
    private List<VideoFieldHandler> getFieldHandlers(Collection<String> dataTypes)
    {
        List<VideoFieldHandler> handlers = New.list(dataTypes.size());
        for (String dataType : dataTypes)
        {
            VideoFieldHandler handler;
            switch (dataType)
            {
                case "http://www.opengis.net/def/dataType/OGC/0/double":
                    handler = new DoubleTimeFieldHandler();
                    break;
                case "JPEG":
                case "H264":
                    handler = new BinaryFieldHandler();
                    break;
                default:
                    // Unknown field type: keep a null placeholder so indexes line up.
                    handler = null;
            }
            handlers.add(handler);
        }
        return handlers;
    }

    /**
     * Handles a change in the play state.
     *
     * @param dataType the data type
     * @param playState the play state
     */
    private void handlePlayStateChange(OSHDataTypeInfo dataType, PlayState playState)
    {
        // NOTE(review): a fresh AnimationPlayer is created per state change, and
        // stop() is invoked on that new instance — confirm AnimationPlayer.stop
        // acts on shared state rather than the instance itself.
        AnimationPlayer animationPlayer = new AnimationPlayer(myToolbox);
        if (playState == PlayState.FORWARD)
        {
            myVideoFrameController.showWindow(dataType);
            ThreadUtilities.runCpu(() -> animationPlayer.playHistoricalVideo(dataType));
            // Pop the timeline view the first time any video starts playing.
            if (myFirstTime)
            {
                myFirstTime = false;
                myToolbox.getEventManager().publishEvent(new ShowTimelineEvent(ViewPreference.TIMELINE));
            }
        }
        else
        {
            animationPlayer.stop(dataType);
        }
    }
}
|
#!/bin/sh
# Build the test project for iOS against both the legacy and new codebases.
# exit script on error
set -e
# rebuild extension
sh rebuild.sh
# build for legacy code (-Dlegacy selects the old code path)
lime build ./test/Project.xml ios -clean -verbose -Dlegacy
# build for new codebase
lime build ./test/Project.xml ios -clean -verbose
|
import torch
import torch.nn as nn
from scipy.signal import kaiser, kaiserord, kaiser_beta, firwin
from scipy.optimize import fmin
import math
import numpy as np
from einops import rearrange
import cached_conv as cc
def reverse_half(x):
    """Negate every other sample of every other band.

    Multiplies x (..., band, time) by a mask that is -1 on odd-indexed bands at
    even-indexed time steps, the standard PQMF half-band sign reversal.
    The operation is an involution: applying it twice returns the input.
    """
    mask = torch.ones_like(x)
    # BUG FIX: the checked-in slice was corrupted by a data sanitizer into
    # IPv6-looking tokens; restored to the standard odd-band / even-sample slice.
    mask[..., 1::2, ::2] = -1
    return x * mask
def center_pad_next_pow_2(x):
    """Zero-pad the last dimension symmetrically up to the next power of two.

    When the required padding is odd, the extra zero goes on the right.
    """
    length = x.shape[-1]
    target = 2 ** math.ceil(math.log2(length))
    total = target - length
    left = total // 2
    right = total - left
    return nn.functional.pad(x, (left, right))
def make_odd(x):
    """Return x with one zero appended on the last axis if its length is even."""
    if x.shape[-1] % 2 == 0:
        return nn.functional.pad(x, (0, 1))
    return x
def get_qmf_bank(h, n_band):
    """
    Modulates an input prototype filter into a bank of
    cosine modulated filters

    Parameters
    ----------
    h: torch.Tensor
        prototype filter
    n_band: int
        number of sub-bands
    """
    taps = h.shape[-1]
    band = torch.arange(n_band).reshape(-1, 1)
    # Symmetric time axis centered on the prototype's middle tap.
    time = torch.arange(-(taps // 2), taps // 2 + 1)
    # Alternating +/- pi/4 phase offset per band.
    phase = (-1) ** band * math.pi / 4
    carrier = torch.cos((2 * band + 1) * math.pi / (2 * n_band) * time + phase)
    return 2 * h * carrier
def kaiser_filter(wc, atten, N=None):
    """
    Computes a kaiser lowpass filter

    Parameters
    ----------
    wc: float
        Angular frequency (rad/sample) of the cutoff
    atten: float
        Attenuation (dB, positive)
    N: int, optional
        Forced tap count; defaults to the (odd) length kaiserord suggests.
    """
    N_, beta = kaiserord(atten, wc / np.pi)
    # Force an odd tap count so the filter has a well-defined center tap.
    N_ = 2 * (N_ // 2) + 1
    N = N if N is not None else N_
    # FIX: `nyq=` was deprecated and has been removed from scipy.signal.firwin;
    # fs=2*pi is the equivalent modern spelling (Nyquist = fs / 2 = pi).
    h = firwin(N, wc, window=('kaiser', beta), scale=False, fs=2 * np.pi)
    return h
def loss_wc(wc, atten, M, N):
    """
    Computes the objective described in https://ieeexplore.ieee.org/document/681427

    Builds the candidate prototype, autocorrelates it, and returns the largest
    aliasing term sampled every 2*M lags away from the center (lag 0 excluded).
    """
    proto = kaiser_filter(wc, atten, N)
    autocorr = np.convolve(proto, proto[::-1], "full")
    center = autocorr.shape[-1] // 2
    aliasing = np.abs(autocorr[center::2 * M][1:])
    return np.max(aliasing)
def get_prototype(atten, M, N=None):
    """
    Given an attenuation objective and the number of bands
    returns the corresponding lowpass filter
    """
    # Search (Nelder-Mead) for the cutoff minimising aliasing, starting at 1/M.
    best_wc = fmin(lambda w: loss_wc(w, atten, M, N), 1 / M, disp=0)[0]
    return kaiser_filter(best_wc, atten, N)
def polyphase_forward(x, hk, rearrange_filter=True):
    """
    Polyphase implementation of the analysis process (fast)

    Parameters
    ----------
    x: torch.Tensor
        signal to analyse ( B x 1 x T )
    hk: torch.Tensor
        filter bank ( M x T )
    rearrange_filter: bool
        set False when hk is already laid out as ( M x m x t ) polyphase components
    """
    # Deinterleave the signal into M polyphase components: (B, M, T // M).
    x = rearrange(x, "b c (t m) -> b (c m) t", m=hk.shape[0])
    if rearrange_filter:
        # Split each band filter into its M polyphase sub-filters.
        hk = rearrange(hk, "c (t m) -> c m t", m=hk.shape[0])
    # Single conv over the polyphase components; drop the trailing sample
    # introduced by the symmetric padding.
    x = nn.functional.conv1d(x, hk, padding=hk.shape[-1] // 2)[..., :-1]
    return x
def polyphase_inverse(x, hk, rearrange_filter=True):
    """
    Polyphase implementation of the synthesis process (fast)

    Parameters
    ----------
    x: torch.Tensor
        signal to synthesize from ( B x 1 x T )
    hk: torch.Tensor
        filter bank ( M x T )
    rearrange_filter: bool
        set False when hk is already flipped and in polyphase layout
    """
    m = hk.shape[0]

    if rearrange_filter:
        # Synthesis uses the time-reversed filters, laid out as polyphase components.
        hk = hk.flip(-1)
        hk = rearrange(hk, "c (t m) -> m c t", m=m)  # polyphase

    pad = hk.shape[-1] // 2 + 1
    # Scale by m to compensate for the decimation performed during analysis.
    x = nn.functional.conv1d(x, hk, padding=int(pad))[..., :-1] * m

    # Reinterleave the m polyphase outputs back into the time axis.
    x = x.flip(1)
    x = rearrange(x, "b (c m) t -> b c (t m)", m=m)
    # Drop the leading filter transient (2 * hk.shape[1] samples, i.e. twice
    # the band count after the rearrange above).
    x = x[..., 2 * hk.shape[1]:]
    return x
def classic_forward(x, hk):
    """
    Naive implementation of the analysis process (slow)

    Parameters
    ----------
    x: torch.Tensor
        signal to analyse ( B x 1 x T )
    hk: torch.Tensor
        filter bank ( M x T )
    """
    # Each band filter becomes one conv output channel; decimation by the band
    # count happens via the conv stride. The trailing sample introduced by the
    # symmetric padding is dropped.
    weight = hk.unsqueeze(1)
    n_band = hk.shape[0]
    half = hk.shape[-1] // 2
    return nn.functional.conv1d(x, weight, stride=n_band, padding=half)[..., :-1]
def classic_inverse(x, hk):
    """
    Naive implementation of the synthesis process (slow)

    Parameters
    ----------
    x: torch.Tensor
        signal to synthesize from ( B x 1 x T )
    hk: torch.Tensor
        filter bank ( M x T )
    """
    n_band = hk.shape[0]
    # Synthesis uses the time-reversed filters.
    flipped = hk.flip(-1)
    # Upsample by zero-stuffing: one input sample every n_band outputs,
    # scaled by n_band to compensate for the inserted zeros.
    upsampled = torch.zeros(*x.shape[:2], n_band * x.shape[-1]).to(x)
    upsampled[..., ::n_band] = x * n_band
    return nn.functional.conv1d(
        upsampled,
        flipped.unsqueeze(0),
        padding=hk.shape[-1] // 2,
    )[..., 1:]
class PQMF(nn.Module):
    """
    Pseudo Quadrature Mirror Filter multiband decomposition / reconstruction

    Parameters
    ----------
    attenuation: int
        Attenuation of the rejected bands (dB, 80 - 120)
    n_band: int
        Number of bands, must be a power of 2 if the polyphase implementation
        is needed
    polyphase: bool
        Use the fast polyphase analysis/synthesis (requires n_band == 2**k)
    """

    def __init__(self, attenuation, n_band, polyphase=True):
        super().__init__()
        h = get_prototype(attenuation, n_band)

        if polyphase:
            power = math.log2(n_band)
            assert power == math.floor(
                power
            ), "when using the polyphase algorithm, n_band must be a power of 2"

        # Modulate the prototype into the cosine filter bank and pad it to a
        # power-of-two tap count.
        h = torch.from_numpy(h).float()
        hk = get_qmf_bank(h, n_band)
        hk = center_pad_next_pow_2(hk)

        self.register_buffer("hk", hk)
        self.register_buffer("h", h)
        self.n_band = n_band
        self.polyphase = polyphase

    def forward(self, x):
        """Split x into n_band sub-bands (identity when n_band == 1)."""
        if self.n_band == 1:
            return x
        bands = polyphase_forward(x, self.hk) if self.polyphase else classic_forward(x, self.hk)
        return reverse_half(bands)

    def inverse(self, x):
        """Reconstruct the signal from its sub-bands (identity when n_band == 1)."""
        if self.n_band == 1:
            return x
        bands = reverse_half(x)
        if self.polyphase:
            return polyphase_inverse(bands, self.hk)
        return classic_inverse(bands, self.hk)
class CachedPQMF(PQMF):
    """PQMF variant whose analysis/synthesis run through cached (streaming)
    convolutions from `cached_conv`, for block-wise / realtime inference."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Analysis weights: one output channel per band, odd tap count so the
        # cached padding is symmetric.
        hkf = make_odd(self.hk).unsqueeze(1)

        # Synthesis weights: time-reversed bank, rearranged into polyphase
        # layout (m sub-filter groups), then made odd like above.
        hki = self.hk.flip(-1)
        hki = rearrange(hki, "c (t m) -> m c t", m=self.hk.shape[0])
        hki = make_odd(hki)

        # Strided cached conv implementing the decimating analysis filter.
        self.forward_conv = cc.Conv1d(
            hkf.shape[1],
            hkf.shape[0],
            hkf.shape[2],
            padding=cc.get_padding(hkf.shape[-1]),
            stride=hkf.shape[0],
            bias=False,
        )
        self.forward_conv.weight.data.copy_(hkf)

        self.inverse_conv = cc.Conv1d(
            hki.shape[1],
            hki.shape[0],
            hki.shape[-1],
            padding=cc.get_padding(hki.shape[-1]),
            bias=False,
        )
        self.inverse_conv.weight.data.copy_(hki)

    def script_cache(self):
        # Pre-script the cached convolutions (TorchScript export support).
        self.forward_conv.script_cache()
        self.inverse_conv.script_cache()

    def forward(self, x):
        x = self.forward_conv(x)
        x = reverse_half(x)
        return x

    def inverse(self, x):
        x = reverse_half(x)
        m = self.hk.shape[0]
        # Scale by m to undo the analysis decimation, then reinterleave the m
        # polyphase outputs back into the time axis (flip + permute/reshape is
        # the tensor-op equivalent of the einops rearrange used in
        # polyphase_inverse).
        x = self.inverse_conv(x) * m
        x = x.flip(1)
        x = x.permute(0, 2, 1)
        x = x.reshape(x.shape[0], x.shape[1], -1, m).permute(0, 2, 1, 3)
        x = x.reshape(x.shape[0], x.shape[1], -1)
        return x
|
package spec

// UUIDs are based on the BerryLan specification from
// https://github.com/nymea/nymea-networkmanager

import "github.com/go-ble/ble"

// UUIDs of the BerryLan services.
var (
	// ServiceWireless groups the e081fec*-prefixed wireless characteristics below.
	ServiceWireless = ble.MustParse("e081fec0-f757-4449-b9c9-bfa83133f7fc")
	// ServiceNetwork groups the ef6d66*-prefixed network characteristics below.
	ServiceNetwork = ble.MustParse("ef6d6610-b8af-49e0-9eca-ab343513641c")
)

// UUIDs of the characteristics.
var (
	CharacteristicWirelessCommander = ble.MustParse("e081fec1-f757-4449-b9c9-bfa83133f7fc")
	CharacteristicWirelessCommanderResponse = ble.MustParse("e081fec2-f757-4449-b9c9-bfa83133f7fc")
	CharacteristicWirelessConnectionStatus = ble.MustParse("e081fec3-f757-4449-b9c9-bfa83133f7fc")
	CharacteristicWirelessMode = ble.MustParse("e081fec4-f757-4449-b9c9-bfa83133f7fc")
	CharacteristicNetworkStatus = ble.MustParse("ef6d6611-b8af-49e0-9eca-ab343513641c")
	CharacteristicNetworkCommander = ble.MustParse("ef6d6612-b8af-49e0-9eca-ab343513641c")
	CharacteristicNetworkCommanderResponse = ble.MustParse("ef6d6613-b8af-49e0-9eca-ab343513641c")
	CharacteristicNetworkEnabled = ble.MustParse("ef6d6614-b8af-49e0-9eca-ab343513641c")
	CharacteristicNetworkWirelessEnabled = ble.MustParse("ef6d6615-b8af-49e0-9eca-ab343513641c")
)
|
A mutable object is one that can be changed after it is created: its state (the variables within the object) can be modified at any point in its lifetime. Examples of mutable objects include lists and dictionaries. An immutable object, by contrast, has a fixed state that cannot be changed once the object has been created; any apparent modification produces a new object instead. Examples of immutable objects include strings, numbers, and tuples.
|
<gh_stars>0
"use strict";

// Collect both names from the user.
const nameOfHim = prompt("What's his name");
const nameOfHer = prompt("What's her name");

// Random compatibility percentage in the range 1-100.
const compatibility = Math.trunc(Math.random() * 100) + 1;

// Announce the verdict in an alert dialog.
const loveCalculator = () =>
  alert(`Congratulations! ${nameOfHer}, you and ${nameOfHim} are ${compatibility}% compatible`);

loveCalculator();
//console.log(ready);
|
<filename>blog_api/users/models.py
import uuid
from datetime import timedelta
from django.conf import settings
from django.db.models import Model, Manager, DateTimeField, CharField, EmailField, IntegerField, BooleanField, \
GenericIPAddressField, ForeignKey, CASCADE, UniqueConstraint
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.password_validation import validate_password
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.urls import reverse
from django.utils import timezone
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ValidationError
from blog_api.users.model_validators import validate_min_3_characters, validate_3_special_characters_max, \
validate_no_special_chars
class BaseModel(Model):
    '''Abstract base model providing created_at/updated_at bookkeeping.'''
    created_at = DateTimeField(editable=False, null=True)
    updated_at = DateTimeField(editable=False, null=True)

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        # Stamp created_at only on the first insert (no primary key yet);
        # refresh updated_at on every save.
        if not self.id:
            self.created_at = timezone.now()
        self.updated_at = timezone.now()
        return super().save(*args, **kwargs)
class User(BaseModel, AbstractUser):
    '''User model. New users are inactive until verified through email'''
    # Public, non-guessable identifier exposed in APIs instead of the DB pk.
    pub_id = CharField(editable=False, unique=True, max_length=50)
    username = CharField(
        max_length=150, unique=True, null=False, blank=False,
        validators=[
            # BUG FIX: UnicodeUsernameValidator must be instantiated. Passing the
            # class itself meant the "validator" call just constructed an instance
            # and never raised, silently disabling the username format check.
            UnicodeUsernameValidator(), validate_min_3_characters, validate_3_special_characters_max
        ]
    )
    name = CharField(blank=True, null=True, max_length=100, validators=[validate_no_special_chars])
    email = EmailField(unique=True, blank=False, null=False, max_length=254)
    # AbstractUser's first_name/last_name are replaced by the single `name` field.
    first_name = None
    last_name = None
    # Users stay inactive until verified via VerificationCode.
    is_active = BooleanField(default=False)
    ip_address = GenericIPAddressField(editable=False, blank=True, null=True)

    def password_reset(self, password):
        '''
        Validate and set a new password, then expire any outstanding password
        reset codes for this user. Returns a dict with a `password_reset` flag
        and a human-readable `message`.
        '''
        try:
            validate_password(password, self)
        except ValidationError:
            return {
                'password_reset': False,
                'message': 'The password is not valid. Please choose one that is more secure.'
            }
        self.set_password(password)
        self.save()
        # Burn every outstanding reset code so a used/old link cannot be replayed.
        password_reset_codes_exist = self.password_reset_codes.all().exists()
        if password_reset_codes_exist:
            now = timezone.now()
            for code in self.password_reset_codes.all():
                code.code_expiration = (now - timedelta(days=100))
                code.save()
        return {
            'password_reset': True,
            'message': 'The password has been reset. Please continue to log in.'
        }

    def follow_user(self, pubid):
        '''
        Follow or unfollow a user based on if follow exists already.
        Returns a dict with a `followed` flag and a `message`.
        '''
        if not User.objects.filter(pub_id=pubid).exists():
            return {
                'followed': False,
                'message': 'No user found with provided id.'
            }
        user_to_follow = User.objects.get(pub_id=pubid)
        if not UserFollowing.objects.filter(user=self, following=user_to_follow).exists():
            follow = UserFollowing.objects.create(user=self, following=user_to_follow)
            return {
                'followed': True,
                'message': f'{user_to_follow.username} followed successfully.'
            }
        else:
            # Acting as a toggle: an existing follow is removed.
            follow = UserFollowing.objects.get(user=self, following=user_to_follow)
            follow.delete()
            return {
                'followed': False,
                'message': f'{user_to_follow.username} unfollowed successfully.'
            }

    def get_following_count(self):
        # Number of users this user follows (computed, not denormalized).
        return self.following.all().count()

    def get_followers_count(self):
        # Number of users following this user.
        return self.followers.all().count()

    def bookmark_post(self, slug):
        '''
        Bookmarks a post for this user. Inactive posts are treated as missing.
        '''
        from blog_api.posts.models import Post  # avoid circular imports
        try:
            post_to_bookmark = Post.objects.get(slug=slug)
            if not post_to_bookmark.is_active:
                raise Post.DoesNotExist()
        except Post.DoesNotExist:
            return {
                'bookmarked': False,
                'message': 'No post found with provided slug.'
            }
        bookmarked = post_to_bookmark.bookmark(pubid=self.pub_id)
        return bookmarked

    def like_post(self, slug, like):
        '''
        Like a post for this user. `like` must be the literal string 'like' or
        'dislike'; the post's like score is recomputed afterwards.
        '''
        from blog_api.posts.models import Post  # avoid circular imports
        try:
            post = Post.objects.get(slug=slug)
            if not post.is_active:
                raise Post.DoesNotExist()
        except Post.DoesNotExist:
            return {
                'liked': False,
                'message': 'No post found with provided slug.'
            }
        if not like:
            return {
                'liked': False,
                'message': 'Please include a `like` or `dislike` keyword to like or dislike.'
            }
        if like == 'like':
            liked = post.likes.like(self.pub_id)
            post.set_like_score()
            return liked
        elif like == 'dislike':
            disliked = post.dislikes.dislike(self.pub_id)
            post.set_like_score()
            return disliked
        else:
            return {
                'liked': False,
                'message': 'Please include either `like` or `dislike` to like or dislike post.'
            }

    def get_post_count(self):
        # Only active (non-deleted) posts count.
        return self.posts.filter(is_active=True).count()

    class Meta:
        ordering = ['-created_at',]

    def save(self, *args, **kwargs):
        '''
        --User model save method--
        1) Sets self.pub_id to UUID hex on first save.
        '''
        if not self.id:
            self.pub_id = str(uuid.uuid4().hex)
        return super(User, self).save(*args, **kwargs)
def _default_verification_code_expiration():
    '''Callable default: three days from the moment the row is created.'''
    return timezone.now() + timedelta(days=3)


class VerificationCode(BaseModel):
    '''Verification Code model. Code is sent to user for verification, users are inactive until verified'''
    verification_code = CharField(editable=False, unique=True, max_length=50)
    user_to_verify = ForeignKey(User, related_name='verification_codes', on_delete=CASCADE)
    # BUG FIX: the default must be a callable. `timezone.now() + timedelta(days=3)`
    # was evaluated once at import time, freezing the same expiration timestamp
    # into every row created during the process lifetime.
    code_expiration = DateTimeField(default=_default_verification_code_expiration)

    def send_user_verification_email(self):
        '''
        --Send user verification email--
        Sends user a verification code email using user.email_user() if the user is not active,
        the link sent points to settings.FRONTEND_URL. If code is expired then code is rotated and
        expiration extended.
        '''
        if self.user_to_verify.is_active:
            return {
                'verification_sent': False,
                'message': 'The user is already verified and active. Please log in.'
            }
        now = timezone.now()
        if not (self.code_expiration >= now):
            # Rotate expired codes instead of emailing a dead link.
            self.code_expiration = now + timedelta(days=3)
            self.verification_code = str(uuid.uuid4().hex)
            self.save()
        subject = f'{self.user_to_verify.username}, please verify your email'
        template = 'email_templates/verification_email.txt'
        html_template = 'email_templates/verification_email.html'
        message_data = {
            'url': settings.FRONTEND_URL + str(self.verification_code) + '/'
        }
        message = render_to_string(template, message_data)
        html_message = render_to_string(html_template, message_data)
        from_email = (
            'verification@' + settings.FRONTEND_URL.split('.')[-2] + '.com'
        )
        self.user_to_verify.email_user(subject=subject, message=message, html_message=html_message, from_email=from_email)
        return {
            'verification_sent': True,
            'message': 'Verification code sent! Check your email.'
        }

    def verify(self):
        '''
        --Code verification--
        1) If the code is expired then resend verification email return False and message.
        2) Expire the code.
        3) Set user to active and return True and message.
        '''
        now = timezone.now()
        if not (self.code_expiration >= now):  # 1
            self.send_user_verification_email()
            return {
                'verified': False,
                'message': 'Code expired. Please check your email for a new verification.'
            }
        self.code_expiration = (now - timedelta(days=100))  # 2
        self.save()
        self.user_to_verify.is_active = True  # 3
        self.user_to_verify.save()
        return {
            'verified': True,
            'message': 'Code verified and the user is now active! You may now log in.'
        }

    class Meta:
        ordering = ['-created_at']

    def __str__(self):
        return str(self.verification_code)

    def save(self, *args, **kwargs):
        '''
        --VerificationCode model save method--
        1) Sets self.verification_code to UUID hex on first save.
        '''
        if not self.id:
            self.verification_code = str(uuid.uuid4().hex)  # 1
        return super(VerificationCode, self).save(*args, **kwargs)
def _default_password_reset_code_expiration():
    '''Callable default: three days from the moment the row is created.'''
    return timezone.now() + timedelta(days=3)


class PasswordResetCode(BaseModel):
    '''
    --Password reset code--
    '''
    user = ForeignKey(User, related_name='password_reset_codes', on_delete=CASCADE)
    password_reset_code = CharField(editable=False, unique=True, max_length=50)
    # BUG FIX: the default must be a callable. `timezone.now() + timedelta(days=3)`
    # was evaluated once at import time, freezing the same expiration timestamp
    # into every row created during the process lifetime.
    code_expiration = DateTimeField(default=_default_password_reset_code_expiration)

    def send_user_password_reset_email(self):
        '''
        --Send user password reset email--
        1) Send user a link to the frontend with a code that will let them post back to another url
        with updated passwords.
        '''
        now = timezone.now()
        if not (self.code_expiration >= now):
            # Rotate expired codes instead of emailing a dead link.
            self.code_expiration = now + timedelta(days=3)
            self.password_reset_code = str(uuid.uuid4().hex)
            self.save()
        subject = f'{self.user.username}, here is the link to reset your password.'
        template = 'email_templates/password_reset_link_email.txt'
        html_template = 'email_templates/password_reset_link_email.html'
        message_data = {
            'url': settings.FRONTEND_URL + 'password/reset/' + self.password_reset_code + '/'
        }
        message = render_to_string(template, message_data)
        html_message = render_to_string(html_template, message_data)
        from_email = (
            # BUG FIX: dropped the trailing '/' — 'user@domain.com/' is not a
            # valid email address (VerificationCode already omits it).
            'password-reset@' + settings.FRONTEND_URL.split('.')[-2] + '.com'
        )
        self.user.email_user(subject=subject, message=message, html_message=html_message, from_email=from_email)
        return {
            'password_reset_link_sent': True,
            'message': 'Password reset link sent! Check your email.'
        }

    def verify(self, password):
        '''
        --Code verification--
        1) If code is expired resend password reset email.
        2) Call password_reset on user and return either True or False and a message.
        '''
        now = timezone.now()
        if not (self.code_expiration >= now):
            self.send_user_password_reset_email()  # 1
            return {
                'password_reset': False,
                'message': 'Code expired. Please check your email for a new password reset link.'
            }
        password_reset = self.user.password_reset(password)  # 2
        return {
            'password_reset': password_reset['password_reset'],
            'message': password_reset['message']
        }

    class Meta:
        ordering = ['-created_at']

    def __str__(self):
        return str(self.password_reset_code)

    def save(self, *args, **kwargs):
        '''
        --PasswordResetLink model save method--
        1) Sets self.password_reset_code to UUID hex on first save.
        '''
        if not self.id:
            self.password_reset_code = str(uuid.uuid4().hex)  # 1
        return super(PasswordResetCode, self).save(*args, **kwargs)
class UserFollowing(BaseModel):
    '''Join table recording that `user` follows `following` (keyed by pub_id).'''
    user = ForeignKey(User, related_name='following', on_delete=CASCADE, to_field='pub_id')
    following = ForeignKey(User, related_name='followers', on_delete=CASCADE, to_field='pub_id')

    class Meta:
        # A given user can follow another user at most once.
        constraints = [
            UniqueConstraint(fields=['user','following'], name='unique_followers')
        ]
        ordering = ['-created_at']

    def clean(self):
        # Disallow self-follows; emails are unique so equality identifies the same user.
        if self.user.email == self.following.email:
            raise ValidationError({'following': 'You can not follow yourself.'})
        return super().clean()

    def __str__(self):
        return f'{self.user.username} follows {self.following}'

    def save(self, *args, **kwargs):
        # Run full validation (including clean) on every save path, not just forms.
        self.full_clean()
        return super(UserFollowing, self).save(*args, **kwargs)
|
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
setup_common
dolt sql <<SQL
CREATE TABLE onepk (
pk1 BIGINT PRIMARY KEY,
v1 BIGINT,
v2 BIGINT
);
CREATE TABLE twopk (
pk1 BIGINT,
pk2 BIGINT,
v1 BIGINT,
v2 BIGINT,
PRIMARY KEY(pk1, pk2)
);
SQL
}
teardown() {
teardown_common
}
@test "index: CREATE TABLE INDEX" {
dolt sql <<SQL
CREATE TABLE test(
pk BIGINT PRIMARY KEY,
v1 BIGINT,
v2 BIGINT,
INDEX (v1)
);
SQL
run dolt index ls test
[ "$status" -eq "0" ]
[[ "$output" =~ "v1(v1)" ]] || false
run dolt schema show test
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `v1` (`v1`)' ]] || false
}
# Both UNIQUE KEY and bare UNIQUE syntax in CREATE TABLE should produce a
# unique index visible in `dolt index ls` and `dolt schema show`.
@test "index: CREATE TABLE UNIQUE KEY" {
dolt sql <<SQL
CREATE TABLE test(
pk BIGINT PRIMARY KEY,
v1 BIGINT,
v2 BIGINT,
UNIQUE KEY (v1)
);
CREATE TABLE test2(
pk BIGINT PRIMARY KEY,
v1 BIGINT,
v2 BIGINT,
UNIQUE (v1)
);
SQL
run dolt index ls test
[ "$status" -eq "0" ]
[[ "$output" =~ "v1(v1)" ]] || false
run dolt schema show test
[ "$status" -eq "0" ]
[[ "$output" =~ 'UNIQUE KEY `v1` (`v1`)' ]] || false
run dolt index ls test2
[ "$status" -eq "0" ]
[[ "$output" =~ "v1(v1)" ]] || false
run dolt schema show test2
[ "$status" -eq "0" ]
[[ "$output" =~ 'UNIQUE KEY `v1` (`v1`)' ]] || false
}
# A named multi-column index with a COMMENT should round-trip into the
# schema output, comment included.
@test "index: CREATE TABLE INDEX named with comment" {
dolt sql <<SQL
CREATE TABLE test(
pk BIGINT PRIMARY KEY,
v1 BIGINT,
v2 BIGINT,
INDEX idx_v1 (v1, v2) COMMENT 'hello there'
);
SQL
run dolt index ls test
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1, v2)" ]] || false
run dolt schema show test
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`,`v2`)'" COMMENT 'hello there'" ]] || false
}
# Multiple unnamed indexes in one CREATE TABLE get auto-generated names
# derived from their column lists (v1, v1v2).
@test "index: CREATE TABLE INDEX multiple" {
dolt sql <<SQL
CREATE TABLE test(
pk BIGINT PRIMARY KEY,
v1 BIGINT,
v2 BIGINT,
INDEX (v1),
INDEX (v1, v2)
);
SQL
run dolt index ls test
[ "$status" -eq "0" ]
[[ "$output" =~ "v1(v1)" ]] || false
[[ "$output" =~ "v1v2(v1, v2)" ]] || false
run dolt schema show test
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `v1` (`v1`)' ]] || false
[[ "$output" =~ 'KEY `v1v2` (`v1`,`v2`)' ]] || false
}
# Rows inserted AFTER an index exists must be reflected in the index data
# (checked via `dolt index cat`, which lists entries in index-key order)
# and usable by indexed SELECTs. Covers both single- and composite-PK tables.
@test "index: CREATE INDEX then INSERT" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
dolt sql <<SQL
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
run dolt index ls twopk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v(v2, v1)" ]] || false
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "$output" =~ "61,52,3,88" ]] || false
[[ "$output" =~ "61,53,5,77" ]] || false
[[ "$output" =~ "63,51,1,99" ]] || false
[[ "$output" =~ "64,55,2,11" ]] || false
[[ "$output" =~ "65,54,4,22" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show twopk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v` (`v2`,`v1`)' ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 61 AND v1 = 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "$output" =~ "5,77" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# Mirror of the previous test with the order reversed: creating an index on a
# table that ALREADY has rows must backfill the index with the existing data.
@test "index: INSERT then CREATE INDEX" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
CREATE INDEX idx_v1 ON onepk(v1);
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
dolt sql <<SQL
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
CREATE INDEX idx_v ON twopk(v2, v1);
SQL
run dolt index ls twopk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v(v2, v1)" ]] || false
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "$output" =~ "61,52,3,88" ]] || false
[[ "$output" =~ "61,53,5,77" ]] || false
[[ "$output" =~ "63,51,1,99" ]] || false
[[ "$output" =~ "64,55,2,11" ]] || false
[[ "$output" =~ "65,54,4,22" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show twopk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v` (`v2`,`v1`)' ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 61 AND v1 = 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "$output" =~ "5,77" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# Same backfill expectation as above, but the index is added via the
# ALTER TABLE ... ADD INDEX syntax instead of CREATE INDEX.
@test "index: INSERT then ALTER TABLE CREATE INDEX" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
ALTER TABLE onepk ADD INDEX idx_v1 (v1);
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
dolt sql <<SQL
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
ALTER TABLE twopk ADD INDEX idx_v (v2, v1);
SQL
run dolt index ls twopk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v(v2, v1)" ]] || false
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "$output" =~ "61,52,3,88" ]] || false
[[ "$output" =~ "61,53,5,77" ]] || false
[[ "$output" =~ "63,51,1,99" ]] || false
[[ "$output" =~ "64,55,2,11" ]] || false
[[ "$output" =~ "65,54,4,22" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show twopk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v` (`v2`,`v1`)' ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 61 AND v1 = 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "$output" =~ "5,77" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# ALTER TABLE ADD INDEX without a name should auto-name the index after
# its column (v1).
@test "index: ALTER TABLE CREATE INDEX unnamed" {
dolt sql <<SQL
ALTER TABLE onepk ADD INDEX (v1);
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "v1(v1)" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `v1` (`v1`)' ]] || false
}
# REPLACE must update index entries: replaced rows drop their old index
# entry and gain one for the new values, while untouched rows keep exactly
# one entry. Exercised on both the single- and composite-PK tables.
@test "index: INSERT then REPLACE" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
REPLACE INTO onepk VALUES (1, 98, -1), (2, 11, 55), (3, 87, 52), (4, 102, 54), (6, 77, 53);
SQL
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "77,6" ]] || false
[[ "$output" =~ "87,3" ]] || false
[[ "$output" =~ "98,1" ]] || false
[[ "$output" =~ "102,4" ]] || false
# header + 6 rows: pk1=6 was a fresh insert, so both (77,5) and (77,6) exist
[[ "${#lines[@]}" = "7" ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "$output" =~ "6" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# v1=22 was replaced away (pk1=4 is now v1=102); index must not still find it
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 22" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 102" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "4" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
dolt sql <<SQL
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
REPLACE INTO twopk VALUES (1, 99, -1, 63), (2, 11, 55, 64), (3, 87, 59, 60), (4, 102, -4, 65), (6, 77, 13, -59);
SQL
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "$output" =~ "-59,13,6,77" ]] || false
[[ "$output" =~ "60,59,3,87" ]] || false
[[ "$output" =~ "61,52,3,88" ]] || false
[[ "$output" =~ "61,53,5,77" ]] || false
[[ "$output" =~ "63,-1,1,99" ]] || false
[[ "$output" =~ "64,55,2,11" ]] || false
[[ "$output" =~ "65,-4,4,102" ]] || false
[[ "$output" =~ "65,54,4,22" ]] || false
[[ "${#lines[@]}" = "9" ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 61 AND v1 = 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "$output" =~ "5,77" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 63 AND v1 = 51" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 63 AND v1 = -1" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "$output" =~ "1,99" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# UPDATE of indexed columns must move index entries to the new key values,
# including updates that reference other columns (v2 = v2 - v1).
@test "index: INSERT then UPDATE" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
UPDATE onepk SET v1 = v1 - 1 WHERE pk1 >= 3;
SQL
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "21,4" ]] || false
[[ "$output" =~ "76,5" ]] || false
[[ "$output" =~ "87,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 76" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
dolt sql <<SQL
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
UPDATE twopk SET v1 = v1 + 4, v2 = v2 - v1 WHERE pk1 <= 3;
SQL
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "$output" =~ "5,56,3,88" ]] || false
[[ "$output" =~ "5,59,2,11" ]] || false
[[ "$output" =~ "8,55,1,99" ]] || false
[[ "$output" =~ "61,53,5,77" ]] || false
[[ "$output" =~ "65,54,4,22" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 5 AND v1 = 56" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "$output" =~ "3,88" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# DELETE of a subset of rows must remove exactly the corresponding index
# entries; deleted keys must no longer be findable through the index.
@test "index: INSERT then DELETE some" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
DELETE FROM onepk WHERE v1 % 2 = 0;
SQL
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 88" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
dolt sql <<SQL
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
DELETE FROM twopk WHERE v2 - v1 < 10;
SQL
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "$output" =~ "63,51,1,99" ]] || false
[[ "$output" =~ "65,54,4,22" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 65 AND v1 = 54" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "$output" =~ "4,22" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 61 AND v1 = 52" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Deleting every row — via a WHERE clause that matches all rows, and via a
# bare DELETE — must leave the index empty (header row only in index cat).
@test "index: INSERT then DELETE all" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
DELETE FROM onepk WHERE v1 != -1;
SQL
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 88" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
DELETE FROM onepk;
SQL
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 88" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# FIXME(review): the predicate below repeats `v1 != -1` twice; the second
# clause was presumably meant to be `v2 != -1`. Harmless for this fixture
# (no row has v1 = -1, so all rows are still deleted) but worth correcting.
dolt sql <<SQL
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
DELETE FROM twopk WHERE v1 != -1 AND v1 != -1;
SQL
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 61 AND v1 = 52" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
dolt sql <<SQL
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
DELETE FROM twopk;
SQL
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
run dolt sql -q "SELECT pk1, pk2 FROM twopk WHERE v2 = 61 AND v1 = 52" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Creating an index whose name already exists must fail AND must not damage
# the existing index (regression test for a bug where the failed create
# wiped the index table — see inline comments).
@test "index: CREATE INDEX with same name" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
CREATE INDEX idx_v1 ON onepk(v1);
SQL
run dolt sql -q "CREATE INDEX idx_v1 ON onepk(v2)"
[ "$status" -eq "1" ]
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
# Found bug where the above would error, yet somehow wipe the index table
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
dolt sql <<SQL
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
CREATE INDEX idx_v ON twopk(v2, v1);
SQL
run dolt sql -q "CREATE INDEX idx_v ON twopk(v1, v2)"
[ "$status" -eq "1" ]
run dolt index ls twopk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v(v2, v1)" ]] || false
# Found bug where the above would error, yet somehow wipe the index table
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "$output" =~ "63,51,1,99" ]] || false
[[ "$output" =~ "64,55,2,11" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show twopk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v` (`v2`,`v1`)' ]] || false
}
# Creating a second index over the exact same column list must fail and
# must not register the duplicate index under the new name.
@test "index: CREATE INDEX with same columns" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
CREATE INDEX idx_v1 ON onepk(v1);
SQL
run dolt sql -q "CREATE INDEX idx_bad ON onepk(v1)"
[ "$status" -eq "1" ]
run dolt index ls onepk
[ "$status" -eq "0" ]
! [[ "$output" =~ "idx_bad(v1)" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
! [[ "$output" =~ 'KEY `idx_bad` (`v1`)' ]] || false
dolt sql <<SQL
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
CREATE INDEX idx_v ON twopk(v2, v1);
SQL
run dolt sql -q "CREATE INDEX idx_bud ON twopk(v2, v1)"
[ "$status" -eq "1" ]
run dolt index ls twopk
[ "$status" -eq "0" ]
! [[ "$output" =~ "idx_bud(v2, v1)" ]] || false
run dolt schema show twopk
[ "$status" -eq "0" ]
! [[ "$output" =~ 'KEY `idx_bud` (`v2`,`v1`)' ]] || false
}
# Index names beginning with the reserved 'dolt_' prefix must be rejected
# by both CREATE INDEX and ALTER TABLE ADD INDEX.
@test "index: Disallow 'dolt_' name prefix" {
run dolt sql -q "CREATE INDEX dolt_idx_v1 ON onepk(v1)"
[ "$status" -eq "1" ]
run dolt sql -q "ALTER TABLE onepk ADD INDEX dolt_idx_v1 (v1)"
[ "$status" -eq "1" ]
}
# DROP INDEX must remove only the named index; sibling indexes on the same
# table survive in both `index ls` and the schema.
@test "index: DROP INDEX" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v1pk1 ON onepk(v1,pk1);
CREATE INDEX idx_v2v1 ON twopk(v2, v1);
CREATE INDEX idx_v1v2 ON twopk(v1, v2);
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
[[ "$output" =~ "idx_v1pk1(v1, pk1)" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
[[ "$output" =~ 'KEY `idx_v1pk1` (`v1`,`pk1`)' ]] || false
run dolt index ls twopk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v2v1(v2, v1)" ]] || false
[[ "$output" =~ "idx_v1v2(v1, v2)" ]] || false
run dolt schema show twopk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v2v1` (`v2`,`v1`)' ]] || false
[[ "$output" =~ 'KEY `idx_v1v2` (`v1`,`v2`)' ]] || false
dolt sql <<SQL
DROP INDEX idx_v1 ON onepk;
DROP INDEX idx_v2v1 ON twopk;
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1pk1(v1, pk1)" ]] || false
! [[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1pk1` (`v1`,`pk1`)' ]] || false
! [[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt index ls twopk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1v2(v1, v2)" ]] || false
! [[ "$output" =~ "idx_v2v1(v2, v1)" ]] || false
run dolt schema show twopk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1v2` (`v1`,`v2`)' ]] || false
! [[ "$output" =~ 'KEY `idx_v2v1` (`v2`,`v1`)' ]] || false
}
# Same expectations as the DROP INDEX test, but dropping via the
# ALTER TABLE ... DROP INDEX syntax.
@test "index: ALTER TABLE DROP INDEX" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v1pk1 ON onepk(v1,pk1);
CREATE INDEX idx_v2v1 ON twopk(v2, v1);
CREATE INDEX idx_v1v2 ON twopk(v1, v2);
ALTER TABLE onepk DROP INDEX idx_v1;
ALTER TABLE twopk DROP INDEX idx_v2v1;
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1pk1(v1, pk1)" ]] || false
! [[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1pk1` (`v1`,`pk1`)' ]] || false
! [[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt index ls twopk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1v2(v1, v2)" ]] || false
! [[ "$output" =~ "idx_v2v1(v2, v1)" ]] || false
run dolt schema show twopk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1v2` (`v1`,`v2`)' ]] || false
! [[ "$output" =~ 'KEY `idx_v2v1` (`v2`,`v1`)' ]] || false
}
# RENAME INDEX to an already-taken name must fail; renaming to a fresh name
# must replace the old name everywhere (ls + schema).
@test "index: ALTER TABLE RENAME INDEX" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v1pk1 ON onepk(v1,pk1);
SQL
run dolt sql -q "ALTER TABLE onepk RENAME INDEX idx_v1 TO idx_v1pk1"
[ "$status" -eq "1" ]
dolt sql -q "ALTER TABLE onepk RENAME INDEX idx_v1 TO idx_vfirst"
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1pk1(v1, pk1)" ]] || false
[[ "$output" =~ "idx_vfirst(v1)" ]] || false
! [[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1pk1` (`v1`,`pk1`)' ]] || false
[[ "$output" =~ 'KEY `idx_vfirst` (`v1`)' ]] || false
! [[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
}
# RENAME TABLE must carry indexes to the new table name, keep their data
# live for rows inserted after the rename, and detach them from the old name.
@test "index: RENAME TABLE" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v1v2 ON onepk(v1,v2);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52);
RENAME TABLE onepk TO newpk;
INSERT INTO newpk VALUES (4, 22, 54), (5, 77, 53);
SQL
run dolt index ls onepk
[ "$status" -eq "1" ]
run dolt index ls newpk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
[[ "$output" =~ "idx_v1v2(v1, v2)" ]] || false
run dolt schema show newpk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
[[ "$output" =~ 'KEY `idx_v1v2` (`v1`,`v2`)' ]] || false
run dolt index cat newpk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt index cat newpk idx_v1v2 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,v2,pk1" ]] || false
[[ "$output" =~ "11,55,2" ]] || false
[[ "$output" =~ "22,54,4" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt sql -q "SELECT pk1 FROM newpk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT pk1 FROM newpk WHERE v1 = 88 AND v2 = 52" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "3" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# `dolt table mv` (CLI rename) must behave like RENAME TABLE with respect
# to indexes: they follow the table and remain functional.
@test "index: dolt table mv" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v1v2 ON onepk(v1,v2);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52);
SQL
dolt table mv onepk newpk
dolt sql -q "INSERT INTO newpk VALUES (4, 22, 54), (5, 77, 53)"
run dolt index ls onepk
[ "$status" -eq "1" ]
run dolt index ls newpk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
[[ "$output" =~ "idx_v1v2(v1, v2)" ]] || false
run dolt schema show newpk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
[[ "$output" =~ 'KEY `idx_v1v2` (`v1`,`v2`)' ]] || false
run dolt index cat newpk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt index cat newpk idx_v1v2 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,v2,pk1" ]] || false
[[ "$output" =~ "77,53,5" ]] || false
[[ "$output" =~ "88,52,3" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt sql -q "SELECT pk1 FROM newpk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT pk1 FROM newpk WHERE v1 = 88 AND v2 = 52" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "3" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# DROP TABLE must remove the table's indexes along with it; subsequent
# index/schema/select operations against the name fail or report not-found.
@test "index: DROP TABLE" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
DROP TABLE onepk;
SQL
run dolt index ls onepk
[ "$status" -eq "1" ]
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "onepk not found" ]] || false
run dolt sql -q "SELECT * FROM onepk WHERE v1 = 77"
[ "$status" -eq "1" ]
}
# `dolt table rm` (CLI drop) must behave like DROP TABLE with respect to
# indexes: everything tied to the table is gone.
@test "index: dolt table rm" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
SQL
dolt table rm onepk
run dolt index ls onepk
[ "$status" -eq "1" ]
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "onepk not found" ]] || false
run dolt sql -q "SELECT * FROM onepk WHERE v1 = 77"
[ "$status" -eq "1" ]
}
# `dolt table cp` must copy indexes (definition AND data) to the new table,
# and the copy's indexes must serve SELECTs.
@test "index: dolt table cp" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
SQL
dolt table cp onepk onepk_new
run dolt index ls onepk_new
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk_new idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk_new
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk_new WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# TRUNCATE must empty the index data while keeping the index definition.
# Skipped until TRUNCATE is implemented in Dolt.
@test "index: TRUNCATE TABLE" {
skip "TRUNCATE not yet supported"
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
TRUNCATE TABLE onepk;
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Equality lookups on primary-key columns: full-key hits, misses, partial-key
# prefix on the composite PK, and mismatched key combinations.
@test "index: SELECT = Primary Key" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found
run dolt sql -q "SELECT * FROM onepk WHERE pk1 = 5" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not found
run dolt sql -q "SELECT * FROM onepk WHERE pk1 = 999" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial pk
run dolt sql -q "SELECT * FROM twopk WHERE pk1 = 2" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not found partial pk
run dolt sql -q "SELECT * FROM twopk WHERE pk1 = 999" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found
run dolt sql -q "SELECT * FROM twopk WHERE pk1 = 5 AND pk2 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE pk1 = 999 AND pk2 = 22" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE pk1 = 1 AND pk2 = 999" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch
run dolt sql -q "SELECT * FROM twopk WHERE pk1 = 88 AND pk2 = 3" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Equality lookups through secondary indexes, plus the same queries on
# non-indexed columns (which must still return correct results via a scan).
@test "index: SELECT = Secondary Index" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found
run dolt sql -q "SELECT * FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not found
run dolt sql -q "SELECT * FROM onepk WHERE v1 = 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found
run dolt sql -q "SELECT * FROM onepk WHERE v2 = 54" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "4,22,54" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM onepk WHERE v2 = 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial index
run dolt sql -q "SELECT * FROM twopk WHERE v2 = 64" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not found partial index
run dolt sql -q "SELECT * FROM twopk WHERE v2 = 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found
run dolt sql -q "SELECT * FROM twopk WHERE v2 = 61 AND v1 = 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE v2 = 111 AND v1 = 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE v2 = 61 AND v1 = 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch
run dolt sql -q "SELECT * FROM twopk WHERE v2 = 61 AND v1 = 54" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found
run dolt sql -q "SELECT * FROM twopk WHERE v1 = 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM twopk WHERE v1 = 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Range (>) lookups on primary-key columns: multi-row hits, empty ranges,
# partial-key prefix on the composite PK, and mismatched combinations.
@test "index: SELECT > Primary Key" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found
run dolt sql -q "SELECT * FROM onepk WHERE pk1 > 2" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "$output" =~ "4,22,54" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found
run dolt sql -q "SELECT * FROM onepk WHERE pk1 > 999" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial pk
run dolt sql -q "SELECT * FROM twopk WHERE pk1 > 2" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found partial pk
run dolt sql -q "SELECT * FROM twopk WHERE pk1 > 999" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found
run dolt sql -q "SELECT * FROM twopk WHERE pk1 > 4 AND pk2 > 22" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE pk1 > 999 AND pk2 > 11" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE pk1 > 2 AND pk2 > 999" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch
run dolt sql -q "SELECT * FROM twopk WHERE pk1 > 3 AND pk2 > 99" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies '>' range filters backed by secondary indexes: a single-column
# index on onepk(v1) and a composite index on twopk(v2, v1). Indexes are
# created BEFORE the inserts, so this also exercises index maintenance on
# INSERT. "not indexed" cases query columns with no covering index to
# confirm correct results via a plain scan. ${#lines[@]} = header + rows.
@test "index: SELECT > Secondary Index" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found: v1 > 70 matches v1 in {77, 88, 99}
run dolt sql -q "SELECT * FROM onepk WHERE v1 > 70" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "$output" =~ "1,99,51" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found
run dolt sql -q "SELECT * FROM onepk WHERE v1 > 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v2 has no index; result must still be correct
run dolt sql -q "SELECT * FROM onepk WHERE v2 > 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "2,11,55" ]] || false
[[ "$output" =~ "4,22,54" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM onepk WHERE v2 > 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial index: filter only on v2, the leading column of idx_v(v2, v1)
run dolt sql -q "SELECT * FROM twopk WHERE v2 > 63" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not found partial index
run dolt sql -q "SELECT * FROM twopk WHERE v2 > 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found: both columns of the composite index filtered
run dolt sql -q "SELECT * FROM twopk WHERE v2 > 61 AND v1 > 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE v2 > 111 AND v1 > 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE v2 > 61 AND v1 > 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch: each predicate matches rows, but never the same row
run dolt sql -q "SELECT * FROM twopk WHERE v2 > 64 AND v1 > 54" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v1 alone is not a leading index column on twopk
run dolt sql -q "SELECT * FROM twopk WHERE v1 > 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM twopk WHERE v1 > 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies '<' range filters on primary-key columns for the single-PK
# (onepk) and composite-PK (twopk) tables. Assumes onepk/twopk are created
# empty in setup() — defined earlier in this file (TODO confirm).
# ${#lines[@]} counts the CSV header plus one line per matched row;
# "|| false" forces a bats failure when a regex does not match.
@test "index: SELECT < Primary Key" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found: pk1 < 3 matches rows 1 and 2
run dolt sql -q "SELECT * FROM onepk WHERE pk1 < 3" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "1,99,51" ]] || false
[[ "$output" =~ "2,11,55" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not found: bound below all keys returns only the header
run dolt sql -q "SELECT * FROM onepk WHERE pk1 < 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial key: filter on only the first column of the composite PK
run dolt sql -q "SELECT * FROM twopk WHERE pk1 < 4" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found partial key
run dolt sql -q "SELECT * FROM twopk WHERE pk1 < 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found: both PK columns filtered; only row 2 satisfies both predicates
run dolt sql -q "SELECT * FROM twopk WHERE pk1 < 3 AND pk2 < 99" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE pk1 < 0 AND pk2 < 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE pk1 < 3 AND pk2 < 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch: predicates individually match rows, never together
run dolt sql -q "SELECT * FROM twopk WHERE pk1 < 2 AND pk2 < 22" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies '<' range filters backed by secondary indexes idx_v1(v1) on
# onepk and the composite idx_v(v2, v1) on twopk. Indexes are created
# before the inserts, so index maintenance on INSERT is exercised too.
# "not indexed" cases confirm correctness without an index.
# ${#lines[@]} = CSV header + matched rows.
@test "index: SELECT < Secondary Index" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found: v1 < 99 matches every row except row 1
run dolt sql -q "SELECT * FROM onepk WHERE v1 < 99" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "2,11,55" ]] || false
[[ "$output" =~ "4,22,54" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "${#lines[@]}" = "5" ]] || false
# not found
run dolt sql -q "SELECT * FROM onepk WHERE v1 < 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v2 has no index; result must still be correct
run dolt sql -q "SELECT * FROM onepk WHERE v2 < 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "1,99,51" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM onepk WHERE v2 < 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial index: filter only on v2, the leading column of idx_v(v2, v1)
run dolt sql -q "SELECT * FROM twopk WHERE v2 < 64" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found partial index
run dolt sql -q "SELECT * FROM twopk WHERE v2 < 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found: both columns of the composite index filtered
run dolt sql -q "SELECT * FROM twopk WHERE v2 < 64 AND v1 < 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE v2 < 0 AND v1 < 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE v2 < 61 AND v1 < 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch: predicates individually match rows, never together
run dolt sql -q "SELECT * FROM twopk WHERE v2 < 62 AND v1 < 52" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v1 alone is not a leading index column on twopk
run dolt sql -q "SELECT * FROM twopk WHERE v1 < 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM twopk WHERE v1 < 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies '>=' (inclusive lower bound) range filters on primary-key columns
# for the single-PK (onepk) and composite-PK (twopk) tables. Assumes
# onepk/twopk are created empty in setup() — defined earlier in this file
# (TODO confirm). ${#lines[@]} = CSV header + matched rows; "|| false"
# forces a bats failure when a regex does not match.
@test "index: SELECT >= Primary Key" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found: pk1 >= 2 includes the boundary row 2 (unlike '>')
run dolt sql -q "SELECT * FROM onepk WHERE pk1 >= 2" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "2,11,55" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "$output" =~ "4,22,54" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "5" ]] || false
# not found
run dolt sql -q "SELECT * FROM onepk WHERE pk1 >= 999" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial pk: filter on only the first column of the composite PK
run dolt sql -q "SELECT * FROM twopk WHERE pk1 >= 2" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "${#lines[@]}" = "5" ]] || false
# not found partial pk
run dolt sql -q "SELECT * FROM twopk WHERE pk1 >= 999" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found: both PK columns filtered; rows 4 and 5 satisfy both (inclusive bounds)
run dolt sql -q "SELECT * FROM twopk WHERE pk1 >= 4 AND pk2 >= 22" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE pk1 >= 999 AND pk2 >= 11" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE pk1 >= 2 AND pk2 >= 999" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch: predicates individually match rows, never together
run dolt sql -q "SELECT * FROM twopk WHERE pk1 >= 4 AND pk2 >= 88" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies '>=' (inclusive lower bound) filters backed by secondary indexes
# idx_v1(v1) on onepk and the composite idx_v(v2, v1) on twopk. Indexes
# are created before the inserts, exercising index maintenance on INSERT.
# "not indexed" cases confirm correctness without an index.
# ${#lines[@]} = CSV header + matched rows.
@test "index: SELECT >= Secondary Index" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found: v1 >= 70 matches v1 in {77, 88, 99}
run dolt sql -q "SELECT * FROM onepk WHERE v1 >= 70" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "$output" =~ "1,99,51" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found
run dolt sql -q "SELECT * FROM onepk WHERE v1 >= 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v2 has no index; the boundary value 53 is included
run dolt sql -q "SELECT * FROM onepk WHERE v2 >= 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "2,11,55" ]] || false
[[ "$output" =~ "4,22,54" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM onepk WHERE v2 >= 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial index: filter only on v2, the leading column of idx_v(v2, v1)
run dolt sql -q "SELECT * FROM twopk WHERE v2 >= 63" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found partial index
run dolt sql -q "SELECT * FROM twopk WHERE v2 >= 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found: both columns of the composite index filtered (inclusive bounds)
run dolt sql -q "SELECT * FROM twopk WHERE v2 >= 61 AND v1 >= 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE v2 >= 111 AND v1 >= 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE v2 >= 61 AND v1 >= 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch: predicates individually match rows, never together
run dolt sql -q "SELECT * FROM twopk WHERE v2 >= 65 AND v1 >= 55" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v1 alone is not a leading index column on twopk
run dolt sql -q "SELECT * FROM twopk WHERE v1 >= 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM twopk WHERE v1 >= 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies '<=' (inclusive upper bound) range filters on primary-key columns
# for the single-PK (onepk) and composite-PK (twopk) tables. Assumes
# onepk/twopk are created empty in setup() — defined earlier in this file
# (TODO confirm). ${#lines[@]} = CSV header + matched rows; "|| false"
# forces a bats failure when a regex does not match.
@test "index: SELECT <= Primary Key" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found: pk1 <= 3 includes the boundary row 3 (unlike '<')
run dolt sql -q "SELECT * FROM onepk WHERE pk1 <= 3" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "1,99,51" ]] || false
[[ "$output" =~ "2,11,55" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found
run dolt sql -q "SELECT * FROM onepk WHERE pk1 <= 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial key: filter on only the first column of the composite PK
run dolt sql -q "SELECT * FROM twopk WHERE pk1 <= 4" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "${#lines[@]}" = "5" ]] || false
# not found partial key
run dolt sql -q "SELECT * FROM twopk WHERE pk1 <= 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found: both PK columns filtered; rows 1-3 satisfy both inclusive bounds
run dolt sql -q "SELECT * FROM twopk WHERE pk1 <= 3 AND pk2 <= 99" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE pk1 <= 0 AND pk2 <= 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE pk1 <= 3 AND pk2 <= 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch: predicates individually match rows, never together
run dolt sql -q "SELECT * FROM twopk WHERE pk1 <= 1 AND pk2 <= 88" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies '<=' (inclusive upper bound) filters backed by secondary indexes
# idx_v1(v1) on onepk and the composite idx_v(v2, v1) on twopk. Indexes
# are created before the inserts, exercising index maintenance on INSERT.
# "not indexed" cases confirm correctness without an index.
# ${#lines[@]} = CSV header + matched rows.
@test "index: SELECT <= Secondary Index" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found: v1 <= 99 matches all five rows (99 is the max v1, bound inclusive)
run dolt sql -q "SELECT * FROM onepk WHERE v1 <= 99" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "2,11,55" ]] || false
[[ "$output" =~ "4,22,54" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "$output" =~ "1,99,51" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
# not found
run dolt sql -q "SELECT * FROM onepk WHERE v1 <= 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v2 has no index; the boundary value 53 is included
run dolt sql -q "SELECT * FROM onepk WHERE v2 <= 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "1,99,51" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM onepk WHERE v2 <= 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial index: filter only on v2, the leading column of idx_v(v2, v1)
run dolt sql -q "SELECT * FROM twopk WHERE v2 <= 64" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "${#lines[@]}" = "5" ]] || false
# not found partial index
run dolt sql -q "SELECT * FROM twopk WHERE v2 <= 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found: both columns of the composite index filtered (inclusive bounds)
run dolt sql -q "SELECT * FROM twopk WHERE v2 <= 64 AND v1 <= 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found key 1
run dolt sql -q "SELECT * FROM twopk WHERE v2 <= 0 AND v1 <= 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2
run dolt sql -q "SELECT * FROM twopk WHERE v2 <= 61 AND v1 <= 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch: predicates individually match rows, never together
run dolt sql -q "SELECT * FROM twopk WHERE v2 <= 61 AND v1 <= 51" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v1 alone is not a leading index column on twopk
run dolt sql -q "SELECT * FROM twopk WHERE v1 <= 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM twopk WHERE v1 <= 0" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies BETWEEN (inclusive on both ends) range filters on primary-key
# columns for the single-PK (onepk) and composite-PK (twopk) tables.
# Assumes onepk/twopk are created empty in setup() — defined earlier in
# this file (TODO confirm). ${#lines[@]} = CSV header + matched rows;
# "|| false" forces a bats failure when a regex does not match.
@test "index: SELECT BETWEEN Primary Key" {
dolt sql <<SQL
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found: BETWEEN 2 AND 4 includes both endpoints
run dolt sql -q "SELECT * FROM onepk WHERE pk1 BETWEEN 2 AND 4" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "2,11,55" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "$output" =~ "4,22,54" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found: range above all keys returns only the header
run dolt sql -q "SELECT * FROM onepk WHERE pk1 BETWEEN 6 AND 9" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial key: range on only the first column of the composite PK
run dolt sql -q "SELECT * FROM twopk WHERE pk1 BETWEEN 1 AND 4" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "4,22,54,65" ]] || false
[[ "${#lines[@]}" = "5" ]] || false
# not found partial key
run dolt sql -q "SELECT * FROM twopk WHERE pk1 BETWEEN 6 AND 7" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found: ranges on both PK columns; rows 2 and 3 satisfy both
run dolt sql -q "SELECT * FROM twopk WHERE pk1 BETWEEN 1 AND 3 AND pk2 BETWEEN 10 AND 90" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# not found key 1: first PK range matches nothing
run dolt sql -q "SELECT * FROM twopk WHERE pk1 BETWEEN 6 AND 8 AND pk2 BETWEEN 20 AND 80" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2: second PK range matches nothing
run dolt sql -q "SELECT * FROM twopk WHERE pk1 BETWEEN 1 AND 3 AND pk2 BETWEEN 100 AND 111" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch: ranges individually match rows, never the same row
run dolt sql -q "SELECT * FROM twopk WHERE pk1 BETWEEN 3 AND 5 AND pk2 BETWEEN 10 AND 20" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies BETWEEN (inclusive on both ends) filters backed by secondary
# indexes idx_v1(v1) on onepk and the composite idx_v(v2, v1) on twopk.
# Indexes are created before the inserts, exercising index maintenance on
# INSERT. "not indexed" cases confirm correctness without an index.
# ${#lines[@]} = CSV header + matched rows.
@test "index: SELECT BETWEEN Secondary Index" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
SQL
# found: v1 BETWEEN 11 AND 99 spans the full value range (all five rows)
run dolt sql -q "SELECT * FROM onepk WHERE v1 BETWEEN 11 AND 99" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "2,11,55" ]] || false
[[ "$output" =~ "4,22,54" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "$output" =~ "1,99,51" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
# not found: the 30-70 gap contains no v1 values
run dolt sql -q "SELECT * FROM onepk WHERE v1 BETWEEN 30 AND 70" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v2 has no index; result must still be correct
run dolt sql -q "SELECT * FROM onepk WHERE v2 BETWEEN 50 AND 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "1,99,51" ]] || false
[[ "$output" =~ "3,88,52" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM onepk WHERE v2 BETWEEN 20 AND 50" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found partial index: range only on v2, the leading column of idx_v(v2, v1)
run dolt sql -q "SELECT * FROM twopk WHERE v2 BETWEEN 0 AND 64" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "$output" =~ "2,11,55,64" ]] || false
[[ "${#lines[@]}" = "5" ]] || false
# not found partial index
run dolt sql -q "SELECT * FROM twopk WHERE v2 BETWEEN 70 AND 90" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# found: ranges on both columns of the composite index
run dolt sql -q "SELECT * FROM twopk WHERE v2 BETWEEN 60 AND 64 AND v1 BETWEEN 50 AND 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not found key 1: v2 range matches nothing
run dolt sql -q "SELECT * FROM twopk WHERE v2 BETWEEN 50 AND 53 AND v1 BETWEEN 50 AND 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key 2: v1 range matches nothing
run dolt sql -q "SELECT * FROM twopk WHERE v2 BETWEEN 60 AND 64 AND v1 BETWEEN 60 AND 64" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not found key mismatch: ranges individually match rows, never the same row
run dolt sql -q "SELECT * FROM twopk WHERE v2 BETWEEN 60 AND 62 AND v1 BETWEEN 54 AND 55" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
# not indexed & found: v1 alone is not a leading index column on twopk
run dolt sql -q "SELECT * FROM twopk WHERE v1 BETWEEN 51 AND 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "3,88,52,61" ]] || false
[[ "$output" =~ "5,77,53,61" ]] || false
[[ "$output" =~ "1,99,51,63" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# not indexed & not found
run dolt sql -q "SELECT * FROM twopk WHERE v1 BETWEEN 90 AND 100" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
# Verifies that equality joins produce correct results AND that EXPLAIN
# reports an IndexedJoin node — i.e. the planner uses the secondary index
# (onepk.v1 = twopk.v2) or the primary key (pk1 = pk1) rather than a
# nested-loop scan. The fixture values are chosen so every onepk row
# joins exactly one twopk row. ${#lines[@]} = CSV header + 5 joined rows.
@test "index: EXPLAIN SELECT = IndexedJoin" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO onepk VALUES (1, 11, 111), (2, 22, 222), (3, 33, 333), (4, 44, 444), (5, 55, 555);
INSERT INTO twopk VALUES (5, 95, 222, 11), (4, 4, 333, 55), (3, 93, 444, 33), (2, 92, 111, 22), (1, 91, 555, 44);
SQL
run dolt sql -q "SELECT * FROM onepk JOIN twopk ON onepk.v1 = twopk.v2;" -r=csv
[ "$status" -eq "0" ]
# CSV header is the concatenation of both tables' columns
[[ "$output" =~ "pk1,v1,v2,pk1,pk2,v1,v2" ]] || false
[[ "$output" =~ "1,11,111,5,95,222,11" ]] || false
[[ "$output" =~ "2,22,222,2,92,111,22" ]] || false
[[ "$output" =~ "3,33,333,3,93,444,33" ]] || false
[[ "$output" =~ "4,44,444,1,91,555,44" ]] || false
[[ "$output" =~ "5,55,555,4,4,333,55" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
# the plan must use an indexed join on the secondary-index column
run dolt sql -q "EXPLAIN SELECT * FROM onepk JOIN twopk ON onepk.v1 = twopk.v2;"
[ "$status" -eq "0" ]
[[ "$output" =~ "IndexedJoin(onepk.v1 = twopk.v2)" ]] || false
run dolt sql -q "SELECT * FROM onepk JOIN twopk ON onepk.pk1 = twopk.pk1;" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "1,11,111,1,91,555,44" ]] || false
[[ "$output" =~ "2,22,222,2,92,111,22" ]] || false
[[ "$output" =~ "3,33,333,3,93,444,33" ]] || false
[[ "$output" =~ "4,44,444,4,4,333,55" ]] || false
[[ "$output" =~ "5,55,555,5,95,222,11" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
# the plan must also use an indexed join when joining on the primary key
run dolt sql -q "EXPLAIN SELECT * FROM onepk JOIN twopk ON onepk.pk1 = twopk.pk1;"
[ "$status" -eq "0" ]
[[ "$output" =~ "IndexedJoin(onepk.pk1 = twopk.pk1)" ]] || false
}
# Verifies that ALTER TABLE ADD COLUMN leaves existing secondary indexes
# intact and usable: index contents ("dolt index cat"), the schema's KEY
# clause, and index-backed SELECTs must all be unchanged. Covers both a
# nullable new column (onepk.v3 → NULL values) and a NOT NULL column with
# a default (twopk.v3 → 7). Index cat line counts = header + 5 entries.
@test "index: ALTER TABLE ADD COLUMN" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v ON twopk(v2, v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
INSERT INTO twopk VALUES (1, 99, 51, 63), (2, 11, 55, 64), (3, 88, 52, 61), (4, 22, 54, 65), (5, 77, 53, 61);
ALTER TABLE onepk ADD COLUMN v3 BIGINT;
ALTER TABLE twopk ADD COLUMN v3 BIGINT NOT NULL DEFAULT 7;
SQL
# idx_v1 must still contain all five (v1, pk1) entries after the ALTER
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
# index-backed lookup returns the row; new nullable v3 is empty in CSV
run dolt sql -q "SELECT * FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2,v3" ]] || false
[[ "$output" =~ "5,77,53," ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# tabular output should render the missing v3 value as NULL
run dolt sql -q "SELECT * FROM onepk WHERE v1 = 77" -r=tabular
[ "$status" -eq "0" ]
[[ "$output" =~ "NULL" ]] || false
run dolt index cat twopk idx_v -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1,pk2" ]] || false
[[ "$output" =~ "61,53,5,77" ]] || false
[[ "$output" =~ "65,54,4,22" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show twopk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v` (`v2`,`v1`)' ]] || false
# NOT NULL DEFAULT 7 must be backfilled into existing rows
run dolt sql -q "SELECT * FROM twopk WHERE v2 = 61 AND v1 = 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,pk2,v1,v2,v3" ]] || false
[[ "$output" =~ "5,77,53,61,7" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# Verifies that CHANGE COLUMN (rename v1 → vnew AND relax NOT NULL → NULL)
# propagates the new column name into secondary indexes: index contents,
# the schema's KEY clauses, and index-backed SELECTs must all use "vnew".
# Uses a locally created table so the NOT NULL starting state is explicit.
# Index cat line counts = header + 5 entries.
@test "index: ALTER TABLE CHANGE COLUMN NULL" {
dolt sql <<SQL
CREATE TABLE onepk_new (
pk1 BIGINT PRIMARY KEY,
v1 BIGINT NOT NULL,
v2 BIGINT
);
CREATE INDEX idx_v1 ON onepk_new(v1);
CREATE INDEX idx_v2v1 ON onepk_new(v2,v1);
INSERT INTO onepk_new VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
ALTER TABLE onepk_new CHANGE COLUMN v1 vnew BIGINT NULL;
SQL
# single-column index header must show the renamed column
run dolt index cat onepk_new idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "vnew,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
# composite index must also pick up the rename
run dolt index cat onepk_new idx_v2v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,vnew,pk1" ]] || false
[[ "$output" =~ "51,99,1" ]] || false
[[ "$output" =~ "53,77,5" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk_new
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`vnew`)' ]] || false
[[ "$output" =~ 'KEY `idx_v2v1` (`v2`,`vnew`)' ]] || false
# index-backed lookup works under the new column name
run dolt sql -q "SELECT * FROM onepk_new WHERE vnew = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,vnew,v2" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# Verifies that MODIFY COLUMN (relax NOT NULL → NULL, no rename) leaves
# secondary indexes intact: index contents, the schema's KEY clauses, and
# index-backed SELECTs are unchanged. Counterpart to the CHANGE COLUMN
# test above, isolating the nullability change from a rename. Uses a
# locally created table so the NOT NULL starting state is explicit.
@test "index: ALTER TABLE MODIFY COLUMN NULL" {
dolt sql <<SQL
CREATE TABLE onepk_new (
pk1 BIGINT PRIMARY KEY,
v1 BIGINT NOT NULL,
v2 BIGINT
);
CREATE INDEX idx_v1 ON onepk_new(v1);
CREATE INDEX idx_v2v1 ON onepk_new(v2,v1);
INSERT INTO onepk_new VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
ALTER TABLE onepk_new MODIFY COLUMN v1 BIGINT NULL;
SQL
# index contents unchanged: header + 5 (v1, pk1) entries
run dolt index cat onepk_new idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt index cat onepk_new idx_v2v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1" ]] || false
[[ "$output" =~ "51,99,1" ]] || false
[[ "$output" =~ "55,11,2" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk_new
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
[[ "$output" =~ 'KEY `idx_v2v1` (`v2`,`v1`)' ]] || false
# index-backed lookup still works after the nullability change
run dolt sql -q "SELECT * FROM onepk_new WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# Verifies that CHANGE COLUMN (rename v1 → vnew AND add NOT NULL)
# propagates the new column name into secondary indexes: index contents,
# the schema's KEY clauses, and index-backed SELECTs must all use "vnew".
# Assumes onepk is created in setup() — defined earlier in this file
# (TODO confirm). Index cat line counts = header + 5 entries.
@test "index: ALTER TABLE CHANGE COLUMN NOT NULL" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v2v1 ON onepk(v2,v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
ALTER TABLE onepk CHANGE COLUMN v1 vnew BIGINT NOT NULL;
SQL
# single-column index header must show the renamed column
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "vnew,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
# composite index must also pick up the rename
run dolt index cat onepk idx_v2v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,vnew,pk1" ]] || false
[[ "$output" =~ "51,99,1" ]] || false
[[ "$output" =~ "55,11,2" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`vnew`)' ]] || false
[[ "$output" =~ 'KEY `idx_v2v1` (`v2`,`vnew`)' ]] || false
# index-backed lookup works under the new column name
run dolt sql -q "SELECT * FROM onepk WHERE vnew = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,vnew,v2" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
# Verifies that MODIFY COLUMN (add NOT NULL, no rename) leaves secondary
# indexes intact: index contents, the schema's KEY clauses, and
# index-backed SELECTs are unchanged. Counterpart to the CHANGE COLUMN
# NOT NULL test above, isolating the constraint change from a rename.
# Assumes onepk is created in setup() (TODO confirm).
@test "index: ALTER TABLE MODIFY COLUMN NOT NULL" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v2v1 ON onepk(v2,v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
ALTER TABLE onepk MODIFY COLUMN v1 BIGINT NOT NULL;
SQL
# index contents unchanged: header + 5 (v1, pk1) entries
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt index cat onepk idx_v2v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,v1,pk1" ]] || false
[[ "$output" =~ "51,99,1" ]] || false
[[ "$output" =~ "55,11,2" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
[[ "$output" =~ 'KEY `idx_v2v1` (`v2`,`v1`)' ]] || false
# index-backed lookup still works after the constraint change
run dolt sql -q "SELECT * FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
@test "index: ALTER TABLE RENAME COLUMN" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v2v1 ON onepk(v2,v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
ALTER TABLE onepk RENAME COLUMN v1 TO vnew;
SQL
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "vnew,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt index cat onepk idx_v2v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,vnew,pk1" ]] || false
[[ "$output" =~ "51,99,1" ]] || false
[[ "$output" =~ "55,11,2" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`vnew`)' ]] || false
[[ "$output" =~ 'KEY `idx_v2v1` (`v2`,`vnew`)' ]] || false
run dolt sql -q "SELECT * FROM onepk WHERE vnew = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,vnew,v2" ]] || false
[[ "$output" =~ "5,77,53" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
@test "index: ALTER TABLE DROP COLUMN" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
CREATE INDEX idx_v2 ON onepk(v2);
CREATE INDEX idx_v1v2 ON onepk(v1,v2);
CREATE INDEX idx_v2v1 ON onepk(v2,v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
ALTER TABLE onepk DROP COLUMN v1;
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v2(v2)" ]] || false
! [[ "$output" =~ "idx_v1(v1)" ]] || false
! [[ "$output" =~ "idx_v1v2(v1, v2)" ]] || false
! [[ "$output" =~ "idx_v2v1(v2, v1)" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v2` (`v2`)' ]] || false
! [[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
! [[ "$output" =~ 'KEY `idx_v2v1` (`v2`,`v1`)' ]] || false
! [[ "$output" =~ 'KEY `idx_v1v2` (`v1`,`v2`)' ]] || false
run dolt index cat onepk idx_v2 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v2,pk1" ]] || false
[[ "$output" =~ "51,1" ]] || false
[[ "$output" =~ "55,2" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt sql -q "SELECT * FROM onepk WHERE v2 = 53" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v2" ]] || false
[[ "$output" =~ "5,53" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
@test "index: Two commits, then check previous" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
SQL
dolt add -A
dolt commit -m "test commit"
dolt sql -q "UPDATE onepk SET v1 = v1 - 1 WHERE pk1 >= 3"
dolt add -A
dolt commit -m "test commmit 2"
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "21,4" ]] || false
[[ "$output" =~ "76,5" ]] || false
[[ "$output" =~ "87,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 76" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
dolt checkout -b last_commit HEAD~1
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 76" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}
@test "index: UNIQUE INSERT, UPDATE, REPLACE" {
dolt sql <<SQL
CREATE UNIQUE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);
SQL
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'UNIQUE KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "INSERT INTO onepk VALUES (6, 77, 56)"
[ "$status" -eq "1" ]
[[ "$output" =~ "UNIQUE" ]] || false
run dolt sql -q "INSERT INTO onepk VALUES (6, 78, 56)"
[ "$status" -eq "0" ]
run dolt sql -q "UPDATE onepk SET v1 = 22 WHERE pk1 = 1"
[ "$status" -eq "1" ]
[[ "$output" =~ "UNIQUE" ]] || false
run dolt sql -q "UPDATE onepk SET v1 = 23 WHERE pk1 = 1"
[ "$status" -eq "0" ]
run dolt sql -q "REPLACE INTO onepk VALUES (2, 88, 55)"
[ "$status" -eq "1" ]
[[ "$output" =~ "UNIQUE" ]] || false
run dolt sql -q "REPLACE INTO onepk VALUES (2, 89, 55)"
[ "$status" -eq "0" ]
}
@test "index: UNIQUE allows multiple NULL values" {
dolt sql <<SQL
CREATE TABLE test (
pk BIGINT PRIMARY KEY,
v1 BIGINT,
UNIQUE INDEX (v1)
);
CREATE TABLE test2 (
pk BIGINT PRIMARY KEY,
v1 BIGINT,
v2 BIGINT,
UNIQUE INDEX (v1, v2)
);
INSERT INTO test VALUES (0, NULL), (1, NULL), (2, NULL);
INSERT INTO test2 VALUES (0, NULL, NULL), (1, NULL, NULL), (2, 1, NULL), (3, 1, NULL), (4, NULL, 1), (5, NULL, 1);
SQL
run dolt sql -q "SELECT * FROM test" -r=json
[ "$status" -eq "0" ]
[[ "$output" =~ '{"rows": [{"pk":0},{"pk":1},{"pk":2}]}' ]] || false
run dolt sql -q "SELECT * FROM test WHERE v1 IS NULL" -r=json
[ "$status" -eq "0" ]
[[ "$output" =~ '{"rows": [{"pk":0},{"pk":1},{"pk":2}]}' ]] || false
run dolt sql -q "SELECT * FROM test2" -r=json
[ "$status" -eq "0" ]
[[ "$output" =~ '{"rows": [{"pk":0},{"pk":1},{"pk":2,"v1":1},{"pk":3,"v1":1},{"pk":4,"v2":1},{"pk":5,"v2":1}]}' ]] || false
}
@test "index: dolt table import -u" {
dolt sql -q "CREATE INDEX idx_v1 ON onepk(v1)"
dolt table import -u onepk `batshelper index_onepk.csv`
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
@test "index: dolt table import -r" {
dolt sql <<SQL
CREATE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 98, 50), (2, 10, 54), (3, 87, 51), (4, 21, 53), (5, 76, 52);
SQL
dolt table import -r onepk `batshelper index_onepk.csv`
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
@test "index: UNIQUE dolt table import -u" {
dolt sql -q "CREATE UNIQUE INDEX idx_v1 ON onepk(v1)"
dolt table import -u onepk `batshelper index_onepk.csv`
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'UNIQUE KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
dolt sql <<SQL
DELETE FROM onepk;
INSERT INTO onepk VALUES (6, 11, 55);
SQL
run dolt table import -u onepk `batshelper index_onepk.csv`
[ "$status" -eq "1" ]
[[ "$output" =~ "UNIQUE" ]] || false
}
@test "index: UNIQUE dolt table import -r" {
dolt sql <<SQL
CREATE UNIQUE INDEX idx_v1 ON onepk(v1);
INSERT INTO onepk VALUES (1, 98, 50), (2, 10, 54), (3, 87, 51), (4, 21, 53), (5, 76, 52);
SQL
dolt table import -r onepk `batshelper index_onepk.csv`
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,2" ]] || false
[[ "$output" =~ "22,4" ]] || false
[[ "$output" =~ "77,5" ]] || false
[[ "$output" =~ "88,3" ]] || false
[[ "$output" =~ "99,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'UNIQUE KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT pk1 FROM onepk WHERE v1 = 77" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1" ]] || false
[[ "$output" =~ "5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
dolt sql -q "DELETE FROM onepk"
run dolt table import -r onepk `batshelper index_onepk_non_unique.csv`
[ "$status" -eq "1" ]
[[ "$output" =~ "UNIQUE" ]] || false
}
@test "index: Merge without conflicts" {
dolt sql -q "CREATE INDEX idx_v1 ON onepk(v1);"
dolt add -A
dolt commit -m "baseline commit"
dolt checkout -b other
dolt checkout master
dolt sql -q "INSERT INTO onepk VALUES (1, 11, 101), (2, 22, 202), (3, 33, 303), (4, 44, 404)"
dolt add -A
dolt commit -m "master changes"
dolt checkout other
dolt sql -q "INSERT INTO onepk VALUES (5, 55, 505), (6, 66, 606), (7, 77, 707), (8, 88, 808)"
dolt add -A
dolt commit -m "other changes"
dolt checkout master
dolt merge other
run dolt index ls onepk
[ "$status" -eq "0" ]
[[ "$output" =~ "idx_v1(v1)" ]] || false
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "11,1" ]] || false
[[ "$output" =~ "22,2" ]] || false
[[ "$output" =~ "33,3" ]] || false
[[ "$output" =~ "44,4" ]] || false
[[ "$output" =~ "55,5" ]] || false
[[ "$output" =~ "66,6" ]] || false
[[ "$output" =~ "77,7" ]] || false
[[ "$output" =~ "88,8" ]] || false
[[ "${#lines[@]}" = "9" ]] || false
run dolt schema show onepk
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
run dolt sql -q "SELECT * FROM onepk WHERE v1 = 55" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk1,v1,v2" ]] || false
[[ "$output" =~ "5,55,505" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}
@test "index: Merge abort" {
dolt sql -q "CREATE INDEX idx_v1 ON onepk(v1);"
dolt add -A
dolt commit -m "baseline commit"
dolt checkout -b other
dolt checkout master
dolt sql -q "INSERT INTO onepk VALUES (1, 11, 101), (2, 22, 202), (3, -33, 33), (4, 44, 404)"
dolt add -A
dolt commit -m "master changes"
dolt checkout other
dolt sql -q "INSERT INTO onepk VALUES (1, -11, 11), (2, -22, 22), (3, -33, 33), (4, -44, 44), (5, -55, 55)"
dolt add -A
dolt commit -m "other changes"
dolt checkout master
dolt merge other
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "-55,5" ]] || false
[[ "$output" =~ "-33,3" ]] || false
[[ "$output" =~ "11,1" ]] || false
[[ "$output" =~ "22,2" ]] || false
[[ "$output" =~ "44,4" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
dolt merge --abort
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "-33,3" ]] || false
[[ "$output" =~ "11,1" ]] || false
[[ "$output" =~ "22,2" ]] || false
[[ "$output" =~ "44,4" ]] || false
[[ "${#lines[@]}" = "5" ]] || false
}
@test "index: Merge resolving all OURS" {
dolt sql -q "CREATE INDEX idx_v1 ON onepk(v1);"
dolt add -A
dolt commit -m "baseline commit"
dolt checkout -b other
dolt checkout master
dolt sql -q "INSERT INTO onepk VALUES (1, 11, 101), (2, 22, 202), (3, -33, 33), (4, 44, 404)"
dolt add -A
dolt commit -m "master changes"
dolt checkout other
dolt sql -q "INSERT INTO onepk VALUES (1, -11, 11), (2, -22, 22), (3, -33, 33), (4, -44, 44), (5, -55, 55)"
dolt add -A
dolt commit -m "other changes"
dolt checkout master
dolt merge other
dolt conflicts resolve --ours onepk
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "-55,5" ]] || false
[[ "$output" =~ "-33,3" ]] || false
[[ "$output" =~ "11,1" ]] || false
[[ "$output" =~ "22,2" ]] || false
[[ "$output" =~ "44,4" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
}
@test "index: Merge resolving all THEIRS" {
dolt sql -q "CREATE INDEX idx_v1 ON onepk(v1);"
dolt add -A
dolt commit -m "baseline commit"
dolt checkout -b other
dolt checkout master
dolt sql -q "INSERT INTO onepk VALUES (1, 11, 101), (2, 22, 202), (3, -33, 33), (4, 44, 404)"
dolt add -A
dolt commit -m "master changes"
dolt checkout other
dolt sql -q "INSERT INTO onepk VALUES (1, -11, 11), (2, -22, 22), (3, -33, 33), (4, -44, 44), (5, -55, 55)"
dolt add -A
dolt commit -m "other changes"
dolt checkout master
dolt merge other
dolt conflicts resolve --theirs onepk
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "-55,5" ]] || false
[[ "$output" =~ "-44,4" ]] || false
[[ "$output" =~ "-33,3" ]] || false
[[ "$output" =~ "-22,2" ]] || false
[[ "$output" =~ "-11,1" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
}
@test "index: Merge individually resolving OURS/THEIRS" {
dolt sql -q "CREATE INDEX idx_v1 ON onepk(v1);"
dolt add -A
dolt commit -m "baseline commit"
dolt checkout -b other
dolt checkout master
dolt sql -q "INSERT INTO onepk VALUES (1, 11, 101), (2, 22, 202), (3, -33, 33), (4, 44, 404)"
dolt add -A
dolt commit -m "master changes"
dolt checkout other
dolt sql -q "INSERT INTO onepk VALUES (1, -11, 11), (2, -22, 22), (3, -33, 33), (4, -44, 44), (5, -55, 55)"
dolt add -A
dolt commit -m "other changes"
dolt checkout master
dolt merge other
dolt conflicts resolve onepk 4
dolt sql <<SQL
UPDATE onepk SET v1 = -11, v2 = 11 WHERE pk1 = 1;
UPDATE onepk SET v1 = -22, v2 = 22 WHERE pk1 = 2;
SQL
dolt conflicts resolve onepk 1 2
run dolt index cat onepk idx_v1 -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk1" ]] || false
[[ "$output" =~ "-55,5" ]] || false
[[ "$output" =~ "-33,3" ]] || false
[[ "$output" =~ "-22,2" ]] || false
[[ "$output" =~ "-11,1" ]] || false
[[ "$output" =~ "44,4" ]] || false
[[ "${#lines[@]}" = "6" ]] || false
}
@test "index: Merge violates UNIQUE" {
dolt sql -q "CREATE UNIQUE INDEX idx_v1 ON onepk(v1);"
dolt add -A
dolt commit -m "baseline commit"
dolt checkout -b other
dolt checkout master
dolt sql -q "INSERT INTO onepk VALUES (1, 11, 101), (2, 22, 202), (3, 33, 303), (4, 44, 404)"
dolt add -A
dolt commit -m "master changes"
dolt checkout other
dolt sql -q "INSERT INTO onepk VALUES (1, 11, 101), (2, 22, 202), (3, 33, 303), (5, 44, 505)"
dolt add -A
dolt commit -m "other changes"
dolt checkout master
run dolt merge other
[ "$status" -eq "1" ]
[[ "$output" =~ "UNIQUE" ]] || false
}
@test "index: Merge into branch with index from branch without index" {
dolt sql <<SQL
CREATE TABLE test (
pk bigint PRIMARY KEY,
v1 bigint,
v2 bigint
);
INSERT INTO test VALUES (1, 1, 1);
SQL
dolt add -A
dolt commit -m "baseline"
dolt branch other
dolt sql -q "INSERT INTO test VALUES (2, 2, 2);"
dolt add -A
dolt commit -m "baseline"
dolt checkout other
dolt sql -q "CREATE INDEX abc ON test (v1)"
dolt add -A
dolt commit -m "added index"
dolt merge master
run dolt sql -q "select * from test where v1 = 2" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,v1,v2" ]] || false
[[ "$output" =~ "2,2,2" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt index cat test abc -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "v1,pk" ]] || false
[[ "$output" =~ "1,1" ]] || false
[[ "$output" =~ "2,2" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
}
@test "index: Overwriting index auto-generated by foreign key" {
dolt sql <<SQL
CREATE TABLE parent (
pk bigint PRIMARY KEY,
v1 bigint,
INDEX (v1)
);
CREATE TABLE child (
pk varchar(10) PRIMARY KEY,
parent_value bigint,
FOREIGN KEY (parent_value)
REFERENCES parent(v1)
);
CREATE TABLE child_idx (
pk varchar(10) PRIMARY KEY,
parent_value bigint,
INDEX (parent_value),
FOREIGN KEY (parent_value)
REFERENCES parent(v1)
);
CREATE TABLE child_unq (
pk varchar(10) PRIMARY KEY,
parent_value bigint,
FOREIGN KEY (parent_value)
REFERENCES parent(v1)
);
CREATE TABLE child_non_unq (
pk varchar(10) PRIMARY KEY,
parent_value bigint,
FOREIGN KEY (parent_value)
REFERENCES parent(v1)
);
INSERT INTO parent VALUES (1, 1), (2, 2), (3, 3), (4, NULL), (5, 5), (6, 6), (7, 7);
INSERT INTO child VALUES ('1', 1), ('2', NULL), ('3', 3), ('4', 3), ('5', 5);
INSERT INTO child_idx VALUES ('1', 1), ('2', NULL), ('3', 3), ('4', 3), ('5', 5);
INSERT INTO child_unq VALUES ('1', 1), ('2', NULL), ('3', 3), ('4', NULL), ('5', 5);
INSERT INTO child_non_unq VALUES ('1', 1), ('2', NULL), ('3', 3), ('4', 3), ('5', 5);
SQL
# Check index creation
dolt sql -q "CREATE INDEX abc ON child (parent_value);"
run dolt sql -q "CREATE INDEX abc_idx ON child_idx (parent_value);"
[ "$status" -eq "1" ]
[[ "$output" =~ "duplicate" ]] || false
dolt sql -q "CREATE UNIQUE INDEX abc_unq ON child_unq (parent_value);"
run dolt sql -q "CREATE UNIQUE INDEX abc_non_unq ON child_non_unq (parent_value);"
[ "$status" -eq "1" ]
[[ "$output" =~ "UNIQUE constraint violation" ]] || false
# Verify correct index present in schema
run dolt schema show child
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `abc` (`parent_value`)' ]] || false
! [[ "$output" =~ 'KEY `parent_value` (`parent_value`)' ]] || false
run dolt schema show child_idx
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `parent_value` (`parent_value`)' ]] || false
! [[ "$output" =~ 'KEY `abc_idx` (`parent_value`)' ]] || false
run dolt schema show child_unq
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `abc_unq` (`parent_value`)' ]] || false
! [[ "$output" =~ 'KEY `parent_value` (`parent_value`)' ]] || false
run dolt schema show child_non_unq
[ "$status" -eq "0" ]
[[ "$output" =~ 'KEY `parent_value` (`parent_value`)' ]] || false
! [[ "$output" =~ 'KEY `abc_non_unq` (`parent_value`)' ]] || false
# Run SELECT that uses index to ensure it's still working
run dolt sql -q "SELECT * FROM child WHERE parent_value = 5" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,parent_value" ]] || false
[[ "$output" =~ "5,5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT * FROM child_idx WHERE parent_value = 5" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,parent_value" ]] || false
[[ "$output" =~ "5,5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT * FROM child_unq WHERE parent_value = 5" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,parent_value" ]] || false
[[ "$output" =~ "5,5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT * FROM child_non_unq WHERE parent_value = 5" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,parent_value" ]] || false
[[ "$output" =~ "5,5" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
# INSERT against index
dolt sql -q "INSERT INTO child VALUES ('6', 5)"
dolt sql -q "INSERT INTO child_idx VALUES ('6', 5)"
run dolt sql -q "INSERT INTO child_unq VALUES ('6', 5)"
[ "$status" -eq "1" ]
[[ "$output" =~ "UNIQUE constraint violation" ]] || false
dolt sql -q "INSERT INTO child_non_unq VALUES ('6', 5)"
# INSERT against foreign key
run dolt sql -q "INSERT INTO child VALUES ('9', 9)"
[ "$status" -eq "1" ]
[[ "$output" =~ "foreign key violation" ]] || false
run dolt sql -q "INSERT INTO child_idx VALUES ('9', 9)"
[ "$status" -eq "1" ]
[[ "$output" =~ "foreign key violation" ]] || false
run dolt sql -q "INSERT INTO child_unq VALUES ('9', 9)"
[ "$status" -eq "1" ]
[[ "$output" =~ "foreign key violation" ]] || false
run dolt sql -q "INSERT INTO child_non_unq VALUES ('9', 9)"
[ "$status" -eq "1" ]
[[ "$output" =~ "foreign key violation" ]] || false
}
|
#!/usr/bin/env bash
killall ruby
source ~/.bash_profile
rbenv shell 2.2.1
git checkout master
git pull
bundle install
RAILS_ENV=production bundle exec rake db:migrate
RAILS_ENV=production bundle exec rake assets:precompile
puma -e production 1> log/console.log 2> log/console_err.log &
|
<gh_stars>1-10
const { pause, newAccumulator, newDummyChannel } = require('../common')
const { COMMANDS } = require('../../lib/constants')
const dut = require('../../lib/clients/pubsub')
// Unit tests for the pubsub client: subscription wire format, message
// delivery, de-duplication, ordering, filtering, fan-out, and unsubscribe.
// Each test drives the client through a dummy in-memory channel and waits
// briefly (pause) for the async stream plumbing to flush.
describe('Pubsub client', () => {
test('writes subscriptions to remote', async () => {
const channel = newDummyChannel()
const accumulator = newAccumulator()
channel.remote.readable.pipe(accumulator)
const pubsubClient = dut.create(channel)
// three subscriptions exercising default, offset-only, and offset+filter options
pubsubClient.subscribe('topic1', () => {})
pubsubClient.subscribe('topic2', () => {}, { offset: 10 })
pubsubClient.subscribe('topic3', () => {}, { offset: 10, filter: 'x' })
await pause(10)
pubsubClient.destroy()
// SUBSCRIBE commands carry sequential ids and default offset 0 / no filter
expect(accumulator.data()).toStrictEqual([{
t: COMMANDS.SUBSCRIBE,
m: {
id: 1,
filter: undefined,
offset: 0,
topic: 'topic1'
}
}, {
t: COMMANDS.SUBSCRIBE,
m: {
id: 2,
filter: undefined,
offset: 10,
topic: 'topic2'
}
}, {
t: COMMANDS.SUBSCRIBE,
m: {
id: 3,
filter: 'x',
offset: 10,
topic: 'topic3'
}
}
])
})
test('receives messages from remote', async () => {
const channel = newDummyChannel()
const pubsubClient = dut.create(channel)
const received = []
pubsubClient.subscribe('topic1', (m) => { received.push(m) })
// messages written on the remote side arrive at the local subscriber
channel.remote.writable.write({ t: 'topic1', o: 0, m: 'message1' })
channel.remote.writable.write({ t: 'topic1', o: 1, m: 'message2' })
await pause(10)
pubsubClient.destroy()
expect(received).toStrictEqual(['message1', 'message2'])
})
test('receives messages from remote and skips duplicates', async () => {
const channel = newDummyChannel()
const pubsubClient = dut.create(channel)
const received = []
pubsubClient.subscribe('topic1', (m) => { received.push(m) })
// same offset delivered twice -> only the first copy is handed to the subscriber
channel.remote.writable.write({ t: 'topic1', o: 0, m: 'message1' })
channel.remote.writable.write({ t: 'topic1', o: 0, m: 'message1' })
await pause(10)
pubsubClient.destroy()
expect(received).toStrictEqual(['message1'])
})
test('receives messages from remote only in ascending order', async () => {
const channel = newDummyChannel()
const pubsubClient = dut.create(channel)
const received = []
pubsubClient.subscribe('topic1', (m) => { received.push(m) })
// an offset lower than one already seen is dropped
channel.remote.writable.write({ t: 'topic1', o: 5, m: 'message5' })
channel.remote.writable.write({ t: 'topic1', o: 0, m: 'message0' })
await pause(10)
pubsubClient.destroy()
expect(received).toStrictEqual(['message5'])
})
test('filters received messages from remote', async () => {
const channel = newDummyChannel()
const pubsubClient = dut.create(channel)
const received = []
// filter is a JS expression evaluated per message payload `m`
pubsubClient.subscribe('topic1', (m) => { received.push(m) }, { filter: 'return m===1' })
channel.remote.writable.write({ t: 'topic1', o: 0, m: 0 })
channel.remote.writable.write({ t: 'topic1', o: 1, m: 1 })
channel.remote.writable.write({ t: 'topic1', o: 2, m: 2 })
await pause(10)
pubsubClient.destroy()
expect(received).toStrictEqual([1])
})
test('subscribes multiple times to the same topic', async () => {
const channel = newDummyChannel()
const pubsubClient = dut.create(channel)
const received1 = []
const received2 = []
const received3 = []
// each subscriber applies its own filter independently over the same stream
pubsubClient.subscribe('topic1', (m) => { received1.push(m) }, { filter: 'return m===1' })
pubsubClient.subscribe('topic1', (m) => { received2.push(m) }, { filter: 'return m===0' })
pubsubClient.subscribe('topic1', (m) => { received3.push(m) })
channel.remote.writable.write({ t: 'topic1', o: 0, m: 0 })
channel.remote.writable.write({ t: 'topic1', o: 1, m: 1 })
channel.remote.writable.write({ t: 'topic1', o: 2, m: 2 })
await pause(10)
pubsubClient.destroy()
expect(received1).toStrictEqual([1])
expect(received2).toStrictEqual([0])
expect(received3).toStrictEqual([0, 1, 2])
})
test('unsubscribes', async () => {
const channel = newDummyChannel()
const pubsubClient = dut.create(channel)
const received1 = []
pubsubClient.subscribe('topic1', (m) => { received1.push(m) })
channel.remote.writable.write({ t: 'topic1', o: 0, m: 0 })
await pause(10)
// messages written after unsubscribe must not be delivered
pubsubClient.unsubscribe('topic1')
channel.remote.writable.write({ t: 'topic1', o: 1, m: 1 })
channel.remote.writable.write({ t: 'topic1', o: 2, m: 2 })
await pause(10)
pubsubClient.destroy()
expect(received1).toStrictEqual([0])
})
})
|
<filename>extension/src/main/java/org/justinnk/masonssa/extension/Action.java
/*
* Copyright 2021 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.justinnk.masonssa.extension;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

import sim.engine.TentativeStep;
/**
 * A class representing a single behaviour rule for an agent consisting of a guard expression, an
 * effect and a rate.
 */
public class Action {

  /**
   * Holds a list of all actions created. This list has to be cleared in the SimStates finish()
   * method when using the GUI.
   */
  public static List<Action> ActionInstances = new ArrayList<>();

  /** Counts the number of actions to give them unique names if no name is supplied. */
  public static int ActionInstanceCounter = 0;

  /** The guard expression for this action. */
  private Guard guard = null;

  /** The function this action executes when activated. */
  private Effect effect = null;

  /** The rate function that governs the sojourn time between executions of this action. */
  private RateExpression rate = null;

  /** A name for easy identification. */
  private String name = "";

  /** The owner of this action. */
  private Agent owner = null;

  /**
   * The value of the rate expression when it was evaluated last. This is used for calculating the
   * new rate in the NRM.
   */
  private double currentRate = 0.0;

  /**
   * The value of the guard expression when it was evaluated last. This is used for recalculating
   * the rate sum in the ODM.
   */
  private boolean currentGuard = false;

  /**
   * The time stamp resulting from the last evaluation of the rate expression. This is used for
   * calculating the new rate in the NRM.
   */
  private double currentTimestamp = Double.NEGATIVE_INFINITY;

  /**
   * The currently scheduled event for this action. This needs to be tracked for stopping the last
   * event when rescheduling in the NRM.
   */
  private TentativeStep nextEvent = null;

  /**
   * Define a new behaviour rule for an agents. Should not be instantiated outside a class
   * inheriting from agent.
   *
   * @param precondition The guard function for determining whether this action is applicable in the
   *     current simulation state.
   * @param effect The effect function that is executed when the action is applied.
   * @param rate The rate of this action governing the sojourn time between applications.
   */
  public Action(Guard precondition, Effect effect, RateExpression rate) {
    init(precondition, effect, rate);
  }

  /**
   * Define a new behaviour rule for an agents. Should not be instantiated outside of a class
   * inheriting from agent.
   *
   * @param precondition The guard function for determining whether this action is applicable in the
   *     current simulation state.
   * @param effect The effect function that is executed when the action is applied.
   * @param rate The rate of this action governing the sojourn time between applications.
   * @param name The name of this action. Must be unique within an agent.
   */
  public Action(Guard precondition, Effect effect, RateExpression rate, String name) {
    init(precondition, effect, rate);
    this.name = name;
  }

  /**
   * Internal initialisation routine that sets the name properly and also adds this instance to the
   * instance list.
   */
  private void init(Guard precondition, Effect effect, RateExpression rate) {
    this.guard = precondition;
    this.effect = effect;
    this.rate = rate;
    // default name from the global counter; overwritten by the named constructor
    this.name = "Action" + ActionInstanceCounter;
    ActionInstanceCounter++;
    ActionInstances.add(this);
  }

  /** Replace the next event by newNextAction. Update the currentTimestamp. */
  public void resetNextEvent(TentativeStep newNextAction, double timestamp) {
    if (this.nextEvent != null) {
      this.nextEvent.stop();
    }
    this.nextEvent = newNextAction;
    this.currentTimestamp = timestamp;
  }

  /**
   * If there is an event scheduled for this action, stop it. Resets the currentTimestamp to
   * Double.NEGATIVE_INFINITY and currentRate to 0.0
   */
  public void stopNextEvent() {
    if (this.nextEvent != null) {
      this.nextEvent.stop();
    }
    this.currentTimestamp = Double.NEGATIVE_INFINITY;
    this.currentRate = 0.0;
  }

  /**
   * Set the agent that defines this action as owner.
   *
   * @param owner The agent that owns this action.
   */
  public void setOwner(Agent owner) {
    this.owner = owner;
  }

  /** @return The owner of this action. */
  public Agent getOwner() {
    return this.owner;
  }

  /** @return The name of this action. */
  public String getName() {
    return this.name;
  }

  /**
   * Evaluate the guard expression for this action in the current simulation state. This will also
   * update the currentGuard to the new value.
   *
   * @return whether the condition is met in the current state.
   */
  public boolean evaluateCondition() {
    boolean condition = this.guard.evaluate();
    this.currentGuard = condition;
    return condition;
  }

  /**
   * Calculate the rate function for this action in the current simulation state. This will also
   * update the currentRate to the new value.
   *
   * @return the new rate at which this action is executed.
   */
  public double calculateRate() {
    double rate = this.rate.rate();
    this.currentRate = rate;
    return rate;
  }

  /** Execute the effect of this action. */
  public void applyEffect() {
    this.effect.apply();
  }

  /**
   * @return The value of the rate expression when it was evaluated last. This is used for
   *     calculating the new rate in the NRM.
   */
  public double getCurrentRate() {
    return this.currentRate;
  }

  /**
   * @return The value of the guard expression when it was evaluated last. This is used for
   *     recalculating the rate sum in the ODM.
   */
  public boolean getCurrentGuard() {
    return this.currentGuard;
  }

  /**
   * @return The time stamp resulting from the last evaluation of the rate expression. This is used
   *     for calculating the new rate in the NRM.
   */
  public double getCurrentTimestamp() {
    return this.currentTimestamp;
  }

  /** Converts this action to a human readable string for debugging. */
  @Override
  public String toString() {
    return "Agent " + this.owner + "s " + this.name;
  }

  /*
   * Actions are uniquely identified by their name and their owner. The
   * Objects-based implementations below are behaviour-identical to the
   * previously generated (Eclipse) versions: Objects.hash uses the same
   * 31-based accumulation with 0 for null elements.
   */
  @Override
  public int hashCode() {
    return Objects.hash(name, owner);
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) return true;
    if (obj == null || getClass() != obj.getClass()) return false;
    Action other = (Action) obj;
    return Objects.equals(name, other.name) && Objects.equals(owner, other.owner);
  }
}
|
import * as R from 'ramda'
import xs from 'xstream'
import virtualize from 'snabbdom-virtualize/strings'
import { JSDOM } from 'jsdom'
import { htmlEncode } from 'js-htmlencode'
import { html, head, body, title, script, style, link, div, meta } from '@cycle/dom'
import { classes, Styles } from '../client/styles'
// Page title, falling back to a default when package.json declares none.
const titleText = require('../../package.json').title || 'Unicycle'
// prepare dynamically generated favicons for injection
const favicons = JSON.parse(require('fs').readFileSync('build/static/favicons.json').toString()).html
// Shared DOM context used by snabbdom-virtualize to parse raw HTML strings.
const htmlContext = new JSDOM('<html></html>')
// Converts a raw HTML string into a snabbdom vnode.
// Parameter renamed from `html` to avoid shadowing the `html` helper imported from @cycle/dom.
const virtualizeHTML = markup => virtualize(markup, { context: htmlContext.window.document })
/**
 * Server-side boilerplate component: wraps the application's root vnode in a
 * full HTML page (head with title/viewport/favicons, body with bundle script
 * and rendered CSS) and rewrites relative HTTP request URLs to absolute ones
 * using the request context's protocol and host.
 */
export const Boilerplate = (sources, ctx) => {
  const headChildren = R.concat(
    [
      title(htmlEncode(titleText)),
      meta({ props: { name: 'viewport', content: 'width=device-width, initial-scale=1, maximum-scale=1' } })
    ],
    // append favicons to HTML head
    R.map(virtualizeHTML, favicons)
  )

  // Builds the complete page tree around the app's root vnode.
  const renderPage = innerHTML =>
    html([
      head(headChildren),
      body({ class: classes(Styles.Body) }, [
        div('#main', [innerHTML]),
        script({
          attrs: {
            type: 'text/javascript',
            src: '/static/bundle.js'
          }
        }),
        style('#css', [`${Styles.render()}`])
      ])
    ])

  // wrap relative HTTP requests with protocol and host
  const absolutize = request =>
    R.test(/^\//, request.url)
      ? { ...request, url: `${ctx.protocol}://${ctx.host}${request.url}` }
      : request

  return {
    DOM: sources.DOM.take(1).map(renderPage),
    HTTP: sources.HTTP.map(absolutize),
    History: sources.History,
    Time: xs.empty()
  }
}
export default Boilerplate
|
test_storage() {
# Integration test for LXD storage pools and volumes: spawns a dedicated LXD
# daemon, then exercises pool/volume create, edit, attach/detach, rename and
# delete across the configured storage backend.
ensure_import_testimage
# shellcheck disable=2039
local LXD_STORAGE_DIR lxd_backend
lxd_backend=$(storage_backend "$LXD_DIR")
# Spawn a throwaway LXD daemon in its own directory for this test.
LXD_STORAGE_DIR=$(mktemp -d -p "${TEST_DIR}" XXXXXXXXX)
chmod +x "${LXD_STORAGE_DIR}"
spawn_lxd "${LXD_STORAGE_DIR}" false
# edit storage and pool description
# shellcheck disable=2039
local storage_pool storage_volume
storage_pool="lxdtest-$(basename "${LXD_DIR}")-pool"
storage_volume="${storage_pool}-vol"
lxc storage create "$storage_pool" "$lxd_backend"
# Round-trip the pool description through `lxc storage edit` and verify it stuck.
lxc storage show "$storage_pool" | sed 's/^description:.*/description: foo/' | lxc storage edit "$storage_pool"
lxc storage show "$storage_pool" | grep -q 'description: foo'
lxc storage volume create "$storage_pool" "$storage_volume"
# Test that resizing/applying a quota to a storage volume of type container fails.
! lxc storage volume set "$storage_pool" "$storage_volume" size 200MB
# Test setting description on a storage volume
lxc storage volume show "$storage_pool" "$storage_volume" | sed 's/^description:.*/description: bar/' | lxc storage volume edit "$storage_pool" "$storage_volume"
lxc storage volume show "$storage_pool" "$storage_volume" | grep -q 'description: bar'
# Clean up the description-test pool and volume.
lxc storage volume delete "$storage_pool" "$storage_volume"
lxc storage delete "$storage_pool"
# Test resizing btrfs-formatted block volumes (only meaningful on block-backed
# backends, hence the lvm/ceph guard).
if [ "$lxd_backend" = "lvm" ] || [ "$lxd_backend" = "ceph" ]; then
# shellcheck disable=2039
local btrfs_storage_pool btrfs_storage_volume
btrfs_storage_pool="lxdtest-$(basename "${LXD_DIR}")-pool-btrfs"
# Derive the volume name from the btrfs pool (was mistakenly derived from the
# earlier, already-deleted ${storage_pool}).
btrfs_storage_volume="${btrfs_storage_pool}-vol"
lxc storage create "$btrfs_storage_pool" "$lxd_backend" volume.block.filesystem=btrfs volume.size=200MB
lxc storage volume create "$btrfs_storage_pool" "$btrfs_storage_volume"
lxc storage volume show "$btrfs_storage_pool" "$btrfs_storage_volume"
# Growing a btrfs-formatted block volume should succeed.
lxc storage volume set "$btrfs_storage_pool" "$btrfs_storage_volume" size 256MB
lxc storage volume delete "$btrfs_storage_pool" "$btrfs_storage_volume"
# Test generation of unique UUID.
lxc init testimage uuid1 -s "lxdtest-$(basename "${LXD_DIR}")-pool-btrfs"
POOL="lxdtest-$(basename "${LXD_DIR}")-pool-btrfs"
lxc copy uuid1 uuid2
lxc start uuid1
lxc start uuid2
lxc stop --force uuid1
lxc stop --force uuid2
# A copied container's filesystem must receive its own UUID.
if [ "$lxd_backend" = "lvm" ]; then
[ "$(blkid -s UUID -o value -p /dev/"${POOL}"/containers_uuid1)" != "$(blkid -s UUID -o value -p /dev/"${POOL}"/containers_uuid2)" ]
elif [ "$lxd_backend" = "ceph" ]; then
[ "$(blkid -s UUID -o value -p /dev/rbd/"${POOL}"/container_uuid1)" != "$(blkid -s UUID -o value -p /dev/rbd/"${POOL}"/container_uuid2)" ]
fi
lxc delete --force uuid1
lxc delete --force uuid2
lxc image delete testimage
lxc storage delete "$btrfs_storage_pool"
fi
ensure_import_testimage
# Run the per-backend pool creation tests inside a subshell so that
# reassigning LXD_DIR to the throwaway daemon does not leak out.
(
set -e
# shellcheck disable=2030
LXD_DIR="${LXD_STORAGE_DIR}"
# shellcheck disable=SC1009
if [ "$lxd_backend" = "zfs" ]; then
# Create loop file zfs pool.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" zfs
# Check that we can't create a loop file in a non-LXD owned location.
INVALID_LOOP_FILE="$(mktemp -p "${LXD_DIR}" XXXXXXXXX)-invalid-loop-file"
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool1" zfs source="${INVALID_LOOP_FILE}"
# Let LXD use an already existing dataset.
zfs create -p -o mountpoint=none "lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool7" zfs source="lxdtest-$(basename "${LXD_DIR}")-pool1/existing-dataset-as-pool"
# Let LXD use an already existing storage pool.
configure_loop_device loop_file_4 loop_device_4
# shellcheck disable=SC2154
zpool create -f -m none -O compression=on "lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool" "${loop_device_4}"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool9" zfs source="lxdtest-$(basename "${LXD_DIR}")-pool9-existing-pool"
# Let LXD create a new dataset and use as pool.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool8" zfs source="lxdtest-$(basename "${LXD_DIR}")-pool1/non-existing-dataset-as-pool"
# Create device backed zfs pool
configure_loop_device loop_file_1 loop_device_1
# shellcheck disable=SC2154
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool2" zfs source="${loop_device_1}"
# Test that no invalid zfs storage pool configuration keys can be set
# (lvm.* keys and fixed-size volume keys must be rejected on zfs).
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-zfs-pool-config" zfs lvm.thinpool_name=bla
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-zfs-pool-config" zfs lvm.use_thinpool=false
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-zfs-pool-config" zfs lvm.vg_name=bla
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-zfs-pool-config" zfs volume.block.filesystem=ext4
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-zfs-pool-config" zfs volume.block.mount_options=discard
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-zfs-pool-config" zfs volume.size=2GB
# Test that all valid zfs storage pool configuration keys can be set.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config" zfs volume.zfs.remove_snapshots=true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config" zfs volume.zfs.use_refquota=true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config" zfs zfs.clone_copy=true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config" zfs zfs.pool_name="lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config" zfs rsync.bwlimit=1024
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-zfs-pool-config"
fi
if [ "$lxd_backend" = "btrfs" ]; then
# Create loop file btrfs pool.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool3" btrfs
# Create device backed btrfs pool.
configure_loop_device loop_file_2 loop_device_2
# shellcheck disable=SC2154
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool4" btrfs source="${loop_device_2}"
# Check that we cannot create storage pools inside of ${LXD_DIR} other than ${LXD_DIR}/storage-pools/{pool_name}.
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool5_under_lxd_dir" btrfs source="${LXD_DIR}"
# Test that no invalid btrfs storage pool configuration keys can be set
# (lvm.* and zfs.* keys must be rejected on btrfs).
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs lvm.thinpool_name=bla
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs lvm.use_thinpool=false
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs lvm.vg_name=bla
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs volume.block.filesystem=ext4
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs volume.block.mount_options=discard
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs volume.size=2GB
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs volume.zfs.remove_snapshots=true
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs volume.zfs.use_refquota=true
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs zfs.clone_copy=true
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-btrfs-pool-config" btrfs zfs.pool_name=bla
# Test that valid btrfs storage pool configuration keys can be set and updated.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-btrfs-pool-config" btrfs rsync.bwlimit=1024
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-btrfs-pool-config"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-btrfs-pool-config" btrfs btrfs.mount_options="rw,strictatime,nospace_cache,user_subvol_rm_allowed"
lxc storage set "lxdtest-$(basename "${LXD_DIR}")-valid-btrfs-pool-config" btrfs.mount_options "rw,relatime,space_cache,user_subvol_rm_allowed"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-btrfs-pool-config"
fi
# Create dir pool. Runs on every backend (no guard) and is used later as the
# default pool for image import.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool5" dir
# Check that we cannot create storage pools inside of ${LXD_DIR} other than ${LXD_DIR}/storage-pools/{pool_name}.
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool5_under_lxd_dir" dir source="${LXD_DIR}"
# Check that we can create storage pools inside of ${LXD_DIR}/storage-pools/{pool_name}.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool5_under_lxd_dir" dir source="${LXD_DIR}/storage-pools/lxdtest-$(basename "${LXD_DIR}")-pool5_under_lxd_dir"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool5_under_lxd_dir"
# Test that no invalid dir storage pool configuration keys can be set
# (dir pools take no size and no lvm.*/zfs.*/block keys).
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir lvm.thinpool_name=bla
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir lvm.use_thinpool=false
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir lvm.vg_name=bla
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir size=10GB
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir volume.block.filesystem=ext4
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir volume.block.mount_options=discard
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir volume.size=2GB
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir volume.zfs.remove_snapshots=true
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir volume.zfs.use_refquota=true
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir zfs.clone_copy=true
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-dir-pool-config" dir zfs.pool_name=bla
# rsync.bwlimit is the one tunable accepted by dir pools here.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-dir-pool-config" dir rsync.bwlimit=1024
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-dir-pool-config"
if [ "$lxd_backend" = "lvm" ]; then
# Create lvm pool.
configure_loop_device loop_file_3 loop_device_3
# shellcheck disable=SC2154
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool6" lvm source="${loop_device_3}" volume.size=25MB
configure_loop_device loop_file_5 loop_device_5
# shellcheck disable=SC2154
# Should fail if vg does not exist, since we have no way of knowing where
# to create the vg without a block device path set.
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool10" lvm source=dummy_vg_1 volume.size=25MB
# shellcheck disable=SC2154
deconfigure_loop_device "${loop_file_5}" "${loop_device_5}"
configure_loop_device loop_file_6 loop_device_6
# shellcheck disable=SC2154
pvcreate "${loop_device_6}"
vgcreate "lxdtest-$(basename "${LXD_DIR}")-pool11-dummy_vg_2" "${loop_device_6}"
# Reuse existing volume group "dummy_vg_2" on existing physical volume.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool11" lvm source="lxdtest-$(basename "${LXD_DIR}")-pool11-dummy_vg_2" volume.size=25MB
configure_loop_device loop_file_7 loop_device_7
# shellcheck disable=SC2154
pvcreate "${loop_device_7}"
vgcreate "lxdtest-$(basename "${LXD_DIR}")-pool12-dummy_vg_3" "${loop_device_7}"
# Reuse existing volume group "dummy_vg_3" on existing physical volume.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool12" lvm source="lxdtest-$(basename "${LXD_DIR}")-pool12-dummy_vg_3" volume.size=25MB
configure_loop_device loop_file_8 loop_device_8
# shellcheck disable=SC2154
# Create new volume group "dummy_vg_4".
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool13" lvm source="${loop_device_8}" lvm.vg_name="lxdtest-$(basename "${LXD_DIR}")-pool13-dummy_vg_4" volume.size=25MB
# Loop-file backed pool (no source given) and a non-thinpool variant.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool14" lvm volume.size=25MB
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" lvm lvm.use_thinpool=false volume.size=25MB
# Test that no invalid lvm storage pool configuration keys can be set.
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-lvm-pool-config" lvm volume.zfs.remove_snapshots=true
# NOTE(review): key below uses an underscore (volume.zfs_use_refquota) unlike
# the dotted form elsewhere — still expected to be rejected; confirm intent.
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-lvm-pool-config" lvm volume.zfs_use_refquota=true
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-lvm-pool-config" lvm zfs.clone_copy=true
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-lvm-pool-config" lvm zfs.pool_name=bla
# Setting a thinpool name while thinpools are disabled must fail.
! lxc storage create "lxdtest-$(basename "${LXD_DIR}")-invalid-lvm-pool-config" lvm lvm.use_thinpool=false lvm.thinpool_name="lxdtest-$(basename "${LXD_DIR}")-invalid-lvm-pool-config"
# Test that all valid lvm storage pool configuration keys can be set.
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool16" lvm lvm.thinpool_name="lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool17" lvm lvm.vg_name="lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool18" lvm size=10GB
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool19" lvm volume.block.filesystem=ext4
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool20" lvm volume.block.mount_options=discard
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool21" lvm volume.size=2GB
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool22" lvm lvm.use_thinpool=true
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool23" lvm lvm.use_thinpool=true lvm.thinpool_name="lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool24" lvm rsync.bwlimit=1024
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool25" lvm volume.block.mount_options="rw,strictatime,discard"
lxc storage set "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool25" volume.block.mount_options "rw,lazytime"
lxc storage create "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool26" lvm volume.block.filesystem=btrfs
fi
# Set default storage pool for image import (the dir pool exists on all backends).
lxc profile device add default root disk path="/" pool="lxdtest-$(basename "${LXD_DIR}")-pool5"
# Import image into default storage pool.
ensure_import_testimage
# Muck around with some containers on various pools.
if [ "$lxd_backend" = "zfs" ]; then
# Containers on the loop-file pool (pool1) and the device-backed pool (pool2);
# `lxc list -c b` confirms each container landed on the requested pool.
lxc init testimage c1pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1"
lxc list -c b c1pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1"
lxc init testimage c2pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2"
lxc list -c b c2pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2"
lxc launch testimage c3pool1 -s "lxdtest-$(basename "${LXD_DIR}")-pool1"
lxc list -c b c3pool1 | grep "lxdtest-$(basename "${LXD_DIR}")-pool1"
lxc launch testimage c4pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2"
lxc list -c b c4pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2"
# Attach/detach custom volumes; attaching a second device for an
# already-attached volume must fail.
# NOTE(review): the detach calls in this zfs section omit the device name,
# unlike the btrfs/dir sections — presumably relying on single-device
# inference; confirm this is intended.
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool1" c1pool1
lxc storage volume set "lxdtest-$(basename "${LXD_DIR}")-pool1" c1pool1 zfs.use_refquota true
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool1" c1pool1 c1pool1 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool1" c1pool1 c1pool1 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool1" c1pool1 c1pool1
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool1" custom/c1pool1 c1pool1 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool1" custom/c1pool1 c1pool1 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool1" c1pool1 c1pool1
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool1" c2pool2
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool1" c2pool2 c2pool2 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool1" c2pool2 c2pool2 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool1" c2pool2 c2pool2
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool1" custom/c2pool2 c2pool2 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool1" custom/c2pool2 c2pool2 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool1" c2pool2 c2pool2
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1 c3pool1 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1 c3pool1 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1 c3pool1
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1 c3pool1 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1 c3pool1 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1 c3pool1
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2 c4pool2 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2 c4pool2 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2 c4pool2
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool2" custom/c4pool2 c4pool2 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool2" custom/c4pool2 c4pool2 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2 c4pool2
# Rename a custom volume and rename it back.
lxc storage volume rename "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2 c4pool2-renamed
lxc storage volume rename "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2-renamed c4pool2
fi
if [ "$lxd_backend" = "btrfs" ]; then
# Containers on the loop-file pool (pool3) and the device-backed pool (pool4).
lxc init testimage c5pool3 -s "lxdtest-$(basename "${LXD_DIR}")-pool3"
lxc list -c b c5pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3"
lxc init testimage c6pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4"
lxc list -c b c6pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4"
lxc launch testimage c7pool3 -s "lxdtest-$(basename "${LXD_DIR}")-pool3"
lxc list -c b c7pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3"
lxc launch testimage c8pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4"
lxc list -c b c8pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4"
# Attach/detach custom volumes (device name passed explicitly here);
# a second device for an already-attached volume must fail.
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool3" c5pool3
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool3" c5pool3 c5pool3 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool3" c5pool3 c5pool3 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool3" c5pool3 c5pool3 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool3" custom/c5pool3 c5pool3 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool3" custom/c5pool3 c5pool3 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool3" c5pool3 c5pool3 testDevice
# NOTE(review): volume c6pool4 is attached to container c5pool3 (not c6pool4)
# in the four calls below — looks like cross-pool attachment is the point,
# but confirm this wasn't a copy-paste slip.
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool4" c6pool4
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool4" c6pool4 c5pool3 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool4" c6pool4 c5pool3 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool4" c6pool4 c5pool3 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool4" custom/c6pool4 c5pool3 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool4" custom/c6pool4 c5pool3 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool4" c6pool4 c5pool3 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool3" c7pool3
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool3" c7pool3 c7pool3 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool3" c7pool3 c7pool3 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool3" c7pool3 c7pool3 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool3" custom/c7pool3 c7pool3 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool3" custom/c7pool3 c7pool3 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool3" c7pool3 c7pool3 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4 c8pool4 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4 c8pool4 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4 c8pool4 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool4" custom/c8pool4 c8pool4 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool4" custom/c8pool4 c8pool4 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4 c8pool4 testDevice
# Rename a custom volume and rename it back.
lxc storage volume rename "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4 c8pool4-renamed
lxc storage volume rename "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4-renamed c8pool4
fi
# Containers and custom volumes on the dir pool (pool5) — runs on every backend.
lxc init testimage c9pool5 -s "lxdtest-$(basename "${LXD_DIR}")-pool5"
lxc list -c b c9pool5 | grep "lxdtest-$(basename "${LXD_DIR}")-pool5"
lxc launch testimage c11pool5 -s "lxdtest-$(basename "${LXD_DIR}")-pool5"
lxc list -c b c11pool5 | grep "lxdtest-$(basename "${LXD_DIR}")-pool5"
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool5" c9pool5
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool5" c9pool5 c9pool5 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool5" c9pool5 c9pool5 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool5" c9pool5 c9pool5 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool5" custom/c9pool5 c9pool5 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool5" custom/c9pool5 c9pool5 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool5" c9pool5 c9pool5 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5 c11pool5 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5 c11pool5 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5 c11pool5 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool5" custom/c11pool5 c11pool5 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool5" custom/c11pool5 c11pool5 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5 c11pool5 testDevice
# Rename a custom volume and rename it back.
lxc storage volume rename "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5 c11pool5-renamed
lxc storage volume rename "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5-renamed c11pool5
if [ "$lxd_backend" = "lvm" ]; then
# Containers on the lvm pools created above (pool6/11/12/13/14/15),
# including vg/thinpool renaming, lv grow/shrink, and block-filesystem changes.
lxc init testimage c10pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6"
lxc list -c b c10pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6"
# Test if volume group renaming works by setting lvm.vg_name.
lxc storage set "lxdtest-$(basename "${LXD_DIR}")-pool6" lvm.vg_name "lxdtest-$(basename "${LXD_DIR}")-pool6-newName"
lxc storage set "lxdtest-$(basename "${LXD_DIR}")-pool6" lvm.thinpool_name "lxdtest-$(basename "${LXD_DIR}")-pool6-newThinpoolName"
lxc launch testimage c12pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6"
lxc list -c b c12pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6"
# grow lv
lxc config device set c12pool6 root size 30MB
lxc restart c12pool6 --force
# shrink lv
lxc config device set c12pool6 root size 25MB
lxc restart c12pool6 --force
lxc init testimage c10pool11 -s "lxdtest-$(basename "${LXD_DIR}")-pool11"
lxc list -c b c10pool11 | grep "lxdtest-$(basename "${LXD_DIR}")-pool11"
lxc launch testimage c12pool11 -s "lxdtest-$(basename "${LXD_DIR}")-pool11"
lxc list -c b c12pool11 | grep "lxdtest-$(basename "${LXD_DIR}")-pool11"
lxc init testimage c10pool12 -s "lxdtest-$(basename "${LXD_DIR}")-pool12"
lxc list -c b c10pool12 | grep "lxdtest-$(basename "${LXD_DIR}")-pool12"
lxc launch testimage c12pool12 -s "lxdtest-$(basename "${LXD_DIR}")-pool12"
lxc list -c b c12pool12 | grep "lxdtest-$(basename "${LXD_DIR}")-pool12"
lxc init testimage c10pool13 -s "lxdtest-$(basename "${LXD_DIR}")-pool13"
lxc list -c b c10pool13 | grep "lxdtest-$(basename "${LXD_DIR}")-pool13"
lxc launch testimage c12pool13 -s "lxdtest-$(basename "${LXD_DIR}")-pool13"
lxc list -c b c12pool13 | grep "lxdtest-$(basename "${LXD_DIR}")-pool13"
lxc init testimage c10pool14 -s "lxdtest-$(basename "${LXD_DIR}")-pool14"
lxc list -c b c10pool14 | grep "lxdtest-$(basename "${LXD_DIR}")-pool14"
lxc launch testimage c12pool14 -s "lxdtest-$(basename "${LXD_DIR}")-pool14"
lxc list -c b c12pool14 | grep "lxdtest-$(basename "${LXD_DIR}")-pool14"
lxc init testimage c10pool15 -s "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15"
lxc list -c b c10pool15 | grep "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15"
lxc launch testimage c12pool15 -s "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15"
lxc list -c b c12pool15 | grep "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15"
# Test that changing block filesystem works
lxc storage set "lxdtest-$(basename "${LXD_DIR}")-pool6" volume.block.filesystem xfs
lxc init testimage c1pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6"
lxc storage set "lxdtest-$(basename "${LXD_DIR}")-pool6" volume.block.filesystem btrfs
lxc storage set "lxdtest-$(basename "${LXD_DIR}")-pool6" volume.size 100MB
lxc init testimage c2pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6"
# Attach/detach custom volumes on each lvm pool; attaching a second device
# for an already-attached volume must fail.
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6 c10pool6 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6 c10pool6 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6 c10pool6 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" custom/c10pool6 c10pool6 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" custom/c10pool6 c10pool6 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6 c10pool6 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool6" c12pool6
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" c12pool6 c12pool6 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" c12pool6 c12pool6 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool6" c12pool6 c12pool6 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" custom/c12pool6 c12pool6 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" custom/c12pool6 c12pool6 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool6" c12pool6 c12pool6 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool11" c10pool11
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool11" c10pool11 c10pool11 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool11" c10pool11 c10pool11 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool11" c10pool11 c10pool11 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool11" custom/c10pool11 c10pool11 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool11" custom/c10pool11 c10pool11 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool11" c10pool11 c10pool11 testDevice
# NOTE(review): volume c12pool11 is attached to container c10pool11 below —
# presumably deliberate cross-attachment; confirm against upstream history.
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool11" c12pool11
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool11" c12pool11 c10pool11 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool11" c12pool11 c10pool11 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool11" c12pool11 c10pool11 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool11" custom/c12pool11 c10pool11 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool11" custom/c12pool11 c10pool11 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool11" c12pool11 c10pool11 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool12" c10pool12
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool12" c10pool12 c10pool12 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool12" c10pool12 c10pool12 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool12" c10pool12 c10pool12 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool12" custom/c10pool12 c10pool12 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool12" custom/c10pool12 c10pool12 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool12" c10pool12 c10pool12 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool12" c12pool12
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool12" c12pool12 c12pool12 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool12" c12pool12 c12pool12 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool12" c12pool12 c12pool12 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool12" custom/c12pool12 c12pool12 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool12" custom/c12pool12 c12pool12 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool12" c12pool12 c12pool12 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool13" c10pool13
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" c10pool13 c10pool13 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" c10pool13 c10pool13 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool13" c10pool13 c10pool13 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" custom/c10pool13 c10pool13 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" custom/c10pool13 c10pool13 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool13" c10pool13 c10pool13 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13 c12pool13 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13 c12pool13 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13 c12pool13 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" custom/c12pool13 c12pool13 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" custom/c12pool13 c12pool13 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13 c12pool13 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14 c10pool14 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14 c10pool14 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14 c10pool14 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" custom/c10pool14 c10pool14 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" custom/c10pool14 c10pool14 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14 c10pool14 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14 c12pool14 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14 c12pool14 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14 c12pool14 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" custom/c12pool14 c12pool14 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" custom/c12pool14 c12pool14 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14 c12pool14 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c10pool15
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c10pool15 c10pool15 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c10pool15 c10pool15 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c10pool15 c10pool15 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" custom/c10pool15 c10pool15 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" custom/c10pool15 c10pool15 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c10pool15 c10pool15 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c12pool15
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c12pool15 c12pool15 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c12pool15 c12pool15 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c12pool15 c12pool15 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" custom/c12pool15 c12pool15 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" custom/c12pool15 c12pool15 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c12pool15 c12pool15 testDevice
fi
# ZFS-only checks: launch two containers per ZFS pool, then verify for each
# that a custom volume can be attached once (a second attach of the same
# volume must fail, hence the leading `!`), detached, and attached again via
# the explicit custom/ prefix. Finally exercise volume rename round-tripping.
if [ "$lxd_backend" = "zfs" ]; then
lxc launch testimage c13pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7"
lxc launch testimage c14pool7 -s "lxdtest-$(basename "${LXD_DIR}")-pool7"
lxc launch testimage c15pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8"
lxc launch testimage c16pool8 -s "lxdtest-$(basename "${LXD_DIR}")-pool8"
lxc launch testimage c17pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9"
lxc launch testimage c18pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9"
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool7" c13pool7
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool7" c13pool7 c13pool7 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool7" c13pool7 c13pool7 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool7" c13pool7 c13pool7 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool7" custom/c13pool7 c13pool7 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool7" custom/c13pool7 c13pool7 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool7" c13pool7 c13pool7 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool7" c14pool7
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool7" c14pool7 c14pool7 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool7" c14pool7 c14pool7 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool7" c14pool7 c14pool7 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool7" custom/c14pool7 c14pool7 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool7" custom/c14pool7 c14pool7 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool7" c14pool7 c14pool7 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool8" c15pool8
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool8" c15pool8 c15pool8 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool8" c15pool8 c15pool8 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool8" c15pool8 c15pool8 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool8" custom/c15pool8 c15pool8 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool8" custom/c15pool8 c15pool8 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool8" c15pool8 c15pool8 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool8" c16pool8
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool8" c16pool8 c16pool8 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool8" c16pool8 c16pool8 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool8" c16pool8 c16pool8 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool8" custom/c16pool8 c16pool8 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool8" custom/c16pool8 c16pool8 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool8" c16pool8 c16pool8 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool9" c17pool9
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool9" c17pool9 c17pool9 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool9" c17pool9 c17pool9 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool9" c17pool9 c17pool9 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool9" custom/c17pool9 c17pool9 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool9" custom/c17pool9 c17pool9 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool9" c17pool9 c17pool9 testDevice
lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9 c18pool9 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9 c18pool9 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9 c18pool9 testDevice
lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool9" custom/c18pool9 c18pool9 testDevice /opt
! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool9" custom/c18pool9 c18pool9 testDevice2 /opt
lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9 c18pool9 testDevice
# Renaming a custom volume and renaming it back must both succeed.
lxc storage volume rename "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9 c18pool9-renamed
lxc storage volume rename "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9-renamed c18pool9
fi
# ZFS cleanup: force-delete the containers first, then their custom volumes.
if [ "$lxd_backend" = "zfs" ]; then
lxc delete -f c1pool1
lxc delete -f c3pool1
lxc delete -f c4pool2
lxc delete -f c2pool2
# NOTE(review): c2pool2 is deleted from pool1 and c3pool1 from pool2, which
# looks crossed relative to the volume names. This may be intentional if the
# volumes were moved/copied between pools earlier in the test (that part is
# outside this view) — confirm against the setup code before changing.
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool1" c1pool1
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool1" c2pool2
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2
fi
# Btrfs cleanup: force-delete the containers first, then their custom volumes.
if [ "$lxd_backend" = "btrfs" ]; then
lxc delete -f c5pool3
lxc delete -f c7pool3
lxc delete -f c8pool4
lxc delete -f c6pool4
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool3" c5pool3
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool4" c6pool4
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool3" c7pool3
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4
fi
# Backend-independent cleanup for the pool5 containers and their volumes.
lxc delete -f c9pool5
lxc delete -f c11pool5
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool5" c9pool5
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5
# LVM cleanup: force-delete all LVM-backed containers, then the custom
# volumes that were created on each of the LVM pools above.
if [ "$lxd_backend" = "lvm" ]; then
lxc delete -f c1pool6
lxc delete -f c2pool6
lxc delete -f c10pool6
lxc delete -f c12pool6
lxc delete -f c10pool11
lxc delete -f c12pool11
lxc delete -f c10pool12
lxc delete -f c12pool12
lxc delete -f c10pool13
lxc delete -f c12pool13
lxc delete -f c10pool14
lxc delete -f c12pool14
lxc delete -f c10pool15
lxc delete -f c12pool15
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool6" c12pool6
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool11" c10pool11
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool11" c12pool11
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool12" c10pool12
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool12" c12pool12
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool13" c10pool13
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c10pool15
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" c12pool15
fi
# ZFS cleanup for the pool7/8/9 containers and their custom volumes.
if [ "$lxd_backend" = "zfs" ]; then
lxc delete -f c13pool7
lxc delete -f c14pool7
lxc delete -f c15pool8
lxc delete -f c16pool8
lxc delete -f c17pool9
lxc delete -f c18pool9
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool7" c13pool7
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool7" c14pool7
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool8" c15pool8
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool8" c16pool8
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool9" c17pool9
lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9
fi
# The shared test image can go once every container that used it is gone.
lxc image delete testimage
# ZFS teardown: delete the storage pools, then detach their backing loop
# devices (loop_file_*/loop_device_* are set by the setup code outside this
# view, hence the SC2154 "referenced but not assigned" suppressions).
if [ "$lxd_backend" = "zfs" ]; then
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool7"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool8"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool9"
# shellcheck disable=SC2154
deconfigure_loop_device "${loop_file_4}" "${loop_device_4}"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool1"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool2"
# shellcheck disable=SC2154
deconfigure_loop_device "${loop_file_1}" "${loop_device_1}"
fi
# Btrfs teardown: delete the pool and detach its backing loop device.
if [ "$lxd_backend" = "btrfs" ]; then
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool4"
# shellcheck disable=SC2154
deconfigure_loop_device "${loop_file_2}" "${loop_device_2}"
fi
# LVM teardown: delete each pool, force-remove the volume group / physical
# volume LXD created for it (|| true because LXD may already have removed
# them), and detach the backing loop devices.
if [ "$lxd_backend" = "lvm" ]; then
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool6"
# shellcheck disable=SC2154
pvremove -ff "${loop_device_3}" || true
# shellcheck disable=SC2154
deconfigure_loop_device "${loop_file_3}" "${loop_device_3}"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool11"
# shellcheck disable=SC2154
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-pool11-dummy_vg_2" || true
pvremove -ff "${loop_device_6}" || true
# shellcheck disable=SC2154
deconfigure_loop_device "${loop_file_6}" "${loop_device_6}"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool12"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-pool12-dummy_vg_3" || true
pvremove -ff "${loop_device_7}" || true
# shellcheck disable=SC2154
deconfigure_loop_device "${loop_file_7}" "${loop_device_7}"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool13"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-pool13-dummy_vg_4" || true
pvremove -ff "${loop_device_8}" || true
# shellcheck disable=SC2154
deconfigure_loop_device "${loop_file_8}" "${loop_device_8}"
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool14"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-pool14" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool15" || true
# NOTE(review): for pools 16-25 the deleted pool is named
# "valid-lvm-pool-config-poolN" while the removed VG is named
# "non-thinpool-poolN". This is presumably because those pools were created
# with an explicit lvm.vg_name of "non-thinpool-poolN" in the (out-of-view)
# setup code — verify there before renaming anything here.
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool16"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool16" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool17"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool17" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool18"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool18" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool19"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool19" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool20"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool20" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool21"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool21" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool22"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool22" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool23"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool23" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool24"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool24" || true
lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-valid-lvm-pool-config-pool25"
vgremove -ff "lxdtest-$(basename "${LXD_DIR}")-non-thinpool-pool25" || true
fi
)
# Test applying quota
QUOTA1="10GB"
QUOTA2="11GB"
# The LVM test pools are small, so use much smaller quotas on that backend.
if [ "$lxd_backend" = "lvm" ]; then
QUOTA1="20MB"
QUOTA2="21MB"
fi
# Quota tests are skipped on dir and ceph backends.
if [ "$lxd_backend" != "dir" ] && [ "$lxd_backend" != "ceph" ]; then
lxc launch testimage quota1
# Apply the first quota via the default profile's root device; each container
# is stop/start-cycled so the new size takes effect.
lxc profile device set default root size "${QUOTA1}"
lxc stop -f quota1
lxc start quota1
lxc launch testimage quota2
lxc stop -f quota2
lxc start quota2
lxc init testimage quota3
lxc start quota3
# Grow the quota and restart all three containers again.
lxc profile device set default root size "${QUOTA2}"
lxc stop -f quota1
lxc start quota1
lxc stop -f quota2
lxc start quota2
lxc stop -f quota3
lxc start quota3
# Removing the quota must also survive a restart cycle.
lxc profile device unset default root size
lxc stop -f quota1
lxc start quota1
lxc stop -f quota2
lxc start quota2
lxc stop -f quota3
lxc start quota3
lxc delete -f quota1
lxc delete -f quota2
lxc delete -f quota3
fi
# shellcheck disable=SC2031
# Self-assignment silences shellcheck's "modified in a subshell" warning;
# presumably LXD_DIR was changed inside the subshell above — confirm upstream.
LXD_DIR="${LXD_DIR}"
kill_lxd "${LXD_STORAGE_DIR}"
}
|
package componets;
import javax.swing.JComponent;
import java.awt.*;
import java.awt.geom.Area;
import java.awt.geom.Ellipse2D;
/**
 * A single circular slot of a Connect Four style board. It draws a bordered
 * circular cell that can be empty, hovered, or filled by one of two players.
 */
public class DiscSlot extends JComponent {
private int radius;
private final int paddingBorder = 4;
private final int offsetBorder = 4;
private final double padding;
private boolean hovered;
private Ellipse2D circle;
private Ellipse2D border;
private Boolean whoPlaced;
public DiscSlot() {
this(50,0.1,false);
}
public DiscSlot(int radius, double padding, boolean hovered) {
this.radius = radius;
this.padding = padding;
this.hovered = hovered;
}
public void setEmptySlot() {
whoPlaced = null;
}
public void placeDisc(Boolean player) {
whoPlaced = player;
}
public Boolean getWhoPlaced() {
return whoPlaced;
}
public void setIsHovered(boolean state) {
hovered = state;
}
public void paint(Graphics g) {
super.paint(g);
buildCellComponent();
drawCell(g);
}
public Dimension getPreferredSize() {
int size = radius + radius;
return new Dimension(size, size);
}
private void buildCellComponent(){
Dimension size = getSize();
radius = (int)( (Math.min(size.width, size.height) / 2)
-(radius*padding));
double sizeWidth = (double)size.width / 2.0;
double sizeHeight = (double)size.height / 2.0;
double diameter = radius + radius;
double diameterBorder = (radius + paddingBorder) + (radius + paddingBorder);
circle = new Ellipse2D.Double(
sizeWidth - radius,
sizeHeight - radius,
diameter, diameter);
border = new Ellipse2D.Double(
sizeWidth - radius - offsetBorder,
sizeHeight - radius - offsetBorder,
diameterBorder, diameterBorder);
}
private void drawCell(Graphics g){
Graphics2D g2 = (Graphics2D) g;
Area circleArea = new Area(circle);
Area borderArea = new Area(border);
g2.setColor(new Color(0,60,200));
borderArea.subtract(circleArea);
g2.fill(borderArea);
if(whoPlaced != null) {
if (whoPlaced) g2.setColor(Color.YELLOW);
else g2.setColor(Color.RED);
}else if(hovered){
g2.setColor(new Color(189, 189, 189, 255));
}else{
g2.setColor(new Color(128, 128, 128, 128));
}
g2.fill(circleArea);
g2.setColor(Color.BLACK);
g2.draw(circle);
}
}
|
<filename>src/main/java/am/ik/cloud/gateway/locator/MapUtils.java<gh_stars>1-10
package am.ik.cloud.gateway.locator;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.springframework.cloud.gateway.route.RouteDefinition;
import reactor.util.function.Tuple3;
import reactor.util.function.Tuples;
public class MapUtils {

    /**
     * Computes the per-key changes needed to turn {@code before} into
     * {@code after}.
     *
     * @param before the previous route definitions, keyed by route id
     * @param after  the desired route definitions, keyed by route id
     * @return one tuple per changed key: ADDED for keys only in {@code after},
     *         UPDATED for keys present in both but with unequal definitions,
     *         DELETED for keys only in {@code before}
     */
    public static List<Tuple3<State, String, RouteDefinition>> difference(Map<String, RouteDefinition> before,
            Map<String, RouteDefinition> after) {
        final List<Tuple3<State, String, RouteDefinition>> changes = new ArrayList<>();
        // First pass: anything in `after` is either brand new or an update.
        for (Map.Entry<String, RouteDefinition> entry : after.entrySet()) {
            final String id = entry.getKey();
            final RouteDefinition desired = entry.getValue();
            if (!before.containsKey(id)) {
                changes.add(Tuples.of(State.ADDED, id, desired));
            } else if (!before.get(id).equals(desired)) {
                changes.add(Tuples.of(State.UPDATED, id, desired));
            }
        }
        // Second pass: keys that vanished from `after` are deletions.
        for (Map.Entry<String, RouteDefinition> entry : before.entrySet()) {
            if (!after.containsKey(entry.getKey())) {
                changes.add(Tuples.of(State.DELETED, entry.getKey(), entry.getValue()));
            }
        }
        return changes;
    }

    /** Kind of change a key underwent between the two maps. */
    enum State {
        ADDED, UPDATED, DELETED
    }
}
|
<gh_stars>0
"use strict"
/**
 * Builds the TextMate token-color rules for one named color scheme.
 *
 * @param {Object} colorSchemes map of scheme name -> palette; the palette's
 *   fields read below are: variable, invalid, deprecated, class, punctuation,
 *   storageType, number, function, property, string
 * @param {string} schemeName key into `colorSchemes` selecting the palette
 * @returns {Array<{name?: string, scope: string[], settings: Object}>} rules
 *   suitable for a VS Code color theme's `tokenColors` array
 */
const textmateTokens = (colorSchemes, schemeName) => {
const scheme = colorSchemes[schemeName]
// Each entry maps a list of TextMate scopes to a foreground color and/or
// font style taken from the selected palette.
return [{
scope: ["text", "source"],
settings: {
foreground: scheme.variable,
},
},
{
scope: ["emphasis"],
settings: {
fontStyle: "italic",
},
},
{
scope: ["strong"],
settings: {
fontStyle: "bold",
},
},
// Invalid / deprecated code is highlighted with italics and underlines.
{
scope: ["invalid"],
settings: {
foreground: scheme.invalid,
fontStyle: "italic",
},
},
{
scope: ["invalid.deprecated"],
settings: {
foreground: scheme.deprecated,
fontStyle: "underline italic",
},
},
{
scope: ["invalid.illegal"],
settings: {
foreground: scheme.invalid,
fontStyle: "underline italic",
},
},
{
scope: ["variable.language.this", "variable.language.self"],
settings: {
foreground: scheme.class,
},
},
{
scope: ["keyword", "keyword.operator"],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: [
"keyword.control",
"punctuation.definition.keyword",
"keyword.other.important",
],
settings: {
foreground: scheme.storageType,
},
},
{
scope: ["keyword.other.unit"],
settings: {
foreground: scheme.number,
},
},
{
scope: [
"keyword.other.special-method",
"entity.name.other.preprocessor.macro",
"variable.other.event",
],
settings: {
foreground: scheme.function,
},
},
{
scope: ["constant.character"],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: ["constant.character.escape"],
settings: {
foreground: scheme.property,
},
},
{
scope: ["entity.name.label"],
settings: {
foreground: scheme.variable,
},
},
{
scope: [
"variable.parameter",
"variable.language.arguments",
"variable.other.object",
],
settings: {
foreground: scheme.variable,
},
},
{
scope: ["support.type"],
settings: {
foreground: scheme.variable,
},
},
{
scope: ["keyword.operator.new", "keyword.control.new"],
settings: {
foreground: scheme.storageType,
},
},
{
name: "NAMESPACE, PRIMITIVE TYPES AND ENUM MEMBER",
scope: [
"entity.name.section",
"entity.name.namespace",
"entity.name.type.module",
"support.type.primitive",
"variable.other.enummember",
],
settings: {
foreground: scheme.number,
},
},
// Module/import keywords share the "number" color.
{
scope: [
"keyword.control.default",
"keyword.control.export",
"keyword.control.from",
"keyword.control.import",
"keyword.control.module",
"support.type.object.module",
],
settings: {
foreground: scheme.number,
},
},
// NOTE(review): a few per-language scopes below appear twice
// ("storage.type.cs", "storage.type.groovy"); duplicates are harmless to
// VS Code and are kept as-is to preserve the data verbatim.
{
name: "CLASSES AND TYPES",
scope: [
"support.class",
"entity.name.type.class",
"entity.name.class",
"variable.other.class",
"entity.name.type.enum",
"entity.name.type.interface",
"entity.name.type",
"entity.other.inherited-class",
"entity.name.scope-resolution",
"entity.other.attribute",
"keyword.primitive-datatypes.swift",
"keyword.type.cs",
"meta.protocol-list.objc",
"meta.return-type.objc",
"meta.return-type",
"source.go storage.type",
"source.groovy storage.type",
"source.java storage.type",
"source.powershell entity.other.attribute-name",
"storage.class.std.rust",
"storage.type.annotation.groovy",
"storage.type.annotation.java",
"storage.type.attribute.swift",
"storage.type.boolean.go",
"storage.type.byte.go",
"storage.type.c",
"storage.type.core.rust",
"storage.type.cs",
"storage.type.cs",
"storage.type.error.go",
"storage.type.generic.cs",
"storage.type.generic.groovy",
"storage.type.generic.java",
"storage.type.groovy",
"storage.type.groovy",
"storage.type.haskell",
"storage.type.java",
"storage.type.modifier.cs",
"storage.type.numeric.go",
"storage.type.objc",
"storage.type.object.array.groovy",
"storage.type.object.array.java",
"storage.type.ocaml",
"storage.type.parameters.groovy",
"storage.type.php",
"storage.type.primitive.array.groovy",
"storage.type.primitive.array.java",
"storage.type.primitive.groovy",
"storage.type.primitive.java",
"storage.type.rune.go",
"storage.type.string.go",
"storage.type.struct",
"storage.type.token.java",
"storage.type.uintptr.go",
"storage.type.variable.cs",
],
settings: {
foreground: scheme.class,
},
},
{
name: "FUNCTIONS",
scope: [
"entity.name.function.member",
"support.function",
"entity.name.function.method",
"entity.name.function.accessor",
"entity.name.function",
"support.constant.handlebars",
"source.powershell variable.other.member",
"entity.name.operator.custom-literal",
"meta.method-call.java meta.method",
"meta.method.groovy",
"support.function.any-method",
"keyword.operator.function.infix",
"keyword.control.require",
],
settings: {
foreground: scheme.function,
},
},
{
name: "PROPERTY",
scope: [
"support.type.vendored.property-name",
"support.type.property-name",
"variable.css",
"variable.scss",
"variable.other.less",
"source.coffee.embedded",
"variable.other.object.property",
"variable.other.property",
"variable.other.constant.property",
],
settings: {
foreground: scheme.property,
},
},
{
name: "STRING",
scope: [
"string",
"meta.embedded.assembly",
"string.tag",
"string.value",
],
settings: {
foreground: scheme.string,
},
},
{
scope: ["keyword.other"],
settings: {
foreground: scheme.string,
},
},
{
name: "STORAGE",
scope: [
"meta.implementation storage.type.objc",
"meta.interface-or-protocol storage.type.objc",
"source.groovy storage.type.def",
"storage.type.accessor",
"storage.type.class.jsdoc",
"storage.type.property",
"storage",
],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: [
"storage.type",
"storage.modifier",
"storage.control",
"punctuation.flowtype",
"meta.object.flowtype",
],
settings: {
foreground: scheme.storageType,
},
},
{
name: "STORAGETYPE, FLOW, SPECIAL KEYWORDS",
scope: [
"entity.name.operator",
"keyword.control.module.reference",
"keyword.operator.alignas",
"keyword.operator.alignof",
"keyword.operator.cast",
"keyword.operator.delete",
"keyword.operator.expression",
"keyword.operator.in",
"keyword.operator.instanceof",
"keyword.operator.logical.python",
"keyword.operator.noexcept",
"keyword.operator.of",
"keyword.operator.sizeof",
"keyword.operator.typeid",
"keyword.operator.typeof",
"keyword.operator.void",
"keyword.operator.wordlike",
"keyword.other.operator",
"keyword.other.using",
"source.cpp keyword.operator.new",
],
settings: {
foreground: scheme.storageType,
},
},
{
name: "CONSTANT",
scope: [
"variable.other.constant",
"support.function.magic",
"variable.other.predefined",
"constant.numeric",
"keyword.operator.plus.exponent",
"keyword.operator.minus.exponent",
"constant",
"constant.variable",
"constant.language",
"support.constant",
],
settings: {
foreground: scheme.number,
},
},
{
name: "VARIABLE LANGUAGE",
scope: [
"keyword.expressions-and-types.swift",
"variable.language",
"variable.language punctuation.definition.variable.php",
"variable.other.readwrite.instance.ruby",
"variable.parameter.function.language.special",
],
settings: {
foreground: scheme.punctuation,
},
},
// String-interpolation delimiters (template expressions, embedded code).
{
scope: [
"punctuation.section.embedded",
"meta.string-contents.quoted.double punctuation.definition.variable",
"punctuation.definition.interpolation.begin",
"punctuation.definition.interpolation.end",
"punctuation.definition.template-expression.begin",
"punctuation.definition.template-expression.end",
"punctuation.section.embedded.begin",
"punctuation.section.embedded.coffee",
"punctuation.section.embedded.end",
"punctuation.section.embedded.end source.php",
"punctuation.section.embedded.end source.ruby",
"punctuation.definition.variable.makefile",
],
settings: {
foreground: scheme.number,
},
},
{
scope: [
"keyword.operator.decorator",
"punctuation.decorator",
"entity.name.class.decorator",
"variable.other.readwrite.decorator",
],
settings: {
foreground: scheme.function,
},
},
{
scope: [
"constant.other.color",
"constant.other.symbol",
"punctuation.definition.block.tag",
"punctuation.accessor",
"keyword.operator.accessor",
"meta.attribute.href.html",
"punctuation.definition.tag.jsx",
"punctuation.definition.tag.tsx",
"punctuation.definition.typeparameters",
"punctuation.separator",
"support.class.component",
],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: ["support.type.object.console"],
settings: {
foreground: scheme.class,
},
},
{
scope: ["support.type.object.dom"],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: ["variable.interpolation"],
settings: {
foreground: scheme.invalid,
},
},
{
scope: ["meta.definition.variable.name"],
settings: {
foreground: scheme.number,
},
},
{
scope: ["meta.object-literal.key"],
settings: {
foreground: scheme.property,
},
},
{
scope: [
"JSXAttrs",
"JSXNested",
"support.type.property-name.css",
"support.type.property-name.scss",
],
settings: {
foreground: scheme.variable,
},
},
{
scope: [
"meta.type.cast.expr",
"meta.type.new.expr",
"support.constant.math",
"support.constant.dom",
],
settings: {
foreground: scheme.class,
},
},
// Documentation-comment (JSDoc/Javadoc-style) sub-scopes.
{
scope: [
"comment keyword.codetag.notation",
"comment.block.documentation keyword",
"comment.block.documentation storage.type.class",
],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: [
"comment.block.documentation entity.name.type punctuation.definition.bracket",
"comment.block.documentation entity.name.type",
],
settings: {
foreground: scheme.class,
fontStyle: "italic",
},
},
{
scope: ["comment.block.documentation variable"],
settings: {
foreground: scheme.variable,
fontStyle: "italic",
},
},
{
name: "BRACKETS, BRACES, PARENTHESES",
scope: [
"punctuation.separator.parameter",
"punctuation.terminator.statement",
"meta.array-binding-pattern-variable",
"keyword.operator.other.powershell",
"keyword.other.statement-separator.powershell",
"meta.brace.curly",
"meta.brace.round",
"meta.brace.square",
"punctuation.definition.arguments.begin",
"punctuation.definition.arguments.end",
"punctuation.definition.entity.begin",
"punctuation.definition.entity.end",
"punctuation.definition.tag",
"punctuation.definition.type.begin",
"punctuation.definition.type.end",
"punctuation.section.scope.begin",
"punctuation.section.scope.end",
"storage.type.generic.java",
"string.template meta.brace",
"string.template punctuation.accessor",
],
settings: {
foreground: scheme.variable,
},
},
{
scope: [
"punctuation.definition.entity",
"string.unquoted.heredoc punctuation.definition.string",
"punctuation.separator.annotation",
"punctuation.colon.graphql",
"punctuation.definition.entity.other.inherited-class",
"punctuation.function.swift",
"meta.object-binding-pattern-variable punctuation.destructuring",
"entity.other.attribute-name.placeholder punctuation",
"constant.other.symbol.hashkey punctuation.definition.constant.ruby",
"entity.other.attribute-name.pseudo-class punctuation",
"punctuation.separator.dictionary.key-value",
"punctuation.separator.hash",
"punctuation.separator.inheritance",
"punctuation.separator.key-value.mapping.yaml",
"punctuation.separator.namespace",
"punctuation.separator.pointer-access",
"punctuation.separator.slice",
],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: ["meta.embedded", "source.groovy.embedded"],
settings: {
foreground: scheme.variable,
},
},
// Markup / CSS / HTML related scopes.
{
scope: ["entity.name.tag"],
settings: {
foreground: scheme.property,
},
},
{
scope: ["entity.other.attribute-name"],
settings: {
foreground: scheme.number,
},
},
{
scope: ["entity.other.attribute-name.id"],
settings: {
foreground: scheme.function,
},
},
{
scope: [
"entity.other.attribute-name.pseudo-element",
"entity.other.attribute-name.pseudo-class",
"entity.other.attribute-name.class",
"entity.other.attribute-name.class.mixin",
"entity.other.attribute-name.parent-selector",
"entity.other.attribute-name.attribute",
"meta.group.double.toml",
"meta.group.toml",
"support.other.chomping-indicator.yaml",
],
settings: {
foreground: scheme.number,
},
},
{
scope: [
"support.constant.font-name",
"support.constant.media-type",
"support.constant.media",
"constant.other.color.rgb-value",
"constant.other.rgb-value",
"support.constant.color",
],
settings: {
foreground: scheme.string,
},
},
{
scope: ["support.constant.property-value"],
settings: {
foreground: scheme.variable,
},
},
{
scope: ["punctuation.separator.key-value"],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: ["punctuation.definition.constant"],
settings: {
foreground: scheme.number,
},
},
{
scope: [
"meta.property-list punctuation.separator.key-value"],
settings: {
foreground: scheme.variable,
},
},
{
scope: ["meta.selector"],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: ["header"],
settings: {
foreground: scheme.property,
},
},
{
scope: [
"constant.other.object.key string.unquoted",
"punctuation.quasi.element",
"keyword.other.definition",
"variable.language.prototype",
"variable.other.jsdoc",
],
settings: {
foreground: scheme.property,
},
},
{
scope: [
"punctuation.definition.block.scalar.folded.yaml",
"punctuation.definition.block.scalar.literal.yaml",
"punctuation.definition.block.sequence.item.yaml",
],
settings: {
foreground: scheme.storageType,
},
},
// Preprocessor directives (C/C++ style).
{
scope: ["meta.preprocessor",
"entity.name.function.preprocessor"
],
settings: {
foreground: scheme.function,
},
},
{
scope: ["meta.preprocessor.string"],
settings: {
foreground: scheme.string,
},
},
{
scope: ["meta.preprocessor.numeric"],
settings: {
foreground: scheme.number,
},
},
{
scope: ["meta.structure.dictionary.key.python"],
settings: {
foreground: scheme.property,
},
},
{
scope: ["meta.diff.header"],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: [
"meta.decorator variable.other.readwrite",
"meta.decorator variable.other.property",
"meta.decorator variable.other.object",
],
settings: {
foreground: scheme.variable,
},
},
{
scope: [
"entity.name.function.target.makefile",
"entity.name.section.toml",
"variable.other.key.toml",
],
settings: {
foreground: scheme.property,
},
},
{
scope: ["constant.other.date", "constant.other.timestamp"],
settings: {
foreground: scheme.punctuation,
},
},
{
scope: [
"entity.name.type.type-parameter",
"meta.indexer.mapped type.declaration entity.name.type",
"meta.type.parameters entity.name.type",
],
settings: {
foreground: scheme.variable,
},
},
{
scope: [
"punctuation.support.type.property-name.begin",
"punctuation.support.type.property-name.end",
],
settings: {
foreground: scheme.property,
},
},
{
scope: [
"string.quoted.docstring.multi",
"string.quoted.docstring.multi.python punctuation.definition.string.begin",
"string.quoted.docstring.multi.python punctuation.definition.string.end",
"string.quoted.docstring.multi.python constant.character.escape",
],
settings: {
foreground: scheme.string,
},
},
{
scope: ["source.shell variable.other"],
settings: {
foreground: scheme.class,
},
},
{
scope: ["meta.scope.prerequisites.makefile"],
settings: {
foreground: scheme.string,
},
},
{
scope: ["support.function.git-rebase"],
settings: {
foreground: scheme.number,
},
},
{
scope: ["constant.sha.git-rebase"],
settings: {
foreground: scheme.number,
},
},
]
}
// Token rules rendered in italics: language keywords, type names,
// `self`/`this`, parameters, and similar "semantic" scopes.
const textmateItalic = [{
    name: "[CORRECTIONS] ITALIC",
    scope: [
      "constant.language.boolean",
      "entity.name.class",
      "entity.name.namespace",
      "entity.name.other.preprocessor.macro",
      "entity.name.type.class",
      "entity.name.type.interface",
      "entity.name.type.module",
      "entity.other.inherited-class",
      "keyword.operator.decorator",
      "storage.type.accessor",
      "storage.type.struct",
      "support.class",
      "support.type.primitive",
      "variable.language.self",
      "variable.language.super",
      "variable.language.this",
      "variable.language",
      "variable.other.class",
      "variable.other.event",
      "variable.parameter.function.language.special",
      "variable.parameter",
    ],
    settings: {
      fontStyle: "italic",
    },
  },
  {
    // NOTE(review): "<NAME>" looks like a placeholder left by a scrubbing
    // tool — confirm the intended rule name.
    name: "<NAME>",
    scope: ["keyword.control.new", "keyword.operator.new"],
    settings: {
      fontStyle: "bold italic",
    },
  },
]
// Expose both rule sets for the theme builder to merge into tokenColors.
module.exports = {
  textmateTokens,
  textmateItalic,
}
|
const express = require('express');
const bodyParser = require('body-parser');
const db = require('./database');

const app = express();
app.use(bodyParser.urlencoded({extended: false}));
app.use(bodyParser.json());

// Store a submitted form value and echo back the database result.
app.post('/form', async (req, res) => {
  try {
    const {input} = req.body;
    // Reject requests missing the expected field instead of persisting
    // `undefined`.
    if (input === undefined) {
      return res.status(400).json({error: 'Missing "input" field'});
    }
    const result = await db.insert(input);
    res.json(result);
  } catch (err) {
    // Express 4 does not catch rejected promises from async handlers;
    // without this the process crashes on any db failure.
    res.status(500).json({error: 'Internal server error'});
  }
});

// Fetch a previously stored record by its id.
app.get('/form/:id', async (req, res) => {
  try {
    const result = await db.get(req.params.id);
    res.json(result);
  } catch (err) {
    res.status(500).json({error: 'Internal server error'});
  }
});

app.listen(3000);
|
/*
SD card writing wrapper around the SD.h library
to write logs to an SD card.
Created by <NAME>, 2020/03/07
Released into the public domain.
*/
#include "Arduino.h"
#include "Logger.h"
// Construct the logger: remember the chip-select and write-LED pins,
// initialise the SD card, and configure the LED output pin.
// NOTE(review): the `filename` parameter is currently unused — the
// create_file(filename) call below is commented out; callers apparently
// must call create_file() themselves. Confirm intent.
Logger::Logger(String filename, int _SD_CARD_SELECT_PIN, int _SD_CARD_WRITE_LED)
{
    SD_CARD_SELECT_PIN = _SD_CARD_SELECT_PIN;
    SD_CARD_WRITE_LED = _SD_CARD_WRITE_LED;
    // NOTE(review): LED_PIN is not defined in this translation unit —
    // presumably a macro/member declared in Logger.h; confirm it differs
    // from _SD_CARD_WRITE_LED, which is configured again below.
    pinMode(LED_PIN, OUTPUT);
    digitalWrite(LED_PIN,LOW);
    //
    //
    // // see if the card is present and can be initialized:
    if (!SD.begin(_SD_CARD_SELECT_PIN)) {
        Serial.println("Card init. failed!");
        Serial.println("Has a sd card been inserted?");
        // error(SD_CARD_ERROR_CODE_FAILED_INIT);
    }
    //
    //
    // create_file(filename);
    //
    // pinMode(LED_PIN, OUTPUT);
    pinMode(_SD_CARD_WRITE_LED, OUTPUT);
    // Serial.println("Ready!");
}
// Open the next unused log file. Characters 7 and 8 of the name hold a
// two-digit sequence number that is incremented until an unused name is
// found (e.g. log01.txt -> log02.txt). Fix vs original: the original
// copied the name through a heap buffer that was never freed (leak) and
// then strcpy'd into a fixed char[15] without bounds (possible overflow).
void Logger::create_file(String _filename) {
    char filename[15];
    // Bounded copy straight from the String; truncates over-long names
    // instead of overflowing, and needs no heap allocation.
    strncpy(filename, _filename.c_str(), sizeof(filename) - 1);
    filename[sizeof(filename) - 1] = '\0';
    for (uint8_t i = 0; i < 100; i++) {
        //if file exits add another one using format <filename><number + 1>.txt
        //ex: if log01.txt exists
        //create log02.txt
        filename[7] = '0' + i / 10;
        filename[8] = '0' + i % 10;
        // create if does not exist, do not open existing, write, sync after write
        if (! SD.exists(filename)) {
            break;
        }
    }
    logfile = SD.open(filename, FILE_WRITE);
    if ( ! logfile ) {
        Serial.print("Couldnt create ");
        Serial.println(filename);
        // error(SD_CARD_ERROR_CODE_CANT_CREATE_FILE);
    }
    Serial.print("Writing to ");
    Serial.println(filename);
}
// blink out an error code
void Logger::error(uint8_t errno) {
while (1) {
uint8_t i;
for (i = 0; i < errno; i++) {
digitalWrite(LED_PIN, HIGH);
delay(100);
digitalWrite(LED_PIN, LOW);
delay(100);
}
for (i = errno; i < 10; i++) {
delay(200);
}
}
}
// Write `message` (no newline) to the log file, lighting the write LED
// for the duration of the SD access.
void Logger::sd_print(String message) {
    //flash sd card write indicator led "SD_CARD_WRITE_LED"
    digitalWrite(SD_CARD_WRITE_LED, HIGH);
    logfile.print(message);
    digitalWrite(SD_CARD_WRITE_LED, LOW);
    // Flush after every write so data survives sudden power loss.
    logfile.flush();
}
// Write `message` plus a newline to the log file, lighting the write LED
// during the SD access; flushed immediately like sd_print().
void Logger::sd_println(String message) {
    digitalWrite(SD_CARD_WRITE_LED, HIGH);
    logfile.println(message);
    digitalWrite(SD_CARD_WRITE_LED, LOW);
    logfile.flush();
}
|
const path = require(`path`);
const _ = require('lodash');
const { createFilePath } = require(`gatsby-source-filesystem`);
/**
 * Register one Gatsby page per blog post, wiring each page's context with
 * its slug plus the neighbouring posts (for prev/next navigation).
 * `posts` is the array of { node } edges; `createPage` is Gatsby's action.
 */
function createPostPage(posts, createPage) {
  const postTemplate = path.resolve(`./src/templates/blogPost.js`);
  const lastIndex = posts.length - 1;
  posts.forEach(({ node }, index) => {
    // Neighbours are null at the ends of the list.
    let previous = null;
    if (index > 0) {
      previous = posts[index - 1].node;
    }
    let next = null;
    if (index < lastIndex) {
      next = posts[index + 1].node;
    }
    createPage({
      path: node.fields.slug,
      component: postTemplate,
      context: {
        slug: node.fields.slug,
        previous,
        next,
      },
    });
  });
}
/**
 * Register one index page per category under /category/<kebab-name>/.
 * `categories` is the GraphQL group result ({ fieldValue, totalCount }).
 */
function createCategoryPage(categories, createPage) {
  const categoryTemplate = path.resolve(`./src/pages/index.js`);
  for (const category of categories) {
    createPage({
      path: `/category/${_.kebabCase(category.fieldValue)}/`,
      component: categoryTemplate,
      context: {
        category: category.fieldValue,
      },
    });
  }
}
/**
 * Tally tag usage across all posts.
 * @returns Map keyed by tag name, values { name, count }.
 */
function createTagMap(posts) {
  const tally = new Map();
  for (const { node } of posts) {
    // Posts without a tags field contribute nothing.
    for (const tag of node.frontmatter.tags || []) {
      const entry = tally.get(tag);
      if (entry) {
        entry.count += 1;
      } else {
        tally.set(tag, { name: tag, count: 1 });
      }
    }
  }
  return tally;
}
/**
 * Register one page per tag under /tag/<kebab-name>/. Each page's context
 * also carries the full tag list (sorted by descending count) so the
 * template can render a tag cloud.
 */
function createTagPage(posts, createPage) {
  const tagTemplate = path.resolve(`./src/templates/blogTag.js`);
  const tags = [...createTagMap(posts).values()];
  tags.sort((a, b) => b.count - a.count);
  for (const tag of tags) {
    createPage({
      path: `/tag/${_.kebabCase(tag.name)}/`,
      component: tagTemplate,
      context: {
        tag: tag.name,
        tags,
      },
    });
  }
}
exports.onCreateNode = ({ node, getNode, actions }) => {
const { createNodeField } = actions;
if (node.internal.type === `MarkdownRemark`) {
const slug = createFilePath({ node, getNode, basePath: `content` });
createNodeField({
node,
name: `slug`,
value: slug,
});
}
};
exports.createPages = async ({ graphql, actions }) => {
const { createPage } = actions;
const result = await graphql(`
{
postsRemark: allMarkdownRemark(
filter: { fileAbsolutePath: { regex: "/(content/post)/" } }
sort: { fields: frontmatter___date, order: DESC }
limit: 2000
) {
edges {
node {
fields {
slug
}
frontmatter {
tags
title
category
}
}
}
}
categoriesGroup: allMarkdownRemark(limit: 2000) {
group(field: frontmatter___category) {
fieldValue
totalCount
}
}
}
`);
const posts = result.data.postsRemark.edges;
createPostPage(posts, createPage);
createTagPage(posts, createPage);
createCategoryPage(result.data.categoriesGroup.group, createPage);
};
|
#!/bin/bash
# Copyright 2019 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script installs Cruise Control (https://github.com/linkedin/cruise-control)
# on a Dataproc Kafka cluster.
#
# Every node running Kafka broker will be updated to use the Cruise Control
# metric reporter, and Cruise Control server will be running on the first
# master node (port 9090 by default). By default, self healing is enabled for
# broker failure, goal violation and metric anomaly.
set -euxo pipefail

readonly CRUISE_CONTROL_HOME="/opt/cruise-control"
readonly CRUISE_CONTROL_CONFIG_FILE="${CRUISE_CONTROL_HOME}/config/cruisecontrol.properties"
readonly KAFKA_HOME=/usr/lib/kafka
readonly KAFKA_CONFIG_FILE='/etc/kafka/conf/server.properties'
readonly ROLE="$(/usr/share/google/get_metadata_value attributes/dataproc-role)"

# User-tunable settings, read from instance metadata with defaults.
readonly CRUISE_CONTROL_VERSION="$(/usr/share/google/get_metadata_value attributes/cruise-control-version || echo 2.0.37)"
readonly CRUISE_CONTROL_HTTP_PORT="$(/usr/share/google/get_metadata_value attributes/cruise-control-http-port || echo 9090)"
readonly SELF_HEALING_BROKER_FAILURE_ENABLED="$(/usr/share/google/get_metadata_value attributes/self-healing-broker-failure-enabled || echo true)"
readonly SELF_HEALING_GOAL_VIOLATION_ENABLED="$(/usr/share/google/get_metadata_value attributes/self-healing-goal-violation-enabled || echo true)"
readonly SELF_HEALING_METRIC_ANOMALY_ENABLED="$(/usr/share/google/get_metadata_value attributes/self-healing-metric-anomaly-enabled || echo false)"
readonly BROKER_FAILURE_ALERT_THRESHOLD_MS="$(/usr/share/google/get_metadata_value attributes/broker-failure-alert-threshold-ms || echo 120000)"
# Fix: this previously re-read attributes/broker-failure-alert-threshold-ms
# (copy-paste), making the self-healing threshold impossible to configure.
readonly BROKER_FAILURE_SELF_HEALING_THRESHOLD_MS="$(/usr/share/google/get_metadata_value attributes/broker-failure-self-healing-threshold-ms || echo 300000)"
# Shallow-clone the requested Cruise Control release tag into
# /opt/cruise-control (CRUISE_CONTROL_HOME).
function download_cruise_control() {
  mkdir -p /opt
  pushd /opt
  # --depth 1: only the tagged revision is needed for a build.
  git clone --branch ${CRUISE_CONTROL_VERSION} --depth 1 https://github.com/linkedin/cruise-control.git
  popd
}
# Build the Cruise Control jars (and copy runtime dependencies) with the
# repository's bundled Gradle wrapper.
function build_cruise_control() {
  pushd ${CRUISE_CONTROL_HOME}
  ./gradlew jar copyDependantLibs
  popd
}
# Install the Cruise Control metrics reporter into the local Kafka broker
# and restart it. No-op on nodes without Kafka.
function update_kafka_metrics_reporter() {
  # Fix: the original tested CRUISE_CONTROL_HOME here, which is always
  # present after download_cruise_control — KAFKA_HOME is what actually
  # indicates a Kafka installation on this node.
  if [[ ! -d "${KAFKA_HOME}" ]]; then
    echo "Kafka is not installed on this node ${HOSTNAME}, skip configuring Cruise Control."
    return 0
  fi
  cp ${CRUISE_CONTROL_HOME}/cruise-control-metrics-reporter/build/libs/cruise-control-metrics-reporter-${CRUISE_CONTROL_VERSION}.jar \
    ${KAFKA_HOME}/libs
  cat >>${KAFKA_CONFIG_FILE} <<EOF
# Properties added by Cruise Control init action.
metric.reporters=com.linkedin.kafka.cruisecontrol.metricsreporter.CruiseControlMetricsReporter
EOF
  systemctl restart kafka-server
}
# Append the init-action overrides to cruisecontrol.properties on the
# node that will run the Cruise Control server. The heredoc body is
# written verbatim (after variable expansion) into the properties file.
function configure_cruise_control_server() {
  echo "Configuring cruise control server."
  cat >>"${CRUISE_CONTROL_CONFIG_FILE}" <<EOF
# Properties from the Cruise Control init action.
webserver.http.port=${CRUISE_CONTROL_HTTP_PORT}
self.healing.broker.failure.enabled=${SELF_HEALING_BROKER_FAILURE_ENABLED}
self.healing.goal.violation.enabled=${SELF_HEALING_GOAL_VIOLATION_ENABLED}
self.healing.metric.anomaly.enabled=${SELF_HEALING_METRIC_ANOMALY_ENABLED}
broker.failure.alert.threshold.ms=${BROKER_FAILURE_ALERT_THRESHOLD_MS}
broker.failure.self.healing.threshold.ms=${BROKER_FAILURE_SELF_HEALING_THRESHOLD_MS}
anomaly.detection.goals=com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaCapacityGoal,com.linkedin.kafka.cruisecontrol.analyzer.goals.DiskCapacityGoal,com.linkedin.kafka.cruisecontrol.analyzer.goals.CpuCapacityGoal,com.linkedin.kafka.cruisecontrol.analyzer.goals.ReplicaDistributionGoal
metric.anomaly.finder.class=com.linkedin.kafka.cruisecontrol.detector.NoopMetricAnomalyFinder
EOF
}
# Wait (up to ~100 s) for the metrics reporter to create its Kafka topic,
# then launch the Cruise Control server in the background.
function start_cruise_control_server() {
  local metrics_topic=""
  for ((i = 1; i <= 20; i++)); do
    # `|| true`: under `set -e -o pipefail` a non-matching grep would
    # otherwise abort the script (the original masked this via `local`).
    metrics_topic=$(/usr/lib/kafka/bin/kafka-topics.sh --list --zookeeper localhost:2181 | grep __CruiseControlMetrics || true)
    if [[ -n "${metrics_topic}" ]]; then
      break
    else
      echo "Metrics topic __CruiseControlMetrics is not created yet, retry $i ..."
      sleep 5
    fi
  done
  if [[ -z "${metrics_topic}" ]]; then
    # Fix: the original called `err`, which is not defined in this script.
    echo "Metrics topic __CruiseControlMetrics was not found in the cluster." >&2
    exit 1
  fi
  echo "Start Cruise Control server on ${HOSTNAME}."
  pushd ${CRUISE_CONTROL_HOME}
  ./kafka-cruise-control-start.sh config/cruisecontrol.properties &
  popd
}
# Entry point: every node builds Cruise Control and wires the metrics
# reporter into Kafka; only the first master also runs the CC server.
function main() {
  download_cruise_control
  build_cruise_control
  update_kafka_metrics_reporter
  # Run CC on the first master node.
  if [[ "${HOSTNAME}" == *-m || "${HOSTNAME}" == *-m-0 ]]; then
    configure_cruise_control_server
    start_cruise_control_server
  fi
}
main
|
<filename>src/TimeUnitValue.ts
/*!
* @author electricessence / https://github.com/electricessence/
* @license MIT
*/
import TimeQuantity from './TimeQuantity';
import TimeUnit from './TimeUnit';
/**
* TimeUnitValue allows for passing around a reference to a mutable measure of time coerced by its unit type.
*/
export default class TimeUnitValue
	extends TimeQuantity
{
	/**
	 * @param value A raw numeric quantity (interpreted as `_units`), or
	 *              another TimeQuantity whose total is converted into `_units`.
	 * @param _units The unit type this instance is measured in.
	 */
	constructor (value: number | TimeQuantity, private _units: TimeUnit.UnitType)
	{
		// Fix: use strict equality for the typeof check (`==` behaved
		// identically here but `===` is the TypeScript idiom).
		super(typeof value==='number'
			? value
			: getUnitQuantityFrom(value, _units));
		TimeUnit.assertValid(_units);
	}

	/** The measured quantity, expressed in `units`. */
	get value (): number
	{
		return this._quantity;
	}

	set value (v: number)
	{
		this._quantity = v;
		// Invalidate the cached total so derived values are recomputed.
		if(!this._total.tryReset()) throw new Error('Unable to update underlying value.');
	}

	// To avoid confusion, the unit type can only be set once at construction.
	get units (): TimeUnit.UnitType
	{
		return this._units;
	}

	/** Factory: defaults to milliseconds when no unit is given. */
	static from (
		value: number | TimeQuantity,
		units: TimeUnit.UnitType = TimeUnit.UnitType.Milliseconds): TimeUnitValue
	{
		return new TimeUnitValue(value, units);
	}

	/** Total duration expressed in milliseconds. */
	getTotalMilliseconds (): number
	{
		return TimeUnit.toMilliseconds(this._quantity, this._units);
	}

	/** Returns a NEW TimeUnitValue measured in `units` (defaults to this unit). */
	to (units: TimeUnit.UnitType = this.units): TimeUnitValue
	{
		return TimeUnitValue.from(this, units);
	}
}
// Convert another TimeQuantity's total into the requested unit by going
// through its millisecond total.
function getUnitQuantityFrom (q: TimeQuantity, units: TimeUnit.UnitType): number
{
	return TimeUnit.fromMilliseconds(q.getTotalMilliseconds(), units);
}
|
# Install the panku script as an extensionless executable command in bin/.
cp panku/panku.py bin/panku
|
import {
BaseClientSideWebPart,
IPropertyPaneSettings,
IWebPartContext,
PropertyPaneSlider
} from '@microsoft/sp-client-preview';
import styles from './StarterWp.module.scss';
import * as strings from 'starterWpStrings';
import { IStarterWpWebPartPropspropertiesStarter } from './IStarterWpWebPartProps';
import { EnvironmentType } from '@microsoft/sp-client-base';
import MockItemsStarter from './Mocks/MockItemsStarter';
/** Shape of a single SharePoint list item as returned by the REST query. */
export interface ISPListItem {
  Id: string;
  Title: string;
  MyMultiText: string;
}
/** Envelope matching the SharePoint REST response: items live under `value`. */
export interface ISPListItems {
  value: ISPListItem[];
}
/**
 * Demo web part: renders a header plus the newest items of the
 * "myCustomList" list (mock data when running in the local workbench).
 * The number of items fetched is driven by the `maxitems` property.
 */
export default class StarterWpWebPart extends BaseClientSideWebPart<IStarterWpWebPartPropspropertiesStarter> {

  public constructor(context: IWebPartContext) {
    super(context);
  }

  /** Render the static header markup, then kick off the async list fetch. */
  public render(): void {
    // Fix vs original: `<div id="spListItemsContainer" /></div>` was a
    // self-closed div followed by a stray close tag — now a plain pair.
    this.domElement.innerHTML = `
      <div class="${styles.starterWp}">
        <div class="${styles.container}">
          <div class="ms-Grid-row ms-bgColor-themePrimary ms-fontColor-white ${styles.row}">
            <p class="ms-font-l ms-fontColor-white"><strong>JOAO LIVIO SPFx DEMOS</strong></p>
            <p class="ms-font-m ms-fontColor-white"><strong>Description:</strong> This WP will fetch the number of items
            that you choose in the propreties. It's non reactive, you have to apply for assuming you value</p>
            <p class="ms-font-l ms-fontColor-white"><strong>Items to fetch:</strong> ${this.properties.maxitems}</p>
            <div class='ms-font-m ms-fontColor-white'><strong>Loaded from</strong> ${this.context.pageContext.web.title}</div>
            <p>More samples go to Git from <a href="https://github.com/SharePoint/sp-dev-fx-webparts"><strong>Vesa Juvonen</strong></a></p>
          </div>
        </div>
        <div id="spListItemsContainer"></div>
      </div>`;
    this._renderListAsync();
  }

  /** Wrap the mock data source in the same envelope as the REST response. */
  private _getMockListData(): Promise<ISPListItems> {
    return MockItemsStarter.get(this.context.pageContext.web.absoluteUrl)
      .then((data: ISPListItem[]) => {
        var listData: ISPListItems = { value: data };
        return listData;
      }) as Promise<ISPListItems>;
  }

  /** Fetch up to `maxitems` items of "myCustomList" via the SP REST API. */
  private _getListData(): Promise<ISPListItems> {
    var lib = "myCustomList";
    return this.context.httpClient.get(this.context.pageContext.web.absoluteUrl + `/_api/web/lists/getbytitle('${lib}')/items?$top=` + this.properties.maxitems)
      .then((response: Response) => {
        return response.json();
      });
  }

  /** Render the fetched items into the placeholder div. */
  private _renderList(items: ISPListItem[]): void {
    let html: string = "";
    items.forEach((item: ISPListItem) => {
      // Fix vs original: `class="${styles.starterWp}2>` left the class
      // attribute unterminated (missing closing quote, stray "2").
      html += `
        <ul class="${styles.starterWp}">
          <p class="${styles.listItem}">
            <span class="ms-fontSize-l">ID: ${item.Id} - Title: ${item.Title}</span>
            <br/><br/>
            <span class="ms-fontSize-l">Multi Text</span>
            <br/><br/>
            <span class="ms-font-l">${item.MyMultiText}</span>
          </p>
        </ul>
        <br/>`;
    });
    const listContainer: Element = this.domElement.querySelector('#spListItemsContainer');
    listContainer.innerHTML = html;
  }

  /** Pick the mock or real data source depending on the environment. */
  private _renderListAsync(): void {
    // Local environment
    if (this.context.environment.type === EnvironmentType.Local) {
      this._getMockListData().then((response) => {
        this._renderList(response.value);
      });
    }
    else {
      this._getListData()
        .then((response) => {
          this._renderList(response.value);
        });
    }
  }

  /** Property pane: a single slider controlling how many items to fetch. */
  protected get propertyPaneSettings(): IPropertyPaneSettings {
    return {
      pages: [
        {
          header: {
            description: strings.PropertyPaneDescription
          },
          groups: [
            {
              groupName: strings.BasicGroupName,
              groupFields: [
                PropertyPaneSlider('maxitems', {
                  label: strings.MaxItemsFieldLabel,
                  max: 5000,
                  min: 100,
                  step: 100,
                  showValue: true
                })
              ]
            }
          ]
        }
      ]
    };
  }

  /** Non-reactive: property changes apply only when the user hits Apply. */
  protected get disableReactivePropertyChanges(): boolean {
    return true;
  }
}
|
-- AlterTable
-- Adds a nullable text column "group" to "Scheduled"; quoted because
-- GROUP is a reserved SQL keyword.
ALTER TABLE "Scheduled" ADD COLUMN "group" TEXT;
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: alarmRetarder
# Required-Start: $network $local_fs $remote_fs
# Required-Stop:     $network $local_fs $remote_fs
# X-Start-Before:
# X-Stop-After:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# X-Interactive: false
# Short-Description: alarmRetarder init script
# Description: This is the init script for alarmRetarder
### END INIT INFO
# Load LSB helpers (log_daemon_msg, log_end_msg, status_of_proc, ...).
. /lib/lsb/init-functions

DAEMON_NAME=alarmRetarder
DAEMON=/opt/alarmRetarder/main.py
PIDFILE=/var/run/alarmRetarder.pid

# NOTE(review): start-stop-daemon's --exec matches the executable image;
# for a Python script the running process is the interpreter, so stop/
# status matching may rely solely on the pidfile — confirm, or consider
# --startas/--name.
case "${1}" in
  start)
    log_daemon_msg "Starting ${DAEMON_NAME}" "${DAEMON_NAME}"
    start-stop-daemon --start --quiet --exec ${DAEMON} --background --make-pidfile --pidfile ${PIDFILE} > /dev/null || exit 1
    log_end_msg 0
    ;;
  stop)
    log_daemon_msg "Stopping ${DAEMON_NAME}" "${DAEMON_NAME}"
    # TERM first, escalate to KILL after 30 s; another 5 s grace for KILL.
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile ${PIDFILE}
    RETURN="${?}"
    # 2 = process would not die; propagate per LSB conventions.
    [ "${RETURN}" = 2 ] && exit 2
    log_end_msg 0
    ;;
  force-reload|restart)
    ${0} stop
    ${0} start
    ;;
  status)
    status_of_proc ${DAEMON} && exit 0 || exit $?
    ;;
  *)
    log_success_msg "Usage: ${0} {start|stop|restart|force-reload|status}"
    exit 1
    ;;
esac
|
#!/bin/bash
# Conda build script for the R package rsnps: native R CMD INSTALL on the
# platforms listed below, plain file copy elsewhere.
# NOTE(review): osx-64 matches the FIRST condition, so the osx-64-specific
# install_name_tool fix-up branch inside the else below is unreachable —
# confirm which path osx-64 builds are meant to take.
if [[ $target_platform =~ linux.* ]] || [[ $target_platform == win-32 ]] || [[ $target_platform == win-64 ]] || [[ $target_platform == osx-64 ]]; then
  export DISABLE_AUTOBREW=1
  $R CMD INSTALL --build .
else
  # Fallback: copy the pre-built package straight into the R library.
  mkdir -p $PREFIX/lib/R/library/rsnps
  mv * $PREFIX/lib/R/library/rsnps
  if [[ $target_platform == osx-64 ]]; then
    pushd $PREFIX
    for libdir in lib/R/lib lib/R/modules lib/R/library lib/R/bin/exec sysroot/usr/lib; do
      pushd $libdir || exit 1
      # Rewrite absolute install names from the build machine's R /
      # gfortran / clang toolchains to the conda prefix. `|| true`: not
      # every library references every dependency.
      for SHARED_LIB in $(find . -type f -iname "*.dylib" -or -iname "*.so" -or -iname "R"); do
        echo "fixing SHARED_LIB $SHARED_LIB"
        install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5.0-MRO/Resources/lib/libR.dylib "$PREFIX"/lib/R/lib/libR.dylib $SHARED_LIB || true
        install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libR.dylib "$PREFIX"/lib/R/lib/libR.dylib $SHARED_LIB || true
        install_name_tool -change /usr/local/clang4/lib/libomp.dylib "$PREFIX"/lib/libomp.dylib $SHARED_LIB || true
        install_name_tool -change /usr/local/gfortran/lib/libgfortran.3.dylib "$PREFIX"/lib/libgfortran.3.dylib $SHARED_LIB || true
        install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libquadmath.0.dylib "$PREFIX"/lib/libquadmath.0.dylib $SHARED_LIB || true
        install_name_tool -change /usr/local/gfortran/lib/libquadmath.0.dylib "$PREFIX"/lib/libquadmath.0.dylib $SHARED_LIB || true
        install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libgfortran.3.dylib "$PREFIX"/lib/libgfortran.3.dylib $SHARED_LIB || true
        install_name_tool -change /usr/lib/libgcc_s.1.dylib "$PREFIX"/lib/libgcc_s.1.dylib $SHARED_LIB || true
        install_name_tool -change /usr/lib/libiconv.2.dylib "$PREFIX"/sysroot/usr/lib/libiconv.2.dylib $SHARED_LIB || true
        install_name_tool -change /usr/lib/libncurses.5.4.dylib "$PREFIX"/sysroot/usr/lib/libncurses.5.4.dylib $SHARED_LIB || true
        install_name_tool -change /usr/lib/libicucore.A.dylib "$PREFIX"/sysroot/usr/lib/libicucore.A.dylib $SHARED_LIB || true
        install_name_tool -change /usr/lib/libexpat.1.dylib "$PREFIX"/lib/libexpat.1.dylib $SHARED_LIB || true
        install_name_tool -change /usr/lib/libcurl.4.dylib "$PREFIX"/lib/libcurl.4.dylib $SHARED_LIB || true
        install_name_tool -change /usr/lib/libc++.1.dylib "$PREFIX"/lib/libc++.1.dylib $SHARED_LIB || true
        install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libc++.1.dylib "$PREFIX"/lib/libc++.1.dylib $SHARED_LIB || true
      done
      popd
    done
    popd
  fi
fi
|
// Powers of ten up to 10^8, indexed by exponent; used by
// display_numbers() to extract individual decimal digits (supports the
// display's maximum of 8 digit positions).
static const long powersOf10[] = {
    1,
    10,
    100,
    1000,
    10000,
    100000,
    1000000,
    10000000,
    100000000
};
// Shift-register bit patterns. Entries 0-7 are digit-select (multiplex)
// masks lighting one position each; entries 8-17 are numeral glyphs;
// entries 18+ are letter glyphs. NOTE(review): glyph values appear
// active-low (0 bit = segment lit, e.g. '8' = 0b10000000 lights all
// segments but the decimal point) — confirm against the hardware.
static const byte CodeMap[] = {
    0b00001000, // #--------
    0b00000100, // -#-------
    0b00000010, // --#------
    0b00000001, // ---#-----
    0b10000000, // ----#----
    0b01000000, // -----#---
    0b00100000, // ------#--
    0b00010000, // --------#
    0b11000000, // '0'
    0b11111001, // '1'
    0b10100100, // '2'
    0b10110000, // '3'
    0b10011001, // '4'
    0b10010010, // '5'
    0b10000010, // '6'
    0b11111000, // '7'
    0b10000000, // '8'
    0b10010000, // '9'
    0b10001000, // 'A'
    0b10000011, // 'b'
    0b11000110, // 'C'
    0b10100001, // 'd'
    0b10000110, // 'E'
    0b10001110, // 'F'
    0b11000010, // 'G'
    0b10001001, // 'H'
    0b11001111, // 'I'
    0b11110001, // 'J'
    0b10001001, // 'K'
    0b11000111, // 'L'
    0b11111111, // 'M' NO DISPLAY
    0b10101011, // 'n'
    0b11000000, // 'O'
    0b10001100, // 'P'
    0b10011000, // 'q'
    0b10101111, // 'r'
    0b10010010, // 'S'
    0b10000111, // 't'
    0b11000001, // 'U'
    0b11000001, // 'V'
    0b11111111, // 'W' NO DISPLAY
    0b10001001, // 'X'
    0b01101110, // 'y'
    0b10100100, // 'Z'
    0b11111111, // ' ' BLANK
    0b10111111, // '-'
    0b11110111  // '_'
};
// Pointers to data: convenience views into the three regions of CodeMap.
const byte * const stepCodes = CodeMap;          // digit-select masks [0..7]
const byte * const numeralCodes = CodeMap + 8;   // '0'..'9' glyphs
const byte * const alphaCodes = CodeMap + 18;    // 'A'..'Z' glyphs
// Forward the three control pins straight to the underlying 74HC595
// shift-register driver; no extra state is initialised here.
template<uint8_t Size>
SevenSegmentSR<Size>::SevenSegmentSR(const uint8_t serialDataPin, const uint8_t clockPin, const uint8_t latchPin): ShiftRegister74HC595<Size>(serialDataPin, clockPin, latchPin) {
}
// Multiplex scan: every `_offtime` ms, push each of the 8 segment/step
// byte pairs (see display_char) through the shift registers in turn.
// Must be called frequently from the sketch loop to keep the display lit.
template<uint8_t Size>
void SevenSegmentSR<Size>::refresh_display() {
    if (millis() - _prev_time >= _offtime) {
        _prev_time = millis();
        for (int i = 0; i < 8; i++) {
            // Pair i: segment pattern plus its digit-select byte.
            byte tmp_pinValues[] = { _pinValues[i], _pinValues[i + 8] };
            ShiftRegister74HC595<Size>::setAll(tmp_pinValues);
            // Blank the outputs between digits. NOTE(review): assumes
            // setAllHigh() turns segments off (active-low) — confirm.
            ShiftRegister74HC595<Size>::setAllHigh();
        }
    }
}
// Build the 16-byte multiplex frame for an 8-character string.
// `str` must hold at least 8 characters (set_chars() pads shorter input).
// `clock_face` overlays decorations:
//   1: decimal point at position 2, special glyph (0b00011110) at 7
//   2: decimal points at positions 2 and 6, special glyph at 7
//   3: decimal points at positions 2 and 7
template<uint8_t Size>
void SevenSegmentSR<Size>::display_char(const char str[], int clock_face) {
    byte digit[8];   // segment pattern per position
    byte steps[8];   // digit-select (multiplex) byte per position
    for (byte i = 0; i < 8; i++) {
        char ch = str[i];
        if (ch == ' ') {
            // All-zero pair: position stays dark during the scan.
            digit[i] = 0b00000000;
            steps[i] = 0b00000000;
        }
        else if (ch >= 'A' && ch <= 'Z') {
            digit[i] = alphaCodes[ch - 'A'];
            steps[i] = stepCodes[i];
        }
        else if (ch >= 'a' && ch <= 'z') {
            // Lower case maps onto the same single-case glyph table.
            digit[i] = alphaCodes[ch - 'a'];
            steps[i] = stepCodes[i];
        }
        else if (ch >= '0' && ch <= '9') {
            digit[i] = numeralCodes[ch - '0'];
            steps[i] = stepCodes[i];
        }
        else {
            // Any unsupported character renders blank.
            digit[i] = 0b00000000;
            steps[i] = 0b00000000;
        }
        if (clock_face == 1) {
            if (i == 2) {
                // Clearing bit 7 lights the decimal point for this glyph.
                bitClear(digit[i], 7);
                steps[i] = stepCodes[i];
            }
            else if (i == 7) {
                digit[i] = 0b00011110;
                steps[i] = stepCodes[i];
            }
        }
        else if (clock_face == 2) {
            if (i == 2) {
                bitClear(digit[i], 7);
                steps[i] = stepCodes[i];
            }
            else if (i == 6) {
                bitClear(digit[i], 7);
                steps[i] = stepCodes[i];
            }
            else if (i == 7) {
                digit[i] = 0b00011110;
                steps[i] = stepCodes[i];
            }
        }
        else if (clock_face == 3) {
            if (i == 2) {
                bitClear(digit[i], 7);
                steps[i] = stepCodes[i];
            }
            else if (i == 7) {
                bitClear(digit[i], 7);
                steps[i] = stepCodes[i];
            }
        }
    }
    // Commit the frame; refresh_display() scans these 16 bytes.
    for (int i = 0; i < 8; i++) {
        _pinValues[i] = digit[i];
        _pinValues[i + 8] = steps[i];
    }
}
// Display an arbitrary string: normalise it to exactly 8 characters,
// then build the frame via display_char().
template<uint8_t Size>
void SevenSegmentSR<Size>::set_chars(String chars, int clock_face) {
    // Fix: the original `while (chars.length() != 8)` padding loop never
    // terminated when the input was longer than 8 characters.
    if (chars.length() > 8) {
        chars = chars.substring(0, 8);
    }
    while (chars.length() < 8) {
        chars += " ";
    }
    display_char(chars.c_str(), clock_face);
}
// Render `number` using its leftmost `nb_length` digit positions
// (nb_length must be 1..8); the remaining positions are blanked.
template<uint8_t Size>
void SevenSegmentSR<Size>::display_numbers(long number, byte nb_length) {
    byte digit[8];
    byte steps[8];
    byte nr[8];
    // Extract decimal digits most-significant first via powersOf10.
    for (int i = 0; i < nb_length; i++) {
        nr[i] = (number / powersOf10[nb_length-1-i]) % 10;
    }
    for (byte i = 0; i < nb_length; i++) {
        digit[i] = numeralCodes[nr[i]];
        steps[i] = stepCodes[i];
    }
    // Blank the unused positions.
    for (byte i = nb_length; i < 8; i++) {
        digit[i] = 0b00000000;
        steps[i] = 0b00000000;
    }
    // Commit the frame for refresh_display() to scan.
    for (int i = 0; i < 8; i++) {
        _pinValues[i] = digit[i];
        _pinValues[i + 8] = steps[i];
    }
}
// Display `numbers` using exactly as many digit positions as its decimal
// representation needs (1..8), replacing the original if/else chain with
// a threshold loop. Values wider than 8 digits are clamped to 8, matching
// the original's top branch.
template<uint8_t Size>
void SevenSegmentSR<Size>::set_numbers(long numbers) {
    byte nb_digits = 1;
    long threshold = 9;
    // Widen while the value exceeds the current all-nines threshold.
    while (numbers > threshold && nb_digits < 8) {
        nb_digits++;
        threshold = threshold * 10 + 9;
    }
    display_numbers(numbers, nb_digits);
}
// Set the multiplex refresh interval in milliseconds (stored as
// `_offtime`, compared against millis() in refresh_display()).
// NOTE(review): the name suggests a brightness level, but the value is
// used as the pause between refresh passes — confirm the mapping from
// this interval to perceived brightness.
template<uint8_t Size>
void SevenSegmentSR<Size>::set_Brightness(int brightness) {
    _offtime = brightness;
}
|
# Start the Node server detached from the terminal, appending stdout to a.log.
nohup node index.js > a.log &
|
from Jumpscale import j
class SSHKey(j.baseclasses.object_config):
    """
    Config-backed SSH key pair.

    Key material (pubkey/privkey/path/...) is persisted in BCDB via
    _SCHEMATEXT and mirrored to files on disk (``path`` and
    ``path + ".pub"``); ``_init`` reconciles the two views on each load.
    """

    _SCHEMATEXT = """
    @url = jumpscale.sshkey.client
    name** = "" (S)
    pubkey = "" (S)
    allow_agent = True (B)
    passphrase_ = "" (S)
    privkey = "" (S)
    duration = 86400 (I)
    path = "" (S) #path of the private key
    """

    def _init(self, **kwargs):
        # Reconcile BCDB state with the key files on disk; recurses once
        # after generating a brand-new key pair (see below).
        self._connected = None
        if self.name == "":
            raise j.exceptions.Base("need to specify name")
        if self.path == "":
            # Prefer a ./keys directory under the cwd, else fall back to ~/.ssh.
            keyspath = "%s/keys" % (j.sal.fs.getcwd())
            if j.sal.fs.exists(keyspath):
                self.path = keyspath + "/%s" % self.name
                self._save()
            else:
                keyspath_system = j.core.tools.text_replace("{DIR_HOME}/.ssh")
                if j.sal.fs.exists(keyspath_system):
                    self.path = keyspath_system + "/%s" % self.name
                    self._save()
        if not j.sal.fs.exists(self.path):
            if self.privkey:
                # BCDB has the private key but disk does not: restore it.
                j.sal.fs.writeFile(self.path, self.privkey)
            else:
                # No key anywhere: generate a fresh pair, then re-run the
                # reconciliation from the top.
                self.pubkey = ""
                self._save()
                self.generate()
                self._init(**kwargs)
        else:
            # Disk copy exists: verify it matches what BCDB recorded.
            if self.privkey:
                if j.sal.fs.exists(self.path):
                    c = j.sal.fs.readFile(self.path)
                    if not c.strip() == self.privkey.strip():
                        raise j.exceptions.Input("mismatch between key in BCDB and in your filesystem (PRIVKEY)")
            if self.pubkey:
                if j.sal.fs.exists("%s.pub" % (self.path)):
                    c = j.sal.fs.readFile("%s.pub" % (self.path))
                    if not c.strip() == self.pubkey.strip():
                        raise j.exceptions.Input("mismatch between key in BCDB and in your filesystem (PUBKEY)")
        assert j.sal.fs.exists(self.path)
        if not self.privkey:
            # Backfill BCDB from the file on disk.
            self.privkey = j.sal.fs.readFile(self.path)
            self._save()
        if not self.pubkey and self.privkey:
            # Locate (or let ssh-keygen recreate) the public half, then cache it.
            path = "%s.pub" % (self.path)
            if not j.sal.fs.exists(path):
                cmd = 'ssh-keygen -f {} -N "{}"'.format(self.path, self.passphrase_)
                j.sal.process.execute(cmd)
            self.pubkey = j.sal.fs.readFile(path)
            self._save()

    def load_from_filesystem(self):
        """
        look for key on filesystem & load in BCDB
        :return:
        """
        self.pubkey = j.sal.fs.readFile("%s.pub" % (self.path))
        self.privkey = j.sal.fs.readFile(self.path)
        self._save()

    def save(self):
        # Re-run reconciliation first so disk and BCDB agree before persisting.
        self._init()
        self._save()

    def _save(self):
        # Persist via the base class directly (skips the save() reconciliation).
        j.baseclasses.object_config.save(self)

    def generate(self, reset=False):
        """
        Generate ssh key
        :param reset: if True, then delete old ssh key from dir, defaults to False
        :type reset: bool, optional
        """
        self._log_debug("generate ssh key")
        if reset:
            self.delete_from_sshdir()
            self.pubkey = ""
            self.privkey = ""
        else:
            # Not resetting: first flush any known key material to disk,
            # then refuse to overwrite a key that is already known.
            if not j.sal.fs.exists(self.path):
                if self.privkey != "" and self.pubkey != "":
                    self.write_to_sshdir()
            if self.pubkey:
                raise j.exceptions.Base("cannot generate key because pubkey already known")
            if self.privkey:
                raise j.exceptions.Base("cannot generate key because privkey already known")
        if not j.sal.fs.exists(self.path) or reset:
            cmd = 'ssh-keygen -t rsa -f {} -N "{}"'.format(self.path, self.passphrase_)
            j.sal.process.execute(cmd, timeout=10)
        # Reconcile the freshly written files back into BCDB.
        self._init()

    def delete(self):
        """
        will delete from from config
        """
        self._log_debug("delete:%s" % self.name)
        j.baseclasses.object_config.delete(self)
        # self.delete_from_sshdir()

    def delete_from_sshdir(self):
        # Remove both halves of the key pair from disk (BCDB copy untouched).
        j.sal.fs.remove("%s.pub" % self.path)
        j.sal.fs.remove("%s" % self.path)

    def write_to_sshdir(self):
        """
        Write to ssh dir the private and public key
        """
        j.sal.fs.writeFile(self.path, self.privkey)
        j.sal.fs.writeFile(self.path + ".pub", self.pubkey)

    # def sign_ssh_data(self, data):
    #     return self.agent.sign_ssh_data(data)
    #     # TODO: does not work, property needs to be implemented

    def load(self):
        """
        load ssh key in ssh-agent, if no ssh-agent is found, new ssh-agent will be started
        """
        self._log_debug("load sshkey: %s for duration:%s" % (self.name, self.duration))
        j.core.myenv.sshagent.key_load(self.path, passphrase=self.passphrase_, duration=self.duration)

    def unload(self):
        # Remove the key from the agent; repeat until ssh-add reports a
        # non-zero status (nothing left to remove).
        cmd = "ssh-add -d %s " % (self.path)
        rc = 0
        while rc == 0:
            rc, _, _ = j.sal.process.execute(cmd, die=False)  # there could be more than 1 instance
        if self.is_loaded():
            raise j.exceptions.Base("failed to unload sshkey")

    def is_loaded(self):
        """
        check if key is loaded in the ssh agent
        :return: whether ssh key was loadeed in ssh agent or not
        :rtype: bool
        """
        for path, key in j.core.myenv.sshagent._read_keys():
            # Agent entries may be "<type> <key> [comment]" or just the key.
            if " " in key.strip():
                keypub = key.split(" ")[1].strip()
            else:
                keypub = key.strip()
            if self.path == path and self.pubkey_only == keypub:
                return True
        self._log_debug("ssh key: %s is not loaded", self.name)
        return False

    @property
    def pubkey_only(self):
        """
        return the key only with no type e.g.ssh-rsa or email/username
        :return:
        """
        if not self.pubkey:
            raise j.exceptions.Base("pubkey is None")
        # Expected formats: "<type> <key>" or "<type> <key> <comment>".
        r = self.pubkey.split(" ")
        if len(r) == 2:
            return r[1]
        elif len(r) == 3:
            return r[1]
        else:
            raise j.exceptions.Base("format of pubkey not ok:%s" % self.pubkey)
|
<reponame>rainmaple/duckdb
#include "catch.hpp"
#include "duckdb/common/types/hyperloglog.hpp"
#include <vector>
using namespace duckdb;
using namespace std;
// Exercises HyperLogLog: exact count for one distinct value, approximate
// count for a million distinct values, and merge composability.
TEST_CASE("Test that hyperloglog works", "[hyperloglog]") {
	HyperLogLog log;
	// add a million elements of the same value
	int x = 4;
	for (size_t i = 0; i < 1000000; i++) {
		log.Add((uint8_t *)&x, sizeof(int));
	}
	// A single distinct value must be counted exactly.
	REQUIRE(log.Count() == 1);
	// now add a million different values
	HyperLogLog log2;
	for (size_t i = 0; i < 1000000; i++) {
		x = i;
		log2.Add((uint8_t *)&x, sizeof(int));
	}
	// the count is approximate, but should be pretty close to a million
	size_t count = log2.Count();
	REQUIRE(count > 999000LL);
	REQUIRE(count < 1001000LL);
	// now we can merge the HLLs
	auto new_log = log.Merge(log2);
	// the count should be pretty much the same
	count = new_log->Count();
	REQUIRE(count > 999000LL);
	REQUIRE(count < 1001000LL);
	// now test composability of the merge
	// add everything to one big one
	// add chunks to small ones and then merge them
	// the result should be the same
	HyperLogLog big;
	HyperLogLog small[16];
	for (size_t i = 0; i < 1000000; i++) {
		// NOTE(review): `i + 3 / 2` evaluates as `i + 1` (integer
		// division binds tighter) — probably meant `(i + 3) / 2`.
		// Harmless here: the test only needs a deterministic mix of
		// repeated values.
		x = ((2 * i) + 3) % (i + 3 / 2);
		big.Add((uint8_t *)&x, sizeof(int));
		small[i % 16].Add((uint8_t *)&x, sizeof(int));
	}
	// now merge them into one big HyperLogLog
	auto merged = HyperLogLog::Merge(small, 16);
	// the result should be identical to the big one
	REQUIRE(merged->Count() == big.Count());
}
|
<reponame>joemccann/photopipe<filename>app.js
var express = require('express')
, routes = require('./routes')
, http = require('http')
, path = require('path')
, request = require('request')
, fs = require('fs')
, db_client
var app = express()

// Global middleware and view configuration (Express 3.x `configure` style).
app.configure(function(){
  app.set('port', process.env.PORT || 80)
  app.set('views', __dirname + '/views')
  app.set('view engine', 'ejs')
  app.use(express.logger('dev'))
  app.use(express.favicon(__dirname + '/public/favicon.ico'))
  app.use(express.bodyParser())
  app.use(express.methodOverride())
  app.use(express.compress())
  app.use(require('stylus').middleware(__dirname + '/public'))
  app.use(express.static(path.join(__dirname, 'public')))
  app.use(express.cookieParser('photopipe'))
  app.use(express.cookieSession())
  // Expose session'd accounts to every request as a
  // local variable available in the view.
  app.use(function(req,res,next){
    app.locals.isTwitterAuth = !!req.session.twitter
    app.locals.isFacebookAuth = !!req.session.facebook
    app.locals.isInstagramAuth = !!req.session.instagram
    app.locals.isDropboxAuth = !!req.session.dropbox
    return next()
  })
  app.use(app.router)
  // Setup local variables to be available in the views.
  app.locals.title = "PhotoPipe - Download Instagram Photos, Download Facebook Galleries, Post to Twitter and More!"
  app.locals.description = "PhotoPipe is a free service so you can download Instagram Photos, download Facebook galleries, Post to photos to Twitter and More!"
  app.locals.node_version = process.version
  // For the user databases, if you don't want redis, remove this line
  // and swap out for whatever you want
  db_client = require( path.resolve(__dirname, "./database/redis-client.js") )
})

// Development-only: verbose error pages.
app.configure('development', function(){
  app.use(express.errorHandler())
})
/************************** PhotoPipe Main **************************/

/* GET routes */
app.get('/', routes.index)
app.get('/wtf', routes.wtf)
app.get('/not-implemented', routes['not-implemented'])

/* POST routes */
app.post('/smoke', routes.smoke)
app.get('/download/file', routes.download_file)

// Account/session management.
app.post('/account/login', routes.account_login)
app.post('/account/error', routes.account_error)
app.get('/account/forgot', routes.account_forgot)
app.post('/account/forgot', routes.account_forgot_find)

/************************** Dropbox Support **************************/
// Remove or comment below if you don't want Dropbox support
var dropbox_routes = require('./routes/dropbox')
app.get('/dropbox', dropbox_routes.dropbox)
app.get('/dropbox/search_for_photos', dropbox_routes.search_for_photos)
app.get('/oauth/dropbox', dropbox_routes.dropbox_oauth)

/************************** Twitter Support **************************/
// Remove or comment below if you don't want Twitter support
var twitter_routes = require('./routes/twitter')
app.get('/twitter', twitter_routes.twitter)
app.get('/twitter/get_media_timeline', twitter_routes.twitter_get_media_timeline)
app.get('/oauth/twitter', twitter_routes.twitter_oauth)

/************************** Instagram Support **************************/
// Remove or comment below if you don't want instagram support (read-only)
var instagram_routes = require('./routes/instagram')
app.get('/instagram', instagram_routes.instagram)
app.get('/instagram/get_user_recent_photos', instagram_routes.instagram_get_user_recent_photos)
app.get('/instagram/get_next_page_of_instagram_photos', instagram_routes.instagram_get_next_page_of_instagram_photos)
app.get('/oauth/instagram', instagram_routes.instagram_oauth)
app.get('/instagram/pipe/to', instagram_routes.instagram_pipe_to)
app.get('/instagram/search', instagram_routes.instagram_search)
app.post('/instagram/search', instagram_routes.instagram_search_post)
app.get('/instagram/search/geo', instagram_routes.instagram_search_geo)
app.post('/instagram/search/geo', instagram_routes.instagram_search_geo_post)

/************************** Facebook Support **************************/
// Remove or comment below if you don't want Facebook support
var facebook_routes = require('./routes/facebook')
app.get('/facebook', facebook_routes.facebook)
app.get('/facebook/get_photo_album_cover', facebook_routes.facebook_get_photo_album_cover)
app.get('/facebook/get_photos_from_album_id', facebook_routes.facebook_get_photos_from_album_id)
app.get('/facebook/get_photo_albums', facebook_routes.facebook_get_photo_albums)
app.get('/facebook/get_tagged_in_photos', facebook_routes.facebook_get_tagged_in_photos)
app.get('/facebook/get_next_page_user_photos', facebook_routes.facebook_get_next_page_user_photos)
app.get('/oauth/facebook', facebook_routes.facebook_oauth)
// Spin up le server...
http.createServer(app).listen(app.get('port'), function(){
console.log("Express server holdin it down on port " + app.get('port'));
})
|
#!/bin/bash
# List DNS records of a Cloudflare zone, using a short-lived (4 minute)
# per-page local cache.
#
# usage: $0 <cloud-account> <zone-id> [--raw]
#   --raw   dump the raw JSON of page 1 instead of parsed records

if [ "$2" = "" ]; then
	echo "usage: $0 <cloud-account> <zone-id> [--raw]"
	exit 1
elif [ ! -f "/etc/polynimbus/cloudflare/$1.sh" ]; then
	echo "error: cloud account \"$1\" not configured"
	exit 1
fi

account=$1
zoneid=$2
page=1

# Cloudflare paginates its DNS record listing; walk up to 9 pages of
# 1000 records each.
while [ "$page" -lt 10 ]; do
	file="/var/cache/polynimbus/cloudflare/records-$account-$zoneid-$page.cache"

	# Refresh the cache when the file is missing/empty or older than 4 minutes.
	if [ ! -s "$file" ] || [ "$(stat -c %Y "$file")" -le "$(date -d '-4 minutes' +%s)" ]; then
		/opt/polynimbus/drivers/cloudflare/get.sh "$account" "zones/$zoneid/dns_records?page=$page&per_page=1000&order=type&direction=asc" >"$file"
	fi

	# show all records in parsed mode, but only first 1000 (page 1) in raw mode
	if [ "$3" != "--raw" ]; then
		/opt/polynimbus/drivers/cloudflare/internal/parse-records.php <"$file"
	elif [ "$page" -eq 1 ]; then
		python -m json.tool <"$file"
	fi

	# parse-pages.php reports the total page count; stop after the last page.
	total=$(cat "$file" |/opt/polynimbus/drivers/cloudflare/internal/parse-pages.php)
	if [ "$total" -eq 1 ] || [ "$total" -eq "$page" ]; then
		break
	fi
	page=$((page + 1))
done
|
'use strict'
const fs = require('fs')
const path = require('path')
const globby = require('globby')
const MarkdownIt = require('markdown-it')
const markdownItHighlight = require('markdown-it-highlight').default
const yaml = require('js-yaml')
const mkdirp = require('mkdirp')
const removeMd = require('remove-markdown')
const defaultOptions = require('./defaultOptions')
// File extensions handled by the converter.
const EXTENSIONS = {
  JSON: '.json',
  MD: '.md',
  YML: '.yml'
}
const NEWLINE = '\n'
// Frontmatter fence. (The "SEPERATOR" spelling is kept as-is; renaming
// would touch every usage in this file.)
const FRONTMATTER_SEPERATOR = '---'
// convertMode value that converts JSON back to markdown/yaml sources.
const SOURCE_MODE = 'source'
// Main function
// Main function
// Converts the markdown/yaml files matched by `options.files` and/or the
// positional patterns in `options._` into JSON files under
// options.outputDir (or back to source files when
// options.convertMode === 'source'), and resolves with a summary object
// ({ sourceFileArray, fileMap }). Supports promise and callback styles.
function processmd (options, callback) {
  options = Object.assign({}, defaultOptions, options)

  // Configure the markdown renderer with the optional plugins.
  const markdownIt = MarkdownIt(options.markdownOptions)
  if (options.highlightCode) {
    markdownIt.use(markdownItHighlight)
  }
  if (options.headingIds) {
    markdownIt.use(require('markdown-it-named-headings'))
  }
  markdownIt.use(require('markdown-it-alerts'));

  // Callers may inject their own renderer; default to markdown-it.
  options.markdownRenderer = options.markdownRenderer || function mdRender (str) { return markdownIt.render(str) }

  const globs = (options.files || []).concat(options._ || [])
  if (globs.length === 0) {
    throw new Error('You must pass file patterns in to be processed.')
  }

  const p = new Promise(function (resolve, reject) {
    globby(globs).then(function (result) {
      const commonDir = findCommonDir(result)
      options._commonDir = commonDir

      // Watch mode: debounce a full reprocess on any change below the
      // common parent directory.
      if (options.watch) {
        const d = debounce(
          function () {
            processOutput()
          },
          options.watchDebounce,
          true
        )
        // fs.watch's { recursive: true } option isn't supported on Linux,
        // so this may throw — log and continue without watching.
        try {
          fs.watch(commonDir, { recursive: true }, function (event, filename) {
            d()
          })
        } catch (e) {
          console.log(e)
        }
      }

      // Pick the per-file processor: md/yaml -> json by default,
      // json -> source in SOURCE_MODE, or a test-injected function.
      let processingFunc = processYamlAndMarkdown
      if (typeof options._customProcessingFunc === 'function') {
        processingFunc = options._customProcessingFunc // used for testing.
      } else if (options.convertMode === SOURCE_MODE) {
        processingFunc = processJson
      }

      // Process every matched file and resolve once all callbacks fired.
      function processOutput () {
        const summaryObj = {}
        summaryObj.fileMap = {}
        summaryObj.sourceFileArray = result
        let finishCount = 0
        result.forEach(function (file, i) {
          processingFunc(file, options, function (newFile, content) {
            finishCount++
            // Replace backslashes with forward slashes to keep windows consistent.
            const filename = replaceBackslashes(newFile)
            // Remove body props from summary.
            if (!options.includeBodyProps) {
              content = removeBodyProps(content)
            }
            summaryObj.fileMap[filename] = options.convertMode === SOURCE_MODE
              ? content
              : JSON.parse(content)
            if (finishCount === result.length) {
              // Optionally persist the summary before resolving.
              if (options.summaryOutput) {
                writeFileContent(options.summaryOutput, JSON.stringify(summaryObj, null, 2), function (e, d) {
                  resolve(summaryObj)
                })
              } else {
                resolve(summaryObj)
              }
            }
          })
        })
      }
      processOutput()
    })
  })

  // Enable callback support too.
  if (callback) {
    p.then(result => {
      callback(null, result)
    })
  }
  return p
}
// Convert one markdown or yaml source file into a JSON file under
// options.outputDir, then invoke cb(newFilePath, jsonString).
function processYamlAndMarkdown (file, options, cb) {
  readFileContent(file, (err, file, fileContent) => {
    if (err) throw (err)

    // Frontmatter must start at the very first character of the file.
    const hasFrontmatter = fileContent.indexOf(FRONTMATTER_SEPERATOR) === 0
    const isYaml = file.endsWith('.yaml') || file.endsWith('.yml')
    let content = fileContent.trim()
    let frontmatter = {}
    let jsonData = {}

    // Markdown.
    if (hasFrontmatter) {
      // Non-greedy match of the first `--- ... ---` fence pair.
      const splitContent = fileContent.match(/^-{3}[\s\S]+?-{3}/)
      // Strip the fences before parsing the yaml inside them.
      frontmatter = yaml.safeLoad(splitContent[0].substring(3, splitContent[0].length - 3))
      content = fileContent.substring(splitContent[0].length).trim()
    }

    if (isYaml) {
      jsonData = yaml.safeLoad(content)
    } else {
      // Markdown: keep the raw body and its rendered HTML alongside the
      // frontmatter properties.
      jsonData = Object.assign({}, frontmatter, {
        bodyContent: content,
        bodyHtml: options.markdownRenderer(content)
      })
    }

    // Rename to the new file.
    const baseFilename = file.replace(options._commonDir, '')
    const parsedPath = path.parse(path.join(options.outputDir, baseFilename))
    const sourceExt = parsedPath.ext
    const sourceBase = parsedPath.base
    const newPathObj = Object.assign({}, parsedPath, {
      ext: EXTENSIONS.JSON,
      base: options.filenamePrefix +
        parsedPath.base.replace(sourceExt, EXTENSIONS.JSON)
    })
    const newPath = path.format(newPathObj)

    // Build a plain-text preview of roughly options.preview characters,
    // cut at the last space before the limit (or at end of text).
    if (options.preview > 0 && jsonData.bodyContent) {
      const preview = removeMd(jsonData.bodyContent).split('\r').join('') // fix Windows eol.
      let splitPoint = 0
      let i = splitPoint
      while (i < options.preview) {
        i++
        if (preview[i] === ' ') {
          splitPoint = i
        }
        if (preview[i] === undefined) {
          splitPoint = i
          break
        }
      }
      jsonData.preview = preview.substring(0, splitPoint).trim()
    }

    // Optional metadata properties describing the generated/source files.
    if (options.includeTitle && jsonData.bodyContent) {
      // Fall back to the text of the first rendered HTML tag (usually the
      // first heading). NOTE(review): throws if bodyHtml contains no tags.
      jsonData.title = jsonData.title || jsonData.bodyHtml.match(/>(.*?)<\//)[1]
    }
    if (options.includeDir) {
      jsonData.dir = replaceBackslashes(path.dirname(newPath))
    }
    if (options.includeBase) {
      jsonData.base = path.basename(newPath)
    }
    if (options.includeExt) {
      jsonData.ext = EXTENSIONS.JSON
    }
    if (options.includeSourceBase) {
      jsonData.sourceBase = sourceBase
    }
    if (options.includeSourceExt) {
      jsonData.sourceExt = sourceExt
    }

    // TODO: make this a default callback
    // 2 spaces indent for stringify.
    writeFileContent(newPath, JSON.stringify(jsonData, null, 2), function (e, d) {
      cb(newPath, JSON.stringify(jsonData))
    })
  })
}
// Convert a JSON file (previously produced by processYamlAndMarkdown)
// back into its markdown or yaml source, then invoke cb(newPath, content).
function processJson (file, options, cb) {
  readFileContent(file, (err, file, fileContent) => {
    if (err) throw (err)
    const fileData = JSON.parse(fileContent)

    // Process content.
    let newContent = ''
    // Frontmatter props = everything except file metadata and the
    // markdown body/derived properties.
    const cleanProps = cleanFileProps(
      cleanMarkdownProps(Object.assign({}, fileData))
    )
    const cleanYaml = yaml.safeDump(cleanProps)
    let extension = '.yml'
    if (isMarkdown(fileData)) {
      newContent += fileData.bodyContent + NEWLINE
      // Re-attach a `--- ... ---` frontmatter block when props remain.
      if (Object.keys(cleanProps).length > 0) {
        newContent =
          FRONTMATTER_SEPERATOR + NEWLINE +
          cleanYaml +
          FRONTMATTER_SEPERATOR + NEWLINE + NEWLINE +
          newContent
      }
      extension = '.md'
    } else {
      newContent = cleanYaml
    }

    // Rename to the new file.
    const baseFilename = file.replace(options._commonDir, '')
    const parsedPath = path.parse(path.join(options.outputDir, baseFilename))
    const sourceExt = parsedPath.ext
    // const sourceBase = parsedPath.base
    const newPathObj = Object.assign({}, parsedPath, {
      ext: extension,
      base: options.filenamePrefix +
        parsedPath.base.replace(sourceExt, extension)
    })
    const newPath = path.format(newPathObj)
    writeFileContent(newPath, newContent, function (e, d) {
      cb(newPath, newContent)
    })
  })
}
// Strip the file-path metadata props (added via includeDir/includeBase/
// includeExt/includeSourceBase/includeSourceExt) from the object.
// Mutates and returns the same object.
function cleanFileProps (obj) {
  for (const prop of ['dir', 'base', 'ext', 'sourceBase', 'sourceExt']) {
    delete obj[prop]
  }
  return obj
}
// Strip markdown body and derived props (bodyContent/bodyHtml/preview/
// title) from the object. Mutates and returns the same object.
function cleanMarkdownProps (obj) {
  for (const prop of ['bodyContent', 'bodyHtml', 'preview', 'title']) {
    delete obj[prop]
  }
  return obj
}
// Read a file making sure that it is not a directory first.
function readFileContent (file, cb) {
if (!file || fs.lstatSync(file).isDirectory()) {
return null
}
fs.readFile(file, (err, data) => {
cb(err, file, data && data.toString())
})
}
// Write a file making sure the directory exists first.
// Creates the parent directory (mkdirp), then writes and forwards the
// write result to cb(err, data).
function writeFileContent (file, content, cb) {
  const dir = path.dirname(file)
  mkdirp(dir, function (err) {
    if (err) throw (err)
    fs.writeFile(file, content, (writeErr, data) => cb(writeErr, data))
  })
}
// Replace backslashes for windows paths.
function replaceBackslashes (str) {
  return str.replace(/\\/g, '/')
}
// Determine if the parsed data describes a markdown file: both the raw
// body and its rendered HTML must be present and truthy.
function isMarkdown (data) {
  return !!(data.bodyContent && data.bodyHtml)
}
// Find the common parent directory given an array of files.
// NOTE(review): the local `path` accumulator shadows the `path` module
// (the module is not used inside this function).
function findCommonDir (files) {
  const path = files.reduce((path, file, fileIndex) => {
    // If it's a file not in any directory then just skip it
    // by assigning the previous value.
    if (!file.includes('/')) {
      return path
    }
    // No path set yet
    if (!path && fileIndex === 0) {
      return file.substr(0, file.lastIndexOf('/') + 1)
    } else {
      // Get index of last shared character
      // NOTE(review): findIndex expects a truthy/falsy result; returning
      // `index - 1` yields 0 (falsy) for a mismatch at index 1, so that
      // position is skipped. The startsWith/'' fallbacks below appear to
      // compensate — confirm intent before changing this predicate.
      let sharedIndex = Array.from(path).findIndex((element, index) => {
        if (file[index] !== element) {
          return index - 1
        }
      })
      // Round to nearest full directory
      if (sharedIndex > -1) {
        sharedIndex = path.substr(0, sharedIndex).lastIndexOf('/')
      }
      // Return shared directory path
      if (sharedIndex > -1) {
        return path.substr(0, sharedIndex + 1)
      } else if (file.startsWith(path)) {
        return path
      }
      // No shared directory path
      return ''
    }
  }, '')
  return path
}
// Remove body props from a JSON string for the summary output.
// Returns the re-serialized JSON, or undefined when content is not
// valid JSON (the parse error is deliberately swallowed).
function removeBodyProps (content) {
  try {
    const parsed = JSON.parse(content)
    for (const prop of ['bodyContent', 'bodyHtml']) {
      delete parsed[prop]
    }
    return JSON.stringify(parsed)
  } catch (e) { }
}
// Debounce from: https://davidwalsh.name/function-debounce
// Returns a wrapper that delays `func` by `wait` ms after the last call;
// when `immediate` is true the leading call fires instead.
function debounce (func, wait, immediate) {
  let timer
  return function () {
    const self = this
    const callArgs = arguments
    const invokeNow = immediate && !timer
    clearTimeout(timer)
    timer = setTimeout(function () {
      timer = null
      // Trailing-edge invocation (only when not in immediate mode).
      if (!immediate) func.apply(self, callArgs)
    }, wait)
    // Leading-edge invocation.
    if (invokeNow) func.apply(self, callArgs)
  }
}
// Public API: `default` is the main entry point; underscore-prefixed
// exports are exposed for the test suite only.
module.exports = {
  default: processmd,
  _readFileContent: readFileContent, // for testing.
  _writeFileContent: writeFileContent, // for testing.
  _isMarkdown: isMarkdown, // for testing.
  _findCommonDir: findCommonDir // for testing.
}
|
-- Per-account exposure report over a fixed list of customer accounts:
-- joins customer (sttm_customer), account (sttm_cust_account) and loan
-- tables (cltb_*) plus, via the inline view `sc`, the salary amount
-- credited on 24/08/2016 (trn_code 'SAL', credit leg). Outputs current
-- balance, overdraft limit, salary, outstanding loan balance and summed
-- initial/monthly commitments, grouped per account.
-- NOTE(review): trn_dt is compared to the string '24/08/2016' — relies
-- on the session NLS date format; confirm before reuse.
select a.branch_code,c.customer_name1,a.cust_ac_no,a.ac_desc,a.lcy_curr_balance solde_en_compte,a.tod_limit autorisation,sc.lcy_amount salaire,b.balance encours_engagement,sum(nvl(s.amount,0)) engagement_initial,sum(nvl(s.emi_amount,0)) engagement_mensuel
from sttm_customer c,sttm_cust_account a,cltb_account_master l,cltb_account_comp_bal_breakup b, CLTB_ACCOUNT_COMP_SCH s,
(SELECT h.ac_no,h.lcy_amount FROM actb_history h WHERE h.trn_dt ='24/08/2016' and h.trn_code ='SAL' and h.drcr_ind ='C') sc
where a.cust_ac_no=l.DR_PROD_AC
and b.account_number=l.ACCOUNT_NUMBER
and s.account_number = l.ACCOUNT_NUMBER
and c.customer_no=a.cust_no
and a.cust_ac_no = sc.ac_no
and b.component='PRINCIPAL'
--and a.cust_ac_no='0212820100839270'
and a.cust_ac_no in('0422820103383944',
'0202820100248361',
'0213728023546055',
'0212820100787666',
'0202820100210046',
'0422820103402471',
'0202820100244578',
'0212820101072458',
'0202820100696695',
'0422820103405672',
'0202820100107517',
'0502820103881822',
'0202820100308792',
'0212820100859834',
'0212820100992142',
'0302820104319592',
'0212820100833838',
'0202820100086080',
'0212820100849164',
'0212820101108542',
'0402820102960181',
'0212820100912020',
'0432820104539005',
'0422820103401307',
'0202820100305397',
'0402820102704004',
'0212820100937240',
'0212820100989329',
'0212820100928316',
'0302820102051538',
'0212820101119115',
'0212820100900962',
'0212820100918422',
'0212820100861386',
'0422820103391607',
'0202820100092967',
'0212820100986031',
'0212820100818609',
'0212820100933457',
'0402820102551326',
'0202820100229155',
'0422820103399852',
'0202820104175309',
'0212820100823944',
'0202820200296489',
'0422820103552045',
'0202820100682436')
and b.balance<>0
group by a.branch_code,c.customer_name1,a.cust_ac_no,a.ac_desc,a.lcy_curr_balance,a.tod_limit,sc.lcy_amount,b.balance
--having sum(nvl(s.emi_amount,0)) > sc.lcy_amount/3
order by a.cust_ac_no;
|
<filename>app/src/main/java/com/example/shoppingapp/MainActivity.java
package com.example.shoppingapp;
import androidx.annotation.NonNull;
import androidx.appcompat.app.AppCompatActivity;
import android.app.ProgressDialog;
import android.content.Intent;
import android.os.Bundle;
import android.text.TextUtils;
import android.view.View;
import android.widget.Button;
import android.widget.Toast;
import com.example.shoppingapp.Model.Users;
import com.example.shoppingapp.Prevalent.Prevalent;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.ValueEventListener;
import io.paperdb.Paper;
/**
 * Entry screen of the shopping app: offers "join" (register) and
 * "login" buttons that navigate to the corresponding activities.
 */
public class MainActivity extends AppCompatActivity {

    private Button join, login;
    private ProgressDialog loading;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        join = (Button) findViewById(R.id.join_button);
        login = (Button) findViewById(R.id.login_button);
        loading = new ProgressDialog(this);

        // Initialize Paper (local key/value store used elsewhere in the app).
        Paper.init(this);

        login.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                navigateTo(LoginActivity.class);
            }
        });
        join.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                navigateTo(RegisterActivity.class);
            }
        });
    }

    /** Launches the given activity from this screen. */
    private void navigateTo(Class<?> destination) {
        Intent intent = new Intent(MainActivity.this, destination);
        startActivity(intent);
    }
}
|
/**
* <a href="http://www.openolat.org">
* OpenOLAT - Online Learning and Training</a><br>
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br>
* you may not use this file except in compliance with the License.<br>
* You may obtain a copy of the License at the
* <a href="http://www.apache.org/licenses/LICENSE-2.0">Apache homepage</a>
* <p>
* Unless required by applicable law or agreed to in writing,<br>
* software distributed under the License is distributed on an "AS IS" BASIS, <br>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
* See the License for the specific language governing permissions and <br>
* limitations under the License.
* <p>
* Initial code contributed and copyrighted by<br>
* frentix GmbH, http://www.frentix.com
* <p>
*/
package org.olat.selenium.page.graphene;
import java.io.File;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.function.Predicate;
import org.apache.logging.log4j.Logger;
import org.junit.Assert;
import org.olat.core.logging.Tracing;
import org.olat.core.util.FileUtils;
import org.olat.core.util.StringHelper;
import org.openqa.selenium.By;
import org.openqa.selenium.ElementNotVisibleException;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.StaleElementReferenceException;
import org.openqa.selenium.TakesScreenshot;
import org.openqa.selenium.TimeoutException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.logging.LogEntries;
import org.openqa.selenium.logging.LogEntry;
import org.openqa.selenium.logging.LogType;
import org.openqa.selenium.support.ui.ExpectedCondition;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.FluentWait;
import org.openqa.selenium.support.ui.WebDriverWait;
/**
*
* Initial date: 20.06.2014<br>
* @author srosse, <EMAIL>, http://www.frentix.com
*
*/
// Static helper library for the Selenium tests: explicit waits,
// modal/wizard helpers, scrolling and TinyMCE interaction.
public class OOGraphene {

	private static final Logger log = Tracing.createLoggerFor(OOGraphene.class);

	// Longer timeout used while TinyMCE initializes.
	private static final Duration waitTinyDuration = Duration.ofSeconds(20);
	// Hard upper bound handed to every WebDriverWait; individual waits
	// override the effective value via withTimeout().
	private static final Duration driverTimeout = Duration.ofSeconds(60);
	// Polling intervals. NOTE(review): "pooling" looks like a typo of
	// "polling" but is kept — the names are referenced throughout.
	private static final Duration polling = Duration.ofMillis(100);
	private static final Duration poolingSlow = Duration.ofMillis(200);
	private static final Duration poolingSlower = Duration.ofMillis(400);
	// Default timeout for most waits.
	private static final Duration timeout = Duration.ofSeconds(5);

	private static final By closeBlueBoxButtonBy = By.cssSelector("div.o_alert_info div.o_sel_info_message a.o_alert_close.o_sel_info_close");

	// Shared selectors for the wizard footer buttons.
	public static final By wizardFooterBy = By.xpath("//div[contains(@class,'modal')]//div[contains(@class,'modal-footer')]");
	public static final By wizardNextBy = By.xpath("//div[contains(@class,'modal-footer')]//a[contains(@class,'o_wizard_button_next')]");
	public static final By wizardFinishBy = By.xpath("//div[contains(@class,'modal-footer')]//a[contains(@class,'o_wizard_button_finish') and not(contains(@class,'o_disabled'))]");
	/**
	 * A fluent wait on the browser with the default 5 second timeout.
	 *
	 * @param browser The browser
	 * @return A fluent wait
	 */
	public static FluentWait<WebDriver> wait(WebDriver browser) {
		return wait(browser, timeout);
	}

	/**
	 * A fluent wait on the browser with the given timeout, polling
	 * every 200ms.
	 *
	 * @param browser The browser
	 * @param timeout The timeout
	 * @return A fluent wait
	 */
	public static FluentWait<WebDriver> wait(WebDriver browser, Duration timeout) {
		return new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeout).pollingEvery(poolingSlow);
	}
	/**
	 * Wait until the busy flag is ok, the browser scrolled
	 * to the top and that the body of the modal dialog is visible.
	 *
	 * @param browser The browser
	 */
	public static void waitModalDialog(WebDriver browser) {
		waitBusyAndScrollTop(browser);
		By modalBy = By.cssSelector("div.o_layered_panel div.modal-dialog div.modal-body");
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeout).pollingEvery(poolingSlow)
			.until(ExpectedConditions.visibilityOfElementLocated(modalBy));
	}

	/**
	 * Wait until the busy flag is ok, the browser scrolled
	 * to the top and that the body of the modal dialog is visible.
	 * NOTE(review): currently identical to waitModalDialog (same selector);
	 * kept as a separate method for call-site readability.
	 *
	 * @param browser The browser
	 */
	public static void waitModalWizard(WebDriver browser) {
		waitBusyAndScrollTop(browser);
		By modalBy = By.cssSelector("div.o_layered_panel div.modal-dialog div.modal-body");
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeout).pollingEvery(poolingSlow)
			.until(ExpectedConditions.visibilityOfElementLocated(modalBy));
	}
	/**
	 * Wait until the modal dialog (excluding the "form dirty" message)
	 * is not visible anymore. Takes a screenshot before rethrowing on
	 * timeout to ease debugging.
	 *
	 * @param browser The browser
	 */
	public static void waitModalDialogDisappears(WebDriver browser) {
		try {
			By modalBy = By.xpath("//div[not(@id='o_form_dirty_message')]/div[contains(@class,'modal-dialog')]/div[contains(@class,'modal-content')]");
			new WebDriverWait(browser, driverTimeout)
				.withTimeout(timeout).pollingEvery(poolingSlow)
				.until(ExpectedConditions.invisibilityOfElementLocated(modalBy));
		} catch (Exception e) {
			OOGraphene.takeScreenshot("waitModalDialogDisappears", browser);
			throw e;
		}
	}
	/**
	 * Wait until the busy flag is ok, the browser scrolled
	 * to the top and that the body of the top modal dialog is
	 * visible. Top modal dialogs are a separate beast of the
	 * modal and are use to show a dialog above TinyMCE, the rich
	 * text editor.
	 *
	 * @param browser The browser
	 */
	public static void waitTopModalDialog(WebDriver browser) {
		waitBusyAndScrollTop(browser);
		By modalBy = By.cssSelector("div.o_ltop_modal_panel div.modal-dialog div.modal-body");
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeout).pollingEvery(poolingSlower)
			.until(ExpectedConditions.visibilityOfElementLocated(modalBy));
	}

	/** Wait until the top modal dialog is no longer visible. */
	public static void waitTopModalDialogDisappears(WebDriver browser) {
		By modalBy = By.xpath("//div[@class='o_ltop_modal_panel']//div[contains(@class,'modal-dialog')]/div[contains(@class,'modal-content')]");
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeout).pollingEvery(poolingSlow)
			.until(ExpectedConditions.invisibilityOfElementLocated(modalBy));
	}

	/** Wait until a callout (popover) content box is visible. */
	public static void waitCallout(WebDriver browser) {
		By calloutBy = By.cssSelector("div.popover-content div.o_callout_content");
		waitElement(calloutBy, browser);
	}
	/**
	 * Wait until the application is no longer busy (see BusyPredicate),
	 * with the default timeout.
	 *
	 * @param browser The browser
	 */
	public static void waitBusy(WebDriver browser) {
		waitBusy(browser, timeout);
	}

	/**
	 * Wait until the application is no longer busy (see BusyPredicate).
	 *
	 * @param browser The browser
	 * @param timeoutDuration The timeout
	 */
	public static void waitBusy(WebDriver browser, Duration timeoutDuration) {
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeoutDuration)
			.pollingEvery(polling)
			.until(new BusyPredicate());
	}
	/**
	 * Wait until the element is visible (default timeout, 100ms polling).
	 *
	 * @param element The selector for the element
	 * @param browser The web driver
	 */
	public static void waitElement(By element, WebDriver browser) {
		waitElement(element, timeout, polling, browser);
	}

	/**
	 * Wait until the element is clickable.
	 *
	 * @param element The selector for the element
	 * @param browser The web driver
	 */
	public static void waitElementClickable(By element, WebDriver browser) {
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeout)
			.pollingEvery(polling)
			.until(ExpectedConditions.elementToBeClickable(element));
	}

	/**
	 * Wait until the element has an opacity of 1 or null.
	 * The condition only matches while the locator finds exactly one element.
	 *
	 * @param locator The location of the element
	 * @param browser The web driver
	 */
	public static void waitElementFullOpacity(By locator, WebDriver browser) {
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeout)
			.pollingEvery(polling)
			.until(new ExpectedCondition<Boolean>() {
				@Override
				public Boolean apply(WebDriver webDriver) {
					List<WebElement> elements = webDriver.findElements(locator);
					if(elements != null && elements.size() == 1) {
						String opacity = elements.get(0).getCssValue("opacity");
						return opacity == null || opacity.equals("1");
					}
					return Boolean.FALSE;
				}
			});
	}
	/**
	 * Wait until the element is visible.
	 *
	 * @param element The selector for the element
	 * @param timeoutInSeconds The timeout in seconds
	 * @param browser The web driver
	 */
	public static void waitElement(By element, long timeoutInSeconds, WebDriver browser) {
		waitElement(element, Duration.ofSeconds(timeoutInSeconds), polling, browser);
	}

	/**
	 * Wait until the element is visible. But slowly poll if the
	 * element exists (every 400ms via poolingSlower instead of 100ms).
	 *
	 * @param element The selector for the element
	 * @param timeoutInSeconds The timeout in seconds
	 * @param browser The web driver
	 */
	public static void waitElementSlowly(By element, long timeoutInSeconds, WebDriver browser) {
		waitElement(element, Duration.ofSeconds(timeoutInSeconds), poolingSlower, browser);
	}

	/**
	 * Wait until the element is present in DOM but it doesn't mean the element is visible.
	 *
	 * @param element The selector for the element
	 * @param timeoutInSeconds The timeout in seconds
	 * @param browser The web driver
	 */
	public static void waitElementPresence(By element, int timeoutInSeconds, WebDriver browser) {
		waitElementPresence(element, Duration.ofSeconds(timeoutInSeconds), polling, browser);
	}

	/**
	 * Wait slowly (400ms polling) until the element is present in DOM
	 * but it doesn't mean the element is visible.
	 *
	 * @param element The selector for the element
	 * @param timeoutInSeconds The timeout in seconds
	 * @param browser The web driver
	 */
	public static void waitElementPresenceSlowly(By element, int timeoutInSeconds, WebDriver browser) {
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(Duration.ofSeconds(timeoutInSeconds)).pollingEvery(poolingSlower)
			.until(ExpectedConditions.presenceOfElementLocated(element));
	}
	/**
	 * Wait until the element is visible (despite the surrounding presence
	 * methods, this overload uses visibilityOfElementLocated).
	 *
	 * @param element The selector for the element
	 * @param timeoutDuration The timeout
	 * @param pollingDuration The polling duration
	 * @param browser The web driver
	 */
	public static void waitElement(By element, Duration timeoutDuration, Duration pollingDuration, WebDriver browser) {
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeoutDuration).pollingEvery(pollingDuration)
			.until(ExpectedConditions.visibilityOfElementLocated(element));
	}

	/**
	 * Wait until the element is present in the DOM (not necessarily visible).
	 *
	 * @param element The selector for the element
	 * @param timeout The timeout
	 * @param pollingDuration The polling duration
	 * @param browser The web driver
	 */
	public static void waitElementPresence(By element, Duration timeout, Duration pollingDuration, WebDriver browser) {
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(timeout).pollingEvery(pollingDuration)
			.until(ExpectedConditions.presenceOfElementLocated(element));
	}

	/**
	 * Wait until the element is invisible or no longer present.
	 *
	 * @param element The selector of the element
	 * @param timeoutInSeconds The timeout in seconds
	 * @param browser The web driver
	 */
	public static void waitElementDisappears(By element, int timeoutInSeconds, WebDriver browser) {
		new WebDriverWait(browser, driverTimeout)
			.withTimeout(Duration.ofSeconds(timeoutInSeconds)).pollingEvery(polling)
			.until(ExpectedConditions.invisibilityOfElementLocated(element));
	}
	/** Click the wizard "next" button and wait until the UI settles. */
	public static void nextStep(WebDriver browser) {
		clickAndWait(wizardNextBy, browser);
	}

	/**
	 * Click the wizard "finish" button, close the blue info message if
	 * shown, then wait until the wizard disappears and the UI settles.
	 */
	public static void finishStep(WebDriver browser) {
		moveAndClick(wizardFinishBy, browser);
		closeBlueMessageWindow(browser);
		By wizardBy = By.cssSelector("div.o_layered_panel div.o_wizard");
		waitElementDisappears(wizardBy, 10, browser);
		waitBusyAndScrollTop(browser);
	}
	/**
	 * Verify the location of the button, scroll
	 * if needed and click. There is no wait of
	 * any sort.
	 * NOTE(review): threshold 681 here vs. 669 in clickAndWait/moveAndClick
	 * — presumably empirical viewport heights; confirm before unifying.
	 *
	 * @param buttonBy The selector
	 * @param browser The browser
	 */
	public static void click(By buttonBy, WebDriver browser) {
		WebElement buttonEl = browser.findElement(buttonBy);
		boolean move = buttonEl.getLocation().getY() > 681;
		if(move) {
			scrollTo(buttonBy, browser);
		}
		browser.findElement(buttonBy).click();
	}

	/**
	 * Check the location of the button. If it's below the visible
	 * window, it scrolls to the button, waits a little longer and
	 * click it. After it wait until the window scroll to the top
	 * and/or make a classic waitBusy
	 *
	 * @param buttonBy The button to click
	 * @param browser The driver
	 */
	public static void clickAndWait(By buttonBy, WebDriver browser) {
		WebElement buttonEl = browser.findElement(buttonBy);
		boolean move = buttonEl.getLocation().getY() > 669;
		if(move) {
			scrollTo(buttonBy, browser);
			browser.findElement(buttonBy).click();
			OOGraphene.waitBusyAndScrollTop(browser);
		} else {
			browser.findElement(buttonBy).click();
			OOGraphene.waitBusy(browser);
		}
	}

	/**
	 * Check the location of the button. If it's below the visible
	 * window, it scrolls to the button, waits a little longer and
	 * click it.
	 *
	 * @param buttonBy The button to click
	 * @param browser The driver
	 */
	public static void moveAndClick(By buttonBy, WebDriver browser) {
		waitElementPresence(buttonBy, 5, browser);
		WebElement buttonEl = browser.findElement(buttonBy);
		boolean move = buttonEl.getLocation().getY() > 669;
		if(move) {
			scrollTo(buttonBy, browser);
			waitElement(buttonBy, browser);
		}
		browser.findElement(buttonBy).click();
	}
	/**
	 * Scroll to the element (via scrollIntoView) and wait a little longer.
	 *
	 * @param by The selector
	 * @param browser The browser
	 */
	public static void scrollTo(By by, WebDriver browser) {
		WebElement element = browser.findElement(by);
		((JavascriptExecutor)browser).executeScript("return arguments[0].scrollIntoView({behavior:\"auto\", block: \"end\"});", element);
		OOGraphene.waitingALittleLonger();
	}

	/**
	 * Wait for the presence of the element specified by the by selector,
	 * then scroll to it.
	 *
	 * @param by The selector
	 * @param browser The browser
	 */
	public static void moveTo(By by, WebDriver browser) {
		waitElementPresence(by, 5, browser);
		scrollTo(by, browser);
	}

	/**
	 * Scroll to the top anchor.
	 *
	 * @param browser The browser
	 */
	public static void scrollTop(WebDriver browser) {
		scrollTo(By.id("o_top"), browser);
	}
	/** Wait until the active TinyMCE editor reports itself loaded. */
	public static final void waitTinymce(WebDriver browser) {
		new WebDriverWait(browser, driverTimeout).withTimeout(waitTinyDuration)
			.pollingEvery(poolingSlow)
			.until(new TinyMCELoadedPredicate());
	}

	// tinymce.get('o_fi1000000416').setContent('<p>Hacked</p>');
	// <div id="o_fi1000000416_diw" class="o_richtext_mce"> <iframe id="o_fi1000000416_ifr">
	/**
	 * Set the content of the active TinyMCE editor.
	 * NOTE(review): content is spliced into a JS string literal unescaped;
	 * single quotes or backslashes in content would break the script.
	 *
	 * @param content The HTML content to set
	 * @param browser The browser
	 */
	public static final void tinymce(String content, WebDriver browser) {
		waitTinymce(browser);
		((JavascriptExecutor)browser).executeScript("tinymce.activeEditor.setContent('" + content + "')");
	}

	/** Insert raw HTML at the caret of the active TinyMCE editor. */
	public static final void tinymceExec(String content, WebDriver browser) {
		waitTinymce(browser);
		((JavascriptExecutor)browser).executeScript("tinymce.activeEditor.execCommand('mceInsertRawHTML', true, '" + content + "')");
	}

	/**
	 * Set the content of the TinyMCE editor inside the given container.
	 * The editor id is derived from the "_diw" wrapper div id.
	 *
	 * @param content The HTML content to set
	 * @param containerCssSelector Selector of the element containing the editor
	 * @param browser The browser
	 */
	public static final void tinymce(String content, String containerCssSelector, WebDriver browser) {
		waitTinymce(containerCssSelector, browser);
		By tinyIdBy = By.cssSelector(containerCssSelector + " div.o_richtext_mce");
		WebElement tinyIdEl = browser.findElement(tinyIdBy);
		String tinyId = tinyIdEl.getAttribute("id").replace("_diw", "");
		waitTinymceById(tinyId, browser);
		((JavascriptExecutor)browser).executeScript("tinymce.editors['" + tinyId + "'].setContent('" + content + "')");
	}
	/**
	 * Insert a piece of text in TinyMCE where is the caret.
	 *
	 * @param content The text to add
	 * @param containerCssSelector A selector to point where the rich text editor is
	 * @param browser The browser
	 */
	public static final void tinymceInsert(String content, String containerCssSelector, WebDriver browser) {
		waitTinymce(containerCssSelector, browser);
		By tinyIdBy = By.cssSelector(containerCssSelector + " div.o_richtext_mce");
		WebElement tinyIdEl = browser.findElement(tinyIdBy);
		String tinyId = tinyIdEl.getAttribute("id").replace("_diw", "");
		waitTinymceById(tinyId, browser);
		((JavascriptExecutor)browser).executeScript("tinymce.editors['" + tinyId + "'].insertContent('" + content + "')");
	}

	/**
	 * Wait until the iframe where the editing happens exists.
	 *
	 * @param containerCssSelector The container of the textarea element
	 * @param browser The browser
	 */
	private static final void waitTinymce(String containerCssSelector, WebDriver browser) {
		waitElement(By.cssSelector(containerCssSelector + " div.o_richtext_mce div.mce-edit-area>iframe"), browser);
	}

	/**
	 * Check that the TinyMCE editor with the specified id is initialized.
	 *
	 * @param tinyId The id of the element
	 * @param browser The browser
	 */
	private static final void waitTinymceById(String tinyId, WebDriver browser) {
		new WebDriverWait(browser, driverTimeout).withTimeout(waitTinyDuration)
			.pollingEvery(polling)
			.until(new TinyMCELoadedByIdPredicate(tinyId));
	}
	/**
	 * Click through the tabs of the nav-tabs bar until the given form
	 * element shows up.
	 *
	 * @param ulClass The selector for the tabs bar
	 * @param formBy The selector to found the form
	 * @param browser The browser
	 */
	public static final void selectTab(String ulClass, By formBy, WebDriver browser) {
		selectTab(ulClass, (b) -> {
			List<WebElement> chooseRepoEntry = browser.findElements(formBy);
			return !chooseRepoEntry.isEmpty();
		}, false, browser);
	}
/**
 * Select the tab which makes the element located by formBy visible,
 * waiting slowly for the tab activation.
 *
 * @param ulClass The selector for the tabs bar
 * @param formBy The selector to find the form
 * @param browser The browser
 */
public static final void selectTabSlowly(String ulClass, By formBy, WebDriver browser) {
	selectTab(ulClass, (b) -> {
		List<WebElement> chooseRepoEntry = browser.findElements(formBy);
		return !chooseRepoEntry.isEmpty();
	// Pass slowly=true: that is the point of the "...Slowly" variant. It
	// previously passed false, making it byte-for-byte identical to selectTab
	// and never exercising waitElementSlowly(...).
	}, true, browser);
}
/**
 * Select the tab that satisfies the given predicate. The scan starts at the
 * currently active tab, continues to the last tab, and wraps around to the
 * tabs before the active one. Fails the test if no tab matches.
 *
 * @param ulClass The class of the nav-tabs
 * @param selectTab A predicate to select the right tab
 * @param slowly Whether to wait slowly for the tab activation
 * @param browser The driver
 */
public static final void selectTab(String ulClass, Predicate<WebDriver> selectTab, boolean slowly, WebDriver browser) {
OOGraphene.waitElement(By.cssSelector("ul." + ulClass), browser);
List<WebElement> tabLinks = browser.findElements(By.cssSelector("ul." + ulClass + ">li"));
int count = tabLinks.size();
// Locate the currently active tab so the scan can start there
int activeIndex = 0;
for(int i=0; i<count;i++) {
String cssClass = tabLinks.get(i).getAttribute("class");
if(cssClass != null && cssClass.contains("active")) {
activeIndex = i;
}
}
boolean found;
if(activeIndex == 0) {
found = selectTab(ulClass, selectTab, 0, count, slowly, browser);
} else {
// Scan from the active tab to the end, then wrap to the tabs before it
found = selectTab(ulClass, selectTab, activeIndex, count, slowly, browser);
if(!found) {
found = selectTab(ulClass, selectTab, 0, activeIndex, slowly, browser);
}
}
Assert.assertTrue("Found the tab", found);
}
/**
 * Try each tab in the index range [start, end) until the predicate is
 * satisfied. Tabs with an onclick handler are clicked and awaited first;
 * tabs without one are only probed with the predicate.
 *
 * @param ulClass The class of the nav-tabs
 * @param selectTab A predicate to select the right tab
 * @param start The first tab index (0-based) to try, inclusive
 * @param end The last tab index, exclusive
 * @param slowly Whether to wait slowly for the tab activation
 * @param browser The driver
 * @return true if a tab satisfying the predicate was found
 */
public static final boolean selectTab(String ulClass, Predicate<WebDriver> selectTab, int start, int end, boolean slowly, WebDriver browser) {
	for(int i=start; i<end; i++) {
		// XPath li positions are 1-based, hence i+1
		By tabLinkBy = By.xpath("//ul[contains(@class,'" + ulClass + "')]/li[" + (i+1) + "]/a");
		WebElement tabEl = browser.findElement(tabLinkBy);
		String onclickAttr = tabEl.getAttribute("onclick");
		if(StringHelper.containsNonWhitespace(onclickAttr)) {
			tabEl.click();
			By activatedTabLinkBy = By.xpath("//ul[contains(@class,'" + ulClass + "')]/li[" + (i+1) + "][@class='active']/a");
			if(slowly) {
				waitElementSlowly(activatedTabLinkBy, 10, browser);
			} else {
				waitElement(activatedTabLinkBy, browser);
			}
			if(selectTab.test(browser)) {
				return true;
			}
		} else if(selectTab.test(browser)) {
			return true;
		}
	}
	return false;
}
/**
 * Make sure that the checkbox is in the requested state, toggling it only
 * when its current state differs. A null value is a no-op.
 *
 * @param checkboxEl The checkbox element
 * @param val The desired state, or null to leave the checkbox untouched
 */
public static final void check(WebElement checkboxEl, Boolean val) {
	if(val == null) return;
	// The "checked" attribute is non-null exactly when the box is ticked
	boolean currentlyChecked = checkboxEl.getAttribute("checked") != null;
	if(val.booleanValue() != currentlyChecked) {
		checkboxEl.click();
	}
}
/**
 * Set the value of a textarea directly through JavaScript (bypasses key events).
 *
 * @param textareaEl The textarea element (must have an id)
 * @param content The value to set
 * @param browser The browser
 */
// NOTE(review): content is interpolated into the script unescaped, so values
// containing a single quote would break the statement — confirm callers only
// pass quote-free strings.
public static final void textarea(WebElement textareaEl, String content, WebDriver browser) {
String id = textareaEl.getAttribute("id");
((JavascriptExecutor)browser).executeScript("document.getElementById('" + id + "').value = '" + content + "'");
}
/**
 * Type the given date into the day field of a date chooser, formatted
 * with the short date format of the browser's UI locale.
 *
 * @param date The date to enter
 * @param seleniumCssClass The CSS class identifying the date chooser wrapper
 * @param browser The browser
 */
public static final void date(Date date, String seleniumCssClass, WebDriver browser) {
	Locale locale = getLocale(browser);
	DateFormat shortFormat = DateFormat.getDateInstance(DateFormat.SHORT, locale);
	By dayInputBy = By.cssSelector("div." + seleniumCssClass + " input.o_date_day");
	browser.findElement(dayInputBy).sendKeys(shortFormat.format(date));
}
/**
 * Type the given date into the day field of a date chooser and fill the
 * hour/minute fields, then wait for the date picker popup to disappear.
 *
 * @param date The date and time to enter
 * @param seleniumCssClass The CSS class identifying the date chooser wrapper
 * @param browser The browser
 */
public static final void datetime(Date date, String seleniumCssClass, WebDriver browser) {
Locale locale = getLocale(browser);
String dateText = DateFormat.getDateInstance(DateFormat.SHORT, locale).format(date);
By dateBy = By.cssSelector("div." + seleniumCssClass + " input.o_date_day");
browser.findElement(dateBy).sendKeys(dateText);
// Two o_date_ms inputs are expected: index 0 is the hour, index 1 the minute
By timeBy = By.cssSelector("div." + seleniumCssClass + " input.o_date_ms");
List<WebElement> timeEls = browser.findElements(timeBy);
Assert.assertNotNull(timeEls);
Assert.assertEquals(2, timeEls.size());
Calendar cal = Calendar.getInstance();
cal.setTime(date);
int hour = cal.get(Calendar.HOUR_OF_DAY);
int minute = cal.get(Calendar.MINUTE);
timeEls.get(0).click();
timeEls.get(0).clear();
timeEls.get(0).sendKeys(Integer.toString(hour));
timeEls.get(1).clear();
timeEls.get(1).sendKeys(Integer.toString(minute));
// Typing in the day field opened the jQuery UI date picker; wait until it is gone
By datePickerBy = By.id("ui-datepicker-div");
waitElementDisappears(datePickerBy, 5, browser);
}
/**
 * Select the next month in the jQuery UI date picker (needs to be open).
 *
 * @param browser The browser
 */
public static final void selectNextMonthInDatePicker(WebDriver browser) {
By nextBy = By.cssSelector("#ui-datepicker-div div.ui-datepicker-header a.ui-datepicker-next");
waitElement(nextBy, browser);
browser.findElement(nextBy).click();
// The picker re-renders after the click; wait for the arrow to be back
waitElement(nextBy, browser);
}
/**
 * Select the day in the jQuery UI date picker (needs to be open), then
 * wait for the picker to close.
 *
 * @param day The day of month to click
 * @param browser The browser
 */
public static final void selectDayInDatePicker(int day, WebDriver browser) {
By datePickerBy = By.id("ui-datepicker-div");
waitElement(datePickerBy, browser);
// Match the day cell by its exact text content
By dayBy = By.xpath("//div[@id='ui-datepicker-div']//td//a[normalize-space(text())='" + day + "']");
waitElement(dayBy, browser);
browser.findElement(dayBy).click();
waitElementDisappears(datePickerBy, 5, browser);
}
/**
 * Click the "select all" checkbox of a flexi table header and wait until
 * its icon switches from the off state to the on state.
 *
 * @param browser The browser
 */
public static final void flexiTableSelectAll(WebDriver browser) {
By selectAll = By.xpath("//th[contains(@class,'o_table_checkall')]/a[i[contains(@class,'o_icon_check_off')]]");
waitElement(selectAll, browser);
browser.findElement(selectAll).click();
waitBusy(browser);
// The icon flips to o_icon_check_on once all rows are selected
By selectedAll = By.xpath("//th[contains(@class,'o_table_checkall')]/a[i[contains(@class,'o_icon_check_on')]]");
waitElement(selectedAll, browser);
}
/**
 * Detect the UI locale of the current page from the CSS class of the body
 * element. Only German is recognized; everything else maps to English.
 *
 * @param browser The browser
 * @return Locale.GERMAN if the body carries o_lang_de, Locale.ENGLISH otherwise
 */
public static final Locale getLocale(WebDriver browser) {
	String bodyCss = browser.findElement(By.id("o_body")).getAttribute("class");
	return bodyCss.contains("o_lang_de") ? Locale.GERMAN : Locale.ENGLISH;
}
/**
 * Wait the end of the transition of the user's tools bar.
 *
 * @param browser The browser
 */
public static final void waitingTransition(WebDriver browser) {
new WebDriverWait(browser, driverTimeout)
.pollingEvery(polling)
.until(new TransitionPredicate());
// Small extra pause to let the animation fully settle
waitingALittleBit();
}
/**
 * Sleep the current thread for the given amount of time.
 *
 * @param millis The number of milliseconds to wait
 */
private static final void waiting(int millis) {
	try {
		Thread.sleep(millis);
	} catch (InterruptedException e) {
		// Restore the interrupt status instead of swallowing it (the previous
		// printStackTrace() discarded the interrupt), so callers such as the
		// test runner shutting down can still observe the interruption.
		Thread.currentThread().interrupt();
	}
}
/**
 * Wait 100ms.
 */
public static final void waitingALittleBit() {
waiting(100);
}
/**
 * Wait 750ms.
 */
public static final void waitingALittleLonger() {
waiting(750);
}
/**
 * Wait 2 seconds.
 */
public static final void waitingLong() {
waiting(2000);
}
/**
 * Wait 5 seconds. Only use it if you lose all hopes.
 */
public static final void waitingTooLong() {
waiting(5000);
}
/**
 * Upload a file by sending its absolute path to a file input element.
 *
 * @param inputBy The selector of the input[type=file] element
 * @param file The file to upload
 * @param browser The browser
 */
public static final void uploadFile(By inputBy, File file, WebDriver browser) {
WebElement input = browser.findElement(inputBy);
input.sendKeys(file.getAbsolutePath());
}
/**
 * This takes longer than the standard busy wait because it waits until the
 * window is stabilized (busy flag cleared and scroll position settled).
 * On timeout, the failure is logged with a screenshot instead of propagating.
 *
 * @param browser The browser
 */
public static final void waitBusyAndScrollTop(WebDriver browser) {
BusyScrollToPredicate predicate = new BusyScrollToPredicate();
try {
new WebDriverWait(browser, driverTimeout)
.pollingEvery(polling)
.withTimeout(timeout)
.until(predicate);
} catch (Exception e) {
// Deliberately non-fatal: log the last observed scroll offset and continue
log.error("Predicate failed: {}", predicate.getY(), e);
OOGraphene.takeScreenshot("waitBusyAndScrollTop", browser);
}
}
/**
 * Click the "&lt;" (back) of the bread crumbs and wait.
 *
 * @param browser The browser
 */
public static final void clickBreadcrumbBack(WebDriver browser) {
By backBy = By.xpath("//ol[@class='breadcrumb']/li[@class='o_breadcrumb_back']/a[i[contains(@class,'o_icon_back')]]");
waitElement(backBy, 10, browser);
try {
browser.findElement(backBy).click();
} catch (StaleElementReferenceException e) {
// The breadcrumb may be re-rendered between the wait and the click;
// pause and retry once with a freshly located element
log.error("", e);
waitingALittleLonger();
browser.findElement(backBy).click();
}
waitBusy(browser);
}
/**
 * Useful method to close error messages: waits for the error modal,
 * clicks its close button, and waits for the dialog to disappear.
 *
 * @param browser The browser
 */
public static final void closeErrorBox(WebDriver browser) {
By errorBoxBy = By.cssSelector(".modal-body.alert.alert-danger");
waitElement(errorBoxBy, browser);
// Exclude the "form dirty" dialog so we only close the real error modal
By closeButtonBy = By.xpath("//div[not(@id='o_form_dirty_message')]/div[contains(@class,'modal-dialog')]//button[@class='close']");
waitElement(closeButtonBy, browser);
browser.findElement(closeButtonBy).click();
waitModalDialogDisappears(browser);
}
/**
 * Useful method to close warning messages: waits for the warning modal,
 * clicks its close button, and waits for the dialog to disappear.
 *
 * @param browser The browser
 */
public static final void closeWarningBox(WebDriver browser) {
By errorBoxBy = By.cssSelector(".modal-body.alert.alert-warning");
waitElement(errorBoxBy, browser);
// Exclude the "form dirty" dialog so we only close the real warning modal
By closeButtonBy = By.xpath("//div[not(@id='o_form_dirty_message')]/div[contains(@class,'modal-dialog')]//button[@class='close']");
waitElement(closeButtonBy, browser);
browser.findElement(closeButtonBy).click();
waitModalDialogDisappears(browser);
}
/**
 * Wait (best effort) for the blue info message window to become visible,
 * then close it. A timeout while waiting is ignored: the close step copes
 * with an absent window by itself.
 *
 * @param browser The browser
 */
public static final void waitAndCloseBlueMessageWindow(WebDriver browser) {
try {
new WebDriverWait(browser, driverTimeout)
.withTimeout(timeout).pollingEvery(polling)
.until(ExpectedConditions.visibilityOfElementLocated(closeBlueBoxButtonBy));
} catch (Exception e) {
// Best effort: the window may never appear; closeBlueMessageWindow handles that
//e.printStackTrace();
}
closeBlueMessageWindow(browser);
}
/**
 * Click every currently displayed close button of the blue info message
 * window. Click failures are retried once (after a pause if the button was
 * not yet visible) and otherwise swallowed — closing is best effort.
 *
 * @param browser The browser
 */
private static final void closeBlueMessageWindow(WebDriver browser) {
List<WebElement> closeButtons = browser.findElements(closeBlueBoxButtonBy);
for(WebElement closeButton:closeButtons) {
if(closeButton.isDisplayed()) {
try {
clickCloseButton(browser, closeButton);
} catch (TimeoutException e) {
// The confirmation predicate timed out; retry the click once
try {
clickCloseButton(browser, closeButton);
} catch(Exception e2) {
//e.printStackTrace();
}
} catch(ElementNotVisibleException e1) {
// The button was mid-transition; give it time to settle, then retry
try {
waitingALittleLonger();
clickCloseButton(browser, closeButton);
} catch(Exception e2) {
//e2.printStackTrace();
}
}
}
}
}
/**
 * Click a close button of the blue info message window and wait (up to 1s)
 * until the alert is confirmed closed.
 *
 * @param browser The browser
 * @param closeButton The close button to click
 */
private static final void clickCloseButton(WebDriver browser, WebElement closeButton) {
closeButton.click();
new WebDriverWait(browser, driverTimeout)
.withTimeout(Duration.ofMillis(1000)).pollingEvery(polling)
.until(new CloseAlertInfoPredicate());
}
/**
 * Close every currently displayed modal dialog by clicking its header close
 * button. The "form dirty" message dialog is excluded. A timeout on the
 * first click is retried once; further failures are swallowed.
 *
 * @param browser The browser
 */
public static final void closeModalDialogWindow(WebDriver browser) {
By closeModalDialogButtonBy = By.xpath("//div[not(@id='o_form_dirty_message')]/div[contains(@class,'modal-dialog')]//div[contains(@class,'modal-header')]/button[@class='close']");
List<WebElement> closeButtons = browser.findElements(closeModalDialogButtonBy);
for(WebElement closeButton:closeButtons) {
if(closeButton.isDisplayed()) {
try {
clickModalDialogCloseButton(browser, closeButton);
} catch (TimeoutException e) {
// Waiting for the dialog to disappear timed out; retry the click once
try {
clickModalDialogCloseButton(browser, closeButton);
} catch(Exception e2) {
//
}
}
}
}
}
/**
 * Click a modal dialog close button and wait until the dialog disappears.
 * A button that is no longer visible is silently ignored.
 *
 * @param browser The browser
 * @param closeButton The close button to click
 */
private static final void clickModalDialogCloseButton(WebDriver browser, WebElement closeButton) {
try {
closeButton.click();
waitModalDialogDisappears(browser);
} catch (ElementNotVisibleException e) {
// Dialog already closing; nothing to do
//e.printStackTrace();
}
}
/**
 * Close the off-canvas panel if its close button is displayed. Waits out
 * the nav bar transition first because the button may be disappearing on
 * its own; any click failure is swallowed (best effort).
 *
 * @param browser The browser
 */
public static final void closeOffCanvas(WebDriver browser) {
By closeBy = By.cssSelector("a.o_offcanvas_close");
WebElement closeButton = browser.findElement(closeBy);
if(closeButton.isDisplayed()) {
//timing issue if the close button is disappearing
waitNavBarTransition(browser);
// Re-check: the transition above may have hidden the button already
if(closeButton.isDisplayed()) {
try {
closeButton.click();
waitNavBarTransition(browser);
} catch (Exception e) {
//e.printStackTrace();
}
}
}
}
/**
 * Wait for the navigation bar transition to finish, plus a short settle
 * pause. A timeout is swallowed — this wait is best effort.
 *
 * @param browser The browser
 */
public static final void waitNavBarTransition(WebDriver browser) {
try {
new WebDriverWait(browser, driverTimeout).pollingEvery(polling)
.until(new NavBarTransitionPredicate());
waitingALittleBit();
} catch (Exception e) {
//e.printStackTrace();
}
}
/**
 * Take a screenshot of the browser and save it under "screenshots" (falling
 * back to the system temp directory if that folder cannot be created). The
 * file name combines the test name and a UTC-ish timestamp.
 *
 * @param test A label (usually the test name) used in the file name
 * @param browser The browser
 */
public static void takeScreenshot(String test, WebDriver browser) {
	TakesScreenshot scrShot = ((TakesScreenshot)browser);
	File screenFile = scrShot.getScreenshotAs(org.openqa.selenium.OutputType.FILE);
	SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd'T'HHmmss");
	// Selenium's OutputType.FILE delivers PNG data, so name the file .png
	// (it was previously saved with a misleading .jpg extension).
	String filename = test + "" + format.format(new Date()) + ".png";
	File path = new File("screenshots");
	if(!path.exists() && !path.mkdirs()) {
		path = new File(System.getProperty("java.io.tmpdir"), "screenshots");
		path.mkdirs();
	}
	File screenshotFile = new File(path, filename);
	log.error("Write screenshot: {} {}", test, screenshotFile);
	FileUtils.copyFileToFile(screenFile, screenshotFile, true);
}
/**
 * Dump the browser and driver logs to the test log.
 *
 * @param browser The browser
 */
public static void logs(WebDriver browser) {
logs(browser, LogType.BROWSER);
logs(browser, LogType.DRIVER);
}
/**
 * Dump the entries of the given WebDriver log to the test log. Drivers
 * that do not support the requested log type throw; that is swallowed
 * after being logged.
 *
 * @param browser The browser
 * @param logType The log type (see org.openqa.selenium.logging.LogType)
 */
public static void logs(WebDriver browser, String logType) {
try {
LogEntries logEntries = browser.manage().logs().get(logType);
for (LogEntry logEntry : logEntries) {
java.util.logging.Level level = logEntry.getLevel();
log.error("{} {}", level.getName(), logEntry.getMessage());
}
} catch (Exception e) {
log.error("", e);
}
}
}
|
#!/bin/bash
# ========== Experiment Seq. Idx. 2500 / 48.0.4.0 / N. 0 - _S=48.0.4.0 D1_N=45 a=-1 b=1 c=-1 d=1 e=1 f=-1 D3_N=7 g=1 h=1 i=1 D4_N=2 j=2 D5_N=0 ==========
# Fail on use of unset variables.
set -u
# Prints header
echo -e '\n\n========== Experiment Seq. Idx. 2500 / 48.0.4.0 / N. 0 - _S=48.0.4.0 D1_N=45 a=-1 b=1 c=-1 d=1 e=1 f=-1 D3_N=7 g=1 h=1 i=1 D4_N=2 j=2 D5_N=0 ==========\n\n'
# Prepares all environment variables
JBHI_DIR="$HOME/jbhi-special-issue"
RESULTS_DIR="$JBHI_DIR/results"
# The literal "Yes" condition is generated by the experiment templater:
# this run uses the SVM variant with ISBI-format predictions.
if [[ "Yes" == "Yes" ]]; then
SVM_SUFFIX="svm"
PREDICTIONS_FORMAT="isbi"
else
SVM_SUFFIX="nosvm"
PREDICTIONS_FORMAT="titans"
fi
RESULTS_PREFIX="$RESULTS_DIR/deep.45.layer.7.test.2.index.2500.$SVM_SUFFIX"
RESULTS_PATH="$RESULTS_PREFIX.results.txt"
# ...variables expected by jbhi-checks.include.sh and jbhi-footer.include.sh
SOURCES_GIT_DIR="$JBHI_DIR/jbhi-special-issue"
LIST_OF_INPUTS="$RESULTS_PREFIX.finish.txt"
# ...this experiment is a little different --- only one master procedure should run, so there's only a master lock file
METRICS_TEMP_PATH="$RESULTS_DIR/this_results.anova.txt"
METRICS_PATH="$RESULTS_DIR/all_results.anova.txt"
START_PATH="$METRICS_PATH.start.txt"
FINISH_PATH="-"
LOCK_PATH="$METRICS_PATH.running.lock"
LAST_OUTPUT="$METRICS_PATH"
mkdir -p "$RESULTS_DIR"
#
# Assumes that the following environment variables where initialized
# SOURCES_GIT_DIR="$JBHI_DIR/jbhi-special-issue"
# LIST_OF_INPUTS="$DATASET_DIR/finish.txt:$MODELS_DIR/finish.txt:"
# START_PATH="$OUTPUT_DIR/start.txt"
# FINISH_PATH="$OUTPUT_DIR/finish.txt"
# LOCK_PATH="$OUTPUT_DIR/running.lock"
# LAST_OUTPUT="$MODEL_DIR/[[[:D1_MAX_NUMBER_OF_STEPS:]]].meta"
EXPERIMENT_STATUS=1
STARTED_BEFORE=No
# Checks if code is stable, otherwise alerts scheduler
pushd "$SOURCES_GIT_DIR" >/dev/null
GIT_STATUS=$(git status --porcelain)
GIT_COMMIT=$(git log | head -n 1)
popd >/dev/null
# Refuse to run on a dirty working tree: results must map to a commit.
if [ "$GIT_STATUS" != "" ]; then
echo 'FATAL: there are uncommitted changes in your git sources file' >&2
echo ' for reproducibility, experiments only run on committed changes' >&2
echo >&2
echo ' Git status returned:'>&2
echo "$GIT_STATUS" >&2
exit 162
fi
# The experiment is already finished - exits with special code so scheduler won't retry
# (FINISH_PATH of "-" means this experiment tracks no finish marker)
if [[ "$FINISH_PATH" != "-" ]]; then
if [[ -e "$FINISH_PATH" ]]; then
echo 'INFO: this experiment has already finished' >&2
exit 163
fi
fi
# The experiment is not ready to run due to dependencies - alerts scheduler
if [[ "$LIST_OF_INPUTS" != "" ]]; then
	# Split on ':' without leaking a modified IFS into the rest of the script.
	# The previous form `IFS=':' tokens_of_input=( $LIST_OF_INPUTS )` left
	# IFS=':' set globally (an assignment-only command keeps the change),
	# silently breaking all later word splitting; `IFS=':' read` is scoped
	# to the read command. Quoting the array also avoids accidental globbing.
	IFS=':' read -r -a tokens_of_input <<< "$LIST_OF_INPUTS"
	input_missing=No
	for input_to_check in "${tokens_of_input[@]}"; do
		if [[ ! -e "$input_to_check" ]]; then
			echo "ERROR: input $input_to_check missing for this experiment" >&2
			input_missing=Yes
		fi
	done
	if [[ "$input_missing" != No ]]; then
		exit 164
	fi
fi
# Sets trap to return error code if script is interrupted before successful finish
LOCK_SUCCESS=No
FINISH_STATUS=161
# Exit handler: releases the lock (if we took it), removes a stale finish
# marker on failure, and converts FINISH_STATUS into the process exit code.
function finish_trap {
if [[ "$LOCK_SUCCESS" == "Yes" ]]; then
rmdir "$LOCK_PATH" &> /dev/null
fi
if [[ "$FINISH_STATUS" == "165" ]]; then
echo 'WARNING: experiment discontinued because other process holds its lock' >&2
else
if [[ "$FINISH_STATUS" == "160" ]]; then
echo 'INFO: experiment finished successfully' >&2
else
# Any status other than 160/165 is a failure: drop the finish marker
[[ "$FINISH_PATH" != "-" ]] && rm -f "$FINISH_PATH"
echo 'ERROR: an error occurred while executing the experiment' >&2
fi
fi
exit "$FINISH_STATUS"
}
trap finish_trap EXIT
# While running, locks experiment so other parallel threads won't attempt to run it too
# (mkdir is atomic: exactly one process can create the lock directory)
if mkdir "$LOCK_PATH" --mode=u=rwx,g=rx,o=rx &>/dev/null; then
LOCK_SUCCESS=Yes
else
echo 'WARNING: this experiment is already being executed elsewhere' >&2
FINISH_STATUS="165"
exit
fi
# If the experiment was started before, do any cleanup necessary
if [[ "$START_PATH" != "-" ]]; then
if [[ -e "$START_PATH" ]]; then
echo 'WARNING: this experiment is being restarted' >&2
STARTED_BEFORE=Yes
fi
#...marks start: append timestamp and commit so reruns are traceable
date -u >> "$START_PATH"
echo GIT "$GIT_COMMIT" >> "$START_PATH"
fi
if [[ "$STARTED_BEFORE" == "Yes" ]]; then
# If the experiment was started before, do any cleanup necessary
echo -n
else
# First run: write the CSV header of the aggregated metrics file
echo "D1_N;D3_N;D4_N;a;b;c;d;e;f;g;h;i;j;m_ap;m_auc;m_tn;m_fp;m_fn;m_tp;m_tpr;m_fpr;k_ap;k_auc;k_tn;k_fp;k_fn;k_tp;k_tpr;k_fpr;isbi_auc" > "$METRICS_PATH"
fi
# Computes the metrics for this run and appends them (prefixed with the
# experiment's factor levels) as one CSV row to the aggregated metrics file.
python \
"$SOURCES_GIT_DIR/etc/compute_metrics.py" \
--metadata_file "$SOURCES_GIT_DIR/data/all-metadata.csv" \
--predictions_format "$PREDICTIONS_FORMAT" \
--metrics_file "$METRICS_TEMP_PATH" \
--predictions_file "$RESULTS_PATH"
EXPERIMENT_STATUS="$?"
# Factor levels for this design point, then the metrics line itself
echo -n "45;7;2;" >> "$METRICS_PATH"
echo -n "-1;1;-1;1;1;-1;1;1;1;2;" >> "$METRICS_PATH"
tail "$METRICS_TEMP_PATH" -n 1 >> "$METRICS_PATH"
#
#...starts training
# Marks success (status 160) only if the metrics run succeeded and the
# expected last output exists; finish_trap reports accordingly on exit.
if [[ "$EXPERIMENT_STATUS" == "0" ]]; then
if [[ "$LAST_OUTPUT" == "" || -e "$LAST_OUTPUT" ]]; then
if [[ "$FINISH_PATH" != "-" ]]; then
date -u >> "$FINISH_PATH"
echo GIT "$GIT_COMMIT" >> "$FINISH_PATH"
fi
FINISH_STATUS="160"
fi
fi
|
def sum_even_numbers(nums):
    """Return the sum of the even numbers in ``nums``.

    :param nums: iterable of integers
    :return: sum of elements divisible by 2 (0 for an empty iterable)
    """
    return sum(n for n in nums if n % 2 == 0)
|
#ifndef TERRACOTTA_CHUNK_H_
#define TERRACOTTA_CHUNK_H_
#include <mclib/block/Block.h>
#include <mclib/block/BlockEntity.h>
#include <mclib/common/Types.h>
#include <mclib/nbt/NBT.h>
#include <mclib/world/Chunk.h>
#include <array>
#include <map>
#include <memory>
namespace terra {
// Plain-data snapshot of a chunk column's metadata, converted from the
// mclib representation.
struct ChunkColumnMetadata {
// Column coordinates — presumably in chunk-column units; confirm against mclib.
s32 x;
s32 z;
// Bitmask marking which vertical 16x16x16 sections are present.
u16 sectionmask;
bool continuous;
bool skylight;
// Implicit converting constructor from the mclib metadata type.
ChunkColumnMetadata(const mc::world::ChunkColumnMetadata& metadata)
: x(metadata.x),
z(metadata.z),
sectionmask(metadata.sectionmask),
continuous(metadata.continuous),
skylight(metadata.skylight)
{
}
};
/**
 * A 16x16x16 area. A ChunkColumn is made up of 16 of these.
 * Non-copyable; blocks are stored as a flat array of 16^3 block pointers.
 */
class Chunk {
private:
std::array<mc::block::BlockPtr, 16 * 16 * 16> m_Blocks;
public:
Chunk();
Chunk(const Chunk& other) = delete;
Chunk& operator=(const Chunk& other) = delete;
// Converting constructor copying block data from an mclib chunk.
Chunk(const mc::world::Chunk& other);
/**
 * Position is relative to this chunk position.
 */
mc::block::BlockPtr GetBlock(const mc::Vector3i& chunkPosition) const;
/**
 * Position is relative to this chunk position.
 */
void SetBlock(mc::Vector3i chunkPosition, mc::block::BlockPtr block);
};
typedef std::shared_ptr<Chunk> ChunkPtr;
/**
 * Stores a 16x256x16 area. Uses chunks (16x16x16) to store the data vertically.
 * A null chunk is fully air. Also tracks the block entities located in the
 * column, keyed by position.
 */
class ChunkColumn {
public:
enum { ChunksPerColumn = 16 };
typedef std::array<ChunkPtr, ChunksPerColumn>::iterator iterator;
typedef std::array<ChunkPtr, ChunksPerColumn>::reference reference;
typedef std::array<ChunkPtr, ChunksPerColumn>::const_iterator const_iterator;
typedef std::array<ChunkPtr, ChunksPerColumn>::const_reference const_reference;
private:
std::array<ChunkPtr, ChunksPerColumn> m_Chunks;
ChunkColumnMetadata m_Metadata;
std::map<mc::Vector3i, mc::block::BlockEntityPtr> m_BlockEntities;
public:
ChunkColumn(ChunkColumnMetadata metadata);
ChunkColumn(const ChunkColumn& rhs) = default;
ChunkColumn& operator=(const ChunkColumn& rhs) = default;
ChunkColumn(ChunkColumn&& rhs) = default;
ChunkColumn& operator=(ChunkColumn&& rhs) = default;
// Converting constructor copying chunk data from an mclib column.
ChunkColumn(const mc::world::ChunkColumn& rhs);
// Iteration over the 16 vertical chunk slots (entries may be null = air).
iterator begin() {
return m_Chunks.begin();
}
iterator end() {
return m_Chunks.end();
}
reference operator[](std::size_t index) {
return m_Chunks[index];
}
const_iterator begin() const {
return m_Chunks.begin();
}
const_iterator end() const {
return m_Chunks.end();
}
const_reference operator[](std::size_t index) const {
return m_Chunks[index];
}
// NOTE(review): std::map::insert keeps an existing entry, so adding an
// entity at an occupied position is a no-op — confirm that is intended.
void AddBlockEntity(mc::block::BlockEntityPtr blockEntity) {
m_BlockEntities.insert(std::make_pair(blockEntity->GetPosition(), blockEntity));
}
void RemoveBlockEntity(mc::Vector3i pos) {
m_BlockEntities.erase(pos);
}
/**
 * Position is relative to this ChunkColumn position.
 */
mc::block::BlockPtr GetBlock(const mc::Vector3i& position);
const ChunkColumnMetadata& GetMetadata() const { return m_Metadata; }
mc::block::BlockEntityPtr GetBlockEntity(mc::Vector3i worldPos);
std::vector<mc::block::BlockEntityPtr> GetBlockEntities();
};
typedef std::shared_ptr<ChunkColumn> ChunkColumnPtr;
} // ns terra
#endif
|
#!/bin/sh
# Pull in the buildslave identity (BB_NAME, BB_MODE, ...) when present.
if test -f /etc/buildslave; then
. /etc/buildslave
fi
# Pick the make flavor and CPU count per platform.
case "$BB_NAME" in
FreeBSD*)
MAKE=gmake
NCPU=$(sysctl -n hw.ncpu)
;;
Amazon*|CentOS*|Debian*|Fedora*|RHEL*|SUSE*|Ubuntu*)
MAKE=make
NCPU=$(nproc)
;;
*)
echo "Unknown BB_NAME, assuming Linux"
MAKE=make
NCPU=$(nproc)
;;
esac
# Defaults; each can be overridden from the environment.
LINUX_OPTIONS=${LINUX_OPTIONS:-""}
CONFIG_OPTIONS=${CONFIG_OPTIONS:-""}
MAKE_OPTIONS=${MAKE_OPTIONS:-"-j$NCPU"}
MAKE_TARGETS_KMOD=${MAKE_TARGETS_KMOD:-"pkg-kmod pkg-utils"}
MAKE_TARGETS_DKMS=${MAKE_TARGETS_DKMS:-"pkg-dkms pkg-utils"}
INSTALL_METHOD=${INSTALL_METHOD:-"none"}
CONFIG_LOG="configure.log"
MAKE_LOG="make.log"
INSTALL_LOG="install.log"
# Expect a custom Linux build in the ../linux/ directory.
if [ "$LINUX_CUSTOM" = "yes" ]; then
	LINUX_DIR=$(readlink -f ../linux)
	# Build both flags in a single assignment. The previous two-line form
	# (`LINUX_OPTIONS="..." \` followed by `"--with-linux-obj=..."`) turned
	# the second string into a command word, so the shell tried to *execute*
	# --with-linux-obj=... with LINUX_OPTIONS only set in its environment,
	# and the obj flag never reached configure.
	LINUX_OPTIONS="$LINUX_OPTIONS --with-linux=$LINUX_DIR --with-linux-obj=$LINUX_DIR"
fi
# Echo commands from here on for the buildbot log.
set -x
sh ./autogen.sh >>$CONFIG_LOG 2>&1 || exit 1
# Dispatch on the requested install method.
case "$INSTALL_METHOD" in
packages|kmod|pkg-kmod|dkms|dkms-kmod)
./configure $CONFIG_OPTIONS $LINUX_OPTIONS >>$CONFIG_LOG 2>&1 || exit 1
# Build either kmod or dkms packages.
case "$INSTALL_METHOD" in
packages|kmod|pkg-kmod)
$MAKE $MAKE_TARGETS_KMOD >>$MAKE_LOG 2>&1 || exit 1
;;
dkms|pkg-dkms)
$MAKE $MAKE_TARGETS_DKMS >>$MAKE_LOG 2>&1 || exit 1
;;
esac
sudo -E rm *.src.rpm
# Preserve TEST and PERF packages which may be needed to investigate
# test failures. BUILD packages are discarded.
if test "$BB_MODE" = "TEST" -o "$BB_MODE" = "PERF"; then
if test -n "$UPLOAD_DIR"; then
BUILDER="$(echo $BB_NAME | cut -f1-3 -d'-')"
mkdir -p "$UPLOAD_DIR/$BUILDER/packages"
cp *.deb *.rpm $UPLOAD_DIR/$BUILDER/packages
fi
fi
# Install the freshly built packages with the platform's package manager.
case "$BB_NAME" in
Amazon*)
sudo -E yum -y localinstall *.rpm >$INSTALL_LOG 2>&1 || exit 1
;;
CentOS*)
sudo -E yum -y localinstall *.rpm >$INSTALL_LOG 2>&1 || exit 1
;;
Debian*)
sudo -E apt-get -y install ./*.deb >$INSTALL_LOG 2>&1 || exit 1
;;
Fedora*)
sudo -E dnf -y localinstall *.rpm >$INSTALL_LOG 2>&1 || exit 1
;;
RHEL*)
sudo -E yum -y localinstall *.rpm >$INSTALL_LOG 2>&1 || exit 1
;;
Ubuntu-14.04*)
# apt on 14.04 cannot install local debs; use gdebi per package
for file in *.deb; do
sudo -E gdebi -n $file >$INSTALL_LOG 2>&1 || exit 1
done
;;
Ubuntu*)
sudo -E apt-get -y install ./*.deb >$INSTALL_LOG 2>&1 || exit 1
;;
*)
echo "$BB_NAME unknown platform" >$INSTALL_LOG 2>&1
;;
esac
;;
in-tree)
# Build in place and wire the helpers for running tests from the tree.
./configure $CONFIG_OPTIONS $LINUX_OPTIONS >>$CONFIG_LOG 2>&1 || exit 1
$MAKE $MAKE_OPTIONS >>$MAKE_LOG 2>&1 || exit 1
./scripts/zfs-tests.sh -cv >>$INSTALL_LOG 2>&1
sudo -E scripts/zfs-helpers.sh -iv >>$INSTALL_LOG 2>&1
;;
system)
# Classic make install into the system prefix.
./configure $CONFIG_OPTIONS >>$CONFIG_LOG 2>&1 || exit 1
$MAKE $MAKE_OPTIONS >>$MAKE_LOG 2>&1 || exit 1
sudo -E $MAKE install >>$INSTALL_LOG 2>&1 || exit 1
;;
none)
# Compile only; nothing is installed.
./configure $CONFIG_OPTIONS $LINUX_OPTIONS >>$CONFIG_LOG 2>&1 || exit 1
$MAKE $MAKE_OPTIONS >>$MAKE_LOG 2>&1 || exit 1
;;
*)
echo "Unknown INSTALL_METHOD: $INSTALL_METHOD"
exit 1
;;
esac
exit 0
|
# coding=utf-8
import os
import sys
import numpy as np
import pandas as pd
import unittest
sys.path.append(os.path.dirname(__file__))
from tsbitmaps.tsbitmapper import TSBitMapper
from tsbitmaps.bitmapviz import create_bitmap_grid
class TestBitmapAlgorithm(unittest.TestCase):
    """Tests for TSBitMapper (discretization, bitmaps, anomaly detection)
    and the bitmap-grid visualization helper."""

    def test_bitmap(self):
        # Seed the RNG: several assertions below (all 8 bins appearing in the
        # discretization, the bounded anomaly count) are probabilistic on
        # random input, so an unseeded run made this test flaky.
        np.random.seed(42)
        bmp = TSBitMapper(feature_window_size=5, bins=8, level_size=2,
                          lag_window_size=10, lead_window_size=10, q=95)
        x = np.random.rand(500)
        binned_x = bmp.discretize(x)
        self.assertEqual(len(binned_x), len(x))
        self.assertTrue(set(binned_x) == set('01234567'))
        symbol_seq = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3')  # '01234567890123'
        sample_bitmap = bmp.get_bitmap(symbol_seq)
        self.assertEqual(len(sample_bitmap), 10)
        self.assertTrue(('4', '5') in sample_bitmap.keys())
        self.assertTrue(('9', '0') in sample_bitmap.keys())
        self.assertEqual(sample_bitmap['0', '1'], 1)
        sample_bitmap_w = bmp.get_bitmap_with_feat_window(symbol_seq)
        self.assertEqual(len(sample_bitmap_w), 8)
        self.assertTrue(('4', '5') not in sample_bitmap_w.keys())
        self.assertTrue(('9', '0') not in sample_bitmap_w.keys())
        self.assertEqual(sample_bitmap_w[('0', '1')], 1)
        ypred = bmp.fit_predict(x)
        scores = bmp.get_ref_bitmap_scores()
        # Scores are zero inside the initial lag window and non-negative after it
        self.assertTrue((scores[0:bmp._lag_window_size] == 0.0).all())
        self.assertTrue((scores[bmp._lag_window_size:-bmp._lead_window_size] >= 0).all())
        self.assertTrue(0 < (ypred == -1).sum() <= 25)

    def test_anomaly_detection_ecg(self):
        # Requires the ECG fixture files under data/
        ecg_norm = np.loadtxt('data/ecg_normal.txt')
        ecg_anom = np.loadtxt('data/ecg_anom.txt')
        bmp = TSBitMapper(feature_window_size=20, bins=5, level_size=3, lag_window_size=200, lead_window_size=40)
        ypred_unsupervised = bmp.fit_predict(ecg_anom)
        self.assertTrue(0 < (ypred_unsupervised == -1).sum() <= 3)
        bmp.fit(ecg_norm)
        ypred_supervised = bmp.predict(ecg_anom)
        self.assertTrue(0 < (ypred_supervised == -1).sum() <= 3)

    def test_anomaly_detection_pattern(self):
        # Requires the pattern fixture files under data/
        pattern_norm = np.loadtxt('data/pattern_normal.txt')
        pattern_anom = pd.read_csv('data/pattern_anom.txt').iloc[:, 0]
        bmp = TSBitMapper(feature_window_size=50, bins=5, level_size=2, lag_window_size=200, lead_window_size=100)
        ypred_unsupervised = bmp.fit_predict(pattern_anom)
        self.assertTrue(0 < (ypred_unsupervised == -1).sum() <= 3)
        bmp.fit(pattern_norm)
        ypred_supervised = bmp.predict(pattern_anom)
        self.assertTrue(0 < (ypred_supervised == -1).sum() <= 3)

    # @unittest.skip("tmp")
    def test_bitmapviz(self):
        bmp = TSBitMapper(feature_window_size=20, bins=12, level_size=3, lag_window_size=200, lead_window_size=40)
        ecg_anom = np.loadtxt('data/ecg_anom.txt')
        ecg_bitmap = bmp.get_tsbitmap(ecg_anom)
        bmp_grid = create_bitmap_grid(ecg_bitmap, n=4, num_bins=12, level_size=3)
        # Every bitmap entry lands in exactly one positive grid cell
        self.assertEqual((bmp_grid > 0).sum(), len(ecg_bitmap))
        self.assertEqual(bmp_grid.shape, (27, 64))


if __name__ == '__main__':
    unittest.main()
|
<gh_stars>0
const util = require('util');
const chalk = require('chalk');
const glob = require('glob');
const generator = require('yeoman-generator');
const packagejs = require(`${__dirname}/../../package.json`);
const semver = require('semver');
const BaseGenerator = require('../common');
const jhipsterConstants = require('generator-jhipster/generators/generator-constants');
const _s = require('underscore.string');
const fs = require('fs');
// Create a generator subclass and mix in the shared helpers from BaseGenerator.
const JhipsterGenerator = generator.extend({});
util.inherits(JhipsterGenerator, BaseGenerator);
module.exports = JhipsterGenerator.extend({
initializing: {
readConfig() {
this.jhipsterAppConfig = this.getAllJhipsterConfig();
if (!this.jhipsterAppConfig) {
this.error('Can\'t read .yo-rc.json');
}
this.entityConfig = this.options.entityConfig;
},
displayLogo() {
this.log(chalk.white(`Running ${chalk.bold(packagejs.description)} Generator! ${chalk.yellow(`v${packagejs.version}\n`)}`));
},
validate() {
// this shouldn't be run directly
if (!this.entityConfig) {
this.env.error(`${chalk.red.bold('ERROR!')} This sub generator should be used only from JHipster and cannot be run directly...\n`);
}
}
},
prompting() {
// don't prompt if data are imported from a file
if (this.entityConfig.useConfigurationFile === true && this.entityConfig.data && typeof this.entityConfig.data.yourOptionKey !== 'undefined') {
this.yourOptionKey = this.entityConfig.data.yourOptionKey;
return;
}
const done = this.async();
const prompts = [];
this.prompt(prompts).then((props) => {
this.props = props;
// To access props later use this.props.someOption;
done();
});
},
writing: {
updateFiles() {
// read config from .yo-rc.json
this.baseName = this.jhipsterAppConfig.baseName;
this.packageName = this.jhipsterAppConfig.packageName;
this.packageFolder = this.jhipsterAppConfig.packageFolder;
this.clientFramework = this.jhipsterAppConfig.clientFramework;
this.clientPackageManager = this.jhipsterAppConfig.clientPackageManager;
this.buildTool = this.jhipsterAppConfig.buildTool;
// use function in generator-base.js from generator-jhipster
// this.angularAppName = this.getAngularAppName();
// use constants from generator-constants.js
const javaDir = `${jhipsterConstants.SERVER_MAIN_SRC_DIR + this.packageFolder}/`;
const javaTestDir = `${jhipsterConstants.SERVER_TEST_SRC_DIR + this.packageFolder}/`;
// const resourceDir = jhipsterConstants.SERVER_MAIN_RES_DIR;
// const webappDir = jhipsterConstants.CLIENT_MAIN_SRC_DIR;
const entityName = this.entityConfig.entityClass;
// do your stuff here
// check if repositories are already annotated
const uuidGeneratorAnnotation = '@GeneratedValue.*"UUIDGenerator"';
const pattern = new RegExp(uuidGeneratorAnnotation, 'g');
const entityJson = this.fs.readJSON(`${process.cwd()}/.jhipster/${entityName}.json`);
const preserveLongIdRegExp = new RegExp('@puc.preserveLongId', 'g');
const preserveLongId = preserveLongIdRegExp.test(entityJson.javadoc);
const content = this.fs.read(`${javaDir}domain/${entityName}.java`, 'utf8');
this.log(`\n${chalk.bold.green(packagejs.description)} ${chalk.green('updating the entity ')}${chalk.bold.yellow(entityName)}`);
let f;
if (!pattern.test(content)) {
// if (preserveLongIdRegExp.test(entityJson.javadoc)) {
// this.log(`${chalk.bold.yellow('Type Long preserved for entity')} ${chalk.bold.yellow(entityName)}`);
// return;
// }
// We need to convert this entity
// const preserveFieldsRegExp = /(?:.*?\s)@ch.preserveLongTypesOn (.*)(?:\s|$)/g;
// const preserveFieldsMatch = preserveFieldsRegExp.exec(entityJson.javadoc);
//
// const preserveFields = []; // preserveFieldsMatch[1].split(/,\s/);
// this.log(`${chalk.yellow('DEBUG')} Type Long will be preserved for fields: ${preserveFields}\n`);
//
// const replaceFields = entityJson.fields
// .filter(f => f.fieldType === 'Long' && f.fieldName.toLowerCase().indexOf('id') > 0 &&
// !preserveFields.some(p => p === f.fieldName));
// this.log(`${chalk.green('DEBUG')} Type Long will be replaced with UUID for fields: ${replaceFields}\n`);
//
// if (replaceFields.length === 0) {
// this.log(`${chalk.green('DEBUG')} Nothing to convert for this entity\n`);
// return;
// }
// replaceFields.forEach(f => {
// switch (f) {
// case 'id':
// break;
// default:
// }
// }
const convertForRelations = [];
entityJson.relationships.forEach((rel) => {
if (rel.otherEntityField === 'id' || rel.relationshipType === 'one-to-many' || rel.relationshipType === 'one-to-one') {
const upperOtherEntityName = rel.otherEntityName.charAt(0).toUpperCase() + rel.otherEntityName.slice(1);
// this.log(`${chalk.yellow('DEBUG')} upperOtherEntityName: ${upperOtherEntityName}\n`);
const otherEntityNameJson = this.fs.readJSON(`${process.cwd()}/.jhipster/${upperOtherEntityName}.json`);
// this.log(`${chalk.yellow('DEBUG')} otherEntityNameJson: ${JSON.stringify(otherEntityNameJson,null,'\t')}\n`);
// if (!preserveLongIdRegExp.test(otherEntityNameJson.javadoc)) { // returning true for first rel
if (!new RegExp('@puc.preserveLongId', 'g').test(otherEntityNameJson.javadoc)) {
convertForRelations.push(rel);
// this.log(`${chalk.yellow('DEBUG')} convertForRelation: ${JSON.stringify(rel, null, '\t')}\n`);
}
}
});
// JAVA
// Domain
if (preserveLongId) {
this.log(`${chalk.bold.yellow('Type Long has been preserved for id field')}`);
} else {
this.convertIdField(`${javaDir}domain/${entityName}.java`);
}
// DTO
f = `${javaDir}service/dto/${entityName}DTO.java`;
if (fs.existsSync(f)) {
if (!preserveLongId || convertForRelations.length > 0) {
this.importUUID(f, 'import java.util.Objects;');
}
if (!preserveLongId) {
this.convertLongToUUIDForIdField(f);
}
convertForRelations.forEach((rel) => {
this.convertFromTypeToTypeForRelation(f, 'Long', 'UUID', rel.relationshipName, rel.otherEntityName);
});
}
// Mapper
f = `${javaDir}service/mapper/${entityName}Mapper.java`;
if (fs.existsSync(f) && !preserveLongId) {
this.importUUID(f, 'import org.mapstruct.*;');
this.longToUUID(f);
}
// And the Repository
if (!preserveLongId) {
f = `${javaDir}repository/${entityName}Repository.java`;
this.importUUID(f, 'import org.springframework.data.jpa.repository.*;');
this.convertLongToUUID(f);
}
// The Search Repository
if (fs.existsSync(`${javaDir}repository/search/${entityName}SearchRepository.java`)) {
this.importUUID(`${javaDir}repository/search/${entityName}SearchRepository.java`, 'import org.springframework.data.elasticsearch.repository.ElasticsearchRepository;');
this.longToUUID(`${javaDir}repository/search/${entityName}SearchRepository.java`);
}
// Service
f = `${javaDir}service/${entityName}Service.java`;
if (fs.existsSync(f) && !preserveLongId) {
this.importUUID(f, 'import java.util.Optional;');
this.longToUUID(f);
}
// ServiceImp
f = `${javaDir}service/impl/${entityName}ServiceImpl.java`;
if (fs.existsSync(f) && !preserveLongId) {
this.importUUID(f, 'import java.util.Optional;');
this.convertLongToUUID(f);
}
// Criteria
f = `${javaDir}service/dto/${entityName}Criteria.java`;
if (fs.existsSync(f)) {
if (!preserveLongId) {
this.convertLongFilterToFilterForIdField(f);
}
convertForRelations.forEach((rel) => {
this.convertFromTypeToTypeForRelation(f, 'LongFilter', 'Filter', rel.relationshipName, rel.otherEntityName);
});
}
// Resource
if (!preserveLongId) {
f = `${javaDir}web/rest/${entityName}Resource.java`;
this.importUUID(f);
this.convertLongToUUIDForIdField(f);
}
// JavaScript
const entityNameSpinalCased = _s.dasherize(_s.decapitalize(entityName));
const stateFile = glob.sync(`${this.webappDir}../webapp/app/entities/${entityNameSpinalCased}/${entityNameSpinalCased}*.state.js`)[0];
// TODO reimplement
// this.replaceContent(stateFile, '\{id\:int\}', '{id:[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}}', true);
this.log(`${chalk.red('DEBUG')} Updating entity ${entityName} 3.3 ...\n`);
// Liquibase
// f = `src/main/resources/config/liquibase/changelog/entity_${entityName}.xml`;
const file = glob.sync(`src/main/resources/config/liquibase/changelog/*entity_${entityName}.xml`)[0];
if (!preserveLongId) {
this.replaceContent(file, 'name="id" type="bigint" autoIncrement="\\$\\{autoIncrement\\}"', 'name="id" type="uuid"', true);
// this.replaceContent(f, 'autoIncrement="\\$\\{autoIncrement\\}"', '', true);
}
convertForRelations.forEach((rel) => {
this.convertInLiquibaseForRelation(file, rel.relationshipName);
});
// Test
// ResourceIntTest
// f = `${javaTestDir}/web/rest/${entityName}ResourceIntTest.java`; // till JH 6.3.1
f = `${javaTestDir}/web/rest/${entityName}ResourceIT.java`;
if (!preserveLongId || convertForRelations.length > 0) {
this.importUUID(f, 'import java.util.List;');
}
if (!preserveLongId) {
// Handle the question of life check
this.replaceContent(f, '(42L|42)', 'UUID.fromString("00000000-0000-0000-0000-000000000042")', true);
this.replaceContent(f, 'setId\\(1L\\)', 'setId(UUID.fromString("00000000-0000-0000-0000-000000000001"))', true);
this.replaceContent(f, 'setId\\(2L\\)', 'setId(UUID.fromString("00000000-0000-0000-0000-000000000002"))', true);
this.replaceContent(f, 'getId\\(\\)\\.intValue\\(\\)', 'getId().toString()', true);
this.replaceContent(f, '\\.intValue\\(\\)', '.toString()', true);
this.replaceContent(f, 'Long.MAX_VALUE', 'UUID.randomUUID()', true);
// this.replaceContent(f, 'getId\\(\\);', 'getId().toString();', true);
}
convertForRelations.forEach((rel) => {
this.convertFromTypeToTypeForRelation(f, 'Long', 'UUID', rel.relationshipName, rel.otherEntityName);
this.convertShouldNotBeFoundForRelation(f, rel.relationshipName);
});
}
},
writeFiles() {
    // Expose `this.template(source, destination)` as a shorthand that copies
    // a template from the generator's template path to the destination path,
    // using the generator itself as the template context.
    // NOTE(review): `fs.copyTpl` refers to the module-level `fs` import, not
    // `this.fs` (the mem-fs editor Yeoman provides) — confirm the imported
    // module actually exposes copyTpl.
    this.template = function (source, destination) {
        fs.copyTpl(this.templatePath(source), this.destinationPath(destination), this);
    };
},
updateConfig() {
    // Persist this module's option under 'yourOptionKey' in the entity's
    // .jhipster configuration file.
    this.updateEntityConfig(this.entityConfig.filename, 'yourOptionKey', this.yourOptionKey);
}
},
end() {
if (this.yourOptionKey) {
this.log(`\n${chalk.bold.green('postgresuuid-converter enabled')}`);
}
}
});
|
#!/bin/bash -l
# Slurm batch job: up to 80 h wall time, 9 GB per CPU, one log file per array task.
#SBATCH --time=0-80:00:00 --mem-per-cpu=9000
#SBATCH -o ./logs/job-%a.out
#SBATCH --array=405
module load matlab
# Each array task writes its own .mat output; the RNG seed is the task id so
# every task's run is reproducible.
workfolder="/scratch/work/pajunel2/"
outputfile="saved_outputs/velocity_dodeca3_noscat_full_$SLURM_ARRAY_TASK_ID.mat"
N_diffuse_repetitions=20
setting="dodeca_velocity_noscat"
rng_seed=$SLURM_ARRAY_TASK_ID
# Create the output file before launching MATLAB — presumably so the file
# exists even if the MATLAB run fails; confirm downstream tooling relies on it.
touch $outputfile
matlab -nojvm -r "savePressureFieldsTriton('$workfolder','$outputfile',$N_diffuse_repetitions,'$setting',$rng_seed); exit(0)"
|
#!/bin/bash
# Conda post-link script: download the Bioconductor annotation tarball from the
# first reachable mirror, verify its md5, and install it into R's library.
FN="TxDb.Ptroglodytes.UCSC.panTro4.refGene_3.10.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.10/data/annotation/src/contrib/TxDb.Ptroglodytes.UCSC.panTro4.refGene_3.10.0.tar.gz"
  "https://bioarchive.galaxyproject.org/TxDb.Ptroglodytes.UCSC.panTro4.refGene_3.10.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-txdb.ptroglodytes.ucsc.pantro4.refgene/bioconductor-txdb.ptroglodytes.ucsc.pantro4.refgene_3.10.0_src_all.tar.gz"
)
MD5="83e20bd91796199398573ed8c4af75c0"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"

SUCCESS=0
for URL in "${URLS[@]}"; do
  # -L follows redirects (the mirrors redirect); -f makes curl fail on an
  # HTTP error instead of saving the error page as the tarball.
  curl -fL "$URL" > "$TARBALL" || continue

  # Platform-specific md5sum checks. GNU md5sum's check format requires two
  # characters (separator + mode indicator) between the hash and the name.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done

if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
#!/bin/bash
# Run build-kms-core.sh before building this module
# Builds the kms-elements Kurento module against in-tree checkouts of its
# sibling modules (kms-core, kms-jsonrpc, jsoncpp, openwebrtc-gst-plugins).
set -xe
ROOT=`pwd`
# All sibling module checkouts are expected next to this script.
KMS_ELEMENTS_DIR=$ROOT/kms-elements
KMS_CORE_DIR=$ROOT/kms-core
KMS_CMAKE_UTILS_DIR=$ROOT/kms-cmake-utils
KURENTO_MODULE_CREATOR_DIR=$ROOT/kurento-module-creator
KMS_JSONRPC_DIR=$ROOT/kms-jsonrpc
KMS_JSONCPP_DIR=$ROOT/jsoncpp
OPENWEBRTC_GST_PLUGINS_DIR=$ROOT/openwebrtc-gst-plugins
LIBRARY_PATH="$KMS_JSONCPP_DIR/src/lib_json";
#Install dependencies
sudo apt-get install --no-install-recommends -y libsoup2.4-dev libnice-dev libsctp-dev
#Build openwebrtc-gst-plugins
./build-openwebrtc-gst-plugins.sh
#build kms-elements
cd $KMS_ELEMENTS_DIR
# Point cmake at the in-tree builds of the sibling modules instead of
# installed system packages.
KURENTO_MODULES_DIR="$KMS_CORE_DIR/src/server/kmd";
CMAKE_MODULE_PATH="$KMS_CMAKE_UTILS_DIR;$KMS_CMAKE_UTILS_DIR/CMake;$KURENTO_MODULE_CREATOR_DIR/classes;$KMS_CORE_DIR;$KMS_CORE_DIR/CMake;$KMS_CORE_DIR/src/server;$KMS_CORE_DIR/src/gst-plugins;$KMS_CORE_DIR/src/gst-plugins/commons;$KMS_JSONRPC_DIR;$KMS_JSONRPC_DIR/src;"
CMAKE_PREFIX_PATH="$KURENTO_MODULE_CREATOR_DIR;$KMS_CORE_DIR;$KMS_CORE_DIR/src/server;$KMS_CORE_DIR/src/gst-plugins;$KMS_CORE_DIR/src/gst-plugins/commons;$KMS_JSONRPC_DIR;$KMS_JSONRPC_DIR/src;$KMS_JSONRPC_DIR/src/jsoncpp";
CMAKE_INSTALL_LIBDIR="lib/x86_64-linux-gnu"
CMAKE_INSTALL_PREFIX="/usr"
PKG_CONFIG_PATH="$OPENWEBRTC_GST_PLUGINS_DIR:$KMS_JSONCPP_DIR/pkg-config/";
CMAKE_CXX_FLAGS=" -I$KMS_JSONCPP_DIR/include/ -I$OPENWEBRTC_GST_PLUGINS_DIR/gst-libs/ -L$KMS_JSONCPP_DIR/src/lib_json -L$OPENWEBRTC_GST_PLUGINS_DIR/gst-libs/gst/sctp/.libs/ -L$OPENWEBRTC_GST_PLUGINS_DIR/gst/videorepair/.libs"
CMAKE_C_FLAGS="$CMAKE_CXX_FLAGS"
env PKG_CONFIG_PATH=$PKG_CONFIG_PATH cmake -DKURENTO_MODULES_DIR=$KURENTO_MODULES_DIR -DCMAKE_MODULE_PATH=$CMAKE_MODULE_PATH -DCMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH -DCMAKE_INSTALL_LIBDIR=$CMAKE_INSTALL_LIBDIR -DCMAKE_INSTALL_PREFIX=$CMAKE_INSTALL_PREFIX -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS"
# Libraries built in-tree must be on the linker and loader paths for make.
LIBRARY_PATH="$KMS_JSONCPP_DIR/src/lib_json:$OPENWEBRTC_GST_PLUGINS_DIR/gst-libs/gst/sctp/.libs/:$OPENWEBRTC_GST_PLUGINS_DIR/gst/videorepair/.libs";
env LD_LIBRARY_PATH=$LIBRARY_PATH LIBRARY_PATH=$LIBRARY_PATH make
#Tests
# GST_PLUGIN_PATH="$KMS_CORE_DIR/server:$KMS_CORE_DIR/src/gst-plugins:$KMS_ELEMENTS_DIR/src/server:$KMS_ELEMENTS_DIR/src/gst-plugins";
#run all tests
#env GST_PLUGIN_PATH=$GST_PLUGIN_PATH LD_LIBRARY_PATH=$LIBRARY_PATH LIBRARY_PATH=$LIBRARY_PATH make check
#run only rtp_endpoint test
#env GST_PLUGIN_PATH=$GST_PLUGIN_PATH LD_LIBRARY_PATH=$LIBRARY_PATH LIBRARY_PATH=$LIBRARY_PATH make test_rtp_endpoint.check
cd $ROOT
|
'use strict';
import Utils from './Utils';
import SfmcOauth from './SfmcOauth'
import RestApiHelper from './RestApiHelper'
import * as shortid from "shortid";
import Constants from './Constants';
class SfmcApiSingleton
{
    private static _instance: SfmcApiSingleton;

    // Credentials are read from the environment once, at class construction.
    private _clientId = process.env.SFMC_API_CLIENTID;
    private _clientSecret = process.env.SFMC_API_CLIENTSECRET;
    private _sfmcOauth: SfmcOauth;
    private _sfmcRestApiHelper: RestApiHelper;

    /*
     * Singleton boilerplate
     *
     * The constructor is private; it throws when the SFMC credentials are
     * missing from the environment so a misconfigured deployment fails fast.
     */
    private constructor()
    {
        if (!this._clientId || !this._clientSecret)
        {
            let errorMsg = "ClientID and ClientSecret not found in environment variables";
            Utils.logError(errorMsg);
            throw new Error(errorMsg);
        }

        this._sfmcRestApiHelper = new RestApiHelper();
        this._sfmcOauth = new SfmcOauth(this._clientId, this._clientSecret);
    }

    public static get Instance()
    {
        return this._instance || (this._instance = new this())
    }

    /*
     * connectToMarketingCloud: tests connection to Marketing Cloud
     * Uses SFMC_API_CLIENTID and SFMC_API_CLIENTSECRET to get an OAuth Access Token
     *
     * Resolves true on success; rejects when the token cannot be obtained.
     */
    public connectToMarketingCloud() : Promise<boolean>
    {
        let self = this;

        return new Promise<boolean>((resolve, reject) =>
        {
            self._sfmcOauth.getOAuthAccessToken()
            .then((oauthAccessToken) => {
                // success
                Utils.logInfo("Connected to Marketing Cloud! OAuthToken: " + oauthAccessToken);
                resolve(true);
            })
            .catch(() => {
                // error
                Utils.logError("Error connecting to Marketing Cloud - check console logs");
                reject(false);
            });
        });
    }

    /*
     * createContact: creates a new Contact in Marketing Cloud
     * See: https://developer.salesforce.com/docs/atlas.en-us.mc-apis.meta/mc-apis/createContacts.htm
     *
     * Missing name/email arguments are filled with generated placeholders.
     */
    public createContact(firstName?: string, lastName?: string, email?: string) : Promise<boolean>
    {
        let self = this;

        // set defaults if missing
        if(!firstName) {
            firstName = 'firstName-' + shortid.generate();
        }
        if(!lastName) {
            lastName = 'lastName-' + shortid.generate();
        }
        if(!email) {
            // NOTE(review): this literal looks mangled ("@san<EMAIL>") —
            // confirm the intended default email domain.
            email = 'email-' + shortid.generate() + '@san<EMAIL>';
        }

        // POST body
        let postBody = {
            "contactKey": 'key-' + shortid.generate(),
            "attributeSets":[{
                "name":"Email Addresses",
                "items":[{
                    "values": [{
                        "name":"Email Address",
                        "value": email
                    },
                    {
                        "name": "HTML Enabled",
                        "value": true
                    }]
                }]
            }]
        };

        // Make the async call
        return new Promise<boolean>((resolve, reject) =>
        {
            self._sfmcOauth.getOAuthAccessToken()
            .then((oauthAccessToken) => {
                Utils.logInfo("Creating new contact: " + firstName + " " + lastName + " " + email);

                // Make REST API call
                self._sfmcRestApiHelper.doPost(Constants.SfmcApiContactsUrl, postBody, oauthAccessToken)
                .then(() => {
                    // success
                    Utils.logInfo("Successfully created contact");
                    resolve(true);
                })
                .catch((error: any) => {
                    // error
                    reject(error);
                });
            })
            .catch((error: any) => {
                // Fix: without this catch a failed token fetch left the
                // returned Promise pending forever.
                reject(error);
            });
        });
    }

    /*
     * getContactCount: gets the number of Contacts in this Marketing Cloud account
     * See: https://developer.salesforce.com/docs/atlas.en-us.noversion.mc-apis.meta/mc-apis/searchSchema.htm
     *
     * NOTE: response parsing is still TBD; currently resolves a hard-coded 10.
     */
    public getContactCount() : Promise<number>
    {
        let self = this;

        // POST body
        let postBody = {
            // TBD
        };

        // Make the async call
        return new Promise<number>((resolve, reject) =>
        {
            self._sfmcOauth.getOAuthAccessToken()
            .then((oauthAccessToken) => {
                Utils.logInfo(`Getting count of Contacts`);

                // Make REST API call
                self._sfmcRestApiHelper.doPost(Constants.SfmcApiContactsUrl, postBody, oauthAccessToken)
                .then((response: any) => {
                    // success
                    // TBD: parse response to get count and return it
                    let contactCount = 10;
                    Utils.logInfo("Successfully got contact count: " + contactCount);
                    resolve(contactCount);
                })
                .catch((error: any) => {
                    // error
                    reject(error);
                });
            })
            .catch((error: any) => {
                // Fix: propagate token-fetch failures instead of hanging.
                reject(error);
            });
        });
    }
}
/** Instantiate singleton */
export const SfmcApi = SfmcApiSingleton.Instance;
|
# test basic await expression
# adapted from PEP0492
async def abinary(n):
    # Recursive coroutine: prints each visited n, then returns the node count
    # of a full binary tree of depth n (i.e. 2**(n+1) - 1).
    print(n)
    if n <= 0:
        return 1
    left = await abinary(n - 1)
    right = await abinary(n - 1)
    return left + 1 + right
# Drive the coroutine to completion by hand (no event loop): repeatedly send
# None until it raises StopIteration, then report completion.
o = abinary(4)
try:
    while True:
        o.send(None)
except StopIteration:
    print('finished')
|
<gh_stars>1-10
#include "stdafx.h"
#include "WeaponBinoculars.h"
#include "xr_level_controller.h"
#include "level.h"
#include "ui\UIFrameWindow.h"
#include "WeaponBinocularsVision.h"
#include "object_broker.h"
#include "hudmanager.h"
#include "inventory.h"
// Binoculars start with no vision overlay; the vision flag is read later in
// Load() from the item's settings section.
CWeaponBinoculars::CWeaponBinoculars()
    : m_binoc_vision(NULL), m_bVision(false)
{
}
CWeaponBinoculars::~CWeaponBinoculars()
{
    // Release the vision overlay if it was ever created (pointer may be NULL;
    // xr_delete is the engine's delete helper — it nulls the pointer).
    xr_delete (m_binoc_vision);
}
// Loads binocular configuration from the given settings section: the zoom
// in/out sounds and whether the "vision" overlay is enabled.
void CWeaponBinoculars::Load (LPCSTR section)
{
    inherited::Load(section);

    // Sounds
    m_sounds.LoadSound(section, "snd_zoomin", "sndZoomIn", SOUND_TYPE_ITEM_USING);
    m_sounds.LoadSound(section, "snd_zoomout", "sndZoomOut", SOUND_TYPE_ITEM_USING);

    // r_bool returns an int-like BOOL; !! collapses it to a clean true/false.
    m_bVision = !!pSettings->r_bool(section,"vision_present");
}
// Binoculars have no fire mode: the fire command is remapped to zoom; every
// other command is forwarded to the base weapon unchanged.
bool CWeaponBinoculars::Action(s32 cmd, u32 flags)
{
    if (cmd == kWPN_FIRE)
        return inherited::Action(kWPN_ZOOM, flags);
    return inherited::Action(cmd, flags);
}
// Plays the zoom-in sound, lazily creates the vision overlay (if enabled for
// this section) and restores the last remembered zoom factor.
void CWeaponBinoculars::OnZoomIn ()
{
    if(H_Parent() && !IsZoomed())
    {
        m_sounds.StopSound("sndZoomOut");
        // hud (2D) sound mode when the holder is the locally controlled entity
        bool b_hud_mode = (Level().CurrentEntity() == H_Parent());
        m_sounds.PlaySound("sndZoomIn", H_Parent()->Position(), H_Parent(), b_hud_mode);

        // Create the overlay only once; it lives until zoom-out / destroy.
        if(m_bVision && !m_binoc_vision)
        {
            //.VERIFY (!m_binoc_vision);
            m_binoc_vision = xr_new<CBinocularsVision>(cNameSect());
        }
    }
    inherited::OnZoomIn ();
    SetZoomFactor (m_fRTZoomFactor);
}
// Plays the zoom-out sound, destroys the vision overlay, and remembers the
// current zoom factor so the next zoom-in restores it.
void CWeaponBinoculars::OnZoomOut ()
{
    if(H_Parent() && IsZoomed() && !IsRotatingToZoom())
    {
        m_sounds.StopSound("sndZoomIn");
        // hud (2D) sound mode when the holder is the locally controlled entity
        bool b_hud_mode = (Level().CurrentEntity() == H_Parent());
        m_sounds.PlaySound("sndZoomOut", H_Parent()->Position(), H_Parent(), b_hud_mode);
        // NOTE(review): OnZoomIn only creates the overlay when m_bVision is
        // set, so this VERIFY would fire in debug for sections without
        // "vision_present" — confirm intended.
        VERIFY (m_binoc_vision);
        xr_delete (m_binoc_vision);
        m_fRTZoomFactor = GetZoomFactor();//store current
    }
    inherited::OnZoomOut();
}
BOOL CWeaponBinoculars::net_Spawn(CSE_Abstract* DC)
{
    // Reset the remembered zoom to the configured scope factor on (re)spawn.
    m_fRTZoomFactor = m_zoom_params.m_fScopeZoomFactor;
    inherited::net_Spawn (DC);
    // NOTE(review): the base-class result is discarded and TRUE is returned
    // unconditionally — confirm spawn failures cannot occur here.
    return TRUE;
}
void CWeaponBinoculars::net_Destroy()
{
    inherited::net_Destroy();
    // Free the vision overlay in case the weapon is destroyed while zoomed.
    xr_delete(m_binoc_vision);
}
// Per-frame client update: drives the vision overlay, but only while the
// binoculars are held and fully zoomed in.
void CWeaponBinoculars::UpdateCL()
{
    inherited::UpdateCL();

    //manage visible entities here...
    const bool fully_zoomed = H_Parent() && IsZoomed() && !IsRotatingToZoom();
    if (fully_zoomed && m_binoc_vision)
        m_binoc_vision->Update();
}
// The overlay UI is drawn only when we are the active inventory item, held
// by someone, fully zoomed in, and the vision object exists.
bool CWeaponBinoculars::render_item_ui_query()
{
    if (m_pInventory->ActiveItem() != this)
        return false;
    return H_Parent() && IsZoomed() && !IsRotatingToZoom() && m_binoc_vision;
}
void CWeaponBinoculars::render_item_ui()
{
    // Only reached when render_item_ui_query() returned true, which implies
    // m_binoc_vision is non-NULL here.
    m_binoc_vision->Draw();
    inherited::render_item_ui ();
}
void GetZoomData(const float scope_factor, float& delta, float& min_zoom_factor)
{
float def_fov = float(g_fov);
float min_zoom_k = 0.3f;
float zoom_step_count = 3.0f;
float delta_factor_total = def_fov-scope_factor;
VERIFY(delta_factor_total>0);
min_zoom_factor = def_fov-delta_factor_total*min_zoom_k;
delta = (delta_factor_total*(1-min_zoom_k) )/zoom_step_count;
}
// Steps the zoom one notch tighter (smaller factor), clamped to the range
// computed by GetZoomData().
void CWeaponBinoculars::ZoomInc()
{
    float step, widest_factor;
    GetZoomData(m_zoom_params.m_fScopeZoomFactor, step, widest_factor);

    float target = GetZoomFactor() - step;
    clamp(target, m_zoom_params.m_fScopeZoomFactor, widest_factor);
    SetZoomFactor(target);
}
// Steps the zoom one notch wider (larger factor), clamped to the range
// computed by GetZoomData().
void CWeaponBinoculars::ZoomDec()
{
    float step, widest_factor;
    GetZoomData(m_zoom_params.m_fScopeZoomFactor, step, widest_factor);

    float target = GetZoomFactor() + step;
    clamp(target, m_zoom_params.m_fScopeZoomFactor, widest_factor);
    SetZoomFactor(target);
}
void CWeaponBinoculars::save(NET_Packet &output_packet)
{
    // Persist the last user-selected zoom factor alongside base weapon state.
    inherited::save(output_packet);
    save_data (m_fRTZoomFactor,output_packet);
}
void CWeaponBinoculars::load(IReader &input_packet)
{
    // Mirror of save(): base state first, then the remembered zoom factor.
    inherited::load(input_packet);
    load_data (m_fRTZoomFactor,input_packet);
}
// Fills the HUD brief-info strings: short name, icon section, and an empty
// ammo count (binoculars carry no ammo). fire_mode is left unchanged.
void CWeaponBinoculars::GetBriefInfo(xr_string& str_name, xr_string& icon_sect_name, xr_string& str_count, string16& fire_mode)
{
    str_name = NameShort();
    str_count = "";
    icon_sect_name = *cNameSect();
}
// Drops any references the vision overlay holds to an object that the level
// is about to release.
void CWeaponBinoculars::net_Relcase (CObject *object)
{
    if (m_binoc_vision)
        m_binoc_vision->remove_links(object);
}
// Binoculars are never a lethal weapon.
bool CWeaponBinoculars::can_kill () const
{
    return false;
}
|
#!/bin/bash
echo "Running in $(pwd)"

# CI environment knobs; every value is overridable from the caller's env.
export ARCH=${ARCH:-64}
export BOLTDIR=lightning-rfc
export CC=${COMPILER:-gcc}
export COMPAT=${COMPAT:-1}
export TEST_CHECK_DBSTMTS=${TEST_CHECK_DBSTMTS:-0}
export DEVELOPER=${DEVELOPER:-1}
export EXPERIMENTAL_FEATURES=${EXPERIMENTAL_FEATURES:-0}
# NOTE(review): $CWD is not set by bash (bash provides $PWD) — confirm the CI
# environment exports CWD before this script runs.
export PATH=$CWD/dependencies/bin:"$HOME"/.local/bin:"$PATH"
export PYTEST_OPTS="--maxfail=5 --suppress-no-test-exit-code ${PYTEST_OPTS}"
export PYTEST_PAR=${PYTEST_PAR:-10}
export PYTEST_SENTRY_ALWAYS_REPORT=1
export SLOW_MACHINE=1
export TEST_CMD=${TEST_CMD:-"make -j $PYTEST_PAR pytest"}
export TEST_DB_PROVIDER=${TEST_DB_PROVIDER:-"sqlite3"}
export TEST_NETWORK=${NETWORK:-"regtest"}
export TIMEOUT=900
export VALGRIND=${VALGRIND:-0}
export FUZZING=${FUZZING:-0}
export LIGHTNINGD_POSTGRES_NO_VACUUM=1
# Python dependencies: pinned requirements, then the in-tree pyln packages
# (time-limited so a wedged pip cannot stall the whole CI run).
pip3 install --user -U \
    -r requirements.lock
timeout 60 pip3 install --user \
    --use-feature=in-tree-build \
    ./contrib/pyln-client \
    ./contrib/pyln-proto \
    ./contrib/pyln-testing

# Install utilities that aren't dependencies, but make
# running tests easier/feasible on CI (and pytest which
# keeps breaking the rerunfailures plugin).
pip3 install --user \
    blinker \
    flake8 \
    flaky \
    mako \
    pytest-sentry \
    pytest-test-groups==1.0.3 \
    pytest-custom-exit-code==0.3.0 \
    pytest-timeout \
    pytest-json-report
# Fetch the BOLT spec repo and submodules, then configure the build.
git clone https://github.com/lightningnetwork/lightning-rfc.git ../lightning-rfc
git submodule update --init --recursive
./configure CC="$CC"
cat config.vars

# Generated pytest configuration: flaky retries, timeouts, JUnit/JSON reports.
cat << EOF > pytest.ini
[pytest]
addopts=-p no:logging --color=yes --timeout=1800 --timeout-method=thread --test-group-random-seed=42 --force-flaky --no-success-flaky-report --max-runs=3 --junitxml=report.xml --json-report --json-report-file=report.json --json-report-indent=2
markers =
    slow_test: marks tests as slow (deselect with '-m "not slow_test"')
EOF
# Cross-compiling for ARM: set up the cross toolchain and QEMU sysroot, build
# zlib/sqlite/gmp into it, then build lightningd statically. Otherwise do a
# plain native build and run the test suite.
if [ "$TARGET_HOST" == "arm-linux-gnueabihf" ] || [ "$TARGET_HOST" == "aarch64-linux-gnu" ]
then
    export QEMU_LD_PREFIX=/usr/"$TARGET_HOST"/
    export MAKE_HOST="$TARGET_HOST"
    export BUILD=x86_64-pc-linux-gnu
    export AR="$TARGET_HOST"-ar
    export AS="$TARGET_HOST"-as
    export CC="$TARGET_HOST"-gcc
    export CXX="$TARGET_HOST"-g++
    export LD="$TARGET_HOST"-ld
    export STRIP="$TARGET_HOST"-strip
    export CONFIGURATION_WRAPPER=qemu-"${TARGET_HOST%%-*}"-static

    # zlib
    # NOTE(review): zlib.net removes superseded releases, so this URL will
    # 404 once 1.2.11 is no longer current — consider a pinned mirror.
    wget -q https://zlib.net/zlib-1.2.11.tar.gz
    tar xf zlib-1.2.11.tar.gz
    cd zlib-1.2.11 || exit 1
    ./configure --prefix="$QEMU_LD_PREFIX"
    make
    sudo make install
    cd .. || exit 1
    rm zlib-1.2.11.tar.gz && rm -rf zlib-1.2.11

    # sqlite: static, minimal feature set for the target
    wget -q https://www.sqlite.org/2018/sqlite-src-3260000.zip
    unzip -q sqlite-src-3260000.zip
    cd sqlite-src-3260000 || exit 1
    automake --add-missing --force-missing --copy || true
    ./configure --disable-tcl \
        --enable-static \
        --disable-readline \
        --disable-threadsafe \
        --disable-load-extension \
        --host="$TARGET_HOST" \
        --prefix="$QEMU_LD_PREFIX"
    make
    sudo make install
    cd .. || exit 1
    rm sqlite-src-3260000.zip
    rm -rf sqlite-src-3260000

    # gmp: --disable-assembly so one build works on any ARM variant
    wget -q https://gmplib.org/download/gmp/gmp-6.1.2.tar.xz
    tar xf gmp-6.1.2.tar.xz
    cd gmp-6.1.2 || exit 1
    ./configure --disable-assembly --prefix="$QEMU_LD_PREFIX" --host="$TARGET_HOST"
    make
    sudo make install
    cd ..
    rm gmp-6.1.2.tar.xz
    rm -rf gmp-6.1.2

    # Cross-compile lightningd itself; tests are not run for cross builds.
    ./configure CC="$TARGET_HOST-gcc" --enable-static
    make -j32 CC="$TARGET_HOST-gcc" > /dev/null
else
    # Native build and test run (eatmydata trades durability for speed).
    eatmydata make -j32
    # shellcheck disable=SC2086
    eatmydata $TEST_CMD
fi
|
declare const ResizeSensor;

// Shared drag state: the element currently being dragged, or null.
const state: {
    dragged: Element
} = {
    dragged: null
};

let dragIndex = 0;
for (const item of document.getElementsByClassName('drag')) {
    dragIndex++;
    item.setAttribute('draggable', 'true');
    item.setAttribute('id', 'drag-' + dragIndex);

    // IIFE captures this iteration's element and title for the resize callback.
    (element => {
        const title = 'Drag me #' + dragIndex;
        element.setAttribute('data-label', title);
        new ResizeSensor(element, (size) => {
            element.setAttribute('data-label', `${title} (${size.width}x${size.height})`);
        });
    })(item);

    item.addEventListener('dragstart', (event: DragEvent) => {
        state.dragged = <Element>event.target;
        event.dataTransfer.setData('text', 'thanks firefox');
        event.dataTransfer.dropEffect = 'move';
    });
}
// Wire every drop container: accept the dragged element on drop and keep the
// hover highlight in sync while a drag passes over.
for (const item of document.getElementsByClassName('container')) {
    (element => {
        item.addEventListener('drop', (event) => {
            event.preventDefault();
            item.classList.remove('drag-hover');
            state.dragged.parentNode.removeChild(state.dragged);
            element.appendChild(state.dragged);
            state.dragged = null;
        });
    })(item);
    item.addEventListener('dragleave', (event) => {
        item.classList.remove('drag-hover');
    });
    // Fix: the original registered two separate dragover listeners (one to
    // add the hover class, one to preventDefault). A single listener does
    // both; preventDefault() is what marks this element as a valid drop target.
    item.addEventListener('dragover', (event) => {
        item.classList.add('drag-hover');
        event.preventDefault();
    });
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.