text stringlengths 1 1.05M |
|---|
<reponame>asadmshah/moviegur<filename>app/src/prod/java/com/asadmshah/moviegur/utils/ResourceSupplierImpl.java
package com.asadmshah.moviegur.utils;
import android.content.Context;
import com.asadmshah.moviegur.R;
/**
 * Production {@link ResourceSupplier} that reads the library titles and
 * descriptions from the app's Android string-array resources.
 */
public class ResourceSupplierImpl implements ResourceSupplier {

    /** Context used to resolve resources; expected to outlive this supplier. */
    private final Context appContext;

    public ResourceSupplierImpl(Context context) {
        this.appContext = context;
    }

    /** @return the entries of {@code R.array.library_titles}. */
    @Override
    public String[] getLibraryTitles() {
        return appContext.getResources().getStringArray(R.array.library_titles);
    }

    /** @return the entries of {@code R.array.library_descriptions}. */
    @Override
    public String[] getLibraryDescriptions() {
        return appContext.getResources().getStringArray(R.array.library_descriptions);
    }
}
|
#!/usr/bin/env bash
# Provision a Munin master: install the package, point it at the standard
# directories, register the munin node, and restart the web/monitoring
# services.
#
# Fixes over the original: fail fast on errors (set -e), modern $() command
# substitution instead of backticks, and quoting so a script path containing
# spaces no longer breaks the cd.
set -euo pipefail

# Change to the directory this script lives in so relative paths
# (munin-htpasswd) resolve regardless of the caller's CWD.
sd=$(dirname "$0")
cd "$sd"

# Install Munin
apt-get install -y munin

# Configure directories and the munin node, appended in one pass
# (https://github.com/redgeoff/munin-node-vagrant)
{
    echo "dbdir /var/lib/munin"
    echo "htmldir /var/cache/munin/www"
    echo "logdir /var/log/munin"
    echo "rundir /var/run/munin"
    echo "tmpldir /etc/munin/templates"
    echo "[MuninNode]"
    echo "    address 192.168.50.20"
    echo "    use_node_name yes"
} >> /etc/munin/munin.conf

# Change name of main tree
sed -i "s'localhost.localdomain'MuninMaster'" /etc/munin/munin.conf

# Configure authentication passwords
cp munin-htpasswd /etc/munin/munin-htpasswd

# Restart nginx and Munin
service nginx restart
service munin-node restart
|
#pragma once
#define HAVE_CPP11_INITIALIZER_LISTS
#define HAVE_CPP11_STD_ARRAY
#include "nifty/marray/andres/marray.hxx"
#include "nifty/tools/runtime_check.hxx"
namespace nifty{
namespace marray{
using namespace andres;
}
namespace tools{
// Extract the sub-block [beginCoord, endCoord) of `array` as a view.
//
// `subarray` is re-seated to reference the requested region; no data is
// copied. `beginCoord` / `endCoord` must have one entry per dimension with
// beginCoord[d] <= endCoord[d].
template<class T, class COORD>
inline void readSubarray(
    const marray::View<T> array,
    const COORD & beginCoord,
    const COORD & endCoord,
    marray::View<T> & subarray
){
    const auto dim = array.dimension();
    std::vector<int64_t> subShape(dim);
    for(auto d = 0; d < dim; ++d){
        // Window extent is end - begin. The original computed begin - end,
        // which yields negative extents (writeSubarray below has it right).
        subShape[d] = endCoord[d] - beginCoord[d];
    }
    subarray = array.view(beginCoord.begin(), subShape.begin());
}
// Copy `data` element-wise into the sub-block [beginCoord, endCoord) of
// `array` (unlike readSubarray, which only creates a view). The shape of
// `data` must match the window exactly; this is checked per dimension.
template<class T, class COORD>
inline void writeSubarray(
    marray::View<T> array,
    const COORD & beginCoord,
    const COORD & endCoord,
    const marray::View<T> & data
){
    const auto dim = array.dimension();
    // target window shape, per dimension: end - begin
    COORD subShape;
    for(auto d = 0 ; d<dim; ++d){
        subShape[d] = endCoord[d] - beginCoord[d];
    }
    // the source must match the target window exactly
    for(int d = 0; d < dim; ++d )
        NIFTY_CHECK_OP(subShape[d],==,data.shape(d),"Shapes don't match!")
    auto subarray = array.view(beginCoord.begin(), subShape.begin());
    // for dim < 4 we can use forEachCoordinate (this only works if COORD is nifty::array::StaticArray)
    if(dim <= 4) {
        forEachCoordinate(subShape, [&](const COORD & coord){
            subarray(coord.asStdArray()) = data(coord.asStdArray());
        });
    }
    else { // otherwise use iterators
        auto itArray = subarray.begin();
        auto itData = data.begin();
        for(; itArray != subarray.end(); ++itArray, ++itData)
            *itArray = *itData;
    }
}
// dummy function, because we don't lock for marrays
// Locked variant of readSubarray. Plain marrays need no locking (see the
// "dummy function" note above this group), so this simply forwards to the
// unlocked implementation.
template<class T, class COORD>
inline void readSubarrayLocked(
    const marray::View<T> arr,
    const COORD & begin,
    const COORD & end,
    marray::View<T> & out
){
    readSubarray(arr, begin, end, out);
}
template<class T, class COORD>
inline void writeSubarrayLocked(
marray::View<T> array,
const COORD & beginCoord,
const COORD & endCoord,
const marray::View<T> & data
){
writeSubarray(array,beginCoord,endCoord,data);
}
}
}
|
<reponame>Blackmesa-Canteen/demo-market-spring-cloud<gh_stars>0
package com.learn.demomarket.coupon.service;
import com.baomidou.mybatisplus.extension.service.IService;
import com.learn.common.utils.PageUtils;
import com.learn.demomarket.coupon.entity.SpuBoundsEntity;
import java.util.Map;
/**
* 商品spu积分设置
*
* @author 996worker
* @email
* @date 2021-11-29 15:42:53
*/
public interface SpuBoundsService extends IService<SpuBoundsEntity> {

    /**
     * Returns one page of SPU bounds records.
     *
     * @param params query parameters — presumably paging and filter keys;
     *               confirm against the PageUtils/query implementation
     * @return the matching page wrapped in {@link PageUtils}
     */
    PageUtils queryPage(Map<String, Object> params);
}
|
<filename>filebuffer_test.go
package filebuffer
import (
"bytes"
"io"
"io/ioutil"
"math/rand"
"os"
"testing"
"time"
)
// TestFileBuffer exercises ReadAt/WriteAt/Flush of the paged file buffer
// over a small temp file whose page i is filled with the digit '0'+i.
func TestFileBuffer(t *testing.T) {
	const fileSize = 50
	const pageSize = 8
	file, err := ioutil.TempFile("", "filebuffer-test.dat")
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		file.Close()
		os.Remove(file.Name())
	})
	// Fill each page of the backing file with its page index as a digit.
	buf := make([]byte, fileSize)
	pageCount := int64(fileSize+pageSize-1) / pageSize
	for i := int64(0); i < pageCount; i++ {
		off := i * pageSize
		end := (i + 1) * pageSize
		if end > fileSize {
			end = fileSize
		}
		for j := off; j < end; j++ {
			buf[j] = '0' + byte(i)
		}
	}
	if _, err := file.WriteAt(buf, 0); err != nil {
		t.Fatal(err)
	}
	b := New(file, fileSize, pageSize)
	// NOTE(review): as transcribed this assigns to an undeclared variable
	// ("data = buf"); upstream presumably reads "data := buf" — confirm.
	data = buf
	_, err = b.ReadAt(data, -1)
	if want := "negative offset"; err == nil || err.Error() != want {
		t.Errorf("unexpected error: got=%v, want=%s", err, want)
	}
	// Reads past the end of the file must be rejected.
	data = buf[:2]
	_, err = b.ReadAt(data, fileSize-1)
	if want := "offset and length out of bounds"; err == nil || err.Error() != want {
		t.Errorf("unexpected error: got=%v, want=%s", err, want)
	}
	// Read spanning a page boundary (offset 7 crosses pages 0 and 1).
	data = buf[:2]
	if _, err := b.ReadAt(data, 7); err != nil {
		t.Fatal(err)
	} else if got, want := data, []byte("01"); !bytes.Equal(got, want) {
		t.Errorf("data unmatch for GetAt(data, 2), got=%s, want=%s", string(got), string(want))
	}
	// Buffered write; its effect on disk is verified after Flush below.
	if _, err := b.WriteAt([]byte("aa"), 1); err != nil {
		t.Fatal(err)
	}
	// Page-aligned read of a full page.
	data = buf[:pageSize]
	if _, err := b.ReadAt(data, 4*pageSize); err != nil {
		t.Fatal(err)
	} else if got, want := data, []byte("44444444"); !bytes.Equal(got, want) {
		t.Errorf("data unmatch for GetAt(data, 4*pageSize), got=%s, want=%s", string(got), string(want))
	}
	// Unaligned read to the end of the file.
	data = buf[:fileSize-(4*pageSize-1)]
	if _, err := b.ReadAt(data, 4*pageSize-1); err != nil {
		t.Fatal(err)
	} else if got, want := data, []byte("3444444445555555566"); !bytes.Equal(got, want) {
		t.Errorf("data unmatch for GetAt(data, 4*pageSize-1), got=%s, want=%s", string(got), string(want))
	}
	// Re-read of an already-cached page must still be correct.
	data = buf[:pageSize]
	if _, err := b.ReadAt(data, 4*pageSize); err != nil {
		t.Fatal(err)
	} else if got, want := data, []byte("44444444"); !bytes.Equal(got, want) {
		t.Errorf("data unmatch for GetAt(data, 4*pageSize), got=%s, want=%s", string(got), string(want))
	}
	// Negative write offsets must be rejected.
	_, err = b.WriteAt(data, -1)
	if want := "negative offset"; err == nil || err.Error() != want {
		t.Errorf("unexpected error: got=%v, want=%s", err, want)
	}
	// Write touching the final (partial) page.
	if _, err := b.WriteAt([]byte("bbb"), fileSize-3); err != nil {
		t.Fatal(err)
	}
	if err := b.Flush(); err != nil {
		t.Fatal(err)
	}
	// After Flush both buffered writes ("aa" at 1, "bbb" at the end) must
	// be visible in the file itself.
	gotBytes, err := ioutil.ReadFile(file.Name())
	if err != nil {
		t.Fatal(err)
	}
	if got, want := string(gotBytes), "0aa00000111111112222222233333333444444445555555bbb"; got != want {
		t.Errorf("buf unmatch, got=%s, want=%s", got, want)
	}
}
// TestFileBufferOverMaxIov writes and reads a file slightly larger than
// maxIov pages in a single call, exercising the path that must batch
// vectored I/O into multiple system calls.
func TestFileBufferOverMaxIov(t *testing.T) {
	file, err := ioutil.TempFile("", "filebuffer-test-over-max-iov.dat")
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		// Fix: the file handle was previously leaked; close it before
		// removing, matching TestFileBuffer's cleanup.
		file.Close()
		os.Remove(file.Name())
	})
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	const maxIov = 1024
	const pageSize = 4096
	// One byte past maxIov full pages forces more than maxIov iovecs.
	const fileSize = maxIov*pageSize + 1
	if _, err := io.CopyN(file, rnd, fileSize); err != nil {
		t.Fatal(err)
	}
	pBuf := New(file, fileSize, pageSize)
	// The initial content read through the buffer must match the raw file.
	want, err := ioutil.ReadFile(file.Name())
	if err != nil {
		t.Fatal(err)
	}
	got := make([]byte, fileSize)
	if _, err := pBuf.ReadAt(got, 0); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(got, want) {
		t.Errorf("WholeFileBuffer inital content unmatch")
	}
	// Overwrite the whole file through the buffer and flush; the file on
	// disk must then match the new content.
	if _, err := rnd.Read(want); err != nil {
		t.Fatal(err)
	}
	if _, err := pBuf.WriteAt(want, 0); err != nil {
		t.Fatal(err)
	}
	if err := pBuf.Flush(); err != nil {
		t.Fatal(err)
	}
	got, err = ioutil.ReadFile(file.Name())
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(got, want) {
		t.Errorf("PagedFileBuffer modified content unmatch")
	}
}
|
const Sequelize = require('sequelize');
const db = require('../../app/seeders/db.js');
// Find an existing FunStuff row by link, or create one tied to the given
// session. Resolves with the row's plain attributes; on a DB error the
// error is logged and the promise resolves with undefined (best effort).
const createFunStuff = async (sessionId, link, type) => {
  try {
    const results = await db.FunStuff.findOrCreate({
      where: {
        link: link
      },
      defaults: {
        link,
        type,
        id_session: sessionId,
      },
    });
    return results[0].dataValues;
  } catch (err) {
    console.error(err);
  }
};
// Fetch all FunStuff rows for a session as an array of plain attribute
// objects. On a DB error, logs and resolves with undefined (best effort).
const findFunStuff = async (sessionId) => {
  try {
    const results = await db.FunStuff.findAll({
      where: {
        id_session: sessionId
      },
    });
    return results.map(el => el.dataValues);
  } catch (err) {
    console.error(err);
  }
};
// Delete a FunStuff row by primary key.
// Fix: the promise is now returned (it was previously fired and forgotten),
// so callers can await completion — consistent with createFunStuff and
// findFunStuff above. Errors are still logged and swallowed (best effort).
const deleteFunStuff = (id) => {
  return db.FunStuff.destroy({
    where: {
      id: id
    }
  })
    .then(result => result)
    .catch(err => console.error(err));
};
// Public API of this helper module.
module.exports = { createFunStuff, findFunStuff, deleteFunStuff };
|
package io.swagger.model;
import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonCreator;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import io.swagger.model.ValidTicket;
import org.springframework.validation.annotation.Validated;
import javax.validation.Valid;
import javax.validation.constraints.*;
/**
 * ApiResponseValidTicket
 *
 * Response wrapper pairing a {@code secret} string with a validated
 * {@link ValidTicket}.
 *
 * NOTE: generated by swagger-codegen — manual edits will be lost when the
 * model is regenerated.
 */
@Validated
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2019-10-14T21:38:57.474Z")
public class ApiResponseValidTicket {

    @JsonProperty("secret")
    private String secret = null;

    @JsonProperty("ticket")
    private ValidTicket ticket = null;

    /** Fluent setter for {@code secret}; returns {@code this} for chaining. */
    public ApiResponseValidTicket secret(String secret) {
        this.secret = secret;
        return this;
    }

    /**
     * Get secret
     * @return secret
     **/
    @ApiModelProperty(example = "secret", value = "")
    public String getSecret() {
        return secret;
    }

    public void setSecret(String secret) {
        this.secret = secret;
    }

    /** Fluent setter for {@code ticket}; returns {@code this} for chaining. */
    public ApiResponseValidTicket ticket(ValidTicket ticket) {
        this.ticket = ticket;
        return this;
    }

    /**
     * Get ticket
     * @return ticket
     **/
    @ApiModelProperty(value = "")
    @Valid
    public ValidTicket getTicket() {
        return ticket;
    }

    public void setTicket(ValidTicket ticket) {
        this.ticket = ticket;
    }

    /** Value equality on (secret, ticket). */
    @Override
    public boolean equals(java.lang.Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        ApiResponseValidTicket apiResponseValidTicket = (ApiResponseValidTicket) o;
        return Objects.equals(this.secret, apiResponseValidTicket.secret) &&
            Objects.equals(this.ticket, apiResponseValidTicket.ticket);
    }

    @Override
    public int hashCode() {
        return Objects.hash(secret, ticket);
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("class ApiResponseValidTicket {\n");
        sb.append(" secret: ").append(toIndentedString(secret)).append("\n");
        sb.append(" ticket: ").append(toIndentedString(ticket)).append("\n");
        sb.append("}");
        return sb.toString();
    }

    /**
     * Convert the given object to string with each line indented by 4 spaces
     * (except the first line).
     */
    private String toIndentedString(java.lang.Object o) {
        if (o == null) {
            return "null";
        }
        return o.toString().replace("\n", "\n ");
    }
}
|
#!/bin/bash
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Print "true" if $1 looks like an IPv6 address (contains a colon),
# "false" otherwise.
function is_ip6 () {
    case "$1" in
        *:*) echo "true" ;;
        *)   echo "false" ;;
    esac
}
# Print $1 in green followed by a newline.
# Fix: the message is now passed as a printf argument instead of being
# interpolated into the format string, so '%' characters in the message can
# no longer corrupt the output.
function green ()
{
    printf "\e[0;32m%s\e[0m\n" "$1"
}
# Print $1 in red followed by a newline.
# Fix: message passed as a printf argument, not interpolated into the
# format string ('%' in the message no longer corrupts the output).
function red ()
{
    printf "\e[0;31m%s\e[0m\n" "$1"
}
# Bracket an IPv6 address for use in host:port strings ("[::1]"); leave
# IPv4 addresses and hostnames untouched.
# Fix: $1 is now quoted when passed to is_ip6 so the address is not subject
# to word splitting or pathname expansion.
function 6safe () {
    if [[ $(is_ip6 "$1") = true ]]; then
        echo "[$1]"
    else
        echo "$1"
    fi
}
# Split the comma-separated POD_CIDR / SERVICE_CIDR / NODE_IP environment
# variables:
#   - FIRST_POD_CIDR / FIRST_SERVICE_CIDR / FIRST_NODE_IP get the first entry
#   - CLUSTER_POD_CIDR4 / CLUSTER_POD_CIDR6 get the last v4/v6 pod CIDR seen
#   - IS_DUAL becomes true when both pod CIDR families are present
function parse_variables ()
{
    FIRST_POD_CIDR=""
    for cidr in $(echo $POD_CIDR | sed 's/,/ /g' ) ; do
        if [[ x$FIRST_POD_CIDR == x ]]; then
            FIRST_POD_CIDR=$cidr
        fi
        if [[ $(is_ip6 $cidr) == true ]]; then
            CLUSTER_POD_CIDR6=$cidr
        else
            CLUSTER_POD_CIDR4=$cidr
        fi
    done
    FIRST_SERVICE_CIDR=""
    for cidr in $(echo $SERVICE_CIDR | sed 's/,/ /g' ) ; do
        if [[ x$FIRST_SERVICE_CIDR == x ]]; then
            FIRST_SERVICE_CIDR=$cidr
        fi
    done
    FIRST_NODE_IP=""
    for cidr in $(echo $NODE_IP | sed 's/,/ /g' ) ; do
        if [[ x$FIRST_NODE_IP == x ]]; then
            # strip the /mask suffix to get a bare address
            FIRST_NODE_IP=${cidr%%/*}
        fi
    done
    if [[ x$CLUSTER_POD_CIDR4 != x ]] && [[ x$CLUSTER_POD_CIDR6 != x ]]; then
        IS_DUAL=true
    else
        IS_DUAL=false
    fi
}
# Prepare the Linux dataplane interface for VPP: load vfio-pci (enabling
# unsafe no-IOMMU mode), bounce the interface, flush its addresses and
# re-add every address from NODE_IP. When AVF=yes, also create a VF via
# calico_avf_setup; remaining arguments ($@) are forwarded to it
# (e.g. "master"/"slave").
function calico_if_linux_setup ()
{
    sudo modprobe vfio-pci
    echo Y | sudo tee /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
    sudo ip link set $VPP_DATAPLANE_IF down
    sudo ip link set $VPP_DATAPLANE_IF up
    sudo ip addr flush dev $VPP_DATAPLANE_IF
    for cidr in $(echo $NODE_IP | sed 's/,/ /g' ) ; do
        sudo ip addr add $cidr dev $VPP_DATAPLANE_IF
    done
    if [ x$AVF = xyes ]; then
        calico_avf_setup $VPP_DATAPLANE_IF 1 $@
    fi
}
# Create (if not already present) an AVF virtual function for interface $1
# with VF index $2; $3 == "master" selects a distinct MAC suffix (:00 vs :01).
# NOTE(review): the VF PCI id is cached under /home/nskrzypc/vpp — a
# hard-coded user home directory; confirm this path before running on
# another machine.
function calico_avf_setup ()
{
    DEVNAME=$1
    # PCI address of the parent device, resolved through sysfs
    PCI=$(readlink /sys/class/net/$DEVNAME/device | cut -d '/' -f 4)
    AVF_PCI=nope
    if [ -f /home/nskrzypc/vpp/vfpci$2 ]; then
        AVF_PCI=$(cat /home/nskrzypc/vpp/vfpci$2)
    fi
    # only create the VF when the cached PCI device no longer exists
    if [ ! -d /sys/bus/pci/devices/$AVF_PCI ]; then
        if [ x$3 = xmaster ]; then
            sudo $SCRIPTDIR/utils/avf.sh $PCI 00:11:22:33:4$2:00
        else
            sudo $SCRIPTDIR/utils/avf.sh $PCI 00:11:22:33:4$2:01
        fi
        mv -f /home/nskrzypc/vpp/vfpci /home/nskrzypc/vpp/vfpci$2
    fi
}
# Render the kubeadm config template $1 to /tmp/ClusterConf.yaml through
# envsubst, exporting every variable the templates reference.
function raw_create_cluster_conf ()
{
    # node ip
    export MAIN_NODE_IP=$MAIN_NODE_IP
    export SAFE6_MAIN_NODE_IP="$(6safe $MAIN_NODE_IP)"
    # node ip
    export FIRST_NODE_IP=$FIRST_NODE_IP
    export SAFE6_FIRST_NODE_IP="$(6safe $FIRST_NODE_IP)"
    # per-family node CIDR mask sizes: dual-stack sets both, v6-only uses
    # /120, v4-only uses /16
    if [[ $IS_DUAL == true ]]; then
        export NODE_CIDR_MASK_SIZE4=24
        export NODE_CIDR_MASK_SIZE6=120
        export NODE_CIDR_MASK_SIZE=0
    elif [[ x$CLUSTER_POD_CIDR6 != x ]]; then
        export NODE_CIDR_MASK_SIZE4=0
        export NODE_CIDR_MASK_SIZE6=0
        export NODE_CIDR_MASK_SIZE=120
    else
        export NODE_CIDR_MASK_SIZE4=0
        export NODE_CIDR_MASK_SIZE6=0
        export NODE_CIDR_MASK_SIZE=16
    fi
    # pod cidr
    export POD_CIDR=$POD_CIDR
    export FIRST_POD_CIDR=$FIRST_POD_CIDR
    #
    export SERVICE_CIDR=$SERVICE_CIDR
    export FIRST_SERVICE_CIDR=$FIRST_SERVICE_CIDR
    export NODE_NAME=$NODE_NAME
    export DNS_TYPE=$DNS_TYPE
    export IS_DUAL=$IS_DUAL
    # default kubernetes version unless the caller overrides it
    export K8_VERSION=${K8_VERSION:=v1.17.4}
    cat $1 | envsubst | sudo tee /tmp/ClusterConf.yaml > /dev/null
}
# Initialize this machine as the Kubernetes master: set up the dataplane
# interface, render the init config, run kubeadm init (verbose when
# VERBOSE=yes) and install the admin kubeconfig for the current user.
# Extra arguments ($@) are passed through to kubeadm.
function raw_create_master_k8 ()
{
    calico_if_linux_setup master
    raw_create_cluster_conf $SCRIPTDIR/kubeadm/ClusterNewConfiguration.template.yaml
    if [ x$VERBOSE = xyes ]; then
        sudo kubeadm init -v 100 --config /tmp/ClusterConf.yaml $@
    else
        sudo kubeadm init --config /tmp/ClusterConf.yaml $@
    fi
    # replace any previous kubeconfig with the freshly generated admin one
    rm -rf $HOME/.kube
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    kubectl config set-context --current --namespace=kube-system
}
# Join this machine to the existing cluster at MAIN_NODE_IP:6443: set up
# the dataplane interface, render the join config and run kubeadm join
# (verbose when VERBOSE=yes). Extra arguments ($@) go to kubeadm.
function raw_join_master_k8 ()
{
    calico_if_linux_setup slave
    raw_create_cluster_conf $SCRIPTDIR/kubeadm/ClusterJoinConfiguration.template.yaml
    if [ x$VERBOSE = xyes ]; then
        sudo kubeadm join -v 100 $MAIN_NODE_IP:6443 --config /tmp/ClusterConf.yaml $@
    else
        sudo kubeadm join $MAIN_NODE_IP:6443 --config /tmp/ClusterConf.yaml $@
    fi
}
# Entry point: parse "up|dn" plus KEY=value arguments, then create, join
# or tear down the cluster depending on ACTION and MAIN_NODE_IP.
function provision_cli ()
{
    # defaults, overridable via KEY=value arguments
    NODE_NAME=node
    POD_CIDR=10.0.0.0/16
    SERVICE_CIDR=10.96.0.0/16
    DNS_TYPE=CoreDNS
    if [[ $1 = up ]]; then
        ACTION=up
    elif [[ $1 = dn ]]; then
        ACTION=dn
    else
        print_usage_and_exit;
    fi
    shift
    # evaluate the remaining KEY=value pairs (IF=..., NODE_IP=..., ...)
    while (( "$#" )) ; do
        eval $1
        shift
    done
    VPP_DATAPLANE_IF=$IF
    # Fix: this used to read "x$ACTION = up", which compares "xup" to "up"
    # and can never match — so a missing IF= was silently accepted instead
    # of printing usage. Both sides now carry the x prefix.
    if [[ x$VPP_DATAPLANE_IF = x ]] && [[ x$ACTION = xup ]]; then
        print_usage_and_exit
    fi
    if [[ $ACTION = up ]]; then
        green "Creating cluster"
        green "master ip : $MAIN_NODE_IP"
        green "node ip : $NODE_IP"
        green "pod cidr : $POD_CIDR"
        green "service cidr : $SERVICE_CIDR"
    else
        green "Teardown cluster"
    fi
    parse_variables
    # no MAIN_NODE_IP means this node bootstraps the cluster; otherwise join
    if [[ $ACTION = up ]] && [[ x$MAIN_NODE_IP = x ]]; then
        raw_create_master_k8
    elif [[ $ACTION = up ]]; then
        raw_join_master_k8
    elif [[ $ACTION = dn ]]; then
        sudo kubeadm reset -f
        sudo rm -rf /etc/cni/net.d/
    fi
}
# Print CLI usage and terminate with a non-zero status.
function print_usage_and_exit ()
{
    cat <<'EOF'
Usage :
provision.sh [up|dn] [OPTIONS]

On the first node - provision.sh up IF=eth0 NODE_IP=10.0.0.1/24
On the second - provision.sh up IF=eth0 NODE_IP=10.0.0.2/24 MAIN_NODE_IP=10.0.0.1 - start master node <IP>

To drain - provision.sh dn

Options are :
IF - linux if name to use
NODE_IP - ip of this node
MAIN_NODE_IP - ip of the master node to join (if any)
POD_CIDR - CIDR for pods (defaults to 10.0.0.0/16)
SERVICE_CIDR - CIDR for services (defaults to 10.96.0.0/16)
DNS_TYPE - CoreDNS or kube-dns
AVF - if 'yes' Create a VF for vpp's avf driver
VERBOSE - verbose
EOF
    exit 1
}
# Forward the script's arguments verbatim. Fix: quoting "$@" preserves
# argument boundaries; bare $@ re-splits arguments containing spaces.
provision_cli "$@"
|
<filename>parity_rpc.rb
require "httparty"
require "json"
# Raised when the JSON-RPC batch response carries a top-level error.
class ParityRPCError < StandardError
end
# a custom JSONRPC interface to parity that does only what we need
class ParityRPC
  # opts[:url] - endpoint to POST JSON-RPC batches to
  def initialize(opts)
    @id = 0
    @url = opts[:url]
    # method signature -> 4-byte selector, so keccak runs once per method
    @method_cache = {}
  end

  # Issue several calls as one JSON-RPC batch. `calls` maps a result name
  # to either a Solidity method signature string (sent as eth_call against
  # contract_addr) or a pre-built [method, params] pair.
  def batch_call(calls, contract_addr: nil)
    batch(calls.collect do |name, method|
      if method.is_a?(String)
        [name, ["eth_call", [{to: contract_addr, data: solidity_signature(method)}, "latest"]]]
      else
        [name, method]
      end
    end.to_h)
  end

  private

  # POST the batch, raise ParityRPCError on a batch-level error, drop
  # per-call errors, and decode the hex results keyed by caller name.
  def batch(method_params)
    ids = {}
    rpc_calls = method_params.collect do |name, (method, params)|
      id = next_id
      ids[id] = name
      {method: method, params: params, id: id, jsonrpc: "2.0"}
    end
    body = rpc_calls.to_json
    headers = {"Content-Type" => "application/json"}
    r = HTTParty.post(@url, body: body, headers: headers, format: :plain)
    j = JSON.parse(r, symbolize_names: true)
    raise ParityRPCError.new(j[:error][:message]) if j.is_a?(Hash) && j[:error] # overall error for batch request
    decode(j.collect do |br|
      next if br[:error] # turn errors into nils, not great
      [ids[br[:id]], br[:result]]
    end.compact.to_h)
  end

  # Result keys decoded as ABI strings, addresses, or left as raw hex.
  # NOTE(review): Set and (below) Digest::SHA3 are used but neither "set"
  # nor "digest/sha3" is required in this file — confirm they are loaded
  # elsewhere in the application.
  DECODE_STRING = Set[:name, :symbol]
  DECODE_ADDR = Set[:lastRewardTo]
  NO_DECODE_HEX = Set[:challengeNumber]

  # Post-process each hex result according to the key sets above; any key
  # not listed is decoded as an integer.
  def decode(data)
    data.collect do |k, v|
      if v.is_a?(String)
        if DECODE_ADDR.include?(k)
          v = decode_addr(v)
        elsif DECODE_STRING.include?(k)
          v = decode_string(v)
        elsif !NO_DECODE_HEX.include?(k)
          v = v.to_i(16)
        end
      end
      [k, v]
    end.to_h
  end

  # 4-byte function selector: first 4 bytes of keccak-256 of the signature.
  def solidity_signature(method)
    @method_cache[method] ||= "0x" + Digest::SHA3.hexdigest(method, 256)[0..7]
  end

  # Monotonic JSON-RPC request id.
  def next_id
    @id += 1
  end

  # Decode an ABI-encoded dynamic string: strip "0x", skip the offset word,
  # read the length word, then unpack that many bytes of the data word.
  def decode_string(hex_repr)
    hex_repr = hex_repr[2..-1]
    _, length, data = hex_repr.scan(%r(.{64}))
    [data].pack("H*")[0...length.to_i(16)]
  end

  # Normalize a hex value to a 0x-prefixed, zero-padded 40-digit address.
  def decode_addr(hex_repr)
    "0x%040x" % hex_repr.to_i(16)
  end
end
|
<gh_stars>0
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"sync"
"github.com/mxschmitt/try-playwright/internal/workertypes"
log "github.com/sirupsen/logrus"
"github.com/google/uuid"
"github.com/streadway/amqp"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/utils/pointer"
)
// Workers is a fixed-size pool of Worker instances for one target language.
// Replies from all workers arrive on a single AMQP reply queue and are
// fanned out to per-worker channels via the replies map, which is guarded
// by repliesMu.
type Workers struct {
	language           workertypes.WorkerLanguage
	workers            chan *Worker // buffered pool; receive to claim a worker
	amqpReplyQueueName string
	amqpChannel        *amqp.Channel
	k8ClientSet        kubernetes.Interface

	repliesMu sync.Mutex
	replies   map[string]chan *workertypes.WorkerResponsePayload // keyed by worker id / correlation id
}
// newWorkers builds a pool of workerCount workers for the given language:
// it wires up the shared AMQP reply consumer first, then pre-creates the
// initial workers (each backed by its own Kubernetes pod).
func newWorkers(language workertypes.WorkerLanguage, workerCount int, k8ClientSet kubernetes.Interface, amqpChannel *amqp.Channel) (*Workers, error) {
	w := &Workers{
		language:    language,
		replies:     make(map[string]chan *workertypes.WorkerResponsePayload),
		k8ClientSet: k8ClientSet,
		amqpChannel: amqpChannel,
		workers:     make(chan *Worker, workerCount),
	}
	if err := w.consumeReplies(); err != nil {
		return nil, fmt.Errorf("could not consume replies: %w", err)
	}
	if err := w.AddWorkers(workerCount); err != nil {
		return nil, fmt.Errorf("could not add initial workers: %w", err)
	}
	return w, nil
}
// consumeReplies declares an exclusive, server-named reply queue and starts
// a goroutine that routes each incoming reply to the per-worker channel
// keyed by the message's correlation id. Messages with no registered
// channel or with malformed JSON are logged and dropped.
func (w *Workers) consumeReplies() error {
	amqpReplyQueue, err := w.amqpChannel.QueueDeclare(
		"",    // name
		false, // durable
		false, // delete when unused
		true,  // exclusive
		false, // noWait
		nil,   // arguments
	)
	if err != nil {
		return fmt.Errorf("Failed to declare reply queue: %w", err)
	}
	w.amqpReplyQueueName = amqpReplyQueue.Name
	msgs, err := w.amqpChannel.Consume(
		amqpReplyQueue.Name, // queue
		"",                  // consumer
		true,                // auto-ack
		false,               // exclusive
		false,               // no-local
		false,               // no-wait
		nil,                 // args
	)
	if err != nil {
		return fmt.Errorf("Failed to register a consumer: %w", err)
	}
	go func() {
		for msg := range msgs {
			log.Printf("received rpc callback, corr id: %v", msg.CorrelationId)
			// look up the reply channel under the mutex; send outside it
			w.repliesMu.Lock()
			replyChan, ok := w.replies[msg.CorrelationId]
			w.repliesMu.Unlock()
			if !ok {
				log.Printf("no reply channel exists for worker %s", msg.CorrelationId)
				continue
			}
			var reply *workertypes.WorkerResponsePayload
			if err := json.Unmarshal(msg.Body, &reply); err != nil {
				log.Printf("could not unmarshal reply json: %v", err)
				continue
			}
			replyChan <- reply
		}
	}()
	return nil
}
// AddWorkers creates `amount` fresh workers and places them into the pool.
// It returns on the first failure; workers created earlier in the same
// call remain in the pool.
func (w *Workers) AddWorkers(amount int) error {
	for created := 0; created < amount; created++ {
		nw, err := newWorker(w)
		if err != nil {
			return fmt.Errorf("could not create new worker: %w", err)
		}
		w.workers <- nw
	}
	return nil
}
// GetCh exposes the pool as a receive-only channel; consumers take a
// worker off the channel to claim it.
func (w *Workers) GetCh() <-chan *Worker {
	return w.workers
}
// Cleanup closes the pool channel and tears down every worker still
// buffered in it (the range terminates once the closed channel drains).
// The pool must not be used after Cleanup; stops at the first error.
func (w *Workers) Cleanup() error {
	close(w.workers)
	for worker := range w.workers {
		if err := worker.Cleanup(); err != nil {
			return fmt.Errorf("could not cleanup worker: %w", err)
		}
	}
	return nil
}
// Worker is a single sandboxed execution pod plus its AMQP identity: the
// worker id doubles as the RPC queue suffix and the reply correlation id.
type Worker struct {
	id       string
	workers  *Workers // owning pool (shared channel, replies map, clients)
	pod      *v1.Pod  // set by createPod
	language workertypes.WorkerLanguage
}
// newWorker registers a buffered (capacity 1) reply channel and a dedicated
// RPC queue for a fresh worker id, then launches the worker's pod.
func newWorker(workers *Workers) (*Worker, error) {
	w := &Worker{
		id:       uuid.New().String(),
		workers:  workers,
		language: workers.language,
	}
	w.workers.repliesMu.Lock()
	w.workers.replies[w.id] = make(chan *workertypes.WorkerResponsePayload, 1)
	w.workers.repliesMu.Unlock()
	_, err := w.workers.amqpChannel.QueueDeclare(
		fmt.Sprintf("rpc_queue_%s", w.id), // name
		false,                             // durable
		true,                              // delete when unused
		false,                             // exclusive
		false,                             // noWait
		nil,                               // arguments
	)
	if err != nil {
		return nil, fmt.Errorf("could not declare worker queue: %w", err)
	}
	if err := w.createPod(); err != nil {
		return nil, fmt.Errorf("could not create pod: %w", err)
	}
	return w, nil
}
// createPod launches the worker's Kubernetes pod: a single container
// running the language-specific worker image, with the AMQP / proxy /
// file-service endpoints injected through environment variables and hard
// resource limits applied. The created pod is stored on the receiver.
func (w *Worker) createPod() error {
	var err error
	w.pod, err = w.workers.k8ClientSet.CoreV1().Pods(K8_NAMESPACE_NAME).Create(context.Background(), &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("worker-%s-", w.language),
			Labels: map[string]string{
				"role": "worker",
			},
		},
		Spec: v1.PodSpec{
			// workers are one-shot sandboxes: never restarted, and no
			// service-account token or service links are exposed to them
			RestartPolicy:                v1.RestartPolicy(v1.RestartPolicyNever),
			AutomountServiceAccountToken: pointer.BoolPtr(false),
			EnableServiceLinks:           pointer.BoolPtr(false),
			Containers: []v1.Container{
				{
					Name:            "worker",
					Image:           determineWorkerImageName(w.workers.language),
					ImagePullPolicy: v1.PullIfNotPresent,
					Env: []v1.EnvVar{
						{
							Name:  "WORKER_ID",
							Value: w.id,
						},
						{
							Name:  "AMQP_URL",
							Value: "amqp://rabbitmq:5672?heartbeat=5s",
						},
						{
							Name:  "WORKER_HTTP_PROXY",
							Value: "http://squid:3128",
						},
						{
							Name:  "FILE_SERVICE_URL",
							Value: "http://file:8080",
						},
					},
					Resources: v1.ResourceRequirements{
						Limits: v1.ResourceList{
							v1.ResourceMemory:           resource.MustParse("512Mi"),
							v1.ResourceCPU:              resource.MustParse("500m"),
							v1.ResourceEphemeralStorage: resource.MustParse("512Mi"),
						},
						Requests: v1.ResourceList{
							v1.ResourceMemory:           resource.MustParse("128Mi"),
							v1.ResourceCPU:              resource.MustParse("200m"),
							v1.ResourceEphemeralStorage: resource.MustParse("512Mi"),
						},
					},
				},
			},
		},
	}, metav1.CreateOptions{})
	if err != nil {
		return fmt.Errorf("could not create pod: %w", err)
	}
	return nil
}
// determineWorkerImageName builds the container image reference for the
// given language, tagged with the WORKER_IMAGE_TAG environment variable.
func determineWorkerImageName(language workertypes.WorkerLanguage) string {
	return fmt.Sprintf(
		"ghcr.io/mxschmitt/try-playwright/worker-%s:%s",
		language,
		os.Getenv("WORKER_IMAGE_TAG"),
	)
}
// Publish sends the given code to this worker's dedicated RPC queue. The
// message carries the worker id as correlation id and the pool's shared
// reply queue as ReplyTo, so the response can be routed back via Subscribe.
func (w *Worker) Publish(code string) error {
	msgBody, err := json.Marshal(map[string]string{
		"code": code,
	})
	if err != nil {
		// %w (was %v) so callers can unwrap the marshal error, matching
		// the error-wrapping convention used throughout this file.
		return fmt.Errorf("could not marshal json: %w", err)
	}
	if err := w.workers.amqpChannel.Publish(
		"",                                // exchange
		fmt.Sprintf("rpc_queue_%s", w.id), // routing key
		false,                             // mandatory
		false,                             // immediate
		amqp.Publishing{
			ContentType:   "application/json",
			CorrelationId: w.id,
			ReplyTo:       w.workers.amqpReplyQueueName,
			Body:          msgBody,
		}); err != nil {
		return fmt.Errorf("could not publish message: %w", err)
	}
	return nil
}
// Cleanup deletes the worker's pod immediately (grace period 0) and then
// removes its reply channel from the pool's routing map.
func (w *Worker) Cleanup() error {
	if err := w.workers.k8ClientSet.CoreV1().Pods(K8_NAMESPACE_NAME).
		Delete(context.Background(), w.pod.Name, metav1.DeleteOptions{
			GracePeriodSeconds: pointer.Int64Ptr(0),
		}); err != nil {
		return fmt.Errorf("could not delete pod: %w", err)
	}
	w.workers.repliesMu.Lock()
	delete(w.workers.replies, w.id)
	w.workers.repliesMu.Unlock()
	return nil
}
// Subscribe returns the channel on which this worker's RPC reply will be
// delivered (populated by the consumeReplies routing goroutine).
func (w *Worker) Subscribe() <-chan *workertypes.WorkerResponsePayload {
	w.workers.repliesMu.Lock()
	ch := w.workers.replies[w.id]
	w.workers.repliesMu.Unlock()
	return ch
}
|
<reponame>krzpiesiewicz/play-squeryl-twirl-forms
package controllers
import com.google.inject._
import akka.actor.ActorSystem
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import play.api.i18n.I18nSupport
import play.api.i18n.MessagesApi
import play.api.i18n.Messages
import play.api.http._
import play.api.http.Status._
import play.api.libs.concurrent.CustomExecutionContext
import org.squeryl.{ Session, SessionFactory }
import database.CustomTypeMode._
import database.{ DBSchema => schema }
import schema._
import schema.implicits._
import models._
import forms.TraitMapping
import Authentication.{AuthenticatedUserAction, UserActionBuilder, AdminAction}
/**
 * CRUD controller for addresses: listing, selecting, creating/updating and
 * deleting, all rendered on a single addresses page.
 */
@Singleton
class AddressController @Inject() (
  cc: ControllerComponents,
  implicit val executionContext: ExecutionContext,
  implicit val parser: BodyParsers.Default,
  implicit val userActionBuilder: UserActionBuilder) extends AbstractController(cc) with I18nSupport {

  import AddressController._

  // form mapping defined on the Address model
  val addressForm = Form(Address.mapping)

  /** List all addresses with no selection. */
  def addresses() = Action.async { implicit request: Request[AnyContent] => Future {
    addressesHelp()
  }}

  /** List all addresses with the given one pre-selected for editing. */
  def addressesWithOneSelected(addressId: Long) = Action.async { implicit request: Request[AnyContent] => Future {
    addressesHelp(Some(addressId))
  }}

  /** Delete an address (admin only); on failure re-render with an error. */
  def addressDelete(addressId: Long) = AdminAction { implicit request: Request[AnyContent] => Future {
    transaction {
      schema.addresses.delete(addressId)
    } match {
      case true => Redirect(routes.AddressController.addresses())
      case false => addressesHelp(
        Some(addressId),
        None,
        Some(Messages("addresses.delete.error", addressId)),
        Some(routes.AddressController.addressesWithOneSelected(addressId).url),
        Some(BAD_REQUEST))
    }
  }}

  /** Create or update an address from the submitted form (logged-in users). */
  def addressPost() = AuthenticatedUserAction { implicit request: Request[AnyContent] =>
    {
      // the optional id distinguishes update (Some) from create (None);
      // it is nested under the form's tag field
      val idOpt = Form(single(TraitMapping.tagFieldName -> nonEmptyText)).bindFromRequest.value match {
        case None => None
        case Some(tag) => Form(single(s"${tag}.idOpt" -> optional(longNumber))).bindFromRequest().value.getOrElse(None)
      }
      addressForm.bindFromRequest.fold(
        formWithErrors => Future {
          addressesHelp(idOpt, Some(formWithErrors), None, None, Some(BAD_REQUEST))
        },
        address => Future {
          // run the DB work inside the transaction but build the HTTP
          // result afterwards via the returned callback
          val callback = transaction {
            schema.addresses.insertOrUpdate(address) match {
              case Left(updated) => {
                val redirectionOpt = idOpt match {
                  case None => None
                  case Some(id) => Some(routes.AddressController.addressesWithOneSelected(id).url)
                }
                val (msgOpt, statusCode) = updated match {
                  case false => (Some(Messages("addresses.cannotUpdateAddress")), BAD_REQUEST)
                  case true => (None, OK)
                }
                () => addressesHelp(idOpt, None, msgOpt, redirectionOpt, Some(statusCode))
              }
              case Right(createdId) => () => addressesHelp(
                None,
                Some(addressForm.fill(address)),
                Some(Messages("addresses.addressCreated")),
                Some(routes.AddressController.addressesWithOneSelected(createdId).url),
                Some(CREATED))
            }
          }
          callback()
        })
    }
  }

  /**
   * Shared renderer for the addresses page: loads all rows inside a
   * transaction and, when an id is given, resolves the selected address
   * (or a not-found marker) before rendering with the given status.
   */
  def addressesHelp(
    addressIdOpt: Option[Long] = None,
    addressFormOpt: Option[Form[Address]] = None,
    msgOpt: Option[String] = None,
    urlToRedirectOpt: Option[String] = None,
    statusOpt: Option[Int] = None)(implicit request: Request[AnyContent]) = transaction {
    val listOfAddresses = schema.addresses.table.allRows
    val (addressProps, newAddressForm, statusCode) = addressIdOpt match {
      case Some(addressId) => {
        schema.addresses.lookup(addressId) match {
          case Some(dao) => {
            // prefer an explicitly supplied form (e.g. with errors) over
            // one freshly filled from the stored row
            val form = addressFormOpt match {
              case Some(form) => form
              case None => addressForm.fill(dao)
            }
            (EditAddress(addressId, form), addressForm, statusOpt.getOrElse(OK))
          }
          case None => (AddressNotFound(addressId), addressForm, NOT_FOUND)
        }
      }
      case None => (NoProps, addressFormOpt.getOrElse(addressForm), statusOpt.getOrElse(OK))
    }
    Status(statusCode)(views.html.addressesPage(listOfAddresses, addressProps, newAddressForm, msgOpt, urlToRedirectOpt))
  }
}
object AddressController {
  /** View state for the addresses page: which address (if any) is selected. */
  sealed trait AddressProps
  /** The requested address id does not exist. */
  case class AddressNotFound(addressId: Long) extends AddressProps
  /** An existing address is selected, shown with its (possibly errored) form. */
  case class EditAddress(addressId: Long, addressForm: Form[Address]) extends AddressProps
  /** No address selected. */
  case object NoProps extends AddressProps
}
|
package com.pharmacySystem.model.appointment;
/** Kinds of appointments handled by the pharmacy system. */
public enum AppointmentType {
    DERMATOLOGIST_APPOINTMENT,
    PHARMACIST_APPOINTMENT;
}
<gh_stars>0
require_relative "model_methods/class_methods"
require_relative "model_methods/instance_methods"
module Audit
  # Mixin that adds audit fields (last_modified_by/via/via_value) and
  # create/update/destroy tracking callbacks to the including model.
  # Which events are tracked is driven by Audit.configuration.metadata,
  # matched on the including class name.
  module ModelMethods
    def self.included(base)
      base.extend Audit::ModelMethods::ClassMethods
      base.field :last_modified_by, type: BSON::ObjectId unless base.respond_to?(:last_modified_by)
      base.field :last_modified_via, type: String unless base.respond_to?(:last_modified_via)
      base.field :last_modified_via_value, type: String unless base.respond_to?(:last_modified_via_value)
      # Fix: default to an empty hash when no metadata entry exists for this
      # class — previously `options[:track]` raised NoMethodError on nil.
      # With no entry, options[:track] is blank and all events are tracked.
      options = Audit.configuration.metadata.select { |x| x[:klass] == base.to_s }.first || {}
      base.after_create :track_create if options[:track].blank? || options[:track].include?("create")
      base.after_update :track_update if options[:track].blank? || options[:track].include?("update")
      base.after_destroy :track_destroy if options[:track].blank? || options[:track].include?("destroy")
      base.before_save :ensure_last_modified_by, :ensure_last_modified_via
      base.include Audit::ModelMethods::InstanceMethods
    end

    private

    # One audit entry per lifecycle event.
    def track_create
      create_audit_details "create"
    end

    def track_update
      create_audit_details "update"
    end

    def track_destroy
      create_audit_details "destroy"
    end

    # Stamp the acting user when one is known for the current request.
    # NOTE(review): this reads Userstamp::Store while ensure_last_modified_via
    # reads Audit::Userstamp::Store — confirm both constants resolve to the
    # same store.
    def ensure_last_modified_by
      current_user_id = Userstamp::Store.get("current_user_id")
      if current_user_id.present?
        self.last_modified_by = current_user_id
      end
    end

    # Mark the change as coming from the API (with its referer) or, when no
    # referer is available, from the system.
    def ensure_last_modified_via
      referer = Audit::Userstamp::Store.get("referer")
      if referer.present?
        self.last_modified_via = "api"
        self.last_modified_via_value = referer
      else
        warn "Setting last_modified_via to system for #{self.class} with id #{self.id}" unless Rails.env.test? || Rails.env.development?
        self.last_modified_via = "system"
        self.last_modified_via_value = "system"
      end
    end
  end
end
|
#!/bin/bash
##
## MidKnight PROJECT, 2019
## {MidKnight}projects
## File description:
## linux_config.sh
##
# Interactive Arch chroot configuration: locale and clock setup, initramfs,
# GRUB, network packages, hostname and root password.
#
# Fixes over the original:
#   - the shebang is now the first line and spelled correctly (it was
#     "#!bin/bash" in the middle of the file, where it had no effect)
#   - $input (the GRUB target disk) is now read from the user; it was
#     referenced but never set, so the /dev/sda branch always ran

green='\e[0;32m'
red='\e[0;31m'
neutral='\e[0;m'

pacman -S nano
clear

echo -e "${red}SET UP LOCALE && TIME\n${neutral}"
echo -e "${green}Uncomment *en_US.UTF-8* or any other locale that you want to use."
sleep 5
nano /etc/locale.gen
locale-gen
read -p "Enter the name of the selected locale: " locale
echo LANG=$locale >> /etc/locale.conf
tzselect
# NOTE(review): Zone/SubZone is a placeholder path — confirm the intended
# timezone before shipping this script.
ln -s /usr/share/zoneinfo/Zone/SubZone /etc/localtime
hwclock --systohc --utc
clear

echo -e "${red}LINUX INSTALLATION\n${neutral}"
sleep 6
mv mkinitcpio.conf /etc
pacman -S lvm2
mkinitcpio -p linux
clear

echo -e "${red}GRUB INSTALLATION\n${neutral}"
pacman -S grub
read -p "Enter the target disk for GRUB (empty for /dev/sda): " input
if [ "$input" = "" ]
then
    grub-install --target=i386-pc /dev/sda
else
    grub-install --target=i386-pc $input
fi
grub-mkconfig -o /boot/grub/grub.cfg
pacman -S iw wpa_supplicant dialog networkmanager
clear

read -p "Enter the name of your machine: " hostname
echo $hostname >> /etc/hostname
echo -e "${green}Enter your root password\n${neutral}"
passwd
clear

echo -e "${green}Please run the exit command, then execute the umount.sh script.\n${neutral}"
sleep 4
exit
import { replace } from 'react-router-redux'
import { toast } from 'react-toastify';
import api from '../../common/api/';
// Action types for the remove-task flow.
export const REQUEST_REMOVE_TASK = "TASK/REMOVE/REQUEST_TASK"
export const RESPONSE_REMOVE_TASK = "TASK/REMOVE/RESPONSE_TASK"
export const ERROR_REMOVE_TASK = "TASK/REMOVE/ERROR_TASK"
export const LOAD_REMOVE_TASK = "TASK/REMOVE/LOAD_REMOVE_TASK"
export const LOADED_REMOVE_TASK = "TASK/REMOVE/LOADED_REMOVE_TASK"
export const LOADED_ERROR_TASK = "TASK/REMOVE/LOADED_ERROR_TASK"

// Slice shape: the task pending removal, the last error, and dialog visibility.
let initialState = {
    task: null,
    error: null,
    isOpen: false
}

// Reducer for the remove-task slice.
export default function reducer(state = initialState, action = {}) {
    const { type } = action
    if (type === LOAD_REMOVE_TASK) {
        // Reset the slice; also used to close the confirmation dialog.
        return { ...initialState }
    }
    if (type === LOADED_REMOVE_TASK) {
        // A task was selected for removal: store it and open the dialog.
        return { ...state, task: action.payload, isOpen: true }
    }
    if (type === LOADED_ERROR_TASK) {
        return { ...state, task: null, error: action.error }
    }
    if (type === REQUEST_REMOVE_TASK) {
        return { ...state }
    }
    if (type === RESPONSE_REMOVE_TASK) {
        // Removal succeeded: clear the pending task.
        return { ...state, task: null }
    }
    if (type === ERROR_REMOVE_TASK) {
        return { ...state, error: action.error }
    }
    return state;
}
/**
 * Thunk: loads the task with the given id into the remove slice and opens
 * the confirmation dialog. If the task is not in the cached list, navigates
 * back to /task and shows a warning.
 */
export const load = (id) => (dispatch, state) => {
    dispatch({ type: LOAD_REMOVE_TASK })
    // TODO: fetch from the backend to validate instead of trusting the cached list.
    let task = state().task.list.tasks.find(x => x.id === id);
    if (task) {
        dispatch({ type: LOADED_REMOVE_TASK, payload: task })
    } else {
        dispatch(replace('/task'));
        // Bug fix: the message said "editar" (edit) — copy-paste from the edit
        // module; this slice removes tasks.
        toast.warn("No se puede eliminar la tarea seleccionada")
    }
}
/**
 * Thunk: deletes the task currently held in the remove slice via the API,
 * then navigates back to the task list.
 * Note: the `id` parameter is kept for interface compatibility; the task is
 * read from state().task.remove.task.
 */
export const remove = (id) => (dispatch, state) => {
    dispatch({ type: REQUEST_REMOVE_TASK })
    let task = state().task.remove.task;
    api.delete(`task/${task.id}`)
        .then(response => {
            toast.success("Tarea eliminada")
            dispatch(replace('/task'));
            dispatch({ type: RESPONSE_REMOVE_TASK, payload: task })
        }).catch(error => {
            toast.error("Error al eliminar la tarea")
            // Bug fix: ERROR_REMOVE_TASK is handled by the reducer but was
            // never dispatched, so failures never reached the store.
            dispatch({ type: ERROR_REMOVE_TASK, error })
        })
}
// Thunk: closes the remove dialog (resets the slice) and navigates back
// to the task list without deleting anything.
export const goBack = () => dispatch => {
    // TODO: show a "cancelled" message
    dispatch({ type: LOAD_REMOVE_TASK })
    dispatch(replace('/task'));
} |
import os
import sys
import time
import atexit
import signal
class Daemon:
    """Minimal UNIX daemon using the classic double-fork technique.

    A PID file tracks the background process so that ``stop()`` can signal it
    and ``restart()`` can cycle it. Subclasses / callers are expected to put
    the actual work loop where indicated in ``start()``.
    """

    def __init__(self, pidfile):
        # Path of the PID file used to track the daemonized process.
        self.pidfile = pidfile

    def _write_pid_file(self, pid):
        # Persist the daemon's PID so stop() can signal it later.
        with open(self.pidfile, 'w') as f:
            f.write(str(pid))

    def _read_pid_file(self):
        # Returns the PID stored in the pidfile as an int.
        with open(self.pidfile, 'r') as f:
            return int(f.read().strip())

    def start(self):
        """Daemonize the current process (double fork) and record its PID.

        Exits with status 1 if a pidfile already exists (daemon presumed
        running) or if either fork fails.
        """
        if os.path.isfile(self.pidfile):
            print("Daemon is already running.")
            sys.exit(1)
        # First fork: detach from the launching process.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)
        except OSError as e:
            print(f"Fork failed: {e}")
            sys.exit(1)
        # Detach from the controlling terminal and any mounted directory.
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # Second fork: prevent re-acquiring a controlling terminal; the
        # intermediate process records the grandchild's PID and exits.
        try:
            pid = os.fork()
            if pid > 0:
                self._write_pid_file(pid)
                sys.exit(0)
        except OSError as e:
            print(f"Fork failed: {e}")
            sys.exit(1)
        # Redirect the standard streams to /dev/null.
        sys.stdout.flush()
        sys.stderr.flush()
        with open(os.devnull, 'r') as devnull_r, open(os.devnull, 'w') as devnull_w:
            os.dup2(devnull_r.fileno(), sys.stdin.fileno())
            os.dup2(devnull_w.fileno(), sys.stdout.fileno())
            os.dup2(devnull_w.fileno(), sys.stderr.fileno())
        # Daemon process implementation goes here
        # Example: while True: perform_task()

    def stop(self):
        """Send SIGTERM to the daemon until it dies, then remove the pidfile.

        Bug fix: the original looped ``os.kill`` until an exception, so the
        ``os.remove(self.pidfile)`` after the loop was unreachable and a stale
        pidfile was left behind, blocking future ``start()`` calls.
        """
        if not os.path.isfile(self.pidfile):
            print("Daemon is not running.")
            return
        pid = self._read_pid_file()
        try:
            while True:
                os.kill(pid, signal.SIGTERM)
                time.sleep(0.1)
        except ProcessLookupError:
            # Process is gone — clean up the pidfile so start() works again.
            if os.path.isfile(self.pidfile):
                os.remove(self.pidfile)
        except OSError as e:
            # Any other failure (e.g. EPERM) is fatal, as before.
            print(f"Failed to stop daemon: {e}")
            sys.exit(1)
    def restart(self):
        # Stop the running daemon (if any), then start a fresh instance.
        self.stop()
        self.start() |
<reponame>MrBattary/ncstore-back
package com.netcracker.ncstore.config;
import com.netcracker.ncstore.config.handler.AsyncExceptionHandler;
import org.springframework.aop.interceptor.AsyncUncaughtExceptionHandler;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.AsyncConfigurer;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import java.util.concurrent.Executor;
/**
 * Enables Spring asynchronous method execution and supplies both the thread
 * pool used for {@code @Async} methods and the handler for exceptions thrown
 * from {@code void} async methods.
 */
@EnableAsync
@Configuration
public class AsyncConfiguration implements AsyncConfigurer {

    /** Bounded pool for async work: 2 core threads, growing up to 6. */
    @Override
    public Executor getAsyncExecutor() {
        final ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
        taskExecutor.setCorePoolSize(2);
        taskExecutor.setMaxPoolSize(6);
        taskExecutor.setThreadNamePrefix("MyExecutor-");
        taskExecutor.initialize();
        return taskExecutor;
    }

    /** Routes uncaught exceptions from async void methods to the project handler. */
    @Override
    public AsyncUncaughtExceptionHandler getAsyncUncaughtExceptionHandler() {
        return new AsyncExceptionHandler();
    }
}
|
// Returns the number of characters in `str`, plus one.
function stringLengthPlusOne(str) {
    const lengthPlusOne = 1 + str.length;
    return lengthPlusOne;
}
// Demo: "Hello" has 5 characters, so this logs 6.
let str = 'Hello';
let length = stringLengthPlusOne(str);
console.log(length); |
<filename>app/torii-providers/slack-oauth2.js
import Oauth2 from 'torii/providers/oauth2-code';
// Torii provider for Slack's OAuth2 authorization-code flow.
export default Oauth2.extend({
  name: 'slack-oauth2',
  // Slack's OAuth authorize endpoint the user is redirected to.
  baseUrl: 'https://slack.com/oauth/authorize',
  // Params torii extracts from the redirect back to the app.
  responseParams: ['code', 'state']
});
|
def process_dataset(dataset_path, label_name):
    # Dispatches annotation reading based on a substring of the dataset path.
    # NOTE(review): coco_readAnnotation / voc_readAnnotation / cub_readAnnotation
    # are not defined in this file — confirm they are imported elsewhere in the
    # module, otherwise this raises NameError at call time.
    if "coco" in dataset_path:
        return coco_readAnnotation(dataset_path, label_name)
    elif "voc" in dataset_path:
        return voc_readAnnotation(dataset_path, label_name)
    elif "cub200" in dataset_path:
        return cub_readAnnotation(dataset_path, label_name)
    else:
        # Unsupported datasets are reported via a sentinel string, not an
        # exception — callers must check the return value.
        return "Unsupported dataset type"
# Example usage: "cub200" appears in the path, so the CUB-200 reader is chosen.
dataset_path = "path_to_dataset/cub200"
label_name = "bird"
result = process_dataset(dataset_path, label_name)
print(result)  # prints whatever cub_readAnnotation returned
#!/bin/bash
# Strict mode: -e exit on error, -u error on unset variables,
# -o pipefail propagate failures through pipes.
set -euo pipefail
# Word-split only on newlines and tabs, not spaces.
IFS=$'\n\t'
# Delegate to the project Makefile's "serve" target.
make serve
|
import HttpRequest from 'Util/redux/HttpRequest';
const ENDPOINT = process.env.REACT_APP_SERVER_END_POINT;
// Pre-configured HttpRequest instances for the user-settings endpoints.
// NOTE(review): each request reads the access token from localStorage at
// module load time, not per request — if the token is refreshed after this
// module is imported, these headers keep the stale value. Confirm whether
// HttpRequest re-evaluates headers when dispatching.
const setAvatarRequest = new HttpRequest({
    method: 'POST',
    type: 'SET_AVATAR',
    headers: {
        'access-token': localStorage.getItem('token'),
    },
    endpoint: `${ENDPOINT}/setAvatar`,
    // payload: { userId, avatar_url },
});

const setPasswordRequest = new HttpRequest({
    method: 'POST',
    type: 'SET_PASSWORD',
    endpoint: `${ENDPOINT}/setPassword`,
    headers: {
        'access-token': localStorage.getItem('token'),
    },
});

const setUsernameRequest = new HttpRequest({
    method: 'POST',
    type: 'SET_USERNAME',
    endpoint: `${ENDPOINT}/setUsername`,
    headers: {
        'access-token': localStorage.getItem('token'),
    },
    // payload: { userId, username },
});

const setUserInformationRequest = new HttpRequest({
    method: 'POST',
    type: 'SET_USER_INFORMATION',
    endpoint: `${ENDPOINT}/setUserInformation`,
    headers: {
        'access-token': localStorage.getItem('token'),
    },
});

const setCoverRequest = new HttpRequest({
    method: 'POST',
    type: 'SET_COVER',
    endpoint: `${ENDPOINT}/setCover`,
    headers: {
        'access-token': localStorage.getItem('token'),
    },
});

// Redux action creators derived from the requests above.
export const setAvatar = setAvatarRequest.getAction();
export const setPassword = setPasswordRequest.getAction();
export const setUsername = setUsernameRequest.getAction();
export const setUserInformation = setUserInformationRequest.getAction();
export const setCover = setCoverRequest.getAction();
|
<reponame>NYCMOTI/open-bid<filename>app/models/sam_account_reckoner.rb
# Reconciles a user's SAM.gov registration status and small-business flag
# against the Samwise API, keyed on the user's DUNS number.
class SamAccountReckoner < Struct.new(:user)
  # Resets the status when the DUNS number is blank, or marks it pending
  # when a persisted user's DUNS number has just changed.
  def set_default_sam_status
    if user.duns_number.blank?
      user.sam_status = :duns_blank
    elsif should_clear_status?
      user.sam_status = :sam_pending
    end
  end

  # Refreshes status and small-business flag, persisting only when dirty.
  def set!
    update_sam_status
    update_small_business
    user.save if user.changed?
  end

  private

  # Small-business flag follows SAM acceptance; a blank DUNS clears it.
  def update_small_business
    if user.sam_accepted?
      user.small_business = duns_is_small_business?
    elsif user.duns_blank?
      user.small_business = false
    end
  end

  # Only pending records get re-checked against SAM.
  def update_sam_status
    user.sam_status = sam_status if user.sam_pending?
  end

  def should_clear_status?
    user.persisted? && user.duns_number_changed?
  end

  def sam_status
    duns_is_in_sam? ? :sam_accepted : :sam_rejected
  end

  # Memoized Samwise API client.
  def client
    @client ||= Samwise::Client.new(api_key: DataDotGovCredentials.api_key)
  end

  # Memoized vendor summary for the user's DUNS number (one API call).
  def vendor_summary
    @vendor_summary ||= client.get_vendor_summary(duns: user.duns_number)
  end

  def duns_is_in_sam?
    vendor_summary[:in_sam] == true
  end

  def duns_is_small_business?
    vendor_summary[:small_business] == true
  end
end
|
<reponame>liamdawson/DIM<gh_stars>0
import { hot } from 'react-hot-loader/root';
import React from 'react';
import { UIRouter, UIRouterReact } from '@uirouter/react';
import { Provider } from 'react-redux';
import HTML5Backend from 'react-dnd-html5-backend';
import App from './app/App';
import store from './app/store/store';
import makeRouter from './router.config';
import { setRouter } from './router';
import { DndProvider } from 'react-dnd';
/**
 * Root component: wires the Redux store, the react-dnd HTML5 backend and the
 * UI-Router instance around the App, and exports it wrapped for hot reload.
 */
class Root extends React.Component {
  router: UIRouterReact;

  constructor(props) {
    super(props);
    // Build the router once per Root instance and register it globally so
    // modules outside the React tree can reference it via router.ts.
    this.router = makeRouter();
    setRouter(this.router);
  }

  render() {
    return (
      <Provider store={store}>
        <DndProvider backend={HTML5Backend}>
          <UIRouter router={this.router}>
            <App />
          </UIRouter>
        </DndProvider>
      </Provider>
    );
  }
}

// react-hot-loader wrapper: enables hot module replacement in development.
export default hot(Root);
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.communication.administration;
import com.azure.communication.administration.models.AcquiredPhoneNumber;
import com.azure.communication.administration.models.AreaCodes;
import com.azure.communication.administration.models.Capability;
import com.azure.communication.administration.models.CreateReservationOptions;
import com.azure.communication.administration.models.LocationOptionsQuery;
import com.azure.communication.administration.models.LocationOptionsResponse;
import com.azure.communication.administration.models.LocationType;
import com.azure.communication.administration.models.NumberConfigurationResponse;
import com.azure.communication.administration.models.NumberUpdateCapabilities;
import com.azure.communication.administration.models.PhoneNumberCountry;
import com.azure.communication.administration.models.PhoneNumberEntity;
import com.azure.communication.administration.models.PhoneNumberRelease;
import com.azure.communication.administration.models.PhoneNumberReservation;
import com.azure.communication.administration.models.PhonePlan;
import com.azure.communication.administration.models.PhonePlanGroup;
import com.azure.communication.administration.models.PstnConfiguration;
import com.azure.communication.administration.models.ReleaseStatus;
import com.azure.communication.administration.models.UpdateNumberCapabilitiesResponse;
import com.azure.communication.administration.models.UpdatePhoneNumberCapabilitiesResponse;
import com.azure.communication.common.PhoneNumberIdentifier;
import com.azure.core.http.HttpClient;
import com.azure.core.http.rest.PagedFlux;
import com.azure.core.http.rest.Response;
import com.azure.core.util.polling.PollerFlux;
import com.azure.core.util.polling.AsyncPollResponse;
import com.azure.core.util.polling.LongRunningOperationStatus;
import com.azure.core.util.Context;
import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase {
/**
 * Builds an async phone-number client from the test connection string,
 * with the per-test logging policy attached.
 */
private PhoneNumberAsyncClient getClientWithConnectionString(HttpClient httpClient, String testName) {
    return addLoggingPolicy(super.getClientBuilderWithConnectionString(httpClient), testName)
        .buildAsyncClient();
}
// Smoke test: a client built from a connection string is non-null and can
// list at least one acquired phone number.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void createAsyncPhoneNumberClientWithConnectionString(HttpClient httpClient) {
    PhoneNumberAsyncClient phoneNumberAsyncClient = this.getClientWithConnectionString(httpClient, "createAsyncClient");
    assertNotNull(phoneNumberAsyncClient);
    // Smoke test using phoneNumberAsyncClient to list all phone numbers
    PagedFlux<AcquiredPhoneNumber> pagedFlux = phoneNumberAsyncClient.listAllPhoneNumbers(LOCALE);
    StepVerifier.create(pagedFlux.next())
        .assertNext(item -> {
            assertNotNull(item.getPhoneNumber());
        })
        .verifyComplete();
}

// Verifies the first acquired phone number in the paged stream is populated.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void listAllPhoneNumbers(HttpClient httpClient) {
    PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClientWithConnectionString(httpClient, "listAllPhoneNumbers").listAllPhoneNumbers(LOCALE);
    StepVerifier.create(pagedFlux.next())
        .assertNext(item -> {
            assertNotNull(item.getPhoneNumber());
        })
        .verifyComplete();
}
// Verifies at least one phone plan group exists for the test country/locale.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void listPhonePlanGroups(HttpClient httpClient) {
    PagedFlux<PhonePlanGroup> pagedFlux =
        this.getClientWithConnectionString(httpClient, "listPhonePlanGroups").listPhonePlanGroups(COUNTRY_CODE, LOCALE, true);
    StepVerifier.create(pagedFlux.next())
        .assertNext(item -> {
            assertNotNull(item.getPhonePlanGroupId());
        })
        .verifyComplete();
}

// Chains a plan-group lookup into a plan listing and checks a plan id exists.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void listPhonePlans(HttpClient httpClient) {
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "listPhonePlans_listPlanGroups").listPhonePlanGroups(COUNTRY_CODE, LOCALE, true).next()
            .flatMap((PhonePlanGroup phonePlanGroup) -> {
                return this.getClientWithConnectionString(httpClient, "listPhonePlans").listPhonePlans(COUNTRY_CODE, phonePlanGroup.getPhonePlanGroupId(), LOCALE).next();
            }))
        .assertNext((PhonePlan phonePlan) -> {
            assertNotNull(phonePlan.getPhonePlanId());
        })
        .verifyComplete();
}
// Verifies the first release entity in the paged stream has an id.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void listAllReleases(HttpClient httpClient) {
    PagedFlux<PhoneNumberEntity> pagedFlux = this.getClientWithConnectionString(httpClient, "listAllReleases").listAllReleases();
    StepVerifier.create(pagedFlux.next())
        .assertNext(item -> {
            assertNotNull(item.getId());
        })
        .verifyComplete();
}

// Verifies the first reservation entity in the paged stream has an id.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void listAllReservations(HttpClient httpClient) {
    PagedFlux<PhoneNumberEntity> pagedFlux = this.getClientWithConnectionString(httpClient, "listAllReservations").listAllReservations();
    StepVerifier.create(pagedFlux.next())
        .assertNext(item -> {
            assertNotNull(item.getId());
        })
        .verifyComplete();
}

// Verifies the first supported country has a country code.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void listAllSupportedCountries(HttpClient httpClient) {
    PagedFlux<PhoneNumberCountry> pagedFlux = this.getClientWithConnectionString(httpClient, "listAllSupportedCountries").listAllSupportedCountries(LOCALE);
    StepVerifier.create(pagedFlux.next())
        .assertNext(item -> {
            assertNotNull(item.getCountryCode());
        })
        .verifyComplete();
}
// Chains plan group -> plan -> location options and checks the label id.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getPhonePlanLocationOptions(HttpClient httpClient) {
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "getPhonePlanLocationOptions_listPlanGroups").listPhonePlanGroups(COUNTRY_CODE, LOCALE, true).next()
            .flatMap((PhonePlanGroup phonePlanGroup) -> {
                return this.getClientWithConnectionString(httpClient, "getPhonePlanLocationOptions_listPlans").listPhonePlans(COUNTRY_CODE, phonePlanGroup.getPhonePlanGroupId(), LOCALE).next()
                    .flatMap((PhonePlan phonePlan) -> {
                        return this.getClientWithConnectionString(httpClient, "getPhonePlanLocationOptions").getPhonePlanLocationOptions(COUNTRY_CODE, phonePlanGroup.getPhonePlanGroupId(), phonePlan.getPhonePlanId(), LOCALE);
                    });
            }))
        .assertNext(item -> {
            assertNotNull(item.getLocationOptions().getLabelId());
        })
        .verifyComplete();
}

// Builds a state/city location query, then checks area codes are returned
// for the first available phone plan.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getAllAreaCodes(HttpClient httpClient) {
    List<LocationOptionsQuery> locationOptions = new ArrayList<>();
    LocationOptionsQuery query = new LocationOptionsQuery();
    query.setLabelId("state");
    query.setOptionsValue(LOCATION_OPTION_STATE);
    locationOptions.add(query);
    query = new LocationOptionsQuery();
    query.setLabelId("city");
    query.setOptionsValue(LOCATION_OPTION_CITY);
    locationOptions.add(query);
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "getAllAreaCodes_listPlanGroups").listPhonePlanGroups(COUNTRY_CODE, LOCALE, true).next()
            .flatMap(phonePlanGroups -> {
                return this.getClientWithConnectionString(httpClient, "getAllAreaCodes_listPlans").listPhonePlans(COUNTRY_CODE, phonePlanGroups.getPhonePlanGroupId(), LOCALE).next()
                    .flatMap(phonePlans -> {
                        return this.getClientWithConnectionString(httpClient, "getAllAreaCodes").getAllAreaCodes(LocationType.SELECTION.toString(), COUNTRY_CODE, phonePlans.getPhonePlanId(), locationOptions);
                    });
            }))
        .assertNext(item -> {
            assertTrue(item.getPrimaryAreaCodes().size() > 0);
        })
        .verifyComplete();
}
// Same as getAllAreaCodes but through the WithResponse overload, also
// asserting the HTTP 200 status code.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getAllAreaCodesWithResponse(HttpClient httpClient) {
    List<LocationOptionsQuery> locationOptions = new ArrayList<>();
    LocationOptionsQuery query = new LocationOptionsQuery();
    query.setLabelId("state");
    query.setOptionsValue(LOCATION_OPTION_STATE);
    locationOptions.add(query);
    query = new LocationOptionsQuery();
    query.setLabelId("city");
    query.setOptionsValue(LOCATION_OPTION_CITY);
    locationOptions.add(query);
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "getAllAreaCodesWithResponse_listPlanGroups")
            .listPhonePlanGroups(COUNTRY_CODE, LOCALE, true).next()
            .flatMap(phonePlanGroups -> {
                return this.getClientWithConnectionString(httpClient, "getAllAreaCodesWithResponse_listPlans").listPhonePlans(COUNTRY_CODE, phonePlanGroups.getPhonePlanGroupId(), LOCALE).next()
                    .flatMap(phonePlans -> {
                        return this.getClientWithConnectionString(httpClient, "getAllAreaCodesWithResponse").getAllAreaCodesWithResponse(LocationType.SELECTION.toString(), COUNTRY_CODE, phonePlans.getPhonePlanId(), locationOptions, Context.NONE);
                    });
            }))
        .assertNext(item -> {
            assertEquals(200, item.getStatusCode());
            assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0);
        })
        .verifyComplete();
}
// End-to-end reservation lifecycle: create a reservation for the first
// available plan, fetch it by id, then cancel it.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void beginCreateReservationGetReservationByIdCancelReservation(HttpClient httpClient) {
    StepVerifier.create(
        // Setting up for phone number reservation creation
        this.getClientWithConnectionString(httpClient, "reservationTests_listPlanGroups").listPhonePlanGroups(COUNTRY_CODE, LOCALE, true).next()
            .flatMap((PhonePlanGroup phonePlanGroup) -> {
                return this.getClientWithConnectionString(httpClient, "reservationTests_listPlans").listPhonePlans(COUNTRY_CODE, phonePlanGroup.getPhonePlanGroupId(), LOCALE).next()
                    .flatMap((PhonePlan phonePlan) -> {
                        // Create Reservation
                        return beginCreateReservation(httpClient, phonePlan, "reservationTests_beginCreateReservation").last()
                            .flatMap((AsyncPollResponse<PhoneNumberReservation, PhoneNumberReservation> createdRes) -> {
                                assertEquals(createdRes.getValue().getPhoneNumbers().size(), 1);
                                assertNotNull(createdRes.getValue().getReservationId());
                                // Get Reservation by id
                                return this.getClientWithConnectionString(httpClient, "reservationTests_getReservationById").getReservationById(createdRes.getValue().getReservationId()).
                                    flatMap(reservation -> {
                                        assertEquals(createdRes.getValue().getReservationId(), reservation.getReservationId());
                                        // Cancel Reservation
                                        return this.getClientWithConnectionString(httpClient, "reservationTests_cancelReservation").cancelReservation(reservation.getReservationId());
                                    });
                            });
                    });
            }))
        .verifyComplete();
}

// Same lifecycle through the WithResponse overloads, asserting HTTP 200 on
// the fetch and HTTP 202 on the cancellation.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void beginCreateReservationGetReservationByIdCancelReservationWithResponse(HttpClient httpClient) {
    StepVerifier.create(
        // Setting up for phone number reservation creation
        this.getClientWithConnectionString(httpClient, "reservationWithResponseTests_listPlanGroups").listPhonePlanGroups(COUNTRY_CODE, LOCALE, true).next()
            .flatMap((PhonePlanGroup phonePlanGroup) -> {
                return this.getClientWithConnectionString(httpClient, "reservationWithResponseTests_listPlans").listPhonePlans(COUNTRY_CODE, phonePlanGroup.getPhonePlanGroupId(), LOCALE).next()
                    .flatMap((PhonePlan phonePlan) -> {
                        // Create Reservation
                        return beginCreateReservation(httpClient, phonePlan, "reservationWithResponseTests_beginCreateReservation").last()
                            .flatMap((AsyncPollResponse<PhoneNumberReservation, PhoneNumberReservation> createdRes) -> {
                                assertEquals(createdRes.getValue().getPhoneNumbers().size(), 1);
                                assertNotNull(createdRes.getValue().getReservationId());
                                // Get Reservation by id with response
                                return this.getClientWithConnectionString(httpClient, "reservationWithResponseTests_getResponseById").getReservationByIdWithResponse(createdRes.getValue().getReservationId())
                                    .flatMap((Response<PhoneNumberReservation> reservationResponse) -> {
                                        assertEquals(200, reservationResponse.getStatusCode());
                                        assertEquals(createdRes.getValue().getReservationId(), reservationResponse.getValue().getReservationId());
                                        // Cancel Reservation with response
                                        return this.getClientWithConnectionString(httpClient, "reservationWithResponseTests_cancelReservation").cancelReservationWithResponse(reservationResponse.getValue().getReservationId());
                                    });
                            });
                    });
            }))
        .assertNext(cancelReservationResponse -> {
            assertEquals(202, cancelReservationResponse.getStatusCode());
        })
        .verifyComplete();
}
// Full purchase flow: reserve a number, purchase the reservation, then
// release the purchased number. Skipped when SKIP_LIVE_TEST is set because
// it acquires (and bills for) a real phone number.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
@DisabledIfEnvironmentVariable(
    named = "SKIP_LIVE_TEST",
    matches = "(?i)(true)")
public void purchaseReservationBeginReleasePhoneNumber(HttpClient httpClient) {
    StepVerifier.create(
        // Setting up for phone number reservation creation
        this.getClientWithConnectionString(httpClient, "purchaseReleaseNumberTests_listPlanGroups").listPhonePlanGroups(COUNTRY_CODE, LOCALE, true).next()
            .flatMap((PhonePlanGroup phonePlanGroup) -> {
                return this.getClientWithConnectionString(httpClient, "purchaseReleaseNumberTests_listPlans").listPhonePlans(COUNTRY_CODE, phonePlanGroup.getPhonePlanGroupId(), LOCALE).next()
                    .flatMap((PhonePlan phonePlan) -> {
                        // Create Reservation
                        return beginCreateReservation(httpClient, phonePlan, "purchaseReleaseNumberTests_beginCreateReservation").last()
                            .flatMap((AsyncPollResponse<PhoneNumberReservation, PhoneNumberReservation> createdRes) -> {
                                assertEquals(createdRes.getValue().getPhoneNumbers().size(), 1);
                                String purchasedNumber = createdRes.getValue().getPhoneNumbers().get(0);
                                // Purchase Reservation
                                return beginPurchaseReservation(httpClient, createdRes.getValue().getReservationId(), "purchaseReleaseNumberTests_beginPurchaseReservation").last()
                                    .flatMap((AsyncPollResponse<Void, Void> response) -> {
                                        assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED.toString(), response.getStatus().toString());
                                        return beginReleasePhoneNumbers(httpClient, purchasedNumber, "purchaseReleaseNumberTests_beginReleasePhoneNumbers").last();
                                    });
                            });
                    });
            }))
        .assertNext((AsyncPollResponse<PhoneNumberRelease, PhoneNumberRelease> releaseNumberResponse) -> {
            assertEquals(ReleaseStatus.COMPLETE, releaseNumberResponse.getValue().getStatus());
        })
        .verifyComplete();
}
// Configure -> read back -> unconfigure a purchased number, asserting the
// HTTP status at each step via the WithResponse overloads.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void configureNumberGetNumberConfigurationUnconfigureNumberWithResponse(HttpClient httpClient) {
    // Configuring purchased number
    PhoneNumberIdentifier number = new PhoneNumberIdentifier(PHONE_NUMBER);
    PstnConfiguration pstnConfiguration = new PstnConfiguration();
    pstnConfiguration.setApplicationId("ApplicationId");
    pstnConfiguration.setCallbackUrl("https://callbackurl");
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "configureNumberWithResponseTests_configureNumber").configureNumberWithResponse(number, pstnConfiguration)
            .flatMap((Response<Void> configResponse) -> {
                assertEquals(200, configResponse.getStatusCode());
                // Get configurations of purchased number
                return this.getClientWithConnectionString(httpClient, "configureNumberWithResponseTests_getNumberConfig").getNumberConfigurationWithResponse(number)
                    .flatMap((Response<NumberConfigurationResponse> getConfigResponse) -> {
                        assertEquals(200, getConfigResponse.getStatusCode());
                        assertNotNull(getConfigResponse.getValue().getPstnConfiguration().getApplicationId());
                        assertNotNull(getConfigResponse.getValue().getPstnConfiguration().getCallbackUrl());
                        // Unconfigure the purchased number
                        return this.getClientWithConnectionString(httpClient, "configureNumberWithResponseTests_unconfigureNumber").unconfigureNumberWithResponse(number);
                    });
            }))
        .assertNext((Response<Void> unconfigureResponse) -> {
            assertEquals(200, unconfigureResponse.getStatusCode());
        })
        .verifyComplete();
}

// Same configure/read/unconfigure flow through the value-returning overloads.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void configureNumberGetNumberConfigurationUnconfigureNumber(HttpClient httpClient) {
    // Configuring purchased number
    PhoneNumberIdentifier number = new PhoneNumberIdentifier(PHONE_NUMBER);
    PstnConfiguration pstnConfiguration = new PstnConfiguration();
    pstnConfiguration.setApplicationId("ApplicationId");
    pstnConfiguration.setCallbackUrl("https://callbackurl");
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "configureNumberTests_configureNumber").configureNumber(number, pstnConfiguration)
            .flatMap(response -> {
                // Get configurations of purchased number
                return this.getClientWithConnectionString(httpClient, "configureNumberTests_getNumberConfig").getNumberConfiguration(number)
                    .flatMap((NumberConfigurationResponse configResponse) -> {
                        assertNotNull(configResponse.getPstnConfiguration().getApplicationId());
                        assertNotNull(configResponse.getPstnConfiguration().getCallbackUrl());
                        // Unconfigure the purchased number
                        return this.getClientWithConnectionString(httpClient, "configureNumberTests_unconfigureNumber").unconfigureNumber(number);
                    });
            }))
        .verifyComplete();
}
// Requests an inbound-calling capability update and then fetches the update
// record by its id, asserting HTTP status codes via WithResponse overloads.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void updateCapabilitiesGetCapabilitiesUpdateWithResponse(HttpClient httpClient) {
    List<Capability> capabilitiesToAdd = new ArrayList<>();
    capabilitiesToAdd.add(Capability.INBOUND_CALLING);
    NumberUpdateCapabilities update = new NumberUpdateCapabilities();
    update.setAdd(capabilitiesToAdd);
    Map<PhoneNumberIdentifier, NumberUpdateCapabilities> updateMap = new HashMap<>();
    updateMap.put(new PhoneNumberIdentifier(PHONE_NUMBER), update);
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "capabilitiesWithResponseTests_updateCapabilities").updateCapabilitiesWithResponse(updateMap)
            .flatMap((Response<UpdateNumberCapabilitiesResponse> updateResponse) -> {
                assertEquals(200, updateResponse.getStatusCode());
                // Get capabilities update
                String capabilitiesUpdateId = updateResponse.getValue().getCapabilitiesUpdateId();
                assertNotNull(capabilitiesUpdateId);
                return this.getClientWithConnectionString(httpClient, "capabilitiesWithResponseTests_getCapabilitiesUpdate").getCapabilitiesUpdateWithResponse(capabilitiesUpdateId);
            }))
        .assertNext((Response<UpdatePhoneNumberCapabilitiesResponse> retrievedUpdateResponse) -> {
            assertEquals(200, retrievedUpdateResponse.getStatusCode());
            assertNotNull(retrievedUpdateResponse.getValue().getCapabilitiesUpdateId());
        })
        .verifyComplete();
}

// Same capability-update round trip through the value-returning overloads.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void updateCapabilitiesGetCapabilitiesUpdate(HttpClient httpClient) {
    List<Capability> capabilitiesToAdd = new ArrayList<>();
    capabilitiesToAdd.add(Capability.INBOUND_CALLING);
    NumberUpdateCapabilities update = new NumberUpdateCapabilities();
    update.setAdd(capabilitiesToAdd);
    Map<PhoneNumberIdentifier, NumberUpdateCapabilities> updateMap = new HashMap<>();
    updateMap.put(new PhoneNumberIdentifier(PHONE_NUMBER), update);
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "capabilitiesTests_updateCapabilities").updateCapabilities(updateMap)
            .flatMap((UpdateNumberCapabilitiesResponse updateResponse) -> {
                // Get capabilities update
                String capabilitiesUpdateId = updateResponse.getCapabilitiesUpdateId();
                assertNotNull(capabilitiesUpdateId);
                return this.getClientWithConnectionString(httpClient, "capabilitiesTests_getCapabilitiesUpdate").getCapabilitiesUpdate(capabilitiesUpdateId);
            }))
        .assertNext((UpdatePhoneNumberCapabilitiesResponse retrievedUpdateResponse) -> {
            assertNotNull(retrievedUpdateResponse.getCapabilitiesUpdateId());
        })
        .verifyComplete();
}
// Null-argument contract tests: each call below passes null for a required
// parameter and expects the client to fail the Mono/PagedFlux with a
// NullPointerException rather than issuing a request.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void listPhonePlansNullCountryCode(HttpClient httpClient) {
    PagedFlux<PhonePlan> pagedFlux = this.getClientWithConnectionString(httpClient, "listPhonePlansNullCountryCode")
        .listPhonePlans(null, "PHONE_PLAN_GROUP_ID", LOCALE);
    StepVerifier.create(pagedFlux.next())
        .verifyError(NullPointerException.class);
}

@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void listPhonePlansNullPhonePlanGroupId(HttpClient httpClient) {
    PagedFlux<PhonePlan> phonePlans =
        this.getClientWithConnectionString(httpClient, "listPhonePlansNullPhonePlanGroupId").listPhonePlans(COUNTRY_CODE, null, LOCALE);
    StepVerifier.create(phonePlans)
        .verifyError(NullPointerException.class);
}

@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getPhonePlanLocationOptionsWithResponseNullCountryCode(HttpClient httpClient) {
    Mono<LocationOptionsResponse> mono =
        this.getClientWithConnectionString(httpClient, "getPhonePlanLocationOptionsWithResponseNullCountryCode")
            .getPhonePlanLocationOptions(null, "PHONE_PLAN_GROUP_ID", "PHONE_PLAN_ID", LOCALE);
    StepVerifier.create(mono)
        .verifyError(NullPointerException.class);
}

@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getPhonePlanLocationOptionsWithResponseNullPhonePlanGroupId(HttpClient httpClient) {
    Mono<LocationOptionsResponse> mono =
        this.getClientWithConnectionString(httpClient, "getPhonePlanLocationOptionsWithResponseNullPhonePlanGroupId")
            .getPhonePlanLocationOptions(COUNTRY_CODE, null, "PHONE_PLAN_ID", LOCALE);
    StepVerifier.create(mono)
        .verifyError(NullPointerException.class);
}

@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getPhonePlanLocationOptionsWithResponseNullPhonePlanId(HttpClient httpClient) {
    Mono<LocationOptionsResponse> mono =
        this.getClientWithConnectionString(httpClient, "getPhonePlanLocationOptionsWithResponseNullPhonePlanId")
            .getPhonePlanLocationOptions(COUNTRY_CODE, "PHONE_PLAN_GROUP_ID", null, LOCALE);
    StepVerifier.create(mono)
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getAllAreaCodesWithResponseNullLocationType(HttpClient httpClient) {
List<LocationOptionsQuery> locationOptions = new ArrayList<>();
Mono<Response<AreaCodes>> mono = this.getClientWithConnectionString(httpClient, "getAllAreaCodesWithResponseNullLocationType")
.getAllAreaCodesWithResponse(null, COUNTRY_CODE, "PHONE_PLAN_ID", locationOptions, Context.NONE);
StepVerifier.create(mono)
.verifyError(java.lang.RuntimeException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getAllAreaCodesWithResponseNullCountryCode(HttpClient httpClient) {
List<LocationOptionsQuery> locationOptions = new ArrayList<>();
Mono<Response<AreaCodes>> mono = this.getClientWithConnectionString(httpClient, "getAllAreaCodesWithResponseNullCountryCode")
.getAllAreaCodesWithResponse("selection", null, "PHONE_PLAN_ID", locationOptions, Context.NONE);
StepVerifier.create(mono)
.verifyError(java.lang.RuntimeException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getAllAreaCodesWithResponseNullPhonePlanId(HttpClient httpClient) {
List<LocationOptionsQuery> locationOptions = new ArrayList<>();
Mono<Response<AreaCodes>> mono = this.getClientWithConnectionString(httpClient, "getAllAreaCodesWithResponseNullPhonePlanId")
.getAllAreaCodesWithResponse("selection", COUNTRY_CODE, null, locationOptions, Context.NONE);
StepVerifier.create(mono)
.verifyError(java.lang.RuntimeException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void updateCapabilitiesWithResponseNullPhoneNumberCapabilitiesUpdate(HttpClient httpClient) {
    // A null capabilities-update payload must fail the Mono with NullPointerException.
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "updateCapabilitiesWithResponseNullPhoneNumberCapabilitiesUpdate")
            .updateCapabilitiesWithResponse(null, Context.NONE))
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getReservationByIdWithResponseNullSearchId(HttpClient httpClient) {
    // A null reservation id must fail the Mono with NullPointerException.
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "getReservationByIdWithResponseNullSearchId")
            .getReservationByIdWithResponse(null, Context.NONE))
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void cancelReservationWithResponseNullReservationId(HttpClient httpClient) {
    // Cancelling with a null reservation id must fail with NullPointerException.
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "cancelReservationWithResponseNullReservationId")
            .cancelReservationWithResponse(null, Context.NONE))
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void configureNumberWithResponseNullPhoneNumber(HttpClient httpClient) {
    // Even with a valid PSTN configuration, a null phone number must fail with NPE.
    PstnConfiguration pstnConfig = new PstnConfiguration();
    pstnConfig.setApplicationId("ApplicationId");
    pstnConfig.setCallbackUrl("https://callbackurl");
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "configureNumberWithResponseNullPhoneNumber")
            .configureNumberWithResponse(null, pstnConfig, Context.NONE))
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void configureNumberWithResponseNullPstnConfig(HttpClient httpClient) {
    // A null PSTN configuration must fail the Mono with NullPointerException.
    PhoneNumberIdentifier numberToConfigure = new PhoneNumberIdentifier("PHONENUMBER_TO_CONFIGURE");
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "configureNumberWithResponseNullPstnConfig")
            .configureNumberWithResponse(numberToConfigure, null, Context.NONE))
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getNumberConfigurationWithResponseNullPhoneNumber(HttpClient httpClient) {
    // Fetching configuration for a null number must fail with NullPointerException.
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "getNumberConfigurationWithResponseNullPhoneNumber")
            .getNumberConfigurationWithResponse(null, Context.NONE))
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getCapabilitiesUpdateWithResponseNullCapabilitiesId(HttpClient httpClient) {
    // A null capabilities id must fail the Mono with NullPointerException.
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "getCapabilitiesUpdateWithResponseNullCapabilitiesId")
            .getCapabilitiesUpdateWithResponse(null, Context.NONE))
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void unconfigureNumberWithResponseNullPhoneNumber(HttpClient httpClient) {
    // Unconfiguring a null number must fail the Mono with NullPointerException.
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "unconfigureNumberWithResponseNullPhoneNumber")
            .unconfigureNumberWithResponse(null, Context.NONE))
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void listPhonePlanGroupsNullCountryCode(HttpClient httpClient) {
    // Listing plan groups without a country code must error with NullPointerException.
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "listPhonePlanGroupsNullCountryCode")
            .listPhonePlanGroups(null, LOCALE, true, Context.NONE))
        .verifyError(NullPointerException.class);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase#getHttpClients")
public void getReleaseByIdWithResponseNullReleaseId(HttpClient httpClient) {
    // A null release id must fail the Mono with NullPointerException.
    StepVerifier.create(
        this.getClientWithConnectionString(httpClient, "getReleaseByIdWithResponseNullReleaseId")
            .getReleaseByIdWithResponse(null, Context.NONE))
        .verifyError(NullPointerException.class);
}
private PollerFlux<PhoneNumberRelease, PhoneNumberRelease> beginReleasePhoneNumbers(HttpClient httpClient, String phoneNumber, String testName) {
    // Release a single phone number, polling the long-running operation once per second.
    List<PhoneNumberIdentifier> numbersToRelease = new ArrayList<>();
    numbersToRelease.add(new PhoneNumberIdentifier(phoneNumber));
    return this.getClientWithConnectionString(httpClient, testName)
        .beginReleasePhoneNumbers(numbersToRelease, Duration.ofSeconds(1));
}
private PollerFlux<PhoneNumberReservation, PhoneNumberReservation> beginCreateReservation(HttpClient httpClient, PhonePlan phonePlan, String testName) {
    // Reserve a single number from the given phone plan, polling once per second.
    List<String> planIds = new ArrayList<>();
    planIds.add(phonePlan.getPhonePlanId());
    CreateReservationOptions options = new CreateReservationOptions()
        .setAreaCode(AREA_CODE)
        .setDescription(RESERVATION_OPTIONS_DESCRIPTION)
        .setDisplayName(RESERVATION_OPTIONS_NAME)
        .setPhonePlanIds(planIds)
        .setQuantity(1);
    return this.getClientWithConnectionString(httpClient, testName)
        .beginCreateReservation(options, Duration.ofSeconds(1));
}
private PollerFlux<Void, Void> beginPurchaseReservation(HttpClient httpClient, String reservationId, String testName) {
    // Purchase the numbers held by the reservation, polling once per second.
    return this.getClientWithConnectionString(httpClient, testName)
        .beginPurchaseReservation(reservationId, Duration.ofSeconds(1));
}
}
|
import { Injectable } from '@angular/core';
import { Cacheable, CacheBuster } from 'ngx-cacheable';
import { Observable, from, Subject } from 'rxjs';
import { map, publishReplay, refCount } from 'rxjs/operators';
import { ApiService } from 'src/app/core/services/api.service';
import { AttributeCount } from '../models/attribute-count.model';
import { CategoryMappingsResponse } from '../models/category-mapping-response.model';
import { CategoryMapping } from '../models/category-mapping.model';
import { CustomSegment } from '../models/custom-segment.model';
import { EmployeeMappingsResponse } from '../models/employee-mapping-response.model';
import { EmployeeMapping } from '../models/employee-mapping.model';
import { ExpenseField } from '../models/expense-field.model';
import { GeneralMapping } from '../models/general-mapping.model';
import { GroupedDestinationAttributes } from '../models/grouped-destination-attributes';
import { MappingDestination } from '../models/mapping-destination.model';
import { MappingSource } from '../models/mapping-source.model';
import { MappingsResponse } from '../models/mappings-response.model';
import { Mapping } from '../models/mappings.model';
import { SubsidiaryMapping } from '../models/subsidiary-mapping.model';
import { WorkspaceService } from './workspace.service';
// Cache-buster notifiers for the ngx-cacheable decorators below: emitting on
// one of these invalidates the corresponding cached GET response
// (general mappings and subsidiary mappings respectively).
const generalMappingsCache = new Subject<void>();
const subsidiaryMappingCache$ = new Subject<void>();
/**
 * Data-access service for Fyle <-> NetSuite mapping resources.
 * All endpoints are scoped to the current workspace (via WorkspaceService).
 * A few expensive calls are memoized: the dimension syncs share a single
 * replayed observable, and general/subsidiary mappings use ngx-cacheable
 * decorators keyed to the module-level Subjects above.
 */
@Injectable({
  providedIn: 'root',
})
export class MappingsService {
  // Lazily-created, replayed sync observables (see syncNetSuiteDimensions /
  // syncFyleDimensions); all subscribers share one POST per service lifetime.
  destinationWorkspace: Observable<{}>;
  sourceWorkspace: Observable<{}>;
  constructor(
    private apiService: ApiService,
    private workspaceService: WorkspaceService) { }
  // Create a NetSuite custom segment in the current workspace.
  postNetsuiteCustomSegments(data: CustomSegment): Observable<CustomSegment> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.post(`/workspaces/${workspaceId}/netsuite/custom_segments/`, data);
  }
  // Trigger a NetSuite dimension sync. The observable is created once and
  // replayed, so concurrent/repeated callers share a single request.
  syncNetSuiteDimensions() {
    const workspaceId = this.workspaceService.getWorkspaceId();
    if (!this.destinationWorkspace) {
      this.destinationWorkspace = this.apiService.post(`/workspaces/${workspaceId}/netsuite/sync_dimensions/`, {}).pipe(
        // NOTE(review): map(data => data) is a no-op kept for parity with the
        // original implementation.
        map(data => data),
        publishReplay(1),
        refCount()
      );
    }
    return this.destinationWorkspace;
  }
  // Trigger a Fyle dimension sync; memoized exactly like syncNetSuiteDimensions.
  syncFyleDimensions() {
    const workspaceId = this.workspaceService.getWorkspaceId();
    if (!this.sourceWorkspace) {
      this.sourceWorkspace = this.apiService.post(`/workspaces/${workspaceId}/fyle/sync_dimensions/`, {}).pipe(
        map(data => data),
        publishReplay(1),
        refCount()
      );
    }
    return this.sourceWorkspace;
  }
  // Force a NetSuite refresh, optionally restricted to specific dimensions.
  refreshNetSuiteDimensions(dimensionsToSync: string[] = []) {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.post(`/workspaces/${workspaceId}/netsuite/refresh_dimensions/`, {
      dimensions_to_sync: dimensionsToSync
    });
  }
  // Fire-and-forget refresh of both NetSuite and Fyle dimensions
  // (subscribes immediately; results and errors are discarded).
  refreshDimension() {
    const workspaceId = this.workspaceService.getWorkspaceId();
    this.apiService.post(`/workspaces/${workspaceId}/netsuite/refresh_dimensions/`, {}).subscribe();
    this.apiService.post(`/workspaces/${workspaceId}/fyle/refresh_dimensions/`, {}).subscribe();
  }
  // List the Fyle expense fields available for mapping.
  getFyleFields(): Observable<ExpenseField[]> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(`/workspaces/${workspaceId}/fyle/fyle_fields/`, {});
  }
  // List the NetSuite fields available for mapping.
  getNetSuiteFields(): Observable<ExpenseField[]> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(`/workspaces/${workspaceId}/netsuite/netsuite_fields/`, {});
  }
  // Fetch Fyle expense attributes of a single attribute type.
  getFyleExpenseAttributes(attributeType: string): Observable<MappingSource[]> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(`/workspaces/${workspaceId}/fyle/expense_attributes/`, {
      attribute_type: attributeType
    });
  }
  // Fetch NetSuite destination attributes; accepts one type or several.
  getNetSuiteDestinationAttributes(attributeTypes: string | string[]): Observable<MappingDestination[]> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(`/workspaces/${workspaceId}/netsuite/destination_attributes/`, {
      attribute_types: attributeTypes
    });
  }
  // Count NetSuite attributes of the given type.
  getNetsuiteAttributesCount(attributeType: string): Observable<AttributeCount> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(`/workspaces/${workspaceId}/netsuite/attributes/count/`, {
      attribute_type: attributeType
    });
  }
  // Fetch destination attributes and bucket them by attribute_type.
  // The seed object guarantees every known type is present (possibly empty).
  getGroupedNetSuiteDestinationAttributes(attributeTypes: string[]): Observable<GroupedDestinationAttributes> {
    return from(this.getNetSuiteDestinationAttributes(attributeTypes).toPromise().then((response: MappingDestination[]) => {
      return response.reduce((groupedAttributes: GroupedDestinationAttributes, attribute: MappingDestination) => {
        const group: MappingDestination[] = groupedAttributes[attribute.attribute_type] || [];
        group.push(attribute);
        groupedAttributes[attribute.attribute_type] = group;
        return groupedAttributes;
      }, {
        VENDOR_PAYMENT_ACCOUNT: [],
        VENDOR: [],
        CLASS: [],
        ACCOUNTS_PAYABLE: [],
        EMPLOYEE: [],
        ACCOUNT: [],
        SUBSIDIARY: [],
        CURRENCY: [],
        DEPARTMENT: [],
        PROJECT: [],
        TAX_ITEM: [],
        LOCATION: [],
        EXPENSE_CATEGORY: [],
        BANK_ACCOUNT: [],
        CREDIT_CARD_ACCOUNT: [],
      });
    }));
  }
  // List the workspace's NetSuite custom segments.
  getNetsuiteExpenseSegments(): Observable<CustomSegment[]> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(`/workspaces/${workspaceId}/netsuite/custom_segments/`, {});
  }
  // Save general mappings and invalidate the cached GET below.
  @CacheBuster({
    cacheBusterNotifier: generalMappingsCache
  })
  postGeneralMappings(generalMappings: GeneralMapping): Observable<GeneralMapping> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.post(`/workspaces/${workspaceId}/mappings/general/`, generalMappings);
  }
  // Cached until postGeneralMappings fires the notifier.
  @Cacheable({
    cacheBusterObserver: generalMappingsCache
  })
  getGeneralMappings(): Observable<GeneralMapping> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(
      `/workspaces/${workspaceId}/mappings/general/`, {}
    );
  }
  // Cached until a subsidiary-mapping write fires the notifier.
  @Cacheable({
    cacheBusterObserver: subsidiaryMappingCache$
  })
  getSubsidiaryMappings(): Observable<SubsidiaryMapping> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(
      `/workspaces/${workspaceId}/mappings/subsidiaries/`, {}
    );
  }
  // Save subsidiary mappings and invalidate the cached GET above.
  @CacheBuster({
    cacheBusterNotifier: subsidiaryMappingCache$
  })
  postSubsidiaryMappings(subsidiaryMappingPayload: SubsidiaryMapping): Observable<SubsidiaryMapping> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.post(`/workspaces/${workspaceId}/mappings/subsidiaries/`, subsidiaryMappingPayload);
  }
  // Post country details; also busts the subsidiary-mapping cache.
  @CacheBuster({
    cacheBusterNotifier: subsidiaryMappingCache$
  })
  postCountryDetails(): Observable<SubsidiaryMapping> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.post(`/workspaces/${workspaceId}/mappings/post_country/`, {});
  }
  // Paginated mapping listing for a given source type.
  getMappings(pageLimit: number, pageOffset: number, sourceType: string, tableDimension: number = 2): Observable<MappingsResponse> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(
      `/workspaces/${workspaceId}/mappings/`, {
        source_type: sourceType,
        limit: pageLimit,
        offset: pageOffset,
        table_dimension: tableDimension
      }
    );
  }
  // Paginated employee-mapping listing.
  getEmployeeMappings(pageLimit: number, pageOffset: number): Observable<EmployeeMappingsResponse> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(
      `/workspaces/${workspaceId}/mappings/employee/`, {
        limit: pageLimit,
        offset: pageOffset
      }
    );
  }
  // Fetch every mapping of a source type by paging 500 at a time.
  getAllMappings(sourceType: string): Observable<MappingsResponse> {
    const limit = 500;
    const offset = 0;
    // tslint:disable-next-line: prefer-const
    let allMappingsResponse;
    return from(this.getAllMappingsInternal(limit, offset, sourceType, allMappingsResponse));
  }
  // Recursive pager: accumulates results until `count` entries are collected.
  private getAllMappingsInternal(limit: number, offset: number, sourceType: string, allMappingsResponse: MappingsResponse): Promise<MappingsResponse> {
    const that = this;
    return that.getMappings(limit, offset, sourceType).toPromise().then((expenseGroupRes) => {
      if (!allMappingsResponse) {
        allMappingsResponse = expenseGroupRes;
      } else {
        allMappingsResponse.results = allMappingsResponse.results.concat(expenseGroupRes.results);
      }
      if (allMappingsResponse.results.length < allMappingsResponse.count) {
        return that.getAllMappingsInternal(limit, offset + limit, sourceType, allMappingsResponse);
      } else {
        return allMappingsResponse;
      }
    });
  }
  // Create a single mapping row.
  postMappings(mapping: Mapping): Observable<Mapping> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.post(`/workspaces/${workspaceId}/mappings/`, mapping);
  }
  // Paginated category-mapping listing.
  getCategoryMappings(pageLimit: number, pageOffset: number): Observable<CategoryMappingsResponse> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.get(
      `/workspaces/${workspaceId}/mappings/category/`, {
        limit: pageLimit,
        offset: pageOffset
      }
    );
  }
  // Create a category mapping.
  postCategoryMappings(mapping: CategoryMapping): Observable<Mapping> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.post(`/workspaces/${workspaceId}/mappings/category/`, mapping);
  }
  // Kick off server-side auto-mapping of employees.
  triggerAutoMapEmployees() {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.post(`/workspaces/${workspaceId}/mappings/auto_map_employees/trigger/`, {});
  }
  // Create an employee mapping.
  postEmployeeMappings(employeeMapping: EmployeeMapping): Observable<EmployeeMapping> {
    const workspaceId = this.workspaceService.getWorkspaceId();
    return this.apiService.post(`/workspaces/${workspaceId}/mappings/employee/`, employeeMapping);
  }
}
|
// Find all users whose company is "Microsoft", projecting only the
// name and company fields (the _id field is still returned by default).
db.users.find(
  { "company": "Microsoft" },
  { "name": 1, "company": 1 }
)
<reponame>AlbertoHSande/ARKstocks
import requests
import pandas as pd
import csv
from datetime import date, timedelta
import numpy as np
from twython import Twython
import time
from bokeh.io import export_png, export_svgs
from bokeh.models import ColumnDataSource, DataTable, TableColumn, HTMLTemplateFormatter
from io import BytesIO
import cloudscraper
def save_df_as_image(df, path):
    """Render ``df`` as a Bokeh DataTable and export it as a PNG to ``path``.

    Cells in the 'Variation' column are colour-coded by the HTML template
    below: green for positive, red for negative, blue otherwise.
    """
    source = ColumnDataSource(df)
    # Column order for the table: the index column first, then every frame column.
    df_columns = [df.index.name]
    df_columns.extend(df.columns.values)
    columns_for_table=[]
    # Underscore.js cell template; 'Variation' refers to the row's value in
    # that column, '<%=value%>' is the cell being rendered.
    template="""
    <div style="color:<%=
        (function colorfromint(){
            if (Variation > 0)
                {return('green')}
            else if (Variation < 0)
                {return('red')}
            else
                {return('blue')}
            }())%>;">
        <%=value%>
    </div>
    """
    formatter = HTMLTemplateFormatter(template=template)
    # Only the 'Variation' column gets the colour formatter.
    for column in df_columns:
        if(column == 'Variation'):
            columns_for_table.append(TableColumn(field=column, title=column, formatter=formatter))
        else:
            columns_for_table.append(TableColumn(field=column, title=column))
    # ~26 px per row so the exported image shows every row without scrolling.
    full_height=(26*len(df.index))
    data_table = DataTable(source=source, columns=columns_for_table,fit_columns=True,height=full_height,width_policy="auto",index_position=None)
    export_png(data_table, filename = path)
# Daily-holdings CSV endpoints for each ARK ETF
# (ARKK, ARKQ, ARKW, ARKG, ARKF, ARKX — in that order).
urls = ['https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_INNOVATION_ETF_ARKK_HOLDINGS.csv',
        'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_AUTONOMOUS_TECHNOLOGY_&_ROBOTICS_ETF_ARKQ_HOLDINGS.csv',
        'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_NEXT_GENERATION_INTERNET_ETF_ARKW_HOLDINGS.csv',
        'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_GENOMIC_REVOLUTION_MULTISECTOR_ETF_ARKG_HOLDINGS.csv',
        'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_FINTECH_INNOVATION_ETF_ARKF_HOLDINGS.csv',
        'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_SPACE_EXPLORATION_&_INNOVATION_ETF_ARKX_HOLDINGS.csv']
#twitter
# Twitter API credentials — intentionally blank in the repository.
# NOTE(review): fill these in (or load them from the environment) before
# running; with empty strings every tweet call will fail authentication.
api_key = ''
api_secret = ''
access_token = ''
secret_token = ''
twitter = Twython(
    api_key,
    api_secret,
    access_token,
    secret_token
)
#Get current date
today = date.today()
# How many days back the comparison snapshot is (the previous trading day).
delta = 1
#If today is Monday, we need to compare to Friday
# NOTE(review): weekend runs are not handled — on Sunday this would compare
# against Saturday, for which no snapshot exists; presumably the script is
# scheduled on weekdays only. Confirm with the deployment cron.
if date.today().weekday() == 0:
    delta = 3
yesterday = date.today() - timedelta(delta)
# For each fund: download today's holdings CSV, diff it against yesterday's
# snapshot, build a per-fund summary table, and tweet it as an image.
for csv_url in urls:
    # The ARK site sits behind Cloudflare; cloudscraper gets us through.
    scraper = cloudscraper.create_scraper()
    req = scraper.get(csv_url)
    url_content = req.content
    # Keep a raw copy on disk (with-block closes the handle even on error).
    with open('/tmp/downloaded.csv', 'wb') as downloaded:
        downloaded.write(url_content)
    # BUG FIX: the original called pd.read_csv(url_content) with the raw
    # response bytes, which read_csv cannot parse; wrap them in BytesIO
    # (already imported at the top of the file) instead.
    df = pd.read_csv(BytesIO(url_content))
    print(df.fund.iloc[0])
    # Snapshot today's holdings and load yesterday's snapshot for comparison.
    df.to_csv('/tmp/'+df.fund.iloc[0]+'-'+today.strftime("%b-%d-%Y")+'.csv')
    df_yesterday = pd.read_csv('/tmp/'+df.fund.iloc[0]+'-'+yesterday.strftime("%b-%d-%Y")+'.csv')
    # Inner-join on ticker: *_x columns are yesterday's values, *_y today's.
    merged = pd.merge(df_yesterday, df, on='ticker')
    # Both branches of the original np.where calls were identical, so the
    # conditions were no-ops; compute the share delta and percentage directly.
    merged['variation'] = merged['shares_y'] - merged['shares_x']
    merged['pct'] = (merged['variation'] * 100) / merged['shares_x']
    # Accumulator for the rows of the tweeted table.
    df_final = pd.DataFrame({
        "Company":[],
        "Ticker":[],
        "Total shares":[],
        "Change":[],
        "Variation":[],
        "%Variation":[]
    })
    hashtags = ' #ARKinsights #ARKstocks #ARKinvest #CathieWood #investing #stock #market'
    # Increased / decreased positions.
    for index, row in merged.iterrows():
        try:
            if row.variation > 0:
                # msg is never tweeted; building it is kept because
                # int(row.variation) doubles as a NaN guard that skips
                # malformed rows via the except below.
                msg = row.fund_x + ' increases $' + row.ticker + ' position by ' + str(np.abs(int(row.variation))) + ' shares for a total of ' + str(np.abs(int(merged.shares_y.iloc[index]))) + hashtags
                if (len(row.ticker) >= 1):
                    new_row = pd.DataFrame({
                        "Company":[row.company_x],
                        "Ticker":[row.ticker],
                        "Total shares":[np.abs(int(row.shares_y))],
                        "Change":['BUY'],
                        "Variation":[row.variation],
                        "%Variation":[row.pct]
                    })
                    # pd.concat replaces DataFrame.append (deprecated and
                    # removed in pandas 2.0); result is identical.
                    df_final = pd.concat([df_final, new_row])
            elif row.variation < 0:
                msg = row.fund_x + ' decreases $' + row.ticker + ' position by ' + str(np.abs(int(row.variation))) + ' shares for a total of ' + str(np.abs(int(merged.shares_y.iloc[index]))) + hashtags
                if (len(row.ticker) >= 1):
                    new_row = pd.DataFrame({
                        "Company":[row.company_x],
                        "Ticker":[row.ticker],
                        "Total shares":[np.abs(int(row.shares_y))],
                        "Change":['SELL'],
                        "Variation":[row.variation],
                        "%Variation":[row.pct]
                    })
                    df_final = pd.concat([df_final, new_row])
        except Exception:
            print('ENDED')
    # Tickers present only yesterday (closed) or only today (new).
    df_closed = df.merge(df_yesterday, on=['ticker'], how = 'outer' ,indicator=True).loc[lambda x : x['_merge']=='right_only']
    df_new = df.merge(df_yesterday, on=['ticker'], how = 'outer' ,indicator=True).loc[lambda x : x['_merge']=='left_only']
    for index, row in df_closed.iterrows():
        try:
            if (len(row.ticker) >= 1):
                # BUG FIX: df_closed has no 'variation'/'pct' columns, and for
                # right_only rows every *_x (today) column is NaN, so the
                # original row.company_x / row.variation lookups always raised
                # and the bare except silently dropped every closed position.
                # Use yesterday's (*_y) columns; a close loses 100% of the
                # prior holding.
                shares_held = np.abs(int(row.shares_y))
                new_row = pd.DataFrame({
                    "Company":[row.company_y],
                    "Ticker":[row.ticker],
                    "Total shares":[shares_held],
                    "Change":['CLOSED POSITION'],
                    "Variation":[-shares_held],
                    "%Variation":[-100.0]
                })
                df_final = pd.concat([df_final, new_row])
        except Exception:
            print('ENDED')
    for index, row in df_new.iterrows():
        try:
            if (len(row.ticker) >= 1):
                # New position: the whole of today's holding is the change.
                # 100.0 is a sentinel — a %-change from zero is undefined.
                shares_bought = np.abs(int(row.shares_x))
                new_row = pd.DataFrame({
                    "Company":[row.company_x],
                    "Ticker":[row.ticker],
                    "Total shares":[shares_bought],
                    "Change":['NEW POSITION'],
                    "Variation":[shares_bought],
                    "%Variation":[100.0]
                })
                df_final = pd.concat([df_final, new_row])
        except Exception:
            print('ENDED')
    try:
        # Render the table and tweet it; any failure (e.g. nothing to render)
        # falls through to the "no changes" tweet below.
        df_final.set_index('Ticker', inplace=True)
        save_df_as_image(df_final, '/tmp/'+df.fund.iloc[0]+'.png')
        with open('/tmp/'+df.fund.iloc[0]+'.png', 'rb') as image:
            image_ids = twitter.upload_media(media=image)
        twitter.update_status(status='Daily report about ' + df.fund.iloc[0] + ' changes. ' + hashtags, media_ids=[image_ids['media_id']])
    except Exception:
        twitter.update_status(status="I didn't detect any changes in " + df.fund.iloc[0] + '. ' + hashtags)
|
<reponame>kdaemonv/arquillian-cube
package org.arquillian.cube.kubernetes.impl.enricher.internal;
import java.lang.annotation.Annotation;
import java.util.Map;
import io.fabric8.kubernetes.api.model.v3_1.ReplicationControllerList;
import org.arquillian.cube.kubernetes.impl.enricher.AbstractKubernetesResourceProvider;
import org.jboss.arquillian.test.api.ArquillianResource;
import org.jboss.arquillian.test.spi.enricher.resource.ResourceProvider;
/**
 * A {@link ResourceProvider} that resolves {@link ReplicationControllerList} injection
 * points with the replication controllers created during the current session, optionally
 * narrowed by label qualifiers on the injection point.
 */
public class ReplicationControllerListResourceProvider extends AbstractKubernetesResourceProvider {

    @Override
    public boolean canProvide(Class<?> type) {
        return ReplicationControllerList.class.isAssignableFrom(type);
    }

    @Override
    public Object lookup(ArquillianResource resource, Annotation... qualifiers) {
        // Always scope the query to the session namespace; add a label
        // selector only when the qualifiers actually carry labels.
        final Map<String, String> labels = getLabels(qualifiers);
        return labels.isEmpty()
            ? getClient().replicationControllers().inNamespace(getSession().getNamespace()).list()
            : getClient().replicationControllers().inNamespace(getSession().getNamespace()).withLabels(labels).list();
    }
}
|
#!/bin/sh
# Download build logs, JTR work files, and console output for a Jenkins job.
#   $1 - base Jenkins job URL
#   $2 - destination directory (also used as a label in progress messages)

# Quote "$2" so directories with spaces work; -p makes creation idempotent.
if [ ! -d "$2" ]; then
    mkdir -p "$2"
fi
# Abort if we cannot enter the directory instead of downloading into the CWD.
cd "$2" || exit 1
echo "downloading logs for $2"
# Quote the URLs: '*zip*' is a literal Jenkins path segment, and an unquoted
# URL could otherwise be mangled by shell globbing against local files.
curl -O "$1/ws/work/logs/*zip*/logs.zip"
echo "download jtr work files for $2"
curl -O "$1/ws/work/javaeetck/bin/JTwork/com/sun/ts/tests/*zip*/tests.zip"
echo "downloading consoleText for $2"
curl -O "$1/lastCompletedBuild/consoleText"
unzip -o logs.zip
unzip -o tests.zip
echo "processing complete for $2"
|
#!/usr/bin/env bash
set -ex -o pipefail -o errtrace -o functrace
# Error handler wired to the traps below: reports the exit status ($1)
# and the offending line number ($2), then aborts the whole script.
function catch() {
    echo "error $1 on line $2"
    exit 255
}
trap 'catch $? $LINENO' ERR TERM INT
# build-manifests is designed to populate the deploy directory
# with all of the manifests necessary for use in development
# and for consumption with the operator-lifecycle-manager.
#
# First, we create a temporary directory and fill it with
# all of the component operator's ClusterServiceVersion (CSV for OLM)
# and CustomResourceDefinitions (CRDs); being sure to copy the CRDs
# into the deploy/crds directory.
#
# The CSV manifests contain all of the information we need to 1) generate
# a combined CSV and 2) other development related manifests (like the
# operator deployment + rbac).
#
# Second, we pass all of the component CSVs off to the manifest-templator
# that handles the deployment specs, service account names, permissions, and
# clusterPermissions by converting them into their corresponding Kubernetes
# manifests (ie. permissions + serviceAccountName = role + service account
# + role binding) before writing them to disk.
#
# Lastly, we give the component CSVs to the csv-merger, which combines all
# of the manifests into a single, unified, ClusterServiceVersion.
# Resolve an image reference to its digest-pinned form via the digester tool,
# building the tool on first use. Both arguments are forwarded to the
# digester's -image invocation ($1 appears to be a name/key, $2 the image ref
# — TODO confirm against tools/digester).
function get_image_digest() {
    if [[ ! -f ${PROJECT_ROOT}/tools/digester/digester ]]; then
        # Build in a subshell so the caller's CWD is untouched.
        (
            cd "${PROJECT_ROOT}/tools/digester"
            go build .
        )
    fi
    local image
    image=$("${PROJECT_ROOT}/tools/digester/digester" -image "$1" "$2")
    echo "${image}"
}
# Repository root, resolved relative to this script's own location.
PROJECT_ROOT="$(readlink -e $(dirname "${BASH_SOURCE[0]}")/../)"
# Pull in component versions and image references (defines CSV_VERSION,
# the *_IMAGE variables, etc. — presumably also DIGEST_LIST; verify there).
source "${PROJECT_ROOT}"/hack/config
source "${PROJECT_ROOT}"/deploy/images.env
# HCO operator/webhook images default to the unstable tag for CSV_VERSION.
HCO_OPERATOR_IMAGE=${HCO_OPERATOR_IMAGE:-quay.io/kubevirt/hyperconverged-cluster-operator:${CSV_VERSION}-unstable}
HCO_WEBHOOK_IMAGE=${HCO_WEBHOOK_IMAGE:-quay.io/kubevirt/hyperconverged-cluster-webhook:${CSV_VERSION}-unstable}
# Append the HCO images as "<image>|<name>" pairs to the digest list.
DIGEST_LIST="${DIGEST_LIST},${HCO_OPERATOR_IMAGE}|hyperconverged-cluster-operator,${HCO_WEBHOOK_IMAGE}|hyperconverged-cluster-webhook"
# Output directory layout.
DEPLOY_DIR="${PROJECT_ROOT}/deploy"
CRD_DIR="${DEPLOY_DIR}/crds"
OLM_DIR="${DEPLOY_DIR}/olm-catalog"
# No-op self-assignment; CSV_VERSION is expected from the sourced config.
CSV_VERSION=${CSV_VERSION}
CSV_TIMESTAMP=$(date +%Y%m%d%H%M -u)
PACKAGE_NAME="community-kubevirt-hyperconverged"
CSV_DIR="${OLM_DIR}/${PACKAGE_NAME}/${CSV_VERSION}"
# csv-generator entrypoints inside the component images (SSP's differs).
DEFAULT_CSV_GENERATOR="/usr/bin/csv-generator"
SSP_CSV_GENERATOR="/csv-generator"
INDEX_IMAGE_DIR=${DEPLOY_DIR}/index-image
CSV_INDEX_IMAGE_DIR="${INDEX_IMAGE_DIR}/${PACKAGE_NAME}/${CSV_VERSION}"
OPERATOR_NAME="${OPERATOR_NAME:-kubevirt-hyperconverged-operator}"
OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-kubevirt-hyperconverged}"
IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-IfNotPresent}"
# Important extensions
CSV_EXT="clusterserviceversion.yaml"
CSV_CRD_EXT="csv_crds.yaml"
CRD_EXT="crd.yaml"
# Run a component's csv-generator twice (without and with CRD dumping) and
# split the CRD-only difference into one <operatorName>NN.crd.yaml per CRD.
#   $1 - path of the csv-generator binary inside the operator image
#   $2 - operator name, used as the output file prefix
#   $3 - operator image to run
#   $4 - flag that makes the generator also dump CRDs
#   $@ - remaining args forwarded to the generator
function gen_csv() {
    # Handle arguments
    local csvGeneratorPath="$1" && shift
    local operatorName="$1" && shift
    local imagePullUrl="$1" && shift
    local dumpCRDsArg="$1" && shift
    local operatorArgs="$@"
    # Handle important vars
    local csv="${operatorName}.${CSV_EXT}"
    local csvWithCRDs="${operatorName}.${CSV_CRD_EXT}"
    local crds="${operatorName}.crds.yaml"
    # TODO: Use oc to run if cluster is available
    local dockerArgs="docker run --rm --entrypoint=${csvGeneratorPath} ${imagePullUrl} ${operatorArgs}"
    eval $dockerArgs > $csv
    eval $dockerArgs $dumpCRDsArg > $csvWithCRDs
    # The '+'-prefixed diff lines are exactly the CRDs that only appear with
    # the dump flag; strip the prefix and the diff header to isolate them.
    # diff returns 1 when there is a diff, and there is always diff here. Added `|| :` to cancel trap here.
    diff -u $csv $csvWithCRDs | grep -E "^\+" | sed -E 's/^\+//' | tail -n+2 > $crds || :
    # One numbered .crd.yaml file per '---'-separated document.
    csplit --digits=2 --quiet --elide-empty-files \
        --prefix="${operatorName}" \
        --suffix-format="%02d.${CRD_EXT}" \
        $crds \
        "/^---$/" "{*}"
}
# Generate the KubeVirt CSV/CRDs; echoes the operator name so the caller can
# locate the generated files.
function create_virt_csv() {
    local apiSha
    local controllerSha
    local launcherSha
    local handlerSha
    # ${VAR/*@/} keeps only the digest portion of each digest-pinned image ref.
    apiSha="${KUBEVIRT_API_IMAGE/*@/}"
    controllerSha="${KUBEVIRT_CONTROLLER_IMAGE/*@/}"
    launcherSha="${KUBEVIRT_LAUNCHER_IMAGE/*@/}"
    handlerSha="${KUBEVIRT_HANDLER_IMAGE/*@/}"
    local operatorName="kubevirt"
    local dumpCRDsArg="--dumpCRDs"
    local operatorArgs
    operatorArgs=" \
        --namespace=${OPERATOR_NAMESPACE} \
        --csvVersion=${CSV_VERSION} \
        --operatorImageVersion=${KUBEVIRT_OPERATOR_IMAGE/*@/} \
        --dockerPrefix=${KUBEVIRT_OPERATOR_IMAGE%\/*} \
        --kubeVirtVersion=${KUBEVIRT_VERSION} \
        --apiSha=${apiSha} \
        --controllerSha=${controllerSha} \
        --handlerSha=${handlerSha} \
        --launcherSha=${launcherSha} \
        "
    gen_csv "${DEFAULT_CSV_GENERATOR}" "${operatorName}" "${KUBEVIRT_OPERATOR_IMAGE}" "${dumpCRDsArg}" "${operatorArgs}"
    echo "${operatorName}"
}
# Generate the cluster-network-addons CSV/CRDs; echoes the operator name so
# the caller can locate the generated files.
function create_cna_csv() {
    local operatorName="cluster-network-addons"
    local dumpCRDsArg="--dump-crds"
    # Split the image ref into its registry prefix and bare image name;
    # the tag is extracted inline below (the original declared an unused
    # local 'tag' for it, removed here).
    local containerPrefix="${CNA_OPERATOR_IMAGE%/*}"
    local imageName="${CNA_OPERATOR_IMAGE#${containerPrefix}/}"
    local operatorArgs=" \
        --namespace=${OPERATOR_NAMESPACE} \
        --version=${CSV_VERSION} \
        --version-replaces=${REPLACES_VERSION} \
        --image-pull-policy=IfNotPresent \
        --operator-version=${NETWORK_ADDONS_VERSION} \
        --container-tag=${CNA_OPERATOR_IMAGE/*:/} \
        --container-prefix=${containerPrefix} \
        --image-name=${imageName/:*/}
    "
    gen_csv ${DEFAULT_CSV_GENERATOR} ${operatorName} "${CNA_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
    echo "${operatorName}"
}
# Generate the SSP CSV/CRDs (its generator lives at a non-default path);
# echoes the operator name so the caller can locate the generated files.
function create_ssp_csv() {
    local operatorName="scheduling-scale-performance"
    local dumpCRDsArg="--dump-crds"
    local operatorArgs=" \
        --namespace=${OPERATOR_NAMESPACE} \
        --csv-version=${CSV_VERSION} \
        --operator-image=${SSP_OPERATOR_IMAGE} \
        --operator-version=${SSP_VERSION} \
        "
    gen_csv ${SSP_CSV_GENERATOR} ${operatorName} "${SSP_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
    echo "${operatorName}"
}
# Generate the CDI CSV/CRDs, passing every CDI component image explicitly;
# echoes the operator name so the caller can locate the generated files.
function create_cdi_csv() {
    local operatorName="containerized-data-importer"
    local dumpCRDsArg="--dump-crds"
    local operatorArgs=" \
        --namespace=${OPERATOR_NAMESPACE} \
        --csv-version=${CSV_VERSION} \
        --pull-policy=IfNotPresent \
        --operator-image=${CDI_OPERATOR_IMAGE} \
        --controller-image=${CDI_CONTROLLER_IMAGE} \
        --apiserver-image=${CDI_APISERVER_IMAGE} \
        --cloner-image=${CDI_CLONER_IMAGE} \
        --importer-image=${CDI_IMPORTER_IMAGE} \
        --uploadproxy-image=${CDI_UPLOADPROXY_IMAGE} \
        --uploadserver-image=${CDI_UPLOADSERVER_IMAGE} \
        --operator-version=${CDI_VERSION} \
        "
    gen_csv ${DEFAULT_CSV_GENERATOR} ${operatorName} "${CDI_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
    echo "${operatorName}"
}
# Generate the node-maintenance CSV/CRDs (generator at yet another path);
# echoes the operator name so the caller can locate the generated files.
function create_nmo_csv() {
    local operatorName="node-maintenance"
    local dumpCRDsArg="--dump-crds"
    local operatorArgs=" \
        --namespace=${OPERATOR_NAMESPACE} \
        --csv-version=${CSV_VERSION} \
        --operator-image=${NMO_IMAGE} \
        "
    local csvGeneratorPath="/usr/local/bin/csv-generator"
    gen_csv ${csvGeneratorPath} ${operatorName} "${NMO_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
    echo "${operatorName}"
}
# Generate the hostpath-provisioner CSV/CRDs; echoes the operator name so the
# caller can locate the generated files.
function create_hpp_csv() {
    local operatorName="hostpath-provisioner"
    local dumpCRDsArg="--dump-crds"
    local operatorArgs=" \
        --csv-version=${CSV_VERSION} \
        --operator-image-name=${HPPO_IMAGE} \
        --provisioner-image-name=${HPP_IMAGE} \
        --namespace=${OPERATOR_NAMESPACE} \
        --pull-policy=IfNotPresent \
        "
    gen_csv ${DEFAULT_CSV_GENERATOR} ${operatorName} "${HPPO_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
    echo "${operatorName}"
}
# Generate the CSV manifest for the VM import operator.
# Uses globals: CSV_VERSION, VM_IMPORT_VERSION, the VMIMPORT_*_IMAGE set,
# OPERATOR_NAMESPACE, DEFAULT_CSV_GENERATOR. Echoes the operator name for
# file-name derivation.
# FIX: dropped the unused local `containerPrefix` the original computed but
# never referenced.
function create_vm_import_csv() {
local operatorName="vm-import-operator"
local dumpCRDsArg="--dump-crds"
local operatorArgs=" \
--csv-version=${CSV_VERSION} \
--operator-version=${VM_IMPORT_VERSION} \
--operator-image=${VMIMPORT_OPERATOR_IMAGE} \
--controller-image=${VMIMPORT_CONTROLLER_IMAGE} \
--namespace=${OPERATOR_NAMESPACE} \
--virtv2v-image=${VMIMPORT_VIRTV2V_IMAGE} \
--pull-policy=IfNotPresent \
"
gen_csv ${DEFAULT_CSV_GENERATOR} ${operatorName} "${VMIMPORT_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
echo "${operatorName}"
}
# Work in a throwaway directory; each create_*_csv writes its CSV there and
# echoes the operator name used in the file name.
TEMPDIR=$(mktemp -d) || (echo "Failed to create temp directory" && exit 1)
pushd $TEMPDIR
virtFile=$(create_virt_csv)
virtCsv="${TEMPDIR}/${virtFile}.${CSV_EXT}"
cnaFile=$(create_cna_csv)
cnaCsv="${TEMPDIR}/${cnaFile}.${CSV_EXT}"
sspFile=$(create_ssp_csv)
sspCsv="${TEMPDIR}/${sspFile}.${CSV_EXT}"
cdiFile=$(create_cdi_csv)
cdiCsv="${TEMPDIR}/${cdiFile}.${CSV_EXT}"
nmoFile=$(create_nmo_csv)
nmoCsv="${TEMPDIR}/${nmoFile}.${CSV_EXT}"
# FIX: variable was misspelled `hhpFile`; renamed to `hppFile` to match the
# hpp naming used everywhere else (behavior unchanged, it was self-consistent).
hppFile=$(create_hpp_csv)
hppCsv="${TEMPDIR}/${hppFile}.${CSV_EXT}"
vmImportFile=$(create_vm_import_csv)
importCsv="${TEMPDIR}/${vmImportFile}.${CSV_EXT}"
csvOverrides="${TEMPDIR}/csv_overrides.${CSV_EXT}"
# These keywords are injected via --csv-overrides and asserted against the
# rendered CSV further below.
keywords=" keywords:
- KubeVirt
- Virtualization
- VM"
cat > ${csvOverrides} <<- EOM
---
spec:
$keywords
EOM
# Write HCO CRDs
(cd ${PROJECT_ROOT}/tools/csv-merger/ && go build)
hco_crds=${TEMPDIR}/hco.crds.yaml
(cd ${PROJECT_ROOT} && ${PROJECT_ROOT}/tools/csv-merger/csv-merger --api-sources=${PROJECT_ROOT}/pkg/apis/... --output-mode=CRDs > $hco_crds)
# Split the multi-document CRD yaml into one file per CRD (hco00.<ext>, ...).
csplit --digits=2 --quiet --elide-empty-files \
--prefix=hco \
--suffix-format="%02d.${CRD_EXT}" \
$hco_crds \
"/^---$/" "{*}"
popd
# Rebuild the bundle layout (metadata/ + manifests/) from scratch.
rm -fr "${CSV_DIR}"
mkdir -p "${CSV_DIR}/metadata" "${CSV_DIR}/manifests"
cat << EOF > "${CSV_DIR}/metadata/annotations.yaml"
annotations:
operators.operatorframework.io.bundle.channel.default.v1: ${CSV_VERSION}
operators.operatorframework.io.bundle.channels.v1: ${CSV_VERSION}
operators.operatorframework.io.bundle.manifests.v1: manifests/
operators.operatorframework.io.bundle.mediatype.v1: registry+v1
operators.operatorframework.io.bundle.metadata.v1: metadata/
operators.operatorframework.io.bundle.package.v1: ${PACKAGE_NAME}
EOF
# SMBIOS defaults baked into the merged CSV for KubeVirt guests.
SMBIOS=$(cat <<- EOM
Family: KubeVirt
Manufacturer: KubeVirt
Product: None
EOM
)
# validate CSVs. Make sure each one of them contain an image (and so, also not empty):
# NOTE(review): this relies on grep's non-zero exit aborting the script, i.e.
# on `set -e` being enabled earlier in the file — confirm.
csvs=("${cnaCsv}" "${virtCsv}" "${sspCsv}" "${cdiCsv}" "${nmoCsv}" "${hppCsv}" "${importCsv}")
for csv in "${csvs[@]}"; do
grep -E "^ *image: [a-zA-Z0-9/\.:@\-]+$" ${csv}
done
# Build and write deploy dir
(cd ${PROJECT_ROOT}/tools/manifest-templator/ && go build)
# Feed every component CSV plus version/image metadata into the templator,
# which renders the deploy manifests.
${PROJECT_ROOT}/tools/manifest-templator/manifest-templator \
--api-sources=${PROJECT_ROOT}/pkg/apis/... \
--cna-csv="$(<${cnaCsv})" \
--virt-csv="$(<${virtCsv})" \
--ssp-csv="$(<${sspCsv})" \
--cdi-csv="$(<${cdiCsv})" \
--nmo-csv="$(<${nmoCsv})" \
--hpp-csv="$(<${hppCsv})" \
--vmimport-csv="$(<${importCsv})" \
--ims-conversion-image-name="${CONVERSION_IMAGE}" \
--ims-vmware-image-name="${VMWARE_IMAGE}" \
--kv-virtiowin-image-name="${KUBEVIRT_VIRTIO_IMAGE}" \
--operator-namespace="${OPERATOR_NAMESPACE}" \
--smbios="${SMBIOS}" \
--hco-kv-io-version="${CSV_VERSION}" \
--kubevirt-version="${KUBEVIRT_VERSION}" \
--cdi-version="${CDI_VERSION}" \
--cnao-version="${NETWORK_ADDONS_VERSION}" \
--ssp-version="${SSP_VERSION}" \
--nmo-version="${NMO_VERSION}" \
--hppo-version="${HPPO_VERSION}" \
--vm-import-version="${VM_IMPORT_VERSION}" \
--operator-image="${HCO_OPERATOR_IMAGE}" \
--webhook-image="${HCO_WEBHOOK_IMAGE}"
(cd ${PROJECT_ROOT}/tools/manifest-templator/ && go clean)
# "UNIQUE" mode appends a timestamp to the CSV version so repeated CI runs
# produce distinct, upgradeable CSVs.
if [[ "$1" == "UNIQUE" ]]; then
CSV_VERSION_PARAM=${CSV_VERSION}-${CSV_TIMESTAMP}
ENABLE_UNIQUE="true"
else
CSV_VERSION_PARAM=${CSV_VERSION}
ENABLE_UNIQUE="false"
fi
# Build and merge CSVs
CSV_DIR=${CSV_DIR}/manifests
# Merge every component CSV into the single unified HCO CSV; the overrides
# file (spec.keywords) is applied last.
${PROJECT_ROOT}/tools/csv-merger/csv-merger \
--cna-csv="$(<${cnaCsv})" \
--virt-csv="$(<${virtCsv})" \
--ssp-csv="$(<${sspCsv})" \
--cdi-csv="$(<${cdiCsv})" \
--nmo-csv="$(<${nmoCsv})" \
--hpp-csv="$(<${hppCsv})" \
--vmimport-csv="$(<${importCsv})" \
--ims-conversion-image-name="${CONVERSION_IMAGE}" \
--ims-vmware-image-name="${VMWARE_IMAGE}" \
--kv-virtiowin-image-name="${KUBEVIRT_VIRTIO_IMAGE}" \
--csv-version=${CSV_VERSION_PARAM} \
--replaces-csv-version=${REPLACES_CSV_VERSION} \
--hco-kv-io-version="${CSV_VERSION}" \
--spec-displayname="KubeVirt HyperConverged Cluster Operator" \
--spec-description="$(<${PROJECT_ROOT}/docs/operator_description.md)" \
--metadata-description="A unified operator deploying and controlling KubeVirt and its supporting operators with opinionated defaults" \
--crd-display="HyperConverged Cluster Operator" \
--smbios="${SMBIOS}" \
--csv-overrides="$(<${csvOverrides})" \
--enable-unique-version=${ENABLE_UNIQUE} \
--kubevirt-version="${KUBEVIRT_VERSION}" \
--cdi-version="${CDI_VERSION}" \
--cnao-version="${NETWORK_ADDONS_VERSION}" \
--ssp-version="${SSP_VERSION}" \
--nmo-version="${NMO_VERSION}" \
--hppo-version="${HPPO_VERSION}" \
--vm-import-version="${VM_IMPORT_VERSION}" \
--related-images-list="${DIGEST_LIST}" \
--operator-image-name="${HCO_OPERATOR_IMAGE}" \
--webhook-image-name="${HCO_WEBHOOK_IMAGE}" > "${CSV_DIR}/${OPERATOR_NAME}.v${CSV_VERSION}.${CSV_EXT}"
rendered_csv="$(cat "${CSV_DIR}/${OPERATOR_NAME}.v${CSV_VERSION}.${CSV_EXT}")"
rendered_keywords="$(echo "$rendered_csv" |grep 'keywords' -A 3)"
# assert that --csv-overrides work
# NOTE(review): the bare test relies on `set -e` aborting on mismatch — confirm.
[ "$keywords" == "$rendered_keywords" ]
# TODO: remove this once https://github.com/operator-framework/enhancements/pull/40 got fixed
# Pre-process all CRDs to drop description on older versions to keep the bundle small
for FILE in ${TEMPDIR}/*.${CRD_EXT}
do
hack/strip_old_descriptions.py -i ${FILE} > ${FILE}.filtered
# Only keep the filtered file when stripping produced non-empty output.
if [[ -s "${FILE}.filtered" ]]
then
mv ${FILE}.filtered ${FILE}
else
rm ${FILE}.filtered
fi
done
# Copy all CRDs into the CRD and CSV directories
rm -f ${CRD_DIR}/*
cp -f ${TEMPDIR}/*.${CRD_EXT} ${CRD_DIR}
cp -f ${TEMPDIR}/*.${CRD_EXT} ${CSV_DIR}
# Validate the yaml files
(cd ${CRD_DIR} && docker run --rm -v "$(pwd)":/yaml quay.io/pusher/yamllint yamllint -d "{extends: relaxed, rules: {line-length: disable}}" /yaml)
(cd ${CSV_DIR} && docker run --rm -v "$(pwd)":/yaml quay.io/pusher/yamllint yamllint -d "{extends: relaxed, rules: {line-length: disable}}" /yaml)
# Check there are not API Groups overlap between different CNV operators
${PROJECT_ROOT}/tools/csv-merger/csv-merger --crds-dir=${CRD_DIR}
(cd ${PROJECT_ROOT}/tools/csv-merger/ && go clean)
if [[ "$1" == "UNIQUE" ]]; then
# Add the current CSV_TIMESTAMP to the currentCSV in the packages file
sed -Ei "s/(currentCSV: ${OPERATOR_NAME}.v${CSV_VERSION}).*/\1-${CSV_TIMESTAMP}/" \
${PACKAGE_DIR}/kubevirt-hyperconverged.package.yaml
fi
# Intentionally removing last so failure leaves around the templates
rm -rf ${TEMPDIR}
# If the only change in the CSV file is its "created_at" field, rollback this change as it causes git conflicts for
# no good reason.
CSV_FILE="${CSV_DIR}/kubevirt-hyperconverged-operator.v${CSV_VERSION}.${CSV_EXT}"
if git difftool -y --trust-exit-code --extcmd=./hack/diff-csv.sh ${CSV_FILE}; then
git checkout ${CSV_FILE}
fi
# TODO: remove this once https://github.com/operator-framework/enhancements/pull/40 got fixed
# Check bundle size
[[ $(find ${CSV_DIR} -type d -size -1048576c 2>/dev/null) ]] && echo "Acceptable bundle size" || (echo "The bundle is too big" && exit 1)
# Prepare files for index-image files that will be used for testing in openshift CI
rm -rf "${INDEX_IMAGE_DIR:?}"
mkdir -p "${INDEX_IMAGE_DIR:?}/${PACKAGE_NAME}"
cp -r "${CSV_DIR%/*}" "${INDEX_IMAGE_DIR:?}/${PACKAGE_NAME}/"
cp "${OLM_DIR}/bundle.Dockerfile" "${INDEX_IMAGE_DIR:?}/"
cp "${OLM_DIR}/Dockerfile.bundle.ci-index-image-upgrade" "${INDEX_IMAGE_DIR:?}/"
INDEX_IMAGE_CSV="${INDEX_IMAGE_DIR}/${PACKAGE_NAME}/${CSV_VERSION}/manifests/kubevirt-hyperconverged-operator.v${CSV_VERSION}.${CSV_EXT}"
# Pin createdAt and replace the operator/webhook images with placeholders so
# CI can substitute its own images into the index-image CSV.
sed -r -i "s|createdAt: \".*\$|createdAt: \"2020-10-23 08:58:25\"|; s|quay.io/kubevirt/hyperconverged-cluster-operator.*$|+IMAGE_TO_REPLACE+|; s|quay.io/kubevirt/hyperconverged-cluster-webhook.*$|+WEBHOOK_IMAGE_TO_REPLACE+|" ${INDEX_IMAGE_CSV}
|
package net.pds;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.ArrayList;
import net.utility.Utility;
import DBPKG.DBOpen;
//Data-access object for the photo board table tb_pds: insert, list, read,
//hit-count increment and delete.
//FIX: the original leaked every PreparedStatement/ResultSet; they are now
//closed via try-with-resources.
//NOTE(review): DBOpen.getConnection()'s lifecycle is not visible here. If it
//hands out a new Connection per call, those should be closed as well —
//confirm before tightening further.
public class PdsDAO {

    //Insert one row; pdsno is allocated as MAX(pdsno)+1 inside the statement.
    //Returns true when a row was inserted.
    public boolean insert(PdsDTO dto){
        boolean flag=false;
        try{
            Connection con=DBOpen.getConnection();
            StringBuilder sql=new StringBuilder();
            sql.append(" INSERT INTO tb_pds(pdsno, wname, subject, passwd, filename, filesize, regdate) ");
            sql.append(" VALUES ( ");
            sql.append(" (SELECT NVL(MAX(pdsno),0)+1 FROM tb_pds) ");
            sql.append(" ,?,?,?,?,?,SYSDATE) ");
            try(PreparedStatement pstmt=con.prepareStatement(sql.toString())){
                pstmt.setString(1, dto.getWname());
                pstmt.setString(2, dto.getSubject());
                pstmt.setString(3, dto.getPasswd());
                pstmt.setString(4, dto.getFilename());
                pstmt.setLong(5, dto.getFilesize());
                if(pstmt.executeUpdate()!=0){
                    flag=true;
                }
            }
        }catch(Exception e){
            System.out.println("사진 업로드 실패 : "+e);
        }
        return flag;
    }//insert

    //List all photos, newest first. Returns null when the table is empty
    //(original contract preserved).
    //synchronized: this DAO instance is shared by many requests.
    public synchronized ArrayList<PdsDTO> list(){
        ArrayList<PdsDTO> list=null;
        try{
            Connection con=DBOpen.getConnection();
            StringBuilder sql=new StringBuilder();
            sql.append(" SELECT pdsno, wname, subject, regdate, passwd, readcnt, filename ");
            sql.append(" FROM tb_pds ");
            sql.append(" ORDER BY regdate DESC ");
            try(PreparedStatement pstmt=con.prepareStatement(sql.toString());
                ResultSet rs=pstmt.executeQuery()){
                while(rs.next()){
                    if(list==null){
                        list=new ArrayList<PdsDTO>();
                    }
                    PdsDTO dto=new PdsDTO();
                    dto.setPdsno(rs.getInt("pdsno"));
                    dto.setWname(rs.getString("wname"));
                    dto.setSubject(rs.getString("subject"));
                    dto.setRegdate(rs.getString("regdate"));
                    dto.setPasswd(rs.getString("passwd"));
                    dto.setReadcnt(rs.getInt("readcnt"));
                    dto.setFilename(rs.getString("filename"));
                    list.add(dto);
                }
            }
        }catch(Exception e){
            System.out.println("목록 읽어오기 실패 : "+e);
        }
        return list;
    }//list

    //Read one photo row by number; returns null when the row does not exist.
    public PdsDTO read(int pdsno) {
        PdsDTO dto=null;
        try{
            Connection con=DBOpen.getConnection();
            StringBuilder sql=new StringBuilder();
            sql.append(" SELECT pdsno, wname, subject, regdate, passwd, readcnt, filename, filesize ");
            sql.append(" FROM tb_pds ");
            sql.append(" WHERE pdsno=? ");
            try(PreparedStatement pstmt=con.prepareStatement(sql.toString())){
                pstmt.setInt(1, pdsno);
                try(ResultSet rs=pstmt.executeQuery()){
                    if(rs.next()){
                        dto=new PdsDTO();
                        dto.setPdsno(rs.getInt("pdsno"));
                        dto.setWname(rs.getString("wname"));
                        dto.setSubject(rs.getString("subject"));
                        dto.setRegdate(rs.getString("regdate"));
                        dto.setPasswd(rs.getString("passwd"));
                        dto.setReadcnt(rs.getInt("readcnt"));
                        dto.setFilename(rs.getString("filename"));
                        dto.setFilesize(rs.getLong("filesize"));
                    }
                }
            }
        }catch(Exception e){
            System.out.println("사진 상세보기 실패 : "+e);
        }
        return dto;
    }//read

    //Increment the view counter of one photo.
    public void incrementCnt(int pdsno){
        try{
            Connection con=DBOpen.getConnection();
            StringBuilder sql=new StringBuilder();
            sql.append(" UPDATE tb_pds ");
            sql.append(" SET readcnt=readcnt+1 ");
            sql.append(" WHERE pdsno=? ");
            try(PreparedStatement pstmt=con.prepareStatement(sql.toString())){
                pstmt.setInt(1, pdsno);
                pstmt.executeUpdate();
            }
        }catch(Exception e){
            System.out.println("조회수 증가 실패 : "+e);
        }
    }//incrementCnt

    //Delete one photo when the password matches; also removes the uploaded
    //file from saveDir on success. Returns the number of deleted rows (0 or 1).
    public int delete(int pdsno, String passwd, String saveDir){
        int cnt=0;
        try{
            //Look up the stored file name first so it can be removed after
            //the row is gone.
            String filename="";
            PdsDTO oldDTO=read(pdsno);
            if(oldDTO!=null){
                filename=oldDTO.getFilename();
            }
            Connection con=DBOpen.getConnection();
            StringBuilder sql=new StringBuilder();
            sql.append(" DELETE FROM tb_pds ");
            sql.append(" WHERE passwd=? AND pdsno=? ");
            try(PreparedStatement pstmt=con.prepareStatement(sql.toString())){
                pstmt.setString(1, passwd);
                pstmt.setInt(2, pdsno);
                cnt=pstmt.executeUpdate();
            }
            if(cnt==1){
                //Row deleted — remove the attached file as well.
                Utility.deleteFile(saveDir,filename);
            }
        }catch(Exception e){
            System.out.println("사진 삭제 실패 : "+e);
        }
        return cnt;
    }//delete
}//class
|
# Copy of https://gist.github.com/antonio/4586456
# With a modification to collect all the branch names so we can make one git request
# Set DRY_RUN=1 to get an echo of the command
DRY_RUN=1
# Format that works with `git log --since`, e.g. 2018-01-01
date=$1
branches=
# Collect every remote branch (except cioos/master) with no commits since $date.
for branch in $(git branch -a | sed 's/^\s*//' | sed 's/^remotes\///' | grep -v 'cioos$\|master$'); do
if [[ "$(git log $branch --since $date | wc -l)" -eq 0 ]]; then
if [[ "$branch" =~ "origin/" ]]; then
# Strip the origin/ prefix: the deletion commands expect bare branch names.
if [[ -z $branches ]]; then
branches=$(echo "$branch" | sed 's/^origin\///')
else
branches="$branches "$(echo "$branch" | sed 's/^origin\///')
fi
fi
fi
done
# Delete all stale branches with one local and one remote git call.
if [[ ! -z $branches ]]; then
if [[ "$DRY_RUN" -eq 1 ]]; then
echo git branch -D $branches
echo git push --delete origin $branches
else
git branch -D $branches
git push --delete origin $branches
# clean up locally
git remote prune origin
fi
fi
|
#!/bin/bash
# Export scene_children.blend to JSON via the exporter script, then run the
# review tool on the result.
# FIX: the original passed --cameras twice on the blender command line; the
# duplicate has been removed.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "$DIR/setup_test_env.bash"
blender --background $BLEND/scene_children.blend \
--python $PYSCRIPT -- $JSON --vertices --faces --scene \
--cameras --materials --embedGeometry --lights --geometryType Geometry
makereview $@ --tag $(tagname)
|
/**
 * An immutable 2-D point with integer coordinates.
 */
public class Point2D {

    private final int x; // made final: coordinates never change after construction
    private final int y;

    /**
     * Creates a point at the given coordinates.
     *
     * @param x horizontal coordinate
     * @param y vertical coordinate
     */
    public Point2D(int x, int y) {
        this.x = x;
        this.y = y;
    }

    /** @return the horizontal coordinate */
    public int getX() {
        return x;
    }

    /** @return the vertical coordinate */
    public int getY() {
        return y;
    }

    /**
     * Returns the Euclidean distance to another point.
     * Uses {@link Math#hypot(double, double)} instead of the original
     * {@code sqrt(pow(dx,2)+pow(dy,2))}: it is the idiomatic form and avoids
     * intermediate overflow/underflow in the squared terms.
     *
     * @param otherPoint the point to measure to (must not be null)
     * @return the straight-line distance between this point and {@code otherPoint}
     */
    public double distanceTo(Point2D otherPoint) {
        return Math.hypot(otherPoint.x - this.x, otherPoint.y - this.y);
    }
}
<reponame>verdipratama/Appsera<filename>app/src/main/java/com/verside/appsera/PenjumlahanActivity.java<gh_stars>0
package com.verside.appsera;
import android.content.Intent;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
/**
 * Simple addition ("penjumlahan") screen: reads two numbers, shows their sum,
 * and offers logout / exit actions.
 */
public class PenjumlahanActivity extends AppCompatActivity {
    EditText bil1, bil2;           // input fields for the two operands
    TextView hasil;                // output field for the result
    Button hitung;                 // compute button (wired via android:onClick="klikHitung")
    private int hasil_akhir = 0;   // last computed result
    private String sBil1 = "";
    private String sBil2 = "";
    private Button btnLogout;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_penjumlahan);
        bil1 = (EditText) findViewById(R.id.txtbil1);
        bil2 = (EditText) findViewById(R.id.txtbil2);
        hasil = (TextView) findViewById(R.id.text_hasil);
        hitung = (Button) findViewById(R.id.cb_hitung);
        btnLogout = (Button) findViewById(R.id.btnLogout);
        btnLogout.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                logout();
            }
        });
    }

    /** Compute-button handler: parses both inputs and displays the result. */
    public void klikHitung(View view) {
        sBil1 = bil1.getText().toString();
        sBil2 = bil2.getText().toString();
        // BUG FIX: this activity is "penjumlahan" (addition) but the original
        // multiplied the operands; use addition as the name/layout indicate.
        hasil_akhir = Integer.parseInt(sBil1) + Integer.parseInt(sBil2);
        tampilkan(hasil_akhir);
    }

    /** Shows the given result in the output TextView. */
    public void tampilkan(int hasil_akhir) {
        hasil.setText("" + hasil_akhir);
    }

    /** Navigates back to the login screen. */
    private void logout() {
        startActivity(new Intent(PenjumlahanActivity.this, Activity_Login.class));
    }

    /** Exit-button handler: closes this activity. */
    public void keluar(View view) {
        finish();
    }
}
|
#include <stdio.h>
#include <string.h>
// Function to find the length of the longest common substring
/*
 * Return the length of the longest common substring (contiguous run of
 * characters) of str1 and str2.
 *
 * Classic DP over common-suffix lengths. Only two rows of the table are
 * kept: the original allocated an (m+1) x (n+1) VLA on the stack, which can
 * overflow the stack for long inputs; this version needs O(n) stack instead.
 */
int longestCommonSubstring(char str1[], char str2[])
{
    int m = strlen(str1);
    int n = strlen(str2);

    /* prev[j] / curr[j]: length of the common suffix of str1[..i-1] and
       str2[..j-1] ending at positions i-1 and j-1. */
    int prev[n + 1];
    int curr[n + 1];
    memset(prev, 0, sizeof(prev));

    int result = 0;
    for (int i = 1; i <= m; i++)
    {
        curr[0] = 0; /* empty prefix of str2 -> no common suffix */
        for (int j = 1; j <= n; j++)
        {
            if (str1[i - 1] == str2[j - 1])
            {
                /* Same character: extend the common suffix diagonally. */
                curr[j] = prev[j - 1] + 1;
                if (curr[j] > result)
                    result = curr[j];
            }
            else
            {
                curr[j] = 0;
            }
        }
        /* Current row becomes the previous row for the next i. */
        memcpy(prev, curr, sizeof(prev));
    }
    return result;
}
// Driver program
/* Demo driver: prints the longest-common-substring length of two samples. */
int main(void)
{
    char first[] = "abcdef";
    char second[] = "abcghi";
    printf("The length of the longest common substring is: %d",
           longestCommonSubstring(first, second));
    return 0;
}
# Termux build recipe for libzim, the reference implementation of the ZIM
# offline-content file format.
TERMUX_PKG_HOMEPAGE=https://openzim.org
TERMUX_PKG_DESCRIPTION="The ZIM library is the reference implementation for the ZIM file format."
TERMUX_PKG_LICENSE="GPL-2.0"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=6.3.2
# Source tarball pinned by version tag and SHA-256 checksum.
TERMUX_PKG_SRCURL=https://github.com/openzim/libzim/archive/$TERMUX_PKG_VERSION.tar.gz
TERMUX_PKG_SHA256=648fb5af65dd28a6b221b9294217939bf5abf929d706e7323ebb4196849a923c
TERMUX_PKG_DEPENDS="zstd, libuuid, zlib, libicu, liblzma, libxapian, googletest"
|
<!DOCTYPE html>
<html>
<head>
<title>Two Columns - Three Rows</title>
<style>
/* Lay the two body columns out side by side. */
main {
display: flex;
flex-direction: row;
}
/* NOTE(review): flex properties only take effect on children of a flex
   container; <body> here uses normal flow, so this rule appears inert —
   confirm whether body was meant to be display:flex. */
header, footer {
flex: 1 1 auto;
}
/* Each column takes half the width; long content scrolls inside it. */
main > div {
flex: 1 0 50%;
overflow: auto;
}
</style>
</head>
<body>
<header>
<h1>Header</h1>
</header>
<main>
<div>Body Content Column 1</div>
<div>Body Content Column 2</div>
</main>
<!-- NOTE(review): the title says "Three Rows" but there are two separate
     footer elements (four stacked sections in total) — confirm intent. -->
<footer>
<h3>Footer 1</h3>
</footer>
<footer>
<h3>Footer 2</h3>
</footer>
</body>
</html>
<gh_stars>1-10
/*
*
*/
package net.community.apps.apache.ant.antrunner;
import net.community.chest.apache.ant.build.EmbeddedHandlerLogger;
/**
* <P>Copyright 2008 as per GPLv2</P>
*
* @author <NAME>.
* @since Jul 31, 2008 1:25:59 PM
*/
public class EventsExecutor extends EmbeddedHandlerLogger {
/**
 * Creates an executor that routes ANT build events through the
 * {@code MainFrame} build-events handler.
 */
public EventsExecutor ()
{
super(MainFrame.getBuildEventsHandler());
}
}
|
# Build a 2x3 matrix from six values and print the sum of all its entries.
matrix_values <- c(2, 3, 4, 5, 6, 7)
given_matrix <- matrix(matrix_values, nrow = 2, ncol = 3)
sum_of_elements <- sum(given_matrix)
print(sum_of_elements)
#!/bin/bash
# Smoke test: run the given exporter image and wait for /metrics to respond.
set -exo pipefail
docker_image=$1
port=$2
container_id=''
# Poll the exporter's /metrics endpoint once per second for up to 10 seconds.
# Exits 0 (after stopping the container) as soon as it responds; exits 1 on
# timeout.
# FIXES: the loop variable was named `in` (typo for `i`), and on timeout the
# original exited without killing the container, leaking it.
wait_start() {
for i in {1..10}; do
if /usr/bin/curl -s -m 5 -f "http://localhost:${port}/metrics" > /dev/null; then
docker_cleanup
exit 0
else
sleep 1
fi
done
# Timed out: clean up the container before reporting failure.
docker_cleanup
exit 1
}
# Start the exporter container detached, publishing the metrics port, and
# remember its id for cleanup.
docker_start() {
container_id=$(docker run -d -p "${port}":"${port}" "${docker_image}")
}
# Kill the container started by docker_start.
docker_cleanup() {
docker kill "${container_id}"
}
# Argument check runs at execution time, before any container is started.
if [[ "$#" -ne 2 ]] ; then
echo "Usage: $0 ynsta/nftables-exporter:v0.1.1 9732" >&2
exit 1
fi
docker_start
wait_start
|
import React, { Fragment, useEffect } from 'react';
import { Main } from '@redhat-cloud-services/frontend-components/Main';
import {
PageHeader,
PageHeaderTitle,
} from '@redhat-cloud-services/frontend-components/PageHeader';
import { Breadcrumb, BreadcrumbItem } from '@patternfly/react-core';
import { Link, useRouteMatch } from 'react-router-dom';
import ClusterDetailTable from './ClusterDetailTable';
import WithLoader, {
LoaderType,
} from '../..//PresentationalComponents/WithLoader';
import { useDispatch, useSelector } from 'react-redux';
import { fetchClusterDetails } from '../../../Store/Actions';
import DateFormat from '@redhat-cloud-services/frontend-components/DateFormat/DateFormat';
const ClusterDetailPage = () => {
const match = useRouteMatch();
const dispatch = useDispatch();
const { cluster, isDetailLoading } = useSelector(
({ ClusterDetailStore }) => ClusterDetailStore
);
useEffect(() => {
dispatch(fetchClusterDetails(match.params.clusterId));
}, []);
const pageTitle = (
<WithLoader
variant={LoaderType.inlineSkeleton}
width="300px"
fontSize="lg"
isLoading={isDetailLoading}
style={{ verticalAlign: -4 }}
>
{cluster.display_name}
</WithLoader>
);
return (
<Fragment>
<PageHeader>
<Breadcrumb className="pf-u-mb-md">
<BreadcrumbItem>
<Link to="/">Vulnerability</Link>
</BreadcrumbItem>
<BreadcrumbItem>
<Link to="/clusters">Clusters</Link>
</BreadcrumbItem>
<BreadcrumbItem>
<WithLoader
variant={LoaderType.inlineSkeleton}
width="200px"
fontSize="sm"
isLoading={isDetailLoading}
style={{ verticalAlign: -4 }}
>
{cluster.display_name}
</WithLoader>
</BreadcrumbItem>
</Breadcrumb>
<PageHeaderTitle title={pageTitle} className="pf-u-mb-sm" />
<Fragment>
UUID:
<WithLoader
variant={LoaderType.inlineSkeleton}
width="300px"
fontSize="sm"
isLoading={isDetailLoading}
style={{ verticalAlign: -4 }}
>
{match.params.clusterId}
</WithLoader>
<br />
Last seen:
<WithLoader
variant={LoaderType.inlineSkeleton}
width="200px"
fontSize="sm"
isLoading={isDetailLoading}
style={{ verticalAlign: -4 }}
>
<DateFormat date={cluster.updated} type="exact" />
</WithLoader>
</Fragment>
</PageHeader>
<Main>
<ClusterDetailTable />
</Main>
</Fragment>
);
};
export default ClusterDetailPage;
|
import { describe, it } from '@xutl/test';
import { strict as assert } from 'assert';
import { extract } from '../extract';
// argv prefix that mimics a node invocation (runtime path + script path).
const args = [process.argv0, process.argv[1] as string];
// Tests for extract(): drops a leading node/script prefix when present,
// passes plain argv through untouched, and splits combined single-letter
// flags ("-dxz") into individual flags.
describe('extract', () => {
const expect = ['hullo', 'me', 'wee', 'darlins'];
it('node-style', () => {
const parsed = extract([...args, ...expect]);
assert.deepStrictEqual(parsed, expect);
});
it('plain-style', () => {
const parsed = extract([...expect]);
assert.deepStrictEqual(parsed, expect);
});
it('combined flags', () => {
const parsed = extract([...args, '-dxz']);
assert.equal(parsed.length, 3);
assert.deepStrictEqual(parsed, ['-d', '-x', '-z']);
});
});
|
/*
* Copyright 2017-2020 original authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.micronaut.data.runtime.intercept.criteria.reactive;
import io.micronaut.aop.MethodInvocationContext;
import io.micronaut.core.annotation.Internal;
import io.micronaut.core.annotation.NonNull;
import io.micronaut.core.async.publisher.Publishers;
import io.micronaut.data.intercept.RepositoryMethodKey;
import io.micronaut.data.model.runtime.PreparedQuery;
import io.micronaut.data.operations.RepositoryOperations;
import org.reactivestreams.Publisher;
/**
* Interceptor that supports reactive count specifications.
*
* @author <NAME>
* @since 3.2
*/
@Internal
public class CountReactiveSpecificationInterceptor extends AbstractReactiveSpecificationInterceptor<Object, Publisher<Number>> {
/**
 * Default constructor.
 *
 * @param operations The operations
 */
public CountReactiveSpecificationInterceptor(@NonNull RepositoryOperations operations) {
super(operations);
}
/**
 * Builds a COUNT criteria query for the intercepted repository method,
 * executes it through the reactive operations, and converts the resulting
 * publisher to the method's declared return type.
 */
@Override
public Publisher<Number> intercept(RepositoryMethodKey methodKey, MethodInvocationContext<Object, Publisher<Number>> context) {
PreparedQuery<?, Long> preparedQuery = preparedQueryForCriteria(methodKey, context, Type.COUNT);
return Publishers.convertPublisher(
reactiveOperations.findAll(preparedQuery),
context.getReturnType().getType()
);
}
}
|
<gh_stars>1-10
package com.bigsomething.Decryption.MultipleKeys.OneKey;
import com.bigsomething.Decryption.StatAnalysis.ChiSquare;
import com.bigsomething.Decryption.StatAnalysis.Frequencies.Language;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.Range;
import lombok.Getter;
import java.util.Collections;
//Recovers the Caesar-cipher key of an encrypted string by brute force: each
//candidate key is scored with a chi-square test against the letter
//frequencies of the given language, and the lowest score wins.
public class DecryptOneKey
{
    //Sentinel stored for unusable scores (0 or implausibly large) so such
    //keys stay in the map but can never win the minimum.
    //FIX: was a magic number (500000.0) repeated in two places.
    private static final double UNUSABLE_SCORE = 500000.0;

    //Candidate key space: [0, 26) — the 26 Caesar shifts tried below.
    //FIX: was Range.closed(0, 26), whose included endpoint 26 was never
    //actually tried by the loop; closedOpen matches the iteration exactly.
    private final Range<Integer> keyRange = Range.closedOpen(0, 26);
    private final BiMap<Integer, Double> chiSquaredMap = HashBiMap.create();
    @Getter private int key;
    @Getter private double min;

    //Scores every candidate key. BiMap values must be unique, so the
    //sentinel grows by 2 for each unusable entry.
    private void setChiSquaredMap(String encryptedInput, Language language)
    {
        double fakeSum = UNUSABLE_SCORE;
        for (int key = 0; key < keyRange.upperEndpoint(); key++)
        {
            String decrypted = CaesarCipher.createEncryption(key, encryptedInput);
            double sum = ChiSquare.getChiSquareSum(decrypted, language);
            if (sum != 0 && sum < UNUSABLE_SCORE)
            {
                chiSquaredMap.put(key, sum);
            } else {
                chiSquaredMap.put(key, fakeSum);
                fakeSum += 2;
            }
        }
    }

    //Picks the key with the minimal chi-square score.
    private void setKey()
    {
        this.min = Collections.min(chiSquaredMap.values());
        this.key = chiSquaredMap.inverse().get(min);
    }

    //Entry point: score all keys for the input, then select the best one.
    public void runDecrypt(String encryptedInput, Language language)
    {
        setChiSquaredMap(encryptedInput, language);
        setKey();
    }
}
|
// Copyright 2017-2021 @polkadot/util-crypto authors & contributors
// SPDX-License-Identifier: Apache-2.0
import { secp256k1DeriveHard } from '../secp256k1/deriveHard';
import { secp256k1KeypairFromSeed } from '../secp256k1/keypair/fromSeed';
import { createSeedDeriveFn } from './hdkdDerive';
/**
 * Hard HD key derivation for ECDSA (secp256k1) keypairs, built from the
 * secp256k1 seed-keypair constructor and the hard-derivation primitive.
 */
export const keyHdkdEcdsa = createSeedDeriveFn(secp256k1KeypairFromSeed, secp256k1DeriveHard);
|
<reponame>rasaford/info1<filename>src/main/java/aufgabe12_9/FormatVisitor.java<gh_stars>0
package aufgabe12_9;
// utf8: "Köpfchen in das Wasser, Schwänzchen in die Höh." -CIA-Verhörmethode
public class FormatVisitor extends Visitor {
private StringBuilder result = new StringBuilder();
private int level = 0;
/** Returns the formatted source text accumulated so far. */
public String getResult() {
return result.toString();
}
/**
 * Starts a new line (except at the very beginning of the output) and
 * writes one indentation unit per current nesting level.
 */
private void indent() {
if (result.length() > 0) {
result.append('\n');
}
for (int i = 0; i < level; i++) {
result.append(" ");
}
}
/** Starts a new indented line and appends the given text to it. */
private void indented(String text) {
indent();
result.append(text);
}
/** Formats the node in place (no new line, no extra indentation). */
private void append(Visitable node) {
node.accept(this);
}
/** Formats the node one nesting level deeper, restoring the level after. */
private void recurse(Visitable node) {
level++;
node.accept(this);
level--;
}
/*
* Expression
*/
/** Formats a numeric literal. */
@Override
public void visit(Number number) {
result.append(number.getValue());
}
/** Formats a variable reference by its name. */
@Override
public void visit(Variable variable) {
result.append(variable.getName());
}
/** Formats unary minus, parenthesizing the operand when required. */
@Override
public void visit(Unary unary) {
    result.append('-');
    // Parenthesize when the operand binds no tighter than the unary minus.
    boolean parenthesize =
            unary.firstLevelPriority() <= unary.getOperand().firstLevelPriority();
    if (parenthesize) {
        result.append('(');
    }
    append(unary.getOperand());
    if (parenthesize) {
        result.append(')');
    }
}
/**
 * Formats a binary expression, inserting parentheses based on operator
 * priorities. Non-associative operators (-, /, %) additionally need
 * parentheses on the right-hand side when priorities are equal.
 */
@Override
public void visit(Binary binary) {
boolean bracketSameRight = false;
switch (binary.getOperator()) {
case DivisionOperator:
case Modulo:
case Minus:
bracketSameRight = true;
break;
case MultiplicationOperator:
case Plus:
bracketSameRight = false;
break;
}
int myPriority = binary.firstLevelPriority();
int leftPriority = binary.getLhs().firstLevelPriority();
if (myPriority < leftPriority) {
result.append('(');
}
append(binary.getLhs());
if (myPriority < leftPriority) {
result.append(')');
}
result.append(" " + binary.getOperator().codeString() + " ");
int rightPriority = binary.getRhs().firstLevelPriority();
// Some operators (e.g. -) require parentheses on the right-hand side
// even when the priorities are equal.
if ((!bracketSameRight && myPriority < rightPriority)
|| (bracketSameRight && myPriority <= rightPriority)) {
result.append('(');
}
append(binary.getRhs());
if ((!bracketSameRight && myPriority < rightPriority)
|| (bracketSameRight && myPriority <= rightPriority)) {
result.append(')');
}
}
/** Formats a function call: "<name>(<arg1>, <arg2>, ...)". */
@Override
public void visit(Call call) {
    result.append(call.getFunctionName() + "(");
    String separator = "";
    for (Expression argument : call.getArguments()) {
        result.append(separator);
        append(argument);
        separator = ", ";
    }
    result.append(')');
}
/** Formats "<array>[<index>]", parenthesizing the array part if needed. */
@Override
public void visit(ArrayAccess arrayAccess) {
    boolean parenthesize =
            arrayAccess.firstLevelPriority() < arrayAccess.getArray().firstLevelPriority();
    if (parenthesize) {
        result.append('(');
    }
    append(arrayAccess.getArray());
    if (parenthesize) {
        result.append(')');
    }
    result.append('[');
    append(arrayAccess.getIndex());
    result.append(']');
}
/** Formats an array allocation: "new <type>[<size>]". */
@Override
public void visit(ArrayInitializer initializer) {
    result.append("new ").append(initializer.getType()).append('[');
    append(initializer.getSize());
    result.append(']');
}
/** Formats the built-in length query: "length(<array>)". */
@Override
public void visit(ArrayLength arrayLength) {
result.append("length(");
append(arrayLength.getArray());
result.append(')');
}
/*
* Statement
*/
/** Formats a read statement: "<name> = read();". */
@Override
public void visit(Read read) {
indented(read.getName() + " = read();");
}
/** Formats a write statement: "write(<expr>);". */
@Override
public void visit(Write write) {
indented("write(");
append(write.getExpression());
result.append(");");
}
/** Formats an assignment: "<name> = <expr>;". */
@Override
public void visit(Assignment assignment) {
indented(assignment.getName() + " = ");
append(assignment.getExpression());
result.append(";");
}
/** Formats a block: "{" on its own line, children one level deeper, "}". */
@Override
public void visit(Composite composite) {
    indented("{");
    for (Statement child : composite.getStatements()) {
        recurse(child);
    }
    indent();
    result.append('}');
}
/** Formats "if (<cond>) <then> else <else>". */
@Override
public void visit(IfThenElse ifThenElse) {
    indented("if (");
    // CONSISTENCY: use the append() helper like the other visits instead of
    // calling accept(this) directly (behavior is identical).
    append(ifThenElse.getCond());
    result.append(")");
    recurse(ifThenElse.getThenBranch());
    indented("else");
    recurse(ifThenElse.getElseBranch());
}
/** Formats "if (<cond>) <then>". */
@Override
public void visit(IfThen ifThen) {
    indented("if (");
    // CONSISTENCY: use the append() helper like the other visits instead of
    // calling accept(this) directly (behavior is identical).
    append(ifThen.getCond());
    result.append(")");
    recurse(ifThen.getThenBranch());
}
/** Formats "while(<cond>) <body>". */
@Override
public void visit(While while_) {
    indented("while(");
    // FIX: removed a commented-out duplicate of the line above; also use the
    // append() helper for the condition, consistent with the other visits.
    append(while_.getCond());
    result.append(") ");
    recurse(while_.getBody());
}
/** Formats "return <expr>;". */
@Override
public void visit(Return return_) {
    indented("return ");
    // CONSISTENCY: use the append() helper like the other statement visits
    // (behavior is identical).
    append(return_.getExpression());
    result.append(";");
}
/** An empty statement produces no output. */
@Override
public void visit(EmptyStatement emptyStatement) {
}
/** Formats an array-element assignment: "<name>[<index>] = <rhs>;". */
@Override
public void visit(ArrayAssignment arrayAssignment) {
    indented(arrayAssignment.getName() + "[");
    // CONSISTENCY: the rhs already used append(); use it for the index too.
    append(arrayAssignment.getIndex());
    result.append("] = ");
    append(arrayAssignment.getRhs());
    result.append(";");
}
/*
* Condition
*/
/** Formats the boolean literal "true". */
@Override
public void visit(True true_) {
result.append("true");
}
/** Formats the boolean literal "false". */
@Override
public void visit(False false_) {
result.append("false");
}
/** Formats a comparison: "<lhs> <op> <rhs>" (never parenthesized). */
@Override
public void visit(Comparison comparison) {
append(comparison.getLhs());
result.append(" " + comparison.getOperator().codeString() + " ");
append(comparison.getRhs());
}
/** Formats logical negation, parenthesizing the operand when required. */
@Override
public void visit(UnaryCondition unaryCondition) {
    result.append('!');
    // Parenthesize when the operand binds less tightly than the negation.
    boolean parenthesize =
            unaryCondition.firstLevelPriority() < unaryCondition.getOperand().firstLevelPriority();
    if (parenthesize) {
        result.append('(');
    }
    append(unaryCondition.getOperand());
    if (parenthesize) {
        result.append(')');
    }
}
/**
 * Formats a logical conjunction/disjunction, parenthesizing either side
 * when it binds less tightly than this operator.
 */
@Override
public void visit(BinaryCondition binaryCondition) {
int myPriority = binaryCondition.firstLevelPriority();
int leftPriority = binaryCondition.getLhs().firstLevelPriority();
if (myPriority < leftPriority) {
result.append('(');
}
append(binaryCondition.getLhs());
if (myPriority < leftPriority) {
result.append(')');
}
switch (binaryCondition.getOperator()) {
case And:
result.append(" && ");
break;
case Or:
result.append(" || ");
break;
}
int rightPriority = binaryCondition.getRhs().firstLevelPriority();
if (myPriority < rightPriority) {
result.append('(');
}
append(binaryCondition.getRhs());
if (myPriority < rightPriority) {
result.append(')');
}
}
/*
* Rest
*/
/** Formats a declaration line: "<type> <name1>, <name2>, ...;". */
private void visitDeclarations(Type type, String[] names) {
    indented(type.codeString());
    result.append(' ');
    String separator = "";
    for (String name : names) {
        result.append(separator).append(name);
        separator = ", ";
    }
    result.append(';');
}
/** Formats a single-name declaration via the shared declaration helper. */
@Override
public void visit(SingleDeclaration singleDeclaration) {
visitDeclarations(singleDeclaration.getType(), new String[]{singleDeclaration.getName()});
}
/** Formats a multi-name declaration via the shared declaration helper. */
@Override
public void visit(Declaration declaration) {
visitDeclarations(declaration.getType(), declaration.getNames());
}
/**
 * Formats a function: signature line, "{" on its own line, declarations
 * then statements one level deeper, and a closing "}".
 */
@Override
public void visit(Function function) {
indented(function.getReturnType().codeString());
result.append(' ');
result.append(function.getName());
result.append('(');
for (int i = 0; i < function.getParameters().length; i++) {
SingleDeclaration sd = function.getParameters()[i];
if (i > 0) {
result.append(", ");
}
result.append(sd.getType().codeString());
result.append(' ');
result.append(sd.getName());
}
result.append(")");
indent();
result.append("{");
for (Declaration decl : function.getDeclarations()) {
recurse(decl);
}
for (Statement stmt : function.getStatements()) {
recurse(stmt);
}
indented("}");
}
/**
 * Formats a whole program: all classes, then all top-level functions,
 * separated by blank lines.
 */
@Override
public void visit(Program program) {
for (int i = 0; i < program.getClasses().length; i++) {
if (i > 0) {
result.append("\n");
}
program.getClasses()[i].accept(this);
}
for (int i = 0; i < program.getFunctions().length; i++) {
if (i > 0) {
result.append("\n");
}
program.getFunctions()[i].accept(this);
}
}
@Override
public void visit(Class classObj) {
    // Header: "class <Name> [extends <Super>] {".
    result.append("class ");
    result.append(classObj.getName());
    result.append(" ");
    if (classObj.getSuperClass() != null) {
        result.append(String.format("extends %s ", classObj.getSuperClass()));
    }
    result.append("{");
    // Body order: fields (one per line), then the constructor, then methods.
    for (SingleDeclaration s : classObj.getFields()) {
        recurse(s);
        result.append("\n");
    }
    recurse(classObj.getConstructor());
    for (Function function : classObj.getFunctions()) {
        recurse(function);
    }
    indented("}\n");
}
@Override
public void visit(Constructor constructor) {
    // Emits "<Name>(<type> <param>, ...) { <decls> <stmts> }".
    indent();
    result.append(constructor.getName());
    result.append("(");
    for (int i = 0; i < constructor.getParameters().length; i++) {
        SingleDeclaration sd = constructor.getParameters()[i];
        if (i > 0) {
            result.append(", ");
        }
        result.append(sd.getType().codeString());
        result.append(' ');
        result.append(sd.getName());
    }
    result.append(")");
    // Only start a new indented line before "{" when the body is non-empty,
    // so an empty constructor stays compact.
    if (constructor.getDeclarations().length != 0 ||
        constructor.getStatements().length != 0) {
        indent();
    }
    result.append("{");
    for (Declaration declaration : constructor.getDeclarations()) {
        recurse(declaration);
    }
    for (Statement statement : constructor.getStatements()) {
        recurse(statement);
    }
    indented("}\n");
}
@Override
public void visit(MethodCall methodCall) {
    // Emits "<ref>.<method>(<arg1>, <arg2>, ...)".
    result.append(methodCall.getRefName());
    result.append(".");
    result.append(methodCall.getMethodName());
    result.append("(");
    for (int i = 0; i < methodCall.getArguments().length; i++) {
        Expression ex = methodCall.getArguments()[i];
        if (i > 0) {
            result.append(", ");
        }
        // NOTE(review): this relies on Expression.toString() rather than the
        // visitor's append(...) used for other expressions — confirm that
        // toString() renders the same code text, otherwise this should
        // probably be append(ex).
        result.append(ex);
    }
    result.append(")");
}
@Override
public void visit(ObjectInitializer objectInitializer) {
    // Emits "new <ClassName>(<arg1>, <arg2>, ...)".
    result.append("new ");
    result.append(objectInitializer.getClassName());
    result.append("(");
    for (int i = 0; i < objectInitializer.getArguments().length; i++) {
        Expression ex = objectInitializer.getArguments()[i];
        if (i > 0) {
            result.append(", ");
        }
        // NOTE(review): arguments are rendered via Expression.toString()
        // instead of the visitor's append(...) — verify both produce the
        // same code text.
        result.append(ex);
    }
    result.append(")");
}
@Override
public void visit(ExpressionStatement expressionStatement) {
    // An expression used as a statement: "<expr>;".
    expressionStatement.getExpression().accept(this);
    result.append(";");
}
}
|
<reponame>williamcheong/origen<gh_stars>0
require "spec_helper"
# Some dummy classes to test out configurable targets
# Dummy top-level DUT with one pin plus a pin alias, used below to verify that
# pin state is rebuilt cleanly when configurable targets are reloaded.
class MyDut1
  include Origen::TopLevel

  def initialize
    add_pin :pin1
    add_pin_alias :alias1, :pin1
  end
end
# Dummy top-level DUT with the same pin but no alias; loading it after a DUT
# that declared an alias must leave the alias undefined.
class MyDut2
  include Origen::TopLevel

  def initialize
    add_pin :pin1
  end
end
# Integration specs for Origen.target: loading/reloading, mode switching,
# MOO-number aliases, target lookup in subdirectories and symlinks, and
# configurable (parameterized) targets. Each example starts from a freshly
# loaded application.
describe "Application Target" do
  before :each do
    Origen.load_application
  end

  it "is accessible via Origen.target" do
    Origen.target.should be
  end

  it "can be loaded" do
    Origen.target.temporary = "production"
    Origen.target.load!
    $top.should be
    $nvm.should be
  end

  it "reloading a target should not cause a duplicate pins error" do
    lambda do
      Origen.target.temporary = "debug"
      Origen.target.load!
      Origen.target.load!
      Origen.app.load_target!
      Origen.app.load_target!
    end.should_not raise_error
  end

  it "although duplicate pins should still be caught when appropriate" do
    lambda do
      Origen.target.temporary = "debug"
      Origen.target.load!
      # Instantiating the DUT a second time re-adds its pins and must fail.
      C99::SOC.new
    end.should raise_error
  end

  it "ignores kwrite temp files" do
    begin
      # Editor backup files (trailing ~) must not be picked up as targets.
      `touch #{Origen.top}/target/debug.rb~`
      lambda do
        Origen.target.temporary = "debug"
      end.should_not raise_error
    ensure
      `rm -f #{Origen.top}/target/debug.rb~`
    end
  end

  it "can be used to switch to debug mode" do
    Origen.target.switch_to "debug"
    Origen.target.load!
    Origen.mode.should == :debug
  end

  specify "only recognized modes allowed" do
    [:production, :debug].each do |mode|
      lambda { Origen.mode = mode }.should_not raise_error
    end
    [:dummy, :data].each do |mode|
      lambda { Origen.mode = mode }.should raise_error
    end
  end

  specify "loading a target resets the mode" do
    Origen.mode = :debug
    Origen.mode.to_s.should == "debug"
    Origen.target.temporary = "production"
    Origen.target.load!
    Origen.mode.to_s.should == "production"
  end

  specify "recognizes moo numbers" do
    # In config/application.rb the prod targets are defined as:
    # "1m79x" => "production"
    # "2m79x" => "debug"
    Origen.target.temporary = "production"
    Origen.target.load!
    Origen.mode.to_s.should == "production"
    Origen.target.temporary = "2m79x"
    Origen.target.load!
    Origen.mode.to_s.should == "debug"
    Origen.target.temporary = "1m79x"
    Origen.target.load!
    Origen.mode.to_s.should == "production"
    # MOO-number lookup is case-insensitive.
    Origen.target.temporary = "2M79X"
    Origen.target.load!
    Origen.mode.to_s.should == "debug"
    Origen.target.temporary = "1M79X"
    Origen.target.load!
    Origen.mode.to_s.should == "production"
    puts "******************** Missing target error expected here for 'm79x' ********************"
    lambda { Origen.target.temporary = "m79x" }.should raise_error
    puts "******************** Missing target error expected here for 'n86b' ********************"
    lambda { Origen.target.temporary = "n86b" }.should raise_error
  end

  it "returns the moo number (upcased)" do
    Origen.target.temporary = "production"
    Origen.target.load!
    Origen.target.moo.should == "1M79X"
    Origen.target.temporary = "debug"
    Origen.target.load!
    Origen.target.moo.should == "2M79X"
  end

  it "can find targets in sub directories of /target" do
    Origen.target.temporary = "debug"
    Origen.target.load!
    $tester.should_not == "found in subdir"
    Origen.target.temporary = "mock.rb"
    Origen.target.load!
    $tester.should == "found in subdir"
    # Works with MOO numbers
    Origen.target.temporary = "production"
    Origen.target.load!
    $tester.should_not == "found in subdir"
    Origen.target.temporary = "3M79X"
    Origen.target.load!
    $tester.should == "found in subdir"
    Origen.target.moo.should == "3M79X"
    # Symlinks don't work too well on windows...
    unless Origen.running_on_windows?
      # Works with symlinks
      Origen.target.temporary = "mock2"
      Origen.target.load!
      $tester.should == "found in symlinked subdir"
      Origen.target.temporary = "mock3"
      Origen.target.load!
      $tester.should == "found in subdir of a symlinked subdir!"
    end
  end

  it "can check if a target exists" do
    Origen.target.exist?("debug").should == true
    Origen.target.exists?("debug").should == true
    Origen.target.exist?("debug.rb").should == true
    Origen.target.exist?("some_other_debug").should == false
    Origen.target.exist?("mock").should == true
    # Symlinks don't work too well on windows...
    unless Origen.running_on_windows?
      Origen.target.exist?("mock2").should == true
      Origen.target.exist?("mock3").should == true
    end
  end

  it "can check if a target name is unique" do
    Origen.target.unique?("v93k").should == true
    # Symlinks don't work too well on windows...
    unless Origen.running_on_windows?
      Origen.target.unique?("mock").should == false
    end
  end

  it "configurable targets work" do
    Origen.load_target("configurable", tester: OrigenTesters::J750, dut: C99::SOC)
    $tester.j750?.should == true
    $top.is_a?(C99::SOC).should == true
    Origen.load_target("configurable", tester: OrigenTesters::V93K, dut: C99::NVM)
    $tester.v93k?.should == true
    $top.is_a?(C99::NVM).should == true
  end

  it "caches are cleared between reloads of configurable targets with different options" do
    Origen.load_target("configurable", tester: OrigenTesters::J750, dut: MyDut1)
    $dut.has_pin?(:pin1).should == true
    $dut.has_pin?(:alias1).should == true
    Origen.load_target("configurable", tester: OrigenTesters::V93K, dut: MyDut2)
    $dut.has_pin?(:pin1).should == true
    $dut.has_pin?(:alias1).should == false
  end

  it "leave with the debug target set" do
    Origen.load_target("debug")
  end

  it "all_targets does not return target dir itself" do
    Origen.target.all_targets.should_not == ["target"]
  end

  it "all_targets is able to find individual targets" do
    Origen.target.all_targets.include?("production.rb").should == true
    Origen.target.all_targets.include?("debug.rb").should == true
  end

  it "all_targets is able to find targets in subdirs" do
    Origen.target.all_targets.include?("mock.rb").should == true
    # Symlinks don't work too well on windows...
    unless Origen.running_on_windows?
      Origen.target.all_targets.include?("mock2.rb").should == true
      Origen.target.all_targets.include?("mock3.rb").should == true
    end
  end
end
|
#!/usr/bin/env bash
# Bring up the db/redis containers, export connection URLs for the test
# suite, wait until Postgres accepts connections, then exec the given command.
docker-compose up -d db redis
redis_port=6379
POSTGRES_PASSWORD="mysecretpassword"
POSTGRES_USER="rustapp"
POSTGRES_DB="test"
# POSTGRES_PASSWORD=""
# POSTGRES_USER="postgres"
# POSTGRES_DB="postgres"
# `docker-compose port` prints "host:port" for the published service port.
redis_host=$(docker-compose port redis $redis_port)
# POSTGRES_HOST="172.18.0.2"
POSTGRES_HOST=$(docker-compose port db 5432)
export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}/${POSTGRES_DB}"
export REDIS_URL="redis://${redis_host}"
# Poll until psql inside the db container can connect (single quotes: the
# $POSTGRES_USER reference is expanded from the container's own environment).
until docker-compose exec -e PGPASSWORD=$POSTGRES_PASSWORD db sh -c 'psql -U $POSTGRES_USER -d postgres -c "\q"'; do
  >&2 echo "Postgres is unavailable - sleeping"
  sleep 1
done
# Serialize tests because they share the database/redis state.
export RUST_TEST_THREADS=1
exec "$@"
|
// Blank registration-form state; each field mirrors one controlled input.
const INITIAL_STATE = {
    username: '',
    email: '',
    password: '',
    confirmationPassword: '',
    studentType: '',
};
// Registration-form reducer: each *_changed action writes its payload into
// the matching state field; any other action leaves the state untouched.
export default (state = INITIAL_STATE, action) => {
    if (action.type === 'username_changed') {
        return { ...state, username: action.payload };
    }
    if (action.type === 'email_changed') {
        return { ...state, email: action.payload };
    }
    if (action.type === 'password_changed') {
        return { ...state, password: action.payload };
    }
    if (action.type === 'confirmationPassword_changed') {
        return { ...state, confirmationPassword: action.payload };
    }
    // NOTE: this action type is capitalized differently from the others.
    if (action.type === 'studentType_Changed') {
        return { ...state, studentType: action.payload };
    }
    return state;
};
def find_factorial(n):
    """Return n! computed iteratively; values of n below 1 yield 1."""
    result = 1
    factor = 2
    while factor <= n:
        result *= factor
        factor += 1
    return result
// Payload handed to an update's notification callback.
// NOTE(review): still a stub — the shape of the update payload is undefined.
interface Info {
    // Define the properties of the Info interface here
}
// Dispatches each pending update:
//  - force: apply immediately (notify via updateFunction when present),
//  - branch: apply only to the named branch,
//  - otherwise: just notify via updateFunction when present.
function processUpdates(updates: {
    force?: boolean;
    updateFunction?: (msg: Info) => any;
    branch?: string;
}[]) {
    for (const update of updates) {
        if (update.force) {
            if (update.updateFunction) {
                // NOTE(review): updateFunction declares a required Info
                // parameter but is invoked with no argument — this will not
                // type-check until the Info payload is constructed here.
                update.updateFunction(/* pass the required info */);
            }
            // Apply the update immediately
        } else if (update.branch) {
            // Apply the update to the specified branch
        } else if (update.updateFunction) {
            update.updateFunction(/* pass the required info */);
        }
    }
}
#!/bin/sh
# ggd recipe: download the Ensembl GRCh37 GTF, inject computed intron records
# between consecutive exons, remap chromosome names to UCSC hg19, sort,
# bgzip, and tabix-index the result.
set -eo pipefail -o nounset

# Generate the helper that streams a GTF and emits intron lines after each
# exon. (Unquoted EOF is safe here: the Python below contains no $ or
# backticks for the shell to expand.)
cat << EOF > add_introns.py
import sys
import os
import io
import gzip
from collections import defaultdict

gtf_file = sys.argv[1] ## A gtf file to filter
genome_file = sys.argv[2] ## A .genome file

with io.open(genome_file, "rt", encoding = "utf-8") as gf:
    chrom_set = set([x.strip().split("\t")[0] for x in gf])

## Get per transcript exons
fh = gzip.open(gtf_file, "rt", encoding = "utf-8") if gtf_file.endswith(".gz") else io.open(gtf_file, "rt", encoding = "utf-8")
exon_dict = defaultdict(list)
header = ["chrom","source","feature","start","end","score","strand","frame","attribute"]
for line in fh:
    if line[0] == "#":
        continue
    line_dict = dict(zip(header,line.strip().split("\t")))
    line_dict.update({x.strip().replace("\"","").split(" ")[0]:x.strip().replace("\"","").split(" ")[1] for x in line_dict["attribute"].strip().split(";")[:-1]})
    ## Add exon position to dict with transcript id as key
    if line_dict["feature"] == "exon":
        exon_dict[line_dict["transcript_id"]].append([int(line_dict["start"]),int(line_dict["end"])])
fh.close()

intron_dict = defaultdict(lambda: defaultdict(list))
for transcript_id, exon_list in exon_dict.items():
    ## Sort exon's by start position
    sorted_list = sorted(exon_list, key=lambda x: (x[0]))
    list_size = len(sorted_list)
    for i in range(list_size):
        ## if enough exons are available to create an intron
        if i + 1 < list_size:
            ## Intron start is exon end + 1
            intron_start = sorted_list[i][1] + 1
            ## Intron end is next exon start - 1
            intron_end = sorted_list[i+1][0] - 1
            ## Add intron to intron dict with keys as transcript id and exon position
            intron_dict[transcript_id]["{}-{}".format(sorted_list[i][0], sorted_list[i][1])] = [intron_start, intron_end]

## Second pass: echo every kept GTF record and emit an intron line after each
## exon that has a following exon in the same transcript
fh = gzip.open(gtf_file, "rt", encoding = "utf-8") if gtf_file.endswith(".gz") else io.open(gtf_file, "rt", encoding = "utf-8")
header = ["chrom","source","feature","start","end","score","strand","frame","attribute"]
print("#" + "\t".join(header))
for line in fh:
    if line[0] == "#":
        continue
    line_dict = dict(zip(header,line.strip().split("\t")))
    line_dict.update({x.strip().replace("\"","").split(" ")[0]:x.strip().replace("\"","").split(" ")[1] for x in line_dict["attribute"].strip().split(";")[:-1]})
    ## Skip scaffoldings not in the genome file
    if line_dict["chrom"] not in chrom_set:
        continue
    ## Print current line to stdout
    print(line.strip())
    ## Add intron if exon exists
    if line_dict["feature"] == "exon":
        ## Skip intron creation if transcript not in intron dict
        if line_dict["transcript_id"] not in intron_dict:
            continue
        ## Skip intron creation if exon key is not in intron dict
        if "{}-{}".format(line_dict["start"],line_dict["end"]) not in intron_dict[line_dict["transcript_id"]]:
            continue
        ## Get the intron positions from the intron dict
        intron_positions = intron_dict[line_dict["transcript_id"]]["{}-{}".format(line_dict["start"],line_dict["end"])]
        ## Create new line for intron entry
        intron_line = [str(line_dict["chrom"]), "ggd", "intron", str(intron_positions[0]), str(intron_positions[1]), ".", line_dict["strand"], "."]
        ## Get attribute info from the exon
        attributes = []
        if "gene_id" in line_dict:
            attributes.append("gene_id \"" + line_dict["gene_id"] + "\"")
        if "gene_version" in line_dict:
            attributes.append("gene_version \"" + line_dict["gene_version"] + "\"")
        if "transcript_id" in line_dict:
            attributes.append("transcript_id \"" + line_dict["transcript_id"] + "\"")
        if "transcript_version" in line_dict:
            attributes.append("transcript_version \"" + line_dict["transcript_version"] + "\"")
        if "gene_name" in line_dict:
            attributes.append("gene_name \"" + line_dict["gene_name"] + "\"")
        if "gene_source" in line_dict:
            attributes.append("gene_source \"" + line_dict["gene_source"] + "\"")
        if "gene_biotype" in line_dict:
            attributes.append("gene_biotype \"" + line_dict["gene_biotype"] + "\"")
        if "transcript_name" in line_dict:
            attributes.append("transcript_name \"" + line_dict["transcript_name"] + "\"")
        if "transcript_source" in line_dict:
            attributes.append("transcript_source \"" + line_dict["transcript_source"] + "\"")
        if "transcript_biotype" in line_dict:
            attributes.append("transcript_biotype \"" + line_dict["transcript_biotype"] + "\"")
        ## Add feature info to the intron line
        intron_line.append("; ".join(attributes) + ";")
        ## Print intron line to stdout
        print("\t".join(intron_line))
fh.close()
EOF

# Fetch the chromosome inventories for both assemblies plus the
# Ensembl->UCSC name mapping.
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/GRCh37/GRCh37.genome
wget --quiet $genome
genome2=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
chrom_mapping=$(ggd get-files hg19-chrom-mapping-ensembl2ucsc-ncbi-v1 --pattern "*.txt")
wget --quiet ftp://ftp.ensembl.org/pub/release-75/gtf/homo_sapiens/Homo_sapiens.GRCh37.75.gtf.gz

# Add introns, remap/sort to hg19 coordinates, compress, and index.
python add_introns.py Homo_sapiens.GRCh37.75.gtf.gz GRCh37.genome \
    | gsort --chromosomemappings $chrom_mapping /dev/stdin $genome2 \
    | bgzip -c > hg19-gene-features-introns-added-v1.gtf.gz
tabix hg19-gene-features-introns-added-v1.gtf.gz

# Clean up intermediates.
rm Homo_sapiens.GRCh37.75.gtf.gz
rm add_introns.py
rm GRCh37.genome
|
/// <summary>
/// Creates every missing directory along the '/'-separated
/// <paramref name="path"/>, reusing directories that already exist
/// ("mkdir -p" semantics).
/// </summary>
public void CreateDirectory(string path)
{
    string[] directories = path.Split('/');
    Directory currentDir = Root;
    // Index 0 is the empty segment before the leading '/'; skip it.
    for (int i = 1; i < directories.Length; i++)
    {
        string dirName = directories[i];
        Directory existingDir = currentDir.Directories.FirstOrDefault(d => d.Name == dirName);
        if (existingDir == null)
        {
            Directory newDir = new Directory(dirName);
            currentDir.AddDirectory(newDir);
            currentDir = newDir;
        }
        else
        {
            currentDir = existingDir;
        }
    }
}
/// <summary>
/// Adds <paramref name="fileName"/> to the directory addressed by
/// <paramref name="path"/>. Unlike CreateDirectory, missing directories are
/// NOT created — the call silently does nothing when the path is absent.
/// </summary>
public void CreateFile(string path, string fileName)
{
    string[] directories = path.Split('/');
    Directory currentDir = Root;
    // NOTE(review): the final segment is skipped (Length - 1) — presumably
    // the path already ends with the file's own name; confirm with callers.
    for (int i = 1; i < directories.Length - 1; i++)
    {
        string dirName = directories[i];
        Directory existingDir = currentDir.Directories.FirstOrDefault(d => d.Name == dirName);
        if (existingDir == null)
        {
            // Directory does not exist, cannot create file
            return;
        }
        else
        {
            currentDir = existingDir;
        }
    }
    currentDir.AddFile(fileName);
}
/// <summary>
/// Returns the names of the files directly inside the directory addressed by
/// <paramref name="path"/>; a missing directory yields an empty list.
/// </summary>
public List<string> ListFiles(string path)
{
    string[] directories = path.Split('/');
    Directory currentDir = Root;
    // Walk each path segment (index 0 is the empty root segment).
    for (int i = 1; i < directories.Length; i++)
    {
        string dirName = directories[i];
        Directory existingDir = currentDir.Directories.FirstOrDefault(d => d.Name == dirName);
        if (existingDir == null)
        {
            // Directory does not exist, return empty list
            return new List<string>();
        }
        else
        {
            currentDir = existingDir;
        }
    }
    return currentDir.Files;
}
/// <summary>
/// Returns the names of files directly inside the directory addressed by
/// <paramref name="path"/> whose name contains <paramref name="keyword"/>.
/// A missing directory yields an empty list.
/// </summary>
public List<string> SearchFiles(string path, string keyword)
{
    string[] directories = path.Split('/');
    Directory currentDir = Root;
    // Walk each path segment (index 0 is the empty root segment).
    for (int i = 1; i < directories.Length; i++)
    {
        string dirName = directories[i];
        Directory existingDir = currentDir.Directories.FirstOrDefault(d => d.Name == dirName);
        if (existingDir == null)
        {
            // Directory does not exist, return empty list
            return new List<string>();
        }
        currentDir = existingDir;
    }
    // LINQ replaces the original accumulate-into-a-list loop; the file is
    // already using System.Linq (FirstOrDefault above).
    return currentDir.Files.Where(file => file.Contains(keyword)).ToList();
}
def detect_manual_changes(file_content: str) -> bool:
    """Return True when any line of ``file_content`` begins (after leading
    whitespace) with the auto-generated-file warning marker."""
    marker = (
        "// Manual changes to this file may cause unexpected behavior "
        "in your application."
    )
    return any(line.strip().startswith(marker) for line in file_content.split('\n'))
<gh_stars>0
package de.pfann.deepspace.api
import de.pfann.deepspace.fightsystem.core.SimpleAttackAction
import de.pfann.deepspace.fightsystem.spaceships.LongRangedAttack
// Long-ranged combatant whose every attack uses the constructor-supplied
// attack points.
// NOTE(review): AttackAction and Fightable are referenced without an import —
// presumably they live in this package (de.pfann.deepspace.api); confirm.
class SpaceStation(aLifePoints: Int, aShildPoints: Int, aAttackPoints: Int) extends LongRangedAttack(aLifePoints,aShildPoints) {

  override def getAttackAction(): AttackAction = {
    new SimpleAttackAction(aAttackPoints)
  }

  // Produces a copy with updated life/shield but the same attack strength.
  override def copyFightable(lifePoints: Int, shildPoints: Int): Fightable = {
    new SpaceStation(lifePoints,shildPoints,aAttackPoints)
  }

  override def toString: String = {
    "SpaceStation(" + this.aLifePoints + " - " + this.aShildPoints + ")"
  }
}
|
<filename>app/src/main/java/com/qubiz/fjobs/data/Job.java
package com.qubiz.fjobs.data;
import com.google.gson.annotations.SerializedName;
import com.qubiz.fjobs.util.DataUtils;
import java.text.SimpleDateFormat;
import java.util.List;
import java.util.Locale;
/**
 * Gson-mapped DTO describing a job posting as returned by the backend API.
 */
public class Job {

    private static final String DATE_FORMAT = "yyyy-MM-dd";
    // NOTE(review): SimpleDateFormat is not thread-safe, and this shared
    // static instance is never used within this class — confirm no external
    // reflective use before removing it.
    private static final SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT, Locale.getDefault());

    // Display labels for the numeric difficulty levels (see getDifficulty()).
    public static final String VERY_LOW_DIFFICULTY = "Very easy";
    public static final String LOW_DIFFICULTY = "Easy";
    public static final String MEDIUM_DIFFICULTY = "Challenging";
    public static final String TOUGH_DIFFICULTY = "Tough";
    public static final String HARD_CORE_DIFFICULTY = "Hard-Core";

    private String id;
    private String description;
    private String title;
    private String photo;
    private String city;
    private String address;

    @SerializedName("employerID")
    private String employerId;

    @SerializedName("estimated_time")
    private int estimatedTime;

    @SerializedName("created_date")
    private String createdDate;

    @SerializedName("start_date")
    private String startDate;

    @SerializedName("end_date")
    private String endDate;

    // 0..5 scale coming from the API; mapped to a label by getDifficulty().
    private int difficulty;

    @SerializedName("reward")
    private String jobReward;

    public List<Student> getStudents() {
        return students;
    }

    private List<Student> students;

    public Job() {
    }

    public String getId() {
        return id;
    }

    public String getDescription() {
        return description;
    }

    public String getTitle() {
        return title;
    }

    public String getPhoto() {
        return photo;
    }

    public String getCity() {
        return city;
    }

    public String getAddress() {
        return address;
    }

    public int getEstimatedTime() {
        return estimatedTime;
    }

    public String getCreatedDate() {
        return createdDate;
    }

    public String getStartDate() {
        return startDate;
    }

    public String getEndDate() {
        return endDate;
    }

    /**
     * Maps the numeric difficulty onto a display label; unknown values fall
     * through (via the leading default) to the easiest label.
     */
    public String getDifficulty() {
        switch (difficulty) {
            default:
            case 0:
            case 1:
                return VERY_LOW_DIFFICULTY;
            case 2:
                return LOW_DIFFICULTY;
            case 3:
                return MEDIUM_DIFFICULTY;
            case 4:
                return TOUGH_DIFFICULTY;
            case 5:
                return HARD_CORE_DIFFICULTY;
        }
    }

    public String getJobReward() {
        return jobReward;
    }

    public String getEmployerId() { return employerId; }

    public void setDescription(String description) {
        this.description = description;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public void setPhoto(String photo) {
        this.photo = photo;
    }

    public void setCity(String city) {
        this.city = city;
    }

    public void setAddress(String address) {
        this.address = address;
    }

    public void setEstimatedTime(int estimatedTime) {
        this.estimatedTime = estimatedTime;
    }

    public void setStartDate(String startDate) {
        this.startDate = startDate;
    }

    public void setEndDate(String endDate) {
        this.endDate = endDate;
    }

    public void setDifficulty(int difficulty) {
        this.difficulty = difficulty;
    }

    public void setJobReward(String jobReward) {
        this.jobReward = jobReward;
    }
}
|
# Dump commit metadata (date, author, subject, short hash) as CSV for all
# commits before the given date/ref, echoing to stdout and ../meta/mvp.txt.
# Quoting "$1" keeps arguments with spaces (e.g. "2020-01-01 12:00") intact.
git log --pretty="format:%ai,%an,%s,%h" --before="$1" 2>&1 | tee ../meta/mvp.txt
|
/***
* Copyright (C) <NAME>. All rights reserved.
* Licensed under the MIT license. See LICENSE file in the project root
* for full license information.
*
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* For related information - https://github.com/CodeWithRodi/Cutternet/
*
* Cutternet Backend Source Code
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/
const Language = require('../Models/Language');
const HandlerFactory = require('./HandlerFactory');
const { General } = require('../Settings/');

// Lists translations; the ':Language' route param narrows results to one
// language unless it is '-1' (all). When the requested language has no
// entries, ApplyRecursion retries the query with the configured default.
exports.GetLanguages = HandlerFactory.GetAll({
    Model: Language,
    ApplyFilter: (Request) =>
        Request.params.Language !== '-1' ? { Language: Request.params.Language } : {},
    ApplyRecursion: [
        (Data) => Data.Request.params.Language && Data.Database.TotalResults === 0,
        { Language: General.DefaultLanguage }
    ]
});

exports.DeleteLanguage = HandlerFactory.DeleteOne({ Model: Language });

// Only Key/Value/Language are accepted from the request body on writes.
exports.CreateLanguage = HandlerFactory.CreateOne({
    Model: Language,
    FilterRequestFields: ['Key', 'Value', 'Language']
});

exports.UpdateLanguage = HandlerFactory.UpdateOne({
    Model: Language,
    FilterRequestFields: ['Key', 'Value', 'Language']
});
|
<reponame>c-yi/notebook
/*const createArray = (n) => {
const arr = [];
for (let i = 0; i < n; i++) {
arr.push(Math.floor(Math.random() * n))
}
return arr;
};*/
class ArrBinaryTree {
constructor() {
this.root = [];
}
_add(v, index) {
if (!this.root[index] || this.root[index] === v) {
this.root[index] = v;
return
}
if (v > this.root[index]) {
this._add(v, 2 * index + 2)
}
if (v < this.root[index]) {
this._add(v, 2 * index + 1)
}
}
add(v) {
if (this.root.length === 0) {
this.root[0] = v;
} else {
this._add(v, 0)
}
}
_preOrder(index) {
if (this.root[index]) {
console.log(this.root[index]);
}
let left = 2 * index + 1;
let right = 2 * index + 2;
if (this.root[left] !== null && this.root[right] !== null) {
let length = this.root.length;
if (left <= length) {
this._preOrder(left)
}
if ((right <= length)) {
this._preOrder(right)
}
}
}
preOrder() {
if (this.root.length !== 0) {
this._preOrder(0)
} else {
console.log('空顺序存储二叉树');
}
}
}
// Demo: insert 10 random values in [0, 100) and dump the tree's array form
// followed by a pre-order traversal.
let arr = new ArrBinaryTree();
let n = 10;
for (let i = 0; i < n; i++) {
    arr.add(Math.floor(Math.random() * 10 * n))
}
console.log(arr.root.length);
// NOTE(review): String.replace with a string pattern replaces only the FIRST
// comma — confirm whether every comma was meant to become a space.
console.log(arr.root.join(',').replace(',', ' '));
arr.preOrder()
|
<filename>src/main/java/stincmale/server/Server.java
package stincmale.server;
import java.util.concurrent.CompletionStage;
import javax.annotation.concurrent.ThreadSafe;
/**
 * Represents an I/O server.
 */
@ThreadSafe
public interface Server {
  /**
   * Enables the {@link Server} to perform I/O
   * operations until {@link #stop()} is invoked or until an implementation decided to cease all operations.
   * This method is allowed to be called multiple times.
   *
   * @return {@link CompletionStage} that may be used to get notified when the server stops operating,
   * or if the server fails to start.
   *
   * @see #stop()
   */
  CompletionStage<Void> start();

  /**
   * Disables the {@link Server} so it does not perform I/O and any other
   * operations anymore if the {@link Server} was {@link #start() started}, otherwise does nothing.
   * This method is allowed to be called multiple times.
   *
   * @throws InterruptedException if the calling thread is interrupted while the server shuts down.
   *
   * @see #start()
   */
  void stop() throws InterruptedException;
}
|
#!/bin/sh
# Install project dependencies with Poetry, then launch the Discord-to-Slack
# bridge bot only if the install succeeded.

# Testing the command directly avoids the fragile `$?` round-trip.
if poetry install; then
    echo "Dependency installation successful"
    # Run the bot inside the Poetry-managed virtualenv.
    poetry run python discord2slackbot.py
else
    echo "Dependency installation failed. Please check the logs for errors."
    # Propagate failure to callers (the original exited 0 on this path).
    exit 1
fi
#!/bin/bash
# Render $1.rst to HTML and (via LaTeX) to PDF, then remove LaTeX by-products.
DOCUTILS_PATH="/usr/bin"

# Quote every "$1" expansion so document stems containing spaces survive
# word-splitting and glob expansion.
${DOCUTILS_PATH}/rst2html <"$1.rst" >"$1.html"
${DOCUTILS_PATH}/rst2latex <"$1.rst" >"$1.tex"
pdflatex --shell-escape "$1.tex"
rm -rf "$1.aux" "$1.log" "$1.out" "$1.tex" "$1.toc"
exit 0
import Home from '../components/Home/Home'

// Page-level component that simply delegates rendering to Home.
const HomePage = () => <Home />

export default HomePage
|
#!/bin/sh
# Computes the "number of published papers" metric (DATA_NO=SZ_FBLWSL) per
# teacher from the Hive model layer and exports it into the MySQL
# quality-control table im_quality_teacher_data_info.
cd `dirname $0`
source ./../config.sh
exec_dir teacher_publish_paper_num

HIVE_DB=assurance
HIVE_TABLE=teacher_publish_paper_num
TARGET_TABLE=im_quality_teacher_data_info
DATA_NO=SZ_FBLWSL

# Drop and recreate the external Hive table that holds the aggregated counts.
function create_table() {
    hadoop fs -rm -r ${BASE_HIVE_DIR}/${HIVE_DB}/${HIVE_TABLE} || :
    hive -e "DROP TABLE IF EXISTS ${HIVE_DB}.${HIVE_TABLE};"
    hive -e "CREATE EXTERNAL TABLE IF NOT EXISTS ${HIVE_DB}.${HIVE_TABLE}(
    data_no String comment '数据项编号',
    data_name String comment '数据项名称',
    teacher_name String comment '教师姓名',
    teacher_no String comment '教师编号',
    data_cycle String comment '数据统计周期 YEAR 年 MONTH 月 DAY 日 QUARTER 季度 OTHER 其他',
    data_type String comment '数据类型 NUMBER 数值类型 ENUM 枚举类型',
    data_time String comment '数据日期 年YYYY 月YYYYmm 日YYYYMMDD 季度YYYY-1,yyyy-2,yyyy-3,yyyy-4 学期 yyyy-yyyy 学期 yyyy-yyyy-1,yyyy-yyyy-2',
    data_value String comment '数据项值(数字保存数字,如果是数据字典枚举保存key)',
    is_new String comment '是否最新 是YES 否NO',
    create_time String comment '创建时间'
    ) COMMENT '发表论文数量'
    LOCATION '${BASE_HIVE_DIR}/${HIVE_DB}/${HIVE_TABLE}'"
    fn_log "创建表——发表论文数量:${HIVE_DB}.${HIVE_TABLE}"
}

# Full load: aggregate paper counts per teacher and semester-year.
function import_table() {
    hive -e "
    INSERT INTO TABLE ${HIVE_DB}.${HIVE_TABLE}
    select
    c.data_no as data_no,
    c.data_name as data_name,
    a.teacher_name as teacher_name,
    a.teacher_no as teacher_no,
    c.data_cycle as data_cycle,
    c.data_type as data_type,
    a.semester_year as data_time,
    a.num as data_value,
    'NO' as is_new,
    FROM_UNIXTIME(
    UNIX_TIMESTAMP()
    ) AS create_time
    from
    ( select
    a.semester_year,
    a.teacher_code as teacher_no,
    a.teacher_name as teacher_name,
    count(a.code) as num
    from
    model.scientific_paper_personnel_info a
    group by a.semester_year,
    a.teacher_code,
    a.teacher_name
    ) a,
    assurance.im_quality_data_base_info c
    where c.data_no ='${DATA_NO}' and a.teacher_name != ''
    "
    fn_log "导入数据 —— 发表论文数量表:${HIVE_DB}.${HIVE_TABLE}"
}

# Full export: replace all rows for this DATA_NO in MySQL, then re-flag the
# most recent period as is_new = 'YES'.
function export_table() {
    DATE_TIME=`hive -e "select max(data_time) from ${HIVE_DB}.${HIVE_TABLE} " `
    clear_mysql_data "delete from im_quality_teacher_data_info
    where data_no = '${DATA_NO}' ;"
    sqoop export --connect ${MYSQL_URL} --username ${MYSQL_USERNAME} --password ${MYSQL_PASSWORD} \
    --table ${TARGET_TABLE} --export-dir ${BASE_HIVE_DIR}/${HIVE_DB}/${HIVE_TABLE} \
    --input-fields-terminated-by '\0001' --input-null-string '\\N' --input-null-non-string '\\N' \
    --null-string '\\N' --null-non-string '\\N' \
    --columns 'data_no,data_name,teacher_name,teacher_no,data_cycle,data_type,data_time,data_value,is_new,create_time'
    clear_mysql_data "update assurance.im_quality_teacher_data_info set is_new = 'NO' where data_no = '${DATA_NO}';"
    clear_mysql_data "update assurance.im_quality_teacher_data_info set is_new = 'YES' where data_no = '${DATA_NO}' and data_time= '${DATE_TIME}' "
    fn_log "导出数据-- 教师质控点:${HIVE_DB}.${TARGET_TABLE}"
}

# Incremental load: only aggregate the latest semester-year.
function import_table_new() {
    # NOTE(review): the inner subquery reads max(s.semeste_year) from
    # model.scientific_author_patent_info, while the outer query groups
    # model.scientific_paper_personnel_info by semester_year — the column
    # spelling and source table look inconsistent; confirm against the schema.
    hive -e "
    INSERT INTO TABLE ${HIVE_DB}.${HIVE_TABLE}
    select
    c.data_no as data_no,
    c.data_name as data_name,
    a.teacher_name as teacher_name,
    a.teacher_no as teacher_no,
    c.data_cycle as data_cycle,
    c.data_type as data_type,
    a.semester_year as data_time,
    a.num as data_value,
    'NO' as is_new,
    FROM_UNIXTIME(
    UNIX_TIMESTAMP()
    ) AS create_time
    from
    ( select
    a.semester_year,
    a.teacher_code as teacher_no,
    a.teacher_name as teacher_name,
    count(a.code) as num
    from
    model.scientific_paper_personnel_info a
    where a.semester_year in
    (select max(s.semeste_year) from model.scientific_author_patent_info s)
    group by a.semester_year,
    a.teacher_code,
    a.teacher_name
    ) a,
    assurance.im_quality_data_base_info c
    where c.data_no ='${DATA_NO}' and a.teacher_name != ''
    "
    fn_log "导入数据 —— 发表论文数量表:${HIVE_DB}.${HIVE_TABLE}"
}

# Incremental export: replace only the latest period's rows in MySQL.
function export_table_new() {
    DATE_TIME=`hive -e "select max(data_time) from ${HIVE_DB}.${HIVE_TABLE} " `
    clear_mysql_data "delete from im_quality_teacher_data_info
    where data_no = '${DATA_NO}' and data_time= '${DATE_TIME}';"
    sqoop export --connect ${MYSQL_URL} --username ${MYSQL_USERNAME} --password ${MYSQL_PASSWORD} \
    --table ${TARGET_TABLE} --export-dir ${BASE_HIVE_DIR}/${HIVE_DB}/${HIVE_TABLE} \
    --input-fields-terminated-by '\0001' --input-null-string '\\N' --input-null-non-string '\\N' \
    --null-string '\\N' --null-non-string '\\N' \
    --columns 'data_no,data_name,teacher_name,teacher_no,data_cycle,data_type,data_time,data_value,is_new,create_time'
    clear_mysql_data "update assurance.im_quality_teacher_data_info set is_new = 'NO' where data_no = '${DATA_NO}';"
    clear_mysql_data "update assurance.im_quality_teacher_data_info set is_new = 'YES' where data_no = '${DATA_NO}' and data_time= '${DATE_TIME}' "
    fn_log "导出数据-- 发表论文数量表:${HIVE_DB}.${TARGET_TABLE}"
}

# First run: invoke create_table - import_table - export_table.
# Subsequent runs: invoke create_table - import_table_new - export_table_new.
|
package ru.smartcoder.spring_rest_example.controller;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import ru.smartcoder.spring_rest_example.dao.ApplicationsDao;
import ru.smartcoder.spring_rest_example.model.Application;
import java.util.List;
import static ru.smartcoder.spring_rest_example.controller.Endpoints.*;
/**
 * REST endpoints for applications nested under a category:
 * /{version}/categories/{categoryId}/applications[/{id}].
 */
@RestController
public class ApplicationController {

    private static final String VERSION = VERSION_1_URL;

    @Autowired
    private ApplicationsDao applicationsDao;

    /** Lists every application belonging to the given category. */
    @GetMapping("/" + VERSION + "/" + CATEGORIES_URL + "/{categoryId}/" + APPLICATIONS_URL)
    public List<Application> getApplicationsByCategory(@PathVariable Long categoryId) {
        return applicationsDao.list(categoryId);
    }

    /** Fetches one application; 404 with a message body when absent. */
    @GetMapping("/" + VERSION + "/" + CATEGORIES_URL + "/{categoryId}/" + APPLICATIONS_URL + "/{id}")
    public ResponseEntity getApplication(@PathVariable Long categoryId,
                                         @PathVariable Long id) {
        Application application = applicationsDao.get(categoryId, id);
        if (application == null) {
            return new ResponseEntity<>("No Application found for ID " + id, HttpStatus.NOT_FOUND);
        }
        return new ResponseEntity<>(application, HttpStatus.OK);
    }

    /** Creates an application in the category and echoes it back. */
    @PostMapping(value = "/" + VERSION + "/" + CATEGORIES_URL + "/{categoryId}/" + APPLICATIONS_URL)
    public ResponseEntity createAppCategory(@PathVariable Long categoryId,
                                            @RequestBody Application application) {
        application.setCategoryId(categoryId);
        applicationsDao.create(application);
        return new ResponseEntity<>(application, HttpStatus.OK);
    }

    /** Deletes an application; returns the deleted id, or 404 when absent. */
    @DeleteMapping("/" + VERSION + "/" + CATEGORIES_URL + "/{categoryId}/" + APPLICATIONS_URL + "/{id}")
    public ResponseEntity deleteAppCategory(@PathVariable Long categoryId,
                                            @PathVariable Long id) {
        if (null == applicationsDao.delete(categoryId, id)) {
            return new ResponseEntity<>("No Application found for ID " + id, HttpStatus.NOT_FOUND);
        }
        return new ResponseEntity<>(id, HttpStatus.OK);
    }

    /** Updates an application; returns the new state, or 404 when absent. */
    @PutMapping("/" + VERSION + "/" + CATEGORIES_URL + "/{categoryId}/" + APPLICATIONS_URL + "/{id}")
    public ResponseEntity updateAppCategory(@PathVariable Long categoryId,
                                            @PathVariable Long id,
                                            @RequestBody Application application) {
        application.setCategoryId(categoryId);
        application = applicationsDao.update(application);
        if (null == application) {
            return new ResponseEntity<>("No Application found for ID " + id, HttpStatus.NOT_FOUND);
        }
        return new ResponseEntity<>(application, HttpStatus.OK);
    }
}
|
my_dict = {12345: 'John', 12346: 'Jessica', 12347: 'David'} |
<reponame>LaszloLajosT/Udacity-Exercises
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.explicitintent;
import android.os.Bundle;
import android.widget.TextView;
import androidx.appcompat.app.AppCompatActivity;
public class ChildActivity extends AppCompatActivity {
/* Field to store our TextView */
private TextView mDisplayText;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_child);
/* Typical usage of findViewById... */
mDisplayText = findViewById(R.id.tv_display);
// COMPLETED (3) Use the getIntent method to store the Intent that started this Activity in a variable
/*
* Here is where all the magic happens. The getIntent method will give us the Intent that
* started this particular Activity.
*/
Bundle dataFromMainActivity = getIntent().getExtras();
String information;
/* b) solution:
Intent intentThatStartedThisActivity = getIntent();
*/
// COMPLETED (4) Create an if statement to check if this Intent has the extra we passed from MainActivity
/*
* Although there is always an Intent that starts any particular Activity, we can't
* guarantee that the extra we are looking for was passed as well. Because of that, we need
* to check to see if the Intent has the extra that we specified when we created the
* Intent that we use to start this Activity. Note that this extra may not be present in
* the Intent if this Activity was started by any other method.
* */
if (dataFromMainActivity != null) {
// COMPLETED (5) If the Intent contains the correct extra, retrieve the text
information = dataFromMainActivity.getString("data");
// COMPLETED (6) If the Intent contains the correct extra, use it to set the TextView text
mDisplayText.setText(information);
}
/* Or I can use b) solution */
/*if (intentThatStartedThisActivity.hasExtra(Intent.EXTRA_TEXT)) {
// COMPLETED (5) If the Intent contains the correct extra, retrieve the text
String textEntered = intentThatStartedThisActivity.getStringExtra(Intent.EXTRA_TEXT);
// COMPLETED (6) If the Intent contains the correct extra, use it to set the TextView text
mDisplayText.setText(textEntered);
}
*/
}
} |
const { git } = require("./git-cli");
/**
 * Returns the commits made since `tag` (or all commits when no tag is given)
 * as an array of { hash, date, header, body, author: { name, email } }.
 *
 * Git has no JSON output, so we emit a JSON-shaped pretty format using the
 * control characters \x1A (in place of `"`) and \x17 (record separator),
 * escape the raw commit text, then restore the JSON quoting and parse.
 *
 * @param {string|undefined} tag  start tag (exclusive); falsy => full history
 * @param {string} dir            repository working directory
 * @returns {Promise<Array<object>>}
 */
const getCommitsSinceTag = async (tag, dir) => {
  const format = {
    hash: "%H",
    date: "%aI",
    header: "%s",
    body: "%b",
    // BUGFIX: the email placeholder had been mangled to `<PASSWORD>"`;
    // %ae is git's author-email pretty-format placeholder.
    author: { name: "%an", email: "%ae" },
  };
  // Swap the JSON quotes for \x1A so quotes inside commit messages can be
  // escaped separately; \x17 terminates each record.
  const gitLogFormat = JSON.stringify(format).replace(/"/gm, "%x1A") + "%x17";
  const range = tag ? `${tag}..HEAD` : "HEAD";
  try {
    const out = await git(
      ["log", range, "--no-merges", `--pretty=format:${gitLogFormat}`],
      dir
    );
    if (out.trim() === "") return [];
    return JSON.parse(
      `[${out
        .replace(/\\/gm, "\\\\") // escape backslashes from commit text
        .replace(/"/gm, '\\"') // escape quotes from commit text
        // eslint-disable-next-line no-control-regex
        .replace(/\x1A/gm, '"') // restore the JSON structural quotes
        // eslint-disable-next-line no-control-regex
        .replace(/\x17\n/gm, ",") // record separator -> JSON comma
        .replace(/(\r\n|\r|\n)/gm, "\\n") // newlines inside fields
        .slice(0, -1)}]` // drop the trailing \x17 of the last record
    );
  } catch (err) {
    console.log(err);
    throw err;
  }
};
module.exports = { getCommitsSinceTag };
|
from typing import Tuple
import torch
from torch import Tensor
class RayTracer:
    """Minimal ray/scene intersection stub.

    Holds a batch of rays (origins and directions) and exposes a placeholder
    intersection query plus a helper for discarding rays.
    """

    def __init__(self):
        super().__init__()
        # Ray origins and directions; unset until assigned by the caller.
        # assumes each is a (num_rays, 3) tensor once set — TODO confirm.
        self.rays_o = None
        self.rays_d = None

    def ray_intersect(
        self, rays_o: Tensor, rays_d: Tensor
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """Placeholder intersection returning ``(t, t_max, t_min, hit_idx)``.

        No geometry is tested yet: ``t`` and ``t_min`` are zeros, ``t_max``
        is +inf and ``hit_idx`` is a zero ``long`` tensor, all of length
        ``rays_o.shape[0]``.
        """
        num_rays = rays_o.shape[0]
        t = torch.zeros(num_rays)
        t_min = torch.zeros(num_rays)
        t_max = torch.full((num_rays,), float("inf"))
        hit_idx = torch.zeros(num_rays, dtype=torch.long)
        return t, t_max, t_min, hit_idx

    def pruning(self, keep: Tensor):
        """Keep only the rays selected by the boolean/index mask ``keep``."""
        self.rays_o = self.rays_o[keep]
        self.rays_d = self.rays_d[keep]
<reponame>FireUpdate/Giveaway<gh_stars>0
// Discord giveaway bot entry point: builds the discord.js client, attaches a
// discord-giveaways manager, loads event/command modules from disk, then wires
// giveaway lifecycle handlers that DM participants and winners.
const Discord = require("discord.js");
const client = new Discord.Client();
const fs = require("fs");
const config = require("./config.json");
client.config = config;
// Initialise discord giveaways
const { GiveawaysManager } = require("discord-giveaways");
client.giveawaysManager = new GiveawaysManager(client, {
  updateCountdownEvery: 3000, // ms between countdown embed refreshes
  default: {
    botsCanWin: false,
    embedColor: "#FF0000",
    reaction: "🎉"
  }
});
//Coded by Zero
/* Load all events (discord based): each file exports a handler and is
 * registered under its filename (minus extension) as the event name. */
fs.readdir("./events/discord", (_err, files) => {
  // NOTE(review): _err is ignored — if ./events/discord is missing, `files`
  // is undefined and the forEach below throws. Confirm this is acceptable.
  files.forEach(file => {
    if (!file.endsWith(".js")) return;
    const event = require(`./events/discord/${file}`);
    let eventName = file.split(".")[0];
    console.log(`[Event] ✅ Loaded: ${eventName}`);
    client.on(eventName, event.bind(null, client));
    // Drop the module from the require cache so edits are picked up on reload.
    delete require.cache[require.resolve(`./events/discord/${file}`)];
  });
});
// Let commands be a new collection
client.commands = new Discord.Collection();
/* Load all commands: each ./commands/<name>.js is registered under <name>. */
fs.readdir("./commands/", (_err, files) => {
  files.forEach(file => {
    if (!file.endsWith(".js")) return;
    let props = require(`./commands/${file}`);
    let commandName = file.split(".")[0];
    client.commands.set(commandName, props);
    console.log(`[Command] ✅ Loaded: ${commandName}`);
  });
});
/* Client's GiveawaysManager Events */
// Entry gate: when a user reacts to a giveaway, require them to be a member of
// the server stored in giveaway.extraData.server; otherwise remove the
// reaction and DM a denial.
client.giveawaysManager.on(
  "giveawayReactionAdded",
  async (giveaway, reactor, messageReaction) => {
    if (reactor.user.bot) return;
    try {
      if(giveaway.extraData){
        // Throws when the reactor is not a member of the required server.
        await client.guilds.cache.get(giveaway.extraData.server).members.fetch(reactor.id)
      }
      reactor.send(
        new Discord.MessageEmbed()
          .setTimestamp()
          .setTitle("Entery Approved! | You have a chance to win!!")
          .setDescription(
            `Your entery to [This Giveaway](https://discord.com/channels/${giveaway.guildID}/${giveaway.channelID}/${giveaway.messageID}) has been approved!`
          )
          .setFooter("Subscribe to ZeroSync on YT!")
          .setTimestamp()
      );
    } catch (error) {
      // NOTE(review): this path also runs when extraData is undefined (e.g.
      // reactor.send failed) — then giveaway.extraData.server throws and the
      // user's reaction is not removed. Guard extraData before dereferencing.
      const guildx = client.guilds.cache.get(giveaway.extraData.server)
      messageReaction.users.remove(reactor.user);
      reactor.send( new Discord.MessageEmbed()
        .setTimestamp()
        .setTitle(":x: Entery Denied | Databse Entery Not Found & Returned!")
        .setDescription(
          `Your entery to [This Giveaway](https://discord.com/channels/${giveaway.guildID}/${giveaway.channelID}/${giveaway.messageID}) has been denied as you did not join **${guildx.name}**`
        )
        .setFooter("Subscribe to ZeroSync on YT!")
      );
    }
  }
);
// Check if user reacts on an ended giveaway: undo the reaction and tell them.
client.giveawaysManager.on('endedGiveawayReactionAdded', (giveaway, member, reaction) => {
  reaction.users.remove(member.user);
  member.send(`**Aw snap! Looks Like that giveaway has already ended!**`)
});
// Dm our winners when a giveaway ends.
client.giveawaysManager.on('giveawayEnded', (giveaway, winners) => {
  winners.forEach((member) => {
    member.send(new Discord.MessageEmbed()
      .setTitle(`🎁 Let's goo!`)
      .setDescription(`Hello there ${member.user}\n I heard that you have won **[[This Giveaway]](https://discord.com/channels/${giveaway.guildID}/${giveaway.channelID}/${giveaway.messageID})**\n Good Job On Winning **${giveaway.prize}!**\nDirect Message the host to claim your prize!!`)
      .setTimestamp()
      .setFooter(member.user.username, member.user.displayAvatarURL())
    );
  });
});
// Dm Rerolled winners after the host rerolls.
client.giveawaysManager.on('giveawayRerolled', (giveaway, winners) => {
  winners.forEach((member) => {
    member.send(new Discord.MessageEmbed()
      .setTitle(`🎁 Let's goo! We Have A New Winner`)
      .setDescription(`Hello there ${member.user}\n I heard that the host rerolled and you have won **[[This Giveaway]](https://discord.com/channels/${giveaway.guildID}/${giveaway.channelID}/${giveaway.messageID})**\n Good Job On Winning **${giveaway.prize}!**\nDirect Message the host to claim your prize!!`)
      .setTimestamp()
      .setFooter(member.user.username, member.user.displayAvatarURL())
    );
  });
});
// When They Remove Reaction: acknowledge the withdrawn entry by DM.
client.giveawaysManager.on('giveawayReactionRemoved', (giveaway, member, reaction) => {
  return member.send( new Discord.MessageEmbed()
    .setTimestamp()
    .setTitle('❓ Hold Up Did You Just Remove a Reaction From A Giveaway?')
    .setDescription(
      `Your entery to [This Giveaway](https://discord.com/channels/${giveaway.guildID}/${giveaway.channelID}/${giveaway.messageID}) was recorded but you un-reacted, since you don't need **${giveaway.prize}** I would have to choose someone else 😭`
    )
    .setFooter("Think It was a mistake? Go react again!")
  );
});
// Login through the client; the token comes from the environment, not config.
client.login(process.env.BOT_TOKEN);
|
#!/bin/bash
# Test suite for the `jp` binary's `.cons` operation (prepend an element to an
# array). Relies on pass/fail/end helpers defined in test-bootstrap.bash.
source "test-bootstrap.bash"
# Disable word splitting so captured output is compared verbatim below.
IFS=
# Error cases: each $(./jp ...) runs jp and, via command substitution, executes
# its (assumed empty) output; $? then reflects jp's exit status.
$(./jp .cons 2>/dev/null)
if [ $? -ne 0 ];then
pass "cons without args errors"
else
fail "cons without args does not error"
fi
$(./jp 1 .cons 2>/dev/null)
if [ $? -ne 0 ];then
pass "cons one arg errors"
else
fail "cons one arg does not error"
fi
# Two args but the second is not an array: still an error.
$(./jp 1 2 .cons 2>/dev/null)
if [ $? -ne 0 ];then
pass "cons no array errors"
else
fail "cons no array does not error"
fi
# Consing an empty array onto an empty array yields a single-element array.
empty=$(./jp [] [] .cons)
if [ "$empty" = $'[[]]' ];then
pass "cons two empty arrays"
else
# %q quoting makes unexpected whitespace/control chars visible in the report.
printf -v emptyesc "%q" "$empty"
fail "cons two empty arrays returns: $emptyesc"
fi
one=$(./jp [] 1 .cons)
if [ "$one" = $'[1]' ];then
pass "cons into empty array returns [1]"
else
printf -v oneesc "%q" "$one"
fail "cons into empty array returns: $oneesc"
fi
# The new element is prepended, not appended.
two=$(./jp [2] 1 .cons)
if [ "$two" = $'[1,2]' ];then
pass "cons into an array returns [1,2]"
else
printf -v twoesc "%q" "$two"
fail "cons into an array returns: $twoesc"
fi
# Emit the summary / final exit status (from test-bootstrap.bash).
end
|
// Register a routing scope for URLs under the 'Admin' prefix. DashedRoute
// maps CamelCase controller/action names to dashed URL segments
// (FooBar -> foo-bar), which is what the assertion below relies on.
Router::prefix('Admin', function (RouteBuilder $routes) {
    $routes->connect('/:controller/:action', [], ['routeClass' => 'DashedRoute']);
});
// Attach the current request so URL generation has a routing context.
Router::setRequest($request);
// Generate a hyperlink via the HtmlHelper; the URL array is reverse-routed
// through the prefix scope defined above.
$result = $this->Html->link('Foo', ['prefix' => 'Admin', 'controller' => 'FooBar', 'action' => 'test']);
// Expected: prefix and controller are lowercased/dashed in the generated URL.
$expected = '<a href="/admin/foo-bar/test">Foo</a>';
// Verify that the generated hyperlink matches the expected format
$this->assertEquals($expected, $result);
# Versions of the Xamarin platform packages to install.
XAMARIN_IOS_VERSION=11.10.1.178
XAMARIN_ANDROID_VERSION=8.3.0-19
XAMARIN_MAC_VERSION=4.5.0.280

# Install a macOS .pkg system-wide. The argument is quoted so package paths
# containing spaces work (the original left $1 unquoted).
installpkg() {
	sudo installer -pkg "$1" -target /
}

# install Android SDK
brew tap caskroom/cask
brew cask install android-sdk
# -p: don't fail if ~/.android already exists (idempotent re-runs).
mkdir -p ~/.android
touch ~/.android/repositories.cfg
# `yes` auto-accepts the SDK licenses; quote the SDK path in case it has spaces.
yes | "$ANDROID_SDK_PATH"/tools/bin/sdkmanager "build-tools;27.0.1" "platforms;android-27" "platform-tools" > sdkmanager.log

# install Xamarin.IOS
wget https://dl.xamarin.com/MonoTouch/Mac/xamarin.ios-$XAMARIN_IOS_VERSION.pkg
installpkg xamarin.ios-$XAMARIN_IOS_VERSION.pkg > installpkg.xamarin.ios.log

# install Xamarin.Android
wget https://dl.xamarin.com/MonoforAndroid/Mac/xamarin.android-$XAMARIN_ANDROID_VERSION.pkg
installpkg xamarin.android-$XAMARIN_ANDROID_VERSION.pkg > installpkg.xamarin.android.log

# install Xamarin.Mac
wget https://dl.xamarin.com/XamarinforMac/Mac/xamarin.mac-$XAMARIN_MAC_VERSION.pkg
installpkg xamarin.mac-$XAMARIN_MAC_VERSION.pkg > installpkg.xamarin.mac.log
<filename>src/utilities/idf/Test/ObjectOrderBase_GTest.cpp
/***********************************************************************************************************************
* OpenStudio(R), Copyright (c) 2008-2021, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
*
* (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
*
* (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission from the respective party.
*
* (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works
* may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior
* written permission from Alliance for Sustainable Energy, LLC.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED
* STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************************************************************/
#include <gtest/gtest.h>
#include "IdfFixture.hpp"
#include "../ObjectOrderBase.hpp"
#include <utilities/idd/IddEnums.hxx>
using openstudio::ObjectOrderBase;
using openstudio::IddObjectTypeVector;
using openstudio::OptionalIddObjectTypeVector;
using openstudio::IddObjectType;
// Verifies the three construction modes of ObjectOrderBase: default (order
// follows the IddObjectType enum), an explicit IddObjectType list, and
// "direct order" (ordering ceded to a derived class).
TEST_F(IdfFixture, ObjectOrderBase_Constructors) {
  // default
  ObjectOrderBase defaultOrderer;
  EXPECT_TRUE(defaultOrderer.orderByIddEnum());
  EXPECT_FALSE(defaultOrderer.iddOrder());
  // order is taken from IddObjectType enum order
  EXPECT_TRUE(defaultOrderer.less(openstudio::IddObjectType::Building, openstudio::IddObjectType::AirflowNetwork_Distribution_Component_Coil));
  EXPECT_FALSE(defaultOrderer.less(openstudio::IddObjectType::Output_Diagnostics, openstudio::IddObjectType::Lights));
  // every type has an index when ordering by enum
  EXPECT_TRUE(defaultOrderer.indexInOrder(openstudio::IddObjectType::Branch));
  // specified order of IddObjectTypes
  IddObjectTypeVector order;
  order.push_back(openstudio::IddObjectType::Lights);
  order.push_back(openstudio::IddObjectType::Zone);
  order.push_back(openstudio::IddObjectType::RunPeriod);
  order.push_back(openstudio::IddObjectType::Building);
  ObjectOrderBase userEnumOrder(order);
  EXPECT_FALSE(userEnumOrder.orderByIddEnum());
  ASSERT_TRUE(userEnumOrder.iddOrder());
  EXPECT_TRUE(order == *(userEnumOrder.iddOrder()));
  ASSERT_TRUE(userEnumOrder.indexInOrder(openstudio::IddObjectType::RunPeriod));
  EXPECT_EQ(static_cast<unsigned>(2), *(userEnumOrder.indexInOrder(openstudio::IddObjectType::RunPeriod)));
  // a type absent from the list is reported at the one-past-the-end index
  ASSERT_TRUE(userEnumOrder.indexInOrder(openstudio::IddObjectType::Branch));
  EXPECT_EQ(static_cast<unsigned>(4), *(userEnumOrder.indexInOrder(openstudio::IddObjectType::Branch)));
  // derived class is handling order: no enum order, no list, no indices
  ObjectOrderBase cededControl(true);
  EXPECT_FALSE(cededControl.orderByIddEnum());
  EXPECT_FALSE(cededControl.iddOrder());
  EXPECT_FALSE(cededControl.indexInOrder(openstudio::IddObjectType::Building));
}
// test that when new type of order is set, others are disabled
// test that when new type of order is set, others are disabled
// (setIddOrder, setOrderByIddEnum and setDirectOrder are mutually exclusive)
TEST_F(IdfFixture, ObjectOrderBase_OrderSetters) {
  ObjectOrderBase orderer;
  IddObjectTypeVector order;
  order.push_back(openstudio::IddObjectType::Lights);
  order.push_back(openstudio::IddObjectType::Zone);
  order.push_back(openstudio::IddObjectType::RunPeriod);
  order.push_back(openstudio::IddObjectType::Building);
  // explicit list: enum ordering off; Lights precedes Building per the list
  orderer.setIddOrder(order);
  EXPECT_FALSE(orderer.orderByIddEnum());
  EXPECT_TRUE(orderer.less(openstudio::IddObjectType::Lights, openstudio::IddObjectType::Building));
  // back to enum ordering: the list is dropped and comparisons flip
  orderer.setOrderByIddEnum();
  EXPECT_TRUE(orderer.orderByIddEnum());
  EXPECT_FALSE(orderer.iddOrder());
  EXPECT_TRUE(orderer.less(openstudio::IddObjectType::Building, openstudio::IddObjectType::Lights));
  // direct order: both enum ordering and the list are disabled
  orderer.setDirectOrder();
  EXPECT_FALSE(orderer.orderByIddEnum());
  EXPECT_FALSE(orderer.iddOrder());
}
// Exercises the list-manipulation API of a user-specified IddObjectType order:
// push_back, insert (by anchor type and by index), move, swap, and erase.
TEST_F(IdfFixture, ObjectOrderBase_ManipulateIddObjectTypeOrder) {
  IddObjectTypeVector order;
  order.push_back(openstudio::IddObjectType::Lights);     // 0
  order.push_back(openstudio::IddObjectType::Zone);       // 1
  order.push_back(openstudio::IddObjectType::RunPeriod);  // 2
  order.push_back(openstudio::IddObjectType::Building);   // 3
  ObjectOrderBase orderer(order);
  bool success;
  // push_back appends; absent types report the one-past-the-end index
  success = orderer.push_back(openstudio::IddObjectType::Schedule_Compact);  // 4
  EXPECT_TRUE(success);
  EXPECT_EQ(static_cast<unsigned>(4), *(orderer.indexInOrder(openstudio::IddObjectType::Schedule_Compact)));
  EXPECT_EQ(static_cast<unsigned>(5), *(orderer.indexInOrder(openstudio::IddObjectType::Schedule_Day_Hourly)));
  EXPECT_TRUE(orderer.less(openstudio::IddObjectType::Schedule_Compact, openstudio::IddObjectType::DesignSpecification_OutdoorAir));
  EXPECT_FALSE(orderer.less(openstudio::IddObjectType::DesignSpecification_OutdoorAir, openstudio::IddObjectType::Schedule_Day_Hourly));
  // insert behind IddObjectType (anchor in the list: takes its position)
  success = orderer.insert(openstudio::IddObjectType::Ceiling_Adiabatic, IddObjectType(IddObjectType::Building));
  EXPECT_TRUE(success);
  EXPECT_EQ(static_cast<unsigned>(3), *(orderer.indexInOrder(openstudio::IddObjectType::Ceiling_Adiabatic)));
  // anchor not in the list: the new type lands at the end
  success = orderer.insert(IddObjectType::Daylighting_Controls, IddObjectType(IddObjectType::AirflowNetwork_Distribution_Node));
  EXPECT_TRUE(success);
  EXPECT_EQ(orderer.iddOrder()->size() - 1, *(orderer.indexInOrder(openstudio::IddObjectType::Daylighting_Controls)));
  // insert at index; an out-of-range index appends
  success = orderer.insert(openstudio::IddObjectType::ThermalStorage_ChilledWater_Mixed, 2);
  EXPECT_TRUE(success);
  EXPECT_EQ(static_cast<unsigned>(2), *(orderer.indexInOrder(openstudio::IddObjectType::ThermalStorage_ChilledWater_Mixed)));
  success = orderer.insert(openstudio::IddObjectType::Refrigeration_CompressorList, 37891);
  EXPECT_TRUE(success);
  EXPECT_EQ(orderer.iddOrder()->size() - 1, *(orderer.indexInOrder(openstudio::IddObjectType::Refrigeration_CompressorList)));
  // move before IddObjectType; moving a type not in the list fails and the
  // list size stays constant throughout
  unsigned n = orderer.iddOrder()->size();
  success = orderer.move(openstudio::IddObjectType::Refrigeration_CompressorList, IddObjectType(IddObjectType::Lights));
  EXPECT_TRUE(success);
  EXPECT_EQ(static_cast<unsigned>(0), *(orderer.indexInOrder(openstudio::IddObjectType::Refrigeration_CompressorList)));
  success = orderer.move(openstudio::IddObjectType::ElectricLoadCenter_Generators, IddObjectType(IddObjectType::Building));
  EXPECT_FALSE(success);
  // moving before an absent anchor sends the type to the end
  success = orderer.move(openstudio::IddObjectType::Building, IddObjectType(IddObjectType::ElectricLoadCenter_Generators));
  EXPECT_TRUE(success);
  EXPECT_EQ(orderer.iddOrder()->size() - 1, *(orderer.indexInOrder(openstudio::IddObjectType::Building)));
  EXPECT_EQ(n, orderer.iddOrder()->size());
  // move to index; out-of-range index moves to the end
  success = orderer.move(openstudio::IddObjectType::Building, 0);
  EXPECT_TRUE(success);
  EXPECT_EQ(static_cast<unsigned>(0), *(orderer.indexInOrder(openstudio::IddObjectType::Building)));
  success = orderer.move(openstudio::IddObjectType::RunPeriod, 18601);
  EXPECT_TRUE(success);
  EXPECT_EQ(orderer.iddOrder()->size() - 1, *(orderer.indexInOrder(openstudio::IddObjectType::RunPeriod)));
  success = orderer.move(openstudio::IddObjectType::ZoneControl_Humidistat, 0);
  EXPECT_FALSE(success);
  EXPECT_EQ(n, orderer.iddOrder()->size());
  // swap exchanges the two types' positions
  unsigned i = *(orderer.indexInOrder(openstudio::IddObjectType::Lights));
  unsigned j = *(orderer.indexInOrder(openstudio::IddObjectType::Refrigeration_CompressorList));
  success = orderer.swap(openstudio::IddObjectType::Lights, openstudio::IddObjectType::Refrigeration_CompressorList);
  EXPECT_TRUE(success);
  EXPECT_EQ(i, *(orderer.indexInOrder(openstudio::IddObjectType::Refrigeration_CompressorList)));
  EXPECT_EQ(j, *(orderer.indexInOrder(openstudio::IddObjectType::Lights)));
  EXPECT_EQ(n, orderer.iddOrder()->size());
  // erase removes the type; erasing again fails and the list has shrunk
  success = orderer.erase(openstudio::IddObjectType::Refrigeration_CompressorList);
  EXPECT_TRUE(success);
  EXPECT_EQ(orderer.iddOrder()->size(), *(orderer.indexInOrder(openstudio::IddObjectType::Refrigeration_CompressorList)));
  success = orderer.erase(openstudio::IddObjectType::Refrigeration_CompressorList);
  EXPECT_FALSE(success);
  EXPECT_TRUE(orderer.iddOrder()->size() < n);
}
|
<reponame>xuerong/PeonyFramwork
package com.peony.core.control.netEvent;
import com.alibaba.fastjson.JSONObject;
import com.peony.core.cluster.ServerInfo;
import com.peony.core.control.ServiceHelper;
import com.peony.core.control.annotation.EventListener;
import com.peony.core.control.annotation.NetEventListener;
import com.peony.core.control.annotation.Service;
import com.peony.common.exception.MMException;
import com.peony.common.exception.ToClientException;
import com.peony.core.server.Server;
import com.peony.core.server.ServerType;
import com.peony.core.server.SysConstantDefine;
import com.peony.core.control.BeanHelper;
import com.peony.common.tool.thread.ThreadPoolHelper;
import com.peony.common.tool.util.Util;
import gnu.trove.map.hash.TIntObjectHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.*;
/**
 * Sends and receives cluster-internal "net events" between servers.
 *
 * <p>A net event can be dispatched to a single server (sync or async) or
 * broadcast to every known server. Events addressed to this server itself are
 * handled in-process rather than over the network (a server never connects to
 * itself). Handlers are discovered from {@code @NetEventListener} methods at
 * init time; each event id has exactly one handler — fan-out, if needed, is
 * done inside the handler via internal events.
 *
 * <p>TODO (carried over from the original): timeout handling for sync sends
 * and a remove/unregister API are still missing.
 */
@Service(init = "init",initPriority = 1,destroy = "destroy")
public class NetEventService {
    private static final Logger logger = LoggerFactory.getLogger(NetEventService.class);
    private static int processors = Runtime.getRuntime().availableProcessors();
    /** Event id -> handler; populated once in {@link #init()}, read-only after. */
    private Map<Integer, NetEventListenerHandler> handlerMap = null;
    // Bounded pool used for async dispatch (at most ~10 queued tasks per thread).
    private final ThreadPoolExecutor executor = ThreadPoolHelper.newThreadPoolExecutor("PeonyNetEvent",32,256,65536);
    /** Clients for all known remote servers, keyed by server id; never contains this server. */
    private Map<Integer, ServerClient> serverClients = new ConcurrentHashMap<>();
    /** Currently unused; retained for a planned async-server dispatch API. */
    private ServerClient asyncServerClient;
    /** This server's net-event address in "host:port" form. */
    private String selfAddress;

    /** Builds the handler map from annotated listener classes and records our own address. */
    public void init() {
        handlerMap = new HashMap<>();
        TIntObjectHashMap<Class<?>> netEventHandlerClassMap = ServiceHelper.getNetEventListenerHandlerClassMap();
        netEventHandlerClassMap.forEachEntry((i, aClass) -> {
            handlerMap.put(i, (NetEventListenerHandler) BeanHelper.getServiceBean(aClass));
            return true;
        });
        selfAddress = getServerKey(Util.getHostAddress(), Server.getEngineConfigure().getNetEventPort());
        logger.info("net event address " + selfAddress);
    }

    /** Canonical "host:port" key for a server address. */
    private String getServerKey(String host, int port) {
        return host + ":" + port;
    }

    /** Heartbeat response handler; returns no payload. */
    @NetEventListener(netEvent = SysConstantDefine.NETEVENT_PONG)
    public NetEventData pong(NetEventData eventData) {
        logger.info("receive poing from {}", eventData.getChannel());
        return null;
    }

    // Re-register the client after an automatic reconnect so lookups see the live instance.
    @EventListener(event = SysConstantDefine.Event_ConnectNewServer)
    public void onClientConnected(NettyServerClient client) {
        addClient(client);
    }

    /**
     * Registers a server and blocks until the connection is established.
     * Returns {@code null} when the target is this server itself.
     *
     * @param id   server id
     * @param host server host
     * @param port server net-event port
     * @return the client used for communication and status checks
     */
    public ServerClient registerServerSyn(int id,String host,int port) {
        return registerServerSyn(id, host, port,Integer.MAX_VALUE,true);
    }

    public ServerClient registerServerSyn(int id,String host,int port, int timeout, boolean autoReconect) {
        if(Server.getServerId() == id){
            return null;
        }
        // Never connect to ourselves.
        if (host.equals(Util.getHostAddress()) && port == Server.getEngineConfigure().getNetEventPort()) {
            throw new MMException("server address error!");
        }
        // BUGFIX: serverClients is keyed by server id (see addClient); the old
        // code looked it up with the "host:port" String and therefore never
        // reused an existing connection.
        NettyServerClient client = (NettyServerClient) serverClients.get(id);
        if(client != null && client.isConnected()) {
            return client;
        }
        client = new NettyServerClient(ServerType.NODE_SERVER,id, host, port);
        client.setAutoReconnect(autoReconect);
        client.connectSync(timeout);
        addClient(client);
        return client;
    }

    /** Like {@link #registerServerSyn(int, String, int)} but connects in the background. */
    public ServerClient registerServerAsync(int id,String host,int port) {
        if(Server.getServerId() == id){
            return null;
        }
        // Never connect to ourselves.
        if (host.equals(Util.getHostAddress()) && port == Server.getEngineConfigure().getNetEventPort()) {
            throw new MMException("server address error!");
        }
        // BUGFIX: look up by server id, matching how addClient stores clients.
        NettyServerClient client = (NettyServerClient) serverClients.get(id);
        if(client != null && client.isConnected()) {
            return client;
        }
        client = new NettyServerClient(ServerType.NODE_SERVER,id, host, port);
        client.setAutoReconnect(true);
        client.connectAsync();
        addClient(client);
        return client;
    }

    private void addClient(NettyServerClient client) {
        serverClients.put(client.getServerId(), client);
        logger.info("client add {} {} {}", client.getServerId(),client.getAddress(), serverClients.size());
    }

    /**
     * Dispatches an event to its (single) registered handler in-process.
     * Each event id has exactly one listener; fan-out happens inside handlers.
     *
     * @throws MMException when no handler is registered for the event id
     */
    public NetEventData handleNetEventData(NetEventData netEventData) {
        NetEventListenerHandler handler = handlerMap.get(netEventData.getNetEvent());
        if (handler == null) {
            throw new MMException("netEventHandle is not exist , netEvent=" + netEventData.getNetEvent());
        }
        return handler.handle(netEventData);
    }

    /**
     * Broadcasts an event to all known servers asynchronously (fire-and-forget).
     *
     * @param self whether to also handle the event on this server
     */
    public void broadcastNetEvent(final NetEventData netEvent, final boolean self) {
        executor.execute(() -> {
            for (final Map.Entry<Integer, ServerClient> entry : serverClients.entrySet()) {
                try {
                    entry.getValue().push(netEvent); // no reply expected
                } catch (Throwable t) {
                    // BUGFIX: the original used `try { ... } finally { continue; }`
                    // — the abrupt completion in finally silently discarded any
                    // exception. Broadcast stays best-effort, but failures are logged.
                    logger.error("broadcast push failed, serverId={}", entry.getKey(), t);
                }
            }
            if (self) {
                handleNetEventData(netEvent);
            }
        });
    }

    /**
     * Broadcasts an event and waits for every server's response.
     * The per-server sends run in parallel; this call returns once all complete.
     *
     * @param self whether to also handle the event locally and include our result
     * @return server id -> response, or {@code null} when the wait itself failed
     */
    public Map<Integer, NetEventData> broadcastNetEventSyn(final NetEventData netEvent, boolean self) {
        try {
            final CountDownLatch latch = new CountDownLatch(serverClients.size());
            final Map<Integer, NetEventData> result = new ConcurrentHashMap<>();
            for (final Map.Entry<Integer, ServerClient> entry : serverClients.entrySet()) {
                executor.execute(() -> {
                    try {
                        NetEventData ret = sendNetEvent(entry.getValue(), netEvent);
                        // BUGFIX: the original only stored a placeholder when ret
                        // was null and discarded real responses. Store the actual
                        // response; fall back to an empty event when there is none.
                        if (ret == null) {
                            ret = new NetEventData(netEvent.getNetEvent());
                        }
                        result.put(entry.getKey(), ret);
                    } finally {
                        latch.countDown();
                    }
                });
            }
            latch.await();
            if (self) {
                NetEventData ret = handleNetEventData(netEvent);
                result.put(Server.getServerId(), ret);
            }
            return result;
        } catch (Throwable e) {
            logger.error("exception happened while fire netEvent :" + netEvent.getNetEvent(), e);
        }
        return null;
    }

    /**
     * Sends an event to one server by id, asynchronously (no reply).
     * Handles locally when the target is this server.
     */
    public void fireServerNetEvent(int id, NetEventData netEvent) {
        if (Server.getServerId() == id) {
            handleNetEventData(netEvent);
            return;
        }
        ServerClient serverClient = serverClients.get(id);
        if (serverClient != null) {
            serverClient.push(netEvent);
            return;
        }
        throw new MMException("serverClient is null");
    }

    /**
     * Sends an event to one server by id and waits for the response.
     * Handles locally when the target is this server.
     */
    public NetEventData fireServerNetEventSyn(int id, NetEventData netEvent) {
        if (Server.getServerId() == id) {
            return handleNetEventData(netEvent);
        }
        ServerClient serverClient = serverClients.get(id);
        if (serverClient != null && serverClient.isConnected()) {
            return sendNetEvent(serverClient, netEvent);
        }
        // A send timeout would already have thrown above.
        throw new MMException(MMException.ExceptionType.RemoteFail,"服务器尚未建立连接 ,address={}," ,serverClient==null?null:serverClient.getAddress());
    }

    /**
     * Performs a synchronous request/response exchange with a remote server and
     * translates remote-side exceptions back into local ones.
     *
     * @throws MMException       when the transport fails, no data comes back,
     *                           or the remote reported an (MM)exception
     * @throws ToClientException when the remote reported a client-facing error
     */
    public NetEventData sendNetEvent(ServerClient serverClient, NetEventData netEvent) {
        NetEventData ret;
        try {
            ret = (NetEventData) serverClient.request(netEvent);
        } catch (Throwable e) {
            // BUGFIX: the original caught everything — including the translated
            // exceptions below — and re-threw a misleading "serverClient is null"
            // message, losing the real cause. Only transport failures are wrapped now.
            logger.error("sendNetEvent error!", e);
            throw new MMException(MMException.ExceptionType.SendNetEventFail,
                    "request failed: " + e.getMessage());
        }
        if (ret == null) {
            throw new MMException(MMException.ExceptionType.SendNetEventFail, "no response data");
        }
        if (ret.getNetEvent() == SysConstantDefine.NETEVENTEXCEPTION || ret.getNetEvent() == SysConstantDefine.NETEVENTMMEXCEPTION) {
            throw new MMException((String) ret.getParam());
        } else if (ret.getNetEvent() == SysConstantDefine.NETEVENTTOCLIENTEXCEPTION) {
            JSONObject object = JSONObject.parseObject((String) ret.getParam());
            throw new ToClientException(object.getInteger("errCode"), object.getString("errMsg"));
        }
        return ret;
    }

    /**
     * Sends an event to the server described by {@code serverInfo} and waits
     * for the response. Handles locally when the address is our own.
     */
    public NetEventData fireServerNetEventSyn(ServerInfo serverInfo, NetEventData netEvent) {
        String address = serverInfo.getInnerAddress();
        if (address.equals(selfAddress)) {
            return handleNetEventData(netEvent);
        }
        // BUGFIX: serverClients is keyed by server id; the original called
        // serverClients.get(address) with a String key on an Integer-keyed map,
        // which always returned null. Scan by address instead.
        // NOTE(review): assumes ServerClient.getAddress() uses the same
        // "host:port" form as ServerInfo.getInnerAddress() — confirm.
        ServerClient serverClient = null;
        for (ServerClient client : serverClients.values()) {
            if (address.equals(client.getAddress())) {
                serverClient = client;
                break;
            }
        }
        if (serverClient != null && serverClient.isConnected()) {
            return sendNetEvent(serverClient, netEvent);
        }
        throw new MMException(MMException.ExceptionType.RemoteFail,"服务器尚未建立连接 " + address);
    }

    /** Closes all remote connections on service shutdown. */
    public void destroy(){
        for(ServerClient serverClient: serverClients.values()){
            serverClient.shutdown();
        }
    }
}
|
//==================================================================================================
//FIELD
//==================================================================================================
Form.Field = Backbone.View.extend({
/**
 * Constructor
 *
 * Copies the recognised options onto the view, resolves the field schema
 * (filling in defaults such as type 'Text' and an auto-generated title),
 * picks template/error-class overrides, and instantiates the editor.
 *
 * @param {Object} options.key         Model attribute key this field edits
 * @param {Object} options.form        Owning form view
 * @param {Object} [options.schema]    Field schema (string shorthand allowed)
 * @param {Function} [options.schema.template]
 * @param {Backbone.Model} [options.model]
 * @param {Object} [options.value]     Initial value (when no model is given)
 * @param {String} [options.idPrefix]  Prefix for the generated editor id
 * @param {Function} [options.template]
 * @param {Function} [options.errorClassName]
 */
initialize: function(options) {
  options = options || {};
  //Store important data
  _.extend(this, _.pick(options, 'form', 'key', 'model', 'value', 'idPrefix'));
  //Create the full field schema, merging defaults etc.
  var schema = this.schema = this.createSchema(options.schema);
  //Override defaults: per-instance option wins over schema, then prototype,
  //then the constructor-level default.
  this.template = options.template || schema.template || this.template || this.constructor.template;
  this.errorClassName = options.errorClassName || this.errorClassName || this.constructor.errorClassName;
  //Create editor
  this.editor = this.createEditor();
},
/**
* Creates the full field schema, merging defaults etc.
*
* @param {Object|String} schema
*
* @return {Object}
*/
createSchema: function(schema) {
if (_.isString(schema)) schema = { type: schema };
//Set defaults
schema = _.extend({
type: 'Text',
title: this.createTitle()
}, schema);
//Get the real constructor function i.e. if type is a string such as 'Text'
schema.type = (_.isString(schema.type)) ? Form.editors[schema.type] : schema.type;
return schema;
},
/**
* Creates the editor specified in the schema; either an editor string name or
* a constructor function
*
* @return {View}
*/
createEditor: function() {
var options = _.extend(
_.pick(this, 'schema', 'form', 'key', 'model', 'value'),
{ id: this.createEditorId() }
);
var constructorFn = this.schema.type;
return new constructorFn(options);
},
/**
* Creates the ID that will be assigned to the editor
*
* @return {String}
*/
createEditorId: function() {
var prefix = this.idPrefix,
id = this.key;
//Replace periods with underscores (e.g. for when using paths)
id = id.replace(/\./g, '_');
//If a specific ID prefix is set, use it
if (_.isString(prefix) || _.isNumber(prefix)) return prefix + id;
if (_.isNull(prefix)) return id;
//Otherwise, if there is a model use it's CID to avoid conflicts when multiple forms are on the page
if (this.model) return this.model.cid + '_' + id;
return id;
},
/**
* Create the default field title (label text) from the key name.
* (Converts 'camelCase' to 'Camel Case')
*
* @return {String}
*/
createTitle: function() {
var str = this.key;
//Add spaces
str = str.replace(/([A-Z])/g, ' $1');
//Uppercase first character
str = str.replace(/^./, function(str) { return str.toUpperCase(); });
return str;
},
/**
* Returns the data to be passed to the template
*
* @return {Object}
*/
templateData: function() {
var schema = this.schema;
return {
help: schema.help || '',
title: schema.title,
titleHTML: schema.titleHTML,
fieldAttrs: schema.fieldAttrs,
editorAttrs: schema.editorAttrs,
key: this.key,
editorId: this.editor.id
};
},
/**
* Render the field and editor
*
* @return {Field} self
*/
render: function() {
var schema = this.schema,
editor = this.editor,
$ = Backbone.$;
//Only render the editor if requested
if (this.editor.noField === true) {
return this.setElement(editor.render().el);
}
//Render field
var $field = $($.trim(this.template(_.result(this, 'templateData'))));
if (schema.fieldClass) $field.addClass(schema.fieldClass);
if (schema.fieldAttrs) $field.attr(schema.fieldAttrs);
//Render editor
$field.find('[data-editor]').add($field).each(function(i, el) {
var $container = $(el),
selection = $container.attr('data-editor');
if (_.isUndefined(selection)) return;
$container.append(editor.render().el);
});
this.setElement($field);
return this;
},
/**
* Disable the field's editor
* Will call the editor's disable method if it exists
* Otherwise will add the disabled attribute to all inputs in the editor
*/
disable: function(){
if ( _.isFunction(this.editor.disable) ){
this.editor.disable();
}
else {
$input = this.editor.$el;
$input = $input.is("input") ? $input : $input.find("input");
$input.attr("disabled",true);
}
},
/**
* Enable the field's editor
* Will call the editor's disable method if it exists
* Otherwise will remove the disabled attribute to all inputs in the editor
*/
enable: function(){
if ( _.isFunction(this.editor.enable) ){
this.editor.enable();
}
else {
$input = this.editor.$el;
$input = $input.is("input") ? $input : $input.find("input");
$input.attr("disabled",false);
}
},
/**
* Check the validity of the field
*
* @return {String}
*/
validate: function() {
var error = this.editor.validate();
if (error) {
this.setError(error.message);
} else {
this.clearError();
}
return error;
},
/**
* Set the field into an error state, adding the error class and setting the error message
*
* @param {String} msg Error message
*/
setError: function(msg) {
//Nested form editors (e.g. Object) set their errors internally
if (this.editor.hasNestedForm) return;
//Add error CSS class
this.$el.addClass(this.errorClassName);
//Set error message
this.$('[data-error]').html(msg);
},
/**
* Clear the error state and reset the help message
*/
clearError: function() {
//Remove error CSS class
this.$el.removeClass(this.errorClassName);
//Clear error message
this.$('[data-error]').empty();
},
/**
* Update the model with the new value from the editor
*
* @return {Mixed}
*/
commit: function() {
return this.editor.commit();
},
/**
* Get the value from the editor
*
* @return {Mixed}
*/
getValue: function() {
return this.editor.getValue();
},
/**
* Set/change the value of the editor
*
* @param {Mixed} value
*/
setValue: function(value) {
this.editor.setValue(value);
},
/**
* Give the editor focus
*/
focus: function() {
this.editor.focus();
},
/**
* Remove focus from the editor
*/
blur: function() {
this.editor.blur();
},
/**
* Remove the field and editor views
*/
remove: function() {
this.editor.remove();
Backbone.View.prototype.remove.call(this);
}
}, {
//STATICS
template: _.template('\
<div>\
<label for="<%= editorId %>">\
<% if (titleHTML){ %><%= titleHTML %>\
<% } else { %><%- title %><% } %>\
</label>\
<div>\
<span data-editor></span>\
<div data-error></div>\
<div><%= help %></div>\
</div>\
</div>\
', null, Form.templateSettings),
/**
* CSS class name added to the field when there is a validation error
*/
errorClassName: 'error'
});
|
<reponame>mjburling/beneficiary-fhir-data<gh_stars>10-100
package gov.cms.bfd.model.codebook.model;
/**
 * Common interface implemented by CCW codebook constants, exposing the
 * backing {@link Variable} metadata and the constant's name.
 */
public interface CcwCodebookInterface {
    /**
     * @return the codebook {@link Variable} metadata backing this entry
     */
    // Redundant `public` modifiers removed: interface members are implicitly
    // public (JLS §9.4).
    Variable getVariable();

    /**
     * @return the name of this constant (matching {@code Enum#name()} for
     *         enum implementors)
     */
    String name();
}
|
export { ListStructure as Instance } from './internals';
export * from './functions'; |
<gh_stars>1-10
/*
* Copyright (c) 2016 Nike, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.nike.cerberus.endpoints.authentication;
import com.nike.backstopper.exception.ApiException;
import com.nike.cerberus.auth.connector.AuthResponse;
import com.nike.cerberus.domain.UserCredentials;
import com.nike.cerberus.service.AuthenticationService;
import com.nike.cerberus.service.EventProcessorService;
import com.nike.riposte.server.http.RequestInfo;
import com.nike.riposte.server.http.ResponseInfo;
import io.netty.handler.codec.http.DefaultHttpHeaders;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMethod;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import java.util.Base64;
import java.util.Collection;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Fail.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.MockitoAnnotations.initMocks;
/** Unit tests for the {@code AuthenticateUser} endpoint. */
public class AuthenticateUserTest {
    private final String username = "username";
    private final String password = "password";
    // Well-formed HTTP Basic header: "Basic " + base64(user:pass).
    private final String validAuthorizationHeader = "Basic " +
            Base64.getEncoder().encodeToString((username + ":" + password).getBytes());
    // Deliberately wrong scheme ("Token"), used to trigger the auth-header error path.
    private final String invalidAuthorizationHeader = "Token <PASSWORD>";
    private final Executor executor = Executors.newSingleThreadExecutor();
    @Mock
    private AuthenticationService authenticationService;
    @Mock
    private EventProcessorService eventProcessorService;
    private AuthenticateUser subject;
    @Before
    public void setUp() throws Exception {
        initMocks(this);
        subject = new AuthenticateUser(authenticationService, eventProcessorService);
    }
    @Test
    public void requestMatcher_is_http_get() {
        // The endpoint must match exactly one HTTP method: GET.
        final Collection<HttpMethod> httpMethods = subject.requestMatcher().matchingMethods();
        assertThat(httpMethods).hasSize(1);
        assertThat(httpMethods).contains(HttpMethod.GET);
    }
    @Test
    public void execute_returns_vault_auth_response() {
        // Happy path: a valid Basic header yields the AuthResponse produced
        // by the authentication service.
        final AuthResponse authResponse = new AuthResponse();
        final RequestInfo<Void> requestInfo = mock(RequestInfo.class);
        final HttpHeaders httpHeaders = new DefaultHttpHeaders();
        httpHeaders.add(HttpHeaders.Names.AUTHORIZATION, validAuthorizationHeader);
        when(requestInfo.getHeaders()).thenReturn(httpHeaders);
        when(authenticationService.authenticate(any(UserCredentials.class))).thenReturn(authResponse);
        final CompletableFuture<ResponseInfo<AuthResponse>> completableFuture =
                subject.execute(requestInfo, executor, null);
        final ResponseInfo<AuthResponse> responseInfo = completableFuture.join();
        assertThat(responseInfo.getContentForFullResponse()).isEqualTo(authResponse);
    }
    @Test
    public void execute_throws_api_error_when_bad_auth_header() {
        // A malformed Authorization header should surface as an ApiException
        // (wrapped in CompletionException because execution is asynchronous).
        final RequestInfo<Void> requestInfo = mock(RequestInfo.class);
        final HttpHeaders httpHeaders = new DefaultHttpHeaders();
        httpHeaders.add(HttpHeaders.Names.AUTHORIZATION, invalidAuthorizationHeader);
        when(requestInfo.getHeaders()).thenReturn(httpHeaders);
        try {
            final CompletableFuture<ResponseInfo<AuthResponse>> completableFuture =
                    subject.execute(requestInfo, executor, null);
            completableFuture.join();
            fail("Expected exception not thrown.");
        } catch (CompletionException cex) {
            assertThat(cex.getCause()).isInstanceOf(ApiException.class);
        }
    }
}
<gh_stars>0
import React from 'react';
import { View, StyleSheet, Text } from 'react-native';
import { SimpleLineIcons } from '@expo/vector-icons';
// Tab-bar icon for this screen: a static "home" line icon whose colour
// tracks the focused state supplied by the navigator.
// NOTE(review): focused renders 'grey' and unfocused 'darkgrey' — these are
// very close and may be inverted relative to the intended emphasis; confirm.
const TabIcon = (props) => (
  <SimpleLineIcons
    name="home"
    size={25}
    color={props.focused ? 'grey' : 'darkgrey'}
  />
)
// Main tab screen: placeholder content centred on screen.
export default class Main extends React.Component {
  // react-navigation reads the tab icon from navigationOptions.
  static navigationOptions = {
    tabBarIcon: TabIcon
  };
  render() {
    return (
      <View style={styles.container}>
        <Text> Main </Text>
      </View>
    );
  }
}
// Full-screen container that centres its children both vertically and
// horizontally.
const styles = StyleSheet.create({
  container: {
    flex: 1,
    alignItems: 'center',
    justifyContent: 'center',
  },
});
|
<reponame>yuchia0221/Spotify
import concurrent.futures
import json
import threading
from time import sleep, time
from typing import Callable
import pandas as pd
import requests
import spotipy
import spotipy.util as util
def get_user_token(username: str, scope: str, file: str) -> str:
    """
    Get a verified OAuth token from Spotify.

    Parameters:
        username: the Spotify user ID.
        scope: the Spotify API scope string (e.g. "user-top-read").
        file: path to a JSON file whose two values are the application's
            client_id and client_secret (in that order).

    Required: client_id and client_secret to access the Spotify API, the
    user ID, and interactive user authorization. On first use you are
    redirected to http://localhost/5000; after granting permission, paste
    the URL you were redirected to into the terminal.
    http://localhost/5000 must be in the app's redirect-URI whitelist.

    Return: the verified token (type: string).
    """
    # NOTE(review): the `file` parameter name is shadowed by the open file
    # handle inside this `with` block.
    with open(file) as file:
        ID, SECRET = json.load(file).values()
    token = util.prompt_for_user_token(username,scope,
                               client_id=ID,
                               client_secret=SECRET,
                               # the redirect url must be registered in your own web app
                               redirect_uri='http://localhost/5000')
    return token
def get_user_top_tracks(headers) -> list:
    """
    Fetch the current user's top tracks from the Spotify Web API.

    Parameters:
        headers: dict of HTTP headers carrying the OAuth bearer token,
            e.g. {"Authorization": "Bearer <token>"}.

    Required: a token granted the ``user-top-read`` scope.

    Return: list of ``[song_name, href]`` pairs, one per returned track.
    """
    # Fixed docstring: the original described a playlist function and a
    # `playlist_id` parameter that does not exist here.
    url = "https://api.spotify.com/v1/me/top/tracks?time_range=medium_term&limit=100&offset=5"
    response = requests.get(url, headers=headers)
    payload = json.loads(response.text)
    # Reduce each track object to the two fields we persist.
    return [[song["name"], song["href"]] for song in payload["items"]]
def write_csv(file: str, data: list):
    """
    Write ``data`` to a CSV file (UTF-8 encoded, no index column).

    Parameters:
        file: destination path for the CSV file.
        data: rows to write — a list of dicts (or row sequences) accepted
            by ``pandas.DataFrame.from_dict``.
    """
    # Fixed docstring: original said "dictionaries of list" instead of
    # "list of dictionaries" and did not document the encoding behaviour.
    df = pd.DataFrame.from_dict(data, orient="columns")
    df.to_csv(file, index=False, encoding="utf-8")
if __name__ == "__main__":
    import io, sys
    # Force UTF-8 stdout so non-ASCII track names print cleanly (notably on
    # Windows consoles with a non-UTF-8 default encoding).
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf8")
    FILE = "user.json"
    OUTPUT_FILE = "toptracks.csv"
    # "<PASSWORD>" is a scrubbed placeholder — substitute the real Spotify
    # username before running.
    token = get_user_token("<PASSWORD>","user-top-read",FILE)
    headers = {"Authorization": "Bearer {}".format(token)}
    data = get_user_top_tracks(headers)
    write_csv(OUTPUT_FILE, data)
|
// Copyright (c) 2012-2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
#include "coins.h"
#include "memusage.h"
#include "random.h"
#include "version.h"
#include "policy/fees.h"
#include <assert.h>
#include <tracing.h>
/**
 * calculate number of bytes for the bitmask, and its number of non-zero bytes
 * each bit in the bitmask represents the availability of one output, but the
 * availabilities of the first two outputs are encoded separately
 */
void CCoins::CalcMaskSize(unsigned int &nBytes, unsigned int &nNonzeroBytes) const {
    unsigned int nLastUsedByte = 0;
    // Outputs 0 and 1 are encoded separately; byte b of the mask covers
    // outputs 2+b*8 .. 2+b*8+7.
    for (unsigned int b = 0; 2+b*8 < vout.size(); b++) {
        bool fZero = true;
        for (unsigned int i = 0; i < 8 && 2+b*8+i < vout.size(); i++) {
            if (!vout[2+b*8+i].IsNull()) {
                fZero = false;
                // One unspent output is enough to mark this byte non-zero;
                // no need to inspect the rest (original used `continue`,
                // pointlessly scanning the remaining bits).
                break;
            }
        }
        if (!fZero) {
            nLastUsedByte = b + 1;
            nNonzeroBytes++;
        }
    }
    nBytes += nLastUsedByte;
}
// Marks output nPos as spent. Returns false when the index is out of range
// or the output is already spent; otherwise nulls the output, compacts the
// structure via Cleanup(), and returns true.
bool CCoins::Spend(uint32_t nPos)
{
    const bool fSpendable = nPos < vout.size() && !vout[nPos].IsNull();
    if (!fSpendable)
        return false;
    vout[nPos].SetNull();
    Cleanup();
    return true;
}
// Default (null) implementations of the CCoinsView interface: the base view
// holds no data, so every lookup fails and every query returns an empty/zero
// value. Concrete views (database-backed, cached) override these.
bool CCoinsView::GetSproutAnchorAt(const uint256 &rt, SproutMerkleTree &tree) const { return false; }
bool CCoinsView::GetSaplingAnchorAt(const uint256 &rt, SaplingMerkleTree &tree) const { return false; }
bool CCoinsView::GetOrchardAnchorAt(const uint256 &rt, OrchardMerkleTree &tree) const { return false; }
bool CCoinsView::GetNullifier(const uint256 &nullifier, ShieldedType type) const { return false; }
bool CCoinsView::GetCoins(const uint256 &txid, CCoins &coins) const { return false; }
bool CCoinsView::HaveCoins(const uint256 &txid) const { return false; }
uint256 CCoinsView::GetBestBlock() const { return uint256(); }
// (Stray trailing semicolon after this definition removed.)
uint256 CCoinsView::GetBestAnchor(ShieldedType type) const { return uint256(); }
HistoryIndex CCoinsView::GetHistoryLength(uint32_t epochId) const { return 0; }
HistoryNode CCoinsView::GetHistoryAt(uint32_t epochId, HistoryIndex index) const { return HistoryNode(); }
uint256 CCoinsView::GetHistoryRoot(uint32_t epochId) const { return uint256(); }
bool CCoinsView::BatchWrite(CCoinsMap &mapCoins,
                            const uint256 &hashBlock,
                            const uint256 &hashSproutAnchor,
                            const uint256 &hashSaplingAnchor,
                            const uint256 &hashOrchardAnchor,
                            CAnchorsSproutMap &mapSproutAnchors,
                            CAnchorsSaplingMap &mapSaplingAnchors,
                            CAnchorsOrchardMap &mapOrchardAnchors,
                            CNullifiersMap &mapSproutNullifiers,
                            CNullifiersMap &mapSaplingNullifiers,
                            CNullifiersMap &mapOrchardNullifiers,
                            CHistoryCacheMap &historyCacheMap) { return false; }
bool CCoinsView::GetStats(CCoinsStats &stats) const { return false; }
CCoinsViewBacked::CCoinsViewBacked(CCoinsView *viewIn) : base(viewIn) { }
bool CCoinsViewBacked::GetSproutAnchorAt(const uint256 &rt, SproutMerkleTree &tree) const { return base->GetSproutAnchorAt(rt, tree); }
bool CCoinsViewBacked::GetSaplingAnchorAt(const uint256 &rt, SaplingMerkleTree &tree) const { return base->GetSaplingAnchorAt(rt, tree); }
bool CCoinsViewBacked::GetOrchardAnchorAt(const uint256 &rt, OrchardMerkleTree &tree) const { return base->GetOrchardAnchorAt(rt, tree); }
bool CCoinsViewBacked::GetNullifier(const uint256 &nullifier, ShieldedType type) const { return base->GetNullifier(nullifier, type); }
bool CCoinsViewBacked::GetCoins(const uint256 &txid, CCoins &coins) const { return base->GetCoins(txid, coins); }
bool CCoinsViewBacked::HaveCoins(const uint256 &txid) const { return base->HaveCoins(txid); }
uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); }
uint256 CCoinsViewBacked::GetBestAnchor(ShieldedType type) const { return base->GetBestAnchor(type); }
HistoryIndex CCoinsViewBacked::GetHistoryLength(uint32_t epochId) const { return base->GetHistoryLength(epochId); }
HistoryNode CCoinsViewBacked::GetHistoryAt(uint32_t epochId, HistoryIndex index) const { return base->GetHistoryAt(epochId, index); }
uint256 CCoinsViewBacked::GetHistoryRoot(uint32_t epochId) const { return base->GetHistoryRoot(epochId); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock,
const uint256 &hashSproutAnchor,
const uint256 &hashSaplingAnchor,
const uint256 &hashOrchardAnchor,
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CAnchorsOrchardMap &mapOrchardAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers,
CNullifiersMap &mapOrchardNullifiers,
CHistoryCacheMap &historyCacheMap) {
return base->BatchWrite(mapCoins, hashBlock,
hashSproutAnchor, hashSaplingAnchor, hashOrchardAnchor,
mapSproutAnchors, mapSaplingAnchors, mapOrchardAnchors,
mapSproutNullifiers, mapSaplingNullifiers, mapOrchardNullifiers,
historyCacheMap);
}
bool CCoinsViewBacked::GetStats(CCoinsStats &stats) const { return base->GetStats(stats); }
SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
CCoinsViewCache::CCoinsViewCache(CCoinsView *baseIn) : CCoinsViewBacked(baseIn), hasModifier(false), cachedCoinsUsage(0) { }
CCoinsViewCache::~CCoinsViewCache()
{
assert(!hasModifier);
}
size_t CCoinsViewCache::DynamicMemoryUsage() const {
return memusage::DynamicUsage(cacheCoins) +
memusage::DynamicUsage(cacheSproutAnchors) +
memusage::DynamicUsage(cacheSaplingAnchors) +
memusage::DynamicUsage(cacheOrchardAnchors) +
memusage::DynamicUsage(cacheSproutNullifiers) +
memusage::DynamicUsage(cacheSaplingNullifiers) +
memusage::DynamicUsage(cacheOrchardNullifiers) +
memusage::DynamicUsage(historyCacheMap) +
cachedCoinsUsage;
}
CCoinsMap::const_iterator CCoinsViewCache::FetchCoins(const uint256 &txid) const {
CCoinsMap::iterator it = cacheCoins.find(txid);
if (it != cacheCoins.end())
return it;
CCoins tmp;
if (!base->GetCoins(txid, tmp))
return cacheCoins.end();
CCoinsMap::iterator ret = cacheCoins.insert(std::make_pair(txid, CCoinsCacheEntry())).first;
tmp.swap(ret->second.coins);
if (ret->second.coins.IsPruned()) {
// The parent only has an empty entry for this txid; we can consider our
// version as fresh.
ret->second.flags = CCoinsCacheEntry::FRESH;
}
cachedCoinsUsage += ret->second.coins.DynamicMemoryUsage();
return ret;
}
bool CCoinsViewCache::GetSproutAnchorAt(const uint256 &rt, SproutMerkleTree &tree) const {
CAnchorsSproutMap::const_iterator it = cacheSproutAnchors.find(rt);
if (it != cacheSproutAnchors.end()) {
if (it->second.entered) {
tree = it->second.tree;
return true;
} else {
return false;
}
}
if (!base->GetSproutAnchorAt(rt, tree)) {
return false;
}
CAnchorsSproutMap::iterator ret = cacheSproutAnchors.insert(std::make_pair(rt, CAnchorsSproutCacheEntry())).first;
ret->second.entered = true;
ret->second.tree = tree;
cachedCoinsUsage += ret->second.tree.DynamicMemoryUsage();
return true;
}
bool CCoinsViewCache::GetSaplingAnchorAt(const uint256 &rt, SaplingMerkleTree &tree) const {
CAnchorsSaplingMap::const_iterator it = cacheSaplingAnchors.find(rt);
if (it != cacheSaplingAnchors.end()) {
if (it->second.entered) {
tree = it->second.tree;
return true;
} else {
return false;
}
}
if (!base->GetSaplingAnchorAt(rt, tree)) {
return false;
}
CAnchorsSaplingMap::iterator ret = cacheSaplingAnchors.insert(std::make_pair(rt, CAnchorsSaplingCacheEntry())).first;
ret->second.entered = true;
ret->second.tree = tree;
cachedCoinsUsage += ret->second.tree.DynamicMemoryUsage();
return true;
}
bool CCoinsViewCache::GetOrchardAnchorAt(const uint256 &rt, OrchardMerkleTree &tree) const {
CAnchorsOrchardMap::const_iterator it = cacheOrchardAnchors.find(rt);
if (it != cacheOrchardAnchors.end()) {
if (it->second.entered) {
tree = it->second.tree;
return true;
} else {
return false;
}
}
if (!base->GetOrchardAnchorAt(rt, tree)) {
return false;
}
CAnchorsOrchardMap::iterator ret = cacheOrchardAnchors.insert(std::make_pair(rt, CAnchorsOrchardCacheEntry())).first;
ret->second.entered = true;
ret->second.tree = tree;
cachedCoinsUsage += ret->second.tree.DynamicMemoryUsage();
return true;
}
bool CCoinsViewCache::GetNullifier(const uint256 &nullifier, ShieldedType type) const {
CNullifiersMap* cacheToUse;
switch (type) {
case SPROUT:
cacheToUse = &cacheSproutNullifiers;
break;
case SAPLING:
cacheToUse = &cacheSaplingNullifiers;
break;
case ORCHARD:
cacheToUse = &cacheOrchardNullifiers;
break;
default:
throw std::runtime_error("Unknown shielded type");
}
CNullifiersMap::iterator it = cacheToUse->find(nullifier);
if (it != cacheToUse->end())
return it->second.entered;
CNullifiersCacheEntry entry;
bool tmp = base->GetNullifier(nullifier, type);
entry.entered = tmp;
cacheToUse->insert(std::make_pair(nullifier, entry));
return tmp;
}
HistoryIndex CCoinsViewCache::GetHistoryLength(uint32_t epochId) const {
HistoryCache& historyCache = SelectHistoryCache(epochId);
return historyCache.length;
}
HistoryNode CCoinsViewCache::GetHistoryAt(uint32_t epochId, HistoryIndex index) const {
HistoryCache& historyCache = SelectHistoryCache(epochId);
if (index >= historyCache.length) {
// Caller should ensure that it is limiting history
// request to 0..GetHistoryLength(epochId)-1 range
throw std::runtime_error("Invalid history request");
}
if (index >= historyCache.updateDepth) {
return historyCache.appends[index];
}
return base->GetHistoryAt(epochId, index);
}
uint256 CCoinsViewCache::GetHistoryRoot(uint32_t epochId) const {
return SelectHistoryCache(epochId).root;
}
template<typename Tree, typename Cache, typename CacheIterator, typename CacheEntry>
void CCoinsViewCache::AbstractPushAnchor(
const Tree &tree,
ShieldedType type,
Cache &cacheAnchors,
uint256 &hash
)
{
uint256 newrt = tree.root();
auto currentRoot = GetBestAnchor(type);
// We don't want to overwrite an anchor we already have.
// This occurs when a block doesn't modify mapAnchors at all,
// because there are no joinsplits. We could get around this a
// different way (make all blocks modify mapAnchors somehow)
// but this is simpler to reason about.
if (currentRoot != newrt) {
auto insertRet = cacheAnchors.insert(std::make_pair(newrt, CacheEntry()));
CacheIterator ret = insertRet.first;
ret->second.entered = true;
ret->second.tree = tree;
ret->second.flags = CacheEntry::DIRTY;
if (insertRet.second) {
// An insert took place
cachedCoinsUsage += ret->second.tree.DynamicMemoryUsage();
}
hash = newrt;
}
}
template<> void CCoinsViewCache::PushAnchor(const SproutMerkleTree &tree)
{
AbstractPushAnchor<SproutMerkleTree, CAnchorsSproutMap, CAnchorsSproutMap::iterator, CAnchorsSproutCacheEntry>(
tree,
SPROUT,
cacheSproutAnchors,
hashSproutAnchor
);
}
template<> void CCoinsViewCache::PushAnchor(const SaplingMerkleTree &tree)
{
AbstractPushAnchor<SaplingMerkleTree, CAnchorsSaplingMap, CAnchorsSaplingMap::iterator, CAnchorsSaplingCacheEntry>(
tree,
SAPLING,
cacheSaplingAnchors,
hashSaplingAnchor
);
}
template<> void CCoinsViewCache::PushAnchor(const OrchardMerkleTree &tree)
{
AbstractPushAnchor<OrchardMerkleTree, CAnchorsOrchardMap, CAnchorsOrchardMap::iterator, CAnchorsOrchardCacheEntry>(
tree,
ORCHARD,
cacheOrchardAnchors,
hashOrchardAnchor
);
}
template<>
void CCoinsViewCache::BringBestAnchorIntoCache(
const uint256 ¤tRoot,
SproutMerkleTree &tree
)
{
assert(GetSproutAnchorAt(currentRoot, tree));
}
template<>
void CCoinsViewCache::BringBestAnchorIntoCache(
const uint256 ¤tRoot,
SaplingMerkleTree &tree
)
{
assert(GetSaplingAnchorAt(currentRoot, tree));
}
template<>
void CCoinsViewCache::BringBestAnchorIntoCache(
const uint256 ¤tRoot,
OrchardMerkleTree &tree
)
{
assert(GetOrchardAnchorAt(currentRoot, tree));
}
void draftMMRNode(std::vector<uint32_t> &indices,
std::vector<HistoryEntry> &entries,
HistoryNode nodeData,
uint32_t alt,
uint32_t peak_pos)
{
HistoryEntry newEntry = alt == 0
? libzcash::LeafToEntry(nodeData)
// peak_pos - (1 << alt) is the array position of left child.
// peak_pos - 1 is the array position of right child.
: libzcash::NodeToEntry(nodeData, peak_pos - (1 << alt), peak_pos - 1);
indices.push_back(peak_pos);
entries.push_back(newEntry);
}
// Computes floor(log2(x)). Requires x > 0; asserts otherwise.
static inline uint32_t floor_log2(uint32_t x) {
    assert(x > 0);
    // Count how many single-bit right shifts are needed until x reaches 0;
    // that count is the index of the highest set bit.
    uint32_t log = 0;
    for (x >>= 1; x != 0; x >>= 1) {
        ++log;
    }
    return log;
}
// Computes the altitude of the largest subtree for an MMR with n nodes,
// which is floor(log2(n + 1)) - 1.
// Relies on floor_log2 above; callers must pass n >= 1 so the subtraction
// does not underflow (floor_log2(n + 1) >= 1).
static inline uint32_t altitude(uint32_t n) {
    return floor_log2(n + 1) - 1;
}
uint32_t CCoinsViewCache::PreloadHistoryTree(uint32_t epochId, bool extra, std::vector<HistoryEntry> &entries, std::vector<uint32_t> &entry_indices) {
auto treeLength = GetHistoryLength(epochId);
if (treeLength <= 0) {
throw std::runtime_error("Invalid PreloadHistoryTree state called - tree should exist");
} else if (treeLength == 1) {
entries.push_back(libzcash::LeafToEntry(GetHistoryAt(epochId, 0)));
entry_indices.push_back(0);
return 1;
}
uint32_t last_peak_pos = 0;
uint32_t last_peak_alt = 0;
uint32_t alt = 0;
uint32_t peak_pos = 0;
uint32_t total_peaks = 0;
// Assume the following example peak layout with 14 leaves, and 25 stored nodes in
// total (the "tree length"):
//
// P
// /\
// / \
// / \ \
// / \ \ Altitude
// _A_ \ \ 3
// _/ \_ B \ 2
// / \ / \ / \ C 1
// /\ /\ /\ /\ /\ /\ /\ 0
//
// We start by determining the altitude of the highest peak (A).
alt = altitude(treeLength);
// We determine the position of the highest peak (A) by pretending it is the right
// sibling in a tree, and its left-most leaf has position 0. Then the left sibling
// of (A) has position -1, and so we can "jump" to the peak's position by computing
// -1 + 2^(alt + 1) - 1.
peak_pos = (1 << (alt + 1)) - 2;
// Now that we have the position and altitude of the highest peak (A), we collect
// the remaining peaks (B, C). We navigate the peaks as if they were nodes in this
// Merkle tree (with additional imaginary nodes 1 and 2, that have positions beyond
// the MMR's length):
//
// / \
// / \
// / \
// / \
// A ==========> 1
// / \ // \
// _/ \_ B ==> 2
// /\ /\ /\ //
// / \ / \ / \ C
// /\ /\ /\ /\ /\ /\ /\
//
while (alt != 0) {
// If peak_pos is out of bounds of the tree, we compute the position of its left
// child, and drop down one level in the tree.
if (peak_pos >= treeLength) {
// left child, -2^alt
peak_pos = peak_pos - (1 << alt);
alt = alt - 1;
}
// If the peak exists, we take it and then continue with its right sibling.
if (peak_pos < treeLength) {
draftMMRNode(entry_indices, entries, GetHistoryAt(epochId, peak_pos), alt, peak_pos);
last_peak_pos = peak_pos;
last_peak_alt = alt;
// right sibling
peak_pos = peak_pos + (1 << (alt + 1)) - 1;
}
}
total_peaks = entries.size();
// Return early if we don't require extra nodes.
if (!extra) return total_peaks;
alt = last_peak_alt;
peak_pos = last_peak_pos;
// P
// /\
// / \
// / \ \
// / \ \
// _A_ \ \
// _/ \_ B \
// / \ / \ / \ C
// /\ /\ /\ /\ /\ /\ /\
// D E
//
// For extra peaks needed for deletion, we do extra pass on right slope of the last peak
// and add those nodes + their siblings. Extra would be (D, E) for the picture above.
while (alt > 0) {
uint32_t left_pos = peak_pos - (1 << alt);
uint32_t right_pos = peak_pos - 1;
alt = alt - 1;
// drafting left child
draftMMRNode(entry_indices, entries, GetHistoryAt(epochId, left_pos), alt, left_pos);
// drafting right child
draftMMRNode(entry_indices, entries, GetHistoryAt(epochId, right_pos), alt, right_pos);
// continuing on right slope
peak_pos = right_pos;
}
return total_peaks;
}
// Returns the history cache for the given epoch, loading its length and
// root from the backing view on first access and memoising the result in
// historyCacheMap.
// NOTE(review): this method is declared const yet inserts into
// historyCacheMap — the map is presumably declared mutable; confirm before
// relying on const-correctness here.
HistoryCache& CCoinsViewCache::SelectHistoryCache(uint32_t epochId) const {
    auto entry = historyCacheMap.find(epochId);
    if (entry != historyCacheMap.end()) {
        return entry->second;
    } else {
        // Not cached yet: seed a cache from the parent view's current state.
        auto cache = HistoryCache(
            base->GetHistoryLength(epochId),
            base->GetHistoryRoot(epochId),
            epochId
        );
        return historyCacheMap.insert({epochId, cache}).first->second;
    }
}
void CCoinsViewCache::PushHistoryNode(uint32_t epochId, const HistoryNode node) {
HistoryCache& historyCache = SelectHistoryCache(epochId);
if (historyCache.length == 0) {
// special case, it just goes into the cache right away
historyCache.Extend(node);
if (librustzcash_mmr_hash_node(epochId, &node, historyCache.root.begin()) != 0) {
throw std::runtime_error("hashing node failed");
};
return;
}
std::vector<HistoryEntry> entries;
std::vector<uint32_t> entry_indices;
PreloadHistoryTree(epochId, false, entries, entry_indices);
uint256 newRoot;
std::array<HistoryNode, 32> appendBuf = {};
uint32_t appends = librustzcash_mmr_append(
epochId,
historyCache.length,
entry_indices.data(),
entries.data(),
entry_indices.size(),
&node,
newRoot.begin(),
appendBuf.data()
);
for (size_t i = 0; i < appends; i++) {
historyCache.Extend(appendBuf[i]);
}
historyCache.root = newRoot;
}
void CCoinsViewCache::PopHistoryNode(uint32_t epochId) {
HistoryCache& historyCache = SelectHistoryCache(epochId);
uint256 newRoot;
switch (historyCache.length) {
case 0:
{
// Caller is generally not expected to pop from empty tree! Caller
// should switch to previous epoch and pop history from there.
// If we are doing an expected rollback that changes the consensus
// branch ID for some upgrade (or introduces one that wasn't present
// at the equivalent height) this will occur because
// `SelectHistoryCache` selects the tree for the new consensus
// branch ID, not the one that existed on the chain being rolled
// back.
// Sensible action is to truncate the history cache:
}
case 1:
{
// Just resetting tree to empty
historyCache.Truncate(0);
historyCache.root = uint256();
return;
}
case 2:
{
// - A tree with one leaf has length 1.
// - A tree with two leaves has length 3.
throw std::runtime_error("a history tree cannot have two nodes");
}
case 3:
{
const HistoryNode tmpHistoryRoot = GetHistoryAt(epochId, 0);
// After removing a leaf from a tree with two leaves, we are left
// with a single-node tree, whose root is just the hash of that
// node.
if (librustzcash_mmr_hash_node(
epochId,
&tmpHistoryRoot,
newRoot.begin()
) != 0) {
throw std::runtime_error("hashing node failed");
}
historyCache.Truncate(1);
historyCache.root = newRoot;
return;
}
default:
{
// This is a non-elementary pop, so use the full tree logic.
std::vector<HistoryEntry> entries;
std::vector<uint32_t> entry_indices;
uint32_t peak_count = PreloadHistoryTree(epochId, true, entries, entry_indices);
uint32_t numberOfDeletes = librustzcash_mmr_delete(
epochId,
historyCache.length,
entry_indices.data(),
entries.data(),
peak_count,
entries.size() - peak_count,
newRoot.begin()
);
historyCache.Truncate(historyCache.length - numberOfDeletes);
historyCache.root = newRoot;
return;
}
}
}
template<typename Tree, typename Cache, typename CacheEntry>
void CCoinsViewCache::AbstractPopAnchor(
const uint256 &newrt,
ShieldedType type,
Cache &cacheAnchors,
uint256 &hash
)
{
auto currentRoot = GetBestAnchor(type);
// Blocks might not change the commitment tree, in which
// case restoring the "old" anchor during a reorg must
// have no effect.
if (currentRoot != newrt) {
// Bring the current best anchor into our local cache
// so that its tree exists in memory.
{
Tree tree;
BringBestAnchorIntoCache(currentRoot, tree);
}
// Mark the anchor as unentered, removing it from view
cacheAnchors[currentRoot].entered = false;
// Mark the cache entry as dirty so it's propagated
cacheAnchors[currentRoot].flags = CacheEntry::DIRTY;
// Mark the new root as the best anchor
hash = newrt;
}
}
// Dispatch a pool-specific anchor pop to the shared template
// implementation, selecting the matching tree/map/entry types.
void CCoinsViewCache::PopAnchor(const uint256 &newrt, ShieldedType type) {
    switch (type) {
        case SPROUT:
            AbstractPopAnchor<SproutMerkleTree, CAnchorsSproutMap, CAnchorsSproutCacheEntry>(
                newrt, SPROUT, cacheSproutAnchors, hashSproutAnchor);
            return;
        case SAPLING:
            AbstractPopAnchor<SaplingMerkleTree, CAnchorsSaplingMap, CAnchorsSaplingCacheEntry>(
                newrt, SAPLING, cacheSaplingAnchors, hashSaplingAnchor);
            return;
        default:
            throw std::runtime_error("Unknown shielded type");
    }
}
void CCoinsViewCache::SetNullifiers(const CTransaction& tx, bool spent) {
for (const JSDescription &joinsplit : tx.vJoinSplit) {
for (const uint256 &nullifier : joinsplit.nullifiers) {
std::pair<CNullifiersMap::iterator, bool> ret = cacheSproutNullifiers.insert(std::make_pair(nullifier, CNullifiersCacheEntry()));
ret.first->second.entered = spent;
ret.first->second.flags |= CNullifiersCacheEntry::DIRTY;
}
}
for (const SpendDescription &spendDescription : tx.vShieldedSpend) {
std::pair<CNullifiersMap::iterator, bool> ret = cacheSaplingNullifiers.insert(std::make_pair(spendDescription.nullifier, CNullifiersCacheEntry()));
ret.first->second.entered = spent;
ret.first->second.flags |= CNullifiersCacheEntry::DIRTY;
}
for (const uint256& nf : tx.GetOrchardBundle().GetNullifiers()) {
std::pair<CNullifiersMap::iterator, bool> ret = cacheOrchardNullifiers.insert(std::make_pair(nf, CNullifiersCacheEntry()));
ret.first->second.entered = spent;
ret.first->second.flags |= CNullifiersCacheEntry::DIRTY;
}
}
bool CCoinsViewCache::GetCoins(const uint256 &txid, CCoins &coins) const {
CCoinsMap::const_iterator it = FetchCoins(txid);
if (it != cacheCoins.end()) {
coins = it->second.coins;
return true;
}
return false;
}
// Obtain a modifiable handle on the coins for `txid`, pulling the entry
// up from the parent view on first access.  Only one modifier may be
// live at a time (asserted); the returned CCoinsModifier's destructor
// finalizes the memory-usage accounting.
CCoinsModifier CCoinsViewCache::ModifyCoins(const uint256 &txid) {
    assert(!hasModifier);
    std::pair<CCoinsMap::iterator, bool> ret = cacheCoins.insert(std::make_pair(txid, CCoinsCacheEntry()));
    // Memory footprint of a pre-existing entry, so the modifier can
    // account for the size delta after modification.
    size_t cachedCoinUsage = 0;
    if (ret.second) {
        if (!base->GetCoins(txid, ret.first->second.coins)) {
            // The parent view does not have this entry; mark it as fresh.
            ret.first->second.coins.Clear();
            ret.first->second.flags = CCoinsCacheEntry::FRESH;
        } else if (ret.first->second.coins.IsPruned()) {
            // The parent view only has a pruned entry for this; mark it as fresh.
            ret.first->second.flags = CCoinsCacheEntry::FRESH;
        }
    } else {
        cachedCoinUsage = ret.first->second.coins.DynamicMemoryUsage();
    }
    // Assume that whenever ModifyCoins is called, the entry will be modified.
    ret.first->second.flags |= CCoinsCacheEntry::DIRTY;
    return CCoinsModifier(*this, ret.first, cachedCoinUsage);
}
// Create (or reset) the cache entry for a brand-new transaction's coins.
// The entry is unconditionally cleared and flagged FRESH and DIRTY, and a
// modifier with zero prior usage is returned.
CCoinsModifier CCoinsViewCache::ModifyNewCoins(const uint256 &txid) {
    assert(!hasModifier);
    auto ret = cacheCoins.insert(std::make_pair(txid, CCoinsCacheEntry()));
    CCoinsCacheEntry& entry = ret.first->second;
    entry.coins.Clear();
    entry.flags = CCoinsCacheEntry::FRESH | CCoinsCacheEntry::DIRTY;
    return CCoinsModifier(*this, ret.first, 0);
}
// Return a pointer to the cached coins for `txid`, or NULL when absent.
const CCoins* CCoinsViewCache::AccessCoins(const uint256 &txid) const {
    const auto it = FetchCoins(txid);
    return (it != cacheCoins.end()) ? &it->second.coins : NULL;
}
bool CCoinsViewCache::HaveCoins(const uint256 &txid) const {
CCoinsMap::const_iterator it = FetchCoins(txid);
// We're using vtx.empty() instead of IsPruned here for performance reasons,
// as we only care about the case where a transaction was replaced entirely
// in a reorganization (which wipes vout entirely, as opposed to spending
// which just cleans individual outputs).
return (it != cacheCoins.end() && !it->second.coins.vout.empty());
}
// Lazily pull the best block hash from the parent view on first access,
// caching it for subsequent calls.
uint256 CCoinsViewCache::GetBestBlock() const {
    if (hashBlock.IsNull()) {
        hashBlock = base->GetBestBlock();
    }
    return hashBlock;
}
// Lazily fetch the best anchor for the requested pool from the parent
// view, caching it in the matching per-pool member on first access.
uint256 CCoinsViewCache::GetBestAnchor(ShieldedType type) const {
    switch (type) {
        case SPROUT:
            if (hashSproutAnchor.IsNull()) {
                hashSproutAnchor = base->GetBestAnchor(type);
            }
            return hashSproutAnchor;
        case SAPLING:
            if (hashSaplingAnchor.IsNull()) {
                hashSaplingAnchor = base->GetBestAnchor(type);
            }
            return hashSaplingAnchor;
        case ORCHARD:
            if (hashOrchardAnchor.IsNull()) {
                hashOrchardAnchor = base->GetBestAnchor(type);
            }
            return hashOrchardAnchor;
        default:
            throw std::runtime_error("Unknown shielded type");
    }
}
// Record the new best block hash; written down to the parent on Flush().
void CCoinsViewCache::SetBestBlock(const uint256 &hashBlockIn) {
    hashBlock = hashBlockIn;
}
// Fold a child view's nullifier map into its parent's, consuming the
// child map (every entry is erased as it is visited).
void BatchWriteNullifiers(CNullifiersMap &mapNullifiers, CNullifiersMap &cacheNullifiers)
{
    auto child = mapNullifiers.begin();
    while (child != mapNullifiers.end()) {
        // Only DIRTY entries carry changes worth propagating (optimization).
        if (child->second.flags & CNullifiersCacheEntry::DIRTY) {
            auto parent = cacheNullifiers.find(child->first);
            if (parent == cacheNullifiers.end()) {
                // Parent has no entry: copy the child's state across.
                CNullifiersCacheEntry& entry = cacheNullifiers[child->first];
                entry.entered = child->second.entered;
                entry.flags = CNullifiersCacheEntry::DIRTY;
            } else if (parent->second.entered != child->second.entered) {
                // Parent disagrees: adopt the child's state and mark dirty.
                parent->second.entered = child->second.entered;
                parent->second.flags |= CNullifiersCacheEntry::DIRTY;
            }
        }
        child = mapNullifiers.erase(child);
    }
}
// Fold a child view's anchor map into the parent's cache, consuming the
// child map as it goes.  Template parameters select the pool-specific
// map/iterator/entry types.  `cachedCoinsUsage` grows by the footprint
// of each tree newly copied into the parent.
template<typename Map, typename MapIterator, typename MapEntry>
void BatchWriteAnchors(
    Map &mapAnchors,
    Map &cacheAnchors,
    size_t &cachedCoinsUsage
)
{
    for (MapIterator child_it = mapAnchors.begin(); child_it != mapAnchors.end();)
    {
        // Only DIRTY entries carry changes to propagate.
        if (child_it->second.flags & MapEntry::DIRTY) {
            MapIterator parent_it = cacheAnchors.find(child_it->first);

            if (parent_it == cacheAnchors.end()) {
                // Parent has no entry: copy the whole entry — tree
                // included — and account for the added memory.
                MapEntry& entry = cacheAnchors[child_it->first];
                entry.entered = child_it->second.entered;
                entry.tree = child_it->second.tree;
                entry.flags = MapEntry::DIRTY;
                cachedCoinsUsage += entry.tree.DynamicMemoryUsage();
            } else {
                if (parent_it->second.entered != child_it->second.entered) {
                    // The parent may have removed the entry.
                    // NOTE: only the `entered` flag is reconciled here,
                    // not the tree itself.
                    parent_it->second.entered = child_it->second.entered;
                    parent_it->second.flags |= MapEntry::DIRTY;
                }
            }
        }

        // Erase-as-we-go empties the child map and keeps iteration valid.
        child_it = mapAnchors.erase(child_it);
    }
}
// Fold a child view's per-epoch history caches into the parent's.
// For an epoch the parent already tracks, the parent tree is truncated
// back to the child's updateDepth and re-extended with the child's
// appended nodes; otherwise the child cache is inserted wholesale.
void BatchWriteHistory(CHistoryCacheMap& historyCacheMap, CHistoryCacheMap& historyCacheMapIn) {
    for (auto nextHistoryCache = historyCacheMapIn.begin(); nextHistoryCache != historyCacheMapIn.end(); nextHistoryCache++) {
        auto historyCacheIn = nextHistoryCache->second;
        auto epochId = nextHistoryCache->first;

        auto historyCache = historyCacheMap.find(epochId);
        if (historyCache != historyCacheMap.end()) {
            // delete old entries since updateDepth
            historyCache->second.Truncate(historyCacheIn.updateDepth);

            // Replace/append new/updated entries. HistoryCache.Extend
            // auto-indexes the nodes, so we need to extend in the same order as
            // this cache is indexed.
            for (size_t i = historyCacheIn.updateDepth; i < historyCacheIn.length; i++) {
                historyCache->second.Extend(historyCacheIn.appends[i]);
            }

            // the lengths should now match
            assert(historyCache->second.length == historyCacheIn.length);

            // write current root
            historyCache->second.root = historyCacheIn.root;
        } else {
            // Just insert the history cache into its parent
            historyCacheMap.insert({epochId, historyCacheIn});
        }
    }
}
// Absorb a child view's complete set of changes into this cache in one
// batch: coins, per-pool anchors and nullifiers, history caches, and the
// best block/anchor hashes.  All input maps are consumed (emptied).
bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins,
                                 const uint256 &hashBlockIn,
                                 const uint256 &hashSproutAnchorIn,
                                 const uint256 &hashSaplingAnchorIn,
                                 const uint256 &hashOrchardAnchorIn,
                                 CAnchorsSproutMap &mapSproutAnchors,
                                 CAnchorsSaplingMap &mapSaplingAnchors,
                                 CAnchorsOrchardMap &mapOrchardAnchors,
                                 CNullifiersMap &mapSproutNullifiers,
                                 CNullifiersMap &mapSaplingNullifiers,
                                 CNullifiersMap &mapOrchardNullifiers,
                                 CHistoryCacheMap &historyCacheMapIn) {
    assert(!hasModifier);
    for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
        if (it->second.flags & CCoinsCacheEntry::DIRTY) { // Ignore non-dirty entries (optimization).
            CCoinsMap::iterator itUs = cacheCoins.find(it->first);
            if (itUs == cacheCoins.end()) {
                if (!it->second.coins.IsPruned()) {
                    // The parent cache does not have an entry, while the child
                    // cache does have (a non-pruned) one. Move the data up, and
                    // mark it as fresh (if the grandparent did have it, we
                    // would have pulled it in at first GetCoins).
                    assert(it->second.flags & CCoinsCacheEntry::FRESH);
                    CCoinsCacheEntry& entry = cacheCoins[it->first];
                    entry.coins.swap(it->second.coins);
                    cachedCoinsUsage += entry.coins.DynamicMemoryUsage();
                    entry.flags = CCoinsCacheEntry::DIRTY | CCoinsCacheEntry::FRESH;
                }
            } else {
                if ((itUs->second.flags & CCoinsCacheEntry::FRESH) && it->second.coins.IsPruned()) {
                    // The grandparent does not have an entry, and the child is
                    // modified and being pruned. This means we can just delete
                    // it from the parent.
                    cachedCoinsUsage -= itUs->second.coins.DynamicMemoryUsage();
                    cacheCoins.erase(itUs);
                } else {
                    // A normal modification.  Usage is re-accounted since the
                    // swapped-in coins may differ in size.
                    cachedCoinsUsage -= itUs->second.coins.DynamicMemoryUsage();
                    itUs->second.coins.swap(it->second.coins);
                    cachedCoinsUsage += itUs->second.coins.DynamicMemoryUsage();
                    itUs->second.flags |= CCoinsCacheEntry::DIRTY;
                }
            }
        }
        // Consume the child map as we go (keeps iteration valid).
        it = mapCoins.erase(it);
    }

    // Merge the shielded state maps via the shared helpers.
    ::BatchWriteAnchors<CAnchorsSproutMap, CAnchorsSproutMap::iterator, CAnchorsSproutCacheEntry>(mapSproutAnchors, cacheSproutAnchors, cachedCoinsUsage);
    ::BatchWriteAnchors<CAnchorsSaplingMap, CAnchorsSaplingMap::iterator, CAnchorsSaplingCacheEntry>(mapSaplingAnchors, cacheSaplingAnchors, cachedCoinsUsage);
    ::BatchWriteAnchors<CAnchorsOrchardMap, CAnchorsOrchardMap::iterator, CAnchorsOrchardCacheEntry>(mapOrchardAnchors, cacheOrchardAnchors, cachedCoinsUsage);

    ::BatchWriteNullifiers(mapSproutNullifiers, cacheSproutNullifiers);
    ::BatchWriteNullifiers(mapSaplingNullifiers, cacheSaplingNullifiers);
    ::BatchWriteNullifiers(mapOrchardNullifiers, cacheOrchardNullifiers);

    ::BatchWriteHistory(historyCacheMap, historyCacheMapIn);

    // Adopt the child's notion of the tips last.
    hashSproutAnchor = hashSproutAnchorIn;
    hashSaplingAnchor = hashSaplingAnchorIn;
    hashOrchardAnchor = hashOrchardAnchorIn;
    hashBlock = hashBlockIn;
    return true;
}
// Push every cached change down into the parent view in one batch, then
// reset this cache layer to an empty state.
bool CCoinsViewCache::Flush() {
    const bool ok = base->BatchWrite(cacheCoins,
                                     hashBlock,
                                     hashSproutAnchor,
                                     hashSaplingAnchor,
                                     hashOrchardAnchor,
                                     cacheSproutAnchors,
                                     cacheSaplingAnchors,
                                     cacheOrchardAnchors,
                                     cacheSproutNullifiers,
                                     cacheSaplingNullifiers,
                                     cacheOrchardNullifiers,
                                     historyCacheMap);

    // The local caches are cleared unconditionally after the write.
    cacheCoins.clear();
    cacheSproutAnchors.clear();
    cacheSaplingAnchors.clear();
    cacheOrchardAnchors.clear();
    cacheSproutNullifiers.clear();
    cacheSaplingNullifiers.clear();
    cacheOrchardNullifiers.clear();
    historyCacheMap.clear();
    cachedCoinsUsage = 0;

    return ok;
}
// Number of coin entries held in this cache layer only (entries present
// solely in parent views are not counted).
unsigned int CCoinsViewCache::GetCacheSize() const {
    return cacheCoins.size();
}
// Look up the transaction output referenced by `input`.
// Precondition (asserted): the referenced coins exist in the view and the
// output index is still available (unspent).
const CTxOut &CCoinsViewCache::GetOutputFor(const CTxIn& input) const
{
    const CCoins* coins = AccessCoins(input.prevout.hash);
    assert(coins && coins->IsAvailable(input.prevout.n));
    return coins->vout[input.prevout.n];
}
// Total value consumed by `tx`: the sum of its transparent inputs plus
// the net shielded value entering the transparent pool.
CAmount CCoinsViewCache::GetValueIn(const CTransaction& tx) const
{
    // Coinbase transactions create value rather than spend it.
    if (tx.IsCoinBase()) {
        return 0;
    }

    CAmount nResult = 0;
    for (const CTxIn& txin : tx.vin) {
        nResult += GetOutputFor(txin).nValue;
    }
    nResult += tx.GetShieldedValueIn();

    return nResult;
}
// Check the shielded consensus requirements of `tx` against this view:
// no already-seen nullifiers (double spends) and only known anchors, for
// the Sprout, Sapling, and Orchard pools.  Returns the first unsatisfied
// requirement encountered, or nullopt when everything checks out.
std::optional<UnsatisfiedShieldedReq> CCoinsViewCache::HaveShieldedRequirements(const CTransaction& tx) const
{
    // Sprout joinsplits within one transaction may anchor to treestates
    // produced by earlier joinsplits in the same transaction, so track
    // the intermediate trees this tx itself creates.
    boost::unordered_map<uint256, SproutMerkleTree, SaltedTxidHasher> intermediates;

    for (const JSDescription &joinsplit : tx.vJoinSplit)
    {
        for (const uint256& nullifier : joinsplit.nullifiers)
        {
            if (GetNullifier(nullifier, SPROUT)) {
                // If the nullifier is set, this transaction
                // double-spends!
                auto txid = tx.GetHash().ToString();
                auto nf = nullifier.ToString();
                TracingWarn("consensus", "Sprout double-spend detected",
                    "txid", txid.c_str(),
                    "nf", nf.c_str());
                return UnsatisfiedShieldedReq::SproutDuplicateNullifier;
            }
        }

        SproutMerkleTree tree;
        // Prefer an intermediate treestate from this tx; fall back to the
        // view's known anchors.
        auto it = intermediates.find(joinsplit.anchor);
        if (it != intermediates.end()) {
            tree = it->second;
        } else if (!GetSproutAnchorAt(joinsplit.anchor, tree)) {
            auto txid = tx.GetHash().ToString();
            auto anchor = joinsplit.anchor.ToString();
            TracingWarn("consensus", "Transaction uses unknown Sprout anchor",
                "txid", txid.c_str(),
                "anchor", anchor.c_str());
            return UnsatisfiedShieldedReq::SproutUnknownAnchor;
        }

        // Append this joinsplit's commitments and record the resulting
        // treestate for later joinsplits in the same tx.
        for (const uint256& commitment : joinsplit.commitments)
        {
            tree.append(commitment);
        }

        intermediates.insert(std::make_pair(tree.root(), tree));
    }

    for (const SpendDescription &spendDescription : tx.vShieldedSpend) {
        if (GetNullifier(spendDescription.nullifier, SAPLING)) { // Prevent double spends
            auto txid = tx.GetHash().ToString();
            auto nf = spendDescription.nullifier.ToString();
            TracingWarn("consensus", "Sapling double-spend detected",
                "txid", txid.c_str(),
                "nf", nf.c_str());
            return UnsatisfiedShieldedReq::SaplingDuplicateNullifier;
        }

        SaplingMerkleTree tree;
        if (!GetSaplingAnchorAt(spendDescription.anchor, tree)) {
            auto txid = tx.GetHash().ToString();
            auto anchor = spendDescription.anchor.ToString();
            TracingWarn("consensus", "Transaction uses unknown Sapling anchor",
                "txid", txid.c_str(),
                "anchor", anchor.c_str());
            return UnsatisfiedShieldedReq::SaplingUnknownAnchor;
        }
    }

    for (const uint256 &nullifier : tx.GetOrchardBundle().GetNullifiers()) {
        if (GetNullifier(nullifier, ORCHARD)) { // Prevent double spends
            auto txid = tx.GetHash().ToString();
            auto nf = nullifier.ToString();
            TracingWarn("consensus", "Orchard double-spend detected",
                "txid", txid.c_str(),
                "nf", nf.c_str());
            return UnsatisfiedShieldedReq::OrchardDuplicateNullifier;
        }
    }

    // The Orchard bundle carries at most one anchor for the whole tx.
    std::optional<uint256> root = tx.GetOrchardBundle().GetAnchor();
    if (root) {
        OrchardMerkleTree tree;
        if (!GetOrchardAnchorAt(root.value(), tree)) {
            auto txid = tx.GetHash().ToString();
            auto anchor = root.value().ToString();
            TracingWarn("consensus", "Transaction uses unknown Orchard anchor",
                "txid", txid.c_str(),
                "anchor", anchor.c_str());
            return UnsatisfiedShieldedReq::OrchardUnknownAnchor;
        }
    }

    return std::nullopt;
}
bool CCoinsViewCache::HaveInputs(const CTransaction& tx) const
{
if (!tx.IsCoinBase()) {
for (unsigned int i = 0; i < tx.vin.size(); i++) {
const COutPoint &prevout = tx.vin[i].prevout;
const CCoins* coins = AccessCoins(prevout.hash);
if (!coins || !coins->IsAvailable(prevout.n)) {
return false;
}
}
}
return true;
}
// Compute the mining priority of `tx` at height `nHeight` from the value
// and confirmation age of its transparent inputs.
double CCoinsViewCache::GetPriority(const CTransaction &tx, int nHeight) const
{
    if (tx.IsCoinBase())
        return 0.0;

    // Shielded transfers do not reveal any information about the value or age of a note, so we
    // cannot apply the priority algorithm used for transparent utxos. Instead, we just
    // use the maximum priority for all (partially or fully) shielded transactions.
    // (Note that coinbase transactions cannot contain JoinSplits, or Sapling shielded Spends or Outputs.)
    if (tx.vJoinSplit.size() > 0 || tx.vShieldedSpend.size() > 0 || tx.vShieldedOutput.size() > 0) {
        return MAX_PRIORITY;
    }

    // FIXME: this logic is partially duplicated between here and CreateNewBlock in miner.cpp.
    double dResult = 0.0;
    for (const CTxIn& txin : tx.vin)
    {
        const CCoins* coins = AccessCoins(txin.prevout.hash);
        assert(coins);
        // Spent/unavailable outputs contribute nothing.
        if (!coins->IsAvailable(txin.prevout.n)) continue;
        // Only inputs confirmed strictly before nHeight add coin-age weight.
        if (coins->nHeight < nHeight) {
            dResult += coins->vout[txin.prevout.n].nValue * (nHeight-coins->nHeight);
        }
    }
    return tx.ComputePriority(dResult);
}
// Take the view's single-modifier lock (asserted free); `usage` is the
// entry's pre-modification memory footprint, settled in the destructor.
CCoinsModifier::CCoinsModifier(CCoinsViewCache& cache_, CCoinsMap::iterator it_, size_t usage) : cache(cache_), it(it_), cachedCoinUsage(usage) {
    assert(!cache.hasModifier);
    cache.hasModifier = true;
}
// Release the modifier: clean up the entry, settle the cache's
// memory-usage accounting, and drop the entry entirely when it is FRESH
// and fully pruned (nothing to write back to the parent).
CCoinsModifier::~CCoinsModifier()
{
    assert(cache.hasModifier);
    cache.hasModifier = false;
    it->second.coins.Cleanup();
    cache.cachedCoinsUsage -= cachedCoinUsage; // Subtract the old usage
    if ((it->second.flags & CCoinsCacheEntry::FRESH) && it->second.coins.IsPruned()) {
        cache.cacheCoins.erase(it);
    } else {
        // If the coin still exists after the modification, add the new usage
        cache.cachedCoinsUsage += it->second.coins.DynamicMemoryUsage();
    }
}
|
#!/bin/sh
#
# Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
# Copyright (C) 2000, 2001 Internet Software Consortium.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# $Id: setup.sh,v 1.10 2007-06-19 23:47:04 tbox Exp $
# Reset the test zone: restore ns2's working zone file from the pristine copy.
cp -f ns2/example1.db ns2/example.db
|
#!/bin/bash
# Install build prerequisites plus ESP-IDF v4.3.2 and ESP-ADF into ~/esp.
# Abort on the first failed command so a half-finished toolchain install
# is not silently treated as success.
set -e

echo -e "\n## Installing prerequisites"
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata
sudo apt-get install -y git wget flex bison gperf python3 python3-pip python3-setuptools python-is-python3 python3-libusb1 cmake ninja-build ccache libffi-dev libssl-dev dfu-util

# -p keeps the script re-runnable when ~/esp already exists (plain mkdir fails).
mkdir -p ~/esp

# install esp idf
cd ~/esp
git clone -b v4.3.2 --recursive https://github.com/espressif/esp-idf.git
cd ~/esp/esp-idf
./install.sh > ~/esp-idf-install.log 2>&1

# install esp adf
cd ~/esp
git clone --recursive https://github.com/espressif/esp-adf.git
|
/// Returns `true` when `payload` is a DataStore `syncReceived` event whose
/// mutation decodes to a `Post4V2` with the same id as `post`.
/// Fails the test (and returns `false`) if the payload data is not a
/// `MutationEvent`.
func handleSyncReceived(payload: HubPayload, post: Post4V2) -> Bool {
    guard payload.eventName == HubPayload.EventName.DataStore.syncReceived else {
        return false
    }
    guard let mutationEvent = payload.data as? MutationEvent else {
        XCTFail("Could not cast payload to mutation event")
        return false
    }
    guard let syncedPost = try? mutationEvent.decodeModel() as? Post4V2 else {
        return false
    }
    return syncedPost.id == post.id
}
<gh_stars>0
import time
import numpy as np
from src.neural_net.weights import Weight
from src.preprocessing import shuffle
class NeuralNetwork:
    """A feed-forward network trained with target propagation instead of
    ordinary backpropagation: after each forward pass, per-layer targets are
    initialized from a gradient step on the loss and iteratively refined,
    and weight updates are accumulated from the layer/target mismatch.
    """

    def __init__(self, layers: list):
        # Evaluation metric callable; set in compile().
        self.metric = None
        self.layers = layers
        self.weights = []
        # Current label (column vector); set per-sample in fit().
        self.y = None
        # Loss object: callable, with a .derivative method; set in compile().
        self.loss_f = None
        # Accumulated epoch loss; -1 means "not trained yet".
        self.loss = -1
        self.lr = 0.01
        self.batch_size = None
        # Wire each consecutive layer pair together through a Weight object.
        for i, layer in enumerate(self.layers[:-1]):
            weight = Weight(layer, self.layers[i + 1])
            self.weights.append(weight)
            layer.next_layer = self.layers[i + 1]
            layer.weight = weight

    def forward(self, x):
        """Run a forward pass through every weight; return the final activation."""
        activation = x
        for weight in self.weights:
            activation = weight.forward(activation)
        return activation

    def target_prop(self):
        """One target-propagation step for the current sample.

        Assumes forward() was just called (reads the output layer's
        activation) and that self.y holds the label.  Accumulates the
        sample loss into self.loss and the weight updates into each
        Weight object (applied later by update()).
        """
        assert self.loss_f is not None and self.y is not None
        preds = self.layers[-1].activation
        self.loss += float(self.loss_f(self.y, preds))
        loss = self.loss_f.derivative(self.y, preds)
        # init targets: one gradient step away from the current prediction.
        target = preds - self.lr * loss
        for weight in list(reversed(self.weights)):
            target = weight.init_target(target)
        # target propagation: iterate until the propagated target stops
        # moving (tolerance 1e-4) or a 20-second wall-clock budget expires.
        old = np.ones(self.layers[0].activation.shape)
        start = time.time()
        while True:
            for weight in reversed(self.weights):
                t = weight.target_prop()
            if np.linalg.norm(t - old) < 0.0001 or time.time() - start > 20:
                break
            old = t
        # accumulate updates for weight matrices, scaled by the squared
        # distance between the output layer and its target.
        output_target_diff = np.linalg.norm(self.layers[-1].target - self.layers[-1].activation) ** 2
        for weight in self.weights:
            weight.backward(times=output_target_diff)

    def compile(self, loss_function, metric):
        """Set the loss function and evaluation metric (Keras-style API)."""
        self.loss_f = loss_function
        self.metric = metric

    def update(self):
        """Apply the accumulated updates on every weight matrix."""
        for weight in self.weights:
            weight.update()

    def fit(self, X, Y, X_val=None, Y_val=None, learning_rate=0.01, n_epochs=25, batch_size=64):
        """Train sample-by-sample, applying updates every `batch_size` samples.

        Returns a history dict with per-epoch train/validation losses and
        validation metric values.
        """
        self.lr = learning_rate
        for weigths in self.weights:  # NOTE(review): 'weigths' is a typo (local name only)
            weigths.lr = learning_rate
        self.batch_size = batch_size
        num_samples = X.shape[0]
        train_losses = []
        val_losses = []
        val_metrics = []
        for epoch in range(n_epochs):
            X, Y = shuffle(X, Y)
            self.loss = 0
            for i, (x, y) in enumerate(zip(X, Y)):
                # Samples and labels are processed as column vectors.
                self.y = y.reshape(-1, 1)
                self.forward(x.reshape(-1, 1))
                self.target_prop()
                if i % batch_size == 0 and i != 0:
                    self.update()
            self.loss /= num_samples
            if X_val is not None and Y_val is not None:
                tr_loss, val_loss, val_metric = self.print_predict(X_val, Y_val, epoch=epoch)
                train_losses.append(tr_loss)
                val_losses.append(val_loss)
                val_metrics.append(val_metric)
            else:
                train_losses.append(self.loss)
                print("Epoch {} - Loss = {:.3f}".format(epoch, self.loss))
        return {"Train_loss": train_losses, "Val_loss": val_losses,
                "Val_performance": val_metrics, "Epochs": list(range(n_epochs))}

    def print_predict(self, X, Y, epoch=-1):
        """Evaluate on (X, Y), print a summary line, and return the tuple
        (train_loss, val_loss, metric); returns None when X or Y is None.
        """
        if X is None or Y is None:
            return
        train_loss = self.loss
        # predict() expects samples along axis 1 of its input — hence X.T.
        # NOTE(review): the validation loss below is computed on argmax
        # class labels, not probabilities — confirm loss_f expects that.
        preds = self.predict(X.T)
        val_loss = np.sum(self.loss_f(Y, preds)) / Y.shape[0]
        metric_val = self.metric(Y, preds)
        print("Epoch {} - Train loss = {:.3f} Validation Loss = {:.3f}, performance: {:.4f}"
              .format(epoch, train_loss, val_loss, metric_val))
        return train_loss, val_loss, metric_val

    def predict_proba(self, X):
        """Return the raw network outputs for X."""
        probs = self.forward(X)
        return probs

    def predict(self, X):
        """Return the argmax class index per sample (argmax over axis 0)."""
        probs = self.predict_proba(X)
        preds = np.argmax(probs, axis=0)
        return preds
|
set -exo pipefail

# CI build/test driver.  Comments out dependencies that cannot build for
# the current target (by defacing their names in Cargo.toml), then runs
# the usual cargo stages, gating the optional ones on env switches.
main() {
    export CARGO_OPTIONS="$CARGO_OPTIONS --target $TARGET"

    # linux-only dev-dependencies cannot build for non-linux targets.
    if [[ ! $TARGET =~ .*linux.* ]]; then
        sed -i "s/linux-embedded-hal/#linux-embedded-hal/g" Cargo.toml
        sed -i "s/embedded-hal-mock/#embedded-hal-mock/g" Cargo.toml
    fi
    sed -i "s/criterion/#criterion/g" Cargo.toml

    # Quote the env switches: the original unquoted `[ ! -z $VAR ]` form
    # misbehaves when the variable is unset or contains whitespace.
    if [ -n "$NO_DEFAULT_FEATURES" ]; then
        export CARGO_OPTIONS="$CARGO_OPTIONS --no-default-features"
    fi
    if [ -n "$FEATURES" ]; then
        export CARGO_OPTIONS="$CARGO_OPTIONS --features $FEATURES"
    fi

    cargo check $CARGO_OPTIONS
    cargo build $CARGO_OPTIONS
    if [ -z "$DISABLE_EXAMPLES" ] && [[ $TARGET =~ .*linux.* ]]; then
        cargo build $CARGO_OPTIONS --examples
    fi
    cargo doc $CARGO_OPTIONS
    if [ -z "$DISABLE_TESTS" ] && [[ $TARGET =~ .*linux.* ]]; then
        cargo test $CARGO_OPTIONS
    fi
    if [ -z "$DISABLE_CLIPPY" ] && [[ $TRAVIS_RUST_VERSION =~ .*stable.* ]]; then
        cargo clippy
    fi
}

main
|
#!/bin/sh
# CI driver: runs unit/component tests, then deploys three applications as
# a system test, and finally reports which stage failed.
#
# `set -e` keeps setup failures fatal.  Each *test* command, however, is
# run with `|| var=$?` so a failure is captured instead of aborting — in
# the original, `set -e` exited before `$?` could ever be inspected, so
# the failure reporting below was unreachable.
set -xe

sudo service mysql start
cd ~/jtalks-cicd

echo "Installing dependencies to run tests"
python setup.py install --user
export PYTHONUNBUFFERED=1

echo '=============================================='
echo '========Running Unit & Component Tests========'
echo '=============================================='
component_tests_result=0
python setup.py test || component_tests_result=$?

echo '=============================================='
echo '=============Running System Tests============='
echo '=============================================='
pip install -e . --user
export PATH="$PATH:/home/jtalks/.local/bin"
rm -rf ~/.jtalks
cp -rv ~/jtalks-cicd/docs/configs_example ~/.jtalks

jc_system_test_result=0
jtalks deploy -e dev -p jcommune -b 6 --sanity-test-timeout-sec=300 || jc_system_test_result=$?
poulpe_system_test_result=0
jtalks deploy -e dev -p poulpe -b 344 || poulpe_system_test_result=$?
antarcticle_system_test_result=0
jtalks deploy -e dev -p antarcticle -b 564 || antarcticle_system_test_result=$?

# Exit statuses are taken modulo 256, so the original `exit 1000`
# actually reported 232; use a plain 1.
if [ "$component_tests_result" != 0 ]; then
  echo '===Component/unit tests failed!==='
  exit 1
elif [ "$jc_system_test_result" != 0 ]; then
  echo '===JCommune Deployment failed in scope of System Testing==='
  exit 1
elif [ "$poulpe_system_test_result" != 0 ]; then
  echo '===Poulpe Deployment failed in scope of System Testing==='
  exit 1
elif [ "$antarcticle_system_test_result" != 0 ]; then
  echo '===Antarcticle Deployment failed in scope of System Testing==='
  exit 1
fi
|
const mongoose = require('mongoose');

// Connect to MongoDB (fire-and-forget: the model calls below assume the
// connection eventually succeeds; failures are only logged).
mongoose.connect('mongodb://localhost/exercise', { useNewUrlParser: true })
    .then(() => console.log('Connected to MongoDB...'))
    .catch(err => console.log('Connection failed...'));

// Author Schema — embedded wholesale inside each course document below.
const authorSchema = new mongoose.Schema({
    name: String,
    bio: String,
    website: String
});

// Author Model
const Author = mongoose.model('author', authorSchema);

// Course Schema — `author` is a required embedded subdocument (not a ref).
const courseSchema = new mongoose.Schema({
    name: String,
    author: {
        type: authorSchema,
        required: true
    }
});

// Course Model
const Course = mongoose.model('course', courseSchema);
// CRUD Operations
// Persist a new author document and log the saved result.
async function createAuthor(name, bio, website){
    const author = new Author({ name, bio, website });
    const saved = await author.save();
    console.log(saved);
}
// Persist a new course (with its embedded author) and log the result.
async function createCourse(name, author){
    const saved = await new Course({ name, author }).save();
    console.log(saved);
}
// Fetch and log every course document.
async function getCourses(){
    console.log(await Course.find());
}
/*
async function updateCourseAuthor(courseId){
const course = await Course.findById(courseId);
course.author.name = 'Bipro';
course.save();
}
*/
// Update the embedded author's name on one course without loading the
// document first (single round-trip, update-first approach).
async function updateCourseAuthor(courseId){
    // `Model.update` is deprecated in modern Mongoose; `updateOne` has
    // identical filter/$set semantics for a single document.  The result
    // (write summary) is returned instead of being silently discarded.
    const result = await Course.updateOne({_id: courseId}, {
        $set: {
            'author.name': '<NAME>'
        }
    });
    return result;
}
// Sample invocations, left commented out; only the read path runs on load.
// createAuthor('Biprodas', 'Software Engineer', 'biprodas.me');
// createAuthor('Hiranmoy', 'Lab Technishian', 'hiranmoy.net');
// createCourse('Mastering Node.js', new Author({name: 'Biprodas'}));
// updateCourseAuthor('5<PASSWORD>');
getCourses();
<filename>src/swganh_core/messages/get_map_locations_response_message.h
// This file is part of SWGANH which is released under the MIT license.
// See file LICENSE or go to http://swganh.com/LICENSE
#pragma once
#include <cstdint>
#include <algorithm>
#include <list>
#include <string>
#include "swganh/byte_buffer.h"
#include "base_swg_message.h"
namespace swganh {
namespace messages {
// One point of interest on the planetary map, as carried on the wire.
struct MapLocation
{
    uint64_t id;       // unique location id
    std::wstring name; // display name
    float x;           // map x coordinate
    float y;           // map y coordinate
    // use only one of the following; choose the type to use and assign it
    // to the appropriate display mode
    uint8_t type_displayAsCategory;
    uint8_t type_displayAsSubcategory;
    uint8_t type_displayAsActive;
};
struct GetMapLocationsResponseMessage : public BaseSwgMessage
{
uint16_t Opcount() const { return 28; }
uint32_t Opcode() const { return 0x9F80464C; }
std::string planet_name;
std::list<MapLocation> locations;
std::list<MapLocation> blank_list1;
std::list<MapLocation> blank_list2;
void OnSerialize(swganh::ByteBuffer& buffer) const
{
buffer.write(planet_name);
buffer.write(locations.size());
std::for_each(locations.begin(), locations.end(), [&buffer] (MapLocation location) {
buffer.write(location.id);
buffer.write(location.name);
buffer.write(location.x);
buffer.write(location.y);
buffer.write(location.type_displayAsCategory);
buffer.write(location.type_displayAsSubcategory);
buffer.write(location.type_displayAsActive);
});
// unclear why the following lists are needed; locations will be displayed no matter the list
buffer.write(blank_list1.size());
std::for_each(blank_list1.begin(), blank_list1.end(), [&buffer] (MapLocation location) {
buffer.write(location.id);
buffer.write(location.name);
buffer.write(location.x);
buffer.write(location.y);
buffer.write(location.type_displayAsCategory);
buffer.write(location.type_displayAsSubcategory);
buffer.write(location.type_displayAsActive);
});
buffer.write(blank_list2.size());
std::for_each(blank_list2.begin(), blank_list2.end(), [&buffer] (MapLocation location) {
buffer.write(location.id);
buffer.write(location.name);
buffer.write(location.x);
buffer.write(location.y);
buffer.write(location.type_displayAsCategory);
buffer.write(location.type_displayAsSubcategory);
buffer.write(location.type_displayAsActive);
});
buffer.write<uint32_t>(0);
buffer.write<uint32_t>(0);
buffer.write<uint32_t>(0);
}
void OnDeserialize(swganh::ByteBuffer& buffer)
{
planet_name = buffer.read<std::string>();
uint32_t locations_count = buffer.read<uint32_t>();
for (uint32_t i = 0; i < locations_count; i++) {
MapLocation location;
location.id = buffer.read<uint64_t>();
location.name = buffer.read<std::wstring>();
location.x = buffer.read<float>();
location.y = buffer.read<float>();
location.type_displayAsCategory = buffer.read<uint8_t>();
location.type_displayAsSubcategory = buffer.read<uint8_t>();
location.type_displayAsActive = buffer.read<uint8_t>();
locations.push_back(location);
}
// unclear why the following lists are needed; locations will be displayed no matter the list
uint32_t blank_list1_count = buffer.read<uint32_t>();
for (uint32_t i = 0; i < blank_list1_count; i++) {
MapLocation location;
location.id = buffer.read<uint64_t>();
location.name = buffer.read<std::wstring>();
location.x = buffer.read<float>();
location.y = buffer.read<float>();
location.type_displayAsCategory = buffer.read<uint8_t>();
location.type_displayAsSubcategory = buffer.read<uint8_t>();
location.type_displayAsActive = buffer.read<uint8_t>();
blank_list1.push_back(location);
}
uint32_t blank_list2_count = buffer.read<uint32_t>();
for (uint32_t i = 0; i < blank_list2_count; i++) {
MapLocation location;
location.id = buffer.read<uint64_t>();
location.name = buffer.read<std::wstring>();
location.x = buffer.read<float>();
location.y = buffer.read<float>();
location.type_displayAsCategory = buffer.read<uint8_t>();
location.type_displayAsSubcategory = buffer.read<uint8_t>();
location.type_displayAsActive = buffer.read<uint8_t>();
blank_list2.push_back(location);
}
}
};
}} // namespace swganh::messages
|
#!/bin/bash
# Collect the sdist built at stage 3 out of the target rootfs (board-less
# builds only), then remove the pynq git checkout from the image.
set -x
set -e

target=$1
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Quote the variable: with PYNQ_BOARD unset, the unquoted test expands to
# `[ == "Unknown" ]`, which is a syntax error and — under `set -e` —
# kills the script.
if [ "${PYNQ_BOARD}" == "Unknown" ]; then
    dist_out="$BUILD_ROOT/PYNQ/dist"
    mkdir -p "$dist_out"
    cp -rf "$target"/home/xilinx/pynq_git/dist/* "$dist_out"
fi

sudo rm -rf "$target/home/xilinx/pynq_git" "$target/home/xilinx/get_revision.sh"
|
#!/bin/bash
# K8S offline install script.
# Installed & verified by CentOS Linux release 7.3.1611 (Core)
# Step 1
# Start python simple http server first!!!
# python -m SimpleHTTPServer
# Serving HTTP on 0.0.0.0 port 8000 ...
# Step 2
# Run script with parameters
# Server side:
# curl -L http://192.168.0.104:8000/install.sh | bash -s master
# Client side:
# curl -L http://192.168.0.104:8000/install.sh | bash -s join --token=6669b1.81f129bc847154f9 192.168.0.104:6443

# Trace every command and abort on the first failure.
set -x
set -e

# Offline artifact server (rpms, images, manifests) and the registry
# prefix the images were saved under.
HTTP_SERVER=192.168.0.104:8000
KUBE_REPO_PREFIX=gcr.io/google_containers

# Everything below installs packages and writes system config: root only.
root=$(id -u)
if [ "$root" -ne 0 ] ;then
    echo must run as root
    exit 1
fi
# Install docker from the offline rpm bundle when it is not already on
# PATH, then enable and start the service.
kube::install_docker()
{
    # `which` exits non-zero when docker is absent; suspend errexit around
    # the probe so the failure is observable instead of fatal.
    set +e
    which docker > /dev/null 2>&1
    i=$?
    set -e
    if [ $i -ne 0 ]; then
        # Fetch and install the rpm bundle from the offline server.
        curl -L http://$HTTP_SERVER/rpms/docker.tar.gz > /tmp/docker.tar.gz
        tar zxf /tmp/docker.tar.gz -C /tmp
        yum localinstall -y /tmp/docker/*.rpm
        kube::config_docker
    fi
    systemctl enable docker.service && systemctl start docker.service
    echo docker has been installed!
    docker version
    # Clean up the temporary install payload.
    rm -rf /tmp/docker /tmp/docker.tar.gz
}
# Prepare the host for docker/kubernetes: disable SELinux and firewalld,
# set overlay storage, and restart docker with the new options.
kube::config_docker()
{
    # Disable SELinux now and persistently.
    setenforce 0
    sed -i -e 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

    #sysctl -w net.bridge.bridge-nf-call-iptables=1
    #sysctl -w net.bridge.bridge-nf-call-ip6tables=1

    # /etc/sysctl.conf
    # net.bridge.bridge-nf-call-ip6tables = 1
    # net.bridge.bridge-nf-call-iptables = 1
    systemctl disable firewalld
    systemctl stop firewalld

    # Use overlay storage with SELinux support off, then restart docker.
    echo DOCKER_STORAGE_OPTIONS=\" -s overlay --selinux-enabled=false\" > /etc/sysconfig/docker-storage
    systemctl daemon-reload && systemctl restart docker.service
}
# Load the docker images required for the requested role ("master" gets
# the control-plane set, anything else the node set) from the offline
# HTTP server, skipping images already present locally.
kube::load_images()
{
    mkdir -p /tmp/k8s

    master_images=(
        kube-apiserver-amd64_v1.6.2
        kube-controller-manager-amd64_v1.6.2
        kube-scheduler-amd64_v1.6.2
        kube-proxy-amd64_v1.6.2
        pause-amd64_3.0
        k8s-dns-dnsmasq-nanny-amd64_1.14.1
        k8s-dns-kube-dns-amd64_1.14.1
        k8s-dns-sidecar-amd64_1.14.1
        etcd_v3.0.17
        flannel-amd64_v0.7.1
        kubernetes-dashboard-amd64_1.6.2
    )

    node_images=(
        pause-amd64_3.0
        kube-proxy-amd64_v1.6.2
        flannel-amd64_v0.7.1
        kubernetes-dashboard-amd64_1.6.2
    )

    # Quoting "$1" keeps the test well-formed even when the role argument
    # is missing (the unquoted form is a syntax error, fatal under set -e).
    if [ "$1" == "master" ]; then
        images=("${master_images[@]}")
    else
        images=("${node_images[@]}")
    fi

    # Only fetch-and-load an image that is not already in the local docker
    # image cache (match on "name_tag" against the saved-file naming).
    for image in "${images[@]}"; do
        ret=$(docker images | awk 'NR!=1{print $1"_"$2}' | grep $KUBE_REPO_PREFIX/$image | wc -l)
        if [ $ret -lt 1 ];then
            curl -L http://$HTTP_SERVER/images/$image.tar > /tmp/k8s/$image.tar
            docker load < /tmp/k8s/$image.tar
        fi
    done

    rm /tmp/k8s* -rf
}
# Install kubeadm/kubelet from the offline rpm bundle when kubeadm is not
# already on PATH, adjust the kubelet cgroup driver, and start kubelet.
kube::install_bin()
{
    # `which` exits non-zero when kubeadm is absent; suspend errexit
    # around the probe so the failure is observable instead of fatal.
    set +e
    which kubeadm > /dev/null 2>&1
    i=$?
    set -e
    if [ $i -ne 0 ]; then
        # Fetch and install the rpm bundle from the offline server.
        curl -L http://$HTTP_SERVER/rpms/k8s.tar.gz > /tmp/k8s.tar.gz
        tar zxf /tmp/k8s.tar.gz -C /tmp
        yum localinstall -y /tmp/k8s/*.rpm
        rm -rf /tmp/k8s*

        # Change cgroup-driver for kubelet
        sed -i -e 's/cgroup-driver=systemd/cgroup-driver=cgroupfs/g' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
        #sed -i -e 's/$KUBELET_NETWORK_ARGS//g' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

        # Enable and start kubelet service
        systemctl enable kubelet.service && systemctl start kubelet.service && rm -rf /etc/kubernetes
    fi
}
# Permanently disable firewalld so the Kubernetes ports are reachable.
kube::config_firewalld()
{
    systemctl disable firewalld && systemctl stop firewalld
    # Per-port openings kept for reference if firewalld is ever re-enabled:
    # iptables -A IN_public_allow -p tcp -m tcp --dport 9898 -m conntrack --ctstate NEW -j ACCEPT
    # iptables -A IN_public_allow -p tcp -m tcp --dport 6443 -m conntrack --ctstate NEW -j ACCEPT
    # iptables -A IN_public_allow -p tcp -m tcp --dport 10250 -m conntrack --ctstate NEW -j ACCEPT
}
# Block until kubeadm has written the scheduler static-pod manifest,
# signalling the control-plane manifests are in place.
# (Function name typo kept: callers reference kube::wati_manifests.)
kube::wati_manifests(){
    until [[ -f /etc/kubernetes/manifests/kube-scheduler.json ]]; do
        sleep 2
    done
}
# Patch every static-pod manifest to use imagePullPolicy "IfNotPresent",
# so kubelet uses the images pre-loaded by kube::load_images instead of
# trying to pull from the internet.
kube::config_manifests()
{
    cd /etc/kubernetes/manifests
    # Glob instead of parsing `ls` output — safe against word splitting,
    # and "$file" is quoted for the same reason.
    for file in *
    do
        sed -i '/image/a\ \"imagePullPolicy\": \"IfNotPresent\",' "$file"
    done
}
# Poll the local apiserver until it starts answering on port 6443.
kube::wait_apiserver()
{
    ret=1
    while [[ $ret != 0 ]]; do
        sleep 2
        # Fix: the original `2>&1>/dev/null` redirected stderr to the
        # terminal and only silenced stdout; `>/dev/null 2>&1` silences both.
        curl -k https://127.0.0.1:6443 > /dev/null 2>&1
        ret=$?
    done
}
# Bring up a single-node Kubernetes control plane: docker, pre-loaded
# images, binaries, firewall config, `kubeadm init`, the flannel overlay
# network, and the dashboard. Order matters: docker/images/binaries must
# be ready before kubeadm init runs.
kube::master_up()
{
    kube::install_docker
    kube::load_images master
    kube::install_bin
    kube::config_firewalld
    # kubeadm wants internet access to look up the latest release; point
    # storage.googleapis.com at the local HTTP server instead.
    echo $HTTP_SERVER storage.googleapis.com >> /etc/hosts
    # --pod-network-cidr must be passed here, otherwise the flannel
    # network installed below will not work.
    export KUBE_ETCD_IMAGE=gcr.io/google_containers/etcd-amd64:3.0.17
    kubeadm init --kubernetes-version=v1.6.2 --pod-network-cidr=10.96.0.0/12
    # To allow pods to be scheduled onto the master, uncomment:
    # kubectl taint nodes --all dedicated-
    export KUBECONFIG=/etc/kubernetes/admin.conf
    # install flannel network
    kubectl apply -f http://$HTTP_SERVER/network/kube-flannel-rbac.yml
    kubectl apply -f http://$HTTP_SERVER/network/kube-flannel.yml --namespace=kube-system
    #install dashboard
    kubectl create -f http://$HTTP_SERVER/network/kubernetes-dashboard-rbac.yml
    kubectl create -f http://$HTTP_SERVER/network/kubernetes-dashboard.yml
    # show pods
    kubectl get po --all-namespaces
    # show tokens
    kubeadm token list
    echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bashrc
    echo "Please reload ~/.bashrc to use kubectl command!"
    echo "K8S master install finished!"
}
# Join this host to an existing cluster. All arguments are forwarded
# verbatim to `kubeadm join` (token, master endpoint, ...).
kube::node_up()
{
    kube::install_docker
    kube::load_images minion
    kube::install_bin
    kube::config_firewalld
    # Quote "$@" so join arguments survive word splitting/globbing.
    kubeadm join "$@"
}
# Destructively remove Kubernetes from this host: stops the kubelet,
# stops and removes ALL docker containers (not just k8s ones), and
# deletes every piece of kubelet/etcd/cni state. Irreversible.
kube::tear_down()
{
    systemctl stop kubelet.service
    docker ps -aq|xargs -I '{}' docker stop {}
    docker ps -aq|xargs -I '{}' docker rm {}
    # Unmount kubelet volume mounts before deleting the directory tree.
    df |grep /var/lib/kubelet|awk '{ print $6 }'|xargs -I '{}' umount {}
    rm -rf /var/lib/kubelet && rm -rf /etc/kubernetes/ && rm -rf /var/lib/etcd
    yum remove -y kubectl kubeadm kubelet kubernetes-cni
    rm -rf /var/lib/cni
    # Remove the leftover CNI bridge interface.
    ip link del cni0
}
# Entry point: dispatch on the first argument (master / join / down).
main()
{
    case "$1" in
    "m" | "master" )
        kube::master_up
        ;;
    "j" | "join" )
        shift
        # Forward remaining args (token, master endpoint) to kubeadm join.
        kube::node_up "$@"
        ;;
    "d" | "down" )
        kube::tear_down
        ;;
    *)
        echo "usage: $0 m[master] | j[join] token | d[down] "
        echo "       $0 master to setup master "
        echo "       $0 join to join master with token "
        echo "       $0 down to tear all down, include all data! so be careful"
        echo "       unknown command $0 $@"
        ;;
    esac
}
# Quote "$@" so arguments with spaces reach main intact.
main "$@"
|
<filename>server/src/http/routes/public/constants.js
const express = require("express");
const tryCatch = require("../../middlewares/tryCatchMiddleware");
const { referrers } = require("../../../common/model/constants/referrers");
const { optMode } = require("../../../common/model/constants/etablissement");
/**
* @description Constants router.
*/
module.exports = () => {
const router = express.Router();
/**
* @description Returns all constants.
*/
router.get(
"/",
tryCatch((req, res) => res.send({ referrers: Object.values(referrers), optMode: Object.values(optMode) }))
);
return router;
};
|
<filename>legacy/rpi/source/SerialChain.cpp
/**
* BitMatrix - Raspberry Pi Dot Matrix Display Controller
* Copyright 2020 Frame Factory GmbH, <NAME>
* License: MIT
*/
#include "SerialChain.h"
#include "SerialDevice.h"
#include <wiringPi.h>
#include <iostream>
using namespace std;
// Creates a chain that shifts its bits out on the given GPIO pin, and
// configures that pin as an output via wiringPi.
SerialChain::SerialChain(uint8_t dataPin) :
    _bitCount(0),
    _dataPin(dataPin),
    _dataInverted(false)
{
    pinMode(_dataPin, OUTPUT);
}
// Destroys the chain, releasing all owned devices via clear().
SerialChain::~SerialChain()
{
    clear();
}
// Deletes all devices owned by the chain and resets the bit count.
// Fix: also empty the _devices container afterwards — the original left
// the now-dangling pointers in place, so a later clear() or the
// destructor (which calls clear()) would double-delete them.
void SerialChain::clear()
{
    for(auto it : _devices) {
        delete it;
    }
    _devices.clear();

    _bitCount = 0;
}
void SerialChain::setDataPinInverted(bool isInverted)
{
_dataInverted = isInverted;
}
// Prepares a shift-out pass. Starts with the last device (bits are
// emitted in reverse device order by writeBit()) and records how many
// padding bits must precede this chain's own bits when the chain is
// part of a longer frame.
// NOTE(review): assumes totalBitCount >= _bitCount — confirm at call sites,
// otherwise _bitOffset underflows (it is unsigned arithmetic).
void SerialChain::beginWrite(uint32_t totalBitCount)
{
    _deviceIndex = deviceCount() - 1;
    _bitOffset = totalBitCount - _bitCount;
    //cout << "SerialChain::beginWrite - offset: " << _bitOffset << ", ";
}
// Drives the next bit of the current write pass onto the data pin.
// Padding bits (set up by beginWrite()) are emitted first as logic low;
// after that, bits come from the devices in reverse order, stepping to
// the previous device once the current one reports it is done.
void SerialChain::writeBit()
{
    // Still inside the padding prefix: emit an (optionally inverted) low.
    if (_bitOffset > 0) {
        _bitOffset--;
        digitalWrite(_dataPin, _dataInverted ? HIGH : LOW);
        return;
    }

    bool done = false;
    bool bit = _devices[_deviceIndex]->nextBit(done);
    if (done) {
        _deviceIndex--;
    }

    //cout << (bit ? "1" : "0");
    //if (done) cout << endl;

    // XOR applies the optional polarity inversion to the output level.
    digitalWrite(_dataPin, _dataInverted ^ bit ? HIGH : LOW);
}
void SerialChain::updateBitCount()
{
uint32_t bitCount = 0;
for (auto it : _devices) {
bitCount += it->bitCount();
}
_bitCount = bitCount;
}
|
# Download the PMC open-access file list, then fetch the archive for
# every id listed (one per line) in ids.txt.
wget "ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_file_list.txt"

# `read -r` keeps backslashes literal; "$id" is quoted so ids cannot be
# word-split or glob-expanded inside the grep pattern.
while read -r id; do
    # Column 1 of the file list holds the archive path for the id.
    path=$(grep "$id" oa_file_list.txt | cut -f1)
    [ -n "$path" ] && wget "ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/$path"
done < ids.txt
|
$(document).ready(function() {
    // Initialise an analog clock for every element marked data-jquery-clock.
    $('*[data-jquery-clock]').each(function(){
        let t = $(this);

        // Fix: take a single time snapshot. The original called
        // `new Date()` three times, so seconds/minutes/hours could
        // straddle a tick and start mutually inconsistent.
        let now = new Date(),
            seconds = now.getSeconds(),
            hours = now.getHours(),
            mins = now.getMinutes(),
            sdegree = seconds * 6,             // 360deg / 60 s
            hdegree = hours * 30 + (mins / 2), // 360deg / 12 h + minute drift
            mdegree = mins * 6;                // 360deg / 60 min

        // Advances every hand by one second and applies the rotations.
        let updateWatch = function() {
            sdegree += 6;
            if ( sdegree % 360 == 0 ) {
                mdegree += 6;
            }
            hdegree += (0.1 / 12); // 0.5deg per minute = 0.5/60 per second

            let srotate = "rotate(" + sdegree + "deg)",
                hrotate = "rotate(" + hdegree + "deg)" ,
                mrotate = "rotate(" + mdegree + "deg)";
            $(".jquery-clock-sec", t).css({"-moz-transform" : srotate, "-webkit-transform" : srotate, '-ms-transform' : srotate });
            $(".jquery-clock-hour", t).css({"-moz-transform" : hrotate, "-webkit-transform" : hrotate, '-ms-transform' : hrotate });
            $(".jquery-clock-min", t).css({"-moz-transform" : mrotate, "-webkit-transform" : mrotate, '-ms-transform' : mrotate });
        };

        updateWatch();
        setInterval(function(){
            $(".jquery-clock-sec, .jquery-clock-hour, .jquery-clock-min").addClass('jquery-clock-transitions');
            updateWatch();
        }, 1000);

        // Toggle CSS transitions on focus/blur so the hands snap into
        // place instead of sweeping when background timers resume.
        $(window).focus(function() {
            $(".jquery-clock-sec, .jquery-clock-hour, .jquery-clock-min").addClass('jquery-clock-transitions');
        });
        $(window).blur(function() {
            $(".jquery-clock-sec, .jquery-clock-hour, .jquery-clock-min").removeClass('jquery-clock-transitions');
        });
    });
});
#
# Description: Calculate composites for various Fourier phases
# (on abyss and/or vortex.earthsci.unimelb.edu.au)
#
# Print the expected positional arguments and exit with failure status.
function usage {
    echo "USAGE: bash $0 fourier_file sf_file var_file var_short var_long freq outfile python_exe code_dir vis_dir temp_dir"
    echo "   fourier_file:  Fourier transform file name"
    echo "   sf_file:       Streamfunction file"
    echo "   var_file:      Variable file"
    echo "   var_short:     Variable short name"
    echo "   var_long:      Variable long name"
    echo "   freq:          Frequency to filter phase against"
    echo "   outfile:       Output file name, which includes the word phase-range which will be replaced"
    echo "   python_exe:    Python executable"
    echo "   code_dir:      Directory that psa_date_list.py and calc_composite.py are in"
    echo "   vis_dir:       Directory that plot_map.py is in"
    echo "   temp_dir:      Directory to store temporary data files"
    exit 1
}
# --- Argument handling ---------------------------------------------------
nargs=11
if [ $# -ne $nargs ] ; then
    usage
fi

fourier_file=$1
sf_file=$2
var_file=$3
var_short=$4
var_long=$5
freq=$6
outfile=$7
python_exe=$8
code_dir=$9
vis_dir=${10}
temp_dir=${11}

# Central Fourier phases to composite around; each is expanded to +/- 5.
central_phases=(13 30 44 57)
temp_files=()

# Generate a date list and calculate composite for each phase range
for central_phase in "${central_phases[@]}"; do
    # Shell arithmetic instead of spawning the external `expr` command.
    start_phase=$((central_phase - 5))
    end_phase=$((central_phase + 5))

    temp_outfile=$(echo "${outfile}" | sed "s/phase-range/phase${start_phase}-${end_phase}/")
    temp_date_file=${temp_dir}/dates_phase${start_phase}-${end_phase}.txt
    temp_sfcomp_file=${temp_dir}/sf-composite_phase${start_phase}-${end_phase}.nc
    temp_varcomp_file=${temp_dir}/${var_short}-composite_phase${start_phase}-${end_phase}.nc

    # Dates whose phase at the chosen frequency falls in [start, end].
    ${python_exe} ${code_dir}/psa_date_list.py ${fourier_file} ${temp_date_file} \
    --freq ${freq} --phase_filter ${start_phase} ${end_phase}

    # Streamfunction composite (no significance test) ...
    ${python_exe} ${code_dir}/calc_composite.py ${sf_file} sf ${temp_sfcomp_file} \
    --date_file ${temp_date_file} --region sh --no_sig

    # ... and the variable composite (with significance test).
    ${python_exe} ${code_dir}/calc_composite.py ${var_file} ${var_short} ${temp_varcomp_file} \
    --date_file ${temp_date_file} --region sh

    bash ${vis_dir}/plot_composite.sh ${temp_varcomp_file} ${var_long} ${temp_sfcomp_file} streamfunction ${temp_outfile} ${python_exe} ${vis_dir}

    temp_files+=(${temp_date_file} ${temp_sfcomp_file} ${temp_varcomp_file})
done

# Remove intermediate files; quoted expansion keeps paths intact.
rm "${temp_files[@]}"
|
package ru.alklimenko.calculator;
import java.util.Scanner;
/**
 * Interactive console calculator: reads arithmetic expressions from
 * standard input and prints each result, until an empty line (or end of
 * input) is reached.
 */
public class Main {

    private static final Scanner scanInput = new Scanner(System.in);

    /**
     * Reads one line from standard input.
     *
     * @return the next line, or {@code null} when the stream is exhausted.
     *         The original called {@code nextLine()} unconditionally, which
     *         throws {@link java.util.NoSuchElementException} on EOF
     *         (e.g. piped input); the guard lets the caller exit cleanly.
     */
    private static String getLine() {
        return scanInput.hasNextLine() ? scanInput.nextLine() : null;
    }

    public static void main(String[] args) {
        @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
        Expression exp = new Expression();

        // Register "^" (power, precedence 4) as an additional operator.
        Operator.add("^", 4, Math::pow);

        System.out.println("Enter arithmetic expression for calculation");
        System.out.println("Empty string assume exit");
        do {
            System.out.print(">");
            String str = getLine();
            // A blank line or EOF ends the session.
            if (str == null || str.trim().isEmpty()) {
                break;
            }
            try {
                System.out.println(">" + exp.calculate(str));
            } catch (InvalidExpressionException e) {
                System.out.println(e.getMessage());
                System.out.println();
            }
        } while (true);
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.