text stringlengths 1 1.05M |
|---|
package com.danli.entity;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.fasterxml.jackson.annotation.JsonFormat;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.Accessors;
import java.io.Serializable;
import java.time.LocalDateTime;
/**
 * Visitor entity class.
 *
 * <p>Maps one row of the visitor table: identifies a unique site visitor and
 * records where they come from, what client they use, and how often they visit.
 *
 * @author luzhiwei
 * @date 2021/12/12
 */
@Data
@EqualsAndHashCode(callSuper = false)
@Accessors(chain = true)
@AllArgsConstructor
@NoArgsConstructor
public class Visitor implements Serializable {

    private static final long serialVersionUID = 1L;

    /** Auto-incremented primary key. */
    @TableId(value = "id", type = IdType.AUTO)
    private Long id;

    /** Unique identifier assigned to this visitor. */
    private String uuid;

    /** Visitor IP address. */
    private String ip;

    /** Geographic / ISP source resolved from the IP address. */
    private String ipSource;

    /** Operating system reported by the client. */
    private String os;

    /** Browser reported by the client. */
    private String browser;

    /** Time of the first visit. */
    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
    private LocalDateTime createTime;

    /** Time of the most recent visit. */
    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
    private LocalDateTime lastTime;

    /** Page-view count for this visitor. */
    private Integer pv;

    /** Raw User-Agent string of the client. */
    private String userAgent;
}
|
#!/usr/bin/env bash
# dockerbunker service definition for "Send".
#
# Walk up the directory tree until dockerbunker.sh is found. Bail out with an
# error once the filesystem root is reached instead of spinning forever
# (the original `while true; do ls | grep -q ...` looped endlessly at / when
# the file did not exist anywhere on the path).
while [[ ! -e dockerbunker.sh ]]; do
	if [[ $PWD == / ]]; then
		echo "Error: dockerbunker.sh not found in any parent directory." >&2
		exit 1
	fi
	cd ..
done
BASE_DIR=$PWD

PROPER_NAME="Send"
# Lower-case the proper name and strip all whitespace to form the service name.
SERVICE_NAME="$(echo -e "${PROPER_NAME,,}" | tr -d '[:space:]')"
PROMPT_SSL=1

# Shared environment and helper functions provided by dockerbunker
# (options_menu, pre/post_configure_routine, set_domain, ... come from here).
declare -a environment=( "data/env/dockerbunker.env" "data/include/init.sh" )
for env in "${environment[@]}"; do
	[[ -f "${BASE_DIR}/${env}" ]] && source "${BASE_DIR}/${env}"
done

declare -A WEB_SERVICES
declare -a containers=( "${SERVICE_NAME}-service-dockerbunker" )
declare -A volumes=( [${SERVICE_NAME}-data-vol-1]="/send/data" )
declare -a add_to_network=( "${SERVICE_NAME}-service-dockerbunker" )
declare -a networks=( )
declare -A IMAGES=( [service]="chaosbunker/${SERVICE_NAME}-docker" )

# With no sub-command, fall back to the interactive menu from init.sh.
[[ -z $1 ]] && options_menu

# Write the per-service environment file consumed by the generic routines.
configure() {
	pre_configure_routine
	echo -e "# \e[4mSend Settings\e[0m"
	set_domain
	cat <<-EOF >> "${SERVICE_ENV}"
	PROPER_NAME=${PROPER_NAME}
	SERVICE_NAME=${SERVICE_NAME}
	SSL_CHOICE=${SSL_CHOICE}
	LE_EMAIL=${LE_EMAIL}
	SERVICE_DOMAIN=${SERVICE_DOMAIN}
	EOF
	post_configure_routine
}

# Dispatch: the sub-command names a function defined in init.sh; letsencrypt
# also receives the remaining arguments. Quoting fixes the original unquoted
# `$1 $*`, which would word-split arguments containing spaces.
if [[ $1 == "letsencrypt" ]]; then
	"$@"
else
	"$1"
fi
<filename>src/main/java/com/testvagrant/ekam/api/annotations/Authorization.java
package com.testvagrant.ekam.api.annotations;

import com.google.inject.BindingAnnotation;

import java.lang.annotation.Retention;
import java.lang.annotation.Target;

import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.PARAMETER;
import static java.lang.annotation.RetentionPolicy.RUNTIME;

/**
 * Guice binding annotation marking a field or parameter that should receive
 * the authorization value bound to this annotation in the injector
 * configuration.
 */
@BindingAnnotation
@Target({FIELD, PARAMETER})
@Retention(RUNTIME)
public @interface Authorization {}
|
'''
@Author: <NAME>
@Date: 2021-01-07 15:04:21
@Description: 这个是训练 mixed model 的时候使用的
@LastEditTime: 2021-02-06 22:40:20
'''
import os
import numpy as np
import time
import torch
from torch import nn, optim
from TrafficFlowClassification.TrafficLog.setLog import logger
from TrafficFlowClassification.utils.setConfig import setup_config
# Models available for this training script
from TrafficFlowClassification.models.resnet1d_ae import resnet_AE
from TrafficFlowClassification.data.dataLoader import data_loader
from TrafficFlowClassification.data.tensordata import get_tensor_data
from TrafficFlowClassification.utils.helper import adjust_learning_rate, save_checkpoint
from TrafficFlowClassification.utils.evaluate_tools import display_model_performance_metrics
# Train-process helpers adapted for this training setup
from TrafficFlowClassification.utils.helper import AverageMeter, accuracy
# Per-feature mean of the 26 statistical features, computed on the training
# set; used to standardise the "statistic" input before it enters the model.
mean_val = np.array([2.86401660e-03, 0.00000000e+00, 3.08146750e-03, 1.17455448e-02,
                     5.75561597e-03, 6.91365004e-04, 6.64955585e-02, 2.41380099e-02,
                     9.75861990e-01, 0.00000000e+00, 2.89814456e+02, 6.42617944e+01,
                     6.89227965e+00, 2.56964887e+02, 1.36799462e+02, 9.32648320e+01,
                     7.83185943e+01, 1.32048335e+02, 2.09555592e+01, 1.70122810e-02,
                     6.28544986e+00, 3.27195426e-03, 3.60230735e+01, 9.15340653e+00,
                     2.17694894e-06, 7.32748605e+01])
# Per-feature standard deviation of the same features; 0.001 is added to every
# entry so division never hits zero for constant features.
std_val = np.array([3.44500263e-02, 0.00000000e+00, 3.09222563e-02, 8.43027570e-02,
                    4.87519125e-02, 1.48120354e-02, 2.49138903e-01, 1.53477827e-01,
                    1.53477827e-01, 0.00000000e+00, 8.48196659e+02, 1.94163550e+02,
                    1.30259798e+02, 7.62370125e+02, 4.16966374e+02, 1.25455838e+02,
                    2.30658312e+01, 8.78612984e+02, 1.84367543e+02, 1.13978421e-01,
                    1.19289813e+02, 1.45965914e-01, 8.76535415e+02, 1.78680040e+02,
                    4.91812227e-04, 4.40298923e+03]) + 0.001
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Keep the normalisation constants on the same device as the data batches.
mean_val = torch.from_numpy(mean_val).float().to(device)
std_val = torch.from_numpy(std_val).float().to(device)
def train_process(train_loader, model, alpha, criterion_c, criterion_r, optimizer, epoch, device, print_freq):
    """Run one training epoch of the joint classification + reconstruction model.

    Args:
        train_loader (DataLoader): yields (pcap, statistic, target) batches.
        model: network returning (classification logits, reconstructed statistics).
        alpha (float): weight of the classification loss in the combined loss.
        criterion_c: classification loss (e.g. CrossEntropyLoss).
        criterion_r: reconstruction loss (e.g. L1Loss).
        optimizer: optimizer updating ``model``'s parameters.
        epoch (int): current epoch index (used for logging only).
        device (torch.device): device the batches are moved to.
        print_freq (int): log running averages every ``print_freq`` batches.
    """
    c_loss = AverageMeter()  # running classification loss
    r_loss = AverageMeter()  # running reconstruction loss
    losses = AverageMeter()  # running combined loss over this loader
    top1 = AverageMeter()  # running top-1 accuracy over this loader
    model.train()  # switch to training mode
    for i, (pcap, statistic, target) in enumerate(train_loader):
        pcap = (pcap/255).to(device)  # scale raw packet bytes into [0, 1]
        statistic = statistic.to(device)
        statistic = (statistic - mean_val)/std_val  # standardise with module-level stats
        target = target.to(device)
        classific_result, fake_statistic = model(pcap, statistic)  # logits and reconstruction
        loss_c = criterion_c(classific_result, target)  # classification loss
        loss_r = criterion_r(statistic, fake_statistic)  # reconstruction loss
        loss = alpha * loss_c + loss_r  # combined objective
        # Track accuracy and each loss term, weighted by batch size.
        prec1 = accuracy(classific_result.data, target)
        c_loss.update(loss_c.item(), pcap.size(0))
        r_loss.update(loss_r.item(), pcap.size(0))
        losses.update(loss.item(), pcap.size(0))
        top1.update(prec1[0].item(), pcap.size(0))
        # Backpropagation step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % print_freq == 0:
            logger.info(
                'Epoch: [{0}][{1}/{2}], Loss {loss.val:.4f} ({loss.avg:.4f}), Loss_c {loss_c.val:.4f} ({loss_c.avg:.4f}), Loss_r {loss_r.val:.4f} ({loss_r.avg:.4f}), Prec@1 {top1.val:.3f} ({top1.avg:.3f})'
                .format(epoch,
                        i,
                        len(train_loader),
                        loss=losses,
                        loss_c=c_loss,
                        loss_r=r_loss,
                        top1=top1))
def validate_process(val_loader, model, device, print_freq):
    """Evaluate ``model`` on ``val_loader`` and return the average top-1 accuracy."""
    top1 = AverageMeter()
    model.eval()  # switch to evaluate mode
    for batch_idx, (packets, stats, labels) in enumerate(val_loader):
        # Apply the same preprocessing used during training.
        packets = (packets / 255).to(device)
        stats = (stats.to(device) - mean_val) / std_val
        labels = labels.to(device)
        with torch.no_grad():
            output, _ = model(packets, stats)
        # Record batch-weighted top-1 accuracy.
        batch_prec = accuracy(output.data, labels)
        top1.update(batch_prec[0].item(), packets.size(0))
        if (batch_idx + 1) % print_freq == 0:
            logger.info('Test: [{0}/{1}], Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.
                        format(batch_idx, len(val_loader), top1=top1))
    logger.info(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg
def CENTIME_train_pipeline(alpha=1.0):
    """Train and evaluate the resnet-AE mixed model end to end.

    Args:
        alpha (float): weight of the classification loss in the combined
            objective. Defaults to 1.0 so the bare call in ``__main__``
            (previously a TypeError, since ``alpha`` was required) works.
    """
    cfg = setup_config()  # load the config file
    logger.info(cfg)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info('是否使用 GPU 进行训练, {}'.format(device))
    model_path = os.path.join(cfg.train.model_dir, cfg.train.model_name)  # checkpoint path
    model = resnet_AE(model_path, pretrained=False, num_classes=12).to(device)  # init model
    criterion_c = nn.CrossEntropyLoss()  # classification loss
    criterion_r = nn.L1Loss()  # reconstruction loss
    optimizer = optim.Adam(model.parameters(), lr=cfg.train.lr)
    logger.info('成功初始化模型.')
    train_loader = data_loader(
        pcap_file=cfg.train.train_pcap,
        label_file=cfg.train.train_label,
        statistic_file=cfg.train.train_statistic,
        trimed_file_len=cfg.train.TRIMED_FILE_LEN)  # train dataloader
    test_loader = data_loader(
        pcap_file=cfg.train.test_pcap,
        label_file=cfg.train.test_label,
        statistic_file=cfg.train.test_statistic,
        trimed_file_len=cfg.train.TRIMED_FILE_LEN)  # test dataloader
    logger.info('成功加载数据集.')
    best_prec1 = 0
    for epoch in range(cfg.train.epochs):
        adjust_learning_rate(optimizer, epoch, cfg.train.lr)  # lr schedule
        train_process(train_loader, model, alpha, criterion_c, criterion_r,
                      optimizer, epoch, device, 80)  # train for one epoch
        prec1 = validate_process(test_loader, model, device, 20)  # evaluate
        # Remember the best prec@1 and save a checkpoint.
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict()
            }, is_best, model_path)
    # Test mode: compute detailed per-class metrics with the best checkpoint.
    logger.info('进入测试模式.')
    model = resnet_AE(model_path, pretrained=True, num_classes=12).to(device)  # best model
    index2label = {j: i for i, j in cfg.test.label2index.items()}  # index -> label
    label_list = [index2label.get(i) for i in range(12)]  # the 12 label names
    pcap_data, statistic_data, label_data = get_tensor_data(
        pcap_file=cfg.train.test_pcap,
        statistic_file=cfg.train.test_statistic,
        label_file=cfg.train.test_label,
        trimed_file_len=cfg.train.TRIMED_FILE_LEN)  # numpy -> tensor
    pcap_data = (pcap_data/255).to(device)  # same scaling as training
    statistic_data = (statistic_data.to(device) - mean_val)/std_val  # same normalisation
    y_pred, _ = model(pcap_data, statistic_data)  # predict
    _, pred = y_pred.topk(1, 1, largest=True, sorted=True)
    Y_data_label = [index2label.get(i.tolist()) for i in label_data]  # names
    pred_label = [index2label.get(i.tolist()) for i in pred.view(-1).cpu().detach()]
    logger.info('Alpha:{}'.format(alpha))
    display_model_performance_metrics(true_labels=Y_data_label,
                                      predicted_labels=pred_label,
                                      classes=label_list)
    logger.info('Finished! (* ̄︶ ̄)')
def alpha_experiment_CENTIME():
    """Sweep the classification-loss weight and train the pipeline per value."""
    for weight in (0, 0.001, 0.01, 0.1, 0.5, 1, 5, 10, 100):
        CENTIME_train_pipeline(weight)
        time.sleep(10)  # short pause between consecutive runs
if __name__ == "__main__":
    # Pass an explicit alpha: the original bare call raised TypeError because
    # CENTIME_train_pipeline declared `alpha` as a required parameter.
    CENTIME_train_pipeline(alpha=1.0)  # quick smoke test
|
/// <summary>
/// Handles a player click on a cell: starts the simulation on the first click
/// (hiding the dig button), and if the clicked cell holds fluid, removes it
/// and spreads fluid to its empty neighbours.
/// </summary>
/// <param name="clickedCell">The cell the player clicked.</param>
public void OnCellClick(Cell clickedCell)
{
    if (!_simStarted)
    {
        _simStarted = true;
        digbutton.gameObject.SetActive(false);
    }
    if (clickedCell.IsFilledWithFluid())
    {
        clickedCell.RemoveFluid();
        // NOTE(review): this decrements _remainingFluid while the spread code
        // increments _remainigFluidCurr (note the different spelling) —
        // confirm these two counters are intentionally separate fields.
        _remainingFluid--;
        SpreadFluidToNeighbours(clickedCell);
    }
}
/// <summary>
/// Fills every empty neighbour of <paramref name="cell"/> with fluid and
/// bumps the current-fluid counter for each newly filled cell.
/// </summary>
/// <param name="cell">The cell whose neighbours receive fluid.</param>
private void SpreadFluidToNeighbours(Cell cell)
{
    List<Cell> neighbours = GetNeighbourCells(cell);
    foreach (Cell neighbour in neighbours)
    {
        if (neighbour.IsEmpty())
        {
            // NOTE(review): field name is misspelled ("remainig"); renaming
            // would touch other members outside this view, so only flagged.
            neighbour.FillWithFluid();
            _remainigFluidCurr++;
        }
    }
}
/// <summary>
/// Returns the cells adjacent to <paramref name="cell"/> on the grid.
/// </summary>
/// <param name="cell">The cell whose neighbours are requested.</param>
/// <returns>The list of neighbouring cells.</returns>
private List<Cell> GetNeighbourCells(Cell cell)
{
    // The original body had no return statement, which fails to compile
    // (CS0161: not all code paths return a value). Fail loudly until the
    // real neighbour lookup is implemented.
    throw new System.NotImplementedException(
        "GetNeighbourCells: neighbour lookup is not implemented yet.");
}
/// <summary>
/// Per-frame check: keeps the game running while fluid remains, otherwise
/// ends it. (Both branches are currently placeholders.)
/// </summary>
void Update()
{
    bool fluidRemains = _remainigFluidCurr > 0;
    if (!fluidRemains)
    {
        // End the game
    }
    else
    {
        // Continue the game
    }
}
<reponame>aicevote/VoteFrontII
// Nuxt.js configuration for the VoteFront II SPA.
export default {
  mode: 'spa',
  /*
  ** Headers of the page (rendered into the HTML <head>)
  */
  head: {
    title: process.env.npm_package_name || '',
    meta: [
      { charset: 'utf-8' },
      { name: 'viewport', content: 'width=device-width, initial-scale=1' },
      { hid: 'description', name: 'description', content: process.env.npm_package_description || '' }
    ],
    link: [
      { rel: 'icon', type: 'image/x-icon', href: '/favicon.ico' }
    ]
  },
  /*
  ** Customize the progress-bar color
  */
  loading: { color: '#fff' },
  /*
  ** Global CSS (Pure.css base and responsive grid)
  */
  css: [
    'purecss/build/pure-min.css',
    'purecss/build/grids-responsive-min.css'
  ],
  /*
  ** Plugins to load before mounting the App
  */
  plugins: [
  ],
  /*
  ** Nuxt.js dev-modules (build-time only)
  */
  buildModules: [
    '@nuxt/typescript-build',
  ],
  /*
  ** Nuxt.js modules
  */
  modules: [
    // Doc: https://axios.nuxtjs.org/usage
    '@nuxtjs/axios',
    '@nuxtjs/pwa',
    '@nuxtjs/sitemap',
    '@nuxtjs/fontawesome'
  ],
  /*
  ** Axios module configuration
  ** See https://axios.nuxtjs.org/options
  */
  axios: {
  },
  /*
  ** PWA module configuration (Open Graph / Twitter card metadata)
  ** See https://pwa.nuxtjs.org
  */
  pwa: {
    meta: {
      ogHost: 'https://beta.aicevote.com',
      ogImage: '/ogp.png',
      twitterCard: 'summary_large_image',
      twitterSite: 'https://beta.aicevote.com',
      twitterCreator: process.env.npm_package_author_name || ''
    }
  },
  /*
  ** Sitemap module configuration
  ** See https://github.com/nuxt-community/sitemap-module#sitemap-options
  */
  sitemap: {
    hostname: 'https://beta.aicevote.com'
  },
  /*
  ** Fontawesome module configuration (icons registered as <fa-*> components)
  ** See https://github.com/nuxt-community/fontawesome-module#module-options
  */
  fontawesome: {
    component: 'fa',
    suffix: true,
    icons: {
      solid: [
        'faChartArea', 'faCheck', 'faComment',
        'faHome', 'faPalette', 'faPaperPlane',
        'faServer', 'faSignInAlt',
        'faSignOutAlt', 'faVoteYea'
      ],
      brands: ['faGithub', 'faHtml5', 'faTwitter']
    }
  },
  /*
  ** Build configuration
  */
  build: {
    /*
    ** You can extend webpack config here
    */
    extend(config, ctx) {
    }
  },
  // Generate a 404 fallback page so the SPA works on static hosting.
  generate: {
    fallback: true
  }
}
|
import numpy as np
class Box:
    """
    Continuous state/action space, analogous to ``gym.spaces.box.Box``.

    Bounds may be given per-dimension as np.ndarrays, or as scalars that are
    broadcast to every dimension when an explicit ``shape`` is supplied.
    """
    def __init__(self, low, high, shape=None):
        """
        Constructor.

        Args:
            low ([float, np.ndarray]): minimum value per dimension; a scalar
                applies to every dimension, an np.ndarray gives the minimum
                of each dimension element-wise;
            high ([float, np.ndarray]): maximum value per dimension, with the
                same scalar/array convention as ``low``;
            shape (np.ndarray, None): dimensions of the space; must match the
                shapes of ``low`` and ``high`` when they are np.ndarrays.
        """
        self._low = low
        self._high = high
        if shape is None:
            # Bounds must be arrays here; take the shape from them directly.
            self._shape = low.shape
        else:
            self._shape = shape
            if np.isscalar(low) and np.isscalar(high):
                # Broadcast scalar bounds across every dimension.
                self._low = self._low + np.zeros(shape)
                self._high = self._high + np.zeros(shape)
        assert self._low.shape == self._high.shape

    @property
    def low(self):
        """
        Returns:
            The minimum value of each dimension of the space.
        """
        return self._low

    @property
    def high(self):
        """
        Returns:
            The maximum value of each dimension of the space.
        """
        return self._high

    @property
    def shape(self):
        """
        Returns:
            The dimensions of the space.
        """
        return self._shape
class Discrete:
    """
    Discrete state/action space, analogous to ``gym.spaces.discrete.Discrete``:
    the integers ``0 .. n-1``.
    """
    def __init__(self, n):
        """
        Constructor.

        Args:
            n (int): the number of values of the space.
        """
        self.n = n
        self.values = np.arange(n)

    @property
    def size(self):
        """
        Returns:
            The number of elements of the space, as a 1-tuple.
        """
        return (self.n,)

    @property
    def shape(self):
        """
        Returns:
            The shape of the space, which is always (1,).
        """
        return (1,)
|
#!/bin/bash
# Regenerate the typed client, lister, and informer code for the
# sriovnetwork:v1 API group using the vendored k8s code-generator.
GEN_SCRIPT="vendor/k8s.io/code-generator/generate-groups.sh"

# The vendored script loses its executable bit; grant it temporarily.
chmod +x "${GEN_SCRIPT}"
"./${GEN_SCRIPT}" client,lister,informer \
  github.com/openshift/sriov-network-operator/pkg/client \
  github.com/openshift/sriov-network-operator/apis \
  sriovnetwork:v1 \
  --go-header-file hack/boilerplate.go.txt
chmod -x "${GEN_SCRIPT}"
|
<gh_stars>0
import { AdapterError } from '@chainlink/ea-bootstrap'
import { ExecuteWithConfig } from '@chainlink/types'
import { BigNumber, ethers } from 'ethers'
import { getDataFromAcrossChains, inputParameters as commonInputParameters } from '../utils'
import { Config } from '../config'
import { getContractAddress } from '../utils'
import { DEBT_CACHE_ABI } from './abi'
// Needs to be exported so that doc generator script works
export const inputParameters = commonInputParameters
export const supportedEndpoints = ['debt']

// Adapter entry point: delegates to the shared cross-chain helper, which
// invokes getTotalDebtIssued (below) over the requested chains.
export const execute: ExecuteWithConfig<Config> = async (request, _, config) =>
  await getDataFromAcrossChains(request, config, getTotalDebtIssued)
/**
 * Queries the DebtCache contract on each configured chain in parallel and
 * returns the sum of the reported current debt across all of them.
 *
 * Throws an AdapterError when a chain is missing from the config or when a
 * contract call fails.
 */
const getTotalDebtIssued = async (
  jobRunID: string,
  config: Config,
  chainsToQuery: string[],
): Promise<BigNumber> => {
  const perChainDebt = await Promise.all(
    chainsToQuery.map(async (network): Promise<BigNumber> => {
      const chainConfig = config.chains[network]
      if (!chainConfig)
        throw new AdapterError({
          jobRunID,
          statusCode: 500,
          message: `Chain ${network} not configured`,
        })
      const provider = new ethers.providers.JsonRpcProvider(chainConfig.rpcURL)
      try {
        // Resolve the DebtCache contract address through the address resolver.
        const debtCacheAddress = await getContractAddress(
          provider,
          chainConfig.chainAddressResolverAddress,
          'DebtCache',
        )
        const debtCache = new ethers.Contract(debtCacheAddress, DEBT_CACHE_ABI, provider)
        const [debtIssued] = await debtCache.currentDebt()
        return debtIssued
      } catch (e) {
        throw new AdapterError({
          jobRunID,
          message: `Failed to fetch debt data from chain ${network}. Error Message: ${e}`,
        })
      }
    }),
  )
  // Aggregate the per-chain results into a single total.
  return perChainDebt.reduce((total, debt) => total.add(debt), BigNumber.from(0))
}
|
/*
* Copyright (c) 2012 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2007 MIPS Technologies, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: <NAME>
*
*/
#ifndef __CPU_INORDER_CPU_HH__
#define __CPU_INORDER_CPU_HH__
#include <iostream>
#include <list>
#include <queue>
#include <set>
#include <vector>
#include "arch/isa_traits.hh"
#include "arch/registers.hh"
#include "arch/types.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/inorder_dyn_inst.hh"
#include "cpu/inorder/pipeline_stage.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/reg_dep_map.hh"
#include "cpu/inorder/thread_state.hh"
#include "cpu/o3/dep_graph.hh"
#include "cpu/o3/rename_map.hh"
#include "cpu/activity.hh"
#include "cpu/base.hh"
#include "cpu/simple_thread.hh"
#include "cpu/timebuf.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"
#include "sim/process.hh"
// Forward declarations for types referenced below only by pointer/reference,
// avoiding the cost of including their full headers here.
class CacheUnit;
class ThreadContext;
class MemInterface;
class MemObject;
class Process;
class ResourcePool;
class InOrderCPU : public BaseCPU
{
protected:
typedef ThePipeline::Params Params;
typedef InOrderThreadState Thread;
//ISA TypeDefs
typedef TheISA::IntReg IntReg;
typedef TheISA::FloatReg FloatReg;
typedef TheISA::FloatRegBits FloatRegBits;
typedef TheISA::MiscReg MiscReg;
typedef TheISA::RegIndex RegIndex;
//DynInstPtr TypeDefs
typedef ThePipeline::DynInstPtr DynInstPtr;
typedef std::list<DynInstPtr>::iterator ListIt;
//TimeBuffer TypeDefs
typedef TimeBuffer<InterStageStruct> StageQueue;
friend class Resource;
public:
/** Constructs a CPU with the given parameters. */
InOrderCPU(Params *params);
/* Destructor */
~InOrderCPU();
/** Return a reference to the data port. */
virtual CpuPort &getDataPort() { return dataPort; }
/** Return a reference to the instruction port. */
virtual CpuPort &getInstPort() { return instPort; }
/** CPU ID */
int cpu_id;
// SE Mode ASIDs
ThreadID asid[ThePipeline::MaxThreads];
/** Type of core that this is */
std::string coreType;
// Only need for SE MODE
enum ThreadModel {
Single,
SMT,
SwitchOnCacheMiss
};
ThreadModel threadModel;
int readCpuId() { return cpu_id; }
void setCpuId(int val) { cpu_id = val; }
Params *cpu_params;
public:
enum Status {
Running,
Idle,
Halted,
Blocked,
SwitchedOut
};
/** Overall CPU status. */
Status _status;
private:
/**
* CachePort class for the in-order CPU, interacting with a
* specific CacheUnit in the pipeline.
*/
class CachePort : public CpuPort
{
private:
/** Pointer to cache unit */
CacheUnit *cacheUnit;
public:
/** Default constructor. */
CachePort(CacheUnit *_cacheUnit, const std::string& name);
protected:
/** Timing version of receive */
bool recvTimingResp(PacketPtr pkt);
/** Handles doing a retry of a failed timing request. */
void recvRetry();
/** Ignoring snoops for now. */
void recvTimingSnoopReq(PacketPtr pkt) { }
};
/** Define TickEvent for the CPU */
class TickEvent : public Event
{
private:
/** Pointer to the CPU. */
InOrderCPU *cpu;
public:
/** Constructs a tick event. */
TickEvent(InOrderCPU *c);
/** Processes a tick event, calling tick() on the CPU. */
void process();
/** Returns the description of the tick event. */
const char *description() const;
};
/** The tick event used for scheduling CPU ticks. */
TickEvent tickEvent;
/** Schedule tick event, regardless of its current state. */
void scheduleTickEvent(Cycles delay)
{
assert(!tickEvent.scheduled() || tickEvent.squashed());
reschedule(&tickEvent, clockEdge(delay), true);
}
/** Unschedule tick event, regardless of its current state. */
void unscheduleTickEvent()
{
if (tickEvent.scheduled())
tickEvent.squash();
}
public:
// List of Events That can be scheduled from
// within the CPU.
// NOTE(1): The Resource Pool also uses this event list
// to schedule events broadcast to all resources interfaces
// NOTE(2): CPU Events usually need to schedule a corresponding resource
// pool event.
enum CPUEventType {
ActivateThread,
ActivateNextReadyThread,
DeactivateThread,
HaltThread,
SuspendThread,
Trap,
Syscall,
SquashFromMemStall,
UpdatePCs,
NumCPUEvents
};
static std::string eventNames[NumCPUEvents];
enum CPUEventPri {
InOrderCPU_Pri = Event::CPU_Tick_Pri,
Syscall_Pri = Event::CPU_Tick_Pri + 9,
ActivateNextReadyThread_Pri = Event::CPU_Tick_Pri + 10
};
/** Define CPU Event */
class CPUEvent : public Event
{
protected:
InOrderCPU *cpu;
public:
CPUEventType cpuEventType;
ThreadID tid;
DynInstPtr inst;
Fault fault;
unsigned vpe;
short syscall_num;
public:
/** Constructs a CPU event. */
CPUEvent(InOrderCPU *_cpu, CPUEventType e_type, Fault fault,
ThreadID _tid, DynInstPtr inst, CPUEventPri event_pri);
/** Set Type of Event To Be Scheduled */
void setEvent(CPUEventType e_type, Fault _fault, ThreadID _tid,
DynInstPtr _inst)
{
fault = _fault;
cpuEventType = e_type;
tid = _tid;
inst = _inst;
vpe = 0;
}
/** Processes a CPU event. */
void process();
/** Returns the description of the CPU event. */
const char *description() const;
/** Schedule Event */
void scheduleEvent(Cycles delay);
/** Unschedule This Event */
void unscheduleEvent();
};
/** Schedule a CPU Event */
void scheduleCpuEvent(CPUEventType cpu_event, Fault fault, ThreadID tid,
DynInstPtr inst, Cycles delay = Cycles(0),
CPUEventPri event_pri = InOrderCPU_Pri);
public:
/** Width (processing bandwidth) of each stage */
int stageWidth;
/** Interface between the CPU and CPU resources. */
ResourcePool *resPool;
/** Instruction used to signify that there is no *real* instruction in
buffer slot */
DynInstPtr dummyInst[ThePipeline::MaxThreads];
DynInstPtr dummyBufferInst;
DynInstPtr dummyReqInst;
DynInstPtr dummyTrapInst[ThePipeline::MaxThreads];
/** Used by resources to signify a denied access to a resource. */
ResourceRequest *dummyReq[ThePipeline::MaxThreads];
/** The Pipeline Stages for the CPU */
PipelineStage *pipelineStage[ThePipeline::NumStages];
/** Program Counters */
TheISA::PCState pc[ThePipeline::MaxThreads];
/** Last Committed PC */
TheISA::PCState lastCommittedPC[ThePipeline::MaxThreads];
/** The Register File for the CPU */
union {
FloatReg f[ThePipeline::MaxThreads][TheISA::NumFloatRegs];
FloatRegBits i[ThePipeline::MaxThreads][TheISA::NumFloatRegs];
} floatRegs;
TheISA::IntReg intRegs[ThePipeline::MaxThreads][TheISA::NumIntRegs];
/** ISA state */
std::vector<TheISA::ISA *> isa;
/** Dependency Tracker for Integer & Floating Point Regs */
RegDepMap archRegDepMap[ThePipeline::MaxThreads];
/** Register Types Used in Dependency Tracking */
enum RegType { IntType, FloatType, MiscType, NumRegTypes};
/** Global communication structure */
TimeBuffer<TimeStruct> timeBuffer;
/** Communication structure that sits in between pipeline stages */
StageQueue *stageQueue[ThePipeline::NumStages-1];
TheISA::TLB *getITBPtr();
TheISA::TLB *getDTBPtr();
TheISA::Decoder *getDecoderPtr(unsigned tid);
/** Accessor Type for the SkedCache */
typedef uint32_t SkedID;
/** Cache of Instruction Schedule using the instruction's name as a key */
static m5::hash_map<SkedID, ThePipeline::RSkedPtr> skedCache;
typedef m5::hash_map<SkedID, ThePipeline::RSkedPtr>::iterator SkedCacheIt;
/** Initialized to last iterator in map, signifying a invalid entry
on map searches
*/
SkedCacheIt endOfSkedIt;
ThePipeline::RSkedPtr frontEndSked;
ThePipeline::RSkedPtr faultSked;
/** Add a new instruction schedule to the schedule cache */
void addToSkedCache(DynInstPtr inst, ThePipeline::RSkedPtr inst_sked)
{
SkedID sked_id = genSkedID(inst);
assert(skedCache.find(sked_id) == skedCache.end());
skedCache[sked_id] = inst_sked;
}
/** Find a instruction schedule */
ThePipeline::RSkedPtr lookupSked(DynInstPtr inst)
{
SkedID sked_id = genSkedID(inst);
SkedCacheIt lookup_it = skedCache.find(sked_id);
if (lookup_it != endOfSkedIt) {
return (*lookup_it).second;
} else {
return NULL;
}
}
static const uint8_t INST_OPCLASS = 26;
static const uint8_t INST_LOAD = 25;
static const uint8_t INST_STORE = 24;
static const uint8_t INST_CONTROL = 23;
static const uint8_t INST_NONSPEC = 22;
static const uint8_t INST_DEST_REGS = 18;
static const uint8_t INST_SRC_REGS = 14;
static const uint8_t INST_SPLIT_DATA = 13;
inline SkedID genSkedID(DynInstPtr inst)
{
SkedID id = 0;
id = (inst->opClass() << INST_OPCLASS) |
(inst->isLoad() << INST_LOAD) |
(inst->isStore() << INST_STORE) |
(inst->isControl() << INST_CONTROL) |
(inst->isNonSpeculative() << INST_NONSPEC) |
(inst->numDestRegs() << INST_DEST_REGS) |
(inst->numSrcRegs() << INST_SRC_REGS) |
(inst->splitInst << INST_SPLIT_DATA);
return id;
}
ThePipeline::RSkedPtr createFrontEndSked();
ThePipeline::RSkedPtr createFaultSked();
ThePipeline::RSkedPtr createBackEndSked(DynInstPtr inst);
class StageScheduler {
private:
ThePipeline::RSkedPtr rsked;
int stageNum;
int nextTaskPriority;
public:
StageScheduler(ThePipeline::RSkedPtr _rsked, int stage_num)
: rsked(_rsked), stageNum(stage_num),
nextTaskPriority(0)
{ }
void needs(int unit, int request) {
rsked->push(new ScheduleEntry(
stageNum, nextTaskPriority++, unit, request
));
}
void needs(int unit, int request, int param) {
rsked->push(new ScheduleEntry(
stageNum, nextTaskPriority++, unit, request, param
));
}
};
private:
/** Data port. Note that it has to appear after the resPool. */
CachePort dataPort;
/** Instruction port. Note that it has to appear after the resPool. */
CachePort instPort;
public:
/** Registers statistics. */
void regStats();
/** Ticks CPU, calling tick() on each stage, and checking the overall
* activity to see if the CPU should deschedule itself.
*/
void tick();
/** Initialize the CPU */
void init();
/** HW return from error interrupt. */
Fault hwrei(ThreadID tid);
bool simPalCheck(int palFunc, ThreadID tid);
void checkForInterrupts();
/** Returns the Fault for any valid interrupt. */
Fault getInterrupts();
/** Processes any an interrupt fault. */
void processInterrupts(Fault interrupt);
/** Halts the CPU. */
void halt() { panic("Halt not implemented!\n"); }
/** Check if this address is a valid instruction address. */
bool validInstAddr(Addr addr) { return true; }
/** Check if this address is a valid data address. */
bool validDataAddr(Addr addr) { return true; }
/** Schedule a syscall on the CPU */
void syscallContext(Fault fault, ThreadID tid, DynInstPtr inst,
Cycles delay = Cycles(0));
/** Executes a syscall.*/
void syscall(int64_t callnum, ThreadID tid);
/** Schedule a trap on the CPU */
void trapContext(Fault fault, ThreadID tid, DynInstPtr inst,
Cycles delay = Cycles(0));
/** Perform trap to Handle Given Fault */
void trap(Fault fault, ThreadID tid, DynInstPtr inst);
/** Schedule thread activation on the CPU */
void activateContext(ThreadID tid, Cycles delay = Cycles(0));
/** Add Thread to Active Threads List. */
void activateThread(ThreadID tid);
/** Activate Thread In Each Pipeline Stage */
void activateThreadInPipeline(ThreadID tid);
/** Schedule Thread Activation from Ready List */
void activateNextReadyContext(Cycles delay = Cycles(0));
/** Add Thread From Ready List to Active Threads List. */
void activateNextReadyThread();
/** Schedule a thread deactivation on the CPU */
void deactivateContext(ThreadID tid, Cycles delay = Cycles(0));
/** Remove from Active Thread List */
void deactivateThread(ThreadID tid);
/** Schedule a thread suspension on the CPU */
void suspendContext(ThreadID tid);
/** Suspend Thread, Remove from Active Threads List, Add to Suspend List */
void suspendThread(ThreadID tid);
/** Schedule a thread halt on the CPU */
void haltContext(ThreadID tid);
/** Halt Thread, Remove from Active Thread List, Place Thread on Halted
* Threads List
*/
void haltThread(ThreadID tid);
/** squashFromMemStall() - sets up a squash event
* squashDueToMemStall() - squashes pipeline
* @note: maybe squashContext/squashThread would be better?
*/
void squashFromMemStall(DynInstPtr inst, ThreadID tid,
Cycles delay = Cycles(0));
void squashDueToMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid);
void removePipelineStalls(ThreadID tid);
void squashThreadInPipeline(ThreadID tid);
void squashBehindMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid);
PipelineStage* getPipeStage(int stage_num);
int
contextId()
{
hack_once("return a bogus context id");
return 0;
}
/** Update The Order In Which We Process Threads. */
void updateThreadPriority();
/** Switches a Pipeline Stage to Active. (Unused currently) */
void switchToActive(int stage_idx)
{ /*pipelineStage[stage_idx]->switchToActive();*/ }
/** Get the current instruction sequence number, and increment it. */
InstSeqNum getAndIncrementInstSeq(ThreadID tid)
{ return globalSeqNum[tid]++; }
/** Get the current instruction sequence number, and increment it. */
InstSeqNum nextInstSeqNum(ThreadID tid)
{ return globalSeqNum[tid]; }
/** Increment Instruction Sequence Number */
void incrInstSeqNum(ThreadID tid)
{ globalSeqNum[tid]++; }
/** Set Instruction Sequence Number */
void setInstSeqNum(ThreadID tid, InstSeqNum seq_num)
{
globalSeqNum[tid] = seq_num;
}
/** Get & Update Next Event Number */
InstSeqNum getNextEventNum()
{
#ifdef DEBUG
return cpuEventNum++;
#else
return 0;
#endif
}
/** Register file accessors */
uint64_t readIntReg(RegIndex reg_idx, ThreadID tid);
FloatReg readFloatReg(RegIndex reg_idx, ThreadID tid);
FloatRegBits readFloatRegBits(RegIndex reg_idx, ThreadID tid);
void setIntReg(RegIndex reg_idx, uint64_t val, ThreadID tid);
void setFloatReg(RegIndex reg_idx, FloatReg val, ThreadID tid);
void setFloatRegBits(RegIndex reg_idx, FloatRegBits val, ThreadID tid);
    /** Classifies a unified register index as integer, floating point, or
     *  miscellaneous based on the ISA's dependence-tag base offsets. */
    RegType inline getRegType(RegIndex reg_idx)
    {
        if (reg_idx < TheISA::FP_Base_DepTag)
            return IntType;
        else if (reg_idx < TheISA::Ctrl_Base_DepTag)
            return FloatType;
        else
            return MiscType;
    }
RegIndex flattenRegIdx(RegIndex reg_idx, RegType ®_type, ThreadID tid);
/** Reads a miscellaneous register. */
MiscReg readMiscRegNoEffect(int misc_reg, ThreadID tid = 0);
/** Reads a misc. register, including any side effects the read
* might have as defined by the architecture.
*/
MiscReg readMiscReg(int misc_reg, ThreadID tid = 0);
/** Sets a miscellaneous register. */
void setMiscRegNoEffect(int misc_reg, const MiscReg &val,
ThreadID tid = 0);
/** Sets a misc. register, including any side effects the write
* might have as defined by the architecture.
*/
void setMiscReg(int misc_reg, const MiscReg &val, ThreadID tid = 0);
/** Reads a int/fp/misc reg. from another thread depending on ISA-defined
* target thread
*/
uint64_t readRegOtherThread(unsigned misc_reg,
ThreadID tid = InvalidThreadID);
/** Sets a int/fp/misc reg. from another thread depending on an ISA-defined
* target thread
*/
void setRegOtherThread(unsigned misc_reg, const MiscReg &val,
ThreadID tid);
    /** Reads the commit PC of a specific thread. */
    TheISA::PCState
    pcState(ThreadID tid)
    {
        return pc[tid];
    }

    /** Sets the commit PC of a specific thread. */
    void
    pcState(const TheISA::PCState &newPC, ThreadID tid)
    {
        pc[tid] = newPC;
    }

    /** Current instruction address of a thread's commit PC. */
    Addr instAddr(ThreadID tid) { return pc[tid].instAddr(); }
    /** Address of the next instruction for a thread. */
    Addr nextInstAddr(ThreadID tid) { return pc[tid].nextInstAddr(); }
    /** Current micro-op PC of a thread. */
    MicroPC microPC(ThreadID tid) { return pc[tid].microPC(); }
/** Function to add instruction onto the head of the list of the
* instructions. Used when new instructions are fetched.
*/
ListIt addInst(DynInstPtr inst);
/** Find instruction on instruction list */
ListIt findInst(InstSeqNum seq_num, ThreadID tid);
/** Function to tell the CPU that an instruction has completed. */
void instDone(DynInstPtr inst, ThreadID tid);
/** Add Instructions to the CPU Remove List*/
void addToRemoveList(DynInstPtr inst);
/** Remove an instruction from CPU */
void removeInst(DynInstPtr inst);
/** Remove all instructions younger than the given sequence number. */
void removeInstsUntil(const InstSeqNum &seq_num,ThreadID tid);
/** Removes the instruction pointed to by the iterator. */
inline void squashInstIt(const ListIt inst_it, ThreadID tid);
/** Cleans up all instructions on the instruction remove list. */
void cleanUpRemovedInsts();
/** Cleans up all events on the CPU event remove list. */
void cleanUpRemovedEvents();
/** Debug function to print all instructions on the list. */
void dumpInsts();
/** Forwards an instruction read to the appropriate data
* resource (indexes into Resource Pool thru "dataPortIdx")
*/
Fault read(DynInstPtr inst, Addr addr,
uint8_t *data, unsigned size, unsigned flags);
/** Forwards an instruction write. to the appropriate data
* resource (indexes into Resource Pool thru "dataPortIdx")
*/
Fault write(DynInstPtr inst, uint8_t *data, unsigned size,
Addr addr, unsigned flags, uint64_t *write_res = NULL);
public:
/** Per-Thread List of all the instructions in flight. */
std::list<DynInstPtr> instList[ThePipeline::MaxThreads];
/** List of all the instructions that will be removed at the end of this
* cycle.
*/
std::queue<ListIt> removeList;
bool trapPending[ThePipeline::MaxThreads];
/** List of all the cpu event requests that will be removed at the end of
* the current cycle.
*/
std::queue<Event*> cpuEventRemoveList;
/** Records if instructions need to be removed this cycle due to
* being retired or squashed.
*/
bool removeInstsThisCycle;
/** True if there is non-speculative Inst Active In Pipeline. Lets any
* execution unit know, NOT to execute while the instruction is active.
*/
bool nonSpecInstActive[ThePipeline::MaxThreads];
/** Instruction Seq. Num of current non-speculative instruction. */
InstSeqNum nonSpecSeqNum[ThePipeline::MaxThreads];
/** Instruction Seq. Num of last instruction squashed in pipeline */
InstSeqNum squashSeqNum[ThePipeline::MaxThreads];
/** Last Cycle that the CPU squashed instruction end. */
Tick lastSquashCycle[ThePipeline::MaxThreads];
std::list<ThreadID> fetchPriorityList;
protected:
/** Active Threads List */
std::list<ThreadID> activeThreads;
/** Ready Threads List */
std::list<ThreadID> readyThreads;
/** Suspended Threads List */
std::list<ThreadID> suspendedThreads;
/** Halted Threads List */
std::list<ThreadID> haltedThreads;
/** Thread Status Functions */
bool isThreadActive(ThreadID tid);
bool isThreadReady(ThreadID tid);
bool isThreadSuspended(ThreadID tid);
private:
/** The activity recorder; used to tell if the CPU has any
* activity remaining or if it can go to idle and deschedule
* itself.
*/
ActivityRecorder activityRec;
public:
    /** Number of Active Threads in the CPU */
    ThreadID numActiveThreads() { return activeThreads.size(); }

    /** Thread id of active thread.
     *  Only used for SwitchOnCacheMiss model.
     *  Assumes only 1 thread active; returns InvalidThreadID when the
     *  active-threads list is empty. */
    ThreadID activeThreadId()
    {
        if (numActiveThreads() > 0)
            return activeThreads.front();
        else
            return InvalidThreadID;
    }
/** Records that there was time buffer activity this cycle. */
void activityThisCycle() { activityRec.activity(); }
/** Changes a stage's status to active within the activity recorder. */
void activateStage(const int idx)
{ activityRec.activateStage(idx); }
/** Changes a stage's status to inactive within the activity recorder. */
void deactivateStage(const int idx)
{ activityRec.deactivateStage(idx); }
/** Wakes the CPU, rescheduling the CPU if it's not already active. */
void wakeCPU();
virtual void wakeup();
/* LL/SC debug functionality
unsigned stCondFails;
unsigned readStCondFailures()
{ return stCondFails; }
unsigned setStCondFailures(unsigned st_fails)
{ return stCondFails = st_fails; }
*/
    /** Returns a pointer to a thread's context (defaults to thread 0). */
    ThreadContext *tcBase(ThreadID tid = 0)
    {
        return thread[tid]->getTC();
    }

    /** Count the Total Instructions Committed in the CPU: sum of each
     *  thread's numInst counter. */
    virtual Counter totalInsts() const
    {
        Counter total(0);
        for (ThreadID tid = 0; tid < (ThreadID)thread.size(); tid++)
            total += thread[tid]->numInst;
        return total;
    }

    /** Count the Total Ops Committed in the CPU: sum of each thread's
     *  numOp counter. */
    virtual Counter totalOps() const
    {
        Counter total(0);
        for (ThreadID tid = 0; tid < (ThreadID)thread.size(); tid++)
            total += thread[tid]->numOp;
        return total;
    }
/** Pointer to the system. */
System *system;
/** The global sequence number counter. */
InstSeqNum globalSeqNum[ThePipeline::MaxThreads];
#ifdef DEBUG
/** The global event number counter. */
InstSeqNum cpuEventNum;
/** Number of resource requests active in CPU **/
unsigned resReqCount;
#endif
Addr lockAddr;
/** Temporary fix for the lock flag, works in the UP case. */
bool lockFlag;
/** Counter of how many stages have completed draining */
int drainCount;
/** Pointers to all of the threads in the CPU. */
std::vector<Thread *> thread;
/** Per-Stage Instruction Tracing */
bool stageTracing;
/** The cycle that the CPU was last running, used for statistics. */
Tick lastRunningCycle;
void updateContextSwitchStats();
unsigned instsPerSwitch;
Stats::Average instsPerCtxtSwitch;
Stats::Scalar numCtxtSwitches;
/** Update Thread , used for statistic purposes*/
inline void tickThreadStats();
/** Per-Thread Tick */
Stats::Vector threadCycles;
/** Tick for SMT */
Stats::Scalar smtCycles;
/** Stat for total number of times the CPU is descheduled. */
Stats::Scalar timesIdled;
/** Stat for total number of cycles the CPU spends descheduled or no
* stages active.
*/
Stats::Scalar idleCycles;
/** Stat for total number of cycles the CPU is active. */
Stats::Scalar runCycles;
/** Percentage of cycles a stage was active */
Stats::Formula activity;
/** Instruction Mix Stats */
Stats::Scalar comLoads;
Stats::Scalar comStores;
Stats::Scalar comBranches;
Stats::Scalar comNops;
Stats::Scalar comNonSpec;
Stats::Scalar comInts;
Stats::Scalar comFloats;
/** Stat for the number of committed instructions per thread. */
Stats::Vector committedInsts;
/** Stat for the number of committed ops per thread. */
Stats::Vector committedOps;
/** Stat for the number of committed instructions per thread. */
Stats::Vector smtCommittedInsts;
/** Stat for the total number of committed instructions. */
Stats::Scalar totalCommittedInsts;
/** Stat for the CPI per thread. */
Stats::Formula cpi;
/** Stat for the SMT-CPI per thread. */
Stats::Formula smtCpi;
/** Stat for the total CPI. */
Stats::Formula totalCpi;
/** Stat for the IPC per thread. */
Stats::Formula ipc;
/** Stat for the total IPC. */
Stats::Formula smtIpc;
/** Stat for the total IPC. */
Stats::Formula totalIpc;
};
#endif // __CPU_O3_CPU_HH__
|
import { all, takeLatest } from 'redux-saga/effects';
import { Types as FavoriteTypes } from '~/store/ducks/favorites';
import { addFavoriteRequest } from './favorites';
// Root saga: registers every watcher for the store. takeLatest cancels any
// in-flight addFavoriteRequest when a newer ADD_REQUEST action arrives.
export default function* rootSaga() {
  return yield all([takeLatest(FavoriteTypes.ADD_REQUEST, addFavoriteRequest)]);
}
|
#!/usr/bin/env bash
#
# Installs Kubespray on remote target machines.
#
set -e -o pipefail
KS_COMMIT="${KS_COMMIT:-master}"
# Fetch a clean copy of the Kubespray sources pinned to $KS_COMMIT.
get_kubespray () {
    # Remove any previous checkout so we always start from a clean clone.
    echo "Cleaning Up Old Kubespray Installation"
    rm -rf kubespray
    # git is needed for the clone below; apt-get is the stable CLI for
    # scripting (plain `apt` warns about an unstable interface).
    sudo apt-get install -y git
    # Download Kubespray. The project moved from kubernetes-incubator to
    # kubernetes-sigs; clone the canonical location directly instead of
    # relying on the redirect.
    echo "Downloading Kubespray"
    git clone https://github.com/kubernetes-sigs/kubespray.git kubespray
    pushd kubespray
    git checkout "$KS_COMMIT"
    popd
}
# Create/refresh the python virtualenv, install Kubespray's requirements,
# and generate the per-cluster Ansible inventory.
prepare_kubespray () {
    # Ensure the venv module is usable. The old `pyvenv` wrapper was
    # deprecated and removed (Python 3.8); probe `python3 -m venv` instead.
    if ! python3 -m venv --help >/dev/null 2>&1
    then
        sudo apt-get install -y python3-venv
    fi
    # Create the virtualenv only on first run.
    # BUGFIX: the original tested `-x ks_venv/bin/activate` — activate is
    # never executable, so the condition was always true and the venv was
    # rebuilt (and requirements reinstalled) on every invocation. Use -f.
    if [ ! -f "ks_venv/bin/activate" ]
    then
        python3 -m venv ks_venv
        # shellcheck disable=SC1091
        source ks_venv/bin/activate
        pip install -U pip    # upgrade pip
        pip install wheel     # to avoid bdist error while compiling modules
        pip install -r kubespray/requirements.txt
    else
        # shellcheck disable=SC1091
        source ks_venv/bin/activate
    fi
    # Regenerate the per-cluster inventory and var files from scratch.
    echo "Generating The Inventory File"
    rm -rf "inventories/${DEPLOYMENT_NAME}"
    mkdir -p "inventories/${DEPLOYMENT_NAME}"
    cp -r kubespray/inventory/sample/group_vars "inventories/${DEPLOYMENT_NAME}/group_vars"
    CONFIG_FILE="inventories/${DEPLOYMENT_NAME}/inventory.yml" python3 kubespray/contrib/inventory_builder/inventory.py "${NODES[@]}"
    # Record cluster-specific configuration for the ansible run.
    echo "${NODES[*]}"
    ansible-playbook k8s-configs.yaml \
        --extra-vars "deployment_name=${DEPLOYMENT_NAME} k8s_nodes='${NODES[*]}' kubespray_remote_ssh_user='${REMOTE_SSH_USER}'"
}
# Kubespray's inventory builder names hosts node1..nodeN; rewrite those
# placeholders back to the real hostnames given on the command line.
replace_hostname () {
    CONFIG_FILE="inventories/${DEPLOYMENT_NAME}/inventory.yml"
    echo "${NODES[*]}"
    for i in "${!NODES[@]}"
    do
        # BUGFIX: the original used `sed -ie`, which GNU sed parses as
        # -i with backup suffix "e", leaving stray inventory.ymle files.
        # Quote the script and the filename as well.
        sed -i -e "s/node$((i+1))/${NODES[$i]}/g" "${CONFIG_FILE}"
    done
}
# Run the two-phase install: node prerequisites, then the Kubespray
# cluster playbook, both against the generated per-cluster inventory.
install_kubespray () {
    # Go to python virtual env (created by prepare_kubespray)
    source ks_venv/bin/activate
    # Prepare Target Machines
    echo "Installing Prerequisites On Remote Machines"
    ansible-playbook -i "inventories/${DEPLOYMENT_NAME}/inventory.yml" k8s-requirements.yaml
    # Install Kubespray (-b: become root on targets, -v: verbose)
    echo "Installing Kubespray"
    ansible-playbook -i "inventories/${DEPLOYMENT_NAME}/inventory.yml" kubespray/cluster.yml -b -v
}
# Tear the cluster down again via Kubespray's reset playbook.
reset_kubespray () {
    # Go to python virtual env (created by prepare_kubespray)
    source ks_venv/bin/activate
    # Reset Kubespray (-b: become root on targets, -v: verbose)
    echo "Resetting Kubespray"
    ansible-playbook -i "inventories/${DEPLOYMENT_NAME}/inventory.yml" kubespray/reset.yml -b -v
}
#
# Exports the Kubespray Config Location
#
# Exports the Kubespray admin kubeconfig location into ~/.bashrc.
source_kubeconfig () {
    # Kubespray writes artifacts as root; hand ownership back to the caller.
    # BUGFIX: bash's $GROUPS is an array and bare ${GROUPS} expands to only
    # its first element; use id -g for the primary group explicitly.
    sudo chown -R "${UID}:$(id -g)" "${PWD}"
    kubeconfig_path="${PWD}/inventories/${DEPLOYMENT_NAME}/artifacts/admin.conf"
    if [ -f "$kubeconfig_path" ]
    then
        # these options are annoying outside of scripts
        set +e +u +o pipefail
        # -q: we only care whether the line already exists.
        if ! grep -q KUBECONFIG "${HOME}/.bashrc"
        then
            echo "setting KUBECONFIG=$kubeconfig_path"
            echo "export KUBECONFIG=$kubeconfig_path" >> "${HOME}/.bashrc"
        else
            echo "KUBECONFIG is already in ${HOME}/.bashrc"
        fi
    else
        echo "kubernetes admin.conf not found at: '$kubeconfig_path'"
        exit 1
    fi
}
#
# Checks if an arbitrary cluster name is given during specifc
# operations.
#
# Bail out with usage text when the mandatory cluster name is missing.
check_cluster_name () {
    if [ -z "$DEPLOYMENT_NAME" ]
    then
        echo "Missing option: clustername" >&2
        echo " "
        display_help
        # Exit codes must be 0-255; `exit -1` is non-portable (bash maps
        # it to 255). Use 1 for "usage error".
        exit 1
    fi
}
#
# Displays the help menu.
#
# Print the usage text (kept byte-for-byte; only this comment was added).
display_help () {
    echo "Usage: $0 {-g|-p|-r|-i|-R|-s|-h} [clustername] [ip...|hostname...]" >&2
    echo " "
    echo " -h, --help Display this help message."
    echo " -g, --get Get Kubespray git source."
    echo " -p, --prepare Prepare kubespray."
    echo " -r, --replace Replace hostnames."
    echo " -i, --install Install Kubespray on <clustername>"
    echo " -R, --reset Reset Kubespray on <clustername>"
    echo " -s, --source Source the Kubectl config for <clustername>"
    echo " "
    echo " clustername An arbitrary name representing the cluster"
    echo " ip The IP address of the remote node(s)"
    echo " "
    echo "Example usages:"
    echo " KS_COMMIT=<kubespray_release_version> ./setup.sh -g clustername"
    echo " REMOTE_SSH_USER=orchard ./setup.sh -p clustername [ip...]"
    echo " ./setup.sh -r clustername [hostname...]"
    echo " ./setup.sh -i clustername"
    echo " ./setup.sh -R clustername"
    echo " ./setup.sh -s clustername"
}
#
# Init
#
# Require at least <option> <clustername>; otherwise show usage and stop.
if [ $# -lt 2 ]
then
    display_help
    exit 0
fi
CLI_OPT=$1
DEPLOYMENT_NAME=$2
shift 2
# Remaining args are target nodes; fall back to the lab defaults.
DEFAULT_NODES=(10.90.0.101 10.90.0.102 10.90.0.103)
NODES=("${@:-${DEFAULT_NODES[@]}}")
REMOTE_SSH_USER="${REMOTE_SSH_USER:-orchard}"
# Dispatch on the single sub-command; every branch exits except -s/--source,
# which breaks so the sourced kubeconfig message is the last thing printed.
while :
do
    case $CLI_OPT in
        -g | --get)
            get_kubespray
            exit 0
            ;;
        -p | --prepare)
            check_cluster_name
            prepare_kubespray
            exit 0
            ;;
        -r | --replace)
            check_cluster_name
            replace_hostname
            exit 0
            ;;
        -i | --install)
            check_cluster_name
            install_kubespray
            exit 0
            ;;
        -R | --reset)
            check_cluster_name
            reset_kubespray
            exit 0
            ;;
        -h | --help)
            display_help
            exit 0
            ;;
        -s | --source)
            check_cluster_name
            source_kubeconfig
            break
            ;;
        --) # End of all options
            shift
            break
            ;;
        *)
            echo Error: Unknown option: "$CLI_OPT" >&2
            echo " "
            display_help
            # BUGFIX: `exit -1` is non-portable (codes are 0-255); use 1.
            exit 1
            ;;
    esac
done
|
def text_analysis(text):
    """Return the most frequent word(s) in *text*.

    Words are split on whitespace and compared case-insensitively
    (lower-cased).  All words tied for the highest count are returned, in
    first-appearance order.

    BUGFIX: the original indexed into an empty dict for empty/whitespace-only
    input and raised IndexError; this version returns [] instead.

    :param text: arbitrary string to analyse
    :return: list of lower-cased words with the maximum frequency
    """
    from collections import Counter  # local import keeps the script self-contained

    freq = Counter(word.lower() for word in text.split())
    if not freq:
        return []
    top = max(freq.values())
    # Counter preserves first-insertion order (Python 3.7+), matching the
    # original's dict-iteration order.
    return [word for word, count in freq.items() if count == top]
# Demo: report the most frequent word(s) in a sample sentence.
sample = "Once upon a time there was a mischievous little boy who loved to play pranks on everyone."
print(text_analysis(sample))
// Copyright 2019 <NAME>
// Copyright 2019 <NAME>
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt
#include "PartialMatchRegex.h"
// Recursive PCRE ((?1) self-reference) that full-matches exactly those
// strings which are themselves syntactically well-formed regular
// expressions; used to decide which prefixes may join the alternation.
// NOTE(review): assumes PCRE recursion support — confirm the grammar it
// accepts matches the regex dialect fed to this class.
const pcrecpp::RE PartialMatchRegex::RE_MATCHER =
    pcrecpp::RE("^((?:(?:[^?+*{}()[\\]\\\\|]+|\\\\.|\\[(?:\\^?\\\\.|\\^[^\\\\]|[^\\\\^])"
                "(?:[^\\]\\\\]+|\\\\.)*\\]|\\((?:\\?[:=!]|\\?<[=!]|\\?>)?(?1)??\\)|\\(\\?"
                "(?:R|[+-]?\\d+)\\))(?:(?:[?+*]|\\{\\d+(?:,\\d*)?\\})[?+]?)?|\\|)*)$");
// Builds both the exact matcher (_basic) and an "extended" alternation
// (_extended) of every proper prefix of `str` that is itself a valid
// regex, plus the full expression — enabling partial-match queries
// against incomplete input.
PartialMatchRegex::PartialMatchRegex(const std::string &str)
{
    // Reject input that is not a valid regex up front.
    if (!isValidRegex(str)) {
        throw std::runtime_error(str + " is not valid Regex");
    }
    _basic = std::make_unique<pcrecpp::RE>(str);
    std::string extReStr;
    if (!str.empty()) {
        auto n = str.size();
        // Rough upper bound on the alternation's final length, reserved up
        // front to avoid repeated reallocation while appending prefixes.
        extReStr.reserve(((n / 2) + 3) * (n + 1));
        for (size_t i = 0; i < n - 1; i++) {
            // Prefix of length i+1; only syntactically complete prefixes
            // (per RE_MATCHER) become alternatives.
            pcrecpp::StringPiece sub(str.c_str(), int(i + 1));
            if (RE_MATCHER.FullMatch(sub)) {
                extReStr += "|(";
                extReStr += sub.as_string();
                extReStr += ")";
            }
        }
        // The complete expression is always the last alternative.
        // NOTE(review): extReStr begins with '|', making the empty string
        // an implicit first alternative — confirm that is intended.
        extReStr += "|(";
        extReStr += str;
        extReStr += ")";
    }
    _extended = std::make_unique<pcrecpp::RE>(extReStr);
}
// Copy constructor: deep-copies each compiled expression that the source
// object holds; absent expressions stay null in the copy.
PartialMatchRegex::PartialMatchRegex(const PartialMatchRegex &other)
{
    _basic = other._basic ? std::make_unique<pcrecpp::RE>(*other._basic)
                          : nullptr;
    _extended = other._extended
                    ? std::make_unique<pcrecpp::RE>(*other._extended)
                    : nullptr;
}
|
# CI bootstrap: install PHP dependencies, rebuild the test database with
# fixtures, refresh the autoloader, then run the PHPUnit suite while
# skipping groups excluded on Travis.
composer install --prefer-dist
php tests/Fixtures/bin/console doctrine:database:drop --env=test --force
php tests/Fixtures/bin/console doctrine:database:create --env=test
php tests/Fixtures/bin/console doctrine:schema:update --env=test --force
php tests/Fixtures/bin/console doctrine:fixtures:load --env=test --no-interaction
composer dump-autoload
vendor/bin/phpunit --exclude-group exclude_travis
|
-- | Second-largest element of a list, with duplicates counted separately
--   (e.g. secondLargest [3,3] == Just 3), or Nothing for lists shorter
--   than two elements.
--
--   BUGFIX: the original used 'sort' without importing Data.List (not in
--   the Prelude), which does not compile; this version is a self-contained
--   single O(n) pass tracking the two largest values seen so far, and
--   returns exactly what @(reverse . sort) xs !! 1@ would.
secondLargest :: (Ord a) => [a] -> Maybe a
secondLargest (x:y:rest) = Just (go (max x y) (min x y) rest)
  where
    -- Invariant: m1 >= m2 are the two largest elements seen so far.
    go _  m2 []     = m2
    go m1 m2 (z:zs)
      | z > m1    = go z m1 zs
      | z > m2    = go m1 z zs
      | otherwise = go m1 m2 zs
secondLargest _ = Nothing
from enum import Enum
class MusicAPP(Enum):
    # Supported music platforms; the value is the URL slug appended to
    # PRE_URL by MusicURLGenerator below.
    qq = "qq"
    wy = "netease"
# Base URL every generated player link starts with.
PRE_URL = "http://www.musictool.top/"


class MusicURLGenerator:
    """Builds musictool.top player URLs for the supported platforms."""

    @staticmethod
    def generate_url(platform):
        """Return the player URL for *platform* (any object whose ``.value``
        is the platform's URL slug, e.g. a MusicAPP member)."""
        return PRE_URL + platform.value

    @staticmethod
    def count_unique_urls(platforms):
        """Return the number of distinct URLs for *platforms*.

        Reuses generate_url so the two methods cannot drift apart (the
        original duplicated the concatenation logic here).
        """
        return len({MusicURLGenerator.generate_url(p) for p in platforms})
# <gh_stars>0
require 'spec_helper_acceptance'
tmpdir = default.tmpdir('tmp')
# Acceptance tests for the hocon_setting resource: exercise ensure =>
# present/absent, setting/value validation errors, and path handling
# against real .conf files under the test tmpdir. Every manifest is
# applied twice to verify idempotence (second run must report no changes).
describe 'hocon_setting resource' do
  after :all do
    shell("rm #{tmpdir}/*.conf", :acceptable_exit_codes => [0,1,2])
  end

  # Applies `pp` twice and asserts the file at `path` contains `content`.
  shared_examples 'has_content' do |path,pp,content|
    before :all do
      shell("rm #{path}", :acceptable_exit_codes => [0,1,2])
    end
    after :all do
      shell("cat #{path}", :acceptable_exit_codes => [0,1,2])
      shell("rm #{path}", :acceptable_exit_codes => [0,1,2])
    end
    it 'applies the manifest twice' do
      apply_manifest(pp, :catch_failures => true)
      apply_manifest(pp, :catch_changes => true)
    end
    describe file(path) do
      it { should be_file }
      #XXX Solaris 10 doesn't support multi-line grep
      it("should contain #{content}", :unless => fact('osfamily') == 'Solaris') {
        should contain(content)
      }
    end
  end

  # Applies `pp`, expects failure output matching `error`, and asserts the
  # file at `path` was never created.
  shared_examples 'has_error' do |path,pp,error|
    before :all do
      shell("rm #{path}", :acceptable_exit_codes => [0,1,2])
    end
    after :all do
      shell("cat #{path}", :acceptable_exit_codes => [0,1,2])
      shell("rm #{path}", :acceptable_exit_codes => [0,1,2])
    end
    it 'applies the manifest and gets a failure message' do
      expect(apply_manifest(pp, :expect_failures => true).stderr).to match(error)
    end
    describe file(path) do
      it { should_not be_file }
    end
  end

  describe 'ensure parameter' do
    context '=> present for top-level and nested' do
      pp = <<-EOS
      hocon_setting { 'ensure => present for section':
        ensure  => present,
        path    => "#{tmpdir}/hocon_setting.conf",
        setting => 'one.two',
        value   => 'three',
      }
      hocon_setting { 'ensure => present for top level':
        ensure  => present,
        path    => "#{tmpdir}/hocon_setting.conf",
        setting => 'four',
        value   => 'five',
      }
      EOS
      it 'applies the manifest twice' do
        apply_manifest(pp, :catch_failures => true)
        apply_manifest(pp, :catch_changes => true)
      end
      describe file("#{tmpdir}/hocon_setting.conf") do
        it { should be_file }
        #XXX Solaris 10 doesn't support multi-line grep
        it("should contain one {\n two=three\n}\nfour=five", :unless => fact('osfamily') == 'Solaris') {
          should contain("one {\n two=three\n}\nfour=five")
        }
      end
    end

    context '=> absent for key/value' do
      before :all do
        # Darwin's echo has no -e flag; other platforms need it for \n.
        if fact('osfamily') == 'Darwin'
          shell("echo \"one {\n two=three\n}\nfour=five\" > #{tmpdir}/hocon_setting.conf")
        else
          shell("echo -e \"one {\n two=three\n}\nfour=five\" > #{tmpdir}/hocon_setting.conf")
        end
      end
      pp = <<-EOS
      hocon_setting { 'ensure => absent for key/value':
        ensure  => absent,
        path    => "#{tmpdir}/hocon_setting.conf",
        setting => 'one.two',
        value   => 'three',
      }
      EOS
      it 'applies the manifest twice' do
        apply_manifest(pp, :catch_failures => true)
        apply_manifest(pp, :catch_changes => true)
      end
      describe file("#{tmpdir}/hocon_setting.conf") do
        it { should be_file }
        it { should contain('four=five') }
        it { should_not contain("two=three") }
      end
    end

    context '=> absent for top-level settings' do
      before :all do
        # Darwin's echo has no -e flag; other platforms need it for \n.
        if fact('osfamily') == 'Darwin'
          shell("echo \"one {\n two=three\n}\nfour=five\" > #{tmpdir}/hocon_setting.conf")
        else
          shell("echo -e \"one {\n two=three\n}\nfour=five\" > #{tmpdir}/hocon_setting.conf")
        end
      end
      after :all do
        shell("cat #{tmpdir}/hocon_setting.conf", :acceptable_exit_codes => [0,1,2])
        shell("rm #{tmpdir}/hocon_setting.conf", :acceptable_exit_codes => [0,1,2])
      end
      pp = <<-EOS
      hocon_setting { 'ensure => absent for top-level':
        ensure  => absent,
        path    => "#{tmpdir}/hocon_setting.conf",
        setting => 'four',
        value   => 'five',
      }
      EOS
      it 'applies the manifest twice' do
        apply_manifest(pp, :catch_failures => true)
        apply_manifest(pp, :catch_changes => true)
      end
      describe file("#{tmpdir}/hocon_setting.conf") do
        it { should be_file }
        it { should_not contain('four=five') }
        it { should contain("one {\n two=three\n}") }
      end
    end
  end

  describe 'setting, value parameters' do
    # Valid parameter combinations and the file content they must produce.
    {
      "setting => 'test.foo', value => 'bar'," => "test {\n foo = bar\n}",
      "setting => 'more.baz', value => 'quux'," => "more {\n baz = quux\n}",
      "setting => 'top', value => 'level'," => "top : \"level\"",
    }.each do |parameter_list, content|
      context parameter_list do
        pp = <<-EOS
        hocon_setting { "#{parameter_list}":
          ensure => present,
          path   => "#{tmpdir}/hocon_setting.conf",
          #{parameter_list}
        }
        EOS
        it_behaves_like 'has_content', "#{tmpdir}/hocon_setting.conf", pp, content
      end
    end
    # Missing-parameter combinations and the error each must raise.
    {
      "" => /setting is a required.+value is a required/,
      "setting => 'test.foo'," => /value is a required/,
      "value => 'bar'," => /setting is a required/,
    }.each do |parameter_list, error|
      context parameter_list do
        pp = <<-EOS
        hocon_setting { "#{parameter_list}":
          ensure => present,
          path   => "#{tmpdir}/hocon_setting.conf",
          #{parameter_list}
        }
        EOS
        it_behaves_like 'has_error', "#{tmpdir}/hocon_setting.conf", pp, error
      end
    end
  end

  describe 'path parameter' do
    [
      "#{tmpdir}/one.conf",
      "#{tmpdir}/two.conf",
      "#{tmpdir}/three.conf",
    ].each do |path|
      context "path => #{path}" do
        pp = <<-EOS
        hocon_setting { 'path => #{path}':
          ensure  => present,
          setting => 'one.two',
          value   => 'three',
          path    => '#{path}',
        }
        EOS
        it_behaves_like 'has_content', path, pp, "one {\n two=three\n}"
      end
    end
    # Relative paths must be rejected.
    context "path => foo" do
      pp = <<-EOS
      hocon_setting { 'path => foo':
        ensure  => present,
        setting => 'one.two',
        value   => 'three',
        path    => 'foo',
      }
      EOS
      it_behaves_like 'has_error', 'foo', pp, /must be fully qualified/
    end
  end
end
|
# Build and launch the eth-netstats dashboard: clone the sources, install
# the grunt CLI globally, fetch npm dependencies, build the assets, then
# start the server (listens on its default port; stays in the foreground).
git clone https://github.com/cubedro/eth-netstats
sudo npm install -g grunt-cli
cd eth-netstats
npm install
grunt
npm start
# Created by hkh at 2018-12-07
# ko, ja (source) -> en (target)
# data_dir: /home/hkh/data/ted2013/ko-en/train.ko-en.[ko|en]
#           /home/hkh/data/ted2013/ja-en/train.ja-en.[ja|en]
TOOLS_DIR=/home/hkh/tools
koen_dir=/home/hkh/data/ted2013/ko-en
jaen_dir=/home/hkh/data/ted2013/ja-en
# Clone Moses (supplies the tokenizer/cleaning scripts used downstream);
# skipped when a checkout already exists.
if [ ! -d "${TOOLS_DIR}/mosesdecoder" ]; then
    echo "Cloning moses for data processing"
    git clone https://github.com/moses-smt/mosesdecoder.git "${TOOLS_DIR}/mosesdecoder"
fi
# Generate Subword Units (BPE)
# Clone Subword NMT (provides learn_bpe.py / apply_bpe.py); idempotent.
if [ ! -d "${TOOLS_DIR}/subword-nmt" ]; then
    git clone https://github.com/rsennrich/subword-nmt.git "${TOOLS_DIR}/subword-nmt"
fi
|
#!/bin/sh
# Resolve this script's own directory (readlink -e is GNU coreutils; not
# available on stock BSD/macOS), load the sibling .env for configuration
# (must define OPENWRT_FOLDER), then switch into the OpenWrt tree.
ROOT=$(readlink -e $(dirname $0))
. $ROOT/.env
cd $OPENWRT_FOLDER
|
// <reponame>scrogatl/spring-petclinic-microservices<gh_stars>1-10
package imports.k8s;
/**
 * Status is a return value for calls that don't return other objects.
 *
 * <p>NOTE(review): this file is machine-generated by jsii-pacmak (see the
 * {@code @Generated} annotation below). Prefer regenerating the bindings
 * over hand-editing; manual changes will be lost on the next generation.
 */
@javax.annotation.Generated(value = "jsii-pacmak/1.14.1 (build 828de8a)", date = "2020-11-30T16:28:28.169Z")
@software.amazon.jsii.Jsii(module = imports.k8s.$Module.class, fqn = "k8s.StatusOptions")
@software.amazon.jsii.Jsii.Proxy(StatusOptions.Jsii$Proxy.class)
public interface StatusOptions extends software.amazon.jsii.JsiiSerializable {

    /**
     * Suggested HTTP return code for this status, 0 if not set.
     */
    default @org.jetbrains.annotations.Nullable java.lang.Number getCode() {
        return null;
    }

    /**
     * Extended data associated with the reason.
     * <p>
     * Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.
     */
    default @org.jetbrains.annotations.Nullable imports.k8s.StatusDetails getDetails() {
        return null;
    }

    /**
     * A human-readable description of the status of this operation.
     */
    default @org.jetbrains.annotations.Nullable java.lang.String getMessage() {
        return null;
    }

    /**
     * Standard list metadata.
     * <p>
     * More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
     */
    default @org.jetbrains.annotations.Nullable imports.k8s.ListMeta getMetadata() {
        return null;
    }

    /**
     * A machine-readable description of why this operation is in the "Failure" status.
     * <p>
     * If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.
     */
    default @org.jetbrains.annotations.Nullable java.lang.String getReason() {
        return null;
    }

    /**
     * @return a {@link Builder} of {@link StatusOptions}
     */
    static Builder builder() {
        return new Builder();
    }

    /**
     * A builder for {@link StatusOptions}
     */
    public static final class Builder implements software.amazon.jsii.Builder<StatusOptions> {
        private java.lang.Number code;
        private imports.k8s.StatusDetails details;
        private java.lang.String message;
        private imports.k8s.ListMeta metadata;
        private java.lang.String reason;

        /**
         * Sets the value of {@link StatusOptions#getCode}
         * @param code Suggested HTTP return code for this status, 0 if not set.
         * @return {@code this}
         */
        public Builder code(java.lang.Number code) {
            this.code = code;
            return this;
        }

        /**
         * Sets the value of {@link StatusOptions#getDetails}
         * @param details Extended data associated with the reason.
         *                Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.
         * @return {@code this}
         */
        public Builder details(imports.k8s.StatusDetails details) {
            this.details = details;
            return this;
        }

        /**
         * Sets the value of {@link StatusOptions#getMessage}
         * @param message A human-readable description of the status of this operation.
         * @return {@code this}
         */
        public Builder message(java.lang.String message) {
            this.message = message;
            return this;
        }

        /**
         * Sets the value of {@link StatusOptions#getMetadata}
         * @param metadata Standard list metadata.
         *                 More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
         * @return {@code this}
         */
        public Builder metadata(imports.k8s.ListMeta metadata) {
            this.metadata = metadata;
            return this;
        }

        /**
         * Sets the value of {@link StatusOptions#getReason}
         * @param reason A machine-readable description of why this operation is in the "Failure" status.
         *               If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.
         * @return {@code this}
         */
        public Builder reason(java.lang.String reason) {
            this.reason = reason;
            return this;
        }

        /**
         * Builds the configured instance.
         * @return a new instance of {@link StatusOptions}
         * @throws NullPointerException if any required attribute was not provided
         */
        @Override
        public StatusOptions build() {
            return new Jsii$Proxy(code, details, message, metadata, reason);
        }
    }

    /**
     * An implementation for {@link StatusOptions}
     */
    @software.amazon.jsii.Internal
    final class Jsii$Proxy extends software.amazon.jsii.JsiiObject implements StatusOptions {
        private final java.lang.Number code;
        private final imports.k8s.StatusDetails details;
        private final java.lang.String message;
        private final imports.k8s.ListMeta metadata;
        private final java.lang.String reason;

        /**
         * Constructor that initializes the object based on values retrieved from the JsiiObject.
         * @param objRef Reference to the JSII managed object.
         */
        protected Jsii$Proxy(final software.amazon.jsii.JsiiObjectRef objRef) {
            super(objRef);
            this.code = software.amazon.jsii.Kernel.get(this, "code", software.amazon.jsii.NativeType.forClass(java.lang.Number.class));
            this.details = software.amazon.jsii.Kernel.get(this, "details", software.amazon.jsii.NativeType.forClass(imports.k8s.StatusDetails.class));
            this.message = software.amazon.jsii.Kernel.get(this, "message", software.amazon.jsii.NativeType.forClass(java.lang.String.class));
            this.metadata = software.amazon.jsii.Kernel.get(this, "metadata", software.amazon.jsii.NativeType.forClass(imports.k8s.ListMeta.class));
            this.reason = software.amazon.jsii.Kernel.get(this, "reason", software.amazon.jsii.NativeType.forClass(java.lang.String.class));
        }

        /**
         * Constructor that initializes the object based on literal property values passed by the {@link Builder}.
         */
        protected Jsii$Proxy(final java.lang.Number code, final imports.k8s.StatusDetails details, final java.lang.String message, final imports.k8s.ListMeta metadata, final java.lang.String reason) {
            super(software.amazon.jsii.JsiiObject.InitializationMode.JSII);
            this.code = code;
            this.details = details;
            this.message = message;
            this.metadata = metadata;
            this.reason = reason;
        }

        @Override
        public final java.lang.Number getCode() {
            return this.code;
        }

        @Override
        public final imports.k8s.StatusDetails getDetails() {
            return this.details;
        }

        @Override
        public final java.lang.String getMessage() {
            return this.message;
        }

        @Override
        public final imports.k8s.ListMeta getMetadata() {
            return this.metadata;
        }

        @Override
        public final java.lang.String getReason() {
            return this.reason;
        }

        /**
         * Serializes this struct for the jsii kernel: only non-null
         * properties are emitted, wrapped in the {@code $jsii.struct}
         * envelope with the struct's fully-qualified name.
         */
        @Override
        @software.amazon.jsii.Internal
        public com.fasterxml.jackson.databind.JsonNode $jsii$toJson() {
            final com.fasterxml.jackson.databind.ObjectMapper om = software.amazon.jsii.JsiiObjectMapper.INSTANCE;
            final com.fasterxml.jackson.databind.node.ObjectNode data = com.fasterxml.jackson.databind.node.JsonNodeFactory.instance.objectNode();
            if (this.getCode() != null) {
                data.set("code", om.valueToTree(this.getCode()));
            }
            if (this.getDetails() != null) {
                data.set("details", om.valueToTree(this.getDetails()));
            }
            if (this.getMessage() != null) {
                data.set("message", om.valueToTree(this.getMessage()));
            }
            if (this.getMetadata() != null) {
                data.set("metadata", om.valueToTree(this.getMetadata()));
            }
            if (this.getReason() != null) {
                data.set("reason", om.valueToTree(this.getReason()));
            }
            final com.fasterxml.jackson.databind.node.ObjectNode struct = com.fasterxml.jackson.databind.node.JsonNodeFactory.instance.objectNode();
            struct.set("fqn", om.valueToTree("k8s.StatusOptions"));
            struct.set("data", data);
            final com.fasterxml.jackson.databind.node.ObjectNode obj = com.fasterxml.jackson.databind.node.JsonNodeFactory.instance.objectNode();
            obj.set("$jsii.struct", struct);
            return obj;
        }

        @Override
        public final boolean equals(final Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            StatusOptions.Jsii$Proxy that = (StatusOptions.Jsii$Proxy) o;
            if (this.code != null ? !this.code.equals(that.code) : that.code != null) return false;
            if (this.details != null ? !this.details.equals(that.details) : that.details != null) return false;
            if (this.message != null ? !this.message.equals(that.message) : that.message != null) return false;
            if (this.metadata != null ? !this.metadata.equals(that.metadata) : that.metadata != null) return false;
            return this.reason != null ? this.reason.equals(that.reason) : that.reason == null;
        }

        @Override
        public final int hashCode() {
            // Standard 31-multiplier combination over all equals() fields,
            // keeping the equals/hashCode contract.
            int result = this.code != null ? this.code.hashCode() : 0;
            result = 31 * result + (this.details != null ? this.details.hashCode() : 0);
            result = 31 * result + (this.message != null ? this.message.hashCode() : 0);
            result = 31 * result + (this.metadata != null ? this.metadata.hashCode() : 0);
            result = 31 * result + (this.reason != null ? this.reason.hashCode() : 0);
            return result;
        }
    }
}
|
//
// Created by ooooo on 2020/3/10.
//
#ifndef CPP_013__SOLUTION1_H_
#define CPP_013__SOLUTION1_H_
#include <iostream>
#include <vector>
using namespace std;
/**
 * "Robot moving range" puzzle: count the cells of an m x n grid reachable
 * from (0, 0) via 4-directional moves, where a cell (i, j) may be entered
 * only if the sum of the decimal digits of i and j does not exceed k.
 */
class Solution {
 public:
    // Sum of the decimal digits of both coordinates.
    int calc_num(int i, int j) {
        int sum = 0;
        while (i != 0 || j != 0) {
            sum += i % 10 + j % 10;
            i /= 10;
            j /= 10;
        }
        return sum;
    }

    // A cell may be entered iff it is in bounds, not yet visited, and its
    // coordinate digit sum stays within the limit k.
    bool is_valid(int i, int j) {
        if (i < 0 || i >= m || j < 0 || j >= n) return false;
        if (marked[i][j]) return false;
        return calc_num(i, j) <= k;
    }

    // Depth-first flood fill; returns how many new cells were reached
    // starting from (i, j).
    int dfs(int i, int j) {
        if (!is_valid(i, j)) return 0;
        marked[i][j] = true;
        int count = 1;
        for (const auto &step : dx_dy) {
            count += dfs(i + step[0], j + step[1]);
        }
        return count;
    }

    int m, n, k;
    // Down, up, right, left.
    std::vector<std::vector<int>> dx_dy = {{1, 0}, {-1, 0}, {0, 1}, {0, -1}};
    std::vector<std::vector<bool>> marked;

    // Entry point: number of cells reachable from (0, 0).
    int movingCount(int m, int n, int k) {
        this->m = m;
        this->n = n;
        this->k = k;
        this->marked.assign(m, std::vector<bool>(n, false));
        return dfs(0, 0);
    }
};
#endif //CPP_013__SOLUTION1_H_
|
<gh_stars>0
import React from 'react'
import Helmet from 'react-helmet'
import styled from 'styled-components'
import config from '../utils/siteConfig'
import SEO from '../components/SEO'
import Footer from '../components/Footer'
import { P, HeaderText } from '../components/Headings'
import Summary from '../components/Summary'
import TestimonialSlider from '../components/TestimonialSlider'
import { Wrapper, HeaderBlock, Block } from '../components/Block'
import { TwoColRow } from '../components/TwoColRow'
import { RowWrapper } from '../components/Row'
import Column from '../components/Column'
import leafService from '../images/leaf1.png'
import leafEstimates from '../images/leaf4.png'
import leafCare from '../images/leaf5.png'
import { TextBlock } from '../components/TextBlocks'
import { RightToMidLeaf, LeftTopLeaf } from '../components/Leaves'
import pine from '../images/leaf3.png'
import pine2 from '../images/leaf6.png'
import { graphql } from 'gatsby'
import ScrollWrapper from '../components/ScrollWrapper'
// Decorative leaf pinned to the left edge; repositioned to the top on phones.
const LeftLeaf = styled(LeftTopLeaf)`
  left: -60px;
  @media (max-width: 600px) {
    bottom: auto;
    top: -10%;
    left: -25%;
  }
`
// Decorative leaf anchored bottom-right; shrinks and shifts as the viewport narrows.
const RightLeaf = styled(RightToMidLeaf)`
  right: -2%;
  bottom: 0;
  @media (max-width: 1100px) {
    right: -5%;
  }
  @media (max-width: 800px) {
    right: -4%;
    height: 22vw;
  }
  @media (max-width: 600px) {
    bottom: 0;
    height: 150px;
    left: auto;
    right: -10%;
  }
`
// Section wrapper for the "Featured Projects" gallery with generous padding.
const FeaturedBlock = styled(Wrapper)`
  padding: 50px 200px 200px 200px;
  @media (max-width: 1200px) {
    padding: 50px 10vw 10vw 10vw;
  }
`
// Highlight-column icon image; scaled relative to viewport width.
const Image = styled.img`
  height: 12vw;
  @media (max-width: 1700px) {
    height: 15vw;
  }
  @media (max-width: 800px) {
    height: 20vw;
  }
`
// Wrapping three-across (two-across on small screens) image grid for the gallery.
const FeaturedRow = styled(RowWrapper)`
  flex-wrap: wrap;
  img {
    width: 32%;
    margin-right: 1%;
    margin-bottom: 1%;
    &:last-of-type {
      margin-right: 0;
    }
    @media (max-width: 800px) {
      width: 48.5%;
    }
  }
`
// Three equal service-highlight columns; stacks vertically and centers
// content below 800px.
const WorkRow = styled(RowWrapper)`
  padding: 100px 200px;
  div {
    flex: 1;
    margin-right: 9%;
    &:last-of-type {
      margin-right: 0;
    }
  }
  @media (max-width: 1200px) {
    padding: 10vw 10vw 6vw 10vw;
  }
  @media (max-width: 800px) {
    flex-direction: column;
    div {
      margin-right: 0;
      margin-top: 5vw;
      display: flex;
      flex-direction: column;
      align-items: center;
      &:first-of-type {
        margin-top: 0;
      }
      p {
        text-align: center;
      }
    }
  }
`
// Row of qualification paragraphs in the hero; stacks on narrow screens.
const HeaderRow = styled(RowWrapper)`
  p {
    margin: 50px 5% 0 0;
  }
  @media (max-width: 750px) {
    flex-direction: column;
    margin-top: 50px;
    p {
      margin: 0;
    }
  }
`
const Work = ({ data }) => {
const summary = data.allContentfulSummary.edges[0].node
const testimonials = data.allContentfulTestimonial.edges
const work = data.allContentfulWork.edges[0].node
const postNode = {
title: `Our Work - ${config.siteTitle}`,
}
return (
<ScrollWrapper>
<Helmet>
<title>{`Our Work - ${config.siteTitle}`}</title>
</Helmet>
<SEO postNode={postNode} pagePath="contact" customTitle />
<HeaderBlock bgColor="#9F4300">
<HeaderText as="h3" size="38" color="#FFFFFF">
{work.subHeaderText}
</HeaderText>
<HeaderText
as="h1"
padding="1vw 0 0 0"
size="100"
weight="700"
color="#FFFFFF"
>
{work.headerText}
</HeaderText>
<HeaderRow>
{work.qualifications.map(({ node: qualification }, index) => {
return (
<P key={index + ''} color="#FFFFFF">
{work.qualifications[index]}
</P>
)
})}
</HeaderRow>
</HeaderBlock>
<TwoColRow bias="left">
<Block
bgColor="transparent"
padding="0"
bgImage={work.firstImage.ogimg.src}
/>
<TextBlock
bgColor="#FFFFFF"
hideButton
dest="/"
theme="dark"
header={work.secondaryHeader1}
inlineText={work.paragraphText1.childMarkdownRemark.html}
/>
</TwoColRow>
<TwoColRow bias="right" smallReverse>
<TextBlock
pushUp
bgColor="#FFFFFF"
bText={work.buttonText}
dest={
'/' +
work.buttonDestination.replace(/[^a-z0-9]/gi, '-').toLowerCase()
}
theme="dark"
header={work.secondaryHeader2}
inlineText={work.paragraphText2.childMarkdownRemark.html}
/>
<Block
bgColor="transparent"
padding="0"
bgImage={work.secondImage.ogimg.src}
/>
</TwoColRow>
<Block bgColor="#C9EAEB" padding="0">
<WorkRow>
<Column className="work">
<Image src={leafService} />
<HeaderText as="h6" size="30" weight="700" color="#434343">
{work.highlightTitle1}
</HeaderText>
<P
as="div"
color="#293536"
dangerouslySetInnerHTML={{
__html: work.highlightParagraph1.childMarkdownRemark.html,
}}
/>
</Column>
<Column>
<Image src={leafCare} />
<HeaderText as="h6" size="30" weight="700" color="#434343">
{work.highlightTitle2}
</HeaderText>
<P
as="div"
color="#293536"
dangerouslySetInnerHTML={{
__html: work.highlightParagraph2.childMarkdownRemark.html,
}}
/>{' '}
</Column>
<Column>
<Image src={leafEstimates} />
<HeaderText as="h6" size="30" weight="700" color="#434343">
{work.highlightTitle3}
</HeaderText>
<P
as="div"
color="#293536"
dangerouslySetInnerHTML={{
__html: work.highlightParagraph3.childMarkdownRemark.html,
}}
/>{' '}
</Column>
</WorkRow>
</Block>
<TestimonialSlider data={testimonials} />
<FeaturedBlock bgColor="#394343">
<HeaderText
padding="5vw 0 5vw 0"
align="center"
as="h2"
size="72"
weight="700"
color="#FFFFFF"
>
Featured Projects
</HeaderText>
<FeaturedRow>
{work.workImages.map(({ node: image }, index) => {
return (
<img key={index + ''} src={work.workImages[index].ogimg.src} />
)
})}
</FeaturedRow>
<RightLeaf src={pine} />
<LeftLeaf src={pine2} />
</FeaturedBlock>
<Summary
bgColor="#9F4300"
subhead={summary.subHeaderText}
header={summary.headerText}
text={summary.paragraphText.childMarkdownRemark.html}
bText={summary.buttonText}
dest={'/' + summary.buttonDestination}
/>
<Footer />
</ScrollWrapper>
)
}
// Page query: loads the shared Summary block, every Testimonial, and the
// single Work entry (copy, images, and qualification strings) rendered above.
export const query = graphql`
  query {
    allContentfulSummary {
      edges {
        node {
          headerText
          subHeaderText
          buttonText
          buttonDestination
          paragraphText {
            childMarkdownRemark {
              html
              excerpt(pruneLength: 320)
            }
          }
        }
      }
    }
    allContentfulTestimonial {
      edges {
        node {
          firstName
          lastInitial
          city
          state
          customerReview {
            childMarkdownRemark {
              html
              excerpt(pruneLength: 320)
            }
          }
        }
      }
    }
    allContentfulWork {
      edges {
        node {
          headerText
          subHeaderText
          secondaryHeader1
          secondaryHeader2
          buttonText
          buttonDestination
          highlightTitle1
          highlightTitle2
          highlightTitle3
          firstImage {
            title
            fluid(maxWidth: 1800) {
              ...GatsbyContentfulFluid_withWebp_noBase64
            }
            ogimg: resize(width: 1800) {
              src
              width
              height
            }
          }
          secondImage {
            title
            fluid(maxWidth: 1800) {
              ...GatsbyContentfulFluid_withWebp_noBase64
            }
            ogimg: resize(width: 1800) {
              src
              width
              height
            }
          }
          paragraphText1 {
            childMarkdownRemark {
              html
              excerpt(pruneLength: 320)
            }
          }
          paragraphText2 {
            childMarkdownRemark {
              html
              excerpt(pruneLength: 320)
            }
          }
          highlightParagraph1 {
            childMarkdownRemark {
              html
              excerpt(pruneLength: 320)
            }
          }
          highlightParagraph2 {
            childMarkdownRemark {
              html
              excerpt(pruneLength: 320)
            }
          }
          highlightParagraph3 {
            childMarkdownRemark {
              html
              excerpt(pruneLength: 320)
            }
          }
          workImages {
            title
            fluid(maxWidth: 1800) {
              ...GatsbyContentfulFluid_withWebp_noBase64
            }
            ogimg: resize(width: 1800) {
              src
              width
              height
            }
          }
          qualifications
        }
      }
    }
  }
`

export default Work
|
package main
import (
"fmt"
"io"
"github.com/sirupsen/logrus"
"github.com/stellar/go/ingest"
backends "github.com/stellar/go/ingest/ledgerbackend"
"github.com/stellar/go/support/log"
)
// statistics ingests a bounded range of ledgers from a captive core backend
// and prints transaction/operation success-vs-failure counts.
func statistics() {
	// Only log errors from the backend to keep output cleaner.
	lg := log.New()
	lg.SetLevel(logrus.ErrorLevel)
	config.Log = lg

	backend, err := backends.NewCaptive(config)
	panicIf(err)
	defer backend.Close()

	// Prepare a range to be ingested:
	var startingSeq uint32 = 2 // can't start with genesis ledger
	var ledgersToRead uint32 = 10000

	fmt.Printf("Preparing range (%d ledgers)...\n", ledgersToRead)
	ledgerRange := backends.BoundedRange(startingSeq, startingSeq+ledgersToRead)
	err = backend.PrepareRange(ledgerRange)
	panicIf(err)

	// These are the statistics that we're tracking.
	var successfulTransactions, failedTransactions int
	var operationsInSuccessful, operationsInFailed int

	for seq := startingSeq; seq <= startingSeq+ledgersToRead; seq++ {
		fmt.Printf("Processed ledger %d...\r", seq)

		txReader, err := ingest.NewLedgerTransactionReader(
			backend, config.NetworkPassphrase, seq)
		panicIf(err)

		// Read each transaction within the ledger, extract its operations, and
		// accumulate the statistics we're interested in.
		for {
			tx, err := txReader.Read()
			if err == io.EOF {
				break
			}
			panicIf(err)

			envelope := tx.Envelope
			operationCount := len(envelope.Operations())
			if tx.Result.Successful() {
				successfulTransactions++
				operationsInSuccessful += operationCount
			} else {
				failedTransactions++
				operationsInFailed += operationCount
			}
		}

		// Bug fix: this used to be `defer txReader.Close()`, which kept every
		// reader (up to 10k of them) open until the function returned. Close
		// each reader as soon as its ledger is fully consumed.
		txReader.Close()
	}

	fmt.Println("\nDone. Results:")
	fmt.Printf(" - total transactions: %d\n", successfulTransactions+failedTransactions)
	fmt.Printf(" - succeeded / failed: %d / %d\n", successfulTransactions, failedTransactions)
	fmt.Printf(" - total operations: %d\n", operationsInSuccessful+operationsInFailed)
	fmt.Printf(" - succeeded / failed: %d / %d\n", operationsInSuccessful, operationsInFailed)
}
|
/**
* WordPress dependencies
*/
import { __ } from '@wordpress/i18n';
import { RichText } from '@wordpress/block-editor';
/**
* Edit component.
* See https://wordpress.org/gutenberg/handbook/designers-developers/developers/block-api/block-edit-save/#edit
*
* @param {Object} props The block props.
* @param {Object} props.attributes Block attributes.
* @param {string} props.attributes.customTitle Custom title to be displayed.
* @param {string} props.className Class name for the block.
* @param {Function} props.setAttributes Sets the value for block attributes.
* @return {Function} Render the edit screen
*/
const ExampleBockEdit = ({
attributes: { customTitle: currentTitle },
className,
setAttributes,
}) => {
return (
<div className={className}>
<RichText
className="wp-block-example-block__title"
tagName="h2"
placeholder={__('Custom Title')}
value={currentTitle}
onChange={(customTitle) => setAttributes({ customTitle })}
/>
</div>
);
};
export default ExampleBockEdit;
|
#!/bin/sh
# Launch the backend under gunicorn using the project config file.
# 'app:run()' is the application factory; --reload restarts workers on code
# changes (development use).
# Bug fix: a stray trailing "|" left the script as an incomplete pipeline.
gunicorn -c /usr/src/backend/app/conf/gunicorn_config.py 'app:run()' --reload
<filename>frontend/gulpfile.js
const {
series, parallel, src, dest,
} = require('gulp');
const uglify = require('gulp-uglify');
const clean = require('gulp-clean');
const concat = require('gulp-concat');
const htmlmin = require('gulp-htmlmin');
const cleanCSS = require('gulp-clean-css');
const rename = require('gulp-rename');
// Minify the Angular library bundles plus all app scripts into a single
// dist/js/all.min.js.
function uglifyTask() {
  const path = 'lib/angular/angular';
  return src([`${path}.js`, `${path}-messages.js`, `${path}-locale_pt-br.js`, `${path}-route.js`, 'js/**/*.js'])
    .pipe(uglify())
    .pipe(concat('all.min.js'))
    .pipe(dest('dist/js'));
}
// Remove the previous build output; allowEmpty keeps a fresh checkout
// (no dist/ yet) from failing the pipeline.
function cleanTask() {
  return src('dist/', { read: false, allowEmpty: true })
    .pipe(clean());
}
// Minify the view templates into dist/.
function htmlMinTask() {
  return src('view/*.html')
    .pipe(htmlmin({ collapseWhitespace: true }))
    .pipe(dest('dist'));
}
// Concatenate Bootstrap and the app stylesheets, then minify the result
// into dist/css/styles.min.css.
function cleanCSSTask() {
  return src(['lib/bootstrap/css/bootstrap.css', 'css/**/*.css'])
    .pipe(concat('styles.min.css'))
    .pipe(cleanCSS())
    .pipe(dest('dist/css'));
}
// Minify the production entry page and publish it as dist/index.html.
function copy() {
  return src('./index-prod.html')
    .pipe(htmlmin({ collapseWhitespace: true }))
    .pipe(rename('index.html'))
    .pipe(dest('dist/'));
}
// Ship Angular's source map next to the minified bundle for debugging.
function copyAngularMap() {
  return src('lib/angular/angular.min.js.map')
    .pipe(dest('dist/js'));
}
// Default build: wipe dist/ first, then run the remaining tasks in parallel.
exports.default = series(
  cleanTask,
  parallel(
    uglifyTask,
    htmlMinTask,
    cleanCSSTask,
    copy,
    copyAngularMap,
  ),
);
|
package SimJoins.SimJoinsSingleNode.SimJoins
import java.io.PrintWriter
import java.util.Calendar
import SimJoins.SimJoinsSingleNode.Commons.CommonFunctions
import SimJoins.SimJoinsSingleNode.Commons.ED.CommonEdFunctions.commons
import SimJoins.SimJoinsSingleNode.Commons.ED.{CommonEdFunctions, EdFilters}
import SimJoins.DataStructure.Profile
/**
 * Implements the ED Join algorithm
 **/
object EDJoin {

  /**
   * Runs ED Join independently on every attribute and keeps only the pairs
   * that satisfy the edit-distance threshold on all of them.
   *
   * @param profiles set of profiles
   * @param attributesThresholds thresholds for each attribute
   * @param log logger
   * @param qgramLen size of the q-grams
   * @return pairs of profile ids matching on every attribute
   **/
  def getMatchesMulti(profiles: List[Profile], attributesThresholds: Map[String, Double], log: PrintWriter, qgramLen: Int): List[(Long, Long)] = {
    /**
     * For each attribute in attributeThresholds, obtain the candidate set
     **/
    val tokenizedAndCandidates = attributesThresholds.map { case (attribute, threshold) =>
      log.println("[EDJoin] Attribute " + attribute)
      val t1 = Calendar.getInstance().getTimeInMillis
      val docs = CommonFunctions.extractField(profiles, attribute)
      val t2 = Calendar.getInstance().getTimeInMillis
      log.println("[EDJoin] Time to tokenize attribute's data " + CommonFunctions.msToMin(t2 - t1) + " min")
      val res = (attribute, EDJoin.getCandidates(docs, qgramLen, threshold.toInt, log))
      val t3 = Calendar.getInstance().getTimeInMillis
      log.println("[EDJoin] Time to compute candidate set " + CommonFunctions.msToMin(t3 - t2) + " min")
      res
    }
    val t4 = Calendar.getInstance().getTimeInMillis
    // (attribute -> tokenized documents) for every attribute
    val tokenizedAttributes = tokenizedAndCandidates.map(x => (x._1, x._2._1))
    // Flatten into (docId, attribute, text) and regroup per document id
    val a = tokenizedAttributes.toList.flatMap(x => x._2.map(y => (y._1, x._1, y._2)))
    val docTokens = a.groupBy(_._1).map(x => (x._1, x._2.map(y => (y._2, y._3)).toMap))
    // A pair survives only if it is a candidate for every attribute
    val candidates = tokenizedAndCandidates.map(_._2._2).reduce((c1, c2) => c1.intersect(c2))
    log.println("[EDJoin] Candidates number " + candidates.length)
    val t5 = Calendar.getInstance().getTimeInMillis
    log.println("[EDJoin] Time to obtain candidates " + CommonFunctions.msToMin(t5 - t4))
    // Verification: compute the actual edit distance per attribute, bailing
    // out of the while-loop as soon as one attribute fails its threshold.
    val pairs = candidates.filter { case (doc1, doc2) =>
      val d1 = docTokens.get(doc1)
      val d2 = docTokens.get(doc2)
      var pass = true
      if (d1.isDefined && d2.isDefined) {
        val docs1 = d1.get
        val docs2 = d2.get
        val it = attributesThresholds.iterator
        while (it.hasNext && pass) {
          val (attribute, threshold) = it.next()
          pass = CommonEdFunctions.editDist(docs1(attribute), docs2(attribute)) <= threshold
        }
      }
      else {
        pass = false
      }
      pass
    }
    val t6 = Calendar.getInstance().getTimeInMillis
    log.println("[EDJoin] Matches " + pairs.length)
    log.println("[EDJoin] Verification time " + CommonFunctions.msToMin(t6 - t5))
    log.flush()
    pairs
  }

  /**
   * Computes the candidate pairs whose edit distance may be <= threshold.
   * Note: only the length/position and common filters are applied here; the
   * exact edit-distance verification is left to the caller (getMatchesMulti).
   *
   * @return (document map, candidate pairs, index-build time ms, join time ms)
   **/
  def getCandidates(documents: List[(Long, String)], qgramLength: Int, threshold: Int, log: PrintWriter): (Map[Long, String], List[(Long, Long)], Long, Long) = {
    val t1 = Calendar.getInstance().getTimeInMillis
    // Convert the documents into q-grams
    val docs = documents.map(x => (x._1, CommonEdFunctions.getQgrams(x._2, qgramLength)))
    // Sort the q-grams by their document frequency
    val sortedDocs = CommonEdFunctions.getSortedQgrams(docs)
    // Build the prefix index
    val prefixIndex = CommonEdFunctions.buildPrefixIndex(sortedDocs, qgramLength, threshold)
    val t2 = Calendar.getInstance().getTimeInMillis
    log.println("[EDJoin] Tempo di creazione dell'indice " + (t2 - t1))
    val tIndex = t2 - t1
    // Candidates
    var preCandidates: List[(Long, Long)] = Nil
    // Initialize the arrays that track whether a neighbor was already seen
    val maxId = sortedDocs.maxBy(_._1)._1.toInt + 1
    val neighbors = Array.ofDim[Int](maxId)
    val notFound = Array.fill[Boolean](maxId) {
      true
    }
    var numNeighbors = 0
    // Prefix length
    val prefixLen = EdFilters.getPrefixLen(qgramLength, threshold)
    val t3 = Calendar.getInstance().getTimeInMillis
    log.println("[EDJoin] Tempo inizializzazione " + (t3 - t2))
    // For each document
    sortedDocs.foreach { case (docId, qgrams) =>
      val docLen = qgrams.length
      // Take its prefix; if the document is too short, include the special block too
      val prefix = {
        if (docLen < prefixLen) {
          qgrams.union(commons.fixPrefix :: Nil)
        }
        else {
          qgrams.take(prefixLen)
        }
      }
      // For each element of the prefix
      prefix.foreach { case (qgram, qgramPos) =>
        // Fetch the index block for that element (if any)
        val block = prefixIndex.get(qgram)
        if (block.isDefined) {
          // For each neighbor
          block.get.foreach { neighbor =>
            // Only if the neighbor has not been seen before
            if (docId < neighbor.docId && notFound(neighbor.docId.toInt)) {
              // Keep it if it passes the length filter and the q-gram positions are close enough
              if (Math.abs(neighbor.docLength - docLen) <= threshold && Math.abs(qgramPos - neighbor.qgramPos) <= threshold) {
                preCandidates = (docId, neighbor.docId) :: preCandidates
              }
              // Mark the neighbor as seen
              notFound.update(neighbor.docId.toInt, false)
              neighbors.update(numNeighbors, neighbor.docId.toInt)
              numNeighbors += 1
            }
          }
        }
      }
      // At the end of each document, reset the neighbors seen so far
      for (i <- 0 until numNeighbors) {
        notFound.update(neighbors(i), true)
      }
      numNeighbors = 0
    }
    val sortedDocMap = sortedDocs.toMap
    val documentMap = documents.toMap
    log.println("[EDJoin] Numero pre-candidati (questi vengono tutti parsati dal common filter) " + preCandidates.length)
    val t4 = Calendar.getInstance().getTimeInMillis
    log.println("[EDJoin] Tempo precandidati " + (t4 - t3))
    // Final filtering step of this method: keep only the pairs that pass the
    // common filter (the edit distance itself is NOT computed here).
    val candidates = preCandidates.filter { case (doc1Id, doc2Id) =>
      if (EdFilters.commonFilter(sortedDocMap(doc1Id), sortedDocMap(doc2Id), qgramLength, threshold)) {
        true
      }
      else {
        false
      }
    }
    val t5 = Calendar.getInstance().getTimeInMillis
    log.println("[EDJoin] Numero candidati " + candidates.length)
    log.println("[EDJoin] Tempo common filter " + (t5 - t4))
    log.println("[EDJoin] Tempo di JOIN complessivo " + (t5 - t3))
    val tJoin = t5 - t3
    (documentMap, candidates, tIndex, tJoin)
  }

  /**
   * Returns the pairs of documents whose edit distance is <= threshold.
   **/
  def getMatches(documents: List[(Long, String)], qgramLength: Int, threshold: Int, log: PrintWriter): List[(Long, Long)] = {
    val t1 = Calendar.getInstance().getTimeInMillis
    // Convert the documents into q-grams
    val docs = documents.map(x => (x._1, CommonEdFunctions.getQgrams(x._2, qgramLength)))
    // Sort the q-grams by their document frequency
    val sortedDocs = CommonEdFunctions.getSortedQgrams(docs)
    val t2 = Calendar.getInstance().getTimeInMillis
    log.println("[EDJoin] Tempo preprocessing " + CommonFunctions.msToMin(t2 - t1))
    // Build the prefix index
    val prefixIndex = CommonEdFunctions.buildPrefixIndex(sortedDocs, qgramLength, threshold)
    val t3 = Calendar.getInstance().getTimeInMillis
    log.println("[EDJoin] Tempo prefix indexing " + CommonFunctions.msToMin(t3 - t2))
    // Candidates
    var candidates: List[(Long, Long)] = Nil
    // Initialize the arrays that track whether a neighbor was already seen
    val maxId = sortedDocs.maxBy(_._1)._1.toInt + 1
    val neighbors = Array.ofDim[Int](maxId)
    val notFound = Array.fill[Boolean](maxId) {
      true
    }
    var numNeighbors = 0
    var lenFilterAct: Double = 0
    // Prefix length
    val prefixLen = EdFilters.getPrefixLen(qgramLength, threshold)
    // For each document
    sortedDocs.foreach { case (docId, qgrams) =>
      val docLen = qgrams.length
      // Take its prefix; if the document is too short, include the special block too
      val prefix = {
        if (docLen < prefixLen) {
          qgrams.union(commons.fixPrefix :: Nil)
        }
        else {
          qgrams.take(prefixLen)
        }
      }
      // For each element of the prefix
      prefix.foreach { case (qgram, qgramPos) =>
        // Fetch the index block for that element (if any)
        val block = prefixIndex.get(qgram)
        if (block.isDefined) {
          // For each neighbor
          block.get.foreach { neighbor =>
            // Only if the neighbor has not been seen before
            if (docId < neighbor.docId && notFound(neighbor.docId.toInt)) {
              lenFilterAct += 1
              // Keep it if it passes the length filter and the q-gram positions are close enough
              if (Math.abs(neighbor.docLength - docLen) <= threshold && Math.abs(qgramPos - neighbor.qgramPos) <= threshold) {
                candidates = (docId, neighbor.docId) :: candidates
              }
              // Mark the neighbor as seen
              notFound.update(neighbor.docId.toInt, false)
              neighbors.update(numNeighbors, neighbor.docId.toInt)
              numNeighbors += 1
            }
          }
        }
      }
      // At the end of each document, reset the neighbors seen so far
      for (i <- 0 until numNeighbors) {
        notFound.update(neighbors(i), true)
      }
      numNeighbors = 0
    }
    log.println("[EDJoin] Numero di attivazioni length filter " + lenFilterAct)
    log.println("[EDJoin] Numero di candidati (corrisponde anche al numero di attivazioni del commonFilter) " + candidates.length)
    val sortedDocMap = sortedDocs.toMap
    val documentMap = documents.toMap
    // Final verification phase
    val verified = candidates.filter { case (doc1Id, doc2Id) =>
      // If the pair passes the common filter, compute the edit distance and check it against the threshold
      if (EdFilters.commonFilter(sortedDocMap(doc1Id), sortedDocMap(doc2Id), qgramLength, threshold) && CommonEdFunctions.editDist(documentMap(doc1Id), documentMap(doc2Id)) <= threshold) {
        true
      }
      else {
        false
      }
    }
    log.println("[EDJoin] Numero di coppie verificate " + verified.length)
    val t4 = Calendar.getInstance().getTimeInMillis
    log.println("[EDJoin] Tempo totale (min) " + CommonFunctions.msToMin(t4 - t3))
    verified
  }
}
|
<filename>barba-gsap/src/js/app.js<gh_stars>0
import barba from '@barba/core';
import gsap from 'gsap';
// Bug fix: animationEnter and animationLeave were imported twice (once on
// their own line and again below), which is a SyntaxError for duplicate
// import bindings in an ES module. Keep the single combined import.
import { revealProject, leaveToProject, leaveFromProject, animationEnter, animationLeave } from './animations';

// Reset the underline span of the active nav link so it can animate back in.
const resetActiveLink = () => gsap.set('a.is-active span', {
    xPercent: -100,
    transformOrigin: 'left'
});

// Always start a freshly-entered page at the top.
barba.hooks.enter(() => {
    window.scrollTo(0, 0);
});

barba.init({
    transitions: [
        {
            // Transition into a project-detail page.
            name: 'detail',
            to: {
                namespace: ['detail']
            },
            once({next}){
                revealProject(next.container)
            },
            leave: ({current}) => leaveToProject(current.container),
            enter({next}){
                revealProject(next.container)
            }
        },
        {
            // Fallback transition for every other navigation.
            name: 'general-transition',
            once({next}){
                resetActiveLink();
                gsap.from('header a', {
                    duration: 0.6,
                    yPercent: 100,
                    stagger: 0.2,
                    ease: 'power1.out',
                    onComplete: () => animationEnter(next.container)
                });
            },
            leave: ({current}) => animationLeave(current.container),
            enter({next}){
                animationEnter(next.container);
            }
        }
    ]
})
<reponame>DivyeshPadamani/Investmentclub<filename>src/app/topbar/topbar.component.ts
import { Component, OnInit, AfterViewInit } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import 'rxjs/add/operator/catch';
import 'rxjs/add/operator/map';
import { UserService } from '../services/user.service';
import { ClassService } from '../services/class.service';
declare let TradingView: any;
/**
 * Top navigation bar: exposes the signed-in user, a logout action, and the
 * user's classes to the template.
 */
@Component({
  selector: 'app-topbar',
  templateUrl: './topbar.component.html',
  styleUrls: ['./topbar.component.css']
})
export class TopBarComponent implements OnInit {

  constructor(public user: UserService, private classesService: ClassService) { }

  // Intentionally empty; kept to satisfy the OnInit contract.
  ngOnInit() {
  }

  // Delegates sign-out to the shared UserService.
  logout() {
    this.user.logout();
  }

  // Forwards to ClassService; presumably returns an Observable of the
  // current user's classes — confirm against ClassService.getMyClasses().
  getClasses() {
    return this.classesService.getMyClasses();
  }
}
|
package net.kyaco.wynntr.mixin;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.Shadow;
import org.spongepowered.asm.mixin.injection.At;
import org.spongepowered.asm.mixin.injection.Inject;
import org.spongepowered.asm.mixin.injection.callback.CallbackInfo;
import net.kyaco.wynntr.WynnTextReplacer;
import net.minecraft.client.network.packet.ChatMessageS2CPacket;
import net.minecraft.network.MessageType;
import net.minecraft.network.Packet;
import net.minecraft.network.listener.ClientPlayPacketListener;
import net.minecraft.text.Text;
import net.minecraft.util.PacketByteBuf;
/**
 * Mixin into the vanilla chat packet: after the packet has been read from the
 * network, the incoming text is replaced with its re-translated version.
 */
@Mixin(ChatMessageS2CPacket.class)
abstract public class MixinChatMessageS2CPacket implements Packet<ClientPlayPacketListener>
{
    // Shadowed fields of the target packet class.
    @Shadow private Text message;
    @Shadow private MessageType location;

    /**
     * Runs when the target's read(...) returns, i.e. once {@code message} and
     * {@code location} are populated from the buffer.
     */
    @Inject(method = "read", at = @At("RETURN"))
    public void readMixin(PacketByteBuf packetBytebuf, CallbackInfo ci)
    {
        // GAME_INFO messages go through the title-text replacer; everything
        // else is treated as chat text.
        if (location == MessageType.GAME_INFO)
        {
            message = WynnTextReplacer.reverseTranslateTitleText(message);
        }
        else
        {
            message = WynnTextReplacer.reverseTranslateChatText(message);
        }
    }
}
|
import random
class RandomNumberGenerator:
    """Generates uniformly distributed integers within a fixed inclusive range."""

    def __init__(self, lower: int, upper: int) -> None:
        # Inclusive bounds; random.randint raises ValueError when lower > upper.
        self.lower = lower
        self.upper = upper

    def generateRandom(self) -> int:
        """Return a random integer N with lower <= N <= upper."""
        return random.randint(self.lower, self.upper)
def isBalanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is properly matched and nested.

    Non-bracket characters are ignored, so mixed text such as ``"a(b)c"`` is
    accepted. An unmatched closing bracket or a leftover opening bracket
    yields False.
    """
    stack = []
    closing = {")": "(", "}": "{", "]": "["}
    opening = set(closing.values())
    for char in s:
        if char in opening:
            stack.append(char)
        # Bug fix: previously every non-opening character fell through to
        # closing[char], so any letter/digit raised KeyError.
        elif char in closing:
            if not stack or stack.pop() != closing[char]:
                return False
    # Balanced iff nothing is left open.
    return not stack
#!/usr/bin/env bash
#
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

# CI build stage: configure and build inside a Docker container
# (via DOCKER_EXEC), rebuild from the `make distdir` output to verify the
# distribution tarball is complete, then collect and upload artifacts.

export LC_ALL=C.UTF-8

# Full log entry of the commit under test, exported for later stages.
TRAVIS_COMMIT_LOG=$(git log --format=fuller -1)
export TRAVIS_COMMIT_LOG

OUTDIR=$BASE_OUTDIR/$TRAVIS_PULL_REQUEST/$TRAVIS_JOB_NUMBER-$HOST
BITCOIN_CONFIG_ALL="--disable-dependency-tracking --prefix=$TRAVIS_BUILD_DIR/depends/$HOST --bindir=$OUTDIR/bin --libdir=$OUTDIR/lib"

# Cap the ccache size unless this job skips the depends build.
if [ -z "$NO_DEPENDS" ]; then
DOCKER_EXEC ccache --max-size=$CCACHE_SIZE
fi

BEGIN_FOLD autogen
# Some jobs force a specific shell (e.g. dash) for autogen.
if [ -n "$CONFIG_SHELL" ]; then
DOCKER_EXEC "$CONFIG_SHELL" -c "./autogen.sh"
else
DOCKER_EXEC ./autogen.sh
fi
END_FOLD

# NOTE(review): `cd ... || (echo ...; exit 1)` exits only the subshell, so the
# script continues after a failed cd — confirm whether `set -e`/`exit` at top
# level is wanted here.
mkdir build
cd build || (echo "could not enter build directory"; exit 1)

BEGIN_FOLD configure
DOCKER_EXEC ../configure --cache-file=config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false)
END_FOLD

BEGIN_FOLD distdir
DOCKER_EXEC make distdir VERSION=$HOST
END_FOLD

# Re-run configure/build from the generated distdir to prove the tarball builds.
cd "epgc-$HOST" || (echo "could not enter distdir epgc-$HOST"; exit 1)

BEGIN_FOLD configure
DOCKER_EXEC ./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false)
END_FOLD

# Dump sanitizer output on any failure from here on.
set -o errtrace
trap 'DOCKER_EXEC "cat ${TRAVIS_BUILD_DIR}/sanitizer-output/* 2> /dev/null"' ERR

BEGIN_FOLD build
DOCKER_EXEC make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && DOCKER_EXEC make $GOAL V=1 ; false )
END_FOLD

BEGIN_FOLD deploy
DOCKER_EXEC $TRAVIS_BUILD_DIR/contrib/travis-artifacts/collect-artifacts.sh $TRAVIS_BUILD_DIR $OUTDIR build $TRAVIS_COMMIT $HOST
END_FOLD

BEGIN_FOLD upload
# NOTE(review): the `&&` splits this command — sshpass runs on the CI host,
# not inside the container; confirm that is intended.
DOCKER_EXEC export SSHPASS=$DEPLOY_PASS && sshpass -p $DEPLOY_PASS rsync -avz -e "ssh -o StrictHostKeyChecking=no" package-$HOST.tgz $DEPLOY_USER@$DEPLOY_HOST:$DEPLOY_PATH/
END_FOLD

cd ${TRAVIS_BUILD_DIR} || (echo "could not enter travis build dir $TRAVIS_BUILD_DIR"; exit 1)
package cli
import (
"fmt"
"github.com/matthogan/zc/cmd/cn/cli/options"
container "github.com/matthogan/zc/pkg/container"
"github.com/spf13/cobra"
)
var (
	// digest is the package-level client used by the digest subcommands;
	// declared as the DigestApi interface so tests can substitute a fake.
	digest container.DigestApi
)

func init() {
	// Default to the concrete registry-backed implementation.
	digest = &container.Digest{}
}
// Digest builds the root `digest` command and attaches its subcommands.
func Digest() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "digest",
		Short: "Operations related to artifact digests in a registry",
	}
	cmd.AddCommand(
		getDigest(),
	)
	return cmd
}
// getDigest builds the `digest get` subcommand, which resolves the digest of
// a single image reference from its registry and prints it to stdout.
func getDigest() *cobra.Command {
	o := &options.DigestOptions{}
	cmd := &cobra.Command{
		Use:     "get",
		Short:   "Get the digest of an image",
		Long:    r.GetResourceAsString("digest_get"),
		Example: " cn digest get <image uri>",
		Args:    cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Renamed from `digest` to `d`: the old declaration shadowed the
			// package-level `digest` client inside this closure, which was
			// confusing and made the client unreachable after this line.
			d, err := digest.GetDigestFromRegistry(cmd.Context(), &o.Registry, args[0])
			if err != nil {
				return err
			}
			fmt.Fprintf(cmd.OutOrStdout(), "%s\n", d.DigestStr())
			return nil
		},
	}
	return cmd
}
|
import java.util.*;
public class Main {
    /**
     * Adds two fixed operands and prints their sum in the same sentence
     * format as before.
     */
    public static void main(String[] args) {
        final int first = 25;
        final int second = 75;
        final int total = first + second;
        System.out.println("The sum of the two numbers is " + total);
    }
}
def generatPrimes(start, end):
    """Return the list of primes in the inclusive range [start, end].

    The public name keeps its original (misspelled) form so existing callers
    continue to work.
    """
    primes = []
    # Bug fix: starting the scan below 2 used to classify 0 and 1 as prime,
    # because their empty trial-division loop never set is_prime to False.
    for num in range(max(start, 2), end + 1):
        is_prime = True
        # Trial-divide only up to sqrt(num): any factor pair has one member
        # at or below the square root.
        for i in range(2, int(num ** 0.5) + 1):
            if num % i == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(num)
    return primes

start = 2
end = 10
result = generatPrimes(start, end)
print(result)
<filename>microservice/challenge-4/cat-mood-producer/src/main/java/ca/antaki/www/cat/producer/handler/ExecutorServiceMoodHandler.java<gh_stars>0
package ca.antaki.www.cat.producer.handler;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;
import ca.antaki.www.cat.producer.model.Cat;
/**
 * Fans a batch of cats out across a fixed thread pool, generating and sending
 * a mood for each one, and records the wall-clock time of each batch.
 */
public class ExecutorServiceMoodHandler implements CatMoodHandler {

    private static final Logger LOG = LoggerFactory.getLogger(ExecutorServiceMoodHandler.class);

    /** Generates a new mood for a cat and publishes it. */
    private MoodGeneratorAndSender moodGeneratorAndSender;
    /** Times each handle() invocation under the "changeMoodTime" metric. */
    private final Timer changeMoodTimer;
    private ExecutorService executorService;
    private final int nbThreads;

    @Autowired
    public ExecutorServiceMoodHandler(MetricRegistry metricRegistry,
            MoodGeneratorAndSender moodGeneratorAndSender, int threads) {
        this.changeMoodTimer = metricRegistry.timer("changeMoodTime");
        this.moodGeneratorAndSender = moodGeneratorAndSender;
        this.nbThreads = threads;
        executorService = Executors.newFixedThreadPool(nbThreads);
        LOG.info("Created a threadpool with {} threads", nbThreads);
    }

    /**
     * Submits one task per cat and waits (up to one minute) for all of them
     * to finish before recording the elapsed time.
     */
    @Override
    public void handle(List<Cat> cats) {
        Context ctx = changeMoodTimer.time();
        CountDownLatch latch = new CountDownLatch(cats.size());
        for (Cat cat : cats) {
            executorService.submit(() -> {
                moodGeneratorAndSender.generateAndSend(cat);
                latch.countDown();
            });
        }
        try {
            // Bug fix: the boolean result of await() was ignored, so a
            // one-minute timeout passed silently as if all cats completed.
            boolean completed = latch.await(1, TimeUnit.MINUTES);
            if (!completed) {
                LOG.warn("Timed out waiting for {} cats to be processed", cats.size());
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag before propagating (Java concurrency
            // convention: never swallow interruption status).
            Thread.currentThread().interrupt();
            LOG.warn("InterruptedException in changeMood", e);
            throw new RuntimeException(e);
        } finally {
            long time = ctx.stop();
            LOG.info("changeMood.time.ms = {} ms", TimeUnit.NANOSECONDS.toMillis(time));
        }
    }
}
|
# Prompt (with readline editing, -e) for a language and store it in lang[$i].
read -e -p " Language:" lang[$i]
# NOTE(review): the quoted 'test' is taken as a plain variable name, so this
# reads the first field into $test and the remainder into a[0] — confirm
# that is the intended behavior.
read 'test' a[0]
|
//
// ExtensionDelegate.h
// SkyTimeAppWatch Extension
//
#import <WatchKit/WatchKit.h>
// Watch-app extension delegate. Conforms to WKExtensionDelegate; lifecycle
// method implementations live in the corresponding .m file.
@interface ExtensionDelegate : NSObject <WKExtensionDelegate>
@end
|
<filename>Dependencies/Include/Rocket/Controls/ElementFormControl.h
/*
* This source file is part of libRocket, the HTML/CSS Interface Middleware
*
* For the latest information, see http://www.librocket.com
*
* Copyright (c) 2008-2010 CodePoint Ltd, Shift Technology Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#ifndef ROCKETCONTROLSELEMENTFORMCONTROL_H
#define ROCKETCONTROLSELEMENTFORMCONTROL_H
#include "../Core/Element.h"
#include "Header.h"
namespace Rocket {
namespace Controls {
/**
	A generic specialisation of the generic Core::Element for all input controls.
	Declares the common name/value/disabled contract that concrete controls
	(text inputs, selects, radio buttons, ...) implement.
	@author <NAME>
 */

class ROCKETCONTROLS_API ElementFormControl : public Core::Element
{
public:
	/// Constructs a new ElementFormControl. This should not be called directly; use the Factory
	/// instead.
	/// @param[in] tag The tag the element was declared as in RML.
	ElementFormControl(const Rocket::Core::String& tag);
	virtual ~ElementFormControl();

	/// Returns the name of the form control. This is not guaranteed to be unique, and in the case of some form
	/// controls (such as radio buttons) most likely will not be.
	/// @return The name of the form control.
	Rocket::Core::String GetName() const;
	/// Sets the name of the form control.
	/// @param[in] name The new name of the form control.
	void SetName(const Rocket::Core::String& name);

	/// Returns a string representation of the current value of the form control.
	/// @return The value of the form control.
	virtual Rocket::Core::String GetValue() const = 0;
	/// Sets the current value of the form control.
	/// @param[in] value The new value of the form control.
	virtual void SetValue(const Rocket::Core::String& value) = 0;
	/// Returns if this value should be submitted with the form.
	/// @return True if the value should be be submitted with the form, false otherwise.
	virtual bool IsSubmitted();

	/// Returns the disabled status of the form control.
	/// @return True if the element is disabled, false otherwise.
	bool IsDisabled() const;
	/// Sets the disabled status of the form control.
	/// @param[in] disable True to disable the element, false to enable.
	void SetDisabled(bool disable);

protected:
	/// Checks for changes to the 'disabled' attribute.
	/// @param[in] changed_attributes List of changed attributes on the element.
	virtual void OnAttributeChange(const Core::AttributeNameList& changed_attributes);
};
}
}
#endif
|
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
/// A minimal separate-chaining hash map from `HumanAddr` keys to `u64` values.
///
/// Buckets are lazily allocated vectors of `(key, value)` pairs. The table
/// never grows: the number of buckets is fixed at construction time.
pub struct CustomHashMap {
    // Bucket array; an occupied slot holds every pair whose key hashes there.
    data: Vec<Option<Vec<(HumanAddr, u64)>>>,
    // Number of key/value entries currently stored across all buckets.
    size: usize,
}
impl CustomHashMap {
    /// Create a new CustomHashMap with the given number of buckets.
    ///
    /// A `capacity` of 0 is rounded up to 1: every other method computes
    /// `hash % self.data.len()`, which would panic (division by zero) on an
    /// empty bucket array.
    pub fn new(capacity: usize) -> Self {
        CustomHashMap {
            data: vec![None; capacity.max(1)],
            size: 0,
        }
    }

    /// Map a key to its bucket index (shared by insert/get/remove).
    fn bucket_index(&self, key: &HumanAddr) -> usize {
        (self.hash(key) % self.data.len() as u64) as usize
    }

    /// Insert a key/value pair, overwriting the value if the key is present.
    pub fn insert(&mut self, key: HumanAddr, value: u64) {
        let index = self.bucket_index(&key);
        if let Some(bucket) = self.data.get_mut(index) {
            if let Some(entry) = bucket.as_mut() {
                if let Some(existing) = entry.iter_mut().find(|(k, _)| *k == key) {
                    // Key already present: update in place; size is unchanged.
                    existing.1 = value;
                } else {
                    entry.push((key, value));
                    self.size += 1;
                }
            } else {
                // First entry for this bucket.
                *bucket = Some(vec![(key, value)]);
                self.size += 1;
            }
        }
    }

    /// Retrieve a reference to the value stored for `key`, if any.
    pub fn get(&self, key: &HumanAddr) -> Option<&u64> {
        let index = self.bucket_index(key);
        if let Some(bucket) = self.data.get(index) {
            if let Some(entries) = bucket.as_ref() {
                for (k, v) in entries {
                    if k == key {
                        return Some(v);
                    }
                }
            }
        }
        None
    }

    /// Remove `key` and return its value, or `None` if it was absent.
    pub fn remove(&mut self, key: &HumanAddr) -> Option<u64> {
        let index = self.bucket_index(key);
        if let Some(bucket) = self.data.get_mut(index) {
            if let Some(entries) = bucket.as_mut() {
                if let Some(pos) = entries.iter().position(|(k, _)| k == key) {
                    let (_, value) = entries.remove(pos);
                    self.size -= 1;
                    return Some(value);
                }
            }
        }
        None
    }

    /// Hash a key by feeding the wrapped string (field `.0`) through the
    /// std `DefaultHasher`.
    ///
    /// NOTE(review): a fresh `DefaultHasher::new()` is identically seeded each
    /// call, so placement is deterministic within a process — intended here.
    fn hash(&self, key: &HumanAddr) -> u64 {
        let mut hasher = DefaultHasher::new();
        key.0.hash(&mut hasher);
        hasher.finish()
    }
}
// Implementing traits for HumanAddr to be used as keys in the CustomHashMap
impl Eq for HumanAddr {}
impl PartialEq for HumanAddr {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl Hash for HumanAddr {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0.hash(state);
}
} |
# Evaluate the fine-tuned LM on the WikiText-103 validation set with a custom
# augmentation (shuffle_sentences_remove_all_but_nouns_first_third_sixth) and
# the penultimate_sixth_eval scoring function; results go to --output_dir.
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+512+512-shuffled/7-model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+512+512-shuffled/7-512+512+512-SS-N-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_sentences_remove_all_but_nouns_first_third_sixth --eval_function penultimate_sixth_eval
<filename>snail/src/main/java/com/acgist/snail/pojo/ISpeedGetter.java<gh_stars>0
package com.acgist.snail.pojo;
/**
 * <p>Speed getter interface (upload and download)</p>
 *
 * @author acgist
 */
public interface ISpeedGetter {
	/**
	 * <p>Get the current upload speed</p>
	 *
	 * @return upload speed
	 */
	long uploadSpeed();
	/**
	 * <p>Get the current download speed</p>
	 *
	 * @return download speed
	 */
	long downloadSpeed();
}
|
import random
import secrets
import string
PASS_LEN = 8  # default/minimum password length

def generate_password(length=PASS_LEN):
    """Generate a random password.

    Uses the ``secrets`` CSPRNG instead of ``random`` (which is not suitable
    for security-sensitive values), and guarantees at least one lowercase
    letter, one uppercase letter, one digit and one punctuation character
    whenever ``length`` allows it.

    Args:
        length: desired password length (default ``PASS_LEN``); values below 0
            are treated as 0.

    Returns:
        A password string of ``length`` characters.
    """
    lower_case = string.ascii_lowercase
    upper_case = string.ascii_uppercase
    digits = string.digits
    punctuation = string.punctuation
    all_char = lower_case + upper_case + digits + punctuation

    # Seed with one character from each class (as many classes as fit in
    # ``length``), then fill the remainder from the full alphabet.
    char_classes = [lower_case, upper_case, digits, punctuation]
    length = max(length, 0)
    chars = [secrets.choice(group) for group in char_classes[:length]]
    chars += [secrets.choice(all_char) for _ in range(length - len(chars))]

    # Shuffle (with a CSPRNG-backed shuffler) so the guaranteed class
    # characters do not sit in a predictable position.
    random.SystemRandom().shuffle(chars)
    return "".join(chars)
# Demo: print one freshly generated password when the module is executed.
random_password = generate_password()
print(f'Randomly generated password: {random_password}')
<reponame>kokizzu/colima
package daemon
import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/abiosoft/colima/cli"
	"github.com/abiosoft/colima/daemon/process"
	godaemon "github.com/sevlyar/go-daemon"
	"github.com/sirupsen/logrus"
)
// dir resolves the daemon's working directory (pid and log files live there).
var dir = process.Dir
// daemonize creates the daemon and returns if this is a child process.
//
// It ensures the daemon directory exists, then forks via go-daemon's
// Reborn(): the parent returns (ctx, child=false, nil) after spawning the
// child; the child returns (ctx, child=true, nil) after logging a startup
// banner. Callers must Release() the returned context when non-nil.
func daemonize() (ctx *godaemon.Context, child bool, err error) {
	dir := dir()
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, false, fmt.Errorf("cannot make dir: %w", err)
	}
	info := Info()
	ctx = &godaemon.Context{
		PidFileName: info.PidFile,
		PidFilePerm: 0644,
		LogFileName: info.LogFile,
		LogFilePerm: 0644,
	}
	d, err := ctx.Reborn()
	if err != nil {
		return ctx, false, fmt.Errorf("error starting daemon: %w", err)
	}
	if d != nil {
		// Non-nil child handle means we are still in the parent process.
		return ctx, false, nil
	}
	// From here on we are executing inside the daemonized child.
	logrus.Info("- - - - - - - - - - - - - - -")
	logrus.Info("daemon started by colima")
	logrus.Infof("Run `pkill -F %s` to kill the daemon", info.PidFile)
	return ctx, true, nil
}
// start launches the daemon if it is not already running. The parent process
// returns immediately after forking; the daemonized child runs the given
// background processes until SIGINT/SIGTERM arrives.
func start(ctx context.Context, processes []process.Process) error {
	if status() == nil {
		logrus.Info("daemon already running, startup ignored")
		return nil
	}
	{
		ctx, child, err := daemonize()
		if err != nil {
			return err
		}
		if ctx != nil {
			// Release the daemon context in both parent and child.
			defer func() {
				_ = ctx.Release()
			}()
		}
		if !child {
			// Parent: the child carries on; nothing more to do here.
			return nil
		}
	}
	// Child: run until a termination signal cancels the context.
	ctx, stop := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM)
	defer stop()
	return RunProcesses(ctx, processes...)
}
// stop terminates a running daemon via `pkill -F <pidfile>` and polls once a
// second until the process is gone or ctx is cancelled. It is a no-op when
// the daemon is not running.
func stop(ctx context.Context) error {
	if status() != nil {
		// not running
		return nil
	}
	info := Info()
	if err := cli.CommandInteractive("pkill", "-F", info.PidFile).Run(); err != nil {
		return fmt.Errorf("error sending sigterm to daemon: %w", err)
	}
	logrus.Info("waiting for process to terminate")
	for {
		alive := status() == nil
		if !alive {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			// Poll interval between liveness checks.
			time.Sleep(time.Second * 1)
		}
	}
}
// status reports whether the daemon is running: it returns nil when the pid
// file exists and the recorded process responds to signal 0, and a
// descriptive error otherwise.
func status() error {
	info := Info()
	if _, err := os.Stat(info.PidFile); err != nil {
		return fmt.Errorf("pid file not found: %w", err)
	}
	// check if process is actually running
	p, err := os.ReadFile(info.PidFile)
	if err != nil {
		return fmt.Errorf("error reading pid file: %w", err)
	}
	// Trim surrounding whitespace so a trailing newline in the pid file does
	// not break parsing, and surface a parse failure instead of silently
	// discarding the error (the previous `pid, _ := strconv.Atoi(...)`
	// collapsed both cases into "invalid pid: 0").
	pidStr := strings.TrimSpace(string(p))
	pid, err := strconv.Atoi(pidStr)
	if err != nil || pid <= 0 {
		return fmt.Errorf("invalid pid: %q", pidStr)
	}
	process, err := os.FindProcess(pid)
	if err != nil {
		return fmt.Errorf("process not found: %v", err)
	}
	// Signal 0 checks existence/permission without delivering a signal.
	if err := process.Signal(syscall.Signal(0)); err != nil {
		return fmt.Errorf("process signal(0) returned error: %w", err)
	}
	return nil
}
const (
	pidFileName = "daemon.pid" // pid file name, relative to the daemon dir
	logFileName = "daemon.log" // log file name, relative to the daemon dir
)
// Info returns the absolute locations of the daemon's pid and log files,
// both rooted in the daemon directory.
func Info() struct {
	PidFile string
	LogFile string
} {
	base := dir()
	pidPath := filepath.Join(base, pidFileName)
	logPath := filepath.Join(base, logFileName)
	return struct {
		PidFile string
		LogFile string
	}{
		PidFile: pidPath,
		LogFile: logPath,
	}
}
// RunProcesses runs the daemon with background processes.
// NOTE: this must be called from the program entrypoint with minimal intermediary logic
// due to the creation of the daemon.
// It blocks until the context is cancelled — by a signal or by any process
// failing to start — then waits for every process goroutine to finish.
func RunProcesses(ctx context.Context, processes ...process.Process) error {
	ctx, stop := context.WithCancel(ctx)
	defer stop()
	var wg sync.WaitGroup
	wg.Add(len(processes))
	for _, bg := range processes {
		go func(bg process.Process) {
			err := bg.Start(ctx)
			if err != nil {
				logrus.Error(fmt.Errorf("error starting %s: %w", bg.Name(), err))
				// A failed process cancels the shared context, stopping its
				// siblings as well.
				stop()
			}
			wg.Done()
		}(bg)
	}
	<-ctx.Done()
	logrus.Info("terminate signal received")
	wg.Wait()
	return ctx.Err()
}
|
import net.minecraft.client.Minecraft;
import net.minecraft.client.gui.GuiMainMenu;
import net.minecraft.client.gui.GuiScreen;
import net.minecraft.util.text.TextComponentString;
import net.minecraftforge.client.event.GuiScreenEvent;
import net.minecraftforge.eventbus.api.SubscribeEvent;
/**
 * Client-side event handler that reacts to GUI screens being initialised.
 *
 * NOTE(review): {@code TextComponentString} is referenced below but is absent
 * from this file's import list — confirm the class/package for the targeted
 * Minecraft/Forge version (chat-component mappings differ between versions).
 */
public class MainMenuEvent {
    /**
     * Fired after any GUI finishes initialising; when the screen is the main
     * menu, prints a welcome line into the in-game chat overlay.
     *
     * @param event post-init GUI event carrying the screen instance
     */
    @SubscribeEvent
    public void onMainMenuOpen(GuiScreenEvent.InitGuiEvent.Post event) {
        GuiScreen gui = event.getGui();
        if (gui instanceof GuiMainMenu) {
            // Display custom message on the main menu screen
            Minecraft.getInstance().ingameGUI.getChatGUI().printChatMessage(
                new TextComponentString("Welcome to the custom main menu!"));
        }
    }
}
<!-- Contact / payment details form.
     NOTE(review): the form has no action/method attribute, so it submits as a
     GET to the current URL — the card number and expiry would appear in the
     query string and server logs. Confirm the intended handler and switch to
     method="post" over HTTPS before shipping. -->
<form>
  <label for="name">Name:</label>
  <input type="text" name="name" id="name" required><br />
  <label for="email">Email:</label>
  <input type="email" name="email" id="email" required><br />
  <label for="phone">Phone:</label>
  <input type="tel" name="phone" id="phone" required><br />
  <label for="gender">Gender:</label>
  <select name="gender" id="gender">
    <option value="male">Male</option>
    <option value="female">Female</option>
    <option value="other">Other</option>
  </select><br />
  <label for="country">Country:</label>
  <input type="text" name="country" id="country" required><br />
  <label for="city">City:</label>
  <input type="text" name="city" id="city" required><br />
  <label for="street">Street:</label>
  <input type="text" name="street" id="street" required><br />
  <label for="zip">Zip:</label>
  <input type="text" name="zip" id="zip" required><br />
  <label for="card_number">Card Number:</label>
  <input type="text" name="card_number" id="card_number" required><br />
  <label for="expiry_date">Expiry Date:</label>
  <input type="month" name="expiry_date" id="expiry_date" required><br />
  <input type="submit" value="Submit">
</form>
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Verifies that services and portals work.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
# Print the given message on stderr and abort the script with status 1.
function error() {
  local message="$*"
  echo "${message}" >&2
  exit 1
}
# Sort the given arguments numerically and emit them on a single line with
# single-space separators (used to compare pod-ID lists order-insensitively).
function sort_args() {
  printf "%s\n" "$@" | sort -n | tr '\n\r' ' ' | sed 's/  */ /g'
}
# Join args $2... with $1 between them.
# Example: join ", " x y z => x, y, z
#
# Safe to call with zero items (prints an empty line). The previous version
# ran an unconditional second `shift`, which fails when no items were passed
# and aborts the whole script under `set -o errexit` — exactly the call made
# by start_service when a service has no public IPs.
function join() {
  local sep result item
  sep=$1
  shift
  result="${1:-}"
  if [[ $# -gt 0 ]]; then
    shift
  fi
  for item; do
    result+="${sep}${item}"
  done
  echo "${result}"
}
# Services created by start_service; removed again on EXIT via do_teardown.
svcs_to_clean=()
# Stop every service recorded in svcs_to_clean (installed as the EXIT trap).
function do_teardown() {
  local svc
  for svc in "${svcs_to_clean[@]:+${svcs_to_clean[@]}}"; do
    stop_service "${svc}"
  done
}
# Args:
#   $1: service name
#   $2: service port
#   $3: service replica count
#   $4: public IPs (optional, string e.g. "1.2.3.4 5.6.7.8")
#
# Creates a ReplicationController and a Service for the serve_hostname image
# and records the service name for teardown. Fix: the container manifest
# previously contained trailing commas after the "ports" and "containers"
# arrays, which is invalid JSON (RFC 8259) and is rejected by strict parsers.
function start_service() {
  echo "Starting service '$1' on port $2 with $3 replicas"
  svcs_to_clean+=("$1")
  ${KUBECTL} create -f - << __EOF__
{
  "kind": "ReplicationController",
  "apiVersion": "v1beta1",
  "id": "$1",
  "namespace": "default",
  "desiredState": {
    "replicas": $3,
    "replicaSelector": {
      "name": "$1"
    },
    "podTemplate": {
      "desiredState": {
        "manifest": {
          "version": "v1beta2",
          "containers": [
            {
              "name": "$1",
              "image": "kubernetes/serve_hostname",
              "ports": [
                {
                  "containerPort": 9376,
                  "protocol": "TCP"
                }
              ]
            }
          ]
        }
      },
      "labels": {
        "name": "$1"
      }
    }
  }
}
__EOF__
  # Convert '1.2.3.4 5.6.7.8' => '"1.2.3.4", "5.6.7.8"'
  local ip ips_array=() public_ips
  for ip in ${4:-}; do
    ips_array+=("\"${ip}\"")
  done
  public_ips=$(join ", " "${ips_array[@]:+${ips_array[@]}}")
  ${KUBECTL} create -f - << __EOF__
{
  "kind": "Service",
  "apiVersion": "v1beta1",
  "id": "$1",
  "namespace": "default",
  "port": $2,
  "protocol": "TCP",
  "labels": {
    "name": "$1"
  },
  "selector": {
    "name": "$1"
  },
  "containerPort": 9376,
  "publicIPs": [ ${public_ips} ]
}
__EOF__
}
# Args:
#   $1: service name
#
# Best-effort teardown: each command is allowed to fail (|| true) so cleanup
# continues under `set -o errexit` even when the object is already gone.
function stop_service() {
  echo "Stopping service '$1'"
  ${KUBECFG} stop "$1" || true
  ${KUBECFG} delete "/replicationControllers/$1" || true
  ${KUBECFG} delete "/services/$1" || true
}
# Args:
#   $1: service name
#   $2: expected pod count
#
# Prints the sorted list of pod IDs labelled name=$1, retrying up to 10 times
# (3s apart) until exactly $2 pods are listed; errors out on mismatch.
function query_pods() {
  # This fails very occasionally, so retry a bit.
  pods_unsorted=()
  local i
  for i in $(seq 1 10); do
    pods_unsorted=($(${KUBECFG} \
        '-template={{range.items}}{{.id}} {{end}}' \
        -l name="$1" list pods))
    found="${#pods_unsorted[*]}"
    if [[ "${found}" == "$2" ]]; then
      break
    fi
    sleep 3
  done
  if [[ "${found}" != "$2" ]]; then
    error "Failed to query pods for $1: expected $2, found ${found}"
  fi
  # The "return" is a sorted list of pod IDs.
  sort_args "${pods_unsorted[@]}"
}
# Args:
#   $1: service name
#   $2: pod count
#
# Blocks until all $2 pods of service $1 report status "Running", polling up
# to 30 times with 3s pauses; errors out on timeout.
function wait_for_pods() {
  echo "Querying pods in $1"
  local pods_sorted=$(query_pods "$1" "$2")
  printf '\t%s\n' ${pods_sorted}
  # Container turn up on a clean cluster can take a while for the docker image
  # pulls.  Wait a generous amount of time.
  # TODO: Sometimes pods change underneath us, which makes the GET fail (404).
  # Maybe this test can be loosened and still be useful?
  pods_needed=$2
  local i
  for i in $(seq 1 30); do
    echo "Waiting for ${pods_needed} pods to become 'running'"
    pods_needed="$2"
    for id in ${pods_sorted}; do
      status=$(${KUBECFG} -template '{{.currentState.status}}' get "pods/${id}")
      if [[ "${status}" == "Running" ]]; then
        pods_needed=$((pods_needed-1))
      fi
    done
    if [[ "${pods_needed}" == 0 ]]; then
      break
    fi
    sleep 3
  done
  if [[ "${pods_needed}" -gt 0 ]]; then
    error "Pods for $1 did not come up in time"
  fi
}
# Args:
#   $1: service name
#   $2: service IP
#   $3: service port
#   $4: pod count
#   $5: pod IDs
#
# Curls the service portal from the test node until the set of hostnames it
# serves matches the expected pod list $5 (i.e. endpoints have propagated);
# errors out after 20 attempts.
function wait_for_service_up() {
  local i
  local found_pods
  for i in $(seq 1 20); do
    results=($(ssh-to-node "${test_node}" "
        set -e;
        for i in $(seq -s' ' 1 $4); do
          curl -s --connect-timeout 1 http://$2:$3;
        done | sort | uniq
        "))
    found_pods=$(sort_args "${results[@]:+${results[@]}}")
    echo "Checking if ${found_pods} == ${5}"
    if [[ "${found_pods}" == "$5" ]]; then
      break
    fi
    echo "Waiting for endpoints to propagate"
    sleep 3
  done
  if [[ "${found_pods}" != "$5" ]]; then
    error "Endpoints did not propagate in time"
  fi
}
# Args:
#   $1: service name
#   $2: service IP
#   $3: service port
#
# Polls until connecting to the portal fails (the service is torn down);
# gives up silently after 15 attempts of 2s each.
function wait_for_service_down() {
  local i
  for i in $(seq 1 15); do
    $(ssh-to-node "${test_node}" "
        curl -s --connect-timeout 2 "http://$2:$3" >/dev/null 2>&1 && exit 1 || exit 0;
        ") && break
    echo "Waiting for $1 to go down"
    sleep 2
  done
}
# Args:
#   $1: service name
#   $2: service IP
#   $3: service port
#   $4: pod count
#   $5: pod IDs
#
# Same reachability check as wait_for_service_up, but issued from inside a
# busybox container on the test node so the container-side portal rules
# (KUBE-PORTALS-CONTAINER) are exercised.
function verify_from_container() {
  results=($(ssh-to-node "${test_node}" "
      set -e;
      sudo docker pull busybox >/dev/null;
      sudo docker run busybox sh -c '
        for i in $(seq -s' ' 1 $4); do
          ok=false
          for j in $(seq -s' ' 1 10); do
            if wget -q -T 1 -O - http://$2:$3; then
              ok=true
              break
            fi
            sleep 1
          done
          if [[ \${ok} == false ]]; then
            exit 1
          fi
        done
      '")) \
      || error "testing $1 portal from container failed"
  found_pods=$(sort_args "${results[@]}")
  if [[ "${found_pods}" != "$5" ]]; then
    error -e "$1 portal failed from container, expected:\n
        $(printf '\t%s\n' $5)\n
        got:\n
        $(printf '\t%s\n' ${found_pods})
        "
  fi
}
# Tear down all started services on exit, pass or fail.
trap "do_teardown" EXIT
# Get node IP addresses and pick one as our test point.
detect-minions
test_node="${MINION_NAMES[0]}"
master="${MASTER_NAME}"
# Launch some pods and services.
svc1_name="service1"
svc1_port=80
svc1_count=3
svc1_publics="192.168.1.1 192.168.1.2"
start_service "${svc1_name}" "${svc1_port}" "${svc1_count}" "${svc1_publics}"
svc2_name="service2"
svc2_port=80
svc2_count=3
start_service "${svc2_name}" "${svc2_port}" "${svc2_count}"
# Wait for the pods to become "running".
wait_for_pods "${svc1_name}" "${svc1_count}"
wait_for_pods "${svc2_name}" "${svc2_count}"
# Get the sorted lists of pods.
svc1_pods=$(query_pods "${svc1_name}" "${svc1_count}")
svc2_pods=$(query_pods "${svc2_name}" "${svc2_count}")
# Get the portal IPs.
svc1_ip=$(${KUBECFG} -template '{{.portalIP}}' get "services/${svc1_name}")
test -n "${svc1_ip}" || error "Service1 IP is blank"
svc2_ip=$(${KUBECFG} -template '{{.portalIP}}' get "services/${svc2_name}")
test -n "${svc2_ip}" || error "Service2 IP is blank"
if [[ "${svc1_ip}" == "${svc2_ip}" ]]; then
  error "Portal IPs conflict: ${svc1_ip}"
fi
#
# Test 1: Prove that the service portal is alive.
#
echo "Test 1: Prove that the service portal is alive."
echo "Verifying the portals from the host"
wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
    "${svc1_count}" "${svc1_pods}"
for ip in ${svc1_publics}; do
  wait_for_service_up "${svc1_name}" "${ip}" "${svc1_port}" \
      "${svc1_count}" "${svc1_pods}"
done
wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
    "${svc2_count}" "${svc2_pods}"
echo "Verifying the portals from a container"
verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
    "${svc1_count}" "${svc1_pods}"
for ip in ${svc1_publics}; do
  verify_from_container "${svc1_name}" "${ip}" "${svc1_port}" \
      "${svc1_count}" "${svc1_pods}"
done
verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
    "${svc2_count}" "${svc2_pods}"
#
# Test 2: Bounce the proxy and make sure the portal comes back.
#
echo "Test 2: Bounce the proxy and make sure the portal comes back."
echo "Restarting kube-proxy"
restart-kube-proxy "${test_node}"
echo "Verifying the portals from the host"
wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
    "${svc1_count}" "${svc1_pods}"
wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
    "${svc2_count}" "${svc2_pods}"
echo "Verifying the portals from a container"
verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
    "${svc1_count}" "${svc1_pods}"
verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
    "${svc2_count}" "${svc2_pods}"
#
# Test 3: Stop one service and make sure it is gone.
#
echo "Test 3: Stop one service and make sure it is gone."
stop_service "${svc1_name}"
wait_for_service_down "${svc1_name}" "${svc1_ip}" "${svc1_port}"
#
# Test 4: Bring up another service.
# TODO: Actually add a test to force re-use.
#
echo "Test 4: Bring up another service."
svc3_name="service3"
svc3_port=80
svc3_count=3
start_service "${svc3_name}" "${svc3_port}" "${svc3_count}"
# Wait for the pods to become "running".
wait_for_pods "${svc3_name}" "${svc3_count}"
# Get the sorted lists of pods.
svc3_pods=$(query_pods "${svc3_name}" "${svc3_count}")
# Get the portal IP.
svc3_ip=$(${KUBECFG} -template '{{.portalIP}}' get "services/${svc3_name}")
test -n "${svc3_ip}" || error "Service3 IP is blank"
echo "Verifying the portals from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
    "${svc3_count}" "${svc3_pods}"
echo "Verifying the portals from a container"
verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
    "${svc3_count}" "${svc3_pods}"
#
# Test 5: Remove the iptables rules, make sure they come back.
#
echo "Test 5: Remove the iptables rules, make sure they come back."
echo "Manually removing iptables rules"
# kube-proxy is expected to notice the flushed chains and re-install them.
ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-HOST"
ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-CONTAINER"
echo "Verifying the portals from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
    "${svc3_count}" "${svc3_pods}"
echo "Verifying the portals from a container"
verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
    "${svc3_count}" "${svc3_pods}"
#
# Test 6: Restart the master, make sure portals come back.
#
echo "Test 6: Restart the master, make sure portals come back."
echo "Restarting the master"
ssh-to-node "${master}" "sudo /etc/init.d/kube-apiserver restart"
sleep 5
echo "Verifying the portals from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
    "${svc3_count}" "${svc3_pods}"
echo "Verifying the portals from a container"
verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
    "${svc3_count}" "${svc3_pods}"
#
# Test 7: Bring up another service, make sure it does not re-use Portal IPs.
#
echo "Test 7: Bring up another service, make sure it does not re-use Portal IPs."
svc4_name="service4"
svc4_port=80
svc4_count=3
start_service "${svc4_name}" "${svc4_port}" "${svc4_count}"
# Wait for the pods to become "running".
wait_for_pods "${svc4_name}" "${svc4_count}"
# Get the sorted lists of pods.
svc4_pods=$(query_pods "${svc4_name}" "${svc4_count}")
# Get the portal IP.
svc4_ip=$(${KUBECFG} -template '{{.portalIP}}' get "services/${svc4_name}")
test -n "${svc4_ip}" || error "Service4 IP is blank"
# svc1 was stopped above, so only the still-live services are compared.
if [[ "${svc4_ip}" == "${svc2_ip}" || "${svc4_ip}" == "${svc3_ip}" ]]; then
  error "Portal IPs conflict: ${svc4_ip}"
fi
echo "Verifying the portals from the host"
wait_for_service_up "${svc4_name}" "${svc4_ip}" "${svc4_port}" \
    "${svc4_count}" "${svc4_pods}"
echo "Verifying the portals from a container"
verify_from_container "${svc4_name}" "${svc4_ip}" "${svc4_port}" \
    "${svc4_count}" "${svc4_pods}"
# TODO: test createExternalLoadBalancer
exit 0
|
#!/bin/bash
# Generate a pkg-config (.pc) file for the Wad64 library.
#
# Usage: <script> <install-prefix> <output-file>
#
# Fix: validate the two required arguments (previously a missing argument
# produced an "ambiguous redirect" from an empty $output) and quote the
# output path so it may contain spaces.
if [ "$#" -ne 2 ]; then
	echo "Usage: $0 <install-prefix> <output-file>" >&2
	exit 1
fi
prefix=$1
output=$2
cat << pkgconfig > "$output"
prefix=$prefix
exec_prefix=\${prefix}
libdir=\${prefix}/lib
includedir=\${prefix}/include
Name: Wad64
Description: Simple library for working with Wad64 files
Version: 1.0.0
Libs: -L\${libdir} -lwad64
Cflags: -I\${includedir}
pkgconfig
|
// Type declarations for value mappings, exported as a single raw source
// string (presumably registered with an editor/language service as an extra
// lib — confirm against the consumer). NOTE(review): the template literal is
// runtime data; do not reformat its contents.
export default `
declare enum MappingType {
ValueToText = 1,
RangeToText = 2,
}
interface BaseMap {
id: number;
text: string;
type: MappingType;
}
declare type ValueMapping = ValueMap | RangeMap;
interface ValueMap extends BaseMap {
value: string;
}
interface RangeMap extends BaseMap {
from: string;
to: string;
}
`;
|
#!/bin/bash
###############################################################################
# CALL PAYMENT REQUEST AND REGISTER A MERCHANT #
###############################################################################
# This script calls the payment request API in ING's sandbox environment and- #
# -submits a post request to the endpoint "payment-requests/registrations". #
# You must request an application access token to run this script. #
# Please update the variables "accessToken", "certPath". You can find the- #
# other endpoints to test in the sandbox under "sandbox" section of the API- #
# documentation #
###############################################################################
# The registration response is written here for use by later steps.
outputFile=premium_02_CallPaymentRequestRegistrationResponse.json
# read from config and set variables
# shellcheck disable=SC2154,SC1090
{
	rootPath="./../../" # path to root of the repository
	. $rootPath"apps/active.env" # read what environment is active
	activePath=$rootPath"apps/$active" # store active path
	config="$activePath/config-premium.env" # config file for sandbox app
	. "$config" # source config from file
	keyId=$keyId # map keyId from config (NOTE(review): self-assignment no-op, kept for symmetry with the other mappings)
	httpHost=$baseURL # map host
	signingKeyPath=$rootPath$signingKeyFile # map signing private key file
	tlsCertificatePath=$rootPath$tlsCertificateFile # map tls certificate file
	tlsKeyPath=$rootPath$tlsKeyFile # map tls private key file
}
# httpMethod value must be lower case: it is embedded in the signed
# "(request-target)" line below.
httpMethod="post"
reqPath="/payment-requests/registrations"
# Generated value of application access token from the previous step, read
# from stdin. Please note that the access token expires in 15 minutes.
read -r accessToken
# Body content as provided under "Sandbox" section of the documentation for "Payment Request API".
# This content is for testing the API response and should not be edited:
# the Digest header below is computed over these exact bytes.
payload='{
"merchantId": "001234567",
"merchantSubId": "123456",
"merchantName": "Company BV",
"merchantIBAN": "NL26INGB0003275339",
"dailyReceivableLimit": {
"value": 50000.00,
"currency": "EUR"
},
"allowIngAppPayments": "Y"
}'
payloadDigest=$(echo -n "$payload" | openssl dgst -binary -sha256 | openssl base64)
digest=SHA-256=$payloadDigest
# CALCULATE DATE (RFC 7231 format, forced to an English locale).
reqDate=$(LC_TIME=en_US.UTF-8 date -u "+%a, %d %b %Y %H:%M:%S GMT")
# signingString must be declared exactly as shown below in separate lines
signingString="(request-target): $httpMethod $reqPath
date: $reqDate
digest: $digest"
signature=$(echo -n "$signingString" | openssl dgst -sha256 -sign "$signingKeyPath" -passin "pass:changeit" | openssl base64 -A)
# Curl request method must be in uppercase e.g "POST", "GET"
curl -X POST "${httpHost}${reqPath}" \
	-H "Accept: application/json" \
	-H "Content-Type: application/json" \
	-H "Digest: ${digest}" \
	-H "Date: ${reqDate}" \
	-H "Authorization: Bearer ${accessToken}" \
	-H "Signature: keyId=\"$keyId\",algorithm=\"rsa-sha256\",headers=\"(request-target) date digest\",signature=\"$signature\"" \
	-d "${payload}" \
	--cert "$tlsCertificatePath" \
	--key "$tlsKeyPath" >$outputFile
cat $outputFile
|
<reponame>fjmhzyh/create-react-app-multiPage<gh_stars>0
import React, { Component } from 'react';
import ReactDOM from 'react-dom';
import './index.scss';
import { List, Toast } from 'antd-mobile';
import { Table } from 'antd';
import 'antd/dist/antd.css';
import { getAlarmByBtgNumber } from 'services/home/home';
import { statusTypeFormatter, deviceTypeFormatter, alarmLevelFormatter } from 'utils/dataFormatter'
import moment from 'moment'
const Item = List.Item;
// Webview id of the current page, filled in once the 5+ (plus) runtime is
// ready. NOTE(review): shadowed by the local `id` inside plusReady below —
// confirm this module-level copy is still used anywhere.
let id = null;
mui.plusReady(function(){
var self = plus.webview.currentWebview();
id = self.id
});
// Detail page for an alarmed battery group: shows the group number and a
// table of its alarms, loaded from the backend once the mui/plus webview
// runtime is ready.
// NOTE(review): `Toast.info` is called in fetchData but `Toast` is not in
// this file's import list — confirm it is imported from antd-mobile.
class AlarmBtgDetail extends Component {
constructor(props){
super(props);
this.state={
batteryGroupNumber: '',
alarmList:[],
// Colors indexed by (alarmLevel - 1): red, orange, green, blue.
colorLevel: ["#f00","#E6A23C","#5BC726","#1890FF"]
}
}
componentDidMount(){
console.log('componentDidMount')
mui.plusReady(this.plusReady);
}
// Read the battery group number passed by the opening webview, then load.
plusReady = ()=>{
let self = plus.webview.currentWebview();
let id = self.batteryGroupNumber;
console.log('plusReady', self)
this.fetchData(id);
}
// Query the alarm list for this battery group from the API (first page, 20 rows).
fetchData = (id)=>{
let me = this;
let data = {
pageNo: 1,
size: 20,
batteryGroupNumber: id
}
getAlarmByBtgNumber(data).then(res=>{
if(res.success){
console.log('查询结果', res)
me.setState({
batteryGroupNumber: id,
alarmList: res.data.alarmList
})
} else {
Toast.info(res.msg)
}
}, (err)=>{
console.log('获取告警列表失败', err)
})
}
// Preload and open the shared alarm-detail page for the clicked row.
handleLinkDetail = (item)=>{
let page = mui.preload({
url: '../../../common/alarmDetail/alarmDetail.html',
id:'alarmDetail',
});
page.show()
mui.fire(page, 'show', {
alarmNumber: item.alarmNumber,
deviceNumber:item.deviceNumber,
deviceType:item.deviceType
})
}
// antd Table column definitions; commented-out columns are kept for reference.
columns = [
{
key: '1',
title: '序号',
render:(text,record,index)=>`${(index+1)}`,
align: 'center',
width: 60,
fixed: 'left'
},
{
title: '告警编号',
dataIndex: 'alarmNumber',
align: 'left',
width: 220,
render: (text,item) => (
<div className="detail" onClick={() => {this.handleLinkDetail(item)}}>{text? text: '-'}</div>
)
},
// {
//   title: '告警设备编号',
//   dataIndex: 'deviceNumber',
//   align: 'center',
//   width: 210,
//   render: (text,item) =>{
//     return text? text: '-'
//   }
// },
// {
//   title: '告警设备类型',
//   dataIndex: 'deviceType',
//   align: 'center',
//   render: val => <span>{deviceTypeFormatter(val)}</span>
// },
{
title: '设备状态',
dataIndex: 'deviceStatus',
align: 'left',
width: 110,
render: val => <span>{statusTypeFormatter(val)}</span>
},
{
title: '告警参数',
dataIndex: 'alarmParameter',
align: 'left',
width: 160,
},
// {
//   title: '参数值',
//   dataIndex: 'Params',
//   align: 'center',
// },
{
title: '告警等级',
dataIndex: 'alarmLevel',
align: 'left',
width: 120,
render: val => <span style={{color:`${this.state.colorLevel[val-1]}`}}>{alarmLevelFormatter(val)}</span>
},
{
title: '告警发生时间',
dataIndex: 'startTime',
align: 'left',
render: val => <span>{moment(val).format('YYYY-MM-DD HH:mm:ss')}</span>
},
// {
//   title: '操作',
//   dataIndex: 'handler',
//   align: 'left',
//   render: (text,item) => (
//     <span>
//       <span className="detail" onClick={() => {this.handleLinkDetail(item)}}>查看详情</span>
//     </span>
//   )
// },
];
render() {
let columns = this.columns;
let list = this.state.alarmList;
let batteryGroupNumber = this.state.batteryGroupNumber;
return (
<div className="App">
<header className="mui-bar mui-bar-nav">
<a className="mui-action-back mui-icon mui-icon-left-nav mui-pull-left"></a>
<h1 className="mui-title">告警电池组详情页</h1>
</header>
<div className="mui-content">
<List className="my-list" renderHeader={() => '电池组编号'} >
<Item key='1'>{batteryGroupNumber}</Item>
</List>
<List className="my-list" renderHeader={() => '告警列表'} >
</List>
<Table columns={columns} dataSource={list} rowKey = {record=>record.alarmNumber}
scroll={{ x: 860, y:window.innerHeight }} pagination={false} />
</div>
</div>
);
}
}
// Mount the page component into the host page's #root element.
ReactDOM.render(<AlarmBtgDetail />, document.getElementById('root'));
import hashlib
import secrets

from django.contrib.auth.models import User

from .models import Token
def generate_token(username, email, request):
    """Create and persist an auth token for the given user.

    The original implementation hashed ``username + email`` with unsalted
    MD5, making the token deterministic: anyone who knows a user's username
    and email can recompute it. A random salt from the ``secrets`` CSPRNG
    now makes each token unpredictable. The MD5 digest length (32 hex chars)
    is retained so any fixed-width ``Token.token`` column keeps working.
    TODO(review): migrate to SHA-256 once the Token.token column width is
    confirmed to hold 64 characters.

    Args:
        username: username of an existing ``User``.
        email: the user's email address (mixed into the token material).
        request: HTTP request (unused here; kept for interface compatibility).

    Returns:
        The newly created ``Token`` instance.

    Raises:
        User.DoesNotExist: if no user with ``username`` exists.
    """
    # Random salt => token cannot be recomputed from public user data.
    salt = secrets.token_hex(16)
    token_hash = hashlib.md5((username + email + salt).encode()).hexdigest()
    # Create a new token object and associate it with the user
    user = User.objects.get(username=username)  # Assuming the User model is used for authentication
    token = Token.objects.create(token=token_hash, user=user)
    return token
/* mbed Microcontroller Library
* Copyright (c) 2013 Nordic Semiconductor
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MBED_PINNAMES_H
#define MBED_PINNAMES_H
#include "cmsis.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Logical direction of a GPIO pin. */
typedef enum {
    PIN_INPUT,
    PIN_OUTPUT
} PinDirection;
#define PORT_SHIFT 3
/* Pin map for the micro:bit (nRF51): raw MCU port-0 pins plus the aliases
 * used by the edge connector, LED matrix, buttons and on-board sensors. */
typedef enum {
    // MCU PINS
    P0_0 = 0,
    P0_1 = 1,
    P0_2 = 2,
    P0_3 = 3,
    P0_4 = 4,
    P0_5 = 5,
    P0_6 = 6,
    P0_7 = 7,
    P0_8 = 8,
    P0_9 = 9,
    P0_10 = 10,
    P0_11 = 11,
    P0_12 = 12,
    P0_13 = 13,
    P0_14 = 14,
    P0_15 = 15,
    P0_16 = 16,
    P0_17 = 17,
    P0_18 = 18,
    P0_19 = 19,
    P0_20 = 20,
    P0_21 = 21,
    P0_22 = 22,
    P0_23 = 23,
    P0_24 = 24,
    P0_25 = 25,
    P0_26 = 26,
    P0_27 = 27,
    P0_28 = 28,
    P0_29 = 29,
    P0_30 = 30,
    // MICROBIT EDGE CONNECTOR PINS
    P0 = P0_3,
    P1 = P0_2,
    P2 = P0_1,
    P3 = P0_4,
    P4 = P0_5,
    P5 = P0_17,
    P6 = P0_12,
    P7 = P0_11,
    P8 = P0_18,
    P9 = P0_10,
    P10 = P0_6,
    P11 = P0_26,
    P12 = P0_20,
    P13 = P0_23,
    P14 = P0_22,
    P15 = P0_21,
    P16 = P0_16,
    P19 = P0_0,
    P20 = P0_30,
    // PADS (large edge-connector rings)
    PAD3 = P0_1,
    PAD2 = P0_2,
    PAD1 = P0_3,
    // LED MATRIX COLS
    COL1 = P0_4,
    COL2 = P0_5,
    COL3 = P0_6,
    COL4 = P0_7,
    COL5 = P0_8,
    COL6 = P0_9,
    COL7 = P0_10,
    COL8 = P0_11,
    COL9 = P0_12,
    // LED MATRIX ROWS
    ROW1 = P0_13,
    ROW2 = P0_14,
    ROW3 = P0_15,
    // NORMAL PIN (NO SPECIFIED FUNCTIONALITY)
    //PIN_16
    // BUTTON A
    BUTTON_A = P0_17,
    // NORMAL PIN (NO SPECIFIED FUNCTIONALITY)
    //PIN_18
    // TARGET RESET
    TGT_NRESET = P0_19,
    // NORMAL PIN (NO SPECIFIED FUNCTIONALITY)
    //PIN_20
    // MASTER OUT SLAVE IN
    MOSI = P0_21,
    // MASTER IN SLAVE OUT
    MISO = P0_22,
    // SERIAL CLOCK
    SCK = P0_23,
    // RX AND TX PINS
    TGT_TX = P0_24,
    TGT_RX = P0_25,
    // BUTTON B
    BUTTON_B = P0_26,
    // ACCELEROMETER INTERRUPT PINS (MMA8653FC)
    ACCEL_INT2 = P0_27,
    ACCEL_INT1 = P0_28,
    // MAGNETOMETER INTERRUPT PIN (MAG3110)
    MAG_INT1 = P0_29,
    // Not connected
    NC = (int)0xFFFFFFFF,
    RX_PIN_NUMBER = TGT_RX,
    TX_PIN_NUMBER = TGT_TX,
    CTS_PIN_NUMBER = 31, //unused ** REQUIRES A PROPER FIX **
    RTS_PIN_NUMBER = 31, //unused
    // mBed interface Pins
    USBTX = TX_PIN_NUMBER,
    USBRX = RX_PIN_NUMBER,
    LED1 = PAD1,
    LED2 = PAD2,
    LED3 = PAD3,
    LED4 = P0_16,
    // SDA (SERIAL DATA LINE)
    I2C_SDA0 = P0_30,
    // SCL (SERIAL CLOCK LINE)
    I2C_SCL0 = P0_0
} PinName;
/* Pull-resistor configuration. The values 0/1/3 (2 unused) presumably match
 * the nRF51 GPIO PIN_CNF.PULL field encoding — confirm against the SoC
 * reference manual. */
typedef enum {
    PullNone = 0,
    PullDown = 1,
    PullUp = 3,
    PullDefault = PullUp
} PinMode;
#ifdef __cplusplus
}
#endif
#endif
|
import React from 'react'
import { Card, CardBody, CardHeader, Modal, ModalBody, Nav, NavItem, NavLink, TabContent, TabPane } from 'reactstrap'
import AddButtons from '../../common/AddButtons'
import ImageForm from './ImageForm'
import DetailsForm from './DetailsForm'
import CustomFieldsForm from '../../common/CustomFieldsForm'
import { translations } from '../../utils/_translations'
import Variations from './Variations'
import Features from './Features'
import ProductAttribute from './ProductAttribute'
import FileUploads from '../../documents/FileUploads'
import ProductModel from '../../models/ProductModel'
import DefaultModalHeader from '../../common/ModalHeader'
import DefaultModalFooter from '../../common/ModalFooter'
class AddProduct extends React.Component {
constructor (props) {
super(props)
this.productModel = new ProductModel(null)
this.initialState = this.productModel.fields
this.state = this.initialState
this.toggle = this.toggle.bind(this)
this.hasErrorFor = this.hasErrorFor.bind(this)
this.renderErrorFor = this.renderErrorFor.bind(this)
this.handleInput = this.handleInput.bind(this)
this.handleMultiSelect = this.handleMultiSelect.bind(this)
this.handleFileChange = this.handleFileChange.bind(this)
this.handleCheck = this.handleCheck.bind(this)
this.onChangeHandler = this.onChangeHandler.bind(this)
this.handleVariations = this.handleVariations.bind(this)
this.handleFeatures = this.handleFeatures.bind(this)
}
componentDidMount () {
if (Object.prototype.hasOwnProperty.call(localStorage, 'productForm')) {
const storedValues = JSON.parse(localStorage.getItem('productForm'))
this.setState({ ...storedValues }, () => console.log('new state', this.state))
}
}
hasErrorFor (field) {
return !!this.state.errors[field]
}
renderErrorFor (field) {
if (this.hasErrorFor(field)) {
return (
<span className='invalid-feedback'>
<strong>{this.state.errors[field][0]}</strong>
</span>
)
}
}
toggleTab (tab) {
if (this.state.activeTab !== tab) {
this.setState({ activeTab: tab })
}
}
handleVariations (variations) {
this.setState({ variations: variations }, () => console.log('variations', this.state.variations))
}
handleFeatures (features) {
this.setState({ features: features }, () => console.log('features', this.state.features))
}
handleClick () {
const formData = new FormData()
formData.append('cover', this.state.cover)
if (this.state.image && this.state.image.length) {
for (let x = 0; x < this.state.image.length; x++) {
formData.append('image[]', this.state.image[x])
}
}
formData.append('name', this.state.name)
formData.append('description', this.state.description)
formData.append('variations', JSON.stringify(this.state.variations))
formData.append('features', JSON.stringify(this.state.features))
formData.append('price', this.state.price)
formData.append('is_featured', this.state.is_featured)
formData.append('cost', this.state.cost)
formData.append('quantity', this.state.quantity)
formData.append('sku', this.state.sku)
formData.append('length', this.state.length)
formData.append('width', this.state.width)
formData.append('height', this.state.height)
formData.append('weight', this.state.weight)
formData.append('mass_unit', this.state.mass_unit)
formData.append('distance_unit', this.state.distance_unit)
formData.append('company_id', this.state.company_id)
formData.append('brand_id', this.state.brand_id)
formData.append('category', this.state.selectedCategories)
formData.append('notes', this.state.notes)
formData.append('assigned_to', this.state.assigned_to)
formData.append('custom_value1', this.state.custom_value1)
formData.append('custom_value2', this.state.custom_value2)
formData.append('custom_value3', this.state.custom_value3)
formData.append('custom_value4', this.state.custom_value4)
this.productModel.save(formData).then(response => {
if (!response) {
this.setState({ errors: this.productModel.errors, message: this.productModel.error_message })
return
}
this.props.products.push(response)
this.props.action(this.props.products)
this.setState(this.initialState)
localStorage.removeItem('productForm')
})
}
handleCheck () {
this.setState({ is_featured: !this.state.is_featured }, () => localStorage.setItem('productForm', JSON.stringify(this.state)))
}
handleFileChange (e) {
this.setState({
[e.target.name]: e.target.files[0]
})
}
onChangeHandler (e) {
// if return true allow to setState
this.setState({
[e.target.name]: e.target.files
}, () => localStorage.setItem('productForm', JSON.stringify(this.state)))
}
handleMultiSelect (e) {
this.setState({ selectedCategories: Array.from(e.target.selectedOptions, (item) => item.value) }, () => localStorage.setItem('productForm', JSON.stringify(this.state)))
}
handleInput (e) {
const value = e.target.type === 'checkbox' ? e.target.checked : e.target.value
this.setState({
[e.target.name]: value
}, () => localStorage.setItem('productForm', JSON.stringify(this.state)))
}
toggle () {
this.setState({
modal: !this.state.modal,
errors: []
}, () => {
if (!this.state.modal) {
this.setState(this.initialState, () => localStorage.removeItem('productForm'))
}
})
}
render () {
const theme = !Object.prototype.hasOwnProperty.call(localStorage, 'dark_theme') || (localStorage.getItem('dark_theme') && localStorage.getItem('dark_theme') === 'true') ? 'dark-theme' : 'light-theme'
return (
<React.Fragment>
<AddButtons toggle={this.toggle}/>
<Modal size="lg" isOpen={this.state.modal} toggle={this.toggle} className={this.props.className}>
<DefaultModalHeader toggle={this.toggle} title={translations.add_product}/>
<ModalBody className={theme}>
<Nav tabs>
<NavItem>
<NavLink
className={this.state.activeTab === '1' ? 'active' : ''}
onClick={() => {
this.toggleTab('1')
}}>
{translations.details}
</NavLink>
</NavItem>
<NavItem>
<NavLink
className={this.state.activeTab === '2' ? 'active' : ''}
onClick={() => {
this.toggleTab('2')
}}>
{translations.images}
</NavLink>
</NavItem>
<NavItem>
<NavLink
className={this.state.activeTab === '3' ? 'active' : ''}
onClick={() => {
this.toggleTab('3')
}}>
{translations.variations}
</NavLink>
</NavItem>
<NavItem>
<NavLink
className={this.state.activeTab === '4' ? 'active' : ''}
onClick={() => {
this.toggleTab('4')
}}>
{translations.attributes}
</NavLink>
</NavItem>
<NavItem>
<NavLink
className={this.state.activeTab === '5' ? 'active' : ''}
onClick={() => {
this.toggleTab('5')
}}>
{translations.features}
</NavLink>
</NavItem>
<NavItem>
<NavLink
className={this.state.activeTab === '6' ? 'active' : ''}
onClick={() => {
this.toggleTab('6')
}}>
{translations.documents}
</NavLink>
</NavItem>
</Nav>
<TabContent activeTab={this.state.activeTab}>
<TabPane tabId="1">
<DetailsForm errors={this.state.errors} handleInput={this.handleInput}
product={this.state}
handleMultiSelect={this.handleMultiSelect}
categories={this.props.categories}
selectedCategories={this.state.selectedCategories}
companies={this.state.companies}
handleCheck={this.handleCheck}/>
<CustomFieldsForm handleInput={this.handleInput}
custom_value1={this.state.custom_value1}
custom_value2={this.state.custom_value2}
custom_value3={this.state.custom_value3}
custom_value4={this.state.custom_value4}
custom_fields={this.props.custom_fields}/>
</TabPane>
<TabPane tabId="2">
<ImageForm errors={this.state.errors} images={this.state.images}
deleteImage={null} handleFileChange={this.handleFileChange}
onChangeHandler={this.onChangeHandler}/>
</TabPane>
<TabPane tabId="3">
<Variations variations={this.state.variations} onChange={this.handleVariations}/>
</TabPane>
<TabPane tabId="4">
<Card>
<CardHeader>{translations.attributes}</CardHeader>
<CardBody>
<ProductAttribute errors={this.state.errors} handleInput={this.handleInput}
product={this.state}/>
</CardBody>
</Card>
</TabPane>
<TabPane tabId="5">
<Features features={this.state.features} onChange={this.handleFeatures}/>
</TabPane>
<TabPane tabId="6">
<Card>
<CardHeader>{translations.documents}</CardHeader>
<CardBody>
<FileUploads entity_type="Product" entity={this.state}
user_id={this.state.user_id}/>
</CardBody>
</Card>
</TabPane>
</TabContent>
</ModalBody>
<DefaultModalFooter show_success={true} toggle={this.toggle}
saveData={this.handleClick.bind(this)}
loading={false}/>
</Modal>
</React.Fragment>
)
}
}
export default AddProduct
|
#!/bin/bash
set -e

# Run the generation script against the second-to-last migration
# (tail -2 | head -1 picks the penultimate line of the listing).
FROM="$(sh ./list.migrations.sh | tail -2 | head -1)"

# Fix: quote the expansion so a migration name containing whitespace (or an
# empty result) is passed as a single, intact argument.
./g.script.sh "$FROM"
|
#!/bin/sh
# Render the merged (base + staging override) compose file, then deploy it
# as the "rss-feed-staging" swarm stack.
# Fix: fail fast — without set -e a failed `config` step would still deploy
# a stale docker-compose.processed.yml.
set -e

docker-compose -f /drone/src/docker-compose.yml -f /drone/src/docker-compose.staging.yml config >/drone/src/docker-compose.processed.yml
docker stack deploy -c /drone/src/docker-compose.processed.yml rss-feed-staging
|
# This maps to cli/main.py
import os
import pytest
from click.testing import CliRunner
import osa.cli
# Testing tools
class Command(object):
    """Declarative description of a CLI command under test.

    Bundles the click entrypoint with the argument lists and subcommand
    names that the tests expect to be accepted or rejected.
    """

    def __init__(self, entrypoint=None,
                 valid_args=None, invalid_args=None,
                 valid_subcmds=None, invalid_subcmds=None):
        # Store every spec attribute verbatim on the instance.
        for attr, value in (('entrypoint', entrypoint),
                            ('valid_args', valid_args),
                            ('invalid_args', invalid_args),
                            ('valid_subcmds', valid_subcmds),
                            ('invalid_subcmds', invalid_subcmds)):
            setattr(self, attr, value)
# Spec for the top-level `osa` CLI: `--debug` is a global option,
# `--oadir` belongs only to subcommands, and exactly these two
# subcommands should be advertised in the usage text.
maincli = Command(entrypoint=osa.cli.entrypoint,
                  valid_args=[
                      ['--debug'],
                  ],
                  invalid_args=[
                      ['--oadir', './oa'],
                  ],
                  valid_subcmds=[
                      'static_inventory',
                      'dynamic_inventory',
                  ],
                  invalid_subcmds=['babar'])

# Spec for the `static_inventory` subcommand. No entrypoint of its own:
# it is always invoked through maincli.entrypoint.
staticinventorycli = Command(valid_args=[
                                 ["--oadir", './oa'],
                                 ["--workdir", './work']
                             ],
                             invalid_args=[
                                 ['--debug']
                             ],
                             valid_subcmds=[
                                 'generate'
                             ],
                             invalid_subcmds=[
                                 'invert'
                             ])
# Tests definition
def test_import():
    """ Tests imports """
    # Reaching this assert at all proves the module-level `import osa.cli`
    # succeeded during collection; nothing else to check.
    assert True
def test_usage(clickrunner):
    """Invoking the bare entrypoint succeeds and prints usage text."""
    result = clickrunner.invoke(maincli.entrypoint)
    assert result.exit_code == 0
    assert "Usage" in result.output
    # Every real subcommand must be advertised; bogus ones must not appear.
    for subcmd in maincli.valid_subcmds:
        assert subcmd in result.output
    for subcmd in maincli.invalid_subcmds:
        assert subcmd not in result.output
def test_validargs(clickrunner):
    """A valid global option without a subcommand still demands a command."""
    for arglist in maincli.valid_args:
        outcome = clickrunner.invoke(maincli.entrypoint, arglist)
        assert outcome.exit_code == 2
        assert "Missing command" in outcome.output
def test_invalidargs(clickrunner):
    """Unknown global options are rejected by click with exit code 2."""
    for arglist in maincli.invalid_args:
        outcome = clickrunner.invoke(maincli.entrypoint, arglist)
        assert outcome.exit_code == 2
        assert "no such option" in outcome.output
# static inventory
def test_usage_staticinventory(clickrunner):
    """`static_inventory` with no arguments shows its own usage text."""
    result = clickrunner.invoke(maincli.entrypoint, ['static_inventory'])
    assert result.exit_code == 0
    assert "Usage" in result.output
    # Its own subcommands are listed; unknown names are not.
    for subcmd in staticinventorycli.valid_subcmds:
        assert subcmd in result.output
    for subcmd in staticinventorycli.invalid_subcmds:
        assert subcmd not in result.output
<filename>src/modelo/datos/VO/LogroConseguidoVO.java
package modelo.datos.VO;
/**
* @author <NAME>
*
*/
public class LogroConseguidoVO {
private String usuario;
private String logro;
/**
* @param usuario
* @param logro
*
* Construye el objeto LogroConseguido con TODOS los parametros
*/
public LogroConseguidoVO(String usuario, String logro) {
super();
this.usuario = usuario;
this.logro = logro;
}
/**
* Constructor de objeto vacio
*/
public LogroConseguidoVO() {}
@Override
public String toString() {
return "LogroConseguidoVO {\n\tusuario: " + usuario + "\n\tlogro: " + logro + "\n}";
}
public String toSQLInsert() {
return "INSERT INTO logroConseguido (usuario, id_logro) VALUES (\"" + usuario + "\", \"" + logro + "\");";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((logro == null) ? 0 : logro.hashCode());
result = prime * result + ((usuario == null) ? 0 : usuario.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof LogroConseguidoVO)) {
return false;
}
LogroConseguidoVO other = (LogroConseguidoVO) obj;
if (logro == null) {
if (other.logro != null) {
return false;
}
} else if (!logro.equals(other.logro)) {
return false;
}
if (usuario == null) {
if (other.usuario != null) {
return false;
}
} else if (!usuario.equals(other.usuario)) {
return false;
}
return true;
}
public String getUsuario() {
return usuario;
}
public void setUsuario(String usuario) {
this.usuario = usuario;
}
public String getLogro() {
return logro;
}
public void setLogro(String logro) {
this.logro = logro;
}
}
|
#include "Misc.h"
#include <Info/Info/Database.h>
#include "../../GameState.h"
#include "../../State/States.h"
#include "../Player.h"
using namespace Lunia::XRated::Database::Info;
namespace Lunia { namespace XRated { namespace Logic {
// Misc: a damageable stage prop (as opposed to a character/NPC actor).
Misc::Misc() : Actor(Lunia::XRated::Constants::ObjectType::Misc)
{
    objectData.Name = L"Misc"; // placeholder; overwritten by LoadStatus()
}
// Applies a single hit to the prop. A Misc ignores the numeric damage
// arguments entirely: every call costs exactly 1 Hp. When the killing blow
// lands (Hp reaches exactly 0) the prop's configured item drops are rolled.
// Always returns true (the hit is always accepted).
bool Misc::DoDamage(Object* who, uint32, float, float, float, Constants::DamageType, uint32 /*stateID*/, Constants::AttackStateType /*attackType*/, unsigned int& sFlag)
{
    --status.Hp;

    // Broadcast the Hp change, crediting the correct source: projectiles
    // credit their creator; actor-like objects credit themselves.
    if ( !who ) {
        stageData->StatusChanged(objectData.GameObjectSerial, objectData.Position, objectData.Direction, status.Hp,
            status.Mp, 0, 0);
    } else if ( who->GetType() == Lunia::XRated::Constants::ObjectType::Projectile ) {
        stageData->StatusChanged(objectData.GameObjectSerial, objectData.Position, objectData.Direction, status.Hp,
            status.Mp, ((Projectile*)who)->GetCreatorSerial(), who->GetNameHash(), sFlag);
    } else if (who->GetType() == Lunia::XRated::Constants::ObjectType::Player ||
        who->GetType() == Lunia::XRated::Constants::ObjectType::NonPlayer ||
        who->GetType() == Lunia::XRated::Constants::ObjectType::Vehicle ||
        who->GetType() == Lunia::XRated::Constants::ObjectType::Structure)
    {
        stageData->StatusChanged(objectData.GameObjectSerial, objectData.Position, objectData.Direction, status.Hp,
            status.Mp, who->GetSerial(), ((Actor*)who)->GetActionNameHash(), sFlag);
    } else {
        // Unknown attacker type: Hp change applied but not broadcast.
    }

    // Exactly-once drop logic: fires only on the transition to 0, so
    // further hits on an already-destroyed prop do not re-roll drops.
    if (status.Hp == 0) {
        // Four fixed drop offsets around the prop (15 units on each axis).
        float3 posRevision[4];
        posRevision[0].SetVector(0, 0, -15);
        posRevision[1].SetVector(0, 0, 15);
        posRevision[2].SetVector(15, 0, 0);
        posRevision[3].SetVector(-15, 0, 0);
        float r;
        float prob=0;

        if(info->BonusBox) {
            // Bonus box: roll a personal drop for every eligible player,
            // cycling through the four offsets.
            std::vector<NonPlayerInfo::Item>& items = info->Items;
            std::vector<NonPlayerInfo::Item>::iterator item, itemEnd = items.end();
            const PlayerMap &players = stageData->GetPlayers();
            int posCnt = 0;
            for ( PlayerMap::const_iterator i = players.begin(); i != players.end() ; ++i, ++posCnt ) {
                if((*i).second->IsGainNormal() == false)
                    continue; // player not eligible for normal item gain
                if (posCnt > 3) posCnt = 0; // wrap offsets for >4 players
                r = stageData->GetRandomFloat();
                prob = 0;
                // Cumulative-probability roll over the item table.
                for (item = items.begin(); item != itemEnd ; ++item) {
                    if (item->Probability==0.0f) continue;
                    prob += (*item).Probability;
                    if ( r <= prob ) {
                        float3 pos = objectData.Position;
                        pos.y = 0; // drops land on the ground plane
                        float3 direction = who->GetDirection();
                        stageData->CreateItem((*item).Hash, pos + posRevision[posCnt], direction, (*i).second->GetSerial(), 1, 0, false);
                        break;
                    }
                }
            }
        } else {
            // Regular prop: a single world drop at the prop's position.
            // NOTE(review): `who` can be null (see the !who branch above); a
            // killing blow with who == nullptr would dereference null in
            // who->GetDirection() below — confirm whether that path is possible.
            std::vector<NonPlayerInfo::Item>& items = info->Items;
            std::vector<NonPlayerInfo::Item>::iterator item = items.begin(), itemEnd = items.end();
            r = stageData->GetRandomFloat();
            for (; item != itemEnd ; ++item) {
                if (item->Probability==0.0f) continue;
                prob += (*item).Probability;
                if ( r <= prob ) {
                    float3 pos = objectData.Position;
                    pos.y = 0;
                    float3 direction = who->GetDirection();
                    stageData->CreateItem((*item).Hash, pos, direction, uint16(1));
                    break;
                }
            }
        }
    }
    return true;
}
// Loads this prop's static definition from the info database and initializes
// name, radius, Hp and any passive states attached to the definition.
// Returns false (and logs) when `misc` is not a known misc-info id.
bool Misc::LoadStatus(uint32 misc)
{
    info = Database::DatabaseInstance().InfoCollections.Miscs.Retrieve(misc);
    if ( !info ) {
        Logger::GetInstance().Error( L"[Misc::LoadStatus] unknown misc. [{0}]", misc );
        return false;
    }

    objectData.Name = info->Name;
    objectData.NameHash = info->NameHash;
    objectData.Radius = info->Radius;
    status.Hp = status.MaxHp = (float)info->Hp;

    // Instantiate and attach every state listed in the definition.
    Stat::IState *state;
    for (std::vector<ActionInfo::State>::iterator i = info->States.begin() ; i != info->States.end() ; ++i) {
        state = Stat::CreateState<Actor>((*i).type, (Actor*)this, &(*i).params, &(*i).optionalStates);
        AddState(state);
    }
    return true;
}
// Per-frame update. Returns true when the prop should be removed from the
// stage: either its lifetime (duration) just expired or it has been
// destroyed. Item drops are handled in DoDamage(), not here.
bool Misc::Update(float dt, IGameStateDatabase* db)
{
    Object::Update(dt, db);

    // Lifetime-limited props expire once their remaining duration hits 0.
    if ( duration > 0 ) {
        duration -= dt;
        if (duration <= 0)
            return true; // expired -> remove
    }

    // Destroyed props are simply removed; the drop logic already ran on the
    // killing blow in DoDamage(). (A stale commented-out copy of that drop
    // loop used to live here and has been deleted.)
    if (status.Hp <= 0) {
        return true;
    }
    return false;
}
// Misc props have no actions of their own; every request is rejected.
bool Misc::SetAction(uint32, Action::Action::IActionParam*, bool)
{
    return false;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
// MiscFinite
//bool MiscFinite::Update(float dt, IGameStateDatabase* db)
//{
// return Misc::Update(dt, db);
//}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Misc Invincible
//MiscTemp::MiscTemp()
//{
// stateFlags[Database::Info::StateInfo::Type::INVINCIBLE] = 1;
//}
} } }
|
<reponame>get-bundled/axyz-sdk<filename>packages/axyz-react/src/components/Solana/ModalWalletButtons/ModalWalletButtons.tsx
import React, { FC } from 'react';
import { Grid, Image } from '@nextui-org/react';
import WalletConnectButton from '../../WalletConnectButton';
import { useAxyz, useSortedWallets } from '../../../hooks';
import { useWallet } from '../../../hooks/solana/useWallet';
interface Props {
  /** Closes the enclosing wallet-selection modal. */
  close: () => void;
}

/**
 * Renders one connect button per known Solana wallet, ordered installed →
 * loadable → undetected (as reported by useSortedWallets). All buttons are
 * disabled once a wallet is already connected.
 */
const ModalWalletButtons: FC<Props> = ({ close }) => {
  const { installedWallets, loadableWallets, undetectedWallets } = useSortedWallets();
  const { select } = useWallet();
  const axyz = useAxyz();
  return (
    <>
      {[...installedWallets, ...loadableWallets, ...undetectedWallets].map((wallet) => (
        <Grid key={wallet.name} xs={12} justify="center">
          <WalletConnectButton
            Icon={
              wallet.icon ? (
                <Image showSkeleton width="$10" height="$10" alt={wallet.name} src={wallet.icon} />
              ) : undefined
            }
            onClick={async () => {
              // Mark this wallet as the selected adapter, then connect it.
              // NOTE(review): assumes select() takes effect before connect()
              // needs it — confirm the adapter's select/connect ordering.
              select(wallet.name);
              await wallet.connect();
            }}
            disabled={axyz.solana.isConnected}
            name={wallet.name}
            close={close}
          />
        </Grid>
      ))}
    </>
  );
};

export default ModalWalletButtons;
|
package main
import (
"bufio"
"os"
"strconv"
"strings"
)
// Config represents the global configuration for logshifter.
// Config represents the global configuration for logshifter.
// All fields are unexported; build one via DefaultConfig or ParseConfig.
type Config struct {
	queueSize             int    // size of the internal log message queue
	inputBufferSize       int    // input up to \n or this number of bytes is considered a line
	outputType            string // one of syslog, file
	syslogBufferSize      int    // lines bound for syslog lines are split at this size
	fileBufferSize        int    // lines bound for a file are split at this size
	fileWriterDir         string // base dir for the file writer output's file
	outputTypeFromEnviron bool   // allows outputtype to be overridden via LOGSHIFTER_OUTPUT_TYPE
}
// DefaultConfig returns a Config with some sane defaults.
// DefaultConfig returns a Config with some sane defaults: syslog output,
// 2 KiB buffers and a 1000-entry internal queue.
func DefaultConfig() *Config {
	cfg := new(Config)
	cfg.queueSize = 1000
	cfg.inputBufferSize = 2048
	cfg.outputType = "syslog"
	cfg.syslogBufferSize = 2048
	cfg.fileBufferSize = 2048
	cfg.outputTypeFromEnviron = true
	return cfg
}
const (
	// Output types recognized in the "outputtype" config key.
	Syslog = "syslog"
	File   = "file"

	// DefaultConfigFile is consulted when no config path is supplied.
	DefaultConfigFile = "/etc/openshift/logshifter.conf"
)
// ParseConfig reads file and constructs a Config from the contents.
//
// The config file format is a simple newline-delimited key=value pair format.
// Config keys within the file correspond to the fields of Config, and the keys
// are case-insensitive.
//
// The values assigned by DefaultConfig are used for any missing config keys.
// Lines without an "=" (including blank lines) are skipped; unparseable
// numeric/boolean values leave the corresponding default untouched.
//
// An error is returned if file cannot be opened.
func ParseConfig(file string) (*Config, error) {
	config := DefaultConfig()

	f, err := os.Open(file)
	if err != nil {
		// Fix: check the error before deferring Close so we never defer a
		// Close on the nil *os.File returned by a failed Open.
		return nil, err
	}
	defer f.Close()

	reader := bufio.NewReader(f)
	for {
		line, readErr := reader.ReadString('\n')

		// Process whatever was returned before checking readErr: a final
		// line with no trailing newline arrives together with io.EOF and
		// was previously dropped. Fix: malformed lines are skipped rather
		// than aborting the rest of the parse.
		if c := strings.SplitN(line, "=", 2); len(c) == 2 {
			k := strings.Trim(c[0], "\n ")
			v := strings.Trim(c[1], "\n ")
			switch strings.ToLower(k) {
			case "queuesize":
				config.queueSize, _ = strconv.Atoi(v)
			case "inputbuffersize":
				config.inputBufferSize, _ = strconv.Atoi(v)
			case "outputtype":
				// Only the two known output types are accepted; anything
				// else keeps the default.
				switch v {
				case "syslog":
					config.outputType = Syslog
				case "file":
					config.outputType = File
				}
			case "syslogbuffersize":
				config.syslogBufferSize, _ = strconv.Atoi(v)
			case "filebuffersize":
				config.fileBufferSize, _ = strconv.Atoi(v)
			case "outputtypefromenviron":
				config.outputTypeFromEnviron, _ = strconv.ParseBool(v)
			case "filewriterdir":
				config.fileWriterDir = v
			}
		}

		if readErr != nil {
			// io.EOF or a real read error: stop and return what we have.
			break
		}
	}
	return config, nil
}
|
<gh_stars>1-10
require 'rails_helper'
module HiveMindHive
RSpec.describe Plugin, type: :model do
# Creation should capture the reported hive version and runner plugins.
describe '#create' do
  it 'sets the version of the hive' do
    expect(Plugin.create(
      'hostname' => 'test_host_name',
      'version' => '2.3.4'
    ).version).to eq '2.3.4'
  end
  it 'sets runner plugins of the hive' do
    expect(Plugin.create(
      'hostname' => 'test_host_name',
      'runner_plugins' => { 'plugin1' => '2.4.8', 'plugin2' => '99.9' }
    ).runner_plugins).to match(
      { 'plugin1' => '2.4.8', 'plugin2' => '99.9' }
    )
  end
end
# Updating a hive record should change the reported runner version.
describe '#update' do
  it 'updates the version of the hive' do
    hive = Plugin.create(
      'hostname' => 'test_host_name',
      'version' => '2.3.4'
    )
    hive.update(version: '2.3.5')
    expect(hive.version).to eq '2.3.5'
  end
end
# A hive's display name is simply its hostname.
describe '#name' do
  let(:hive_plugin) {
    Plugin.create( hostname: 'hive_host_name' )
  }
  it 'returns the hostname' do
    expect(hive_plugin.name).to eq 'hive_host_name'
  end
end
context 'runner version' do
# Fixtures: one hive with a single runner version in its history, and one
# with no history at all.
let(:runner_version) {
  RunnerVersion.create( version: '1.2.3' )
}
let(:hive) {
  Plugin.create(
    hostname: 'hive_host_name',
    runner_version_history: [
      RunnerVersionHistory.create(
        runner_version: runner_version,
        start_timestamp: Time.now
      )
    ]
  )
}
let(:hive_no_history) {
  Plugin.create(hostname: 'hive_host_name_2')
}
# #version reads the most recent entry in the runner version history.
describe '#version' do
  it 'returns nil version if there is no history' do
    expect(hive_no_history.version).to be_nil
  end
  it 'returns the version number' do
    expect(hive.version).to eq '1.2.3'
  end
end
# #update_version appends to the version history, closing the previous
# entry's timestamp, and is a no-op when the version has not changed.
describe '#update_version' do
  it 'changes the reported version' do
    hive.update_version('1.2.4')
    expect(hive.version).to eq '1.2.4'
  end
  it 'sets the end timestamp of the previous version in the history' do
    hive.update_version('1.2.4')
    expect(hive.runner_version_history[0].end_timestamp).to_not be_nil
  end
  it 'sets a first version in history' do
    hive_no_history.update_version('1.2.4')
    expect(hive_no_history.version).to eq '1.2.4'
  end
  it 'does not unnecessarily update the version of the hive' do
    hive # Preload so the fixture's create does not count in the change below
    expect{hive.update_version('1.2.3')}.to change(HiveMindHive::RunnerVersionHistory, :count).by 0
  end
end
end
context 'runner plugins' do
# Fixtures for the runner-plugin specs: three named plugin versions, plus
# hives holding zero, one and three of them in their plugin history.
let(:hive_no_plugins) {
  Plugin.create(hostname: 'hive_no_plugins')
}
let(:runner_plugin_version1) {
  RunnerPluginVersion.create(
    name: 'plugin1',
    version: '2.4.8'
  )
}
let(:runner_plugin_version2) {
  RunnerPluginVersion.create(
    name: 'plugin2',
    version: '5'
  )
}
let(:runner_plugin_version3) {
  RunnerPluginVersion.create(
    name: 'plugin3',
    version: '1.0a'
  )
}
let(:hive_one_plugin) {
  Plugin.create(
    hostname: 'hive_no_plugins',
    runner_plugin_version_history: [
      RunnerPluginVersionHistory.create(
        runner_plugin_version: runner_plugin_version1,
        start_timestamp: Time.now
      )
    ]
  )
}
let(:hive_three_plugins) {
  Plugin.create(
    hostname: 'hive_no_plugins',
    runner_plugin_version_history: [
      RunnerPluginVersionHistory.create(
        runner_plugin_version: runner_plugin_version1,
        start_timestamp: Time.now
      ),
      RunnerPluginVersionHistory.create(
        runner_plugin_version: runner_plugin_version2,
        start_timestamp: Time.now
      ),
      RunnerPluginVersionHistory.create(
        runner_plugin_version: runner_plugin_version3,
        start_timestamp: Time.now
      )
    ]
  )
}
# #runner_plugins reports the current plugin set as a name => version hash.
describe '#runner_plugins' do
  it 'returns an empty list of plugins' do
    expect(hive_no_plugins.runner_plugins).to eq({})
  end
  it 'returns a single plugin with version' do
    expect(hive_one_plugin.runner_plugins).to eq({ 'plugin1' => '2.4.8' })
  end
  it 'returns multiple plugins with versions' do
    expect(hive_three_plugins.runner_plugins).to match(
      {
        'plugin1' => '2.4.8',
        'plugin2' => '5',
        'plugin3' => '1.0a'
      }
    )
  end
end
# #update_runner_plugins reconciles the stored plugin set against the
# reported hash: versions change, new plugins appear, missing ones are
# dropped, and unchanged entries create no new history rows.
describe '#update_runner_plugins' do
  it 'changes the reported version of a single plugin' do
    hive_one_plugin.update_runner_plugins(
      'plugin1' => '2.4.9'
    )
    expect(hive_one_plugin.runner_plugins).to eq({ 'plugin1' => '2.4.9' })
  end
  it 'changes the reported versions of multiple plugins' do
    hive_three_plugins.update_runner_plugins(
      'plugin1' => '2.4.9', 'plugin2' => '7', 'plugin3' => '1.0b'
    )
    expect(hive_three_plugins.runner_plugins).to match(
      {
        'plugin1' => '2.4.9',
        'plugin2' => '7',
        'plugin3' => '1.0b'
      }
    )
  end
  it 'adds a new plugin' do
    hive_three_plugins.update_runner_plugins(
      'plugin1' => '2.4.8', 'plugin2' => '5', 'plugin3' => '1.0a', 'plugin4' => '99'
    )
    expect(hive_three_plugins.runner_plugins).to match(
      {
        'plugin1' => '2.4.8',
        'plugin2' => '5',
        'plugin3' => '1.0a',
        'plugin4' => '99',
      }
    )
  end
  it 'removes a plugin' do
    hive_three_plugins.update_runner_plugins(
      'plugin1' => '2.4.8', 'plugin2' => '5'
    )
    expect(hive_three_plugins.runner_plugins).to match(
      {
        'plugin1' => '2.4.8',
        'plugin2' => '5',
      }
    )
  end
  it 'sets the end timestamp of the previous version in the history' do
    hive_one_plugin.update_runner_plugins('plugin1' => '2.4.9')
    expect(hive_one_plugin.runner_plugin_version_history[0].end_timestamp).to_not be_nil
  end
  # The remaining specs pin how many new history rows each reconciliation
  # should create — only genuinely changed or added plugins count.
  it 'does not unnecessarily update the version of a plugin (single plugin)' do
    hive_one_plugin # Preload
    expect{hive_one_plugin.update_runner_plugins('plugin1' => '2.4.8')}.to change(HiveMindHive::RunnerPluginVersionHistory, :count).by 0
  end
  it 'does not unnecessarily update the version of a plugin (multiple plugins)' do
    hive_three_plugins # Preload
    expect{hive_three_plugins.update_runner_plugins('plugin1' => '2.4.8', 'plugin2' => '6', 'plugin3' => '1.0b')}.to change(HiveMindHive::RunnerPluginVersionHistory, :count).by 2
  end
  it 'does not unnecessarily update the version of a plugin (adding plugins)' do
    hive_three_plugins # Preload
    expect{hive_three_plugins.update_runner_plugins('plugin1' => '2.4.8', 'plugin2' => '5', 'plugin3' => '1.0a', 'plugin4' => '99')}.to change(HiveMindHive::RunnerPluginVersionHistory, :count).by 1
  end
end
end
# #details aggregates the runner version and plugin versions into one hash.
describe '#details' do
  # Fixtures: a hive with one runner version and three plugin versions.
  let(:runner_version) {
    RunnerVersion.create( version: '1.2.3' )
  }
  let(:runner_plugin_version1) {
    RunnerPluginVersion.create(
      name: 'plugin1',
      version: '2.4.8'
    )
  }
  let(:runner_plugin_version2) {
    RunnerPluginVersion.create(
      name: 'plugin2',
      version: '5'
    )
  }
  let(:runner_plugin_version3) {
    RunnerPluginVersion.create(
      name: 'plugin3',
      version: '1.0a'
    )
  }
  let(:hive) {
    Plugin.create(
      hostname: 'hive_host_name',
      runner_version_history: [
        RunnerVersionHistory.create(
          runner_version: runner_version,
          start_timestamp: Time.now
        )
      ],
      runner_plugin_version_history: [
        RunnerPluginVersionHistory.create(
          runner_plugin_version: runner_plugin_version1,
          start_timestamp: Time.now
        ),
        RunnerPluginVersionHistory.create(
          runner_plugin_version: runner_plugin_version2,
          start_timestamp: Time.now
        ),
        RunnerPluginVersionHistory.create(
          runner_plugin_version: runner_plugin_version3,
          start_timestamp: Time.now
        )
      ]
    )
  }
  it 'returns the version number' do
    expect(hive.details).to include({'version' => '1.2.3'})
  end
  it 'returns the plugin version numbers' do
    expect(hive.details.keys).to include 'runner_plugins'
    expect(hive.details['runner_plugins']).to match(
      {
        'plugin1' => '2.4.8',
        'plugin2' => '5',
        'plugin3' => '1.0a'
      }
    )
  end
end
# #json_keys lists which attributes get serialized for this plugin type.
describe '#json_keys' do
  let(:plugin) { Plugin.create }
  it 'returns the correct array' do
    expect(plugin.json_keys).to eq([:version, :connected_devices])
  end
end
end
end
|
#include <string>
#include <iostream>
using namespace std;
#include "stack-array.h"
// Demo harness for StackArray<T>: exercises int, double and string stacks,
// including the overflow and underflow cases, which are expected to throw
// a const char* message.
int main(int argc, char **argv)
{
    // int stack: fill to capacity, then drain in LIFO order.
    try {
        cout << "\n";
        StackArray<int> saI(5);
        for(int i = 0 ; i < 5; i++)
            saI.push(10*i+1);
        for(int i = 0 ; i < 5; i++)
            cout << saI.pop() << "\n";
    } catch(const char* err) {
        cout << "caught a char* err=" << err << "\n";
    }

    // Same round trip with doubles.
    try {
        cout << "\n";
        StackArray<double> saD(5);
        for(int i = 0 ; i < 5; i++)
            saD.push(100.*i+1.1);
        for(int i = 0 ; i < 5; i++)
            cout << saD.pop() << "\n";
    } catch(const char* err) {
        cout << "caught a char* err=" << err << "\n";
    }

    // Overfill: 15 pushes into a 5-slot stack; push is expected to throw
    // once the stack is full, skipping the pops below.
    try {
        cout << "\n";
        StackArray<double> saD(5);
        for(int i = 0 ; i < 15; i++)
            saD.push(100.*i+1.1);
        for(int i = 0 ; i < 5; i++)
            cout << saD.pop() << "\n";
    } catch(const char* err) {
        cout << "caught a char* err=" << err << "\n";
    }

    // Underflow: pop one more time than was pushed to demonstrate the
    // empty-stack throw.
    try {
        cout << "\n";
        StackArray<string> saS(5);
        saS.push( string("one") );
        saS.push( string("two") );
        saS.push( string("three") );
        saS.push( string("four") );
        saS.push( string("five") );
        size_t num = saS.size();
        for(size_t i = 1 ; i <= num + 1; i++) {
            if(saS.size() == 0) cout << "popping an empty stack empty. expect a throw:\n";
            cout << saS.pop() << "\n";
        }
    } catch(const char* err) {
        cout << "caught a char* err=" << err << "\n";
    }
}
|
class Person:
    """A named person that can print and introduce itself."""

    def __init__(self, name):
        self.name = name

    def show_name(self):
        """Print the stored name."""
        print("Name : ", self.name)

    def greeting(self):
        """Print a friendly greeting that includes the name."""
        message = f"Hello, my name is {self.name}"
        print(message)
class Student(Person):
    """A Person enrolled as a student, identified by a roll number."""

    def __init__(self, name, rollnum):
        # Chain to the base constructor via super() instead of naming the
        # base class explicitly — the cooperative, rename-safe idiom.
        super().__init__(name)
        self.rollnum = rollnum

    def show_rollnum(self):
        """Print the student's roll number."""
        print("Roll Number: ", self.rollnum)
# Demo: create a student and exercise both inherited and own methods.
s = Student("Jill", "15")
s.show_name()     # inherited from Person
s.show_rollnum()
s.greeting()      # inherited from Person
import templates from '../templates';
import BaseView from './base';
import ItemEditorView from './item.editor';
// Management screen: renders the model's items as a table and opens an
// ItemEditorView for whichever row the user clicks.
export default class ManageView extends BaseView {
    constructor (options) {
        super(options);
        this.name = 'Manage';
        this._model = options.model;
        // Delegate clicks on item rows to open (or replace) the editor.
        // NOTE(review): `event.sTarget` is not a DOM-standard field —
        // presumably BaseView._bindEvent attaches the matched selector
        // target there; confirm against BaseView.
        this._bindEvent('click', 'tr.item-row', (event) => {
            let itemId = event.sTarget.getAttribute('data-id');
            // Only one editor at a time: tear down any previous instance
            // before creating a new one.
            if (this._itemEditor) {
                this._itemEditor.destroy();
            }
            this._itemEditor = new ItemEditorView({
                el: this._itemEditorContainer,
                item: this._model.getItem(itemId)
            });
            this._itemEditor.render();
            // Persist edits back to the model and refresh the table.
            this._itemEditor.on('save', item => {
                this._model.updateItem(item);
                this.render();
            });
        });
    }

    // Re-renders the item table and caches the editor mount point.
    render () {
        this.el.innerHTML = templates.view.manage({
            items: this._model.getItems()
        });
        this._itemEditorContainer = this.el.querySelector('aside.item-editor-container');
    }
}
|
# Start a detached DB2 container named "db2": privileged mode (DB2 needs it
# for kernel parameter tweaks), port 50000 published, license auto-accepted,
# instance password and database name preset, and the current directory plus
# ./database bind-mounted into the container.
docker run -it --name db2 -d --privileged=true -p 50000:50000 -e LICENSE=accept -e DB2INST1_PASSWORD=db2inst1 -e DBNAME=INVDB -v $(pwd):/home -v $(pwd)/database:/database ibmcom/db2
|
set -euxo pipefail

# CI entry point: builds the book, link-checks it (standalone and as part of
# a bookshelf), then compiles and links every chapter example crate.
main() {
    # test that building the book works
    mdbook build

    # mdbook doesn't handle relative links correctly in print.html so skip it.
    linkchecker --ignore-url "print.html" book

    # now check this as a directory of the bookshelf
    rm -rf shelf
    mkdir shelf
    mv book shelf
    # Skipping bad relative link errors in print.html again here.
    linkchecker --ignore-url "print.html" shelf
    mv shelf/book .
    rmdir shelf

    # first (fast) pass: check that examples compile
    # Fix: iterate the glob directly (no useless `$(echo ...)` word-splitting)
    # and quote every expansion so paths with spaces don't break the loop.
    for chapter in src/*; do
        if [ ! -f "$chapter/Cargo.toml" ]; then
            continue
        fi
        pushd "$chapter"
        case $(basename "$chapter") in
            05-led-roulette | 06-hello-world)
                RUSTFLAGS="-D rust_2018_compatibility -D rust_2018_idioms" cargo check --target thumbv7em-none-eabihf
                ;;
            WIP-async-io-the-future)
                popd
                continue
                ;;
            *)
                RUSTFLAGS="-D rust_2018_compatibility -D rust_2018_idioms" cargo check
                ;;
        esac
        popd
    done

    # second (slow) pass: check that examples link
    for chapter in src/*; do
        if [ ! -f "$chapter/Cargo.toml" ]; then
            continue
        fi
        pushd "$chapter"
        case $(basename "$chapter") in
            05-led-roulette | 06-hello-world)
                cargo build --target thumbv7em-none-eabihf
                cargo build --target thumbv7em-none-eabihf --release
                ;;
            WIP-async-io-the-future)
                popd
                continue
                ;;
            *)
                cargo build
                cargo build --release
                ;;
        esac
        popd
    done
}

# Run the full check on every branch except master (master is covered by the
# deploy job). Fix: quote the variable so a branch name with spaces cannot
# break the [ ] test (set -u already aborts when it is unset).
if [ "$TRAVIS_BRANCH" != master ]; then
    main
fi
|
#!/usr/bin/env -S bash -euET -o pipefail -O inherit_errexit
SCRIPT=$(readlink -f "$0") && cd $(dirname "$SCRIPT")

# Generated ktools pipeline: computes ground-up (gul) and insured (il)
# losses for a single process/partition (P1) and summary set (S1), wiring
# the calculation stages together through named pipes (FIFOs).

# --- Script Init ---
mkdir -p log
rm -R -f log/*

# --- Setup run dirs ---
find output -type f -not -name '*summary-info*' -not -name '*.json' -exec rm -R -f {} +

rm -R -f fifo/*
rm -R -f work/*
mkdir work/kat/

mkdir work/gul_S1_summaryleccalc
mkdir work/gul_S1_summaryaalcalc
mkdir work/il_S1_summaryleccalc
mkdir work/il_S1_summaryaalcalc

mkfifo fifo/gul_P1

mkfifo fifo/gul_S1_summary_P1
mkfifo fifo/gul_S1_summary_P1.idx
mkfifo fifo/gul_S1_eltcalc_P1
mkfifo fifo/gul_S1_summarycalc_P1
mkfifo fifo/gul_S1_pltcalc_P1

mkfifo fifo/il_P1

mkfifo fifo/il_S1_summary_P1
mkfifo fifo/il_S1_summary_P1.idx
mkfifo fifo/il_S1_eltcalc_P1
mkfifo fifo/il_S1_summarycalc_P1
mkfifo fifo/il_S1_pltcalc_P1

# --- Do insured loss computes ---
# Readers are started first so the FIFOs have consumers; tee fans the summary
# stream out to the elt/summary/plt calculators and the aal/lec work files.
eltcalc < fifo/il_S1_eltcalc_P1 > work/kat/il_S1_eltcalc_P1 & pid1=$!
summarycalctocsv < fifo/il_S1_summarycalc_P1 > work/kat/il_S1_summarycalc_P1 & pid2=$!
pltcalc < fifo/il_S1_pltcalc_P1 > work/kat/il_S1_pltcalc_P1 & pid3=$!

tee < fifo/il_S1_summary_P1 fifo/il_S1_eltcalc_P1 fifo/il_S1_summarycalc_P1 fifo/il_S1_pltcalc_P1 work/il_S1_summaryaalcalc/P1.bin work/il_S1_summaryleccalc/P1.bin > /dev/null & pid4=$!
tee < fifo/il_S1_summary_P1.idx work/il_S1_summaryleccalc/P1.idx > /dev/null & pid5=$!
summarycalc -m -f -1 fifo/il_S1_summary_P1 < fifo/il_P1 &

# --- Do ground up loss computes ---
eltcalc < fifo/gul_S1_eltcalc_P1 > work/kat/gul_S1_eltcalc_P1 & pid6=$!
summarycalctocsv < fifo/gul_S1_summarycalc_P1 > work/kat/gul_S1_summarycalc_P1 & pid7=$!
pltcalc < fifo/gul_S1_pltcalc_P1 > work/kat/gul_S1_pltcalc_P1 & pid8=$!

tee < fifo/gul_S1_summary_P1 fifo/gul_S1_eltcalc_P1 fifo/gul_S1_summarycalc_P1 fifo/gul_S1_pltcalc_P1 work/gul_S1_summaryaalcalc/P1.bin work/gul_S1_summaryleccalc/P1.bin > /dev/null & pid9=$!
tee < fifo/gul_S1_summary_P1.idx work/gul_S1_summaryleccalc/P1.idx > /dev/null & pid10=$!
summarycalc -m -g -1 fifo/gul_S1_summary_P1 < fifo/gul_P1 &

# Main producer: events -> model -> ground-up losses (tee'd into gul_P1)
# -> financial module -> insured losses into il_P1.
eve 1 1 | getmodel | gulcalc -S0 -L0 -r -c fifo/gul_P1 -i - | fmcalc -a2 > fifo/il_P1 &

wait $pid1 $pid2 $pid3 $pid4 $pid5 $pid6 $pid7 $pid8 $pid9 $pid10

# --- Do insured loss kats ---
# Concatenate the per-process worker outputs into the final CSVs.
kat -s work/kat/il_S1_eltcalc_P1 > output/il_S1_eltcalc.csv & kpid1=$!
kat work/kat/il_S1_pltcalc_P1 > output/il_S1_pltcalc.csv & kpid2=$!
kat work/kat/il_S1_summarycalc_P1 > output/il_S1_summarycalc.csv & kpid3=$!

# --- Do ground up loss kats ---
kat -s work/kat/gul_S1_eltcalc_P1 > output/gul_S1_eltcalc.csv & kpid4=$!
kat work/kat/gul_S1_pltcalc_P1 > output/gul_S1_pltcalc.csv & kpid5=$!
kat work/kat/gul_S1_summarycalc_P1 > output/gul_S1_summarycalc.csv & kpid6=$!
wait $kpid1 $kpid2 $kpid3 $kpid4 $kpid5 $kpid6

# Post-processing over the accumulated work files: average annual loss and
# loss exceedance curves for both perspectives.
aalcalc -Kil_S1_summaryaalcalc > output/il_S1_aalcalc.csv & lpid1=$!
leccalc -r -Kil_S1_summaryleccalc -F output/il_S1_leccalc_full_uncertainty_aep.csv -f output/il_S1_leccalc_full_uncertainty_oep.csv -S output/il_S1_leccalc_sample_mean_aep.csv -s output/il_S1_leccalc_sample_mean_oep.csv -W output/il_S1_leccalc_wheatsheaf_aep.csv -M output/il_S1_leccalc_wheatsheaf_mean_aep.csv -m output/il_S1_leccalc_wheatsheaf_mean_oep.csv -w output/il_S1_leccalc_wheatsheaf_oep.csv & lpid2=$!
aalcalc -Kgul_S1_summaryaalcalc > output/gul_S1_aalcalc.csv & lpid3=$!
leccalc -r -Kgul_S1_summaryleccalc -F output/gul_S1_leccalc_full_uncertainty_aep.csv -f output/gul_S1_leccalc_full_uncertainty_oep.csv -S output/gul_S1_leccalc_sample_mean_aep.csv -s output/gul_S1_leccalc_sample_mean_oep.csv -W output/gul_S1_leccalc_wheatsheaf_aep.csv -M output/gul_S1_leccalc_wheatsheaf_mean_aep.csv -m output/gul_S1_leccalc_wheatsheaf_mean_oep.csv -w output/gul_S1_leccalc_wheatsheaf_oep.csv & lpid4=$!
wait $lpid1 $lpid2 $lpid3 $lpid4

rm -R -f work/*
rm -R -f fifo/*
|
#pragma once
#include <map>
#include <string>
#include <Poco/SharedPtr.h>
#include <Poco/Instantiator.h>
#include "util/Loggable.h"
#include "zwave/ZWaveMapperRegistry.h"
namespace BeeeOn {
/**
 * @brief SpecificZWaveMapperRegistry implements the method resolve()
 * generically. The subclass of SpecificZWaveMapperRegistry should
 * register a special instantiator creating the appropriate Mapper
 * implementation based on the vendor and product IDs of Z-Wave nodes.
 */
class SpecificZWaveMapperRegistry : public ZWaveMapperRegistry, protected Loggable {
public:
SpecificZWaveMapperRegistry();
/**
 * @brief Resolve a Mapper for the given node via the registered
 * instantiators (implementation in the .cpp file).
 */
Mapper::Ptr resolve(const ZWaveNode &node) override;
/**
 * @brief Set the spec mapping where the map key is a string
 * in form: <code>VENDOR:PRODUCT</code> and the value is the
 * name of MapperInstantiator to be used.
 */
void setSpecMap(const std::map<std::string, std::string> &specMap);
protected:
/**
 * @brief Specification of a Z-Wave node to match.
 */
struct Spec {
const uint16_t vendor;
const uint16_t product;
// Strict weak ordering so Spec can serve as a std::map key (see m_specMap).
bool operator <(const Spec &other) const;
// Human-readable form of the spec.
// NOTE(review): exact formatting lives in the .cpp — presumably the same
// "VENDOR:PRODUCT" shape that parse() accepts; confirm there.
std::string toString() const;
// Parse the "VENDOR:PRODUCT" key format used by setSpecMap().
static Spec parse(const std::string &input);
};
/**
 * @brief Instantiator of specific Mapper implementations.
 */
class MapperInstantiator {
public:
typedef Poco::SharedPtr<MapperInstantiator> Ptr;
virtual ~MapperInstantiator();
// Create a Mapper for the given node; implemented by subclasses.
virtual Mapper::Ptr create(const ZWaveNode &node) = 0;
};
/**
 * @brief Template implementation of MapperInstantiator creating
 * MapperType instances having constructor specified as:
 * <code>MapperType(const ZWaveNode::Identity &, const std::string &)</code>.
 */
template <typename MapperType>
class SimpleMapperInstantiator : public MapperInstantiator {
public:
Mapper::Ptr create(const ZWaveNode &node) override
{
return new MapperType(node.id(), node.product());
}
};
/**
 * @brief The subclass would call this method for each instantiator
 * type it offers. The name of instantiator is referred from the
 * specMap property.
 *
 * @see setSpecMap()
 */
void registerInstantiator(
const std::string &name,
MapperInstantiator::Ptr instantiator);
private:
typedef std::map<std::string, MapperInstantiator::Ptr> InstantiatorsMap;
// All instantiators registered via registerInstantiator(), keyed by name.
InstantiatorsMap m_instantiators;
// Maps a node Spec to an entry of m_instantiators (populated by setSpecMap()).
std::map<Spec, InstantiatorsMap::iterator> m_specMap;
};
}
|
#!/bin/sh
# CocoaPods embed-frameworks script: copies vendored frameworks into the app
# bundle, strips invalid architectures and re-signs when required.
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Location of the Swift runtime dylibs for the active toolchain/platform.
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
# Resolve the framework source: prefer the built-products path, then the
# basename inside built products, finally the argument as a literal path.
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Follow a symlinked framework to its real location before copying.
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
# Locate the framework's executable; fall back to a bare binary layout.
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
# List @rpath Swift dylibs referenced by the binary, one per line.
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into a the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
# Path to the DWARF payload inside the copied dSYM bundle.
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
# STRIP_BINARY_RETVAL is set by strip_invalid_archs: 1 when stripping ran.
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Signs a framework with the provided identity when code signing is enabled
# and allowed by the build settings.
code_sign_if_enabled() {
  # POSIX marks the `-a` binary test operand obsolescent (and it parses
  # ambiguously); chain separate [ ] tests with && instead.
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" ] && [ "${CODE_SIGNING_REQUIRED}" != "NO" ] && [ "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
    # Background the signing command when pods are configured to sign in parallel.
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Strip invalid architectures
# Removes every architecture from $1 that is not in the build's $ARCHS.
# Sets STRIP_BINARY_RETVAL to 1 when stripping was applicable, 0 otherwise.
strip_invalid_archs() {
binary="$1"
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
# Embed the pod framework for each build configuration.
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/XFPasswordTextField/XFPasswordTextField.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/XFPasswordTextField/XFPasswordTextField.framework"
fi
# Wait for any backgrounded parallel codesign jobs before exiting.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
package de.cpg.oss.verita.service.event_store;
import com.fasterxml.uuid.Generators;
import de.cpg.oss.verita.service.AbstractCommandBusTest;
import de.cpg.oss.verita.service.CommandBus;
import org.junit.AfterClass;
import org.junit.BeforeClass;
/**
 * Integration test of {@code CommandBusImpl} against an Event Store
 * connection; the actual test scenarios are inherited from
 * {@link AbstractCommandBusTest}.
 */
public class CommandBusIT extends AbstractCommandBusTest {
// Created once per test class in setup(); shared by all inherited tests.
private static CommandBus commandBus;
@BeforeClass
public static void setup() {
TestUtil.setup();
// Uses com.fasterxml.uuid's name-based UUID generator for command ids.
commandBus = new CommandBusImpl(TestUtil.objectMapper(), Generators.nameBasedGenerator(), TestUtil.esConnection());
}
@AfterClass
public static void cleanup() {
TestUtil.cleanup();
}
@Override
protected CommandBus commandBus() {
return commandBus;
}
}
|
#!/bin/sh -e
# Tag the locally built image with a timestamp and push it to Azure Container
# Registry. Expects .env to define AZ_REPO and IMAGE_NAME.
# `source` is a bashism; the POSIX dot command works under /bin/sh.
. ./.env
VERSION=$(date +%Y-%m-%d-%H:%M:%S-%Z)
az login
az acr login --name "${AZ_REPO}"
docker login "${AZ_REPO}.azurecr.io"
docker tag "${IMAGE_NAME}:latest" "${AZ_REPO}.azurecr.io/samples/aci-nvidia-gpu:${VERSION}"
docker push "${AZ_REPO}.azurecr.io/samples/aci-nvidia-gpu:${VERSION}"
# `echo -e` is undefined behavior in POSIX sh; plain echo suffices here
# because the message contains no escape sequences.
echo "Pushed image to Azure Container Registry"
def check_application_state(application_states):
    """Return ``True`` when every application state is truthy.

    ``all()`` already returns ``True`` for an empty iterable, so the
    original explicit empty-list branch was redundant and has been removed.

    Args:
        application_states: Iterable of boolean-like application states.

    Returns:
        bool: ``True`` if all states are truthy (or the iterable is empty),
        ``False`` otherwise.
    """
    return all(application_states)
# Generate the pgtype array Go sources from the typed_array.go.erb template.
# Each invocation parameterizes the template with: the generated array type
# name, its element type, the Go slice types it converts to/from, the textual
# NULL representation, and whether the binary wire format is supported.
erb pgtype_array_type=Int2Array pgtype_element_type=Int2 go_array_types=[]int16,[]uint16 element_type_name=int2 text_null=NULL binary_format=true typed_array.go.erb > int2_array.go
erb pgtype_array_type=Int4Array pgtype_element_type=Int4 go_array_types=[]int32,[]uint32 element_type_name=int4 text_null=NULL binary_format=true typed_array.go.erb > int4_array.go
erb pgtype_array_type=Int8Array pgtype_element_type=Int8 go_array_types=[]int64,[]uint64 element_type_name=int8 text_null=NULL binary_format=true typed_array.go.erb > int8_array.go
erb pgtype_array_type=BoolArray pgtype_element_type=Bool go_array_types=[]bool element_type_name=bool text_null=NULL binary_format=true typed_array.go.erb > bool_array.go
erb pgtype_array_type=DateArray pgtype_element_type=Date go_array_types=[]time.Time element_type_name=date text_null=NULL binary_format=true typed_array.go.erb > date_array.go
erb pgtype_array_type=TimestamptzArray pgtype_element_type=Timestamptz go_array_types=[]time.Time element_type_name=timestamptz text_null=NULL binary_format=true typed_array.go.erb > timestamptz_array.go
erb pgtype_array_type=TimestampArray pgtype_element_type=Timestamp go_array_types=[]time.Time element_type_name=timestamp text_null=NULL binary_format=true typed_array.go.erb > timestamp_array.go
erb pgtype_array_type=Float4Array pgtype_element_type=Float4 go_array_types=[]float32 element_type_name=float4 text_null=NULL binary_format=true typed_array.go.erb > float4_array.go
erb pgtype_array_type=Float8Array pgtype_element_type=Float8 go_array_types=[]float64 element_type_name=float8 text_null=NULL binary_format=true typed_array.go.erb > float8_array.go
erb pgtype_array_type=InetArray pgtype_element_type=Inet go_array_types=[]*net.IPNet,[]net.IP element_type_name=inet text_null=NULL binary_format=true typed_array.go.erb > inet_array.go
erb pgtype_array_type=CIDRArray pgtype_element_type=CIDR go_array_types=[]*net.IPNet,[]net.IP element_type_name=cidr text_null=NULL binary_format=true typed_array.go.erb > cidr_array.go
# Text-like element types quote their NULL sentinel so a literal "NULL"
# string value can be distinguished from SQL NULL.
erb pgtype_array_type=TextArray pgtype_element_type=Text go_array_types=[]string element_type_name=text text_null='"NULL"' binary_format=true typed_array.go.erb > text_array.go
erb pgtype_array_type=VarcharArray pgtype_element_type=Varchar go_array_types=[]string element_type_name=varchar text_null='"NULL"' binary_format=true typed_array.go.erb > varchar_array.go
erb pgtype_array_type=BPCharArray pgtype_element_type=BPChar go_array_types=[]string element_type_name=bpchar text_null='NULL' binary_format=true typed_array.go.erb > bpchar_array.go
erb pgtype_array_type=ByteaArray pgtype_element_type=Bytea go_array_types=[][]byte element_type_name=bytea text_null=NULL binary_format=true typed_array.go.erb > bytea_array.go
erb pgtype_array_type=ACLItemArray pgtype_element_type=ACLItem go_array_types=[]string element_type_name=aclitem text_null=NULL binary_format=false typed_array.go.erb > aclitem_array.go
erb pgtype_array_type=HstoreArray pgtype_element_type=Hstore go_array_types=[]map[string]string element_type_name=hstore text_null=NULL binary_format=true typed_array.go.erb > hstore_array.go
erb pgtype_array_type=NumericArray pgtype_element_type=Numeric go_array_types=[]float32,[]float64,[]int64,[]uint64 element_type_name=numeric text_null=NULL binary_format=true typed_array.go.erb > numeric_array.go
erb pgtype_array_type=UUIDArray pgtype_element_type=UUID go_array_types=[][16]byte,[][]byte,[]string element_type_name=uuid text_null=NULL binary_format=true typed_array.go.erb > uuid_array.go
# While the binary format is theoretically possible it is only practical to use the text format. In addition, the text format for NULL enums is unquoted so TextArray or a possible GenericTextArray cannot be used.
erb pgtype_array_type=EnumArray pgtype_element_type=GenericText go_array_types=[]string text_null='NULL' binary_format=false typed_array.go.erb > enum_array.go
# Normalize imports/formatting of the freshly generated files.
goimports -w *_array.go
|
<filename>tp2019Lucas/src/main/java/py/edu/uca/lp3/domain/Entrenador.java
package py.edu.uca.lp3.domain;
import javax.persistence.Entity;
/**
 * Coach (Entrenador) JPA entity; extends {@code Empleado} with
 * coaching-specific attributes.
 */
@Entity
public class Entrenador extends Empleado {
/**
 * Serialization version for the Serializable contract inherited via Empleado.
 */
private static final long serialVersionUID = -5352711405615465689L;
private int titulosGanados; // titles won as a player
private boolean exJugador;
// No-arg constructor (required by JPA for @Entity classes).
public Entrenador() {
}
// Delegating constructors mirroring the Empleado constructor overloads.
public Entrenador(String nombre, String apellido, int edad) {
super(nombre, apellido, edad);
}
public Entrenador(String nombre, String apellido, int edad, int nroCedula) {
super(nombre, apellido, edad, nroCedula);
}
public Entrenador(int numeroCedula, String nombre) {
super(numeroCedula, nombre);
}
public int getTitulosGanados() {
return titulosGanados;
}
public void setTitulosGanados(int titulosGanados) {
this.titulosGanados = titulosGanados;
}
public boolean isExJugador() {
return exJugador;
}
public void setExJugador(boolean exJugador) {
this.exJugador = exJugador;
}
}
|
# Generated DSL object for the Kubernetes apps/v1beta2 StatefulSetStatus
# resource: declares its fields, validations, and API serialization.
module KubeDSL::DSL::Apps::V1beta2
class StatefulSetStatus < ::KubeDSL::DSLObject
value_field :collision_count
# array_field(:condition) declares the singular builder; the collection is
# read back through the plural `conditions` accessor below.
array_field(:condition) { KubeDSL::DSL::Apps::V1beta2::StatefulSetCondition.new }
value_field :current_replicas
value_field :current_revision
value_field :observed_generation
value_field :ready_replicas
value_field :replicas
value_field :update_revision
value_field :updated_replicas
# All fields are optional (presence: false); only their formats are checked.
validates :collision_count, field: { format: :integer }, presence: false
validates :conditions, array: { kind_of: KubeDSL::DSL::Apps::V1beta2::StatefulSetCondition }, presence: false
validates :current_replicas, field: { format: :integer }, presence: false
validates :current_revision, field: { format: :string }, presence: false
validates :observed_generation, field: { format: :integer }, presence: false
validates :ready_replicas, field: { format: :integer }, presence: false
validates :replicas, field: { format: :integer }, presence: false
validates :update_revision, field: { format: :string }, presence: false
validates :updated_replicas, field: { format: :integer }, presence: false
# Render the camelCase hash expected by the Kubernetes API.
def serialize
{}.tap do |result|
result[:collisionCount] = collision_count
result[:conditions] = conditions.map(&:serialize)
result[:currentReplicas] = current_replicas
result[:currentRevision] = current_revision
result[:observedGeneration] = observed_generation
result[:readyReplicas] = ready_replicas
result[:replicas] = replicas
result[:updateRevision] = update_revision
result[:updatedReplicas] = updated_replicas
end
end
# Symbolic kind identifier used by the DSL.
def kind_sym
:stateful_set_status
end
end
end
|
#!/usr/bin/env bash
# Environment setup for the LibriSpeech inference benchmark:
#  * installs audio tooling (sox) and python dependencies,
#  * downloads the clean_test split when the dataset folder is missing,
#  * optionally installs NVIDIA/CUDA drivers when invoked with "cuda",
#  * builds the (CUDA) Docker image and prints how to run it.
sudo apt-get remove unscd;
sudo apt-get -y install python-pip;
pip install sox wget;
sudo apt-get -y install sox libsox-fmt-mp3
cd ../dataset
if [ -d "LibriSpeech_dataset" ]
then
	# Bash's `echo` does not interpret "\n" escapes, so the original printed
	# literal backslash-n text; printf handles the escapes portably.
	printf '\n\nLibrispeech folder found, skipping download.\n\n'
	sleep 2
else
	printf '\n\nDownloading clean_test, (est. 1.5 min, space req 1G)...\n\n'
	sleep 2
	sh download_dataset.sh clean_test
fi
cd ../inference
if [ "${1}" = "cuda" ]
then
	VARIANT="cuda_"
	sudo add-apt-repository -y ppa:graphics-drivers/ppa
	sudo apt-get -y update
	# NOTE(review): "nvidia" is not a concrete package name on most Ubuntu
	# releases — confirm the intended driver metapackage (e.g. nvidia-driver-*).
	sudo apt-get -y install nvidia
	sudo apt-get -y install cuda-drivers
else
	VARIANT=""
fi
cd ../docker
yes 'y' | sh install_${VARIANT}docker.sh
GROUP="docker"
sudo usermod -a -G $GROUP $USER
newgrp $GROUP << END # Need to run docker related items as a user in this group!
printf '\n\nBuilding Docker Image (up to 8min)\n\n'
sleep 2
yes 'y' | sh build_${VARIANT}docker.sh
END
cd ../inference
printf 'Ready to run:\n\tnewgrp %s\n\tsh ../docker/run_%sdev.sh\n' "${GROUP}" "${VARIANT}"
|
/// Doubles `value` for any type supporting multiplication and conversion
/// from `u8` (which covers all wider primitive numeric types).
fn multiply_by_two<T>(value: T) -> T
where
    // The original only bounded `Mul`, so `T::from(2)` did not compile;
    // `From<u8>` supplies the conversion for the literal two.
    T: std::ops::Mul<Output = T> + From<u8>,
{
    value * T::from(2u8)
}

fn main() {
    // `let` bindings are not allowed at module scope, so the original
    // free-standing examples are exercised here instead.
    let x: i32 = 3;
    assert_eq!(6, multiply_by_two(x));
    let y: f64 = 5.0;
    assert_eq!(10.0, multiply_by_two(y));
}
<reponame>khepherer/java_lleida_01_06_2017<gh_stars>0
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package com.curso.ejemplostreams.basico;
import com.curso.ejemplostreams.modelo.Persona;
import static com.curso.ejemplostreams.utilidades.Util.personas;
import java.util.function.Predicate;
import java.util.stream.Stream;
/**
 * Examples of {@link Stream#filter} over {@code Persona} streams.
 *
 * @author usuario
 */
public interface FilterUtil {
/**
 * Returns the personas produced by {@code personas(1, 10)} whose age
 * is below five.
 */
static Stream<Persona> ejemploFilter() {
return personas(1, 10).filter(p -> p.getEdad() < 5);
}
/**
 * Returns the personas produced by {@code personas(1, 10)} matching the
 * caller-supplied predicate.
 */
static Stream<Persona> ejemploFilter(Predicate<Persona> p) {
return personas(1, 10).filter(p);
}
}
|
<gh_stars>0
/**
* SPDX-License-Identifier: Apache-2.0
*/
import nock from "nock";
import configureMockStore from "redux-mock-store";
import thunk from "redux-thunk";
import actions from "./actions";
import operations from "./operations";
import reducers from "./reducers";
import * as selectors from "./selectors";
import types from "./types";
const middleware = [thunk];
const mockStore = configureMockStore(middleware);
const initialState = {};
// Test suite for the "tables" Redux ducks module (actions, operations,
// reducers and selectors).
describe("Tables", () => {
// Operations: each happy-path test stubs the REST endpoint with nock,
// dispatches the thunk and checks the dispatched action type; each
// "catch error" test stubs a connection failure and expects the error to
// be swallowed (no action dispatched).
describe("Operations", () => {
afterEach(() => {
nock.cleanAll();
});
const channel = "mychannel";
test("blockList", async done => {
// NOTE(review): this host matcher regex is extremely permissive — it
// effectively matches any host the client targets.
nock(/\w*(\W)/g)
.get(`/api/blockAndTxList/${channel}/0`)
.reply(200, {
rows: [{ test: "rows" }]
});
const expectedActions = [{ type: types.BLOCK_LIST }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.blockList(channel));
const actions = store.getActions();
expect(actions[0].type).toEqual(types.BLOCK_LIST);
done();
});
test("blockList catch error", async done => {
// Silence the expected console.error emitted by the operation.
spyOn(console, "error");
nock(/\w*(\W)/g)
.get(`/api/blockAndTxList/${channel}/0`)
.replyWithError({ code: "ECONNREFUSED" });
const expectedActions = [{ type: types.BLOCK_LIST }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.blockList(channel));
const actions = store.getActions();
expect(actions).toEqual([]);
done();
});
test("chaincodeList", async done => {
nock(/\w*(\W)/g)
.get(`/api/chaincode/${channel}`)
.reply(200, {
rows: [{ test: "rows" }]
});
const expectedActions = [{ type: types.CHAINCODE_LIST }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.chaincodeList(channel));
const actions = store.getActions();
expect(actions[0].type).toEqual(types.CHAINCODE_LIST);
done();
});
test("chaincodeList catch error", async done => {
spyOn(console, "error");
nock(/\w*(\W)/g)
.get(`/api/chaincode/${channel}`)
.replyWithError({ code: "ECONNREFUSED" });
const expectedActions = [{ type: types.CHAINCODE_LIST }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.chaincodeList(channel));
const actions = store.getActions();
expect(actions).toEqual([]);
done();
});
test("channels", async done => {
nock(/\w*(\W)/g)
.get("/api/channels/info")
.reply(200, {
channels: [{ test: "rows" }]
});
const expectedActions = [{ type: types.CHANNELS }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.channels());
const actions = store.getActions();
expect(actions[0].type).toEqual(types.CHANNELS);
done();
});
test("channels catch error", async done => {
spyOn(console, "error");
nock(/\w*(\W)/g)
.get("/api/channels/info")
.replyWithError({ code: "ECONNREFUSED" });
const expectedActions = [{ type: types.CHANNELS }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.channels(channel));
const actions = store.getActions();
expect(actions).toEqual([]);
done();
});
test("peerList", async done => {
nock(/\w*(\W)/g)
.get(`/api/peers/${channel}`)
.reply(200, {
rows: [{ test: "rows" }]
});
const expectedActions = [{ type: types.PEER_LIST }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.peerList(channel));
const actions = store.getActions();
expect(actions[0].type).toEqual(types.PEER_LIST);
done();
});
test("peerList catch error", async done => {
spyOn(console, "error");
nock(/\w*(\W)/g)
.get(`/api/peers/${channel}`)
.replyWithError({ code: "ECONNREFUSED" });
const expectedActions = [{ type: types.PEER_LIST }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.peerList(channel));
const actions = store.getActions();
expect(actions).toEqual([]);
done();
});
test("transaction", async done => {
nock(/\w*(\W)/g)
.get(`/api/transaction/${channel}/1`)
.reply(200, {
rows: [{ test: "rows" }]
});
const expectedActions = [{ type: types.TRANSACTION }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.transaction(channel, 1));
const actions = store.getActions();
expect(actions[0].type).toEqual(types.TRANSACTION);
done();
});
test("transaction catch error", async done => {
spyOn(console, "error");
nock(/\w*(\W)/g)
.get(`/api/transaction/${channel}/1`)
.replyWithError({ code: "ECONNREFUSED" });
const expectedActions = [{ type: types.TRANSACTION }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.transaction(channel, 1));
const actions = store.getActions();
expect(actions).toEqual([]);
done();
});
test("transactionList", async done => {
nock(/\w*(\W)/g)
.get(`/api/txList/${channel}/0/0/`)
.reply(200, {
rows: [{ test: "rows" }]
});
const expectedActions = [{ type: types.TRANSACTION_LIST }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.transactionList(channel));
const actions = store.getActions();
expect(actions[0].type).toEqual(types.TRANSACTION_LIST);
done();
});
test("transactionList catch error", async done => {
spyOn(console, "error");
nock(/\w*(\W)/g)
.get(`/api/txList/${channel}/0/0/`)
.replyWithError({ code: "ECONNREFUSED" });
const expectedActions = [{ type: types.TRANSACTION_LIST }];
const store = mockStore(initialState, expectedActions);
await store.dispatch(operations.transactionList(channel));
const actions = store.getActions();
expect(actions).toEqual([]);
done();
});
});
// Reducers: feed an action through the root reducer and verify the
// corresponding state slice.
describe("Reducers", () => {
test("blockListReducer", () => {
const payload = { rows: "test" };
const action = actions.getBlockList(payload);
const newState = reducers(initialState, action);
expect(newState.blockList.rows).toBe("test");
});
test("chaincodeListReducer", () => {
const payload = { chaincode: "test" };
const action = actions.getChaincodeList(payload);
const newState = reducers(initialState, action);
expect(newState.chaincodeList.rows).toBe("test");
});
test("channelsReducer", () => {
const payload = { channels: "test" };
const action = actions.getChannels(payload);
const newState = reducers(initialState, action);
expect(newState.channels.rows).toBe("test");
});
test("peerListReducer", () => {
const payload = { peers: "test" };
const action = actions.getPeerList(payload);
const newState = reducers(initialState, action);
expect(newState.peerList.rows).toBe("test");
});
test("transactionReducer", () => {
const payload = { row: "test" };
const action = actions.getTransaction(payload);
const newState = reducers(initialState, action);
expect(newState.transaction.transaction).toBe("test");
});
test("transactionListReducer", () => {
const payload = "test";
const action = actions.getTransactionList(payload);
const newState = reducers(initialState, action);
expect(newState.transactionList.rows).toBe("test");
});
});
// Selectors: verify each selector reads its slice from a hand-built state.
describe("selectors", () => {
test("blockListSelector", () => {
const state = { tables: { blockList: { rows: "test" } } };
const blockList = selectors.blockListSelector(state);
expect(blockList).toBe("test");
});
test("chaincodeListSelector", () => {
const state = { tables: { chaincodeList: { rows: "test" } } };
const chaincodeList = selectors.chaincodeListSelector(state);
expect(chaincodeList).toBe("test");
});
test("channelsSelector", () => {
const state = { tables: { channels: { rows: "test" } } };
const channels = selectors.channelsSelector(state);
expect(channels).toBe("test");
});
test("peerListSelector", () => {
const state = { tables: { peerList: { rows: "test" } } };
const peerList = selectors.peerListSelector(state);
expect(peerList).toBe("test");
});
test("transactionSelector", () => {
const state = { tables: { transaction: { transaction: "test" } } };
const transaction = selectors.transactionSelector(state);
expect(transaction).toBe("test");
});
test("transactionListSelector", () => {
const state = { tables: { transactionList: { rows: "test" } } };
const transactionList = selectors.transactionListSelector(state);
expect(transactionList).toBe("test");
});
});
});
|
#!/bin/bash
############################
# .make.sh
# This script creates symlinks from the home directory to any desired dotfiles in ~/dotfiles
# Before use: chmod +x makesymlinks.sh
# Run with ./makesymlinks.sh
############################
########## Variables
dir=~/dotfiles # dotfiles directory
olddir=~/dotfiles_old # old dotfiles backup directory
files="tmux vimrc.after vimrc.before zshenv zshrc" # list of files/folders to symlink in homedir
##########
# create dotfiles_old in homedir
echo -n "Creating $olddir for backup of any existing dotfiles in ~ ..."
mkdir -p $olddir
echo "done"
# change to the dotfiles directory
echo -n "Changing to the $dir directory ..."
cd $dir
echo "done"
# move any existing dotfiles in homedir to dotfiles_old directory, then create symlinks from the homedir to any files in the ~/dotfiles directory specified in $files
# NOTE(review): `mv` prints an error for dotfiles that do not yet exist in ~;
# this is harmless but noisy.
for file in $files; do
echo "Moving any existing dotfiles from ~ to $olddir"
mv ~/.$file ~/dotfiles_old/
echo "Creating symlink to $file in home directory."
ln -s $dir/$file ~/.$file
done
# Install zsh if needed (via the platform package manager), fetch oh-my-zsh,
# and make zsh the login shell.
install_zsh () {
    # Test to see if zshell is installed. (POSIX marks `[ -o ]` obsolescent;
    # chain separate tests with || instead.)
    if [ -f /bin/zsh ] || [ -f /usr/bin/zsh ]; then
        # Clone my oh-my-zsh repository from GitHub only if it isn't already present.
        if [[ ! -d $dir/oh-my-zsh/ ]]; then
            # Use https: a plain-http clone is insecure (GitHub redirects anyway).
            git clone https://github.com/robbyrussell/oh-my-zsh.git
        fi
        # Set the default shell to zsh if it isn't currently set to zsh.
        # ($SHELL is already the value; the original's $(echo $SHELL) was a no-op.)
        if [[ ! $SHELL == $(which zsh) ]]; then
            chsh -s $(which zsh)
        fi
    else
        # If zsh isn't installed, get the platform of the current machine
        platform=$(uname);
        # If the platform is Linux, try an apt-get/yum install and then recurse
        if [[ $platform == 'Linux' ]]; then
            if [[ -f /etc/redhat-release ]]; then
                sudo yum install zsh
                install_zsh
            fi
            if [[ -f /etc/debian_version ]]; then
                sudo apt-get install zsh
                install_zsh
            fi
        # If the platform is OS X, tell the user to install zsh :)
        elif [[ $platform == 'Darwin' ]]; then
            echo "Please install zsh, then re-run this script!"
            exit
        fi
    fi
}
install_zsh
|
package com.github.jinahya.datagokr.api.b090041_.lunphinfoservice.client;
import com.github.jinahya.datagokr.api.b090041_.lunphinfoservice.client.message.Item;
import com.github.jinahya.datagokr.api.b090041_.lunphinfoservice.client.message.Response;
import com.github.jinahya.datagokr.api.b090041_.lunphinfoservice.client.message.Responses;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Lazy;
import org.springframework.lang.Nullable;
import org.springframework.stereotype.Component;
import org.springframework.web.reactive.function.client.WebClient;
import org.springframework.web.reactive.function.client.WebClientException;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import javax.validation.constraints.Max;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;
import javax.validation.constraints.Positive;
import java.lang.annotation.*;
import java.time.Month;
import java.time.Year;
import java.util.concurrent.atomic.AtomicInteger;
import static com.github.jinahya.datagokr.api.b090041_.lunphinfoservice.client.message.Item.MAX_SOL_DAY;
import static com.github.jinahya.datagokr.api.b090041_.lunphinfoservice.client.message.Item.MIN_SOL_DAY;
import static java.util.Objects.requireNonNull;
import static java.util.Optional.ofNullable;
import static reactor.core.publisher.Flux.fromIterable;
/**
* A client implementation uses an instance of {@link WebClient}.
*
* @author <NAME> <onacit_at_gmail.com>
* @see LunPhInfoServiceClient
*/
@Lazy
@Component
@Slf4j
public class LunPhInfoServiceReactiveClient extends AbstractLunPhInfoServiceClient {
/**
 * An injection qualifier for an instance of {@link WebClient}.
 */
@Qualifier
@Documented
@Target({ElementType.FIELD, ElementType.METHOD, ElementType.TYPE, ElementType.PARAMETER})
@Retention(RetentionPolicy.RUNTIME)
public @interface LunPhInfoServiceWebClient {
}
// -----------------------------------------------------------------------------------------------------------------
/**
 * Turns an unsuccessful service result into an error signal; successful
 * responses are passed through unchanged.
 *
 * @param mono the mono of a raw response; must not be {@code null}.
 * @return a mono which errors with a {@link WebClientException} when the
 * response header does not report a successful result.
 */
protected static Mono<Response> handled(final Mono<Response> mono) {
return requireNonNull(mono, "mono is null").handle((r, h) -> {
if (!Responses.isResultSuccessful(r)) {
h.error(new WebClientException("unsuccessful result: " + r.getHeader()) {
});
} else {
h.next(r);
}
});
}
// ---------------------------------------------------------------------------------------------------- constructors
/**
 * Creates a new instance.
 */
public LunPhInfoServiceReactiveClient() {
super();
}
// -----------------------------------------------------------------------------------------------------------------
/**
 * Falls back to a bare {@link WebClient} targeting the production base URL
 * when no qualified instance has been autowired.
 */
@Autowired
private void onPostConstruct() {
if (webClient == null) {
log.warn("no web client autowired. using a bare instance...");
webClient = WebClient.builder()
.baseUrl(AbstractLunPhInfoServiceClient.BASE_URL_PRODUCTION)
.build();
}
}
// --------------------------------------------------------------------------------------------------- /getLunPhInfo
/**
 * Retrieves a response from {@code /getLunPhInfo} with specified arguments.
 *
 * @param solYear  a value for {@link #QUERY_PARAM_NAME_SOL_YEAR ?solYear}.
 * @param solMonth a value for {@link #QUERY_PARAM_NAME_SOL_MONTH ?solMonth}.
 * @param solDay   a value for {@link #QUERY_PARAM_NAME_SOL_DAY ?solDay}; {@code null} for a whole month.
 * @param pageNo   a value for {@link #QUERY_PARAM_NAME_PAGE_NO ?pageNo}; {@code null} for the first page.
 * @return a mono of response.
 */
public @NotNull Mono<Response> getLunPhInfo(
        @NotNull final Year solYear, @NotNull final Month solMonth,
        @Max(MAX_SOL_DAY) @Min(MIN_SOL_DAY) @Nullable final Integer solDay,
        @Positive @Nullable final Integer pageNo) {
    return webClient()
            .get()
            .uri(builder -> {
                builder.pathSegment(PATH_SEGMENT_GET_LUN_PH_INFO)
                        .queryParam(QUERY_PARAM_NAME_SERVICE_KEY, serviceKey())
                        .queryParam(QUERY_PARAM_NAME_SOL_YEAR, solYear.getValue())
                        .queryParam(QUERY_PARAM_NAME_SOL_MONTH, MONTH_FORMATTER.format(solMonth));
                // The optional day/page parameters are appended only when given.
                if (solDay != null) {
                    builder.queryParam(QUERY_PARAM_NAME_SOL_DAY, AbstractLunPhInfoServiceClient.format02d(solDay));
                }
                if (pageNo != null) {
                    builder.queryParam(QUERY_PARAM_NAME_PAGE_NO, pageNo);
                }
                return builder.build();
            })
            .retrieve()
            .bodyToMono(Response.class)
            .as(LunPhInfoServiceReactiveClient::handled);
}
/**
* Reads all responses from all pages of {@code /getLunPhInfo} with specified arguments.
*
* @param solYear a value for {@link #QUERY_PARAM_NAME_SOL_YEAR ?solYear}.
* @param solMonth a value for {@link #QUERY_PARAM_NAME_SOL_MONTH ?solMonth}.
* @param solDay a value for {@link #QUERY_PARAM_NAME_SOL_DAY ?solDay}; {@code null} for a whole month.
* @return a flux of responses.
*/
public @NotNull Flux<Response> getLunPhInfoForAllPages(
@NotNull final Year solYear, @NotNull final Month solMonth,
@Max(MAX_SOL_DAY) @Min(MIN_SOL_DAY) @Nullable final Integer solDay) {
final AtomicInteger pageNo = new AtomicInteger();
return getLunPhInfo(solYear, solMonth, solDay, pageNo.incrementAndGet())
.expand(r -> {
if (Responses.isLastPage(r)) {
return Mono.empty();
}
return getLunPhInfo(solYear, solMonth, solDay, pageNo.incrementAndGet());
});
}
/**
* Reads all items from {@code /.../getLunPhInfo} with specified arguments.
*
* @param solYear a value for {@link #QUERY_PARAM_NAME_SOL_YEAR ?solYear}.
* @param solMonth a value for {@link #QUERY_PARAM_NAME_SOL_MONTH ?solMonth}.
* @param solDay a value for {@link #QUERY_PARAM_NAME_SOL_DAY ?solDay}; {@code null} for a whole month.
* @return a flux of items.
* @see #getLunPhInfo(Year, Month, Integer, Integer)
*/
public @NotNull Flux<Item> getLunPhInfo(
@NotNull final Year solYear, @NotNull final Month solMonth,
@Max(MAX_SOL_DAY) @Min(MIN_SOL_DAY) @Nullable final Integer solDay) {
return getLunPhInfoForAllPages(solYear, solMonth, solDay)
.flatMap(r -> fromIterable(r.getBody().getItems()));
}
/**
* Reads all items in specified year.
*
* @param year the year whose all items are retrieved.
* @param parallelism a value for parallelism.
* @param scheduler a scheduler.
* @return a flux of items.
* @see #getLunPhInfo(Year, Month, Integer)
*/
public @NotNull Flux<Item> getLunPhInfo(@NotNull final Year year, @Positive final int parallelism,
@NotNull final Scheduler scheduler) {
return Flux.fromArray(Month.values())
.parallel(parallelism)
.runOn(scheduler)
.flatMap(m -> getLunPhInfo(year, m, null))
.sequential();
}
// -----------------------------------------------------------------------------------------------------------------
@Autowired
@LunPhInfoServiceWebClient
@Accessors(fluent = true)
@Setter(AccessLevel.NONE)
@Getter(AccessLevel.PROTECTED)
private WebClient webClient;
}
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#doc: add new label to the issue: `/label <label>`
LABEL="$1"
# Fail fast with a usage message instead of POSTing an empty label.
if [ -z "$LABEL" ]; then
  echo "usage: $0 <label>" >&2
  exit 1
fi
# The issue URL comes from the GitHub Actions event payload.
URL="$(jq -r '.issue.url' "$GITHUB_EVENT_PATH")/labels"
set +x #GITHUB_TOKEN
# --fail makes curl exit non-zero on an HTTP error so the workflow step fails
# visibly instead of silently discarding the error response.
curl --fail -s -o /dev/null \
  -X POST \
  --data "$(jq --arg value "$LABEL" -n '{labels: [ $value ]}')" \
  --header "authorization: Bearer $GITHUB_TOKEN" \
  "$URL"
|
<gh_stars>10-100
// Discriminated union of every MIDI message this module can represent.
type MIDIMessage = MIDIChannelMessage | MIDISystemMessage
// Channel messages carry a 1-based channel number. RPNChange/NRPNChange are
// higher-level messages synthesised from Control Change sequences.
type MIDIChannelMessage =
  | MIDIChannelVoiceMessage
  | MIDIChannelModeMessage
  | RPNChange
  | NRPNChange
// Voice messages: the note/controller/pitch events of the MIDI 1.0 spec.
type MIDIChannelVoiceMessage =
  | NoteOff
  | NoteOn
  | PolyKeyPressure
  | ControlChange
  | ProgramChange
  | ChannelKeyPressure
  | PitchBendChange
type NoteOff = {
  type: 'NoteOff'
  channel: Channel
  note: U7
  velocity: U7
}
// NOTE(review): a NoteOn with velocity 0 is conventionally treated as NoteOff —
// confirm whether the parser normalises that case.
type NoteOn = {
  type: 'NoteOn'
  channel: Channel
  note: U7
  velocity: U7
}
// Per-key aftertouch.
type PolyKeyPressure = {
  type: 'PolyKeyPressure'
  channel: Channel
  note: U7
  pressure: U7
}
// value may be U14 for 14-bit (MSB+LSB paired) controllers.
type ControlChange = {
  type: 'ControlChange'
  channel: Channel
  control: U7
  value: U7 | U14
}
type ProgramChange = {
  type: 'ProgramChange'
  channel: Channel
  number: U7
}
// Channel-wide aftertouch.
type ChannelKeyPressure = {
  type: 'ChannelKeyPressure'
  channel: Channel
  pressure: U7
}
type PitchBendChange = {
  type: 'PitchBendChange'
  channel: Channel
  value: U14
}
// Registered Parameter Number change (CC 101/100 + data entry).
type RPNChange = {
  type: 'RPNChange'
  channel: Channel
  parameter: U14
  value: U14
}
// Non-Registered Parameter Number change (CC 99/98 + data entry).
type NRPNChange = {
  type: 'NRPNChange'
  channel: Channel
  parameter: U14
  value: U14
}
// Channel mode messages (CC 120-127 range in the wire format).
type MIDIChannelModeMessage =
  | AllSoundOff
  | ResetAllControllers
  | LocalControl
  | AllNotesOff
  | OmniOff
  | OmniOn
  | MonoMode
  | PolyMode
type AllSoundOff = {
  type: 'AllSoundOff'
  channel: Channel
}
type ResetAllControllers = {
  type: 'ResetAllControllers'
  channel: Channel
}
type LocalControl = {
  type: 'LocalControl'
  channel: Channel
  value: boolean // false: off, true: on
}
type AllNotesOff = {
  type: 'AllNotesOff'
  channel: Channel
}
type OmniOff = {
  type: 'OmniOff'
  channel: Channel
}
type OmniOn = {
  type: 'OmniOn'
  channel: Channel
}
type MonoMode = {
  type: 'MonoMode'
  channel: Channel
}
type PolyMode = {
  type: 'PolyMode'
  channel: Channel
}
// 1-indexed, 1-16
type Channel = number
// System messages have no channel; they address the whole connection.
type MIDISystemMessage =
  | SysEx
  | MTCQuarterFrame
  | SongPositionPointer
  | SongSelect
  | TuneRequest
  | TimingClock
  | Start
  | Continue
  | Stop
  | ActiveSensing
  | SystemReset
// System Exclusive: manufacturer/device specific payload.
type SysEx = {
  type: 'SysEx'
  deviceId: SysExDeviceID
  data: U7[]
}
// See MIDI 1.0 Detailed Specification 4.2 p34-35 (p66-67 in PDF)
type SysExDeviceID = U7 | [U7] | [U7, U7, U7]
// TODO implement proper parsing of MTC values
// See MIDI Time Code spec p1 (p116 of complete spec)
type MTCQuarterFrame = {
  type: 'MTCQuarterFrame'
  data: U7
}
// Position in MIDI beats (14-bit).
type SongPositionPointer = {
  type: 'SongPositionPointer'
  position: U14
}
type SongSelect = {
  type: 'SongSelect'
  number: U7
}
// The remaining system messages carry no data beyond their tag.
type TuneRequest = {
  type: 'TuneRequest'
}
type TimingClock = {
  type: 'TimingClock'
}
type Start = {
  type: 'Start'
}
type Continue = {
  type: 'Continue'
}
type Stop = {
  type: 'Stop'
}
type ActiveSensing = {
  type: 'ActiveSensing'
}
type SystemReset = {
  type: 'SystemReset'
}
// One of:
// [status, data, data]
// [status, data]
// [status]
// [0xF0, ...data, 0xF7] (sysex message)
type EncodedMessage = number[]
// Width-documenting aliases; TypeScript does not enforce the bit ranges.
type U4 = number
type U7 = number
type U14 = number
// Minimal duck type covering Buffer, Uint8Array and plain number arrays.
interface BufferLike {
  [byte: number]: number
  length: number
}
// Node-style completion callback.
type NodeCallback = (error: Error | null | undefined) => void
|
// Copyright 2014 The lime Authors.
// Use of this source code is governed by a 2-clause
// BSD-style license that can be found in the LICENSE file.
package log
import (
"strings"
"sync"
"testing"
"time"
"github.com/limetext/log4go"
)
// testlogger adapts a plain func(string) into a log4go.LogWriter so tests can
// capture formatted log messages.
type testlogger func(string)
// LogWrite forwards the record's formatted message to the wrapped function.
func (l testlogger) LogWrite(rec *log4go.LogRecord) {
	l(rec.Message)
}
// Close is a no-op; there is nothing to release.
func (l testlogger) Close() {}
// TestGlobalLog installs a capturing filter on the package-level Global logger
// and checks that Info() formats and delivers the message to it. The WaitGroup
// blocks the test until the (possibly asynchronous) filter has actually run.
func TestGlobalLog(t *testing.T) {
	var wg sync.WaitGroup
	// Drop Global's default filters so only the test filter receives messages.
	Global.Close()
	Global.AddFilter("globaltest", FINEST, testlogger(func(str string) {
		if str != "Testing: hello world" {
			t.Errorf("got: %s", str)
		}
		wg.Done()
	}))
	wg.Add(1)
	Info("Testing: %s %s", "hello", "world")
	wg.Wait()
}
// TestLogf logs one message at every level (including an out-of-range one) and
// verifies that each emitted entry contains the logged text.
func TestLogf(t *testing.T) {
	l := NewLogger()
	logs := []string{}
	l.AddFilter("test", FINEST, testlogger(func(str string) {
		logs = append(logs, str)
	}))
	// Use a random-ish payload (the current time) so entries differ between runs.
	teststring := time.Now().String()
	levels := []Level{FINEST, FINE, DEBUG, TRACE, INFO, WARNING, ERROR, CRITICAL, 999}
	for _, lvl := range levels {
		// Pass the payload as an argument, not as the format string, so a '%'
		// in it can never be misinterpreted as a formatting verb.
		l.Logf(lvl, "%s", teststring)
	}
	// Derive the expected count from the slice instead of hard-coding 9.
	if len(logs) != len(levels) {
		t.Errorf("Expected %d log entries, got %d\n", len(levels), len(logs))
	}
	for _, entry := range logs {
		if !strings.Contains(entry, teststring) {
			t.Errorf("Expected log entry %q to contain string %q", entry, teststring)
		}
	}
}
// TestClose verifies that closing a freshly created logger does not panic.
func TestClose(t *testing.T) {
	logger := NewLogger()
	logger.Close()
}
// TestNewLogger verifies that the constructor returns a non-nil logger.
func TestNewLogger(t *testing.T) {
	if logger := NewLogger(); logger == nil {
		t.Error("Returned a nil logger")
	}
}
// TestLogLevels installs a filter at every level, including an out-of-range
// one. Because the logger internals are not accessible, the test passes as
// long as no error occurs.
func TestLogLevels(t *testing.T) {
	logger := NewLogger()
	levels := []Level{FINEST, FINE, DEBUG, TRACE, INFO, WARNING, ERROR, CRITICAL, 999}
	for _, level := range levels {
		// Use a random-ish string (the current time) as the filter name.
		logger.AddFilter(time.Now().String(), level, testlogger(func(str string) {}))
	}
}
// TestLogFunctions exercises each level-specific logging helper once.
func TestLogFunctions(t *testing.T) {
	logger := NewLogger()
	stamp := func() string { return time.Now().String() }
	logger.Finest(stamp())
	logger.Fine(stamp())
	logger.Debug(stamp())
	logger.Trace(stamp())
	logger.Warn(stamp())
	logger.Error(stamp())
	logger.Critical(stamp())
}
|
import math
def total_distance(points):
    """
    Calculate the total distance traveled when visiting the given 2D points in
    order, i.e. the sum of Euclidean distances of consecutive pairs.

    Args:
        points: A list of 2D points, each an (x, y) tuple of numbers.

    Returns:
        float: The total distance traveled; 0 for fewer than two points.
    """
    # zip(points, points[1:]) yields consecutive pairs; math.hypot computes
    # sqrt(dx*dx + dy*dy) without an intermediate overflow-prone square sum.
    return sum(
        math.hypot(p2[0] - p1[0], p2[1] - p1[1])
        for p1, p2 in zip(points, points[1:])
    )
# Build an ARM-targeting MXE cross-compilation environment and add it to PATH.
cd ~ || exit 1
echo "Clone MXE from my source that way ARM is included :)"
git clone https://github.com/armdevvel/mxe --depth=1
echo "cd mxe!"
cd mxe || exit 1
echo "Download LLVM-MinGW!"
mkdir usr && cd usr && wget https://github.com/armdevvel/llvm-mingw/releases/download/13.0/armv7-only-llvm-mingw-linux-x86_64.tar.xz
echo "Extract LLVM-MinGW!"
tar -xf armv7-only-llvm-mingw-linux-x86_64.tar.xz
echo "Setup known packages that work! This may take a while... Hold on tight!~"
cd .. && make MXE_TARGETS="armv7-w64-mingw32" libpng cmake sdl2 sdl tiff jpeg ccache lame libxml++ libxml2 libxslt libyaml libzip libwebp libusb1 sdl_image sdl_mixer sdl2_mixer zlib yasm dbus pcre boost icu4c
echo "Adding MXE to your PATH (bash)"
# `echo "\n"` printed a literal backslash-n; emit a real blank line instead.
echo "" >> ~/.bashrc
# The previous form wrote `export PATH = ...` — the spaces around `=` make the
# line a syntax error when ~/.bashrc is sourced. Also use $HOME rather than
# hard-coding /home/$USER.
echo "export PATH=\"$HOME/mxe/usr/bin:\$PATH\"" >> ~/.bashrc
echo "Finished!"
<gh_stars>0
// Ambient declaration for Atlassian's AJS global object.
declare var AJS: {
    I18n: {
        // A rest parameter cannot be optional (`...args?` is a syntax error);
        // omitting the arguments is already allowed with a plain rest param.
        getText: (key: string, ...args: string[]) => string
    },
    toInit: (initFunc: () => any) => void,
    params: {
        baseUrl: string
    }
};
|
public class Account {
private String name;
private String accountNumber;
private double balance;
// Constructor
public Account(String name, String accountNumber, double balance) {
this.name = name;
this.accountNumber = accountNumber;
this.balance = balance;
}
// Get methods
public String getName() {
return this.name;
}
public String getAccountNumber() {
return this.accountNumber;
}
public double getBalance() {
return this.balance;
}
// Set methods
public void setName(String name) {
this.name = name;
}
public void setAccountNumber(String accountNumber) {
this.accountNumber = accountNumber;
}
public void setBalance(double balance) {
this.balance = balance;
}
// Other methods
public void deposit(double amount) {
this.balance += amount;
}
public void withdraw(double amount) {
this.balance -= amount;
}
} |
<filename>hg100r/reboot_router.py<gh_stars>0
import json
import requests
from typing import Dict, Any
def reboot_router(
    api_url: str,
    token: str,
    timeout: float = 3.0,
) -> Dict[str, Any]:
    """Request a reboot of the router via its JSON-RPC endpoint.

    Args:
        api_url: Full URL of the router's JSON-RPC API.
        token: Session token authorising the call.
        timeout: Request timeout in seconds.

    Returns:
        The decoded JSON-RPC response as a dictionary.

    Raises:
        requests.HTTPError: If the router answers with an HTTP error status.
        requests.exceptions.Timeout: If no answer arrives within ``timeout``.
    """
    payload = {
        'method': 'System.reboot',
        'id': 24,  # arbitrary JSON-RPC request id
        'jsonrpc': '2.0',
        'token': token,
    }
    # NOTE(review): the body is JSON but the Content-Type claims form encoding;
    # this mirrors what the device appears to expect — confirm before changing.
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    }
    res = requests.post(api_url,
                        headers=headers,
                        data=json.dumps(payload),
                        timeout=timeout,
                        )
    # Fail loudly on HTTP errors instead of JSON-decoding an error page.
    res.raise_for_status()
    return res.json()
|
#!/bin/bash
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Detects which generation of Robbiecon datadir layout is present and removes
# the database files that newer versions no longer use.
if [ -d "$1" ]; then
  cd "$1"
else
  echo "Usage: $0 <datadir>" >&2
  echo "Removes obsolete Robbiecon database files" >&2
  exit 1
fi
# LEVEL encodes the newest layout whose marker files are all present; later
# checks overwrite earlier ones, so the highest matching level wins.
LEVEL=0
if [ -f wallet.dat -a -f addr.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=1; fi
if [ -f wallet.dat -a -f peers.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=2; fi
if [ -f wallet.dat -a -f peers.dat -a -f coins/CURRENT -a -f blktree/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=3; fi
if [ -f wallet.dat -a -f peers.dat -a -f chainstate/CURRENT -a -f blocks/index/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=4; fi
case $LEVEL in
  0)
    echo "Error: no Robbiecon datadir detected."
    exit 1
    ;;
  1)
    echo "Detected old Robbiecon datadir (before 0.7)."
    echo "Nothing to do."
    exit 0
    ;;
  2)
    echo "Detected Robbiecon 0.7 datadir."
    ;;
  3)
    echo "Detected Robbiecon pre-0.8 datadir."
    ;;
  4)
    echo "Detected Robbiecon 0.8 datadir."
    ;;
esac
# Build the deletion lists. The unquoted $FILES expansions are deliberate:
# `blk????.dat` must stay a glob so the per-file -f test below expands it.
FILES=""
DIRS=""
if [ $LEVEL -ge 3 ]; then FILES=$(echo $FILES blk????.dat blkindex.dat); fi
if [ $LEVEL -ge 2 ]; then FILES=$(echo $FILES addr.dat); fi
if [ $LEVEL -ge 4 ]; then DIRS=$(echo $DIRS coins blktree); fi
for FILE in $FILES; do
  if [ -f $FILE ]; then
    echo "Deleting: $FILE"
    rm -f $FILE
  fi
done
for DIR in $DIRS; do
  if [ -d $DIR ]; then
    echo "Deleting: $DIR/"
    rm -rf $DIR
  fi
done
echo "Done."
<gh_stars>1-10
// Barrel file: re-export the package's public API surface.
export * from './src/interfaces';
export { FormModel, PropertiesFieldName } from './src/form-model';
|
# NOTE(review): `coins` and `amount` must be defined earlier in this script;
# neither is visible in this fragment — confirm.
# Sort the denominations ascending so the largest sits at the end of the list.
coins.sort()
# Pointer to the current (largest remaining) denomination.
i = len(coins) - 1
# Number of coins used so far.
result = 0
# Greedily take the largest denomination that still fits into the remainder.
# NOTE(review): greedy selection is only optimal for canonical coin systems
# (e.g. 1/5/10/25); for arbitrary denominations it can overcount or leave a
# nonzero remainder — confirm the intended input domain.
while amount > 0 and i >= 0:
    if amount >= coins[i]:
        amount -= coins[i]
        result += 1
    else:
        i -= 1
print(result)
<gh_stars>1-10
const router = require("express").Router();
const sequelize = require("../../config/connection");
// NOTE(review): `Favorite` is imported but never referenced in this file —
// confirm before removing it.
const { User, Favorite, UserFavorites } = require("../../models");

// GET all user/favorite links: /api/userfav/users
router.get("/users", (req, res) => {
  console.log("====GET=userfav====");
  UserFavorites.findAll({
    attributes: [
      "id",
      "user_id",
      "favorite_id",
      [
        // Resolve the favorite's display name via a correlated subquery.
        sequelize.literal(
          "(SELECT food_name FROM favorite WHERE favorite.id = userfav.favorite_id)"
        ),
        "food_name",
      ],
    ],
    include: [
      {
        model: User,
        attributes: ["first_name", "last_name"],
      },
    ],
  })
    .then((dbFavoriteData) => res.json(dbFavoriteData))
    .catch((err) => {
      console.log(err);
      // 500 Internal Server Error — 303 (See Other) is a redirect status and
      // was the wrong way to report a failure.
      res.status(500).json(err);
    });
});
// GET the logged-in user's favorites: /api/userfav/user/:id
// NOTE(review): the :id route parameter is ignored — the query filters by the
// session's user_id instead; confirm whether the parameter should be used.
router.get("/user/:id", (req, res) => {
  console.log("====GET=favorites=BY=user====");
  UserFavorites.findAll({
    where: {
      user_id: req.session.user_id,
    },
    attributes: [
      "id",
      "user_id",
      "favorite_id",
      [
        // Resolve the favorite's display name via a correlated subquery.
        sequelize.literal(
          "(SELECT food_name FROM favorite WHERE favorite.id = userfav.favorite_id)"
        ),
        "food_name",
      ],
    ],
    include: [
      {
        model: User,
        attributes: ["first_name", "last_name"],
      },
    ],
  })
    .then((dbFavoriteData) => res.json(dbFavoriteData))
    .catch((err) => {
      console.log(err);
      // 500 instead of the redirect status 303 previously used for errors.
      res.status(500).json(err);
    });
});
// GET all user/favorite links (no user join): /api/userfav
router.get("/", (req, res) => {
  console.log("====GET=userfav=BY=favorite====");
  UserFavorites.findAll({
    attributes: [
      "id",
      "user_id",
      "favorite_id",
      [
        // Resolve the favorite's display name via a correlated subquery.
        sequelize.literal(
          "(SELECT food_name FROM favorite WHERE favorite.id = userfav.favorite_id)"
        ),
        "food_name",
      ],
    ],
  })
    .then((dbFavoriteData) => res.json(dbFavoriteData))
    // Previously this chain had no .catch, so a query failure produced an
    // unhandled promise rejection and the request never received a response.
    .catch((err) => {
      console.log(err);
      res.status(500).json(err);
    });
});
// GET a per-favorite count of users: /api/userfav/favreports
router.get("/favreports", (req, res) => {
  UserFavorites.findAll({
    attributes: [
      "favorite_id",
      [sequelize.fn("count", sequelize.col("user_id")), "count"],
    ],
    group: ["favorite_id"],
  })
    .then((reportData) => {
      res.json(reportData);
    })
    .catch((err) => {
      console.log(err);
      // 500 instead of the redirect status 303 previously used for errors.
      res.status(500).json(err);
    });
});
// GET a single user/favorite link by its id: /api/userfav/1
router.get("/:id", (req, res) => {
  console.log("====GET=ID=userfav====");
  UserFavorites.findOne({
    where: {
      id: req.params.id,
    },
    attributes: [
      "id",
      "user_id",
      "favorite_id",
      [
        // Resolve the favorite's display name via a correlated subquery.
        sequelize.literal(
          "(SELECT food_name FROM favorite WHERE favorite.id = userfav.favorite_id)"
        ),
        "food_name",
      ],
    ],
    include: [
      {
        model: User,
        attributes: ["first_name", "last_name"],
      },
    ],
  })
    .then((dbUserFavData) => {
      if (!dbUserFavData) {
        // 404 Not Found — 303 (See Other) is a redirect status and was wrong
        // for a missing resource (the PUT handler already used 404).
        res.status(404).json({ message: "No favorite found with this id" });
        return;
      }
      res.json(dbUserFavData);
    })
    .catch((err) => {
      console.log(err);
      res.status(500).json(err);
    });
});
// POST create a user/favorite link: /api/userfav
router.post("/", (req, res) => {
  // Log tag fixed: it previously said "profile" (copy-paste from another router).
  console.log("======POST=userfav=====");
  // expects {"favorite_id": 8}; user_id is taken from the session, not the body
  UserFavorites.create({
    user_id: req.session.user_id,
    favorite_id: req.body.favorite_id,
  })
    .then((dbUserFavData) => res.json(dbUserFavData))
    .catch((err) => {
      console.log(err);
      // 500 instead of the redirect status 303 previously used for errors.
      res.status(500).json(err);
    });
});
// PUT update a user/favorite link by id: /api/userfav/1
router.put("/:id", (req, res) => {
  console.log("=====UPDATE==userfav=====");
  UserFavorites.update(
    {
      user_id: req.session.user_id,
      favorite_id: req.body.favorite_id,
    },
    {
      where: {
        id: req.params.id,
      },
    }
  )
    .then((dbUserFavData) => {
      // update() resolves to an array whose first element is the affected-row
      // count; a missing id yields [0], which is truthy, so the previous
      // `!dbUserFavData` check could never fire. Check the count instead.
      if (!dbUserFavData || dbUserFavData[0] === 0) {
        res.status(404).json({ message: "No favorite found with this id" });
        return;
      }
      res.json(dbUserFavData);
    })
    .catch((err) => {
      console.log(err);
      // 500 instead of the redirect status 303 previously used for errors.
      res.status(500).json(err);
    });
});
// DELETE a user/favorite link by favorite id: /api/userfav/delete/1
router.delete("/delete/:id", (req, res) => {
  // Log tag fixed: it previously said "profile" (copy-paste from another router).
  console.log("=====DELETE==userfav=====");
  console.log("id", req.params.id);
  UserFavorites.destroy({
    where: {
      user_id: req.session.user_id,
      favorite_id: req.params.id,
    },
  })
    .then((deletedCount) => {
      // destroy() resolves to the NUMBER of rows removed, not an array, so the
      // previous `deletedCount.length === 0` check was always undefined and the
      // 404 branch could never fire.
      if (deletedCount === 0) {
        res.status(404).json({ message: "No favorite found with this id" });
        return;
      }
      res.json(deletedCount);
    })
    .catch((err) => {
      console.log(err);
      // 500 instead of the redirect status 303 previously used for errors.
      res.status(500).json(err);
    });
});
module.exports = router;
|
<filename>KillCovid-19/killcovid19leaderboardwidget.cpp<gh_stars>0
#include "killcovid19leaderboardwidget.h"
#include "killcovidrun.h"
#include <shared/leaderboard.h>
/**
* \file killcovid19leaderboardwidget.h
* \brief Implementation of the KillCovid19LeaderboardWidget.
* \author <NAME>
*/
/**
 * \brief Stores the leaderboard to display and builds the initial page.
 *
 * Seeds the page/layout members so that refresh() can delete the previous
 * instances unconditionally on its first call.
 */
void KillCovid19LeaderboardWidget::setLeaderboard(Leaderboard *lb){
    leaderboard = lb;
    page = new QWidget();
    layout = new QVBoxLayout();
    refresh();
}
void KillCovid19LeaderboardWidget::refresh(){
delete page;
delete layout;
page = new QWidget();
layout = new QVBoxLayout();
auto titleLabel=new QLabel("<h2>LEADERBOARD<h2>");
layout->addWidget(titleLabel);
layout->addSpacing(2);
layout->setAlignment(Qt::AlignHCenter | Qt::AlignTop);
auto it = leaderboard->begin();
for(int i = 1; i <= 10 && it != leaderboard->end(); i++){
KillCovidRun* covidRun = (KillCovidRun*) *it;
QString rank = QString::number(i) + ". ";
QString playerName = covidRun->getPlayerName() + " ";
QString score = QString::number(covidRun->getScore());
layout->addWidget(new QLabel(rank + playerName + score));
layout->addSpacing(20);
it++;
}
page->setLayout(layout);
setWidget(page);
}
// Default constructor: members are initialised later via setLeaderboard().
KillCovid19LeaderboardWidget::KillCovid19LeaderboardWidget(){
}
|
#!/bin/sh
# shell setup
# this file adds the binary to your PATH when it is sourced in a shell profile
# `{path_to_bin}` is a template placeholder substituted at install time.
# affix colons on either side of $PATH to simplify matching
case ":${PATH}:" in
    *:"{path_to_bin}":*)
        # already on PATH — nothing to do
        ;;
    *)
        # Prepending path in case a system installed binary must be overwritten
        export PATH="{path_to_bin}:$PATH"
        ;;
esac
package org.trenkmann.restsample.controller;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.security.SecurityRequirement;
import java.net.URI;
import java.net.URISyntaxException;
import org.springframework.hateoas.CollectionModel;
import org.springframework.hateoas.EntityModel;
import org.springframework.hateoas.IanaLinkRelations;
import org.springframework.hateoas.RepresentationModel;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
import org.trenkmann.restsample.data.MP3Repository;
import org.trenkmann.restsample.exception.MP3CanNotDeleteException;
import org.trenkmann.restsample.exception.MP3CanNotFoundException;
import org.trenkmann.restsample.model.MP3;
import org.trenkmann.restsample.model.dto.MP3DTO;
/**
 * REST controller exposing CRUD operations for {@link MP3} entities as a
 * HATEOAS API under {@code /mp3s} and {@code /mp3/{id}}.
 */
@RestController
public class MP3Controller {

    private final MP3ResourceAssembler assembler;
    private final MP3Repository mp3Repository;
    // Converts incoming DTOs into entities; ObjectMapper is thread-safe, so a
    // single shared instance is fine.
    private final ObjectMapper mapper = new ObjectMapper();

    MP3Controller(MP3Repository mp3Repository, MP3ResourceAssembler assembler) {
        this.assembler = assembler;
        this.mp3Repository = mp3Repository;
    }

    // aggregates
    @Operation(summary = "collects all MP3 in storage", security = @SecurityRequirement(name = "basicScheme"))
    @GetMapping(path = "/mp3s")
    public CollectionModel<EntityModel<MP3>> getAllMP3s() {
        return assembler.toCollectionModel(mp3Repository.findAll());
    }

    // single item
    @Operation(summary = "get a MP3 out of the storage", security = @SecurityRequirement(name = "basicScheme"))
    @GetMapping(path = "/mp3/{id}")
    public EntityModel<MP3> getMP3ById(@PathVariable Long id) {
        MP3 mp3 = mp3Repository.findById(id).orElseThrow(() -> new MP3CanNotFoundException(id));
        return assembler.toModel(mp3);
    }

    /**
     * Stores a new MP3 and answers 201 Created with a Location header pointing
     * at the new resource.
     */
    @Operation(summary = "send a single MP3 to storage", security = @SecurityRequirement(name = "basicScheme"))
    @PostMapping(path = "/mp3s")
    public ResponseEntity<RepresentationModel> newMP3(@RequestBody MP3DTO mp3DTO)
            throws URISyntaxException {
        MP3 mp3 = mapper.convertValue(mp3DTO, MP3.class);
        EntityModel<MP3> entityModel = assembler.toModel(mp3Repository.save(mp3));
        return ResponseEntity
                .created(
                        new URI(entityModel.getRequiredLink(IanaLinkRelations.SELF).getHref()))
                .body(entityModel);
    }

    /**
     * Upsert: copies the DTO's fields onto the stored entity (or a fresh one
     * when the id is unknown) and saves it.
     */
    @Operation(summary = "alter or create a single MP3 in storage", security = @SecurityRequirement(name = "basicScheme"))
    @PutMapping(path = "/mp3/{id}")
    public EntityModel<MP3> changeExistingMP3(@PathVariable Long id, @RequestBody MP3DTO mp3DTO) {
        MP3 changedMP3 = mapper.convertValue(mp3DTO, MP3.class);
        MP3 originMP3 = mp3Repository.findById(id).orElse(new MP3(id));
        originMP3.setAlbum(changedMP3.getAlbum());
        originMP3.setAlbumOrderNumber(changedMP3.getAlbumOrderNumber());
        originMP3.setArtist(changedMP3.getArtist());
        originMP3.setLength(changedMP3.getLength());
        originMP3.setTitle(changedMP3.getTitle());
        return assembler.toModel(mp3Repository.save(originMP3));
    }

    // Summary fixed: it previously repeated the PUT endpoint's description.
    @Operation(summary = "delete a single MP3 from storage", security = @SecurityRequirement(name = "basicScheme"))
    @DeleteMapping(path = "/mp3/{id}")
    public ResponseEntity<RepresentationModel> deleteMP3(@PathVariable Long id) {
        try {
            mp3Repository.deleteById(id);
        } catch (Exception ex) {
            // NOTE(review): the cause is dropped here; if MP3CanNotDeleteException
            // gains a (Long, Throwable) constructor, pass `ex` along.
            throw new MP3CanNotDeleteException(id);
        }
        return ResponseEntity.noContent().build();
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.