text
stringlengths 1
1.05M
|
|---|
#! /bin/sh
#
# To be used with Photon environment
#
# Launch the three helper scripts, each in its own pterm (Photon terminal)
# window. All three are started as background jobs, so this script returns
# immediately and the helpers run in parallel.
pterm ./repeater.sh &
pterm ./sendCmd.sh &
pterm ./armrun.sh &
|
<reponame>javisantos/paseto
const crypto = require('crypto')
const { promisify } = require('util')
const { PasetoNotSupported } = require('../errors')
const randomBytes = require('../help/random_bytes')
const generateKeyPair = promisify(crypto.generateKeyPair)
const LOCAL_KEY_LENGTH = 32
const PUBLIC_KEY_ARGS = ['rsa', { modulusLength: 2048 }]
/**
 * Generate a key for the given v1 PASETO purpose.
 *
 * @param {string} purpose - either 'local' (symmetric) or 'public' (asymmetric).
 * @returns {Promise<KeyObject>} a 32-byte secret key for 'local', or the
 *   private half of a freshly generated 2048-bit RSA key pair for 'public'.
 * @throws {PasetoNotSupported} for any other purpose value.
 */
async function generateKey (purpose) {
  if (purpose === 'local') {
    const secret = await randomBytes(LOCAL_KEY_LENGTH)
    return crypto.createSecretKey(secret)
  }

  if (purpose === 'public') {
    const { privateKey } = await generateKeyPair(...PUBLIC_KEY_ARGS)
    return privateKey
  }

  throw new PasetoNotSupported('unsupported v1 purpose')
}

module.exports = generateKey
|
<filename>presentation/0/index.js
import React from "react";
import {
Heading,
Slide,
Text,
Link,
Image
} from "spectacle";
import preloader from "spectacle/lib/utils/preloader";
// Image assets used by this slide; preloader() fetches them up front so
// they are already cached by the time the slide is rendered.
const images = {
  logoTwitter: require("../../assets/logo-twitter.svg")
};
preloader(images);

// Title slide: deck title, follow-along short link and presenter credit.
// NOTE(review): "<NAME>" is an anonymisation placeholder left by the data
// dump — the original presenter name needs to be restored.
export const Slide0 = (
  <Slide>
    <Heading size={4} margin="0 0 1em 0">.NET Core and Single Page Applications</Heading>
    <Text>Follow along at: http://bit.ly/netcore2spa</Text>
    <Text>Originally presented by <NAME> @ NDC Sydney 2017</Text>
    <Text style={{ marginTop: 0 }}>
      <Image
        height="1.5em"
        src={images.logoTwitter}
        style={{ verticalAlign: "middle", borderRadius: "5px", marginRight: "10px" }}
        bgColor="#4099FF"
      />
      @stevensanderson
    </Text>
  </Slide>
);
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v7/services/campaign_draft_service.proto
require 'google/ads/googleads/v7/enums/response_content_type_pb'
require 'google/ads/googleads/v7/resources/campaign_draft_pb'
require 'google/api/annotations_pb'
require 'google/api/client_pb'
require 'google/api/field_behavior_pb'
require 'google/api/resource_pb'
require 'google/longrunning/operations_pb'
require 'google/protobuf/field_mask_pb'
require 'google/rpc/status_pb'
require 'google/protobuf'
# Registers the message descriptors of campaign_draft_service.proto in the
# shared generated descriptor pool. This file is generated by protoc — field
# names, numbers and type strings must stay in sync with the .proto source;
# do not edit by hand.
Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/ads/googleads/v7/services/campaign_draft_service.proto", :syntax => :proto3) do
    # Lookup of a single campaign draft by its resource name.
    add_message "google.ads.googleads.v7.services.GetCampaignDraftRequest" do
      optional :resource_name, :string, 1
    end
    # Batch mutate request: a list of create/update/remove operations.
    add_message "google.ads.googleads.v7.services.MutateCampaignDraftsRequest" do
      optional :customer_id, :string, 1
      repeated :operations, :message, 2, "google.ads.googleads.v7.services.CampaignDraftOperation"
      optional :partial_failure, :bool, 3
      optional :validate_only, :bool, 4
      optional :response_content_type, :enum, 5, "google.ads.googleads.v7.enums.ResponseContentTypeEnum.ResponseContentType"
    end
    add_message "google.ads.googleads.v7.services.PromoteCampaignDraftRequest" do
      optional :campaign_draft, :string, 1
      optional :validate_only, :bool, 2
    end
    # One mutate operation; exactly one of create/update/remove is set.
    add_message "google.ads.googleads.v7.services.CampaignDraftOperation" do
      optional :update_mask, :message, 4, "google.protobuf.FieldMask"
      oneof :operation do
        optional :create, :message, 1, "google.ads.googleads.v7.resources.CampaignDraft"
        optional :update, :message, 2, "google.ads.googleads.v7.resources.CampaignDraft"
        optional :remove, :string, 3
      end
    end
    add_message "google.ads.googleads.v7.services.MutateCampaignDraftsResponse" do
      optional :partial_failure_error, :message, 3, "google.rpc.Status"
      repeated :results, :message, 2, "google.ads.googleads.v7.services.MutateCampaignDraftResult"
    end
    add_message "google.ads.googleads.v7.services.MutateCampaignDraftResult" do
      optional :resource_name, :string, 1
      optional :campaign_draft, :message, 2, "google.ads.googleads.v7.resources.CampaignDraft"
    end
    # Paged listing of asynchronous errors for a campaign draft.
    add_message "google.ads.googleads.v7.services.ListCampaignDraftAsyncErrorsRequest" do
      optional :resource_name, :string, 1
      optional :page_token, :string, 2
      optional :page_size, :int32, 3
    end
    add_message "google.ads.googleads.v7.services.ListCampaignDraftAsyncErrorsResponse" do
      repeated :errors, :message, 1, "google.rpc.Status"
      optional :next_page_token, :string, 2
    end
  end
end
# Ruby constants for each service message class, resolved from the generated
# descriptor pool. Generated code — do not edit by hand.
module Google
  module Ads
    module GoogleAds
      module V7
        module Services
          GetCampaignDraftRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v7.services.GetCampaignDraftRequest").msgclass
          MutateCampaignDraftsRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v7.services.MutateCampaignDraftsRequest").msgclass
          PromoteCampaignDraftRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v7.services.PromoteCampaignDraftRequest").msgclass
          CampaignDraftOperation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v7.services.CampaignDraftOperation").msgclass
          MutateCampaignDraftsResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v7.services.MutateCampaignDraftsResponse").msgclass
          MutateCampaignDraftResult = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v7.services.MutateCampaignDraftResult").msgclass
          ListCampaignDraftAsyncErrorsRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v7.services.ListCampaignDraftAsyncErrorsRequest").msgclass
          ListCampaignDraftAsyncErrorsResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v7.services.ListCampaignDraftAsyncErrorsResponse").msgclass
        end
      end
    end
  end
end
|
package com.smallcake.utils;
import androidx.annotation.IntRange;

import java.math.BigDecimal;
import java.math.MathContext;
import java.math.RoundingMode;
/**
* MyApplication -- com.smallcake.utils
* Created by Small Cake on 2018/8/2 16:47.
*
* 多位小数的精确计算工具类
* 使用BigDecimal,但一定要用BigDecimal(String)构造器,而千万不要用BigDecimal(double)来构造
* (也不能将float或double型转换成String再来使用BigDecimal(String)来构造,
* 因为在将float或double转换成String时精度已丢失)。例如new BigDecimal(0.1),它将返回一个BigDecimal,
* 也即0.1000000000000000055511151231257827021181583404541015625,
* 正确使用BigDecimal,程序就可以打印出我们所期望的结果0.9
* Java代码:
System.out.println(new BigDecimal("2.0").subtract(new BigDecimal("1.10")));// 0.9
另外,如果要比较两个浮点数的大小,要使用BigDecimal的compareTo方法。
BigDecimal.setScale()方法用于格式化小数点
BigDecimal.ROUND_UP:进位处理
BigDecimal.ROUND_DOWN:直接删除多余的小数位
BigDecimal.ROUND_HALF_UP:四舍五入(默认)2.35变成2.4
BigDecimal.ROUND_HALF_DOWN:四舍五入,2.35变成2.3,如果是5则向下舍
BigDecimal.ROUND_CEILING:接近正无穷大的舍入
BigDecimal.ROUND_FLOOR:接近负无穷大的舍入,数字>0和ROUND_UP作用一样,数字<0和ROUND_DOWN作用一样
BigDecimal.ROUND_HALF_EVEN:向最接近的数字舍入,如果与两个相邻数字的距离相等,则向相邻的偶数舍入
https://www.cnblogs.com/zouhao/p/6713230.html
https://blog.csdn.net/ochangwen/article/details/51531866
*/
public class ComputationUtil {
    /*
     * Exact multi-digit decimal arithmetic helpers.
     *
     * All math is done with BigDecimal built from String operands. Never use
     * the BigDecimal(double) constructor (and never convert a float/double to
     * a String first — precision is already lost at that point): e.g.
     * new BigDecimal(0.1) is
     * 0.1000000000000000055511151231257827021181583404541015625, whereas
     * new BigDecimal("2.0").subtract(new BigDecimal("1.10")) prints the
     * expected 0.9.
     *
     * To compare decimal values, use BigDecimal.compareTo (see compare()).
     * BigDecimal is slower than double/float — for large, complex workloads
     * decide per case which representation to use.
     */

    /** Rounding used by the scaled overloads: round half up (四舍五入). */
    private static final RoundingMode DEFAULT_ROUNDING = RoundingMode.HALF_UP;

    /**
     * Exact addition. (Bug fix: the old Javadoc described this as
     * multiplication.)
     *
     * @param a first operand as a decimal string
     * @param b second operand as a decimal string
     * @return a + b as a double
     */
    public static double add(String a, String b) {
        return new BigDecimal(a).add(new BigDecimal(b)).doubleValue();
    }

    /**
     * Exact subtraction.
     *
     * @return a - b as a double
     */
    public static double sub(String a, String b) {
        return new BigDecimal(a).subtract(new BigDecimal(b)).doubleValue();
    }

    /**
     * Exact multiplication.
     *
     * @return a * b as a double
     */
    public static double mul(String a, String b) {
        return new BigDecimal(a).multiply(new BigDecimal(b)).doubleValue();
    }

    /**
     * Division.
     *
     * <p>Bug fix: the old implementation used {@code divide(BigDecimal)},
     * which throws {@link ArithmeticException} for any non-terminating
     * quotient such as 1/3. DECIMAL64 (16 significant digits, HALF_EVEN)
     * leaves every terminating result unchanged at double precision while
     * making non-terminating quotients work.
     *
     * @return a / b as a double
     */
    public static double div(String a, String b) {
        return new BigDecimal(a).divide(new BigDecimal(b), MathContext.DECIMAL64).doubleValue();
    }

    /** Exact addition of two doubles (routed through String construction). */
    public static double add(double a, double b) {
        return add(Double.toString(a), Double.toString(b));
    }

    /** Exact subtraction of two doubles. */
    public static double sub(double a, double b) {
        return sub(Double.toString(a), Double.toString(b));
    }

    /** Exact multiplication of two doubles. */
    public static double mul(double a, double b) {
        return mul(Double.toString(a), Double.toString(b));
    }

    /** Division of two doubles; see {@link #div(String, String)}. */
    public static double div(double a, double b) {
        return div(Double.toString(a), Double.toString(b));
    }

    /**
     * Addition rounded to the given scale.
     *
     * <p>Bug fix: one-arg {@code setScale(scale)} throws
     * {@link ArithmeticException} whenever rounding is actually required;
     * HALF_UP is now applied explicitly (the documented default intent).
     *
     * @param scale number of decimal places to keep, 1 to 8
     */
    public static double add(String a, String b, @IntRange(from = 1, to = 8) int scale) {
        return new BigDecimal(a).add(new BigDecimal(b))
                .setScale(scale, DEFAULT_ROUNDING).doubleValue();
    }

    /** Subtraction rounded (HALF_UP) to the given scale; see {@link #add(String, String, int)}. */
    public static double sub(String a, String b, @IntRange(from = 1, to = 8) int scale) {
        return new BigDecimal(a).subtract(new BigDecimal(b))
                .setScale(scale, DEFAULT_ROUNDING).doubleValue();
    }

    /** Multiplication rounded (HALF_UP) to the given scale. */
    public static double mul(String a, String b, @IntRange(from = 1, to = 8) int scale) {
        return new BigDecimal(a).multiply(new BigDecimal(b))
                .setScale(scale, DEFAULT_ROUNDING).doubleValue();
    }

    /**
     * Division rounded (HALF_UP) to the given scale.
     *
     * <p>Bug fix: {@code divide(b, scale)} resolved to the
     * {@code divide(BigDecimal, int roundingMode)} overload, so the scale was
     * silently interpreted as a rounding-mode constant and no scale was
     * applied at all. The three-argument overload is now used.
     */
    public static double div(String a, String b, @IntRange(from = 1, to = 8) int scale) {
        return new BigDecimal(a).divide(new BigDecimal(b), scale, DEFAULT_ROUNDING).doubleValue();
    }

    /**
     * Compares two decimal values by numeric value (ignores scale, unlike
     * {@code equals}).
     *
     * @return -1 if a &lt; b, 0 if a == b, 1 if a &gt; b
     */
    public static int compare(BigDecimal a, BigDecimal b) {
        return a.compareTo(b);
    }
}
|
#!/usr/bin/python
# -*- coding: ascii -*-
# Author: @harvie <NAME>
# Date: 7 july 2018
__author__ = "@harvie <NAME>"
#__email__ = ""
__name__ = _("ClosePath")
__version__ = "0.1"
import math
import os.path
import re
from CNC import CNC,Block,Segment
from ToolsPage import Plugin
from math import pi, sqrt, sin, cos, asin, acos, atan2, hypot, degrees, radians, copysign, fmod
class Tool(Plugin):
    # bCNC CAM plugin: appends a closing straight segment to every open path
    # in the selected editor blocks.
    __doc__ = _("""Close the path""")  #<<< This comment will be show as tooltip for the ribbon button

    def __init__(self, master):
        """Register the plugin with bCNC (icon, ribbon group, one-shot mode)."""
        Plugin.__init__(self, master, "ClosePath")
        self.icon = "closepath"  #<<< This is the name of file used as icon for the ribbon button. It will be search in the "icons" subfolder
        self.group = "CAM"  #<<< This is the name of group that plugin belongs
        self.oneshot = True
        #Here we are creating the widgets presented to the user inside the plugin
        #Name, Type , Default value, Description
        #self.variables = [ #<<< Define a list of components for the GUI
        #	("name" , "db" , "", _("Name")) #used to store plugin settings in the internal database
        #]
        #self.buttons.append("exe") #<<< This is the button added at bottom to call the execute method below

    # ----------------------------------------------------------------------
    # This method is executed when user presses the plugin execute button
    # ----------------------------------------------------------------------
    def execute(self, app):
        """Close every open path in the currently selected blocks.

        For each selected block id, builds a replacement Block whose open
        paths are closed by appending a LINE segment from the end point of
        the last segment (path[-1].B) back to the start point of the first
        (path[0].A). The original block is replaced in place.
        """
        blocks = []
        for bid in app.editor.getSelectedBlocks():
            # Skip blocks that yield no usable path at all.
            if len(app.gcode.toPath(bid)) < 1: continue
            eblock = Block("closed " + app.gcode[bid].name())
            for path in app.gcode.toPath(bid):
                if not path.isClosed():
                    path.append(Segment(Segment.LINE, path[-1].B, path[0].A))
                eblock = app.gcode.fromPath(path, eblock)
            #blocks.append(eblock)
            app.gcode[bid] = eblock  # replace the original block in place
        #active=-1 #add to end
        #app.gcode.insBlocks(active, blocks, "Path closed") #<<< insert blocks over active block in the editor
        app.refresh()  #<<< refresh editor
        app.setStatus(_("Generated: Closepath"))  #<<< feed back result
        #app.gcode.blocks.append(block)
|
<gh_stars>1-10
// 11728. 배열 합치기
// 2021.06.08
// 정렬
#include<iostream>
#include<set>
using namespace std;
// BOJ 11728 (merge two sorted arrays): read n+m integers and print them in
// non-decreasing order. A multiset keeps duplicates and stays sorted, so the
// two input arrays can simply be poured into it.
//
// Fix: the original mixed iostream (cin >> n >> m) with stdio (scanf/printf)
// in the same stream — inconsistent and fragile (breaks as soon as
// sync_with_stdio(false) is enabled, and scanf return values were ignored).
// Everything now goes through iostream; the two identical read loops are
// merged into one over all n + m values.
int main()
{
    std::ios_base::sync_with_stdio(false);
    std::cin.tie(nullptr);

    int n, m;
    std::cin >> n >> m;

    std::multiset<int> merged;
    for (int i = 0; i < n + m; i++)
    {
        int value;
        std::cin >> value;
        merged.insert(value);
    }

    for (int value : merged)
    {
        std::cout << value << ' ';
    }
    std::cout << '\n';
    return 0;
}
|
-- The ten most recently released movies, newest first.
SELECT *
FROM movies
ORDER BY release_date DESC
LIMIT 10;
|
#!/bin/sh
# Check that every file given on the command line is a readable text file
# ending in a POSIX trailing newline. Exits 1 if any file fails.
RET_CODE=0

# test_posix_newline FILE
# Flags FILE when it is unreadable or, if it looks like a text file, when
# its final byte is not a newline. Binary files (grep -I) are skipped.
test_posix_newline() {
	if [ ! -r "$1" ]; then
		echo "File $1 not found or not readable" 1>&2
		RET_CODE=1
		# Bug fix: previously fell through and ran grep/tail on the
		# unreadable file anyway, producing spurious errors. Return 0 so
		# 'set -e' does not abort the loop over the remaining files.
		return 0
	fi
	if grep -Iq . "$1"; then
		final_char=$(tail -q -c 1 "$1")
		# Command substitution strips one trailing newline, so a
		# conforming file yields an empty string here.
		if [ "${final_char}" != "" ]; then
			echo "$1 does not end with a POSIX trailing newline" 1>&2
			RET_CODE=1
		fi
	fi
}

set -e
for i in "$@"; do
	test_posix_newline "$i"
done
exit $RET_CODE
|
<reponame>rainrambler/PoemStar
package poemstar;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import javax.swing.DefaultListModel;
import javax.swing.JFileChooser;
import javax.swing.ListModel;
import javax.swing.filechooser.FileFilter;
import org.apache.commons.io.FileUtils;
import org.pmw.tinylog.Logger;
import poemstar.beans.ISearchResults;
import poemstar.beans.Poems;
import poemstar.beans.QueryCondition;
import poemstar.beans.SearchResult;
import poemstar.fileio.PoemsDBManager;
import poemstar.util.DateTimeUtils;
/**
 * Main UI
 *
 * <p>Swing dialog for browsing and querying a poem database: dynasty radio
 * buttons, author combo box and keyword field on top; the query results in a
 * list; the selected poem's full text in a text area below. Most of the
 * widget and layout code is generated by the NetBeans Form Editor.
 *
 * @author Xinway
 */
public class MainJDialog extends javax.swing.JDialog {

    /**
     * Creates new form MainJDialog
     * @param parent owning frame (may be null)
     * @param modal whether the dialog blocks input to its parent
     */
    public MainJDialog(java.awt.Frame parent, boolean modal) {
        super(parent, modal);
        initComponents();
    }

    /**
     * This method is called from within the constructor to initialize the form.
     * WARNING: Do NOT modify this code. The content of this method is always
     * regenerated by the Form Editor.
     */
    @SuppressWarnings("unchecked")
    // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
    private void initComponents() {
        buttonGroupDynasty = new javax.swing.ButtonGroup();
        jScrollPaneLog = new javax.swing.JScrollPane();
        jListResult = new javax.swing.JList();
        jLabelDynasty = new javax.swing.JLabel();
        jRadioButtonPrevTang = new javax.swing.JRadioButton();
        jRadioButtonTang = new javax.swing.JRadioButton();
        jRadioButtonPrevSong = new javax.swing.JRadioButton();
        jRadioButtonSong = new javax.swing.JRadioButton();
        jRadioButtonYuan = new javax.swing.JRadioButton();
        jRadioButtonMingQing = new javax.swing.JRadioButton();
        jRadioButtonJinXiandai = new javax.swing.JRadioButton();
        jLabelAuthor = new javax.swing.JLabel();
        jComboBoxAuthor = new javax.swing.JComboBox();
        jButtonClearAuthor = new javax.swing.JButton();
        jLabelKeyword = new javax.swing.JLabel();
        jTextFieldKeyword = new javax.swing.JTextField();
        jButtonQuery = new javax.swing.JButton();
        jLabelStatus = new javax.swing.JLabel();
        jScrollPane1 = new javax.swing.JScrollPane();
        jTextAreaContent = new javax.swing.JTextArea();
        jButtonSplitWord = new javax.swing.JButton();
        jButtonModify = new javax.swing.JButton();
        jButtonExport = new javax.swing.JButton();

        setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE);
        setTitle("PoemStar");
        setLocationByPlatform(true);
        setModal(true);
        // Window lifecycle drives DB open/close: see formWindowOpened /
        // formWindowClosed below.
        addWindowListener(new java.awt.event.WindowAdapter() {
            public void windowClosed(java.awt.event.WindowEvent evt) {
                formWindowClosed(evt);
            }
            public void windowOpened(java.awt.event.WindowEvent evt) {
                formWindowOpened(evt);
            }
        });

        jListResult.addMouseListener(new java.awt.event.MouseAdapter() {
            public void mouseClicked(java.awt.event.MouseEvent evt) {
                jListResultMouseClicked(evt);
            }
        });
        jScrollPaneLog.setViewportView(jListResult);

        jLabelDynasty.setText("朝代:");

        buttonGroupDynasty.add(jRadioButtonPrevTang);
        jRadioButtonPrevTang.setText("唐以前");

        buttonGroupDynasty.add(jRadioButtonTang);
        jRadioButtonTang.setText("唐");

        buttonGroupDynasty.add(jRadioButtonPrevSong);
        jRadioButtonPrevSong.setText("宋以前");

        buttonGroupDynasty.add(jRadioButtonSong);
        jRadioButtonSong.setText("宋");

        buttonGroupDynasty.add(jRadioButtonYuan);
        jRadioButtonYuan.setText("元");

        buttonGroupDynasty.add(jRadioButtonMingQing);
        jRadioButtonMingQing.setText("明清");

        buttonGroupDynasty.add(jRadioButtonJinXiandai);
        jRadioButtonJinXiandai.setText("近现代");

        jLabelAuthor.setText("作者:");

        jButtonClearAuthor.setText("清空");
        jButtonClearAuthor.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                jButtonClearAuthorActionPerformed(evt);
            }
        });

        jLabelKeyword.setText("关键字:");

        jButtonQuery.setText("查询");
        jButtonQuery.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                jButtonQueryActionPerformed(evt);
            }
        });

        jTextAreaContent.setColumns(20);
        jTextAreaContent.setFont(new java.awt.Font("Microsoft YaHei UI", 0, 16)); // NOI18N
        jTextAreaContent.setRows(5);
        jScrollPane1.setViewportView(jTextAreaContent);

        jButtonSplitWord.setText("分词");
        jButtonSplitWord.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                jButtonSplitWordActionPerformed(evt);
            }
        });

        jButtonModify.setText("修改");
        jButtonModify.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                jButtonModifyActionPerformed(evt);
            }
        });

        jButtonExport.setText("导出");
        jButtonExport.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                jButtonExportActionPerformed(evt);
            }
        });

        javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
        getContentPane().setLayout(layout);
        layout.setHorizontalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
                .addContainerGap()
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
                    .addComponent(jScrollPane1)
                    .addComponent(jScrollPaneLog, javax.swing.GroupLayout.Alignment.LEADING)
                    .addComponent(jLabelStatus, javax.swing.GroupLayout.Alignment.LEADING, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                    .addGroup(javax.swing.GroupLayout.Alignment.LEADING, layout.createSequentialGroup()
                        .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING, false)
                            .addGroup(javax.swing.GroupLayout.Alignment.LEADING, layout.createSequentialGroup()
                                .addComponent(jLabelKeyword)
                                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                                .addComponent(jTextFieldKeyword, javax.swing.GroupLayout.PREFERRED_SIZE, 176, javax.swing.GroupLayout.PREFERRED_SIZE)
                                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                                .addComponent(jButtonQuery))
                            .addGroup(javax.swing.GroupLayout.Alignment.LEADING, layout.createSequentialGroup()
                                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                                    .addComponent(jLabelDynasty)
                                    .addComponent(jLabelAuthor))
                                .addGap(28, 28, 28)
                                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                                    .addGroup(layout.createSequentialGroup()
                                        .addComponent(jRadioButtonPrevTang)
                                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                                        .addComponent(jRadioButtonTang)
                                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                                        .addComponent(jRadioButtonPrevSong)
                                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                                        .addComponent(jRadioButtonSong)
                                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                                        .addComponent(jRadioButtonYuan)
                                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                                        .addComponent(jRadioButtonMingQing)
                                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                                        .addComponent(jRadioButtonJinXiandai))
                                    .addGroup(layout.createSequentialGroup()
                                        .addComponent(jComboBoxAuthor, javax.swing.GroupLayout.PREFERRED_SIZE, 177, javax.swing.GroupLayout.PREFERRED_SIZE)
                                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                                        .addComponent(jButtonClearAuthor)))))
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                        .addComponent(jButtonExport)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 179, Short.MAX_VALUE)
                        .addComponent(jButtonModify)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                        .addComponent(jButtonSplitWord)))
                .addContainerGap())
        );
        layout.setVerticalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(layout.createSequentialGroup()
                .addGap(22, 22, 22)
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(jLabelDynasty)
                    .addComponent(jRadioButtonPrevTang)
                    .addComponent(jRadioButtonTang)
                    .addComponent(jRadioButtonPrevSong)
                    .addComponent(jRadioButtonSong)
                    .addComponent(jRadioButtonYuan)
                    .addComponent(jRadioButtonMingQing)
                    .addComponent(jRadioButtonJinXiandai))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(jLabelAuthor)
                    .addComponent(jComboBoxAuthor, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(jButtonClearAuthor))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(jLabelKeyword)
                    .addComponent(jTextFieldKeyword, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(jButtonQuery)
                    .addComponent(jButtonSplitWord)
                    .addComponent(jButtonModify)
                    .addComponent(jButtonExport))
                .addGap(18, 18, 18)
                .addComponent(jScrollPaneLog, javax.swing.GroupLayout.PREFERRED_SIZE, 114, javax.swing.GroupLayout.PREFERRED_SIZE)
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addComponent(jScrollPane1, javax.swing.GroupLayout.PREFERRED_SIZE, 232, javax.swing.GroupLayout.PREFERRED_SIZE)
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addComponent(jLabelStatus)
                .addGap(43, 43, 43))
        );

        jLabelDynasty.getAccessibleContext().setAccessibleName("Dynasty");

        pack();
    }// </editor-fold>//GEN-END:initComponents

    // Results of the most recent query; also maps result-list row indices
    // back to individual poems via addIndextoResult (used on list clicks).
    ISearchResults results_;

    private void jButtonQueryActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButtonQueryActionPerformed
        // Run the keyword query against the poem DB and rebuild the result
        // list: one summary row per keyword first, then a separator followed
        // by the matching poems for each keyword.
        QueryCondition qc = QueryCondition.createQueryCondition(jTextFieldKeyword.getText());
        Poems pms = PoemsDBManager.INSTANCE.getPoems();
        results_ = pms.findPoems(qc);
        DefaultListModel resultList = new DefaultListModel();
        jListResult.setModel(resultList);
        //jListResult
        int curPos = 0; // for the relationship between UI list position and the poem
        for (String keyword : results_.getKeywords()) {
            Collection<SearchResult> srs = results_.getResults(keyword);
            resultList.addElement(keyword + " founded: " + srs.size());
            curPos++;
        }
        for (String keyword : results_.getKeywords()) {
            resultList.addElement("--------------------");
            curPos++;
            Collection<SearchResult> srs = results_.getResults(keyword);
            for (SearchResult sr : srs) {
                resultList.addElement(sr.getDescription());
                // Remember which list row this poem occupies so a click can
                // resolve it again (see jListResultMouseClicked).
                results_.addIndextoResult(curPos, sr);
                curPos++;
            }
        }
        // TODO: Save UI result to a txt file
    }//GEN-LAST:event_jButtonQueryActionPerformed

    private void formWindowOpened(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowOpened
        // Center the dialog and open the poem database when the window opens.
        setLocationRelativeTo(null);
        //ChineseWords.INSTANCE.loadFromFile("D:\\chinesewords.txt");
        Logger.info("{}: PoemStart started.", DateTimeUtils.getTimeDesc());
        PoemsDBManager.INSTANCE.init();
        jLabelStatus.setText("Read from DB complete! Count: " + PoemsDBManager.INSTANCE.getPoems().getCount());
        Logger.info("{}: Read from DB complete!", DateTimeUtils.getTimeDesc());
    }//GEN-LAST:event_formWindowOpened

    private void jListResultMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_jListResultMouseClicked
        // Show the clicked poem in the text area; FindResult returns null for
        // non-poem rows (summaries/separators), which leaves the area as-is.
        final int curIndex = jListResult.getSelectedIndex();
        if (curIndex == -1) {
            jTextAreaContent.setText("");
        } else {
            SearchResult sr = results_.FindResult(curIndex);
            if (sr != null) {
                String result = sr.getDesc() + "\r\n";
                result += sr.getAllSentences();
                jTextAreaContent.setText(result);
            }
        }
    }//GEN-LAST:event_jListResultMouseClicked

    private void jButtonSplitWordActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButtonSplitWordActionPerformed
        // Open the word-splitting dialog over the whole poem database.
        SplitWordDialog dlg = new SplitWordDialog(null, true);
        dlg.parseAllPoems(PoemsDBManager.INSTANCE.getPoems());
        dlg.setVisible(true);
    }//GEN-LAST:event_jButtonSplitWordActionPerformed

    private void formWindowClosed(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowClosed
        //ChineseWords.INSTANCE.saveToFile("D:\\chinesewords.txt");
        // Release the database when the dialog is disposed.
        PoemsDBManager.INSTANCE.close();
    }//GEN-LAST:event_formWindowClosed

    private void jButtonClearAuthorActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButtonClearAuthorActionPerformed
        // NOTE(review): handler is empty — the "清空" (clear) button currently
        // does nothing; confirm whether clearing jComboBoxAuthor was intended.
    }//GEN-LAST:event_jButtonClearAuthorActionPerformed

    private void jButtonModifyActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButtonModifyActionPerformed
        // Open the poem-editing dialog.
        ModifyPoemJDialog dlg = new ModifyPoemJDialog(null, true);
        dlg.setVisible(true);
    }//GEN-LAST:event_jButtonModifyActionPerformed

    private void jButtonExportActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButtonExportActionPerformed
        // Export every row of the result list to a user-chosen *.txt file.
        fc.addChoosableFileFilter(new FileFilter() {
            @Override
            public boolean accept(File f) {
                if (f.isDirectory()) {
                    return true;
                }
                return f.getName().endsWith(".txt");
            }
            @Override
            public String getDescription() {
                return "*.txt";
            }
        });
        int returnVal = fc.showSaveDialog(this);
        if (returnVal != JFileChooser.APPROVE_OPTION) {
            return;
        }
        File f = fc.getSelectedFile();
        //jListResult
        //FileUtils.writeLines(f, allSentences_);
        ArrayList<String> allSentences = new ArrayList<>();
        ListModel listModel = jListResult.getModel();
        for (int i = 0; i < listModel.getSize(); i++) {
            allSentences.add(listModel.getElementAt(i).toString());
        }
        try {
            FileUtils.writeLines(f, allSentences);
        } catch (IOException ex) {
            Logger.error(ex);
        }
    }//GEN-LAST:event_jButtonExportActionPerformed

    //Create a file chooser
    final JFileChooser fc = new JFileChooser();

    // Variables declaration - do not modify//GEN-BEGIN:variables
    private javax.swing.ButtonGroup buttonGroupDynasty;
    private javax.swing.JButton jButtonClearAuthor;
    private javax.swing.JButton jButtonExport;
    private javax.swing.JButton jButtonModify;
    private javax.swing.JButton jButtonQuery;
    private javax.swing.JButton jButtonSplitWord;
    private javax.swing.JComboBox jComboBoxAuthor;
    private javax.swing.JLabel jLabelAuthor;
    private javax.swing.JLabel jLabelDynasty;
    private javax.swing.JLabel jLabelKeyword;
    private javax.swing.JLabel jLabelStatus;
    private javax.swing.JList jListResult;
    private javax.swing.JRadioButton jRadioButtonJinXiandai;
    private javax.swing.JRadioButton jRadioButtonMingQing;
    private javax.swing.JRadioButton jRadioButtonPrevSong;
    private javax.swing.JRadioButton jRadioButtonPrevTang;
    private javax.swing.JRadioButton jRadioButtonSong;
    private javax.swing.JRadioButton jRadioButtonTang;
    private javax.swing.JRadioButton jRadioButtonYuan;
    private javax.swing.JScrollPane jScrollPane1;
    private javax.swing.JScrollPane jScrollPaneLog;
    private javax.swing.JTextArea jTextAreaContent;
    private javax.swing.JTextField jTextFieldKeyword;
    // End of variables declaration//GEN-END:variables
}
|
#!/bin/bash
# Secure WireGuard server installer for Debian, Ubuntu, CentOS, Fedora and Arch Linux
# https://github.com/angristan/wireguard-install
RED='\033[0;31m'
ORANGE='\033[0;33m'
NC='\033[0m'
function isRoot() {
	# The installer writes under /etc and configures network interfaces,
	# so it must be run as root.
	if [ "${EUID}" -eq 0 ]; then
		return
	fi
	echo "You need to run this script as root"
	exit 1
}
function checkVirt() {
	# Refuse virtualisation technologies that cannot load the WireGuard
	# kernel module from inside the guest.
	case "$(systemd-detect-virt)" in
	openvz)
		echo "OpenVZ is not supported"
		exit 1
		;;
	lxc)
		echo "LXC is not supported (yet)."
		echo "WireGuard can technically run in an LXC container,"
		echo "but the kernel module has to be installed on the host,"
		echo "the container has to be run with some specific parameters"
		echo "and only the tools need to be installed in the container."
		exit 1
		;;
	esac
}
function checkOS() {
	# Check OS version
	# Detects the distribution by probing release files and exports ${OS}
	# (plus, where os-release is sourced, ${VERSION_ID}) for later steps.
	if [[ -e /etc/debian_version ]]; then
		source /etc/os-release
		OS="${ID}" # debian or ubuntu
		if [[ ${ID} == "debian" || ${ID} == "raspbian" ]]; then
			# Only Debian/Raspbian 10 (buster) is supported.
			if [[ ${VERSION_ID} -ne 10 ]]; then
				echo "Your version of Debian (${VERSION_ID}) is not supported. Please use Debian 10 Buster"
				exit 1
			fi
		fi
	elif [[ -e /etc/fedora-release ]]; then
		source /etc/os-release
		OS="${ID}"
	elif [[ -e /etc/centos-release ]]; then
		source /etc/os-release
		OS=centos
	elif [[ -e /etc/arch-release ]]; then
		OS=arch
	elif [[ -e /etc/oracle-release ]]; then
		source /etc/os-release
		OS=oracle
	else
		echo "Looks like you aren't running this installer on a Debian, Ubuntu, Fedora, CentOS or Arch Linux system"
		exit 1
	fi
}
function initialCheck() {
	# Fail fast (root, virtualisation, OS support) before asking the user
	# any setup questions.
	isRoot
	checkVirt
	checkOS
}
function installQuestions() {
	# Interactively collect the server settings, pre-filling detected
	# defaults. Each 'until' loop re-prompts until the value matches the
	# validation pattern. Exports: SERVER_PUB_IP, SERVER_PUB_NIC,
	# SERVER_WG_NIC, SERVER_WG_IPV4, SERVER_WG_IPV6, SERVER_PORT,
	# CLIENT_DNS_1, CLIENT_DNS_2.
	echo "Welcome to the WireGuard installer!"
	echo "The git repository is available at: https://github.com/angristan/wireguard-install"
	echo ""
	echo "I need to ask you a few questions before starting the setup."
	echo "You can leave the default options and just press enter if you are ok with them."
	echo ""
	# Detect public IPv4 or IPv6 address and pre-fill for the user
	SERVER_PUB_IP=$(ip -4 addr | sed -ne 's|^.* inet \([^/]*\)/.* scope global.*$|\1|p' | head -1)
	if [[ -z ${SERVER_PUB_IP} ]]; then
		# Detect public IPv6 address
		SERVER_PUB_IP=$(ip -6 addr | sed -ne 's|^.* inet6 \([^/]*\)/.* scope global.*$|\1|p' | head -1)
	fi
	read -rp "IPv4 or IPv6 public address: " -e -i "${SERVER_PUB_IP}" SERVER_PUB_IP
	# Detect public interface and pre-fill for the user
	SERVER_NIC="$(ip -4 route ls | grep default | grep -Po '(?<=dev )(\S+)' | head -1)"
	until [[ ${SERVER_PUB_NIC} =~ ^[a-zA-Z0-9_]+$ ]]; do
		read -rp "Public interface: " -e -i "${SERVER_NIC}" SERVER_PUB_NIC
	done
	# Interface names are limited to 15 characters by the kernel.
	until [[ ${SERVER_WG_NIC} =~ ^[a-zA-Z0-9_]+$ && ${#SERVER_WG_NIC} -lt 16 ]]; do
		read -rp "WireGuard interface name: " -e -i wg0 SERVER_WG_NIC
	done
	until [[ ${SERVER_WG_IPV4} =~ ^([0-9]{1,3}\.){3} ]]; do
		read -rp "Server's WireGuard IPv4: " -e -i 10.66.66.1 SERVER_WG_IPV4
	done
	until [[ ${SERVER_WG_IPV6} =~ ^([a-f0-9]{1,4}:){3,4}: ]]; do
		read -rp "Server's WireGuard IPv6: " -e -i fd42:42:42::1 SERVER_WG_IPV6
	done
	# Generate random number within private ports range
	RANDOM_PORT=$(shuf -i49152-65535 -n1)
	until [[ ${SERVER_PORT} =~ ^[0-9]+$ ]] && [ "${SERVER_PORT}" -ge 1 ] && [ "${SERVER_PORT}" -le 65535 ]; do
		read -rp "Server's WireGuard port [1-65535]: " -e -i "${RANDOM_PORT}" SERVER_PORT
	done
	# Adguard DNS by default
	until [[ ${CLIENT_DNS_1} =~ ^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$ ]]; do
		read -rp "First DNS resolver to use for the clients: " -e -i 94.140.14.14 CLIENT_DNS_1
	done
	until [[ ${CLIENT_DNS_2} =~ ^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$ ]]; do
		read -rp "Second DNS resolver to use for the clients (optional): " -e -i 94.140.15.15 CLIENT_DNS_2
		# An empty answer falls back to the first resolver.
		if [[ ${CLIENT_DNS_2} == "" ]]; then
			CLIENT_DNS_2="${CLIENT_DNS_1}"
		fi
	done
	echo ""
	echo "Okay, that was all I needed. We are ready to setup your WireGuard server now."
	echo "You will be able to generate a client at the end of the installation."
	read -n1 -r -p "Press any key to continue..."
}
# Installs WireGuard for the distribution detected earlier (OS/VERSION_ID
# globals), generates the server key pair and interface config, enables IP
# forwarding, starts the wg-quick service and creates the first client.
# Relies on the SERVER_* and CLIENT_DNS_* globals set by installQuestions().
function installWireGuard() {
# Run setup questions first
installQuestions
# Install WireGuard tools and module
if [[ ${OS} == 'ubuntu' ]]; then
apt-get update
apt-get install -y wireguard iptables resolvconf qrencode
elif [[ ${OS} == 'debian' ]]; then
# On Debian 10 the wireguard package lives in buster-backports; add the
# repo only if no backports entry is configured yet.
if ! grep -rqs "^deb .* buster-backports" /etc/apt/; then
echo "deb http://deb.debian.org/debian buster-backports main" >/etc/apt/sources.list.d/backports.list
apt-get update
fi
apt update
apt-get install -y iptables resolvconf qrencode
apt-get install -y -t buster-backports wireguard
elif [[ ${OS} == 'fedora' ]]; then
# Before Fedora 32 the kernel module comes from the jdoss copr DKMS package.
if [[ ${VERSION_ID} -lt 32 ]]; then
dnf install -y dnf-plugins-core
dnf copr enable -y jdoss/wireguard
dnf install -y wireguard-dkms
fi
dnf install -y wireguard-tools iptables qrencode
elif [[ ${OS} == 'centos' ]]; then
yum -y install epel-release elrepo-release
if [[ ${VERSION_ID} -eq 7 ]]; then
yum -y install yum-plugin-elrepo
fi
yum -y install kmod-wireguard wireguard-tools iptables qrencode
elif [[ ${OS} == 'arch' ]]; then
pacman -S --needed --noconfirm wireguard-tools qrencode
elif [[ ${OS} == 'oracle' ]]; then
# Oracle Linux ships wireguard-tools in the UEKR6 developer repo; restrict
# the repo to wireguard packages only so nothing else leaks in.
if [[ ${VERSION_ID%.*} -eq 7 ]]; then
yum -y install oraclelinux-developer-release-el7
yum-config-manager --disable ol7_developer
yum-config-manager --enable ol7_developer_UEKR6
yum-config-manager --save --setopt=ol7_developer_UEKR6.includepkgs='wireguard-tools*'
yum -y install wireguard-tools qrencode
elif [[ ${VERSION_ID%.*} -eq 8 ]]; then
dnf install -y oraclelinux-developer-release-el8
dnf config-manager --disable ol8_developer
dnf config-manager --enable ol8_developer_UEKR6
dnf config-manager --save --setopt=ol8_developer_UEKR6.includepkgs='wireguard-tools*'
dnf install -y wireguard-tools qrencode
fi
fi
# Make sure the directory exists (this does not seem the be the case on fedora)
mkdir /etc/wireguard >/dev/null 2>&1
# Restrict access: the directory will hold the server's private key.
chmod 600 -R /etc/wireguard/
# Generate the server key pair; the public key is derived from the private one.
SERVER_PRIV_KEY=$(wg genkey)
SERVER_PUB_KEY=$(echo "${SERVER_PRIV_KEY}" | wg pubkey)
# Save WireGuard settings
echo "SERVER_PUB_IP=${SERVER_PUB_IP}
SERVER_PUB_NIC=${SERVER_PUB_NIC}
SERVER_WG_NIC=${SERVER_WG_NIC}
SERVER_WG_IPV4=${SERVER_WG_IPV4}
SERVER_WG_IPV6=${SERVER_WG_IPV6}
SERVER_PORT=${SERVER_PORT}
SERVER_PRIV_KEY=${SERVER_PRIV_KEY}
SERVER_PUB_KEY=${SERVER_PUB_KEY}
CLIENT_DNS_1=${CLIENT_DNS_1}
CLIENT_DNS_2=${CLIENT_DNS_2}" >/etc/wireguard/params
# Add server interface
echo "[Interface]
Address = ${SERVER_WG_IPV4}/24,${SERVER_WG_IPV6}/64
ListenPort = ${SERVER_PORT}
PrivateKey = ${SERVER_PRIV_KEY}" >"/etc/wireguard/${SERVER_WG_NIC}.conf"
# Use firewalld rich rules when firewalld is running, raw iptables otherwise;
# the rules are attached as PostUp/PostDown hooks so wg-quick manages them.
if pgrep firewalld; then
FIREWALLD_IPV4_ADDRESS=$(echo "${SERVER_WG_IPV4}" | cut -d"." -f1-3)".0"
FIREWALLD_IPV6_ADDRESS=$(echo "${SERVER_WG_IPV6}" | sed 's/:[^:]*$/:0/')
echo "PostUp = firewall-cmd --add-port ${SERVER_PORT}/udp && firewall-cmd --add-rich-rule='rule family=ipv4 source address=${FIREWALLD_IPV4_ADDRESS}/24 masquerade' && firewall-cmd --add-rich-rule='rule family=ipv6 source address=${FIREWALLD_IPV6_ADDRESS}/24 masquerade'
PostDown = firewall-cmd --remove-port ${SERVER_PORT}/udp && firewall-cmd --remove-rich-rule='rule family=ipv4 source address=${FIREWALLD_IPV4_ADDRESS}/24 masquerade' && firewall-cmd --remove-rich-rule='rule family=ipv6 source address=${FIREWALLD_IPV6_ADDRESS}/24 masquerade'" >>"/etc/wireguard/${SERVER_WG_NIC}.conf"
else
echo "PostUp = iptables -A FORWARD -i ${SERVER_PUB_NIC} -o ${SERVER_WG_NIC} -j ACCEPT; iptables -A FORWARD -i ${SERVER_WG_NIC} -j ACCEPT; iptables -t nat -A POSTROUTING -o ${SERVER_PUB_NIC} -j MASQUERADE; ip6tables -A FORWARD -i ${SERVER_WG_NIC} -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ${SERVER_PUB_NIC} -j MASQUERADE
PostDown = iptables -D FORWARD -i ${SERVER_PUB_NIC} -o ${SERVER_WG_NIC} -j ACCEPT; iptables -D FORWARD -i ${SERVER_WG_NIC} -j ACCEPT; iptables -t nat -D POSTROUTING -o ${SERVER_PUB_NIC} -j MASQUERADE; ip6tables -D FORWARD -i ${SERVER_WG_NIC} -j ACCEPT; ip6tables -t nat -D POSTROUTING -o ${SERVER_PUB_NIC} -j MASQUERADE" >>"/etc/wireguard/${SERVER_WG_NIC}.conf"
fi
# Enable routing on the server
echo "net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1" >/etc/sysctl.d/wg.conf
sysctl --system
systemctl start "wg-quick@${SERVER_WG_NIC}"
systemctl enable "wg-quick@${SERVER_WG_NIC}"
newClient
echo "If you want to add more clients, you simply need to run this script another time!"
# Check if WireGuard is running
systemctl is-active --quiet "wg-quick@${SERVER_WG_NIC}"
WG_RUNNING=$?
# WireGuard might not work if we updated the kernel. Tell the user to reboot
if [[ ${WG_RUNNING} -ne 0 ]]; then
echo -e "\n${RED}WARNING: WireGuard does not seem to be running.${NC}"
echo -e "${ORANGE}You can check if WireGuard is running with: systemctl status wg-quick@${SERVER_WG_NIC}${NC}"
echo -e "${ORANGE}If you get something like \"Cannot find device ${SERVER_WG_NIC}\", please reboot!${NC}"
fi
}
# Interactively creates a new WireGuard client: asks for a unique name and
# free IPv4/IPv6 addresses, generates its key pair + preshared key, writes a
# client config (also shown as a QR code) and registers the client as a peer
# in the server config. Uses the globals loaded from /etc/wireguard/params.
function newClient() {
ENDPOINT="${SERVER_PUB_IP}:${SERVER_PORT}"
echo ""
echo "Tell me a name for the client."
echo "The name must consist of alphanumeric character. It may also include an underscore or a dash and can't exceed 15 chars."
# Loop until the name is well-formed AND not already present in the server config.
until [[ ${CLIENT_NAME} =~ ^[a-zA-Z0-9_-]+$ && ${CLIENT_EXISTS} == '0' && ${#CLIENT_NAME} -lt 16 ]]; do
read -rp "Client name: " -e CLIENT_NAME
CLIENT_EXISTS=$(grep -c -E "^### Client ${CLIENT_NAME}\$" "/etc/wireguard/${SERVER_WG_NIC}.conf")
if [[ ${CLIENT_EXISTS} == '1' ]]; then
echo ""
echo "A client with the specified name was already created, please choose another name."
echo ""
fi
done
# Scan the /24 for the first unused host octet (2..254) to pre-fill the prompt.
# NOTE(review): ${SERVER_WG_IPV4::-1} only strips one char, so this assumes the
# server address ends in ".1" (e.g. 10.66.66.1) — confirm for custom subnets.
for DOT_IP in {2..254}; do
DOT_EXISTS=$(grep -c "${SERVER_WG_IPV4::-1}${DOT_IP}" "/etc/wireguard/${SERVER_WG_NIC}.conf")
if [[ ${DOT_EXISTS} == '0' ]]; then
break
fi
done
if [[ ${DOT_EXISTS} == '1' ]]; then
echo ""
echo "The subnet configured supports only 253 clients."
exit 1
fi
# First three octets of the server's WireGuard IPv4 network.
BASE_IP=$(echo "$SERVER_WG_IPV4" | awk -F '.' '{ print $1"."$2"."$3 }')
until [[ ${IPV4_EXISTS} == '0' ]]; do
read -rp "Client's WireGuard IPv4: ${BASE_IP}." -e -i "${DOT_IP}" DOT_IP
CLIENT_WG_IPV4="${BASE_IP}.${DOT_IP}"
IPV4_EXISTS=$(grep -c "$CLIENT_WG_IPV4/24" "/etc/wireguard/${SERVER_WG_NIC}.conf")
if [[ ${IPV4_EXISTS} == '1' ]]; then
echo ""
echo "A client with the specified IPv4 was already created, please choose another IPv4."
echo ""
fi
done
# IPv6 prefix (everything before the "::"); reuse the chosen host number.
BASE_IP=$(echo "$SERVER_WG_IPV6" | awk -F '::' '{ print $1 }')
until [[ ${IPV6_EXISTS} == '0' ]]; do
read -rp "Client's WireGuard IPv6: ${BASE_IP}::" -e -i "${DOT_IP}" DOT_IP
CLIENT_WG_IPV6="${BASE_IP}::${DOT_IP}"
IPV6_EXISTS=$(grep -c "${CLIENT_WG_IPV6}/64" "/etc/wireguard/${SERVER_WG_NIC}.conf")
if [[ ${IPV6_EXISTS} == '1' ]]; then
echo ""
echo "A client with the specified IPv6 was already created, please choose another IPv6."
echo ""
fi
done
# Generate key pair for the client
CLIENT_PRIV_KEY=$(wg genkey)
CLIENT_PUB_KEY=$(echo "${CLIENT_PRIV_KEY}" | wg pubkey)
CLIENT_PRE_SHARED_KEY=$(wg genpsk)
# Home directory of the user, where the client configuration will be written
if [ -e "/home/${CLIENT_NAME}" ]; then
# if $1 is a user name
HOME_DIR="/home/${CLIENT_NAME}"
elif [ "${SUDO_USER}" ]; then
# if not, use SUDO_USER
if [ "${SUDO_USER}" == "root" ]; then
# If running sudo as root
HOME_DIR="/root"
else
HOME_DIR="/home/${SUDO_USER}"
fi
else
# if not SUDO_USER, use /root
HOME_DIR="/root"
fi
# Create client file and add the server as a peer
echo "[Interface]
PrivateKey = ${CLIENT_PRIV_KEY}
Address = ${CLIENT_WG_IPV4}/32,${CLIENT_WG_IPV6}/128
DNS = ${CLIENT_DNS_1},${CLIENT_DNS_2}
[Peer]
PublicKey = ${SERVER_PUB_KEY}
PresharedKey = ${CLIENT_PRE_SHARED_KEY}
Endpoint = ${ENDPOINT}
AllowedIPs = 0.0.0.0/0,::/0" >>"${HOME_DIR}/${SERVER_WG_NIC}-client-${CLIENT_NAME}.conf"
# Add the client as a peer to the server
echo -e "\n### Client ${CLIENT_NAME}
[Peer]
PublicKey = ${CLIENT_PUB_KEY}
PresharedKey = ${CLIENT_PRE_SHARED_KEY}
AllowedIPs = ${CLIENT_WG_IPV4}/32,${CLIENT_WG_IPV6}/128" >>"/etc/wireguard/${SERVER_WG_NIC}.conf"
# Apply the new peer without dropping existing sessions.
wg syncconf "${SERVER_WG_NIC}" <(wg-quick strip "${SERVER_WG_NIC}")
echo -e "\nHere is your client config file as a QR Code:"
qrencode -t ansiutf8 -l L <"${HOME_DIR}/${SERVER_WG_NIC}-client-${CLIENT_NAME}.conf"
echo "It is also available in ${HOME_DIR}/${SERVER_WG_NIC}-client-${CLIENT_NAME}.conf"
}
# Revokes an existing client: removes its ### Client block from the server
# config, deletes the generated client config file and reloads WireGuard
# without dropping other sessions.
function revokeClient() {
NUMBER_OF_CLIENTS=$(grep -c -E "^### Client" "/etc/wireguard/${SERVER_WG_NIC}.conf")
if [[ ${NUMBER_OF_CLIENTS} == '0' ]]; then
echo ""
echo "You have no existing clients!"
exit 1
fi
echo ""
echo "Select the existing client you want to revoke"
grep -E "^### Client" "/etc/wireguard/${SERVER_WG_NIC}.conf" | cut -d ' ' -f 3 | nl -s ') '
until [[ ${CLIENT_NUMBER} -ge 1 && ${CLIENT_NUMBER} -le ${NUMBER_OF_CLIENTS} ]]; do
if [[ ${CLIENT_NUMBER} == '1' ]]; then
read -rp "Select one client [1]: " CLIENT_NUMBER
else
read -rp "Select one client [1-${NUMBER_OF_CLIENTS}]: " CLIENT_NUMBER
fi
done
# match the selected number to a client name
CLIENT_NAME=$(grep -E "^### Client" "/etc/wireguard/${SERVER_WG_NIC}.conf" | cut -d ' ' -f 3 | sed -n "${CLIENT_NUMBER}"p)
# remove [Peer] block matching $CLIENT_NAME
sed -i "/^### Client ${CLIENT_NAME}\$/,/^$/d" "/etc/wireguard/${SERVER_WG_NIC}.conf"
# remove generated client file
# BUGFIX: newClient() writes the config into a HOME_DIR derived from the
# client name or SUDO_USER, not necessarily ${HOME}; mirror that logic here
# so the file is actually deleted.
if [ -e "/home/${CLIENT_NAME}" ]; then
HOME_DIR="/home/${CLIENT_NAME}"
elif [ "${SUDO_USER}" ]; then
if [ "${SUDO_USER}" == "root" ]; then
HOME_DIR="/root"
else
HOME_DIR="/home/${SUDO_USER}"
fi
else
HOME_DIR="/root"
fi
rm -f "${HOME_DIR}/${SERVER_WG_NIC}-client-${CLIENT_NAME}.conf"
# restart wireguard to apply changes
wg syncconf "${SERVER_WG_NIC}" <(wg-quick strip "${SERVER_WG_NIC}")
}
# Uninstalls WireGuard after confirmation: stops and disables the service,
# removes the distro packages, deletes /etc/wireguard and the sysctl
# forwarding config, then verifies the service is really gone.
function uninstallWg() {
echo ""
read -rp "Do you really want to remove WireGuard? [y/n]: " -e -i n REMOVE
if [[ $REMOVE == 'y' ]]; then
checkOS
systemctl stop "wg-quick@${SERVER_WG_NIC}"
systemctl disable "wg-quick@${SERVER_WG_NIC}"
# Ubuntu and Debian used identical duplicated branches; merged into one.
if [[ ${OS} == 'ubuntu' || ${OS} == 'debian' ]]; then
apt-get autoremove --purge -y wireguard qrencode
elif [[ ${OS} == 'fedora' ]]; then
dnf remove -y wireguard-tools qrencode
# Pre-32 Fedora installed the DKMS module from copr; remove both.
if [[ ${VERSION_ID} -lt 32 ]]; then
dnf remove -y wireguard-dkms
dnf copr disable -y jdoss/wireguard
fi
dnf autoremove -y
elif [[ ${OS} == 'centos' ]]; then
yum -y remove kmod-wireguard wireguard-tools qrencode
yum -y autoremove
elif [[ ${OS} == 'arch' ]]; then
pacman -Rs --noconfirm wireguard-tools qrencode
elif [[ ${OS} == 'oracle' ]]; then
if [[ ${VERSION_ID%.*} -eq 7 ]]; then
yum -y remove oraclelinux-developer-release-el7
yum -y remove wireguard-tools qrencode
elif [[ ${VERSION_ID%.*} -eq 8 ]]; then
dnf remove -y oraclelinux-developer-release-el8
dnf remove -y wireguard-tools qrencode
fi
fi
rm -rf /etc/wireguard
rm -f /etc/sysctl.d/wg.conf
# Reload sysctl
sysctl --system
# Check if WireGuard is running
systemctl is-active --quiet "wg-quick@${SERVER_WG_NIC}"
WG_RUNNING=$?
if [[ ${WG_RUNNING} -eq 0 ]]; then
echo "WireGuard failed to uninstall properly."
exit 1
else
echo "WireGuard uninstalled successfully."
exit 0
fi
else
echo ""
echo "Removal aborted!"
fi
}
# Interactive menu shown when WireGuard is already installed; validates the
# selection and dispatches to the matching management action.
function manageMenu() {
echo "Welcome to WireGuard-install!"
echo "The git repository is available at: https://github.com/angristan/wireguard-install"
echo ""
echo "It looks like WireGuard is already installed."
echo ""
echo "What do you want to do?"
echo "   1) Add a new user"
echo "   2) Revoke existing user"
echo "   3) Uninstall WireGuard"
echo "   4) Exit"
# Keep prompting until a single digit 1-4 is supplied.
while ! [[ ${MENU_OPTION} =~ ^[1-4]$ ]]; do
read -rp "Select an option [1-4]: " MENU_OPTION
done
# Dispatch on the validated selection (4 falls through to exit).
if [[ ${MENU_OPTION} == "1" ]]; then
newClient
elif [[ ${MENU_OPTION} == "2" ]]; then
revokeClient
elif [[ ${MENU_OPTION} == "3" ]]; then
uninstallWg
else
exit 0
fi
}
# Check for root, virt, OS...
initialCheck
# Check if WireGuard is already installed and load params
# (params is written by installWireGuard and holds the SERVER_*/CLIENT_DNS_*
# globals the management functions rely on).
if [[ -e /etc/wireguard/params ]]; then
source /etc/wireguard/params
manageMenu
else
installWireGuard
fi
|
<gh_stars>0
package ddbt.tpcc.loadtest
import ddbt.lib.util.ThreadInfo
import java.io.FileInputStream
import java.io.IOException
import java.io.InputStream
import java.nio.charset.Charset
import java.text.DecimalFormat
import java.util.{Date, Properties}
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import ddbt.tpcc.tx.TpccTable._
import org.slf4j.LoggerFactory
import org.slf4j.Logger
import ddbt.tpcc.tx.TpccTable
import TpccUnitTest._
import ddbt.tpcc.mtx._
import ddbt.tpcc.itx._
import DatabaseConnector._
import TpccConstants._
import tpcc.lmsgen._
import tpcc.lms._
import java.sql.Connection
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
// Companion object: benchmark-wide constants, property-file keys and the CLI
// entry point. The private members are imported wholesale by the
// TpccUnitTest class via `import TpccUnitTest._`, so their names are fixed.
object TpccUnitTest {

  private val NUMBER_OF_TX_TESTS = 100

  private val logger = LoggerFactory.getLogger(classOf[Tpcc])

  private val DEBUG = logger.isDebugEnabled

  val VERSION = "1.0.1"

  // Keys looked up in the tpcc.properties configuration file.
  private val DRIVER = "DRIVER"
  private val WAREHOUSECOUNT = "WAREHOUSECOUNT"
  private val DATABASE = "DATABASE"
  private val USER = "USER"
  private val PASSWORD = "PASSWORD"
  private val CONNECTIONS = "CONNECTIONS"
  private val RAMPUPTIME = "RAMPUPTIME"
  private val DURATION = "DURATION"
  private val JDBCURL = "JDBCURL"

  private val PROPERTIESFILE = "./conf/tpcc.properties"

  private val TRANSACTION_NAME = Array("NewOrder", "Payment", "Order Stat", "Delivery", "Slev")

  @volatile var counting_on: Boolean = false

  /**
   * CLI entry point. Prints environment details, then configures the harness
   * either from the properties file (no arguments) or from "-flag value"
   * pairs, and exits with the benchmark's return code.
   */
  def main(argv: Array[String]) {
    println("TPCC version " + VERSION + " Number of Arguments: " +
      argv.length)
    val interestingProps = Array("os.name", "os.arch", "os.version", "java.runtime.name", "java.vm.version", "java.library.path")
    interestingProps.foreach { prop =>
      logger.info("System Property: " + prop + " = " + System.getProperty(prop))
    }
    val memFormat = new DecimalFormat("#,##0.0")
    println("maxMemory = " +
      memFormat.format(Runtime.getRuntime.totalMemory() / (1024.0 * 1024.0)) +
      " MB")
    val harness = new TpccUnitTest
    var exitCode = 0
    if (argv.isEmpty) {
      println("Using the properties file for configuration.")
      harness.init()
      exitCode = harness.runBenchmark(false, argv)
    } else if (argv.length % 2 == 0) {
      println("Using the command line arguments for configuration.")
      harness.init()
      exitCode = harness.runBenchmark(true, argv)
    } else {
      println("Invalid number of arguments.")
      println("The possible arguments are as follows: ")
      println("-h [database host]")
      println("-d [database name]")
      println("-u [database username]")
      println("-p [database password]")
      println("-w [number of warehouses]")
      println("-c [number of connections]")
      println("-r [ramp up time]")
      println("-t [duration of the benchmark (sec)]")
      println("-j [java driver]")
      println("-l [jdbc url]")
      println("-h [jdbc fetch size]")
      println("-i [target implementation]")
      println("-n [number of transactions to execute]")
      System.exit(-1)
    }
    println("Terminating process now")
    System.exit(exitCode)
  }
}
// Unit-test harness for the TPC-C in-memory implementations: runs a bounded
// number of transactions against the implementation selected by -i, then
// compares the resulting state with a reference database load.
class TpccUnitTest {
private var numberOfTestTransactions = NUMBER_OF_TX_TESTS
// Which tx implementation to exercise: 1..11 = in-memory variants,
// -1 = LMS-generated, -2 = command recorder (prints the executed commands).
private var implVersionUnderTest = 0
private var javaDriver: String = _
private var jdbcUrl: String = _
private var dbUser: String = _
private var dbPassword: String = _
private var numWare: Int = _
private var numConn: Int = _
private var rampupTime: Int = _
private var measureTime: Int = _
private var fetchSize: Int = 100
private var num_node: Int = _
private var prev_s: Array[Int] = new Array[Int](5)
private var prev_l: Array[Int] = new Array[Int](5)
java.util.Arrays.fill(prev_s, 0)
java.util.Arrays.fill(prev_l, 0)
private var max_rt: Array[Float] = new Array[Float](5)
java.util.Arrays.fill(max_rt, 0f)
private var port: Int = 3306
private var properties: Properties = _
private var inputStream: InputStream = _
// Loads the benchmark configuration from PROPERTIESFILE into `properties`.
private def init() {
logger.info("Loading properties from: " + PROPERTIESFILE)
properties = new Properties()
inputStream = new FileInputStream(PROPERTIESFILE)
properties.load(inputStream)
}
// Runs the whole benchmark; returns the process exit code (0 on success).
// When overridePropertiesFile is true, "-flag value" pairs in argv override
// the values read from the properties file.
private def runBenchmark(overridePropertiesFile: Boolean, argv: Array[String]): Int = {
println("***************************************")
println("****** Java TPC-C Load Generator ******")
println("***************************************")
RtHist.histInit()
num_node = 0
{
dbUser = properties.getProperty(USER)
// FIX: this line had been mangled by a credentials-scrubbing pass
// ("<PASSWORD>(PASSWORD)" did not compile); restore the property lookup
// used by every other configuration value.
dbPassword = properties.getProperty(PASSWORD)
numWare = Integer.parseInt(properties.getProperty(WAREHOUSECOUNT))
numConn = Integer.parseInt(properties.getProperty(CONNECTIONS))
rampupTime = Integer.parseInt(properties.getProperty(RAMPUPTIME))
measureTime = Integer.parseInt(properties.getProperty(DURATION))
javaDriver = properties.getProperty(DRIVER)
jdbcUrl = properties.getProperty(JDBCURL)
val jdbcFetchSize = properties.getProperty("JDBCFETCHSIZE")
if (jdbcFetchSize != null) {
fetchSize = Integer.parseInt(jdbcFetchSize)
}
}
if (overridePropertiesFile) {
var i = 0
while (i < argv.length) {
if (argv(i) == "-u") {
dbUser = argv(i + 1)
} else if (argv(i) == "-p") {
dbPassword = argv(i + 1)
} else if (argv(i) == "-w") {
numWare = Integer.parseInt(argv(i + 1))
} else if (argv(i) == "-c") {
numConn = Integer.parseInt(argv(i + 1))
} else if (argv(i) == "-r") {
rampupTime = Integer.parseInt(argv(i + 1))
} else if (argv(i) == "-t") {
measureTime = Integer.parseInt(argv(i + 1))
} else if (argv(i) == "-j") {
javaDriver = argv(i + 1)
} else if (argv(i) == "-l") {
jdbcUrl = argv(i + 1)
} else if (argv(i) == "-f") {
fetchSize = Integer.parseInt(argv(i + 1))
} else if (argv(i) == "-i") {
implVersionUnderTest = Integer.parseInt(argv(i + 1))
} else if (argv(i) == "-n") {
numberOfTestTransactions = Integer.parseInt(argv(i + 1))
} else {
println("Incorrect Argument: " + argv(i))
println("The possible arguments are as follows: ")
println("-h [database host]")
println("-d [database name]")
println("-u [database username]")
println("-p [database password]")
println("-w [number of warehouses]")
println("-c [number of connections]")
println("-r [ramp up time]")
println("-t [duration of the benchmark (sec)]")
println("-j [java driver]")
println("-l [jdbc url]")
println("-h [jdbc fetch size]")
println("-i [target implementation]")
println("-n [number of transactions to execute]")
System.exit(-1)
}
i = i + 2
}
}
// Validate configuration before doing any work.
if (num_node > 0) {
if (numWare % num_node != 0) {
logger.error(" [warehouse] value must be devided by [num_node].")
return 1
}
if (numConn % num_node != 0) {
logger.error("[connection] value must be devided by [num_node].")
return 1
}
}
if (javaDriver == null) {
throw new RuntimeException("Java Driver is null.")
}
if (jdbcUrl == null) {
throw new RuntimeException("JDBC Url is null.")
}
if (dbUser == null) {
throw new RuntimeException("User is null.")
}
if (dbPassword == null) {
throw new RuntimeException("Password is null.")
}
if (numWare < 1) {
throw new RuntimeException("Warehouse count has to be greater than or equal to 1.")
}
if (numConn < 1) {
throw new RuntimeException("Connections has to be greater than or equal to 1.")
}
if (rampupTime < 1) {
throw new RuntimeException("Rampup time has to be greater than or equal to 1.")
}
if (measureTime < 1) {
throw new RuntimeException("Duration has to be greater than or equal to 1.")
}
if (implVersionUnderTest == 0) {
throw new RuntimeException("Target implementation should be selected for testing.")
}
val commandHistory:Array[TpccCommand]=new Array[TpccCommand](numberOfTestTransactions);
var commandHistoryCounter = 0
// Select the five transaction implementations for the version under test.
var newOrder: INewOrderInMem = null
var payment: IPaymentInMem = null
var orderStat: IOrderStatusInMem = null
var delivery: IDeliveryInMem = null
var slev: IStockLevelInMem = null
if(implVersionUnderTest == 11) {
// newOrder = new ddbt.tpcc.tx11.NewOrder
// payment = new ddbt.tpcc.tx11.Payment
// orderStat = new ddbt.tpcc.tx11.OrderStatus
// delivery = new ddbt.tpcc.tx11.Delivery
// slev = new ddbt.tpcc.tx11.StockLevel
} else if(implVersionUnderTest == 10) {
newOrder = new ddbt.tpcc.tx10.NewOrder
payment = new ddbt.tpcc.tx10.Payment
orderStat = new ddbt.tpcc.tx10.OrderStatus
delivery = new ddbt.tpcc.tx10.Delivery
slev = new ddbt.tpcc.tx10.StockLevel
} else if(implVersionUnderTest == 9) {
newOrder = new ddbt.tpcc.tx9.NewOrder
payment = new ddbt.tpcc.tx9.Payment
orderStat = new ddbt.tpcc.tx9.OrderStatus
delivery = new ddbt.tpcc.tx9.Delivery
slev = new ddbt.tpcc.tx9.StockLevel
} else if(implVersionUnderTest == 8) {
newOrder = new ddbt.tpcc.tx8.NewOrder
payment = new ddbt.tpcc.tx8.Payment
orderStat = new ddbt.tpcc.tx8.OrderStatus
delivery = new ddbt.tpcc.tx8.Delivery
slev = new ddbt.tpcc.tx8.StockLevel
} else if(implVersionUnderTest == 7) {
newOrder = new ddbt.tpcc.tx7.NewOrder
payment = new ddbt.tpcc.tx7.Payment
orderStat = new ddbt.tpcc.tx7.OrderStatus
delivery = new ddbt.tpcc.tx7.Delivery
slev = new ddbt.tpcc.tx7.StockLevel
} else if(implVersionUnderTest == 6) {
newOrder = new ddbt.tpcc.tx6.NewOrder
payment = new ddbt.tpcc.tx6.Payment
orderStat = new ddbt.tpcc.tx6.OrderStatus
delivery = new ddbt.tpcc.tx6.Delivery
slev = new ddbt.tpcc.tx6.StockLevel
} else if(implVersionUnderTest == 5) {
newOrder = new ddbt.tpcc.tx5.NewOrder
payment = new ddbt.tpcc.tx5.Payment
orderStat = new ddbt.tpcc.tx5.OrderStatus
delivery = new ddbt.tpcc.tx5.Delivery
slev = new ddbt.tpcc.tx5.StockLevel
} else if(implVersionUnderTest == 4) {
newOrder = new ddbt.tpcc.tx4.NewOrder
payment = new ddbt.tpcc.tx4.Payment
orderStat = new ddbt.tpcc.tx4.OrderStatus
delivery = new ddbt.tpcc.tx4.Delivery
slev = new ddbt.tpcc.tx4.StockLevel
} else if(implVersionUnderTest == 3) {
newOrder = new ddbt.tpcc.tx3.NewOrder
payment = new ddbt.tpcc.tx3.Payment
orderStat = new ddbt.tpcc.tx3.OrderStatus
delivery = new ddbt.tpcc.tx3.Delivery
slev = new ddbt.tpcc.tx3.StockLevel
} else if(implVersionUnderTest == 2) {
newOrder = new ddbt.tpcc.tx2.NewOrder
payment = new ddbt.tpcc.tx2.Payment
orderStat = new ddbt.tpcc.tx2.OrderStatus
delivery = new ddbt.tpcc.tx2.Delivery
slev = new ddbt.tpcc.tx2.StockLevel
} else if(implVersionUnderTest == 1) {
newOrder = new ddbt.tpcc.tx1.NewOrder
payment = new ddbt.tpcc.tx1.Payment
orderStat = new ddbt.tpcc.tx1.OrderStatus
delivery = new ddbt.tpcc.tx1.Delivery
slev = new ddbt.tpcc.tx1.StockLevel
} else if(implVersionUnderTest == -1) {
newOrder = new NewOrderLMSImpl
payment = new PaymentLMSImpl
orderStat = new OrderStatusLMSImpl
delivery = new DeliveryLMSImpl
slev = new StockLevelLMSImpl
} else if(implVersionUnderTest == -2) { // Command Print
// Recording stubs: each transaction is appended to commandHistory and
// reported as successful, so the executed command stream can be dumped.
newOrder = new INewOrderInMem {
override def newOrderTx(datetime: Date, t_num: Int, w_id: Int, d_id: Int, c_id: Int, o_ol_count: Int, o_all_local: Int, itemid: Array[Int], supware: Array[Int], quantity: Array[Int], price: Array[Float], iname: Array[String], stock: Array[Int], bg: Array[Char], amt: Array[Float])(implicit tInfo: ThreadInfo): Int = {
commandHistory(commandHistoryCounter) = NewOrderCommand(datetime, t_num, w_id, d_id, c_id, o_ol_count, o_all_local, itemid, supware, quantity, price, iname, stock, bg, amt)
commandHistoryCounter += 1
1
}
override def setSharedData(db: AnyRef): this.type = this
}
payment = new IPaymentInMem {
override def paymentTx(datetime: Date, t_num: Int, w_id: Int, d_id: Int, c_by_name: Int, c_w_id: Int, c_d_id: Int, c_id: Int, c_last: String, h_amount: Float)(implicit tInfo: ThreadInfo): Int = {
commandHistory(commandHistoryCounter) = PaymentCommand(datetime,t_num,w_id,d_id, c_by_name, c_w_id, c_d_id, c_id, c_last, h_amount)
commandHistoryCounter += 1
1
}
override def setSharedData(db: AnyRef): this.type = this
}
orderStat = new IOrderStatusInMem {
override def orderStatusTx(datetime: Date, t_num: Int, w_id: Int, d_id: Int, c_by_name: Int, c_id: Int, c_last: String)(implicit tInfo: ThreadInfo): Int = {
commandHistory(commandHistoryCounter) = OrderStatusCommand(datetime, t_num, w_id, d_id, c_by_name, c_id, c_last)
commandHistoryCounter += 1
1
}
override def setSharedData(db: AnyRef): this.type = this
}
slev = new IStockLevelInMem {
override def stockLevelTx(t_num: Int, w_id: Int, d_id: Int, threshold: Int)(implicit tInfo: ThreadInfo): Int = {
commandHistory(commandHistoryCounter) = StockLevelCommand(t_num, w_id, d_id, threshold)
commandHistoryCounter += 1
1
}
override def setSharedData(db: AnyRef): this.type = this
}
delivery = new IDeliveryInMem {
override def deliveryTx(datetime: Date, w_id: Int, o_carrier_id: Int)(implicit tInfo: ThreadInfo): Int = {
commandHistory(commandHistoryCounter) = DeliveryCommand(datetime, w_id, o_carrier_id)
commandHistoryCounter += 1
1
}
override def setSharedData(db: AnyRef): this.type = this
}
} else {
throw new RuntimeException("No in-memory implementation selected.")
}
System.out.print("<Parameters>\n")
System.out.print("     [driver]: %s\n".format(javaDriver))
System.out.print("        [URL]: %s\n".format(jdbcUrl))
System.out.print("       [user]: %s\n".format(dbUser))
System.out.print("       [pass]: %s\n".format(dbPassword))
System.out.print("  [warehouse]: %d\n".format(numWare))
System.out.print(" [connection]: %d\n".format(numConn))
System.out.print("     [rampup]: %d (sec.)\n".format(rampupTime))
System.out.print("    [measure]: %d (sec.)\n".format(measureTime))
System.out.print("[implVersion]: %d\n".format(implVersionUnderTest))
System.out.print("   [numTests]: %d\n".format(numberOfTestTransactions))
Util.seqInit(10, 10, 1, 1, 1)
if (DEBUG) logger.debug("Creating TpccThread")
val executor = Executors.newFixedThreadPool(numConn, new NamedThreadFactory("tpcc-thread"))
val conn = connectToDB(javaDriver, jdbcUrl, dbUser, dbPassword)
val pStmts: TpccStatements = new TpccStatements(conn, fetchSize)
// Load the initial database state into the in-memory representation and
// convert it to the flavour required by the implementation under test.
var SharedDataScala: TpccTable = null
var SharedDataLMS: EfficientExecutor = null
if(implVersionUnderTest > 0) {
SharedDataScala = new TpccTable(implVersionUnderTest)
SharedDataScala.loadDataIntoMaps(javaDriver,jdbcUrl,dbUser,dbPassword)
logger.info(SharedDataScala.getAllMapsInfoStr)
if(implVersionUnderTest == 6) {
SharedDataScala = SharedDataScala.toITpccTable
} else if(implVersionUnderTest == 7) {
SharedDataScala = SharedDataScala.toMVCCTpccTableV0
} else if(implVersionUnderTest == 8) {
SharedDataScala = SharedDataScala.toMVCCTpccTableV1
} else if(implVersionUnderTest == 9) {
SharedDataScala = SharedDataScala.toMVCCTpccTableV2
} else if(implVersionUnderTest == 10) {
SharedDataScala = SharedDataScala.toMVCCTpccTableV3(numConn)
}
// else if(implVersionUnderTest == 11) {
//   SharedDataScala = SharedDataScala.toMVCCTpccTableV4
// }
newOrder.setSharedData(SharedDataScala)
payment.setSharedData(SharedDataScala)
orderStat.setSharedData(SharedDataScala)
slev.setSharedData(SharedDataScala)
delivery.setSharedData(SharedDataScala)
} else if(implVersionUnderTest == -1){
SharedDataLMS = new EfficientExecutor
LMSDataLoader.loadDataIntoMaps(SharedDataLMS,javaDriver,jdbcUrl,dbUser,dbPassword)
logger.info(LMSDataLoader.getAllMapsInfoStr(SharedDataLMS))
newOrder.setSharedData(SharedDataLMS)
payment.setSharedData(SharedDataLMS)
orderStat.setSharedData(SharedDataLMS)
slev.setSharedData(SharedDataLMS)
delivery.setSharedData(SharedDataLMS)
}
// val initialData = new TpccTable
// initialData.loadDataIntoMaps(javaDriver,jdbcUrl,dbUser,dbPassword)
// if(initialData equals SharedDataScala) {
//   println("\n1- initialData equals SharedDataScala")
// } else {
//   println("\n1- initialData is not equal to SharedDataScala")
// }
val drivers = new Array[TpccDriver](numConn)
if(numConn == 1) {
// Single-connection mode: run mixed drivers (DB + in-memory side by side).
val newOrderMix: INewOrder = new NewOrderMixedImpl(new ddbt.tpcc.loadtest.NewOrder(pStmts), newOrder)
val paymentMix: IPayment = new PaymentMixedImpl(new ddbt.tpcc.loadtest.Payment(pStmts), payment)
val orderStatMix: IOrderStatus = new OrderStatusMixedImpl(new ddbt.tpcc.loadtest.OrderStat(pStmts), orderStat)
val slevMix: IStockLevel = new StockLevelMixedImpl(new ddbt.tpcc.loadtest.Slev(pStmts), slev)
val deliveryMix: IDelivery = new DeliveryMixedImpl(new ddbt.tpcc.loadtest.Delivery(pStmts), delivery)
val driver = new TpccDriver(conn, fetchSize, TRANSACTION_COUNT, newOrderMix, paymentMix, orderStatMix, slevMix, deliveryMix)
drivers(0) = driver
try {
if (DEBUG) {
logger.debug("Starting driver with: numberOfTestTransactions: " + numberOfTestTransactions + " num_ware: " +
numWare +
" num_conn: " +
numConn)
}
driver.runAllTransactions(new ThreadInfo(0), numWare, numConn, false, numberOfTestTransactionsPerThread)
} catch {
case e: Throwable => logger.error("Unhandled exception", e)
}
if(implVersionUnderTest == -1) {
SharedDataScala = LMSDataLoader.moveDataToTpccTable(SharedDataLMS, implVersionUnderTest)
}
} else {
var listOfCommittedCommands: Seq[ddbt.lib.util.XactCommand] = null
{ // Running the parallel implementation with enabled unit-test
// in order to collect the committed transactions
SharedDataScala.enableUnitTest
for (i <- 0 until numConn) {
val conn: Connection = null //connectToDB(javaDriver, jdbcUrl, dbUser, dbPassword)
// val pStmts: TpccStatements = new TpccStatements(conn, fetchSize)
// val newOrder: NewOrder = new NewOrder(pStmts)
// val payment: Payment = new Payment(pStmts)
// val orderStat: OrderStat = new OrderStat(pStmts)
// val slev: Slev = new Slev(pStmts)
// val delivery: Delivery = new Delivery(pStmts)
// val newOrder: INewOrder = new ddbt.tpcc.tx.NewOrder(SharedDataScala)
// val payment: IPayment = new ddbt.tpcc.tx.Payment(SharedDataScala)
// val orderStat: IOrderStatus = new ddbt.tpcc.tx.OrderStatus(SharedDataScala)
// val slev: IStockLevel = new ddbt.tpcc.tx.StockLevel(SharedDataScala)
// val delivery: IDelivery = new ddbt.tpcc.tx.Delivery(SharedDataScala)
val worker = new TpccThread(new ThreadInfo(i), port, 1, dbUser, dbPassword, numWare, numConn, javaDriver, jdbcUrl,
fetchSize, TRANSACTION_COUNT, conn, newOrder, payment, orderStat, slev, delivery, false, numberOfTestTransactionsPerThread)
drivers(i) = worker.driver
executor.execute(worker)
// conn.close
}
executor.shutdown()
try {
executor.awaitTermination(3600, TimeUnit.SECONDS)
} catch {
case e: InterruptedException => throw new RuntimeException("Timed out waiting for executor to terminate")
}
listOfCommittedCommands = SharedDataScala.getListOfCommittedCommands
}
{ // Running the transactions serially, in the same serial order
// against a database using only a single thread, to make sure
// that no transaction fails, in order to check the correctness
// of the execution
val newOrder: INewOrder = new ddbt.tpcc.loadtest.NewOrder(pStmts)
val payment: IPayment = new ddbt.tpcc.loadtest.Payment(pStmts)
val orderStat: IOrderStatus = new ddbt.tpcc.loadtest.OrderStat(pStmts)
val slev: IStockLevel = new ddbt.tpcc.loadtest.Slev(pStmts)
val delivery: IDelivery = new ddbt.tpcc.loadtest.Delivery(pStmts)
val driver = new TpccDriver(conn, fetchSize, TRANSACTION_COUNT, newOrder, payment, orderStat, slev, delivery)
val numConn = 1 //we want to avoid any unwanted rollback due to concurrency in the reference DB
try {
if (DEBUG) {
logger.debug("Starting driver with: numberOfTestTransactions: " + numberOfTestTransactions + " num_ware: " +
numWare +
" num_conn: " +
numConn)
}
logger.info("Number of committed transactions in the reference implementation: " + listOfCommittedCommands.size)
driver.runAllTransactions(new ThreadInfo(0), numWare, numConn, false, numberOfTestTransactionsPerThread, listOfCommittedCommands)
} catch {
case e: Throwable => logger.error("Unhandled exception", e)
}
}
}
// Final verification: reload the reference DB and compare it with the
// in-memory state produced by the implementation under test.
if(implVersionUnderTest >= -1){
val newData = new TpccTable(if(implVersionUnderTest == 6) 5 else implVersionUnderTest)
newData.loadDataIntoMaps(javaDriver,jdbcUrl,dbUser,dbPassword)
if(newData equals SharedDataScala.toTpccTable) {
println("\nAll transactions completed successfully and the result is correct.")
} else {
println("\nThere is some error in transactions, as the results does not match.")
}
} else if(implVersionUnderTest == -2) {
println()
System.err.println(commandHistory.mkString("","\n",""))
}
0
}
// Per-worker share of the requested transactions (evaluated after numConn is set).
lazy val numberOfTestTransactionsPerThread = numberOfTestTransactions/numConn
// def transactionCountChecker(counter:Int) = (counter < numberOfTestTransactionsPerThread)
}
|
#!/bin/sh
# Resolve the real path of this script, following any chain of symlinks, so
# relative paths below work no matter how the script was invoked.
SCRIPT="$0"
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# Derive the application root (one level above this script) unless preset.
if [ ! -d "${APP_DIR}" ]; then
APP_DIR=`dirname "$SCRIPT"`/..
APP_DIR=`cd "${APP_DIR}"; pwd`
fi
# Build the codegen CLI jar on first use.
executable="./modules/swagger-codegen-cli/target/swagger-codegen-cli.jar"
if [ ! -f "$executable" ]
then
mvn clean package
fi
# if you've executed sbt assembly previously it will use that instead.
export JAVA_OPTS="${JAVA_OPTS} -XX:MaxPermSize=256M -Xmx1024M -DloggerPath=conf/log4j.properties"
# Generate the Objective-C petstore sample client with Core Data support;
# any extra script arguments are passed through to the CLI.
ags="$@ generate -t modules/swagger-codegen/src/main/resources/objc -i modules/swagger-codegen/src/test/resources/2_0/petstore.yaml -l objc -DapiDocs=false,modelDocs=false -o samples/client/petstore/objc/core-data --additional-properties coreData=true"
java -DappName=PetstoreClient $JAVA_OPTS -jar $executable $ags
|
use std::collections::HashMap;
use std::fs;
/// Tallies lint occurrences found in a report file.
///
/// For every line, the second whitespace-separated token is used as the
/// lint category and the text after the final `"::"` (with trailing `]`
/// characters stripped) as the lint name. Lines with fewer than two
/// tokens are skipped. Returns category -> name -> count, or an error
/// message when the file cannot be read.
fn lint_analysis(file_path: &str) -> Result<HashMap<String, HashMap<String, usize>>, String> {
    let content = fs::read_to_string(file_path).map_err(|_| "Error reading file".to_string())?;

    let mut result: HashMap<String, HashMap<String, usize>> = HashMap::new();
    for line in content.lines() {
        let lint_type = match line.split_whitespace().nth(1) {
            Some(token) => token,
            None => continue,
        };
        // split("::") always yields at least one piece, so last() is Some.
        let lint_name = line.split("::").last().unwrap().trim_matches(']');
        *result
            .entry(lint_type.to_string())
            .or_insert_with(HashMap::new)
            .entry(lint_name.to_string())
            .or_insert(0) += 1;
    }

    Ok(result)
}
|
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        # Payload carried by this node.
        self.data = data
        # Reference to the following node; None marks the tail.
        self.next = None
# Linked list class
class LinkedList:
    """Holds the head reference of a singly linked list (starts empty)."""

    def __init__(self):
        self.head = None
# Insert nodes in the linked list
# Builds the three-node list 1 -> 2 -> 3 by linking nodes manually.
llist = LinkedList()
llist.head = Node(1)
second = Node(2)
third = Node(3)
# Link first node with second
llist.head.next = second
# Link second node with the third node
second.next = third
|
<filename>src/app/date/subtractYears.ts
/**
 *
 * @memberof module:Date
 * @function subtractYears
 *
 * @description Returns a new date with the given number of years subtracted.
 * The magnitude of `yearsToSubtract` is used, so negative values also
 * subtract. The input date is never mutated.
 *
 * @param {!number} yearsToSubtract - The number of years to subtract
 * @param {Date} [date=new Date()] - The date to subtract from, by default `new Date()`
 * @returns {Date} A new date with the years subtracted
 *
 * @example
 * import { subtractYears } from 'main-fns';
 *
 * const myDate = new Date(1997, 05, 27);
 * console.log(subtractYears(1, myDate)); // Date(1996, 05, 27)
 */
export function subtractYears(yearsToSubtract: number, date: Date = new Date()): Date {
  const shifted = new Date(date);
  const years = Math.abs(yearsToSubtract);
  shifted.setFullYear(shifted.getFullYear() - years);
  return shifted;
}
|
package mw
import (
"fmt"
"reflect"
"strings"
"syscall"
)
// IP address family
const (
	V4AddrFamily AddrFamily = syscall.AF_INET
	V6AddrFamily AddrFamily = syscall.AF_INET6
)

// IP address lengths (bytes).
const (
	V4AddrLen = 4
	V6AddrLen = 16
)

// IP address expressions
var (
	V4Any       = V4(0, 0, 0, 0)
	V4Broadcast = V4(255, 255, 255, 255)
)

// ASSIGNED INTERNET PROTOCOL NUMBERS
// https://datatracker.ietf.org/doc/html/rfc790#page-6
// Partial table: unassigned/reserved numbers are omitted, so lookups of
// an unlisted ProtocolNumber yield the empty string.
var protocolNumbers = map[ProtocolNumber]string{
	// 0: Reserved
	1:  "ICMP",
	3:  "Gateway-to-Gateway",
	4:  "CMCC Gateway Monitoring Message",
	5:  "ST",
	6:  "TCP",
	7:  "UCL",
	9:  "Secure",
	10: "BBN RCC Monitoring",
	11: "NVP",
	12: "PUP",
	13: "Pluribus",
	14: "Telenet",
	15: "XNET",
	16: "Chaos",
	17: "User Datagram",
	18: "Multiplexing",
	19: "DCN",
	20: "TAC Monitoring",
	// 21-62: Unassigned
	63: "any local net",
	64: "SATNET and Backroom EXPAK",
	65: "MIT Subnet Support",
	// 66-68: Unassigned
	69: "SATNET Monitoring",
	71: "Internet EthMessage Core Utility",
	// 72-75: Unassigned
	76: "Backroom SATNET Monitoring",
	78: "WIDEBAND Monitoring",
	79: "WIDEBAND EXPAK",
	// 80-254: Unassigned
	// 255: Reserved
}

// v4InV6Prefix is the 12-byte prefix of an IPv4-mapped IPv6 address
// (::ffff:a.b.c.d, RFC 5952).
var v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}
// AddrFamily is IP address family.
type AddrFamily int

// String returns "IPv4" or "IPv6"; any other family yields the empty
// string (map zero value).
func (v AddrFamily) String() string {
	return addrFamilies[v]
}

var addrFamilies = map[AddrFamily]string{
	V4AddrFamily: "IPv4",
	V6AddrFamily: "IPv6",
}
// An IP is a single IP address.
// It holds either 4 bytes (IPv4) or 16 bytes (IPv6) in network byte order.
type IP []byte
// Equal reports whether v and x are the same address. A 4-byte IPv4
// value also matches its 16-byte IPv4-mapped IPv6 form (::ffff:a.b.c.d).
func (v IP) Equal(x IP) bool {
	// Same length: plain byte-wise comparison.
	if len(v) == len(x) {
		return reflect.DeepEqual(v, x)
	}
	// Mixed 4/16-byte lengths: require the mapped prefix on the v6 side,
	// then compare the 4-byte tails.
	if len(v) == V4AddrLen && len(x) == V6AddrLen {
		return comp(x[0:12], v4InV6Prefix) && comp(v, x[12:])
	}
	if len(v) == V6AddrLen && len(x) == V4AddrLen {
		return comp(v[0:12], v4InV6Prefix) && comp(v[12:], x)
	}
	return false
}
// EqualV4 compares the first four bytes of v against a raw IPv4 array.
// NOTE(review): indexes v[0..3] directly, so it panics when len(v) < 4
// and ignores the mapped prefix of a 16-byte address — confirm callers
// only pass 4-byte values.
func (v IP) EqualV4(v4 [V4AddrLen]byte) bool {
	return v[0] == v4[0] && v[1] == v4[1] && v[2] == v4[2] && v[3] == v4[3]
}
// Mask applies mask to v and returns the masked address. A 16-byte mask
// whose first 12 bytes are all 0xff is narrowed to 4 bytes for an IPv4
// value, and a 4-byte mask is applied to the tail of an IPv4-mapped
// IPv6 value. Returns nil when the lengths still disagree.
func (v IP) Mask(mask IP) IP {
	if len(mask) == V6AddrLen && len(v) == V4AddrLen && allFF(mask[:12]) {
		mask = mask[12:]
	}
	if len(mask) == V4AddrLen && len(v) == V6AddrLen && comp(v[:12], v4InV6Prefix) {
		v = v[12:]
	}
	n := len(v)
	if n != len(mask) {
		return nil
	}
	ret := make(IP, n)
	for i := 0; i < n; i++ {
		ret[i] = v[i] & mask[i]
	}
	return ret
}
// String returns the dotted-decimal string form of the address.
// An IPv4-mapped IPv6 address (::ffff:a.b.c.d) is unwrapped first so it
// prints as its embedded IPv4 value instead of "0.0.0.0" (the mapped
// prefix bytes are zero, which is what the old code formatted).
// NOTE(review): other inputs are still formatted from their first four
// bytes and slices shorter than 4 bytes still panic — confirm callers
// only stringify IPv4 or mapped addresses.
func (v IP) String() string {
	if len(v) == V6AddrLen && comp(v[:12], v4InV6Prefix) {
		v = v[12:]
	}
	const maxIPv4StringLen = len("255.255.255.255")
	b := make(IP, maxIPv4StringLen)

	n := ubtoa(b, 0, v[0])
	b[n] = '.'
	n++

	n += ubtoa(b, n, v[1])
	b[n] = '.'
	n++

	n += ubtoa(b, n, v[2])
	b[n] = '.'
	n++

	n += ubtoa(b, n, v[3])
	return string(b[:n])
}
// ToV4 converts IP to 4 bytes representation. An IPv4-mapped IPv6
// address yields its embedded IPv4 bytes; anything else is copied into
// the 4-byte array as-is (truncated when longer, zero-padded when
// shorter).
func (v IP) ToV4() (ip [V4AddrLen]byte) {
	if len(v) == V6AddrLen && isZeros(v[0:10]) && v[10] == 0xff && v[11] == 0xff {
		copy(ip[:], v[12:16])
		return
	}
	copy(ip[:], v)
	return
}
// INTERNET PROTOCOL
// https://datatracker.ietf.org/doc/html/rfc791#page-13
// The number 576 is selected to allow a reasonable sized data block to be transmitted in addition to the required
// header information. For example, this size allows a data block of 512 octets plus 64 header octets to fit in a
// datagram. The maximal internet header is 60 octets, and a typical internet header is 20 octets, allowing a margin for
// headers of higher level protocols.

// Internet Header Format
// https://datatracker.ietf.org/doc/html/rfc791#section-3.1

// IpHdr is an internet protocol header
// Field names follow RFC 791 section 3.1.
type IpHdr struct {
	VHL      uint8          // version and internet header length
	TOS      uint8          // type of service
	TotalLen uint16         // total datagram length in octets, header included
	ID       uint16         // identification (fragment reassembly)
	Offset   uint16         // flags and fragment offset
	TTL      uint8          // time to live
	Protocol ProtocolNumber // payload protocol, see protocolNumbers
	Checksum uint16         // header checksum
	Src      [V4AddrLen]byte
	Dst      [V4AddrLen]byte
	Options  [0]byte // zero-length placeholder marking where options would begin
}
// ProtocolNumber is assigned internet protocol number
type ProtocolNumber uint8

// String returns the RFC 790 name for the protocol number, or the empty
// string for numbers missing from the protocolNumbers table.
func (v ProtocolNumber) String() string {
	return protocolNumbers[v]
}
// V4Addr is a fixed-size IPv4 address.
type V4Addr [V4AddrLen]byte

// String formats the address in dotted-decimal notation.
func (v V4Addr) String() string {
	return fmt.Sprintf("%d.%d.%d.%d", v[0], v[1], v[2], v[3])
}
// Computing the Internet Checksum
// https://datatracker.ietf.org/doc/html/rfc1071
// Checksum folds b into a 16-bit ones'-complement sum seeded with init
// (big-endian word order) and returns its complement.
func Checksum(b []byte, init uint32) uint16 {
	sum := init
	// Sum full 16-bit words. The loop stops before the final byte when
	// the length is odd; the previous bound (i < len(b)) read b[i+1]
	// past the end and panicked on every odd-length input.
	for i := 0; i+1 < len(b); i += 2 {
		sum += uint32(b[i])<<8 | uint32(b[i+1])
	}
	// A trailing odd byte forms the high half of a zero-padded final
	// word (RFC 1071), matching the big-endian folding above.
	if len(b)%2 != 0 {
		sum += uint32(b[len(b)-1]) << 8
	}
	// Fold carries back into the low 16 bits.
	for (sum >> 16) != 0 {
		sum = (sum & 0x0000ffff) + (sum >> 16)
	}
	return ^(uint16(sum))
}
// LongestIP returns the numerically larger of two equal-length
// addresses, comparing bytes from most- to least-significant.
// ip1 is returned when the addresses are equal; nil when the lengths
// differ.
func LongestIP(ip1 IP, ip2 IP) IP {
	if len(ip1) != len(ip2) {
		return nil
	}
	for i := range ip1 {
		// The first differing byte decides: earlier bytes are more
		// significant. (The old code kept scanning past a position
		// where ip1 was already larger and could return the smaller
		// address.)
		if ip1[i] > ip2[i] {
			return ip1
		}
		if ip1[i] < ip2[i] {
			return ip2
		}
	}
	return ip1
}
// ParseIP parses string as IPv4 or IPv6 address by detecting its format.
// A '.' selects dotted-decimal IPv4, a ':' selects IPv6 (currently a
// stub that returns nil); anything else yields nil.
func ParseIP(s string) IP {
	if strings.Contains(s, ".") {
		return parseV4(s)
	}
	if strings.Contains(s, ":") {
		return parseV6(s)
	}
	return nil
}
// The prefix for the special addresses described in RFC5952.
//var v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}
// v4 creates IP from bytes.
//func v4(a, b, c, d byte) IP {
// p := make(IP, V6AddrLen)
// copy(p, v4InV6Prefix)
// p[12] = a
// p[13] = b
// p[14] = c
// p[15] = d
// return p
//}
// V4 creates an IP from its four octets. TODO: use IPv4-mapped address above
func V4(a, b, c, d byte) IP {
	return IP{a, b, c, d}
}
// V4FromByte converts a fixed 4-byte array into a freshly allocated IP.
func V4FromByte(b [V4AddrLen]byte) IP {
	return IP{b[0], b[1], b[2], b[3]}
}
// allFF reports whether every byte of b equals 0xff (vacuously true for
// an empty or nil slice).
func allFF(b []byte) bool {
	for i := 0; i < len(b); i++ {
		if b[i] != 0xff {
			return false
		}
	}
	return true
}
// comp reports whether s1 and s2 hold identical bytes. Slices of
// different lengths are never equal; the explicit guard also prevents
// the out-of-range panic the old code hit when s2 was shorter than s1.
// (All current callers pass equal-length slices.)
func comp(s1 []byte, s2 []byte) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i, val := range s1 {
		if val != s2[i] {
			return false
		}
	}
	return true
}
// isZeros reports whether every byte of ip is zero (true for empty input).
func isZeros(ip IP) bool {
	for _, b := range ip {
		if b != 0 {
			return false
		}
	}
	return true
}
// parseV4 parses s as a dotted-decimal IPv4 address ("a.b.c.d").
// It returns nil for malformed input: a missing dot, an empty or
// out-of-range octet, or truncated input.
func parseV4(s string) IP {
	var p [V4AddrLen]byte
	for i := 0; i < V4AddrLen; i++ {
		if i > 0 {
			// Each octet after the first must be preceded by a dot.
			// The length guard keeps truncated input such as "1.2.3"
			// from indexing past the end of the string (the old code
			// panicked there).
			if len(s) == 0 || s[0] != '.' {
				return nil
			}
			s = s[1:]
		}
		n, c, ok := stoi(s)
		if !ok || n > 0xff {
			return nil
		}
		s = s[c:]
		p[i] = byte(n)
	}
	return V4(p[0], p[1], p[2], p[3])
}
// parseV6 parses string as IPv6 address.
// Unimplemented stub: always returns nil, so ParseIP currently rejects
// every IPv6 literal.
func parseV6(s string) IP {
	// TODO: parse the string as IPv6 address
	return nil
}
// stoi consumes a leading run of decimal digits from s and returns the
// parsed value, the number of characters consumed, and whether at least
// one digit was found.
func stoi(s string) (n int, c int, ok bool) {
	for c < len(s) {
		d := s[c]
		if d < '0' || d > '9' {
			break
		}
		n = n*10 + int(d-'0')
		c++
	}
	ok = c > 0
	return n, c, ok
}
// ubtoa writes the decimal ASCII form of v into dst starting at start
// and returns the number of bytes written (1-3). dst must have room for
// the digits.
func ubtoa(dst []byte, start int, v byte) int {
	switch {
	case v < 10:
		dst[start] = '0' + v
		return 1
	case v < 100:
		dst[start] = '0' + v/10
		dst[start+1] = '0' + v%10
		return 2
	default:
		dst[start] = '0' + v/100
		dst[start+1] = '0' + (v/10)%10
		dst[start+2] = '0' + v%10
		return 3
	}
}
|
def find_common_elements(list1, list2):
    """Return the elements common to two ascending sorted lists.

    Walks both lists with a two-pointer merge: equal heads are collected
    and both cursors advance; otherwise the cursor behind the smaller
    head moves forward. Runs in O(len(list1) + len(list2)).
    """
    common_elements = []
    a = b = 0
    n1, n2 = len(list1), len(list2)
    while a < n1 and b < n2:
        x, y = list1[a], list2[b]
        if x == y:
            common_elements.append(x)
            a += 1
            b += 1
        elif x > y:
            b += 1
        else:
            a += 1
    return common_elements
|
def search2Dlist(list, item):
    """Return the (row, col) position of item in a 2D list, or None.

    Scans every cell, so when the item occurs more than once the LAST
    occurrence (in row-major order) wins — this mirrors the original
    exhaustive scan. ``list`` keeps its historical name for callers even
    though it shadows the builtin.
    """
    found = None
    for row_number, row_values in enumerate(list):
        for col_number, value in enumerate(row_values):
            if value == item:
                found = (row_number, col_number)
    return found

# Output: (1, 2) (index of item 8 in the 2D list)
|
<gh_stars>1-10
// Question/answer tag keys for the Norwegian sickness-benefit
// (sykepenger) self-report forms. Each constant's value mirrors its
// name so the identifiers double as serialized tag strings.
export const TILBAKE_I_ARBEID = 'TILBAKE_I_ARBEID';
export const TILBAKE_NAR = 'TILBAKE_NAR';
export const JOBBET_DU_GRADERT = 'JOBBET_DU_GRADERT';
export const JOBBET_DU_100_PROSENT = 'JOBBET_DU_100_PROSENT';
export const ANDRE_INNTEKTSKILDER = 'ANDRE_INNTEKTSKILDER';
export const HVOR_MANGE_TIMER = 'HVOR_MANGE_TIMER';
export const HVOR_MYE_HAR_DU_JOBBET = 'HVOR_MYE_HAR_DU_JOBBET';
export const HVILKE_ANDRE_INNTEKTSKILDER = 'HVILKE_ANDRE_INNTEKTSKILDER';
export const INNTEKTSKILDE_ARBEIDSFORHOLD = 'INNTEKTSKILDE_ARBEIDSFORHOLD';
export const INNTEKTSKILDE_JORDBRUKER = 'INNTEKTSKILDE_JORDBRUKER';
export const INNTEKTSKILDE_FRILANSER_SELVSTENDIG = 'INNTEKTSKILDE_FRILANSER_SELVSTENDIG';
export const INNTEKTSKILDE_ANNET = 'INNTEKTSKILDE_ANNET';
export const INNTEKTSKILDE_ANNET_ER_DU_SYKMELDT = 'INNTEKTSKILDE_ANNET_ER_DU_SYKMELDT';
export const INNTEKTSKILDE_ARBEIDSFORHOLD_ER_DU_SYKMELDT = 'INNTEKTSKILDE_ARBEIDSFORHOLD_ER_DU_SYKMELDT';
export const INNTEKTSKILDE_JORDBRUKER_ER_DU_SYKMELDT = 'INNTEKTSKILDE_JORDBRUKER_ER_DU_SYKMELDT';
export const INNTEKTSKILDE_FRILANSER_SELVSTENDIG_ER_DU_SYKMELDT = 'INNTEKTSKILDE_FRILANSER_SELVSTENDIG_ER_DU_SYKMELDT';
export const INNTEKTSKILDE_OMSORGSLONN_ER_DU_SYKMELDT = 'INNTEKTSKILDE_OMSORGSLONN_ER_DU_SYKMELDT';
export const INNTEKTSKILDE_FOSTERHJEM_ER_DU_SYKMELDT = 'INNTEKTSKILDE_FOSTERHJEM_ER_DU_SYKMELDT';
export const PERIODER = 'PERIODER';
export const VAER_KLAR_OVER_AT = 'VAER_KLAR_OVER_AT';
export const UTLAND = 'UTLAND';
export const UTDANNING = 'UTDANNING';
export const ANSVARSERKLARING = 'ANSVARSERKLARING';
export const BEKREFT_OPPLYSNINGER = 'BEKREFT_OPPLYSNINGER';
export const SYKMELDINGSGRAD = 'SYKMELDINGSGRAD';
export const FERIE = 'FERIE';
// _V2 variants coexist with the originals to support both form versions.
export const FERIE_V2 = 'FERIE_V2';
export const FERIE_NAR_V2 = 'FERIE_NAR_V2';
export const PERMISJON_V2 = 'PERMISJON_V2';
export const PERMISJON_NAR_V2 = 'PERMISJON_NAR_V2';
export const ARBEIDSGIVER = 'ARBEIDSGIVER';
export const LAND = 'LAND';
export const PERIODEUTLAND = 'PERIODEUTLAND';
export const BEKREFT_OPPLYSNINGER_UTLAND = 'BEKREFT_OPPLYSNINGER_UTLAND';
export const BEKREFT_OPPLYSNINGER_UTLAND_INFO = 'BEKREFT_OPPLYSNINGER_UTLAND_INFO';
export const EGENMELDINGER = 'EGENMELDINGER';
export const FERIE_PERMISJON_UTLAND = 'FERIE_PERMISJON_UTLAND';
export const BETALER_ARBEIDSGIVER = 'BETALER_ARBEIDSGIVER';
export const INNTEKTSKILDE_SELVSTENDIG_ER_DU_SYKMELDT = 'INNTEKTSKILDE_SELVSTENDIG_ER_DU_SYKMELDT';
export const INNTEKTSKILDE_SELVSTENDIG_DAGMAMMA_ER_DU_SYKMELDT = 'INNTEKTSKILDE_SELVSTENDIG_DAGMAMMA_ER_DU_SYKMELDT';
export const INNTEKTSKILDE_FRILANSER_ER_DU_SYKMELDT = 'INNTEKTSKILDE_FRILANSER_ER_DU_SYKMELDT';
export const INNTEKTSKILDE_ANDRE_ARBEIDSFORHOLD_ER_DU_SYKMELDT = 'INNTEKTSKILDE_ANDRE_ARBEIDSFORHOLD_ER_DU_SYKMELDT';
export const UTLANDSOPPHOLD_SOKT_SYKEPENGER = 'UTLANDSOPPHOLD_SOKT_SYKEPENGER';
export const HVOR_MYE_PROSENT = 'HVOR_MYE_PROSENT';
export const HVOR_MYE_TIMER = 'HVOR_MYE_TIMER';
export const HVOR_MYE_TIMER_VERDI = 'HVOR_MYE_TIMER_VERDI';
export const HVOR_MANGE_TIMER_PER_UKE = 'HVOR_MANGE_TIMER_PER_UKE';
export const HVOR_MYE_PROSENT_VERDI = 'HVOR_MYE_PROSENT_VERDI';
export const ENKELTSTAENDE_BEHANDLINGSDAGER = 'ENKELTSTAENDE_BEHANDLINGSDAGER';
export const ENKELTSTAENDE_BEHANDLINGSDAGER_UKE = 'ENKELTSTAENDE_BEHANDLINGSDAGER_UKE';
export const ENKELTSTAENDE_BEHANDLINGSDAGER_DAG_NAR = 'ENKELTSTAENDE_BEHANDLINGSDAGER_DAG_NAR';
export const FRAVER_FOR_BEHANDLING = 'FRAVER_FOR_BEHANDLING';
|
#!/bin/bash
# Container entrypoint: start the stacks API in the background, patch
# nginx's port from $PORT, then run nginx in the foreground as PID 1.
# turn on bash's job control
set -m
# run the API server in the background
stacks serve -a 0.0.0.0:5000 &
# edit the port in the nginx config
# ($PORT is expected from the environment, e.g. set by the platform)
sed -i -e 's/$PORT/'"$PORT"'/g' /etc/nginx/conf.d/default.conf
# Run nginx and leave it running
nginx -g 'daemon off;'
|
<filename>source/pages/_app.tsx
if (process.env.NODE_ENV === 'development') {
  // Must use require here as import statements are only allowed
  // to exist at the top of a file.
  // NOTE(review): ES imports below are hoisted ahead of this call at
  // runtime — confirm preact/debug still attaches early enough.
  require('preact/debug');
}
import { AppProps } from 'next/app';
import React from 'react';
import NoSSR from 'react-no-ssr';
import '../config/mobx.config';
import ErrorPage from '../features/errors/ErrorPage';
import { I18nFeatureProvider } from '../features/i18n/ui/I18nFeatureProvider';
import { isSupportedLocale } from '../features/i18n/utils';
import { NavigationFeatureProvider } from '../features/navigation/ui/NavigationFeatureProvider';
import { NetworkInfoFeatureProvider } from '../features/network-info/ui/NetworkInfoFeatureProvider';
import { BrowserUpdate } from '../features/outdated-browser/BrowserUpdate';
import { SearchFeatureProvider } from '../features/search/ui/SearchFeatureProvider';
import GraphQLProvider from '../lib/graphql/GraphQLProvider';
import { PageComponentWithStaticLayout } from '../lib/types';
import '../styles/global/index.scss';
import PolymorphThemeProvider from '../styles/theme/PolymorphThemeProvider';
import LoadingSpinner from '../widgets/loading-spinner/LoadingSpinner';
import styles from './_app.module.scss';
// Identity layout used when a page defines no static layout of its own:
// renders children in a bare fragment.
const EmptyStaticLayout = (props: { children: React.ReactNode }) => (
  <>{props.children}</>
);
// Root Next.js App component: wires global providers (i18n, GraphQL,
// theming, network info, navigation, search) around every page and
// swaps in the error page for unsupported locales or 404s.
export default function CardanoExplorer({ Component, pageProps }: AppProps) {
  const { locale, statusCode } = pageProps;
  let PageWithOptionalLayout = Component as PageComponentWithStaticLayout;
  // Next.js doesn't know that we only want sub-paths for supported languages
  // so we need to check ourselves:
  if ((locale && !isSupportedLocale(locale)) || statusCode === 404) {
    PageWithOptionalLayout = ErrorPage;
  }
  // Pages may supply their own persistent layout; fall back to the
  // pass-through layout otherwise.
  const StaticLayout =
    PageWithOptionalLayout.getStaticLayout?.() ?? EmptyStaticLayout;
  // Provide global app features that must survive page navigation:
  return (
    <I18nFeatureProvider locale={locale}>
      <GraphQLProvider>
        <PolymorphThemeProvider>
          <NetworkInfoFeatureProvider>
            <NavigationFeatureProvider>
              <SearchFeatureProvider>
                <StaticLayout>
                  <NoSSR
                    onSSR={
                      <LoadingSpinner className={styles.loadingSpinnerMargin} />
                    }
                  >
                    <BrowserUpdate />
                    <Component {...pageProps} />
                  </NoSSR>
                </StaticLayout>
              </SearchFeatureProvider>
            </NavigationFeatureProvider>
          </NetworkInfoFeatureProvider>
        </PolymorphThemeProvider>
      </GraphQLProvider>
    </I18nFeatureProvider>
  );
}
|
<reponame>yash-srivastava/iot_subscriber
package dbutils

import (
	_ "github.com/jinzhu/gorm/dialects/mysql"
	"github.com/jinzhu/gorm"
	"github.com/revel/revel"
)

// DBCONN is the application-wide gorm connection, populated by InitDB.
var(
	DBCONN *gorm.DB
)

// InitDB opens the MySQL connection and auto-migrates the schema for
// the Sgu, Scu and Attached_Schedules models.
func InitDB(){
	DBCONN = newClient()
	DBCONN.AutoMigrate(&Sgu{}, &Scu{}, &Attached_Schedules{})
}

// newClient dials MySQL and configures gorm to log through revel.
// SECURITY(review): the DSN embeds hostname, username and password in
// source — move these to configuration/secret management.
// NOTE(review): a failed Open still returns the (unusable) db handle;
// callers see errors only via the revel log — confirm this is intended.
func newClient() *gorm.DB{
	db, err := gorm.Open("mysql", "HavellsDBAdmin:HavellsCCMS420@tcp(mysql.cqwf1pvghoch.us-west-2.rds.amazonaws.com:3306)/HavellsSubscriber?parseTime=True&loc=Local")
	if err != nil {
		revel.ERROR.Println(err.Error())
	}
	db.SetLogger(revel.WARN)
	db.LogMode(true)
	return db
}
|
<gh_stars>0
-- Field-level audit trail: one row per changed column value, recording
-- the table, primary key, field and old/new values, plus which API user
-- or client made the change and when.
CREATE TABLE [auth].[Audits]
(
	[Id] INT NOT NULL identity(100000, 1),
	-- Single-letter change type marker.
	[Type] char(1) not null,
	[TableName] varchar(64) not null,
	[PrimaryKeyField] varchar(64) not null,
	[PrimaryKeyValue] int not null,
	[FieldName] varchar(64) not null,
	[OldValue] nvarchar(max) null,
	[NewValue] nvarchar(max) null,
	[CreatedByUserId] int null,
	[CreatedByClientId] int null,
	-- Computed column: evaluates getdate() when selected, not at insert.
	[CreatedDate] as getdate(),

	constraint PK_Audits_ID primary key ([Id]),
	constraint FK_Audits_CreatedByClientId foreign key ([CreatedByClientId]) references [auth].[ApiClients]([Id]),
	constraint FK_Audits_CreatedByUserId foreign key ([CreatedByUserId]) references [auth].[ApiUsers]([Id])
)
|
#include <iostream>
#include <string>
#include <cmath>
// Camera models a named viewpoint in 3D space: a position (x, y, z),
// an orientation (pitch, yaw, roll), a field-of-view angle and the
// distance to the focal point.
class Camera {
public:
    // Constructor to initialize the camera with a given name.
    // Defaults: distance 0, FOV 90, position/orientation at the origin.
    // Initializers are listed in member declaration order so the
    // textual order matches the actual initialization order (the
    // original listed name_ out of order, triggering -Wreorder).
    Camera(const std::string& name)
        : distance_(0.0f),
          fov_(90.0f),
          name_(name),
          position_{0.0f, 0.0f, 0.0f},
          orientation_{0.0f, 0.0f, 0.0f} {}

    // Method to set the distance to the focal point
    void setDistance(float distance) {
        distance_ = distance;
    }

    // Method to set the field of view angle
    void setFOV(float fov) {
        fov_ = fov;
    }

    // Method to set the camera's position in 3D space
    void setPosition(float x, float y, float z) {
        position_[0] = x;
        position_[1] = y;
        position_[2] = z;
    }

    // Method to set the camera's orientation (pitch, yaw, roll)
    void setOrientation(float pitch, float yaw, float roll) {
        orientation_[0] = pitch;
        orientation_[1] = yaw;
        orientation_[2] = roll;
    }

    // Method to retrieve the camera's name
    std::string getName() const {
        return name_;
    }

    // Method to retrieve the distance to the focal point
    float getDistance() const {
        return distance_;
    }

    // Method to retrieve the field of view angle
    float getFOV() const {
        return fov_;
    }

    // Returns a pointer to the 3-element position array (x, y, z);
    // valid for the lifetime of this Camera.
    const float* getPosition() const {
        return position_;
    }

    // Returns a pointer to the 3-element orientation array
    // (pitch, yaw, roll); valid for the lifetime of this Camera.
    const float* getOrientation() const {
        return orientation_;
    }

private:
    float distance_;        // Distance to the focal point
    float fov_;             // Field of view angle
    std::string name_;      // Name of the camera
    float position_[3];     // Camera's position in 3D space (x, y, z)
    float orientation_[3];  // Camera's orientation (pitch, yaw, roll)
};
// Demo driver: configure a camera and print its full state to stdout.
int main() {
    Camera myCamera("MainCamera");
    myCamera.setDistance(10.0f);
    myCamera.setFOV(120.0f);
    myCamera.setPosition(1.0f, 2.0f, 3.0f);
    myCamera.setOrientation(45.0f, 30.0f, 0.0f);

    std::cout << "Camera Name: " << myCamera.getName() << std::endl;
    std::cout << "Distance to Focal Point: " << myCamera.getDistance() << std::endl;
    std::cout << "Field of View: " << myCamera.getFOV() << std::endl;

    const float* position = myCamera.getPosition();
    std::cout << "Camera Position: (" << position[0] << ", " << position[1] << ", " << position[2] << ")" << std::endl;

    const float* orientation = myCamera.getOrientation();
    std::cout << "Camera Orientation (Pitch, Yaw, Roll): (" << orientation[0] << ", " << orientation[1] << ", " << orientation[2] << ")" << std::endl;

    return 0;
}
|
#!/bin/sh
# Runs lsif-go over a project and writes the LSIF index to $OUT.
# Required env: OUT, PROJECT_ROOT, MODULE_ROOT; optional ADDITIONAL_ARGS.
: ${OUT:?output file not specified}
: ${PROJECT_ROOT:?project root not specified}
: ${MODULE_ROOT:?module root not specified}
: ${ADDITIONAL_ARGS=''}
# Resolve OUT to an absolute path before changing directory.
OUTPUT_DIR="$(cd "$(dirname "$OUT")" && pwd)"
ABS_OUT="${OUTPUT_DIR}/$(basename "$OUT")"
cd "${PROJECT_ROOT}"
lsif-go --output "$ABS_OUT" --module-root "$MODULE_ROOT" --no-animation $ADDITIONAL_ARGS
cd -
|
<gh_stars>0
/**
 * Integer square root: the largest integer r such that r * r <= x.
 *
 * Bounded binary search over [1, x]; same results as the original, but
 * with `const`/`let` instead of `var`, strict comparisons instead of
 * `==`, and a provably terminating loop instead of `while (true)`.
 * @param {number} x - Non-negative integer.
 * @return {number} floor(sqrt(x))
 */
const mySqrt = function (x) {
  if (x <= 1) {
    return x;
  }
  let lo = 1;
  let hi = x;
  let ans = 1;
  while (lo <= hi) {
    const mid = Math.floor((lo + hi) / 2);
    if (mid * mid <= x) {
      // mid is a valid root; remember it and search for a larger one.
      ans = mid;
      lo = mid + 1;
    } else {
      hi = mid - 1;
    }
  }
  return ans;
};
|
// Coursera handin configuration for the progfun2 "quickcheck" assignment.
course := "progfun2"
assignment := "quickcheck"
// Grader identifiers plus the scalastyle sheet applied to submissions.
assignmentInfo := AssignmentInfo(
  key = "<KEY>",
  itemId = "ML01L",
  premiumItemId = Some("DF4y7"),
  partId = "DZTNG",
  styleSheet = Some((_: File) / "scalastyle" / "scalastyle_config.xml")
)
|
#!/bin/bash
#
# Start up daemon process to rebuild changed sources
#
# $Id: //depot/HotReloading/start_daemon.sh#33 $
#
cd "$(dirname "$0")"
# Refuse to run in Release builds: hot reloading must not ship.
if [ "$CONFIGURATION" = "Release" ]; then
    echo "error: You shouldn't be shipping HotReloading in your app!"
    exit 1
fi
# Storyboard-injection builds skip the daemon entirely.
if [ -f "/tmp/injecting_storyboard.txt" ]; then
    rm /tmp/injecting_storyboard.txt
    exit 0
fi
# Locate Xcode's build logs for this project's DerivedData.
DERIVED_DATA="$(dirname $(dirname $SYMROOT))"
export DERIVED_LOGS="$DERIVED_DATA/Logs/Build"
LAST_LOG=`ls -t $DERIVED_LOGS/*.xcactivitylog | head -n 1`
export NORMAL_ARCH_FILE="$OBJECT_FILE_DIR_normal/$ARCHS/$PRODUCT_NAME"
export LINK_FILE_LIST="$NORMAL_ARCH_FILE.LinkFileList"
# kill any existing daemon process
kill -9 `ps auxww | grep .build/debug/injectiond | grep -v grep | awk '{ print $2 }'`
# Avoid having to fetch dependancies again
# mkdir -p .build; ln -s "$DERIVED_DATA"/SourcePackages/repositories .build
# rebuild daemon
/usr/bin/env -i PATH="$PATH" "$TOOLCHAIN_DIR"/usr/bin/swift build --product injectiond &&
# clone Contents directory for Cocoa
rsync -at Contents .build/debug &&
# run in background passing project file, logs directory
# followed by a list of additional directories to watch.
(.build/debug/injectiond "$PROJECT_FILE_PATH" "$DERIVED_LOGS" `gunzip <$LAST_LOG | tr '\r' '\n' | grep -e ' cd ' | sort -u | grep -v DerivedData | awk '{ print $2 }'` >/tmp/hot_reloading.log 2>&1 &)
|
<filename>setup.py
# Copyright 2020-present Kensho Technologies, LLC.
import codecs
import os
from setuptools import find_packages, setup
# single sourcing package version strategy taken from
# https://packaging.python.org/guides/single-sourcing-package-version
PACKAGE_NAME = "kwnlp_sql_parser"


def read_file(filename: str) -> str:
    """Read package file as text to get name and version."""
    # Resolve relative to this setup.py so builds work from any CWD.
    here = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(here, PACKAGE_NAME, filename), "r") as f:
        return f.read()
def find_version() -> str:
    """Extract ``__version__`` from the package ``__init__.py``.

    Keeps the version defined in exactly one place; raises RuntimeError
    when no ``__version__`` line is present.
    """
    for line in read_file("__init__.py").splitlines():
        if line.startswith("__version__"):
            quote = '"' if '"' in line else "'"
            return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
def find_long_description() -> str:
    """Return the content of the README.rst file."""
    # The README lives one directory above the package directory.
    return read_file("../README.md")
# Package metadata; version and long description are single-sourced
# from the package itself via the helpers above.
setup(
    name=PACKAGE_NAME,
    version=find_version(),
    description="Utility for parsing Wikipedia SQL dumps into CSVs.",
    long_description=find_long_description(),
    long_description_content_type="text/markdown",
    url="https://github.com/kensho-technologies/kwnlp-sql-parser",
    author="Kensho Technologies LLC.",
    author_email="<EMAIL>",
    license="Apache 2.0",
    packages=find_packages(exclude=["tests*"]),
    package_data={"": []},
    install_requires=[],
    extras_require={
        "dev": [
            "pre-commit",
        ]
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    keywords="wikipedia sql dump open data",
    python_requires=">=3.6",
)
|
#!/bin/bash
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
# Entry script: picks the right Jupyter launcher depending on whether we
# run under JupyterHub, with JupyterLab enabled, or as a plain notebook.
set -e

# Optionally keep the server alive by restarting it on exit.
wrapper=""
if [[ "${RESTARTABLE}" == "yes" ]]; then
  wrapper="run-one-constantly"
fi

if [[ ! -z "${JUPYTERHUB_API_TOKEN}" ]]; then
  # launched by JupyterHub, use single-user entrypoint
  exec /usr/local/bin/start-singleuser.sh "$@"
elif [[ ! -z "${JUPYTER_ENABLE_LAB}" ]]; then
  . /usr/local/bin/start.sh $wrapper jupyter lab "$@"
else
  . /usr/local/bin/start.sh $wrapper jupyter notebook "$@"
fi
|
#include "test.h"

/* Entry point: delegates to start() from test.h; implicit return 0 (C99). */
int main() {
  start();
}
|
import puppeteer from "puppeteer";

// Launch a visible (non-headless) browser and navigate to example.com.
(async () => {
  const b = await puppeteer.launch({
    headless: false,
  });

  const p = await b.newPage();
  await p.goto("https://example.com");
  // NOTE(review): the browser is left open and errors are unhandled —
  // add `await b.close()` and a catch if this should exit cleanly.
})();
|
const axios = require('axios');
const mongoose = require('mongoose');

// Connect once at startup; the model below shares this connection.
mongoose.connect('mongodb://localhost:27017/test', {useNewUrlParser: true});

// Snapshot of a GitHub profile: the id/login plus the raw payload.
const dataSchema = new mongoose.Schema({
    id: Number,
    username: String,
    data: Object
});

const Data = mongoose.model('Data', dataSchema);

// Fetch the GitHub profile and persist it.
axios.get('https://api.github.com/users/username')
    .then(res => {
        const data = new Data({
            id: res.data.id,
            username: res.data.login,
            data: res.data
        });

        return data.save()
            .then(result => {
                console.log(result);
            })
            .catch(err => {
                console.log(err);
            });
    })
    .catch(err => {
        // The original chain had no rejection handler, so any HTTP
        // failure surfaced as an unhandled promise rejection (fatal on
        // recent Node versions).
        console.log(err);
    });
|
#!/bin/sh
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Creates and enables a systemd unit that starts/stops an IHS server
# binary. Args: 1) unit name, 2) unit description, 3) server control
# script name under $IHS_INSTALL_DIRECTORY/bin (which is expected from
# the sourced virtualimage.properties environment).
create_systemd_service() {
    srvName=$1
    srvDescription=$2
    serverName=$3

    # Add systemd unit file
    cat <<EOF > /etc/systemd/system/${srvName}.service
[Unit]
Description=${srvDescription}
RequiresMountsFor=/datadrive
After=network.target

[Service]
Type=forking
ExecStart=/bin/sh -c "${IHS_INSTALL_DIRECTORY}/bin/${serverName} start"
ExecStop=/bin/sh -c "${IHS_INSTALL_DIRECTORY}/bin/${serverName} stop"
SuccessExitStatus=0
TimeoutStartSec=900

[Install]
WantedBy=default.target
EOF

    # Enable service
    systemctl daemon-reload
    systemctl enable "$srvName"
}
# Get IHS installation properties
source /datadrive/virtualimage.properties

# Check whether the user is entitled or not
# (WAS_LOG_PATH / ENTITLED / UNENTITLED / UNDEFINED come from the
# sourced properties file; poll until the installer reports a verdict.)
while [ ! -f "$WAS_LOG_PATH" ]
do
    sleep 5
done
isDone=false
while [ $isDone = false ]
do
    result=`(tail -n1) <$WAS_LOG_PATH`
    if [[ $result = $ENTITLED ]] || [[ $result = $UNENTITLED ]] || [[ $result = $UNDEFINED ]]; then
        isDone=true
    else
        sleep 5
    fi
done

# Remove cloud-init artifacts and logs
cloud-init clean --logs

# Terminate the process for the un-entitled or undefined user
if [ ${result} != $ENTITLED ]; then
    if [ ${result} = $UNENTITLED ]; then
        echo "The provided IBMid does not have entitlement to install WebSphere Application Server. Please contact the primary or secondary contacts for your IBM Passport Advantage site to grant you access or follow steps at IBM eCustomer Care (https://ibm.biz/IBMidEntitlement) for further assistance."
    else
        echo "No WebSphere Application Server installation packages were found. This is likely due to a temporary issue with the installation repository. Try again and open an IBM Support issue if the problem persists."
    fi
    exit 1
fi

# Check required parameters
if [ "$8" == "" ]; then
    echo "Usage:"
    echo "  ./configure-ihs.sh [dmgrHostname] [ihsUnixUsername] [ihsAdminUsername] [ihsAdminPassword] [storageAccountName] [storageAccountKey] [fileShareName] [mountpointPath]"
    exit 1
fi

dmgrHostname=$1
ihsUnixUsername=$2
ihsAdminUsername=$3
ihsAdminPassword=$4
storageAccountName=$5
storageAccountKey=$6
fileShareName=$7
mountpointPath=$8

echo "$(date): Start to configure IHS."

# Open ports
# (80: web server traffic, 8008: IHS admin server)
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --zone=public --add-port=8008/tcp --permanent
firewall-cmd --reload

hostname=`hostname`
responseFile="pct.response.txt"

# Create response file
# Silent-configuration answers consumed by the WCT pct tool below.
echo "configType=remote" > $responseFile
echo "enableAdminServerSupport=true" >> $responseFile
echo "enableUserAndPass=true" >> $responseFile
echo "enableWinService=false" >> $responseFile
echo "ihsAdminCreateUserAndGroup=true" >> $responseFile
echo "ihsadminPort=8008" >> $responseFile
echo "ihsAdminUnixUserID=$ihsUnixUsername" >> $responseFile
echo "ihsAdminUnixUserGroup=$ihsUnixUsername" >> $responseFile
echo "ihsAdminUserID=$ihsAdminUsername" >> $responseFile
echo "ihsAdminPassword=$ihsAdminPassword" >> $responseFile
echo "mapWebServerToApplications=true" >> $responseFile
echo "wasMachineHostName=$dmgrHostname" >> $responseFile
echo "webServerConfigFile1=$IHS_INSTALL_DIRECTORY/conf/httpd.conf" >> $responseFile
echo "webServerDefinition=webserver1" >> $responseFile
echo "webServerHostName=$hostname" >> $responseFile
echo "webServerInstallArch=64" >> $responseFile
echo "webServerPortNumber=80" >> $responseFile
echo "webServerSelected=ihs" >> $responseFile
echo "webServerType=IHS" >> $responseFile

# Configure IHS using WCT
$WCT_INSTALL_DIRECTORY/WCT/wctcmd.sh -tool pct -importDefinitionLocation -defLocPathname $PLUGIN_INSTALL_DIRECTORY -defLocName WS1 -response $responseFile
rm -rf $responseFile

# Start IHS admin server
$IHS_INSTALL_DIRECTORY/bin/adminctl start

# Create systemd services to automatically starting IHS admin server when system is rebooted
create_systemd_service ihs_web_server "IBM HTTP Server" apachectl
create_systemd_service ihs_admin_server "IBM HTTP Server admin server" adminctl

# Mount Azure File Share system
mkdir -p $mountpointPath
mkdir /etc/smbcredentials
echo "username=$storageAccountName" > /etc/smbcredentials/${storageAccountName}.cred
echo "password=$storageAccountKey" >> /etc/smbcredentials/${storageAccountName}.cred
chmod 600 /etc/smbcredentials/${storageAccountName}.cred
echo "//${storageAccountName}.file.core.windows.net/${fileShareName} $mountpointPath cifs nofail,credentials=/etc/smbcredentials/${storageAccountName}.cred,dir_mode=0777,file_mode=0777,serverino" >> /etc/fstab
mount -t cifs //${storageAccountName}.file.core.windows.net/${fileShareName} $mountpointPath -o credentials=/etc/smbcredentials/${storageAccountName}.cred,dir_mode=0777,file_mode=0777,serverino
if [[ $? != 0 ]]; then
    echo "$(date): Failed to mount //${storageAccountName}.file.core.windows.net/${fileShareName} $mountpointPath."
    exit 1
fi

# Move the IHS confguration script to Azure File Share system
mv $PLUGIN_INSTALL_DIRECTORY/bin/configurewebserver1.sh $mountpointPath
if [[ $? != 0 ]]; then
    echo "$(date): Failed to move $PLUGIN_INSTALL_DIRECTORY/bin/configurewebserver1.sh to $mountpointPath."
    exit 1
fi

echo "$(date): Complete to configure IHS."
|
<gh_stars>1-10
import R from 'ramda';
import { handleActions } from 'redux-actions';
import { insert, update, remove, insertWithUUID} from '@/Utils/StateHelper';
import { defaultCategories } from '../../constants/Categories';
import types from './types';
/**
 * Normalize raw category props into a category record, applying the
 * defaults for the optional fields (parentId, isIncome, usedTimes,
 * isHide, isSynced, order).
 */
const createOrUpdateCategory = ({
  id,
  code,
  name,
  icon,
  incomeIcon,
  type,
  parentId = 0,
  isIncome = true,
  usedTimes = 0,
  isHide = false,
  isSynced = false,
  order = 0,
}) => ({
  id,
  code,
  name,
  icon,
  incomeIcon,
  type,
  isIncome,
  parentId,
  usedTimes,
  isHide,
  isSynced,
  order,
});
// Seed the store with the built-in default categories, keyed by UUID.
let initialState = {};
R.map(item=>{
initialState = insertWithUUID(initialState,createOrUpdateCategory(item));
}, defaultCategories);
// Maps category action types to state transitions. RESET restores
// either the provided payload or the seeded defaults.
const categoriesReducer = handleActions({
  [types.ADD]: (state, { payload }) => insertWithUUID(state, createOrUpdateCategory(payload)),
  [types.UPDATE]: (state, {
    payload
  }) => update(state, payload.id, createOrUpdateCategory(payload)),
  [types.REMOVE]: (state, { payload }) => remove(state, payload),
  [types.RESET]: (state, { payload }) => state = (payload ? payload : initialState),
}, initialState);

export default categoriesReducer;
|
// This file is part of SWGANH which is released under the MIT license.
// See file LICENSE or go to http://swganh.com/LICENSE
#pragma once
#include <cstdint>
#include <string>
#include "swganh/byte_buffer.h"
#include "swganh_core/messages/obj_controller_message.h"
namespace swganh {
namespace messages {
namespace controllers {
// Object-controller message telling a creature to sit on an object at a
// given position within a cell.
class SitOnObject : public ObjControllerMessage
{
public:
    explicit SitOnObject(uint32_t controller_type = 0x0000001B)
        : ObjControllerMessage(controller_type, message_type())
        , cell_id(0)
        , coord_x(0.0f)
        , coord_z(0.0f)
        , coord_y(0.0f)
    {}

    SitOnObject(const ObjControllerMessage& base)
        : ObjControllerMessage(base)
    {
    }

    // Message type identifier for this controller payload.
    static uint32_t message_type() { return 0x0000013B; }

    uint64_t cell_id;
    float coord_x;
    float coord_z;
    float coord_y;

    // Wire order is cell_id, x, z, y — note z before y; deserialize
    // below reads the same order.
    void OnControllerSerialize(swganh::ByteBuffer& buffer) const
    {
        buffer.write(cell_id);
        buffer.write(coord_x);
        buffer.write(coord_z);
        buffer.write(coord_y);
    }

    void OnControllerDeserialize(swganh::ByteBuffer& buffer)
    {
        cell_id = buffer.read<uint64_t>();
        coord_x = buffer.read<float>();
        coord_z = buffer.read<float>();
        coord_y = buffer.read<float>();
    }
};
}}} // namespace swganh::messages::controllers
|
<reponame>roshancd/packplanner<gh_stars>1-10
package com.sample.packplan.util;
/**
* Constant values for pack planner application
*/
public final class Constant {
private Constant() {
// To restrict creating instances of the class
}
public static final String EMPTY_STRING = "";
public static final String COMMA = ",";
public static final String NEW_LINE = "\n";
public static final String INPUT_FORMAT = "#Please enter your data in following format:\n" +
"[Sort order] [max pieces per pack] [max weight per pack]\n" +
"[item id],[item length],[item quantity],[piece weight]\n" +
"[item id],[item length],[item quantity],[piece weight]\n" +
"[item id],[item length],[item quantity],[piece weight]\n";
public static final String CONSTRAINT_PATTERN = "^([A-Z]*_?[A-Z]*_?[A-Z]*),([0-9]*),([0-9]*.?[0-9]+)$";
public static final String ITEM_DATA_PATTERN = "^([0-9]*),([0-9]*),([0-9]*),([0-9]*.?[0-9]+)$";
public static final String OUTPUT_MESSAGE = "================== Pack Planner ================== \n";
public static final String DIVIDER = "================================================== \n";
public static final String SEPARATOR = "-------------------------------------------------- \n";
public static final String PACK_NUMBER = "Pack Number: ";
public static final String PACK_LENGTH = "Pack Length: ";
public static final String PACK_WEIGHT = "Pack Weight: ";
public static final float LENGTH_CONVERTING_FACTOR = 1000.0f;
public static final String UNIT_METER = "m";
public static final String UNIT_KILOGRAM = "kg";
public static final int ID_INCREMENT = 1;
}
|
def cal_union(set1, set2):
    """Return the union of ``set1`` and ``set2`` as a new set."""
    return set1 | set2


# Demo: prints {1, 2, 3, 4, 5}
result = cal_union({1, 2, 3}, {3, 4, 5})
print(result)
|
// Ambient typings for the quiz API.
module API {
    /** Envelope returned by the quiz-questions endpoint. */
    export interface IQuizQuestionsResponse {
        /** HTTP-style status code of the response. */
        status: number;
        /** Questions returned on success. IQuizQuestion is declared elsewhere. */
        data: IQuizQuestion[];
    }
}
|
#!/usr/bin/env bash
# Copyright 2017 Banco Bilbao Vizcaya Argentaria S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# basic project data
export project="spark"
export repository="https://github.com/BBVA/docker-hdfs-alluxio-spark.git"
# Number of worker nodes to deploy (default 7).
nodes=${1:-"7"}

# https://docs.openshift.org/latest/dev_guide/builds/build_inputs.html

# Create new oc project
oc new-project "${project}"

# Create builds for each docker image
for c in "hdfs" "alluxio" "spark" "spark-submitter" "zeppelin"; do
oc process -p REPOSITORY=${repository} \
   -p CONTEXTDIR="${c}" \
   -p ID="${c}" \
   -f oc-build-has.yaml | oc create -f -
done

# Deploy HDFS namenode
export hdfs_image=$(oc get is/hdfs --template="{{ .status.dockerImageRepository }}" --namespace ${project})
oc process \
   -p IMAGE=${hdfs_image} \
   -f "oc-deploy-hdfs-namenode.yaml" | oc create -f -

# Deploy HDFS httpfs node
oc process \
   -p IMAGE=${hdfs_image} \
   -f "oc-deploy-hdfs-httpfs.yaml" | oc create -f -

# Deploy Alluxio master
export alluxio_image=$(oc get is/alluxio --template="{{ .status.dockerImageRepository }}" --namespace ${project})
oc process \
   -p IMAGE=${alluxio_image} \
   -p ALLUXIO_WORKER_MEMORY_SIZE="16GB" \
   -f "oc-deploy-alluxio-master.yaml" | oc create -f -

# Deploy Spark master
export spark_image=$(oc get is/spark --template="{{ .status.dockerImageRepository }}" --namespace ${project})
oc process \
   -p IMAGE=${spark_image} \
   -p SPARK_MASTER_WEBUI_PORT="8080" \
   -p SPARK_WORKER_MEMORY="10G" \
   -p SPARK_WORKER_PORT="35000" \
   -p SPARK_WORKER_WEBUI_PORT="8081" \
   -p SPARK_DAEMON_MEMORY="1G" \
   -f "oc-deploy-spark-master.yaml" | oc create -f -

# Deploy splark history server
oc process \
   -p IMAGE=${spark_image} \
   -f "oc-deploy-spark-history.yaml" | oc create -f -

# Deploy workers
for id in $(seq 1 1 ${nodes}); do
oc process -p ID=${id} \
   -p IMAGE_SPARK="${spark_image}" \
   -p IMAGE_ALLUXIO="${alluxio_image}" \
   -p IMAGE_HDFS="${hdfs_image}" \
   -p "HDFS_MEMORY=1GB" \
   -p ALLUXIO_WORKER_MEMORY_SIZE="16GB" \
   -p SPARK_MASTER_WEBUI_PORT="8080" \
   -p SPARK_WORKER_MEMORY="10G" \
   -p SPARK_WORKER_PORT="35000" \
   -p SPARK_WORKER_WEBUI_PORT="8081" \
   -p SPARK_DAEMON_MEMORY="1G" \
   -f "oc-deploy-has-node.yaml" | oc create -f -
done

# Deploy a Zeppelin client
export zeppelin_image=$(oc get is/zeppelin --template="{{ .status.dockerImageRepository }}" --namespace ${project})
oc process -p ID=0 \
   -p IMAGE=${zeppelin_image} \
   -p SPARK_EXECUTOR_MEMORY="1g" \
   -p SPARK_APP_NAME="BigZeppelin" \
   -p SPARK_CORES_MAX="14" \
   -f "oc-deploy-zeppelin.yaml" | oc create -f -

# Deploy four additional small Zeppelin clients.
for id in $(seq 1 1 4); do
oc process -p ID=${id} \
   -p IMAGE=${zeppelin_image} \
   -p SPARK_EXECUTOR_MEMORY="512m" \
   -p SPARK_APP_NAME="SmallZeppelin" \
   -p SPARK_CORES_MAX="3" \
   -f "oc-deploy-zeppelin.yaml" | oc create -f -
done
# FIX: the loop above was missing its closing 'done', which made the whole
# script a shell syntax error.

# HDFS ports
# MASTER 8020, 8022, 50070,
# SLAVES 50010, 50075, 50020

# ALLUXIO ports
# MASTER 19999, 19998
# SLAVES 29998, 29999, 30000

# SPARK ports
# MASTER 7077, 6066, 8080
# SLAVE 35000, 8081
# DRIVER 51000-51016, 51100,51116, 51200-51216, 51300-51316, 51400-51416, 51500-51516,51600-51616
|
import React from "react";
import ReactDOM from "react-dom";
import { createBrowserHistory } from "history";
import { Router, Route, Switch } from "react-router-dom";
import "assets/scss/material-kit-react.scss?v=1.9.0";
// pages for this product
//import Components from "views/Components/Components.js";
import LandingPage from "views/LandingPage/LandingPage.js";
import Konsultasi from "views/Konsultasi/Konsultasi.js";
import History from "views/History/History.js";
import Kontak from "views/Kontak/Kontak.js";
import Hasil from "views/Hasil/Hasil.js";
// import LoginPage from "views/LoginPage/LoginPage";
// import LandingPageLogin from "views/LandingPage/LandingPageLogin";
//import LoginPage from "views/LoginPage/LoginPage.js";
var hist = createBrowserHistory();
ReactDOM.render(
// <Router history={hist}>
// <Switch>
// <Route path="/" component={LandingPage} />
// <Route path="/Konsultasi" component={Konsultasi} />
// </Switch>
// </Router>,
<Router history={hist}>
<Switch>
<Route exact path="/" component={LandingPage} />
<Route path="/Konsultasi" component={Konsultasi} />
<Route path="/History" component={History} />
<Route path="/Kontak" component={Kontak} />
<Route path="/Hasil" component={Hasil} />
{/* <Route path="/Login" component={LoginPage} />
<Route path="/LandingPageLogin" component={LandingPageLogin} /> */}
</Switch>
</Router>,
document.getElementById("root")
);
|
<filename>js/controllers.js
/* global _, angular, i18n */
'use strict';
// Module that holds every controller of the application.
var controllers = angular.module('acs.controllers', []);

// Root controller: before the app is shown it loads the current user's
// permissions for the administrator, user and role resources in parallel.
// 'loaded' gates the UI; it is set even when a permission request fails.
controllers.controller('root', ['$scope', '$location', '$q', 'user', function($scope, $location, $q, user) {
    $scope.loaded = false;
    $scope.user = user;
    $scope.permissions = {};
    $scope.waiting = false;

    $scope.init = function() {
        // Anonymous visitors have no permissions to fetch.
        if (!user.loggedIn()) {
            $scope.loaded = true;
            return;
        }
        var promises = [];
        promises.push(user.permissions('administrator')
            .then(function(permissions) {
                $scope.permissions.administrator = permissions;
            }));
        promises.push(user.permissions('user')
            .then(function(permissions) {
                $scope.permissions.users = permissions;
            }));
        promises.push(user.permissions('role')
            .then(function(permissions) {
                $scope.permissions.roles = permissions;
            }));
        // Mark the app loaded whether or not every request succeeded.
        $q.all(promises)
            .then(function() {
                $scope.loaded = true;
            }, function() {
                $scope.loaded = true;
            });
    };

    // True when the current URL starts with the given path (case-insensitive).
    $scope.active = function(path) {
        return $location.path().match(new RegExp(path + '.*', 'i')) != null;
    };

    // Clears the session and reloads so all state is rebuilt from scratch.
    $scope.logout = function() {
        $scope.user.clear();
        window.location.reload();
    };
}]);
// Picks the navigation partial: the administrator variant while browsing
// /administrator as a logged-in user, the default partial otherwise.
controllers.controller('navigation', ['$scope', '$location', 'user', function($scope, $location, user) {
    $scope.user = user;

    $scope.navigation = function() {
        var inAdminArea = $scope.active('/administrator') && user.loggedIn();
        return inAdminArea
            ? 'partials/navigation-administrator.html'
            : 'partials/navigation.html';
    };
}]);
// Login form controller: posts the credentials, stores the returned token
// on success and reloads the app; shows i18n error alerts otherwise.
controllers.controller('login', ['$scope', '$location', '$http', '$window', 'alerts', 'user', function($scope, $location, $http, $window, alerts, user) {
    $scope.alerts = alerts;
    $scope.input = {};

    $scope.login = function() {
        $scope.waiting = true;
        $http.post('api/user/login', {
            email: $scope.input.email,
            // NOTE(review): the expression below appears redacted/garbled in
            // the source — confirm it should read $scope.input.password.
            password: $<PASSWORD>
        }).success(function(data) {
            $scope.waiting = false;
            if (data.status) {
                // Persist session and reload so root re-fetches permissions.
                user.setEmail(data.email);
                user.setToken(data.token);
                $location.path('home');
                $window.location.reload();
            } else {
                // No field errors at all means the form was left empty.
                if (_.isEmpty(data.errors)) {
                    data.errors = i18n.t('fill_out_login');
                }
                _.forEach(data.errors, function(error) {
                    if (error != null) {
                        alerts.fail(i18n.s(error.type, error.field));
                    }
                });
            }
        });
    };
}]);
// Registration form controller: validates the password confirmation on the
// client, posts the new account and redirects to the login page on success.
controllers.controller('register', ['$scope', '$location', '$http', 'alerts', function($scope, $location, $http, alerts) {
    $scope.alerts = alerts;
    $scope.input = {};

    $scope.register = function() {
        $scope.waiting = true;
        // Client-side check; the server performs its own validation too.
        if ($scope.input.password != $scope.input.confirmation) {
            alerts.fail(i18n.t('passwords_not_match'));
            $scope.waiting = false;
            return;
        }
        $http.post('api/user/register', {
            email: $scope.input.email,
            // NOTE(review): the expression below appears redacted/garbled in
            // the source — confirm it should read $scope.input.password.
            password: $<PASSWORD>
        }).success(function(data) {
            $scope.waiting = false;
            if (data.status) {
                alerts.success(i18n.t('you_may_login'));
                $location.path('login');
            } else {
                if (_.isEmpty(data.errors)) {
                    data.errors = '';
                }
                _.forEach(data.errors, function(error) {
                    if (error != null) {
                        alerts.fail(i18n.s(error.type, error.field));
                    }
                });
            }
        });
    };
}]);
// Home page controller: only exposes the user service to the view.
controllers.controller('home', ['$scope', '$location', '$http', 'user', function($scope, $location, $http, user) {
    $scope.user = user;
}]);
// Administrator landing controller: fetches account information for the
// authenticated user and shows it in a browser alert.
controllers.controller('administrator', ['$scope', '$location', '$http', 'user', function($scope, $location, $http, user) {
    $scope.user = user;

    $scope.information = function() {
        $http.post('api/user/information', {
            token: $scope.user.token
        }).success(function(data) {
            if (data.status) {
                alert(data.message);
            } else {
                // NOTE(review): failures are silently ignored here — consider
                // surfacing them through the alerts service.
            }
        });
    };
}]);
// User list controller: server-driven ngTable of all users plus deletion.
// Fix: the delete success path previously raised two success alerts (an
// i18n one and a duplicate hard-coded English one); only the i18n alert is
// kept, consistent with the rest of the app.
controllers.controller('users', ['$scope', '$location', '$http', 'user', 'alerts', 'ngTableParams', function($scope, $location, $http, user, alerts, ngTableParams) {
    $scope.user = user;
    $scope.alerts = alerts;
    $scope.tableLoaded = false;

    // Deletes the given user and refreshes the table.
    $scope.delete = function(id) {
        $http.post('api/user/delete', {
            token: $scope.user.getToken(),
            id: id
        }).success(function(data) {
            if (data.status) {
                $scope.tableParams.reload();
                $scope.alerts.success(i18n.t('user_delete'));
            } else {
                $scope.alerts.fail(data.errors);
            }
        });
    };

    // Server-side paginated/sorted table of users.
    $scope.tableParams = new ngTableParams({
        page: 1,
        count: 10,
        sorting: {
            id: 'asc'
        }
    }, {
        total: 0,
        getData: function($defer, params) {
            $http.post('api/user/table', {
                token: $scope.user.getToken(),
                params: JSON.stringify(params.$params)
            }).success(function(data) {
                params.total(data.total);
                $defer.resolve(data.users);
                $scope.tableLoaded = true;
            });
        }
    });
}]);
// Single-user edit controller: loads the user identified by the route id,
// lets the administrator edit fields and manage the user's roles (shown in
// a client-side ngTable), and saves back to the server.
controllers.controller('user', ['$scope', '$timeout', '$location', '$http', '$routeParams', 'user', 'alerts', 'ngTableParams', function($scope, $timeout, $location, $http, $routeParams, user, alerts, ngTableParams) {
    $scope.user = user;
    $scope.alerts = alerts;
    $scope.input = {user: {roles: []}};

    // Loads the user being edited from the server.
    $scope.read = function() {
        $http.post('api/user/read', {
            token: $scope.user.getToken(),
            id: $routeParams.id
        }).success(function(data) {
            if (data.status) {
                $scope.input = {user: data.user};
                $scope.tableParams.reload();
            }
        });
    };

    // Saves the edits; when 'close' is passed, returns to the user list,
    // otherwise stays on the page with the refreshed server copy.
    $scope.update = function(close) {
        $http.post('api/user/update', {
            token: $scope.user.getToken(),
            user: JSON.stringify($scope.input.user)
        }).success(function(data) {
            if (data.status) {
                if (_.isUndefined(close)) {
                    $scope.input = {user: data.user};
                    $scope.tableParams.reload();
                } else {
                    $location.path('administrator/users');
                }
                $scope.alerts.success(i18n.t('user_updated'));
            } else {
                _.forEach(data.errors, function(error) {
                    if (error != null) {
                        alerts.fail(i18n.s(error.type, error.field));
                    }
                });
            }
        });
    };

    // Fetches all known roles for the role picker.
    $scope.getRoles = function() {
        $http.post('api/role/table', {
            token: $scope.user.getToken(),
            params: '{}'
        }).success(function(data) {
            if (data.status) {
                $scope.roles = data.roles;
            }
        });
    };

    // Adds a role to the (unsaved) user after normalizing the name.
    $scope.addRole = function(role) {
        if (_.isEmpty(role)) {
            alerts.fail(i18n.t('enter_role_name'));
            return;
        }
        // JSON.stringify wraps the string in quotes, which the \W strip then
        // removes along with every other non-word character; the net effect
        // is a lowercase word-characters-only role name.
        role = JSON.stringify(role.toLowerCase()).replace(/\W/g, '').trim();
        if (_.isEmpty(role)) {
            alerts.fail(i18n.t('enter_role_name'));
            return;
        }
        $scope.input.user.roles.push(role);
        $scope.tableParams.reload();
    };

    // Removes a role from the (unsaved) user.
    $scope.deleteRole = function(role) {
        $scope.input.user.roles = _.without($scope.input.user.roles, role);
        $scope.tableParams.reload();
    };

    // Client-side table over the in-memory roles array.
    $scope.tableParams = new ngTableParams({
        page: 1,
        count: 10,
        sorting: {
            role: 'asc'
        }
    }, {
        total: 0,
        getData: function($defer, params) {
            params.total($scope.input.user.roles.length);
            $defer.resolve($scope.input.user.roles);
        }
    });

    // Discards edits and returns to the user list.
    $scope.cancel = function() {
        $location.path('administrator/users');
    };
}]);
// Role list controller: server-driven ngTable of roles plus create/delete.
controllers.controller('roles', ['$scope', '$location', '$http', 'user', 'alerts', 'ngTableParams', function($scope, $location, $http, user, alerts, ngTableParams) {
    $scope.user = user;
    $scope.alerts = alerts;
    $scope.input = {};
    $scope.tableLoaded = false;

    // Server-side paginated/sorted table of roles.
    $scope.tableParams = new ngTableParams({
        page: 1,
        count: 10,
        sorting: {
            role: 'asc'
        }
    }, {
        total: 0,
        getData: function($defer, params) {
            $http.post('api/role/table', {
                token: $scope.user.getToken(),
                params: JSON.stringify(params.$params)
            }).success(function(data) {
                params.total(data.total);
                $defer.resolve(data.roles);
                $scope.tableLoaded = true;
            });
        }
    });

    // Creates a new role after normalizing its name (same scheme as the
    // user controller: lowercase, word characters only).
    $scope.addRole = function(role) {
        if (_.isEmpty(role)) {
            alerts.fail(i18n.t('enter_role_name'));
            return;
        }
        role = JSON.stringify(role.toLowerCase()).replace(/\W/g, '').trim();
        if (_.isEmpty(role)) {
            alerts.fail(i18n.t('enter_role_name'));
            return;
        }
        $http.post('api/role/create', {
            token: $scope.user.getToken(),
            role: role
        }).success(function(data) {
            if (data.status) {
                $scope.tableParams.reload();
                $scope.alerts.success(i18n.t('role_added'));
            } else {
                _.forEach(data.errors, function(error) {
                    if (error != null) {
                        alerts.fail(i18n.s(error.type, error.field));
                    }
                });
            }
        });
    };

    // Deletes an existing role and refreshes the table.
    $scope.deleteRole = function(role) {
        $http.post('api/role/delete', {
            token: $scope.user.getToken(),
            role: role
        }).success(function(data) {
            if (data.status) {
                $scope.tableParams.reload();
                $scope.alerts.success(i18n.t('role_deleted'));
            } else {
                _.forEach(data.errors, function(error) {
                    if (error != null) {
                        alerts.fail(i18n.s(error.type, error.field));
                    }
                });
            }
        });
    };
}]);
// Single-role edit controller: shows every resource with its permissions
// for the role from the route, and saves each resource's permissions with
// one request apiece, reporting success only after all requests finish.
controllers.controller('role', ['$scope', '$location', '$http', '$routeParams', 'user', 'alerts', 'ngTableParams', function($scope, $location, $http, $routeParams, user, alerts, ngTableParams) {
    $scope.user = user;
    $scope.alerts = alerts;
    $scope.input = {resources: []};
    // Number of in-flight update requests; alerts fire when it reaches 0.
    $scope.updateCount = 0;

    // Saves the permissions of every resource. When 'close' is passed the
    // page returns to the role list after the last request completes.
    $scope.update = function(close) {
        $scope.failCount = 0;
        _.forEach($scope.input.resources, function(resource) {
            $scope.updateCount += 1;
            $http.post('api/role/update', {
                token: $scope.user.getToken(),
                role: $routeParams.role,
                resource: resource.name,
                permissions: JSON.stringify(resource.permissions)
            }).success(function(data) {
                if (!data.status) {
                    $scope.failCount += 1;
                    // NOTE(review): only the errors of the last failing
                    // request are kept; earlier ones are overwritten.
                    $scope.errors = data.errors;
                }
                $scope.updateCount -= 1;
                // Last response: navigate/reload and report the outcome.
                if ($scope.updateCount == 0) {
                    if (_.isUndefined(close)) {
                        $scope.tableParams.reload();
                    } else {
                        $location.path('administrator/roles');
                    }
                    if ($scope.failCount) {
                        _.forEach($scope.errors, function(error) {
                            if (error != null) {
                                alerts.fail(i18n.s(error.type, error.field));
                            }
                        });
                    } else {
                        alerts.success(i18n.t('role_updated'));
                    }
                }
            });
        });
    };

    // Server-side table of resources with this role's permissions.
    $scope.tableParams = new ngTableParams({
        page: 1,
        count: 10,
        sorting: {
            resource: 'asc'
        }
    }, {
        total: 0,
        getData: function($defer, params) {
            $http.post('api/resource/table', {
                token: $scope.user.getToken(),
                params: JSON.stringify(params.$params),
                role: $routeParams.role
            }).success(function(data) {
                $scope.input.resources = data.resources;
                params.total(data.total);
                $defer.resolve(data.resources);
            });
        }
    });

    // Discards edits and returns to the role list.
    $scope.cancel = function() {
        $location.path('administrator/roles');
    };
}]);
|
<reponame>slaufer/Prebid.js<filename>modules/iasBidAdapter.js
import * as utils from '../src/utils.js';
import { registerBidder } from '../src/adapters/bidderFactory.js';
const BIDDER_CODE = 'ias';
const otherBidIds = [];
/**
 * A bid request is valid only when both required params are present and
 * truthy.
 * @param {object} bid bid with `params.pubId` and `params.adUnitPath`
 * @return {boolean}
 */
function isBidRequestValid(bid) {
  const { pubId, adUnitPath } = bid.params;
  return Boolean(pubId) && Boolean(adUnitPath);
}
/**
 * Converts GPT-style size array into a string
 * @param {Array} sizes: list of GPT-style sizes, e.g. [[300, 250], [300, 300]]
 * @return {String} a string containing sizes, e.g. '[300.250,300.300]';
 *                  the empty string when the input is not an array
 */
function stringifySlotSizes(sizes) {
  // Array.isArray is the stdlib form of utils.isArray used before; the
  // original reduce-with-push was just a map in disguise.
  if (!Array.isArray(sizes)) {
    return '';
  }
  return `[${sizes.map((size) => size.join('.')).join(',')}]`;
}
// Serializes one ad slot as '{id:<adUnitCode>,ss:<sizes>,p:<adUnitPath>}'
// for the IAS pub-service query string.
function stringifySlot(bidRequest) {
  const id = bidRequest.adUnitCode;
  const ss = stringifySlotSizes(bidRequest.sizes);
  const p = bidRequest.params.adUnitPath;
  const slot = { id, ss, p };
  // Emit 'key:value' pairs in declaration order (id, ss, p).
  const keyValues = utils.getKeys(slot).map(function(key) {
    return [key, slot[key]].join(':');
  });
  return '{' + keyValues.join(',') + '}';
}
// Reports the viewport size as 'width.height'; -1 for a missing dimension.
function stringifyWindowSize() {
  const width = window.innerWidth || -1;
  const height = window.innerHeight || -1;
  return `${width}.${height}`;
}
// Reports the screen size as 'width.height'; -1 for a missing dimension.
function stringifyScreenSize() {
  const width = (window.screen && window.screen.width) || -1;
  const height = (window.screen && window.screen.height) || -1;
  return `${width}.${height}`;
}
/**
 * Builds the single GET request sent to the IAS pub service for the whole
 * auction. The first bid carries the response; the ids of the remaining
 * bids are remembered in `otherBidIds` so interpretResponse can answer
 * them too.
 */
function buildRequests(bidRequests) {
  const IAS_HOST = 'https://pixel.adsafeprotected.com/services/pub';
  const anId = bidRequests[0].params.pubId;

  // Fix: reset ids kept from a previous auction. The module-level array
  // otherwise grows across calls and interpretResponse would emit stale
  // duplicate bid responses.
  otherBidIds.length = 0;

  let queries = [];
  queries.push(['anId', anId]);
  queries = queries.concat(bidRequests.reduce(function(acc, request) {
    acc.push(['slot', stringifySlot(request)]);
    return acc;
  }, []));

  queries.push(['wr', stringifyWindowSize()]);
  queries.push(['sr', stringifyScreenSize()]);
  queries.push(['url', encodeURIComponent(window.location.href)]);

  const queryString = encodeURI(queries.map(qs => qs.join('=')).join('&'));

  bidRequests.forEach(function (request) {
    if (bidRequests[0].bidId !== request.bidId) {
      otherBidIds.push(request.bidId);
    }
  });

  return {
    method: 'GET',
    url: IAS_HOST,
    data: queryString,
    bidRequest: bidRequests[0]
  };
}
// Collects the page-level targeting keywords from an IAS response: every
// brand-safety entry plus the 'fr' (fraud) and 'custom' fields.
function getPageLevelKeywords(response) {
  const keywords = {};
  shallowMerge(keywords, response.brandSafety);
  keywords.fr = response.fr;
  keywords.custom = response.custom;
  return keywords;
}
/**
 * Copies every own enumerable key of `src` onto `dest`, in place.
 * (The original used reduce purely for its side effects; a forEach over
 * Object.keys — the stdlib equivalent of utils.getKeys — says the same
 * thing directly.)
 * @param {Object} dest target object, mutated
 * @param {Object} src source object
 */
function shallowMerge(dest, src) {
  Object.keys(src).forEach((key) => {
    dest[key] = src[key];
  });
}
// Translates the IAS service response into prebid bid responses: one for
// the primary bid of the request plus one clone per id remembered in
// otherBidIds during buildRequests.
function interpretResponse(serverResponse, request) {
  const iasResponse = serverResponse.body;
  const bidResponses = [];
  // Keys in common bid response are not used;
  // Necessary to get around with prebid's common bid response check
  const commonBidResponse = {
    requestId: request.bidRequest.bidId,
    cpm: 0.01,
    width: 100,
    height: 200,
    creativeId: 434,
    dealId: 42,
    currency: 'USD',
    netRevenue: true,
    ttl: 360
  };

  // Attach the IAS targeting data (brand safety, fraud, custom, slots).
  shallowMerge(commonBidResponse, getPageLevelKeywords(iasResponse));
  commonBidResponse.slots = iasResponse.slots;
  bidResponses.push(commonBidResponse);

  // Answer every other bid of the auction with a shallow copy that only
  // differs in its requestId.
  otherBidIds.forEach(function (bidId) {
    var otherResponse = Object.assign({}, commonBidResponse);
    otherResponse.requestId = bidId;
    bidResponses.push(otherResponse);
  });

  return bidResponses;
}
// Prebid bidder spec for the IAS adapter; registering it makes the adapter
// available under the 'ias' bidder code.
export const spec = {
  code: BIDDER_CODE,
  aliases: [],
  isBidRequestValid: isBidRequestValid,
  buildRequests: buildRequests,
  interpretResponse: interpretResponse
};

registerBidder(spec);
|
<reponame>LauraBeatris/floripamais-strapi-api
module.exports = {
  // Secret used to sign Strapi JWTs.
  // NOTE(review): the hard-coded fallback below is committed to source
  // control, so any deployment that does not set JWT_SECRET signs tokens
  // with a publicly known key — consider requiring the env var instead.
  jwtSecret: process.env.JWT_SECRET || '45fa7028-627e-44f5-92be-f72482e73f63'
};
|
#!/bin/bash
echo "START"
cd /root/portworx-setup/kubespray
cat > wait_for_ssh.yml <<EOF
---
- name: wait for connection to new VMs
hosts: all
tasks:
- name: Wait for ssh
wait_for:
port: 22
host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
search_regex: OpenSSH
delay: 10
connection: local
EOF
echo "waiting for SSH come up on the VMs"
until ansible-playbook -i inventory/testdrivecluster/hosts.yaml wait_for_ssh.yml
do
echo "waiting, then trying again"
sleep 10
done
#Restart NTP on VM/lab resume
ansible all -i inventory/testdrivecluster/hosts.yaml -m shell -b -a "systemctl restart chronyd"
#ansible all -i inventory/testdrivecluster/hosts.yaml -m shell -b -a "systemctl restart portworx"
rm wait_for_ssh.yml
# Grafana pod should be restarted to fix transport errors on lab resumes/ntp reboot
export KUBECONFIG=/root/portworx-setup/kubespray/inventory/testdrivecluster/artifacts/admin.conf
#kubectl label nodes node2 node3 node4 px/service=restart
kubectl delete po -n kube-system -l name=portworx --wait=false
# Stork, CSI, Grafana dont like px or NTP reboot, so we let them restart
kubectl delete po -n central -l app=pxcentral-grafana --wait=false
kubectl delete po -n kube-system -l name=stork --wait=false
kubectl delete po -n kube-system -l app=px-csi-driver --wait=false
# Refresh the app to make sure no old cache is used.
cd /home/portworx/testdrive-workspace/example
kubectl create -f postgres-db.yaml
sleep 3
kubectl create -f k8s-webapp.yaml
echo "Ran resume operation script to sync ntp, $(date)" >> /root/portworx-setup/resume-op.log
echo "FINISH"
|
<reponame>famod/qson<gh_stars>10-100
package io.quarkus.qson;
/**
 * Unchecked exception thrown by qson for (de)serialization failures.
 * Mirrors the four standard RuntimeException constructors.
 */
public class QsonException extends RuntimeException {

    /** Creates an exception with no detail message or cause. */
    public QsonException() {
    }

    /** Creates an exception with the given detail message. */
    public QsonException(String message) {
        super(message);
    }

    /** Creates an exception with the given detail message and cause. */
    public QsonException(String message, Throwable cause) {
        super(message, cause);
    }

    /** Creates an exception wrapping the given cause. */
    public QsonException(Throwable cause) {
        super(cause);
    }
}
|
public class ZeroOneKnapsack_TopDown {

    /**
     * Returns the maximum total profit achievable with the given items
     * without exceeding the knapsack capacity (0/1 knapsack), solved
     * top-down with memoized recursion.
     */
    public int knapsack(int[] profits, int[] weights, int capacity) {
        Integer[][] memo = new Integer[profits.length][capacity + 1];
        return solve(memo, profits, weights, capacity, 0);
    }

    // Best profit using items from `index` onward with `remaining`
    // capacity; results are cached in `memo` so each state is solved once.
    private int solve(Integer[][] memo, int[] profits, int[] weights, int remaining, int index) {
        if (remaining <= 0 || index < 0 || index >= profits.length) {
            return 0; // no capacity left or no items left
        }
        if (memo[index][remaining] == null) {
            // Option 1: skip the current item.
            int skipped = solve(memo, profits, weights, remaining, index + 1);
            // Option 2: take the current item, if it fits.
            int taken = 0;
            if (weights[index] <= remaining) {
                taken = profits[index] + solve(memo, profits, weights, remaining - weights[index], index + 1);
            }
            memo[index][remaining] = Math.max(taken, skipped);
        }
        return memo[index][remaining];
    }

    public static void main(String[] args) {
        ZeroOneKnapsack_TopDown ks = new ZeroOneKnapsack_TopDown();
        int[] profits = { 31, 26, 72, 17 };
        int[] weights = { 3, 1, 5, 2 };
        int maxProfit = ks.knapsack(profits, weights, 7);
        System.out.println(maxProfit);
    }
}
|
<reponame>allancssio1/registrationTeacherAndStudents
const { date, age, grade, modalidad, graduation } = require('../lib/utils')
const db = require('../config/db')
module.exports = {
all (callback) {
db.query(`
SELECT *
FROM teachers
ORDER BY name ASC`,
function (err, results) {
if (err) throw `Error Database ${err}`
callback(results.rows)
}
)
},
findBy (filter, callback) {
db.query(`
SELECT *
FROM teachers
WHERE teachers.name ILIKE '%${filter}%'
OR teachers.subject_taught ILIKE '%${filter}%'
ORDER BY name ASC`,
function (err, results) {
if (err) throw `Error Database ${err}`
callback(results.rows)
}
)
},
create (data, callback) {
const query = `
INSERT INTO teachers (
avatar_url,
name,
birth_date,
education_level,
class_type,
subject_taught,
created_at
)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING id`
const values = [
data.avatar_url,
data.name,
date(data.birth_date).iso,
data.education_level,
data.class_type,
data.subject_taught,
date(Date.now()).iso
]
db.query (query, values,
function (err, results) {
if (err) throw `Error Database ${err}`
callback(results.rows[0].id)
}
)
},
find (id, callback) {
db.query (`
SELECT *
FROM teachers
WHERE id = $1`,
[id], function (err, results) {
if (err) throw `Error Database ${err}`
callback (results.rows[0])
}
)
},
update (data, callback) {
const query = `
UPDATE teachers
SET avatar_url=($1),
name=($2),
birth_date=($3),
education_level=($4),
class_type=($5),
subject_taught=($6)
WHERE id=$7`
const values = [
data.avatar_url,
data.name,
date(data.birth_date).iso,
data.education_level,
data.class_type,
data.subject_taught,
data.id
]
db.query (query, values,
function (err, results) {
if (err) throw `Error Database ${err}`
callback()
}
)
},
delete (id, callback) {
db.query (`
DELETE FROM teachers
WHERE id = $1`, [id],
function (err, results) {
if (err) throw `Error Database ${err}`
callback ()
}
)
},
paginate (params) {
const {filter, limit, offset, callback} = params
let query = "",
filterQuery = "",
totalQuery = `(
SELECT count(*) FROM teachers
) AS total`
if (filter) {
filterQuery = `
WHERE teachers.name ILIKE '%${filter}%'`
totalQuery = `(
SELECT count(*)
FROM teachers ${filterQuery}
) AS total`
}
query = `
SELECT teachers.*, ${totalQuery},
count(students) AS total_students
FROM teachers
LEFT JOIN students ON (teachers.id = students.teacher_id)
${filterQuery}
GROUP BY teachers.id
LIMIT $1 OFFSET $2`
db.query(query, [limit, offset],
function (err, results) {
if (err) throw `Database ${err}`
callback(results.rows)
}
)
}
}
|
-- Count each customer's orders per calendar month.
-- NOTE(review): months from different years are grouped together; add
-- YEAR(Timestamp) to SELECT and GROUP BY if per-year counts are wanted.
SELECT COUNT(*) AS 'Number of purchases', CustomerID,
MONTH(Timestamp) AS 'Month of Purchase'
FROM Orders
GROUP BY CustomerID, MONTH(Timestamp);
|
import numpy as np
def reshape_image_coordinates(image_size_x, image_size_y):
    """Return the u (column) coordinate for every pixel of an image.

    The coordinates run from ``image_size_x - 1`` down to ``0`` and each
    value is repeated ``image_size_y`` times, giving a flat array of
    length ``image_size_x * image_size_y``.

    (The unused local ``pixel_length`` from the original was removed.)
    """
    # np.repeat([x-1, ..., 0], y) -> [x-1]*y + [x-2]*y + ... + [0]*y
    return np.repeat(np.arange(image_size_x - 1, -1, -1), image_size_y)
|
<gh_stars>0
import type Timeline from '../Timeline';
import type { Effect } from '../effects/Effect';
import Bus from '../Bus';
export interface ContainerProps {
  /** Optional effects applied to this container's bus. */
  effects?: Effect[];
}

/**
 * Base class for anything that owns an effect Bus and can be attached to
 * a Timeline.
 */
export default abstract class Container {
  // Set later via bindTimeline(); not available during construction.
  protected timeline: Timeline;
  public readonly bus: Bus;

  constructor({
    effects = [],
  }: ContainerProps ) {
    this.bus = new Bus({
      effects,
    });
  }

  /** Associates this container with the timeline that drives it. */
  public bindTimeline( timeline: Timeline ): void {
    this.timeline = timeline;
  }

  /** Releases the underlying bus; call when the container is discarded. */
  public destroy(): void {
    this.bus.destroy();
  }
}
|
"""empty message
Revision ID: <KEY>
Revises: da366b325ea9
Create Date: 2020-09-30 18:14:38.190702
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'da366b325ea9'
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable tickets.mentor_slackUID (Slack user id of the mentor)."""
    op.add_column('tickets', sa.Column('mentor_slackUID', sa.String(), nullable=True))


def downgrade():
    """Revert :func:`upgrade` by dropping tickets.mentor_slackUID."""
    op.drop_column('tickets', 'mentor_slackUID')
|
#!/bin/sh

## Passed in from environment variables:
# HOSTNAME=
# PORT=8545
# NETWORK_ID=108

CLEAR_DATA_FILE_PATH="${VOLUME_PATH}/.clear_data_key_${CLEAR_DATA_KEY}"

# Purge chain data when CLEAR_DATA_KEY changes. FIX: the original used the
# bash-only '[[ ... ]]' test under a /bin/sh shebang, which fails on POSIX
# shells such as dash/ash; plain '[ ... ]' tests are portable.
if [ -n "$CLEAR_DATA_KEY" ] && [ ! -f "$CLEAR_DATA_FILE_PATH" ]; then
    echo "Detected change in CLEAR_DATA_KEY. Purging data."
    rm -rf ${VOLUME_PATH}/*
    rm -rf ${VOLUME_PATH}/.clear_data_key_*
    echo "Local data cleared from '${VOLUME_PATH}/*'"
    echo "Contents of volume dir: $(ls -alh $VOLUME_PATH)"
    touch $CLEAR_DATA_FILE_PATH
fi

echo "Starting Geth..."
## Command to kick off geth
geth --dev --datadir $VOLUME_PATH --rpc --rpcaddr $HOSTNAME --rpcvhosts=* --rpcport $PORT --networkid $NETWORK_ID --rpcapi 'eth,net' --gasprice '0' --targetgaslimit '4294967295' --nousb --gcmode=archive
|
package com.aqzscn.www.global.domain.co;
import com.aqzscn.www.global.domain.dto.ReturnError;
import lombok.Getter;
import lombok.NonNull;
import org.springframework.validation.ObjectError;
import java.util.List;
/**
 * Global application exception. Wraps a {@link ReturnError} (and, for
 * validation failures, the failing {@link ObjectError} results) so callers
 * can build a uniform error response. Instances are created through the
 * static {@code of(...)} factories.
 *
 * @author Godbobo
 * @date 2019/5/10.
 */
@Getter
public class AppException extends RuntimeException {

    // The error category returned to the client.
    private ReturnError error;
    // Validation results; non-null only for validation failures.
    private List<ObjectError> results;

    private AppException(String message) {
        super(message);
        this.error = ReturnError.FAILED;
    }

    private AppException(ReturnError error) {
        super(error.getTitle());
        this.error = error;
    }

    // When results are passed, this is always a validation-failure exception.
    private AppException(List<ObjectError> results) {
        super(ReturnError.VALIDATE_FAILED.getTitle());
        this.error = ReturnError.VALIDATE_FAILED;
        this.results = results;
    }

    private AppException(ReturnError error, String message) {
        super(message);
        this.error = error;
    }

    // For a custom error message (category defaults to FAILED).
    public static AppException of(@NonNull String message) {
        return new AppException(message);
    }

    // For an error type already defined in the system.
    public static AppException of(ReturnError error){
        return new AppException(error);
    }

    // Parameter-validation failure.
    public static AppException of(List<ObjectError> results) {
        return new AppException(results);
    }

    // Existing error type, but with a custom description.
    public static AppException of(ReturnError error, @NonNull String message) {
        return new AppException(error, message);
    }
}
|
<reponame>psema4/Atomic-OS<gh_stars>1-10
module("HxStream");
test("load", function() {
var myStream = new HxStream();
ok(myStream instanceof HxStream, "new HxStream");
});
|
#!/bin/bash
#
# START: CONFIGURATION OPTIONS
#
# The below two paths should point to the data set root and ROS package
package_source=~/catkin_ws/src/a2d2_to_ros
data_root=~/data/a2d2
# Duration (in integer seconds) to record into a single bag file before splitting off a new one
split_duration=7
# Relative location of the sensor fusion data set being converted
# Set 'sensor_data' to the desired location
munich_data= #TODO
gaimersheim_data= #TODO
ingolstadt_data=camera_lidar/20190401_145936
sensor_data=$ingolstadt_data
# Earliest time in the data set (in microseconds) for which all sensor modalities provide data
# Set 'record_start_time' to the appropriate time for the dataset being converted
munich_start_time= #TODO
gaimersheim_start_time= #TODO
ingolstadt_start_time=1554121595035037
record_start_time=$ingolstadt_start_time
# Approximate duration (in integer seconds) of the data set
# Set 'data_set_duration' to the appropriate duration
munich_duration= #TODO
gaimersheim_duration= #TODO
ingolstadt_duration=746
data_set_duration=$ingolstadt_duration
#
# END: CONFIGURATION OPTIONS
#
bus_data_subdir=/bus
data_source="$data_root/$sensor_data"
# Camera/lidar mount points present in the sensor-fusion data set.
sensor_locations=(cam_front_center cam_front_left cam_front_right cam_rear_center cam_side_left cam_side_right)

# Convert bus signal data
# Walk the data set in split_duration-sized windows so each output bag
# stays small; fail the whole script if any conversion fails.
start_time=0
while [ $start_time -lt $data_set_duration ]
do
  end_time=$(( $start_time + $split_duration ))
  sub_dir="timespan_${start_time}s_${end_time}s"
  if [ ! -d "$sub_dir" ]; then
    mkdir $sub_dir
  fi
  rosrun a2d2_to_ros sensor_fusion_bus_signals --sensor-config-json-path $data_root --sensor-config-schema-path $package_source/schemas/sensor_config.schema --bus-signal-json-path $data_source$bus_data_subdir --bus-signal-schema-path $package_source/schemas/sensor_fusion_bus_signal.schema --min-time-offset $start_time --duration $split_duration --output-path $sub_dir --include-clock-topic true --start-time $record_start_time || exit 1
  start_time=$end_time
done
# Convert sensor data
# For each sensor mount point, convert camera frames and then lidar data in
# the same split_duration windows as the bus signals. Only the bus-signal
# pass publishes the clock topic, so these passes disable it.
for location in "${sensor_locations[@]}"
do
  start_time=0
  camera_data="$data_source/camera/$location"
  while [ $start_time -lt $data_set_duration ]
  do
    end_time=$(( $start_time + $split_duration ))
    sub_dir="timespan_${start_time}s_${end_time}s"
    if [ ! -d "$sub_dir" ]; then
      mkdir $sub_dir
    fi
    rosrun a2d2_to_ros sensor_fusion_camera --camera-data-path $camera_data --frame-info-schema-path $package_source/schemas/sensor_fusion_camera_frame.schema --sensor-config-path $data_root --sensor-config-schema-path $package_source/schemas/sensor_config.schema --min-time-offset $start_time --duration $split_duration --output-path $sub_dir --include-clock-topic false --start-time $record_start_time || exit 1
    start_time=$end_time
  done
  # Lidar pass: needs the matching camera data for frame info.
  lidar_data="$data_source/lidar/$location"
  start_time=0
  camera_data="$data_source/camera/$location"
  while [ $start_time -lt $data_set_duration ]
  do
    end_time=$(( $start_time + $split_duration ))
    sub_dir="timespan_${start_time}s_${end_time}s"
    if [ ! -d "$sub_dir" ]; then
      mkdir $sub_dir
    fi
    rosrun a2d2_to_ros sensor_fusion_lidar --lidar-data-path $lidar_data --camera-data-path $camera_data --frame-info-schema-path $package_source/schemas/sensor_fusion_camera_frame.schema --min-time-offset $start_time --duration $split_duration --output-path $sub_dir --include-clock-topic false --start-time $record_start_time || exit 1
    start_time=$end_time
  done
done
|
#!/bin/bash
# Resolve the directory this script lives in so it can run from anywhere.
SCRIPTPATH=$( cd $(dirname $0) ; pwd -P )
BUILDPATH=${SCRIPTPATH}/build

# Abort on first error; echo each command as it runs.
set -e
set -o xtrace

# Start from a clean build directory.
rm -rf "${BUILDPATH}"
mkdir -p "${BUILDPATH}"

javac "${SCRIPTPATH}/src/DrawingWindow.java" \
      "${SCRIPTPATH}/src/ColorPalette.java" \
      "${SCRIPTPATH}/src/ColorMap.java" \
      -d "${BUILDPATH}"

#jar cf "${SCRIPTPATH}/DrawingWindow.jar" -C "${BUILDPATH}" org

# Ship the sources alongside the compiled classes inside the jar.
cp ${SCRIPTPATH}/src/* ${BUILDPATH}/org/finomnis/common/graphics/
jar cf "${SCRIPTPATH}/DrawingWindow.jar" -C "${BUILDPATH}" org
|
<reponame>janitha09/eve
// Copyright (c) 2017-2018 Zededa, Inc.
// SPDX-License-Identifier: Apache-2.0
// Common code to communicate to zedcloud
package zedcloud
import (
	"crypto"
	"crypto/ecdsa"
	"crypto/tls"
	"crypto/x509"
	"encoding/asn1"
	"encoding/pem"
	"errors"
	"io"
	"io/ioutil"
	"math/big"
	"strings"
	"time"

	"github.com/lf-edge/eve/pkg/pillar/cmd/tpmmgr"
	log "github.com/sirupsen/logrus"
	"golang.org/x/crypto/ocsp"
)
const (
	// Device identity material is baked into the read-only /config partition.
	identityDirname = "/config"
	serverFilename  = identityDirname + "/server"               // cloud controller host[:port]
	deviceCertName  = identityDirname + "/device.cert.pem"      // device certificate (PEM)
	deviceKeyName   = identityDirname + "/device.key.pem"       // device private key (PEM, non-TPM case)
	rootCertName    = identityDirname + "/root-certificate.pem" // CA used to verify the controller
)
//TpmPrivateKey is a custom implementation of the crypto.PrivateKey interface
//whose signing operation is delegated to the TPM (see Sign below).
type TpmPrivateKey struct {
	PublicKey crypto.PublicKey
}
//Helper structure to pack ecdsa signature for ASN1 encoding
type ecdsaSignature struct {
	R, S *big.Int
}
//Public implements crypto.PrivateKey interface.
//
//The public key is recovered from the on-disk device certificate rather
//than queried from the TPM. Returns nil if the certificate cannot be
//read or parsed, or if it does not hold an ECDSA public key; callers
//must handle a nil result.
func (s TpmPrivateKey) Public() crypto.PublicKey {
	clientCertName := "/config/device.cert.pem"
	clientCertBytes, err := ioutil.ReadFile(clientCertName)
	if err != nil {
		return nil
	}
	block, _ := pem.Decode(clientCertBytes)
	if block == nil {
		// No PEM block in the file; previously this caused a
		// nil-pointer panic on block.Bytes.
		return nil
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		// Previously the error was discarded and a nil cert was
		// dereferenced below.
		return nil
	}
	ecdsaPublicKey, ok := cert.PublicKey.(*ecdsa.PublicKey)
	if !ok {
		// Not an ECDSA certificate; the unchecked assertion used to panic.
		return nil
	}
	return ecdsaPublicKey
}
//Sign implements the crypto.PrivateKey / crypto.Signer interface.
//The rand reader and signer options are ignored: the digest is handed
//directly to the TPM (tpmmgr.TpmSign) and the resulting (R, S) pair is
//returned ASN.1-encoded, as expected for ECDSA signatures.
func (s TpmPrivateKey) Sign(r io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
	R, S, err := tpmmgr.TpmSign(digest)
	if err != nil {
		return nil, err
	}
	return asn1.Marshal(ecdsaSignature{R, S})
}
//GetClientCert prepares tls.Certificate to connect to the cloud Controller.
//Non-TPM devices return the on-disk certificate/key pair; TPM-capable
//devices pair the on-disk certificate with a TPM-backed private key.
func GetClientCert() (tls.Certificate, error) {
	if !tpmmgr.IsTpmEnabled() {
		//Not a TPM capable device, return openssl certificate
		return tls.LoadX509KeyPair(deviceCertName, deviceKeyName)
	}
	// TPM capable device, return TPM based certificate
	deviceCertBytes, err := ioutil.ReadFile(deviceCertName)
	if err != nil {
		return tls.Certificate{}, err
	}
	deviceCertDERBytes, _ := pem.Decode(deviceCertBytes)
	if deviceCertDERBytes == nil {
		// A certificate file without a PEM block previously caused a
		// nil-pointer panic below; fail explicitly instead.
		return tls.Certificate{}, errors.New("GetClientCert: no PEM block found in " + deviceCertName)
	}
	deviceTLSCert := tls.Certificate{}
	deviceTLSCert.Certificate = append(deviceTLSCert.Certificate,
		deviceCertDERBytes.Bytes)
	tpmPrivKey := TpmPrivateKey{}
	tpmPrivKey.PublicKey = tpmPrivKey.Public()
	deviceTLSCert.PrivateKey = tpmPrivKey
	return deviceTLSCert, nil
}
// If a server arg is specified it overrides the serverFilename content.
// If a clientCert is specified it overrides the device*Name files.
//
// GetTlsConfig builds the client-side tls.Config used to talk to the
// cloud controller: client certificate, expected server name, root CA
// pool from /config, a restricted cipher-suite list, and TLS 1.2 as the
// minimum protocol version.
func GetTlsConfig(serverName string, clientCert *tls.Certificate) (*tls.Config, error) {
	if serverName == "" {
		// get the server name
		bytes, err := ioutil.ReadFile(serverFilename)
		if err != nil {
			return nil, err
		}
		// The file may contain host:port; only the host part is used for
		// server-name verification.
		strTrim := strings.TrimSpace(string(bytes))
		serverName = strings.Split(strTrim, ":")[0]
	}
	if clientCert == nil {
		deviceTLSCert, err := GetClientCert()
		if err != nil {
			return nil, err
		}
		clientCert = &deviceTLSCert
	}
	// Load CA cert
	caCert, err := ioutil.ReadFile(rootCertName)
	if err != nil {
		return nil, err
	}
	caCertPool := x509.NewCertPool()
	caCertPool.AppendCertsFromPEM(caCert)
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{*clientCert},
		ServerName:   serverName,
		RootCAs:      caCertPool,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
		// Require TLS 1.2 at a minimum.
		MinVersion: tls.VersionTLS12,
	}
	// NOTE(review): BuildNameToCertificate is deprecated in newer Go
	// releases (certificate selection is automatic there); kept for
	// behavior parity — confirm before removing.
	tlsConfig.BuildNameToCertificate()
	return tlsConfig, nil
}
// stapledCheck validates the stapled OCSP response attached to the TLS
// connection. Returns true only if the response parses against the
// issuer certificate, has not expired, and reports status Good.
func stapledCheck(connState *tls.ConnectionState) bool {
	if connState.VerifiedChains == nil {
		log.Errorln("stapledCheck: No VerifiedChains")
		return false
	}
	if len(connState.VerifiedChains[0]) < 2 {
		// Need at least leaf + issuer; the previous `== 0` check still
		// allowed a single-element (e.g. self-signed) chain, and the
		// [1] index below would panic.
		log.Errorln("stapledCheck: VerifiedChains too short")
		return false
	}
	issuer := connState.VerifiedChains[0][1]
	resp, err := ocsp.ParseResponse(connState.OCSPResponse, issuer)
	if err != nil {
		log.Errorln("stapledCheck: error parsing response: ", err)
		return false
	}
	// Reject responses whose NextUpdate is already in the past.
	now := time.Now()
	age := now.Unix() - resp.ProducedAt.Unix()
	remain := resp.NextUpdate.Unix() - now.Unix()
	log.Debugf("OCSP age %d, remain %d\n", age, remain)
	if remain < 0 {
		log.Errorln("OCSP expired.")
		return false
	}
	if resp.Status == ocsp.Good {
		log.Debugln("Certificate Status Good.")
	} else if resp.Status == ocsp.Unknown {
		log.Errorln("Certificate Status Unknown")
	} else {
		log.Errorln("Certificate Status Revoked")
	}
	return resp.Status == ocsp.Good
}
|
# AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/12_top.ipynb (unless otherwise specified).
__all__ = ['empty_tensor_handling_loss', 'nan_loss_handling', 'create_dummy_if_empty', 'BaseTop', 'SequenceLabel',
'Classification', 'PreTrain', 'Seq2Seq', 'MultiLabelClassification', 'MaskLM']
# Cell
import logging
from functools import partial
from typing import Dict, Tuple, Union
import tensorflow as tf
import tensorflow_addons as tfa
import transformers
from transformers.modeling_tf_utils import TFSharedEmbeddings
from tensorflow_addons.layers.crf import CRF
from tensorflow_addons.text.crf import crf_log_likelihood
from .params import BaseParams
from .utils import gather_indexes
@tf.function
def empty_tensor_handling_loss(labels, logits, loss_fn):
    """Mean of ``loss_fn(labels, logits)``, or 0.0 when ``labels`` is empty.

    Guards against the empty per-replica batches that occur in
    multi-problem training. The three separate checks (empty tensor,
    scalar shape, zero-length first dim) are kept as individual ``if``
    statements because AutoGraph converts each one; do not merge them
    with Python boolean operators.
    """
    if tf.equal(tf.size(labels), 0):
        return 0.0
    if tf.equal(tf.size(tf.shape(labels)), 0):
        return 0.0
    if tf.equal(tf.shape(labels)[0], 0):
        return 0.0
    else:
        return tf.reduce_mean(loss_fn(
            labels, logits, from_logits=True))
@tf.function
def nan_loss_handling(loss):
    """Return ``loss``, replaced by 0.0 if it is NaN.

    Prevents a single degenerate/empty batch from poisoning the
    accumulated training loss.
    """
    if tf.math.is_nan(loss):
        return 0.0
    else:
        return loss
@tf.function
def create_dummy_if_empty(inp_tensor: tf.Tensor) -> tf.Tensor:
    """Return ``inp_tensor``, or a zeros dummy with batch size 1 when empty.

    The dummy keeps the input's trailing dimensions and dtype. Used
    because some downstream ops (e.g. the CRF layer) cannot handle
    zero-sized batches.
    """
    shape_tensor = tf.shape(inp_tensor)
    if tf.equal(shape_tensor[0], 0):
        data_type = inp_tensor.dtype
        dummy_shape_first_dim = tf.convert_to_tensor([1], dtype=tf.int32)
        dummy_shape = tf.concat(
            [dummy_shape_first_dim, shape_tensor[1:]], axis=0)
        dummy_tensor = tf.zeros(dummy_shape, dtype=data_type)
        return dummy_tensor
    else:
        return inp_tensor
class BaseTop(tf.keras.Model):
    """Abstract base class for per-problem top layers.

    Subclasses implement ``call`` taking ``(features, hidden_features)``
    plus an estimator mode string.
    """

    def __init__(self, params: BaseParams, problem_name: str) -> None:
        super().__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name

    def call(self, inputs: Tuple[Dict], mode: str):
        raise NotImplementedError
# Cell
class SequenceLabel(tf.keras.Model):
    """Sequence-labeling (token classification) head.

    Projects per-token hidden states to ``num_classes`` logits,
    optionally decoding with a CRF layer when ``params.crf`` is set. In
    train/eval mode the loss (CRF negative log-likelihood or sparse
    categorical cross-entropy) and an accuracy metric are registered via
    ``add_loss``/``add_metric``.
    """
    def __init__(self, params: BaseParams, problem_name: str):
        super(SequenceLabel, self).__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name
        num_classes = self.params.num_classes[self.problem_name]
        self.dense = tf.keras.layers.Dense(num_classes, activation=None)
        # dropout_keep_prob is a keep probability; keras Dropout expects
        # a drop rate, hence the 1 - x conversion.
        self.dropout = tf.keras.layers.Dropout(1-params.dropout_keep_prob)
        if self.params.crf:
            self.crf = CRF(num_classes)
            self.metric_fn = tf.keras.metrics.Accuracy(
                name='{}_acc'.format(self.problem_name)
            )
        else:
            self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
                name='{}_acc'.format(self.problem_name))
    def return_crf_result(self, labels: tf.Tensor, logits: tf.Tensor, mode: str, input_mask: tf.Tensor):
        """Decode with the CRF; in train/eval mode also add the CRF
        log-likelihood loss and masked accuracy. Returns one-hot viterbi
        decodes so the output shape matches the softmax branch."""
        input_mask.set_shape([None, None])
        # Dummies guard against zero-sized batches, which the CRF layer
        # cannot handle.
        logits = create_dummy_if_empty(logits)
        input_mask = create_dummy_if_empty(input_mask)
        viterbi_decoded, potentials, sequence_length, chain_kernel = self.crf(
            logits, input_mask)
        if mode != tf.estimator.ModeKeys.PREDICT:
            loss = -crf_log_likelihood(potentials,
                                       labels, sequence_length, chain_kernel)[0]
            loss = tf.reduce_mean(loss)
            loss = nan_loss_handling(loss)
            self.add_loss(loss)
            acc = self.metric_fn(
                labels, viterbi_decoded, sample_weight=input_mask)
            self.add_metric(acc)
        # make the crf prediction have the same shape as the non-crf prediction
        return tf.one_hot(viterbi_decoded, name='%s_predict' % self.problem_name, depth=self.params.num_classes[self.problem_name])
    def call(self, inputs, mode):
        training = (mode == tf.estimator.ModeKeys.TRAIN)
        feature, hidden_feature = inputs
        hidden_feature = hidden_feature['seq']
        if mode != tf.estimator.ModeKeys.PREDICT:
            labels = feature['{}_label_ids'.format(self.problem_name)]
            # sometimes the length of labels does not equal the length of inputs
            # that's caused by tf.data.experimental.bucket_by_sequence_length in multi problem scenario
            pad_len = tf.shape(input=hidden_feature)[
                1] - tf.shape(input=labels)[1]
            # top, bottom, left, right
            pad_tensor = [[0, 0], [0, pad_len]]
            labels = tf.pad(tensor=labels, paddings=pad_tensor)
        else:
            labels = None
        hidden_feature = self.dropout(hidden_feature, training)
        if self.params.crf:
            return self.return_crf_result(labels, hidden_feature, mode, feature['model_input_mask'])
        logits = self.dense(hidden_feature)
        if mode != tf.estimator.ModeKeys.PREDICT:
            loss = empty_tensor_handling_loss(
                labels, logits,
                tf.keras.losses.sparse_categorical_crossentropy)
            self.add_loss(loss)
            acc = self.metric_fn(
                labels, logits, sample_weight=feature['model_input_mask'])
            self.add_metric(acc)
        return tf.nn.softmax(
            logits, name='%s_predict' % self.problem_name)
# Cell
class Classification(tf.keras.layers.Layer):
    """Single-label classification head over the pooled encoder output.

    Applies dropout and a dense projection; in train/eval mode registers
    a label-smoothed categorical cross-entropy loss (guarded against
    empty batches and NaNs) plus a sparse-accuracy metric. Returns class
    probabilities.
    """

    def __init__(self, params: BaseParams, problem_name: str) -> None:
        super(Classification, self).__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name
        num_classes = self.params.num_classes[self.problem_name]
        self.dense = tf.keras.layers.Dense(num_classes, activation=None)
        self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
            name='{}_acc'.format(self.problem_name))
        # dropout_keep_prob is a keep probability; Dropout takes a rate.
        self.dropout = tf.keras.layers.Dropout(1-params.dropout_keep_prob)

    def call(self, inputs, mode):
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        is_predict = (mode == tf.estimator.ModeKeys.PREDICT)
        features, hidden_features = inputs
        pooled = hidden_features['pooled']
        labels = None if is_predict else features[
            '{}_label_ids'.format(self.problem_name)]
        pooled = self.dropout(pooled, is_training)
        logits = self.dense(pooled)
        if not is_predict:
            # Labels are one-hot encoded so that label smoothing can be applied.
            one_hot_labels = tf.one_hot(
                labels, depth=self.params.num_classes[self.problem_name])
            loss_fn = partial(tf.keras.losses.categorical_crossentropy,
                              from_logits=True,
                              label_smoothing=self.params.label_smoothing)
            loss = nan_loss_handling(
                empty_tensor_handling_loss(one_hot_labels, logits, loss_fn))
            self.add_loss(loss)
            self.add_metric(self.metric_fn(labels, logits))
        return tf.nn.softmax(
            logits, name='%s_predict' % self.problem_name)
# Cell
class PreTrain(tf.keras.Model):
    """BERT-style pre-training head: next-sentence prediction (NSP) plus
    masked language modeling (MLM).

    When ``share_embedding`` is enabled (and hidden_size matches the
    embedding size) the MLM output projection is tied to the encoder's
    word-embedding matrix; otherwise a fresh Dense projection to the
    vocabulary is used.
    """
    def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.Tensor=None, share_embedding=True):
        super(PreTrain, self).__init__(name=problem_name)
        self.params = params
        self.nsp = transformers.models.bert.modeling_tf_bert.TFBertNSPHead(
            self.params.bert_config)
        if share_embedding is False:
            self.vocab_size = self.params.bert_config.vocab_size
            self.share_embedding = False
        else:
            word_embedding_weight = input_embeddings.word_embeddings
            self.vocab_size = word_embedding_weight.shape[0]
            embedding_size = word_embedding_weight.shape[-1]
            # Weight tying is only valid when the encoder hidden size
            # matches the embedding size.
            share_valid = (self.params.bert_config.hidden_size ==
                           embedding_size)
            if not share_valid and self.params.share_embedding:
                logging.warning(
                    'Share embedding is enabled but hidden_size != embedding_size')
            self.share_embedding = self.params.share_embedding & share_valid
        if self.share_embedding:
            self.share_embedding_layer = TFSharedEmbeddings(
                vocab_size=word_embedding_weight.shape[0], hidden_size=word_embedding_weight.shape[1])
            self.share_embedding_layer.build([1])
            self.share_embedding_layer.weight = word_embedding_weight
        else:
            self.share_embedding_layer = tf.keras.layers.Dense(self.vocab_size)
    def call(self,
             inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
             mode: str) -> Tuple[tf.Tensor, tf.Tensor]:
        """Return ``(nsp_probabilities, mlm_probabilities)``; in
        train/eval mode also register the summed NSP + MLM loss."""
        features, hidden_features = inputs
        # compute logits
        nsp_logits = self.nsp(hidden_features['pooled'])
        # masking is done inside the model
        seq_hidden_feature = hidden_features['seq']
        if mode != tf.estimator.ModeKeys.PREDICT:
            positions = features['masked_lm_positions']
            # gather_indexes will flatten the seq hidden_states, we need to reshape
            # back to 3d tensor
            input_tensor = gather_indexes(seq_hidden_feature, positions)
            shape_tensor = tf.shape(positions)
            shape_list = tf.concat(
                [shape_tensor, [seq_hidden_feature.shape.as_list()[-1]]], axis=0)
            input_tensor = tf.reshape(input_tensor, shape=shape_list)
            # set_shape to determine rank
            input_tensor.set_shape(
                [None, None, seq_hidden_feature.shape.as_list()[-1]])
        else:
            input_tensor = seq_hidden_feature
        if self.share_embedding:
            mlm_logits = self.share_embedding_layer(
                input_tensor, mode='linear')
        else:
            mlm_logits = self.share_embedding_layer(input_tensor)
        if mode != tf.estimator.ModeKeys.PREDICT:
            nsp_labels = features['next_sentence_label_ids']
            mlm_labels = features['masked_lm_ids']
            mlm_labels.set_shape([None, None])
            # compute loss
            nsp_loss = empty_tensor_handling_loss(
                nsp_labels, nsp_logits,
                tf.keras.losses.sparse_categorical_crossentropy)
            mlm_loss_layer = transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss()
            # mlm_loss = tf.reduce_mean(
            #     mlm_loss_layer.compute_loss(mlm_labels, mlm_logits))
            # add a useless from_logits argument to match the function signature of keras losses.
            def loss_fn_wrapper(labels, logits, from_logits=True):
                return mlm_loss_layer.compute_loss(labels, logits)
            mlm_loss = empty_tensor_handling_loss(
                mlm_labels,
                mlm_logits,
                loss_fn_wrapper
            )
            loss = nsp_loss + mlm_loss
            self.add_loss(loss)
        return (tf.sigmoid(nsp_logits), tf.nn.softmax(mlm_logits))
# Cell
class Seq2Seq(tf.keras.Model):
    """Sequence-to-sequence decoding head.

    NOTE(review): currently disabled — ``__init__`` unconditionally
    raises NotImplementedError after the commented-out decoder setup, so
    this class cannot be instantiated and ``call`` /
    ``_seq2seq_label_shift_right`` are effectively dead code referencing
    attributes (``self.decoder``, ``self.metric_fn``) that are never
    assigned.
    """
    def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.keras.layers.Layer):
        super(Seq2Seq, self).__init__(name=problem_name)
        # self.params = params
        # self.problem_name = problem_name
        # # if self.params.init_weight_from_huggingface:
        # #     self.decoder = load_transformer_model(
        # #         self.params.transformer_decoder_model_name,
        # #         self.params.transformer_decoder_model_loading)
        # # else:
        # #     self.decoder = load_transformer_model(
        # #         self.params.bert_decoder_config, self.params.transformer_decoder_model_loading)
        # # TODO: better implementation
        # logging.warning(
        #     'Seq2Seq model is not well supported yet. Bugs are expected.')
        # config = self.params.bert_decoder_config
        # # some hacky approach to share embeddings from encoder to decoder
        # word_embedding_weight = input_embeddings.word_embeddings
        # self.vocab_size = word_embedding_weight.shape[0]
        # self.share_embedding_layer = TFSharedEmbeddings(
        #     vocab_size=word_embedding_weight.shape[0], hidden_size=word_embedding_weight.shape[1])
        # self.share_embedding_layer.build([1])
        # self.share_embedding_layer.weight = word_embedding_weight
        # # self.decoder = TFBartDecoder(
        # #     config=config, embed_tokens=self.share_embedding_layer)
        # self.decoder = TFBartDecoderForConditionalGeneration(
        #     config=config, embedding_layer=self.share_embedding_layer)
        # self.decoder.set_bos_id(self.params.bos_id)
        # self.decoder.set_eos_id(self.params.eos_id)
        # self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
        #     name='{}_acc'.format(self.problem_name))
        raise NotImplementedError
    def _seq2seq_label_shift_right(self, labels: tf.Tensor, eos_id: int) -> tf.Tensor:
        """Drop the first label token and append EOS, producing the
        decoder target sequence (teacher forcing is shifted by one)."""
        batch_eos_ids = tf.fill([tf.shape(labels)[0], 1], eos_id)
        batch_eos_ids = tf.cast(batch_eos_ids, dtype=tf.int64)
        decoder_lable = labels[:, 1:]
        decoder_lable = tf.concat([decoder_lable, batch_eos_ids], axis=1)
        return decoder_lable
    def call(self,
             inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
             mode: str):
        features, hidden_features = inputs
        encoder_mask = features['model_input_mask']
        if mode == tf.estimator.ModeKeys.PREDICT:
            input_ids = None
            decoder_padding_mask = None
        else:
            input_ids = features['%s_label_ids' % self.problem_name]
            decoder_padding_mask = features['{}_mask'.format(
                self.problem_name)]
        if mode == tf.estimator.ModeKeys.PREDICT:
            return self.decoder.generate(eos_token_id=self.params.eos_id, encoder_hidden_states=hidden_features['seq'])
        else:
            decoder_output = self.decoder(input_ids=input_ids,
                                          encoder_hidden_states=hidden_features['seq'],
                                          encoder_padding_mask=encoder_mask,
                                          decoder_padding_mask=decoder_padding_mask,
                                          decode_max_length=self.params.decode_max_seq_len,
                                          mode=mode)
            loss = decoder_output.loss
            logits = decoder_output.logits
            self.add_loss(loss)
            decoder_label = self._seq2seq_label_shift_right(
                features['%s_label_ids' % self.problem_name], eos_id=self.params.eos_id)
            acc = self.metric_fn(decoder_label, logits)
            self.add_metric(acc)
            return logits
# Cell
class MultiLabelClassification(tf.keras.Model):
    """Multi-label classification head over the pooled encoder output.

    Uses weighted sigmoid cross-entropy (``pos_weight`` taken from
    ``params.multi_cls_positive_weight``) so positive labels can be
    up-weighted. Returns per-class sigmoid probabilities.
    """
    def __init__(self, params: BaseParams, problem_name: str) -> None:
        super(MultiLabelClassification, self).__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name
        self.dense = tf.keras.layers.Dense(
            self.params.num_classes[problem_name])
        # dropout_keep_prob is a keep probability; Dropout takes a rate.
        self.dropout = tf.keras.layers.Dropout(
            1-self.params.dropout_keep_prob
        )
        # self.metric_fn = tfa.metrics.F1Score(
        #     num_classes=self.params.num_classes[problem_name],
        #     threshold=self.params.multi_cls_threshold,
        #     average='macro',
        #     name='{}_f1'.format(problem_name))
    def call(self, inputs, mode):
        training = (mode == tf.estimator.ModeKeys.TRAIN)
        feature, hidden_feature = inputs
        hidden_feature = hidden_feature['pooled']
        if mode != tf.estimator.ModeKeys.PREDICT:
            labels = feature['{}_label_ids'.format(self.problem_name)]
        else:
            labels = None
        hidden_feature = self.dropout(hidden_feature, training)
        logits = self.dense(hidden_feature)
        if mode != tf.estimator.ModeKeys.PREDICT:
            # weighted_cross_entropy_with_logits expects float labels.
            labels = tf.cast(labels, tf.float32)
            # use weighted loss
            label_weights = self.params.multi_cls_positive_weight
            def _loss_fn_wrapper(x, y, from_logits=True):
                return tf.nn.weighted_cross_entropy_with_logits(x, y, pos_weight=label_weights, name='{}_loss'.format(self.problem_name))
            loss = empty_tensor_handling_loss(
                labels, logits, _loss_fn_wrapper)
            loss = nan_loss_handling(loss)
            self.add_loss(loss)
            # labels = create_dummy_if_empty(labels)
            # logits = create_dummy_if_empty(logits)
            # f1 = self.metric_fn(labels, logits)
            # self.add_metric(f1)
        return tf.nn.sigmoid(
            logits, name='%s_predict' % self.problem_name)
# Cell
class MaskLM(tf.keras.Model):
    """Multimodal MLM top layer.

    Masked-language-modeling head without the NSP objective (compare
    ``PreTrain``). Optionally ties the output projection to the
    encoder's word-embedding matrix when ``share_embedding`` is enabled
    and the hidden size matches the embedding size.
    """
    def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.keras.layers.Layer=None, share_embedding=True) -> None:
        super(MaskLM, self).__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name
        if share_embedding is False:
            self.vocab_size = self.params.bert_config.vocab_size
            self.share_embedding = False
        else:
            word_embedding_weight = input_embeddings.word_embeddings
            self.vocab_size = word_embedding_weight.shape[0]
            embedding_size = word_embedding_weight.shape[-1]
            # Weight tying is only valid when the encoder hidden size
            # matches the embedding size.
            share_valid = (self.params.bert_config.hidden_size ==
                           embedding_size)
            if not share_valid and self.params.share_embedding:
                logging.warning(
                    'Share embedding is enabled but hidden_size != embedding_size')
            self.share_embedding = self.params.share_embedding & share_valid
        if self.share_embedding:
            self.share_embedding_layer = TFSharedEmbeddings(
                vocab_size=self.vocab_size, hidden_size=word_embedding_weight.shape[1])
            self.share_embedding_layer.build([1])
            self.share_embedding_layer.weight = word_embedding_weight
        else:
            self.share_embedding_layer = tf.keras.layers.Dense(self.vocab_size)
    def call(self, inputs, mode):
        """Return softmax vocabulary probabilities for the masked
        positions (train/eval) or for every position (predict)."""
        features, hidden_features = inputs
        # masking is done inside the model
        seq_hidden_feature = hidden_features['seq']
        if mode != tf.estimator.ModeKeys.PREDICT:
            positions = features['masked_lm_positions']
            # gather_indexes will flatten the seq hidden_states, we need to reshape
            # back to 3d tensor
            input_tensor = gather_indexes(seq_hidden_feature, positions)
            shape_tensor = tf.shape(positions)
            shape_list = tf.concat([shape_tensor, [seq_hidden_feature.shape.as_list()[-1]]], axis=0)
            input_tensor = tf.reshape(input_tensor, shape=shape_list)
            # set_shape to determine rank
            input_tensor.set_shape(
                [None, None, seq_hidden_feature.shape.as_list()[-1]])
        else:
            input_tensor = seq_hidden_feature
        if self.share_embedding:
            mlm_logits = self.share_embedding_layer(
                input_tensor, mode='linear')
        else:
            mlm_logits = self.share_embedding_layer(input_tensor)
        if mode != tf.estimator.ModeKeys.PREDICT:
            mlm_labels = features['masked_lm_ids']
            mlm_labels.set_shape([None, None])
            # compute loss
            mlm_loss = empty_tensor_handling_loss(
                mlm_labels,
                mlm_logits,
                tf.keras.losses.sparse_categorical_crossentropy
            )
            loss = nan_loss_handling(mlm_loss)
            self.add_loss(loss)
        return tf.nn.softmax(mlm_logits)
|
import React from 'react';
import ReactDom from 'react-dom';
import ReactDomServer from 'react-dom/server';
import Swiper from '../../dist/react-swiper';
const App = React.createClass({
render() {
var config = {
slidesPerView: 1,
paginationClickable: true,
spaceBetween: 30,
loop: true
};
return (
<div id="demo-slides-per-view" className="demo-wrapper">
<Swiper swiperConfig={ config }>
<img src="http://placehold.it/1000x400&text=slide1"/>
<img src="http://placehold.it/1000x400&text=slide2"/>
<img src="http://placehold.it/1000x400&text=slide3"/>
<img src="http://placehold.it/1000x400&text=slide4"/>
<img src="http://placehold.it/1000x400&text=slide5"/>
<img src="http://placehold.it/1000x400&text=slide6"/>
<img src="http://placehold.it/1000x400&text=slide7"/>
<img src="http://placehold.it/1000x400&text=slide8"/>
<img src="http://placehold.it/1000x400&text=slide9"/>
<img src="http://placehold.it/1000x400&text=slide10"/>
</Swiper>
</div>
)
}
});
const content = document.getElementById('content');
ReactDom.render(<App/>, content);
|
#!/bin/bash -f
#*********************************************************************************************************
# Vivado (TM) v2018.1 (64-bit)
#
# Filename : dist_mem_gen_0.sh
# Simulator : Synopsys Verilog Compiler Simulator
# Description : Simulation script for compiling, elaborating and verifying the project source files.
# The script will automatically create the design libraries sub-directories in the run
# directory, add the library logical mappings in the simulator setup file, create default
# 'do/prj' file, execute compilation, elaboration and simulation steps.
#
# Generated by Vivado on Wed Feb 20 16:26:40 +0800 2019
# SW Build 2188600 on Wed Apr 4 18:40:38 MDT 2018
#
# Copyright 1986-2018 Xilinx, Inc. All Rights Reserved.
#
# usage: dist_mem_gen_0.sh [-help]
# usage: dist_mem_gen_0.sh [-lib_map_path]
# usage: dist_mem_gen_0.sh [-noclean_files]
# usage: dist_mem_gen_0.sh [-reset_run]
#
# Prerequisite:- To compile and run simulation, you must compile the Xilinx simulation libraries using the
# 'compile_simlib' TCL command. For more information about this command, run 'compile_simlib -help' in the
# Vivado Tcl Shell. Once the libraries have been compiled successfully, specify the -lib_map_path switch
# that points to these libraries and rerun export_simulation. For more information about this switch please
# type 'export_simulation -help' in the Tcl shell.
#
# You can also point to the simulation libraries by either replacing the <SPECIFY_COMPILED_LIB_PATH> in this
# script with the compiled library directory path or specify this path with the '-lib_map_path' switch when
# executing this script. Please type 'dist_mem_gen_0.sh -help' for more information.
#
# Additional references - 'Xilinx Vivado Design Suite User Guide:Logic simulation (UG900)'
#
#*********************************************************************************************************
# Directory path for design sources and include directories (if any) wrt this path
ref_dir="."
# Override directory with 'export_sim_ref_dir' env path value if set in the shell
if [[ (! -z "$export_sim_ref_dir") && ($export_sim_ref_dir != "") ]]; then
  ref_dir="$export_sim_ref_dir"
fi
# Command line options
# (passed verbatim to the Synopsys VCS tools; see the tool documentation
# for the meaning of the individual switches)
vlogan_opts="-full64"
vhdlan_opts="-full64"
vcs_elab_opts="-full64 -debug_pp -t ps -licqueue -l elaborate.log"
vcs_sim_opts="-ucli -licqueue -l simulate.log"
# Design libraries
design_libs=(dist_mem_gen_v8_0_12 xil_defaultlib)
# Simulation root library directory
sim_lib_dir="vcs_lib"
# Script info
echo -e "dist_mem_gen_0.sh - Script generated by export_simulation (Vivado v2018.1 (64-bit)-id)\n"
# Main steps
run()
{
  # Validate arguments, prepare library mappings/directories, then run
  # the compile -> elaborate -> simulate flow.
  check_args $# $1
  setup $1 $2
  compile
  elaborate
  simulate
}
# RUN_STEP: <compile>
compile()
{
  # Compile design files
  # Analyze the IP simulation model and the generated wrapper into their
  # respective libraries; glbl.v provides the Xilinx global module. All
  # output is appended (tee -a) into a single vlogan.log.
  vlogan -work dist_mem_gen_v8_0_12 $vlogan_opts +v2k \
    "$ref_dir/../../../ipstatic/simulation/dist_mem_gen_v8_0.v" \
  2>&1 | tee -a vlogan.log

  vlogan -work xil_defaultlib $vlogan_opts +v2k \
    "$ref_dir/../../../../MIPS_CPU.srcs/sources_1/ip/dist_mem_gen_0/sim/dist_mem_gen_0.v" \
  2>&1 | tee -a vlogan.log

  vlogan -work xil_defaultlib $vlogan_opts +v2k \
    glbl.v \
  2>&1 | tee -a vlogan.log
}
# RUN_STEP: <elaborate>
elaborate()
{
  # Elaborate the top unit plus glbl into the simv executable.
  vcs $vcs_elab_opts xil_defaultlib.dist_mem_gen_0 xil_defaultlib.glbl -o dist_mem_gen_0_simv
}
# RUN_STEP: <simulate>
simulate()
{
  # Run the elaborated simulation, driven by the generated 'simulate.do'.
  ./dist_mem_gen_0_simv $vcs_sim_opts -do simulate.do
}
# STEP: setup
setup()
{
  # Dispatch on the first command-line switch:
  #   -lib_map_path <path> : map against a pre-compiled simulation library
  #   -reset_run           : wipe previous run data and exit
  #   -noclean_files       : keep data from the previous run
  #   (default)            : create fresh library mappings
  case $1 in
    "-lib_map_path" )
      if [[ ($2 == "") ]]; then
        echo -e "ERROR: Simulation library directory path not specified (type \"./dist_mem_gen_0.sh -help\" for more information)\n"
        exit 1
      fi
     create_lib_mappings $2
    ;;
    "-reset_run" )
      reset_run
      echo -e "INFO: Simulation run files deleted.\n"
      exit 0
    ;;
    "-noclean_files" )
      # do not remove previous data
    ;;
    * )
     create_lib_mappings $2
  esac

  create_lib_dir

  # Add any setup/initialization commands here:-
  # <user specific commands>
}
# Define design library mappings
create_lib_mappings()
{
  # Write synopsys_sim.setup, mapping every design library to its
  # sub-directory under $sim_lib_dir. An existing file is only
  # regenerated when a library map path ($1) was supplied.
  file="synopsys_sim.setup"
  if [[ -e $file ]]; then
    if [[ ($1 == "") ]]; then
      return
    else
      rm -rf $file
    fi
  fi

  touch $file

  lib_map_path=""
  if [[ ($1 != "") ]]; then
    lib_map_path="$1"
  fi
  for (( i=0; i<${#design_libs[*]}; i++ )); do
    lib="${design_libs[i]}"
    mapping="$lib:$sim_lib_dir/$lib"
    echo $mapping >> $file
  done
  # Fall back to the pre-compiled library mappings for everything else.
  if [[ ($lib_map_path != "") ]]; then
    incl_ref="OTHERS=$lib_map_path/synopsys_sim.setup"
    echo $incl_ref >> $file
  fi
}
# Create design library directory paths
create_lib_dir()
{
  # Start from a clean library tree, then create one sub-directory per
  # design library.
  if [[ -e $sim_lib_dir ]]; then
    rm -rf $sim_lib_dir
  fi

  for lib in "${design_libs[@]}"; do
    lib_dir="$sim_lib_dir/$lib"
    if [[ ! -e $lib_dir ]]; then
      mkdir -p $lib_dir
    fi
  done
}
# Delete generated data from the previous run
reset_run()
{
  # Known simulator by-products from earlier invocations; remove each
  # one if present, then re-create a clean library tree.
  files_to_remove=(ucli.key dist_mem_gen_0_simv vlogan.log vhdlan.log compile.log elaborate.log simulate.log .vlogansetup.env .vlogansetup.args .vcs_lib_lock scirocco_command.log 64 AN.DB csrc dist_mem_gen_0_simv.daidir)
  for file in "${files_to_remove[@]}"; do
    if [[ -e $file ]]; then
      rm -rf $file
    fi
  done

  create_lib_dir
}
# Check command line arguments
check_args()
{
  # $1 = number of script arguments, $2 = first argument.
  # Reject unknown single switches; print usage for -help/-h.
  if [[ ($1 == 1 ) && ($2 != "-lib_map_path" && $2 != "-noclean_files" && $2 != "-reset_run" && $2 != "-help" && $2 != "-h") ]]; then
    echo -e "ERROR: Unknown option specified '$2' (type \"./dist_mem_gen_0.sh -help\" for more information)\n"
    exit 1
  fi

  if [[ ($2 == "-help" || $2 == "-h") ]]; then
    usage
  fi
}
# Script usage
usage()
{
  # Print the supported switches and exit non-zero.
  msg="Usage: dist_mem_gen_0.sh [-help]\n\
Usage: dist_mem_gen_0.sh [-lib_map_path]\n\
Usage: dist_mem_gen_0.sh [-reset_run]\n\
Usage: dist_mem_gen_0.sh [-noclean_files]\n\n\
[-help] -- Print help information for this script\n\n\
[-lib_map_path <path>] -- Compiled simulation library directory path. The simulation library is compiled\n\
using the compile_simlib tcl command. Please see 'compile_simlib -help' for more information.\n\n\
[-reset_run] -- Recreate simulator setup files and library mappings for a clean run. The generated files\n\
from the previous run will be removed. If you don't want to remove the simulator generated files, use the\n\
-noclean_files switch.\n\n\
[-noclean_files] -- Reset previous run, but do not remove simulator generated files from the previous run.\n\n"
  echo -e $msg
  exit 1
}

# Launch script
run $1 $2
|
#!/bin/bash
# Provision a node based on an injected "ic-bootstrap.tar" file. This script
# is meant to be run as a prerequisite before launching orchestrator/replica.
#
# The configuration format is presently described here:
# https://docs.google.com/document/d/1W2bDkq3xhNvQyWPIVSKpYuBzaa5d1QN-N4uiXByr2Qg/edit
#
# The tar file can be supplied using one of two methods:
# - as "ic-bootstrap.tar" stored on a (virtual) removable media attached
# on first boot
# - it can be directly "pushed" into the filesystem as /mnt/ic-bootstrap.tar
# (e.g. bind mount when running the entire stack as docker container)
set -eo pipefail
# List all block devices marked as "removable".
# Echoes one device node per matching device: the first partition when
# the device is partitioned (/dev/sdX1 or /dev/nvme0n1p1 style),
# otherwise the raw device. Devices that fail the blockdev probe (e.g.
# a CD drive with no medium) are silently skipped.
# NOTE(review): may emit more than one device; the caller currently uses
# the output as a single mount argument — confirm multi-device behavior.
function find_removable_devices() {
    for DEV in $(ls -C /sys/class/block); do
        if [ -e /sys/class/block/"${DEV}"/removable ]; then
            local IS_REMOVABLE=$(cat /sys/class/block/"${DEV}"/removable)
            if [ "${IS_REMOVABLE}" == 1 ]; then
                # If this is a partitioned device (and it usually is), then
                # the first partition is of relevance.
                # return first partition for use instead.
                if [ -e /sys/class/block/"${DEV}1" ]; then
                    local TGT="/dev/${DEV}1"
                elif [ -e /sys/class/block/"${DEV}p1" ]; then
                    local TGT="/dev/${DEV}p1"
                else
                    local TGT="/dev/${DEV}"
                fi
                # Sanity check whether device is usable (it could be a
                # CD drive with no medium in)
                if blockdev "$TGT" >/dev/null 2>/dev/null; then
                    echo "$TGT"
                fi
            fi
        fi
    done
}
# Process the bootstrap package given as first argument to populate
# both config space and
# parts of /var/lib/ic/data and /var/lib/ic/crypto
#
# Arguments:
# - $1: path to the bootstrap package (typically /mnt/ic-bootstrap.tar)
# - $2: path to config space (typically /boot/config)
# - $3: path to ic storage root (typically /var/lib/ic)
function process_bootstrap() {
    local bootstrap_tar="$1"
    local config_root="$2"
    local state_root="$3"

    local unpack_dir=$(mktemp -d)
    tar xf "${bootstrap_tar}" -C "${unpack_dir}"

    # Data payloads (registry store, NNS public key) land under the
    # IC state root.
    for entry in ic_registry_local_store nns_public_key.pem; do
        if [ -e "${unpack_dir}/${entry}" ]; then
            cp -r -T "${unpack_dir}/${entry}" "${state_root}/data/${entry}"
        fi
    done

    # Configuration files are stashed away in the config store.
    for entry in journalbeat.conf network.conf nns.conf backup.conf; do
        if [ -e "${unpack_dir}/${entry}" ]; then
            cp "${unpack_dir}/${entry}" "${config_root}/${entry}"
        fi
    done

    # SSH account keys are directories, copied recursively.
    for entry in accounts_ssh_authorized_keys; do
        if [ -e "${unpack_dir}/${entry}" ]; then
            cp -r "${unpack_dir}/${entry}" "${config_root}/${entry}"
        fi
    done

    rm -rf "${unpack_dir}"
}
MAX_TRIES=10

# Poll for the bootstrap package until it has been processed once
# (marked by /boot/config/CONFIGURED) or MAX_TRIES attempts have failed.
while [ ! -f /boot/config/CONFIGURED ]; do
    DEV="$(find_removable_devices)"

    # Check whether we were provided with a removable device -- on "real"
    # VM deployments this will be the method used to inject bootstrap information
    # into the system.
    # But even if nothing can be mounted, just try and see if something usable
    # is there already -- this might be useful when operating this thing as a
    # docker container instead of full-blown VM.
    if [ "${DEV}" != "" ]; then
        mount -t vfat -o ro "${DEV}" /mnt
    fi
    if [ -e /mnt/ic-bootstrap.tar ]; then
        echo "Processing bootstrap config"
        process_bootstrap /mnt/ic-bootstrap.tar /boot/config /var/lib/ic
        touch /boot/config/CONFIGURED
    else
        MAX_TRIES=$(("${MAX_TRIES}" - 1))
        if [ "${MAX_TRIES}" == 0 ]; then
            # NOTE(review): the message says "continuing without", but the
            # script exits with status 1 here -- confirm whether exhausting
            # the retries is supposed to abort boot or proceed without config.
            echo "No registration configuration provided to bootstrap IC node -- continuing without"
            exit 1
        else
            echo "Retrying to find bootstrap config"
            sleep 1
        fi
    fi

    # Fix up permissions. This is actually the wrong place.
    chown dfinity.nogroup -R /var/lib/ic/data

    # Unmount only if we mounted something above.
    if [ "${DEV}" != "" ]; then
        umount /mnt
    fi
done
|
# Run the demo container interactively, with a proper init process as PID 1
# (--init, for signal handling/zombie reaping), mapping host port 1993 to
# container port 1993.
docker run -it --init -p 1993:1993 deno-demo
|
#!/bin/bash

# Make output directory for masks
# NOTE(review): assumes BASEDIR is set/exported by the caller -- confirm.
MASKDIR=$BASEDIR/data/outputs/masks
mkdir -p $MASKDIR

#######################################################################################################
# Structure mask processing

# Download ABI structure masks at 50 um
# Definitions: http://api.brain-map.org/api/v2/structure_graph_download/1.json
# Download server: http://download.alleninstitute.org/informatics-archive/current-release/
wget -O $MASKDIR/structure_315.nrrd http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ccf_2017/structure_masks/structure_masks_50/structure_315.nrrd # Isocortex, Isocortex / 70FF71
wget -O $MASKDIR/structure_549.nrrd http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ccf_2017/structure_masks/structure_masks_50/structure_549.nrrd # Thalamus, TH / FF7080
wget -O $MASKDIR/structure_688.nrrd http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ccf_2017/structure_masks/structure_masks_50/structure_688.nrrd # Cerebral cortex, CTX / B0FFB8 (includes hippocampus)

# Download ABI structure masks at 25 um, for high res plots
wget -O $MASKDIR/structure_315_25um.nrrd http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ccf_2017/structure_masks/structure_masks_25/structure_315.nrrd # Isocortex, Isocortex / 70FF71
wget -O $MASKDIR/structure_549_25um.nrrd http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ccf_2017/structure_masks/structure_masks_25/structure_549.nrrd # Thalamus, TH / FF7080
wget -O $MASKDIR/structure_688_25um.nrrd http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ccf_2017/structure_masks/structure_masks_25/structure_688.nrrd # Cerebral cortex, CTX / B0FFB8 (includes hippocampus)
# Convert to 50um data to MINC
python3 code/scripts/transform_space.py $MASKDIR/structure_315.nrrd $MASKDIR/Isocortex_50um.mnc
python3 code/scripts/transform_space.py $MASKDIR/structure_549.nrrd $MASKDIR/TH_50um.mnc
python3 code/scripts/transform_space.py $MASKDIR/structure_688.nrrd $MASKDIR/CTX_50um.mnc

# Downsample 50um data (nearest-neighbour so mask labels stay binary)
mincresample -2 -like data/resources/average_template_200.mnc -nearest_neighbour -keep_real_range $MASKDIR/Isocortex_50um.mnc $MASKDIR/Isocortex_200um.mnc
mincresample -2 -like data/resources/average_template_200.mnc -nearest_neighbour -keep_real_range $MASKDIR/TH_50um.mnc $MASKDIR/TH_200um.mnc
mincresample -2 -like data/resources/average_template_200.mnc -nearest_neighbour -keep_real_range $MASKDIR/CTX_50um.mnc $MASKDIR/CTX_200um.mnc

# Convert to 25um data to MINC
python3 code/scripts/transform_space.py $MASKDIR/structure_315_25um.nrrd $MASKDIR/Isocortex_25um.mnc
python3 code/scripts/transform_space.py $MASKDIR/structure_549_25um.nrrd $MASKDIR/TH_25um.mnc
python3 code/scripts/transform_space.py $MASKDIR/structure_688_25um.nrrd $MASKDIR/CTX_25um.mnc

# Intersect each mask with both coronal and sagittal coverage maps to produce 6 masks for potential use
# (only 2 of these will be used, one for thalamus and one for cortex, related to either coronal or sagittal coverage
# Each minccalc expression is a voxelwise binary AND at a 0.5 threshold.
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/sagittal_200um_coverage_bin0.8_resampled_50um.mnc $MASKDIR/Isocortex_50um.mnc $MASKDIR/Isocortex_sagittal_coverage_50um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/sagittal_200um_coverage_bin0.8_resampled_50um.mnc $MASKDIR/TH_50um.mnc $MASKDIR/TH_sagittal_coverage_50um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/sagittal_200um_coverage_bin0.8_resampled_50um.mnc $MASKDIR/CTX_50um.mnc $MASKDIR/CTX_sagittal_coverage_50um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/coronal_200um_coverage_bin0.8_resampled_50um.mnc $MASKDIR/Isocortex_50um.mnc $MASKDIR/Isocortex_coronal_coverage_50um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/coronal_200um_coverage_bin0.8_resampled_50um.mnc $MASKDIR/TH_50um.mnc $MASKDIR/TH_coronal_coverage_50um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/coronal_200um_coverage_bin0.8_resampled_50um.mnc $MASKDIR/CTX_50um.mnc $MASKDIR/CTX_coronal_coverage_50um.mnc

# Also do at 200um
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/sagittal_200um_coverage_bin0.8.mnc $MASKDIR/Isocortex_200um.mnc $MASKDIR/Isocortex_sagittal_coverage_200um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/sagittal_200um_coverage_bin0.8.mnc $MASKDIR/TH_200um.mnc $MASKDIR/TH_sagittal_coverage_200um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/sagittal_200um_coverage_bin0.8.mnc $MASKDIR/CTX_200um.mnc $MASKDIR/CTX_sagittal_coverage_200um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/coronal_200um_coverage_bin0.8.mnc $MASKDIR/Isocortex_200um.mnc $MASKDIR/Isocortex_coronal_coverage_200um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/coronal_200um_coverage_bin0.8.mnc $MASKDIR/TH_200um.mnc $MASKDIR/TH_coronal_coverage_200um.mnc
minccalc -2 -expression "A[0]>0.5 && A[1]>0.5" data/resources/coronal_200um_coverage_bin0.8.mnc $MASKDIR/CTX_200um.mnc $MASKDIR/CTX_coronal_coverage_200um.mnc

# Create a cortical-thalamic label set
# Resulting labels: 1 = rest of brain, 2 = rest of cerebral cortex,
# 3 = isocortex (brain + CTX + Isocortex), 4 = thalamus (brain + 3*TH).
minccalc -2 -expression "A[0] + A[1] + A[2] + 3*A[3]" data/resources/average_template_50_mask.mnc $MASKDIR/CTX_50um.mnc $MASKDIR/Isocortex_50um.mnc $MASKDIR/TH_50um.mnc $MASKDIR/combined_label_set_50um.mnc
#######################################################################################################
# Split into hemispheres
#
# For every mask at each resolution (50um, 200um, 25um), produce left- and
# right-hemisphere versions by intersecting with the corresponding
# hemisphere mask. Previously this was six near-identical copy-pasted
# loops; it is now a single nested loop. The order of operations
# (left 50/200/25, then right 50/200/25) matches the original.
#
# The per-resolution hemisphere mask paths are kept in explicitly named
# variables because later sections of this script reference them directly.
left_mask_file_50="data/resources/left_side_50um.mnc"
left_mask_file_200="data/resources/left_side_200um.mnc"
left_mask_file_25="data/resources/left_side_25um.mnc"
right_mask_file_50="data/resources/right_side_50um.mnc"
right_mask_file_200="data/resources/right_side_200um.mnc"
right_mask_file_25="data/resources/right_side_25um.mnc"

for side in left right
do
    for res in 50 200 25
    do
        # Resolve e.g. "left_mask_file_50" via bash indirect expansion.
        side_mask_var="${side}_mask_file_${res}"
        side_mask_file="${!side_mask_var}"
        # Glob only matches files ending in "_<res>um.mnc", so already-split
        # "*_left.mnc"/"*_right.mnc" outputs are never reprocessed.
        for f in $MASKDIR/*_${res}um.mnc
        do
            # foo_50um.mnc -> foo_50um_left.mnc / foo_50um_right.mnc
            outfile=`echo $f | sed "s/.mnc/_${side}.mnc/g"`
            echo "Working on file: ${f} -> ${outfile}"
            mincmask ${f} ${side_mask_file} ${outfile} -clobber
        done
    done
done
#######################################################################################################
# Create definitions
# Lookup table for the combined label set created above; values match the
# minccalc expression "A[0] + A[1] + A[2] + 3*A[3]" (1 = rest of brain,
# 2 = rest of cerebral cortex, 3 = isocortex, 4 = thalamus).
cat << EOF > $MASKDIR/combined_label_set_defs.csv
Structure,right.label,left.label
rest_of_brain,1,1
rest_of_cerebral_cortex,2,2
isocortex,3,3
thalamus,4,4
EOF
#######################################################################################################
# Laplacian processing

# Get laplacian, convert to MINC, downsample to 50um and 200um, and delete nrrd / 10um MINC files
# Transforming space requires up to 30GB
wget -O $MASKDIR/laplacian_10.nrrd http://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/cortical_coordinates/ccf_2017/laplacian_10.nrrd
python3 code/scripts/transform_space.py $MASKDIR/laplacian_10.nrrd $MASKDIR/laplacian_10.mnc
mincresample -2 -like data/resources/average_template_50.mnc -keep_real_range $MASKDIR/laplacian_10.mnc $MASKDIR/laplacian_50um.mnc
mincresample -2 -like data/resources/average_template_200.mnc -keep_real_range $MASKDIR/laplacian_10.mnc $MASKDIR/laplacian_200um.mnc
# Remove both large 10um intermediates, as stated above. Previously only the
# MINC file was deleted, leaving the downloaded nrrd behind.
rm $MASKDIR/laplacian_10.mnc
rm $MASKDIR/laplacian_10.nrrd

# Mask (at 50um) within Isocortex, and also coronal and sagittal coverage masks
mincmask $MASKDIR/laplacian_50um.mnc $MASKDIR/Isocortex_50um.mnc $MASKDIR/laplacian_50um_under_Isocortex_50um.mnc
mincmask $MASKDIR/laplacian_50um.mnc $MASKDIR/Isocortex_coronal_coverage_50um.mnc $MASKDIR/laplacian_50um_under_Isocortex_coronal_coverage_50um.mnc
mincmask $MASKDIR/laplacian_50um.mnc $MASKDIR/Isocortex_sagittal_coverage_50um.mnc $MASKDIR/laplacian_50um_under_Isocortex_sagittal_coverage_50um.mnc

# Mask (at 200um) within Isocortex, and also coronal and sagittal coverage masks
mincmask $MASKDIR/laplacian_200um.mnc $MASKDIR/Isocortex_200um.mnc $MASKDIR/laplacian_200um_under_Isocortex_200um.mnc
mincmask $MASKDIR/laplacian_200um.mnc $MASKDIR/Isocortex_coronal_coverage_200um.mnc $MASKDIR/laplacian_200um_under_Isocortex_coronal_coverage_200um.mnc
mincmask $MASKDIR/laplacian_200um.mnc $MASKDIR/Isocortex_sagittal_coverage_200um.mnc $MASKDIR/laplacian_200um_under_Isocortex_sagittal_coverage_200um.mnc

# Mask left, 50um
mincmask $MASKDIR/laplacian_50um.mnc ${left_mask_file_50} $MASKDIR/laplacian_50um_left.mnc
mincmask $MASKDIR/laplacian_50um_under_Isocortex_50um.mnc ${left_mask_file_50} $MASKDIR/laplacian_50um_under_Isocortex_50um_left.mnc
mincmask $MASKDIR/laplacian_50um_under_Isocortex_coronal_coverage_50um.mnc ${left_mask_file_50} $MASKDIR/laplacian_50um_under_Isocortex_coronal_coverage_50um_left.mnc
mincmask $MASKDIR/laplacian_50um_under_Isocortex_sagittal_coverage_50um.mnc ${left_mask_file_50} $MASKDIR/laplacian_50um_under_Isocortex_sagittal_coverage_50um_left.mnc

# Mask right, 50um
mincmask $MASKDIR/laplacian_50um.mnc ${right_mask_file_50} $MASKDIR/laplacian_50um_right.mnc
mincmask $MASKDIR/laplacian_50um_under_Isocortex_50um.mnc ${right_mask_file_50} $MASKDIR/laplacian_50um_under_Isocortex_50um_right.mnc
mincmask $MASKDIR/laplacian_50um_under_Isocortex_coronal_coverage_50um.mnc ${right_mask_file_50} $MASKDIR/laplacian_50um_under_Isocortex_coronal_coverage_50um_right.mnc
mincmask $MASKDIR/laplacian_50um_under_Isocortex_sagittal_coverage_50um.mnc ${right_mask_file_50} $MASKDIR/laplacian_50um_under_Isocortex_sagittal_coverage_50um_right.mnc

# Mask left, 200um
mincmask $MASKDIR/laplacian_200um.mnc ${left_mask_file_200} $MASKDIR/laplacian_200um_left.mnc
mincmask $MASKDIR/laplacian_200um_under_Isocortex_200um.mnc ${left_mask_file_200} $MASKDIR/laplacian_200um_under_Isocortex_200um_left.mnc
mincmask $MASKDIR/laplacian_200um_under_Isocortex_coronal_coverage_200um.mnc ${left_mask_file_200} $MASKDIR/laplacian_200um_under_Isocortex_coronal_coverage_200um_left.mnc
mincmask $MASKDIR/laplacian_200um_under_Isocortex_sagittal_coverage_200um.mnc ${left_mask_file_200} $MASKDIR/laplacian_200um_under_Isocortex_sagittal_coverage_200um_left.mnc

# Mask right, 200um
mincmask $MASKDIR/laplacian_200um.mnc ${right_mask_file_200} $MASKDIR/laplacian_200um_right.mnc
mincmask $MASKDIR/laplacian_200um_under_Isocortex_200um.mnc ${right_mask_file_200} $MASKDIR/laplacian_200um_under_Isocortex_200um_right.mnc
mincmask $MASKDIR/laplacian_200um_under_Isocortex_coronal_coverage_200um.mnc ${right_mask_file_200} $MASKDIR/laplacian_200um_under_Isocortex_coronal_coverage_200um_right.mnc
mincmask $MASKDIR/laplacian_200um_under_Isocortex_sagittal_coverage_200um.mnc ${right_mask_file_200} $MASKDIR/laplacian_200um_under_Isocortex_sagittal_coverage_200um_right.mnc
|
class Lock:
    """A simple combination lock with a settable unlock code."""

    def __init__(self):
        # Starts unlocked, with an empty code until reset_code() is called.
        self._isLocked = False
        self._code = ""

    def lock(self):
        """Engage the lock."""
        self._isLocked = True

    def unlock(self, code):
        """Disengage the lock if ``code`` matches; return whether it matched."""
        matched = code == self._code
        if matched:
            self._isLocked = False
        return matched

    def is_locked(self):
        """Report whether the lock is currently engaged."""
        return self._isLocked

    def reset_code(self, code):
        """Replace the stored unlock code."""
        self._code = code
|
<reponame>minuk8932/Algorithm_BaekJoon<gh_stars>1-10
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;
import java.util.StringTokenizer;
/**
 * BOJ 21062. Builds minimum spanning trees over two independent graphs
 * with Kruskal's algorithm, then combines their edge costs via prefix
 * sums to minimise a total cost. NOTE(review): the author's own comment
 * in main() records that this combination formula still fails the
 * judge at 12%, so treat getMinCost/calculation as unverified.
 */
public class Boj21062 {
    // parent[g][v]: union-find array for graph g (0 or 1);
    // a negative entry at a root stores -(component size).
    private static int[][] parent = new int[2][];
    private static final long INF = 1_000_000_000_000_000L;

    /** Weighted undirected edge, ordered by ascending cost. */
    private static class Node implements Comparable<Node>{
        int node1;
        int node2;
        long cost;

        public Node(int node1, int node2, long cost) {
            this.node1 = node1;
            this.node2 = node2;
            this.cost = cost;
        }

        @Override
        public int compareTo(Node n) {
            // Never returns 0: ties are ordered arbitrarily, which is
            // acceptable for PriorityQueue use.
            return this.cost < n.cost ? -1: 1;
        }
    }

    public static void main(String[] args) throws Exception {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StringTokenizer st = new StringTokenizer(br.readLine());

        // n1/m1: node and edge counts of graph 1; n2/m2: of graph 2.
        int n1 = Integer.parseInt(st.nextToken());
        int m1 = Integer.parseInt(st.nextToken());
        int n2 = Integer.parseInt(st.nextToken());
        int m2 = Integer.parseInt(st.nextToken());

        init(0, n1);
        init(1, n2);

        PriorityQueue<Node> pq = new PriorityQueue<>();

        // Read graph 1's edges into the priority queue.
        while(m1-- > 0) {
            st = new StringTokenizer(br.readLine());

            int u = Integer.parseInt(st.nextToken());
            int v = Integer.parseInt(st.nextToken());
            long w = Long.parseLong(st.nextToken());

            pq.offer(new Node(u, v, w));
        }

        // MST edge costs of graph 1, in ascending order (pq is drained).
        List<Long> values1 = kruskal(0, pq);

        // Read graph 2's edges (the queue is empty again after kruskal).
        while(m2-- > 0) {
            st = new StringTokenizer(br.readLine());

            int u = Integer.parseInt(st.nextToken());
            int v = Integer.parseInt(st.nextToken());
            long w = Long.parseLong(st.nextToken());

            pq.offer(new Node(u, v, w));
        }

        List<Long> values2 = kruskal(1, pq);

        /**
         * input
         * 4 3 3 2
         * 0 1 1
         * 1 2 1
         * 2 3 4
         * 0 1 3
         * 1 2 3
         *
         * output 24
         * answer 22
         *  => fixed but, still blocked 12%
         *
         */
        System.out.println(getMinCost(values1, values2, n1, n2));
    }

    /**
     * Combines the two MST cost lists. Evaluates the cost formula with
     * both max-first and min-first prefix orderings and keeps the
     * smallest result. NOTE(review): unverified, see class comment.
     */
    private static long getMinCost(List<Long> v1, List<Long> v2, int n1, int n2) {
        long[] mst = {makeSum(v1), makeSum(v2)};
        long[][] prefixMax = {prefixSumMax(v1), prefixSumMax(v2)};
        long[][] prefixMin = {prefixSumMin(v1), prefixSumMin(v2)};

        long max = Math.min(calculation(n1, mst, 0, 1, prefixMax[1])
                , calculation(n2, mst, 1, 0, prefixMax[0]));
        long min = Math.min(calculation(n1, mst, 0, 1, prefixMin[1])
                , calculation(n2, mst, 1, 0, prefixMin[0]));

        return Math.min(max, min);
    }

    // Minimises mst[src]*(i+1) + mst[snk]*n - prefix[i]*(n-1) over all
    // prefix lengths i.
    private static long calculation(int n, long[] mst, int src, int snk, long[] prefix) {
        long result = INF;

        for(int i = 0; i < prefix.length; i++) {
            result = Math.min(result
                    , mst[src] * (i + 1) + (mst[snk] * n - prefix[i] * (n - 1)));
        }

        return result;
    }

    // Prefix sums taken from the smallest cost upward; sum[0] == 0.
    private static long[] prefixSumMin(List<Long> value) {
        int size = value.size();
        long[] sum = new long[size + 1];

        for(int i = 1; i < sum.length; i++) {
            sum[i] = sum[i - 1] + value.get(i - 1);
        }

        return sum;
    }

    // Prefix sums taken from the largest cost downward; sum[0] == 0.
    private static long[] prefixSumMax(List<Long> value) {
        int size = value.size();
        long[] sum = new long[size + 1];

        for(int i = 1; i < sum.length; i++) {
            sum[i] = sum[i - 1] + value.get(size - i);
        }

        return sum;
    }

    // Total of all values in the list.
    private static long makeSum(List<Long> value) {
        long sum = 0;

        for(long v: value) {
            sum += v;
        }

        return sum;
    }

    // Kruskal's MST: drain the queue in ascending cost order, keeping the
    // cost of every edge that joins two previously separate components.
    private static List<Long> kruskal (int idx, PriorityQueue<Node> pq){
        List<Long> value = new ArrayList<>();

        while(!pq.isEmpty()) {
            Node current = pq.poll();
            if(merged(idx, current.node1, current.node2)) continue;
            value.add(current.cost);
        }

        return value;
    }

    // Reset graph idx's union-find: every node its own singleton (-1 = size 1).
    private static void init(int index, int n) {
        parent[index] = new int[n];

        for(int i = 0; i < n; i++) {
            parent[index][i] = -1;
        }
    }

    // Find with path compression.
    private static int find (int index, int x) {
        if(parent[index][x] < 0) return x;
        return parent[index][x] = find(index, parent[index][x]);
    }

    // Union by size. Returns true if x and y were already in the same
    // component (i.e. no merge happened), false after a successful merge.
    private static boolean merged (int index, int x, int y) {
        x = find(index, x);
        y = find(index, y);

        if(x == y) return true;

        if(parent[index][x] < parent[index][y]) {
            parent[index][x] += parent[index][y];
            parent[index][y] = x;
        }
        else {
            parent[index][y] += parent[index][x];
            parent[index][x] = y;
        }

        return false;
    }
}
|
import React from "react";
import { Image, Pressable, Text, View } from "react-native";
import { streamingOnStyles as styles } from "../../Stylesheets/Styles";
export const StreamingOn = ({
isNetflix,
gotoHomepage,
isDisneyPlus,
isPrimeVideo,
}) => {
return (
<View style={styles.streaming}>
<Text style={styles.header}>Streaming On : </Text>
{isNetflix() && (
<Pressable onPress={gotoHomepage}>
<Image
source={require("../../../assets/images/Netflix.png")}
style={styles.netflix}
/>
</Pressable>
)}
{isDisneyPlus() && (
<Pressable onPress={gotoHomepage}>
<Image
source={require("../../../assets/images/Disney.jpg")}
style={styles.disney}
/>
</Pressable>
)}
{isPrimeVideo() && (
<Pressable onPress={gotoHomepage}>
<Image
source={require("../../../assets/images/Prime.png")}
style={styles.prime}
/>
</Pressable>
)}
</View>
);
};
|
<gh_stars>0
import { ui } from "../ui/layaMaxUI";
import { insertCount } from "../utils/Count";
import { AppConfig } from "../AppConfig";
import { sharkAni } from "../utils/Common";
/**
 * Floating promo widget advertising another mini program; navigates to it
 * when tapped and reports analytics events. Instances are pooled via
 * Laya.Pool under the key "DynamicWidget".
 */
export default class DynamicWidget extends ui.item.appViewUI {
    private index: number = -1;
    // Container sprite this widget is mounted into.
    private father: Laya.Sprite = null;
    // Whether to play the pop-in/shake animation when data is set.
    private isAni: boolean = true;
    private static lastIndex: number = -1;
    // Page name used for analytics reporting.
    private pageName: string = null;
    // Promo entry; expects at least { name, appid, path, skin } fields.
    private data: any = null;
    // Uniform scale so the 250x300 design size fits the container.
    private scaleValue: number = 1;

    constructor() {
        super();
    }

    onDisable() {
        // Return to the object pool instead of letting Laya destroy it.
        if (!this.destroyed) {
            Laya.Pool.recover("DynamicWidget", this);
        }
    }

    // (Re)attach this widget to `context`, clearing any previous children,
    // scaling it to fit, and populating it from `data` if provided.
    private adapter(context: Laya.Sprite, pageName: string, data?: any, noAni?: boolean) {
        this.father = context;
        this.father.removeChildren(0, context.numChildren);

        if (noAni) {
            this.isAni = false;
        }
        this.on(Laya.Event.CLICK, this, this.clickGoToApp);
        // 100:120 (original note) -- scale relative to the 250x300 design size.
        this.scaleValue = Math.min(this.father.width / 250, this.father.height / 300);
        this.scale(this.scaleValue, this.scaleValue);
        this.pos(this.father.width / 2, this.father.height / 2);
        this.father.addChild(this);

        this.pageName = pageName;
        this.data = data;
        if (this.data) {
            this.setData(this.data);
        }
    }

    /** Click handler: report the tap and jump to the advertised mini program. */
    private clickGoToApp(e: Laya.Event) {
        e.stopPropagation();
        if (!this.data) return;
        var thiz = this;
        var data = this.data;
        insertCount({ type: '点击悬浮按钮', mark: data.name + ',' + data.appid, page: thiz.pageName });
        // navigateToMiniProgram requires WeChat base library > 2.2.0;
        // on older clients the tap is counted but no navigation happens.
        if (AppConfig.systemInfo.version > '2.2.0') {
            wx.navigateToMiniProgram({
                appId: data.appid,
                path: data.path,
                extraData: {
                    from: AppConfig.APPNAME,
                },
                envVersion: 'release',
                success: res => {
                    // success callback: record the successful jump.
                    insertCount({ type: '点击悬浮按钮,跳转成功', mark: data.name + ',' + data.appid, page: thiz.pageName });
                    insertCount({ type: '跳转成功_' + data.name, mark: data.name + ',' + data.appid, page: thiz.pageName });
                    insertCount({ type: '跳转成功', mark: thiz.pageName, page: thiz.pageName });
                    // duplicate-check (disabled)
                    /* if (ifCheckSame) {
                        hasJumpApp.push(data.appid);
                        checkSameApp();
                    } */
                },
                fail: e => {
                    // failure callback (fallback dialog disabled)
                    /* if (ifShowApps) {
                        AppDialog.getSelf(thiz.pageName).init(data);
                    } */
                }
            });
        }
    }

    // Pool-aware factory: fetch (or create) an instance and attach it.
    public static getSelf(context: Laya.Sprite, pageName: string, data?: any, noAni?: boolean): DynamicWidget {
        let view: DynamicWidget = Laya.Pool.getItemByClass("DynamicWidget", DynamicWidget);
        view.adapter(context, pageName, data, noAni);
        return view;
    }

    // Populate the widget UI from a promo entry and optionally animate in.
    public setData(data: any): void {
        this.data = data;
        this.appName.changeText(data.name);
        this.appIcon.skin = data.skin;
        // this.iconState.visible = true;
        // this.iconState.skin = "comp/icon_hot_new.png";
        var thiz = this;
        if (this.isAni) {
            // Pop in from scale 0 with a bounce, then start the shake loop.
            thiz.scale(0, 0);
            // thiz.iconState.scale(0, 0);
            Laya.Tween.to(thiz, { scaleX: this.scaleValue, scaleY: this.scaleValue }, 300, Laya.Ease.bounceOut, Laya.Handler.create(thiz, function () {
                sharkAni(thiz, thiz);
            }), 100);
            /* Laya.Tween.to(thiz.iconState, { scaleX: 1, scaleY: 1 }, 300, Laya.Ease.bounceOut, Laya.Handler.create(thiz, function () {
                sharkAni(thiz, thiz);
            }), 300); */
        }
    }
}
|
<filename>fwdmodel.cc<gh_stars>0
/* fwdmodel.cc - base class for generic forward models
<NAME> and <NAME>, FMRIB Image Analysis Group & IBME QuBIc Group
Copyright (C) 2007-2015 University of Oxford */
/* CCOPYRIGHT */
#include "fwdmodel.h"
#include "easylog.h"
#include "priors.h"
#include "rundata.h"
#include "transforms.h"
#include "armawrap/newmat.h"
#include <memory>
#include <sstream>
#include <string>
#include <vector>
using namespace std;
typedef int (*GetNumModelsFptr)(void);
typedef const char *(*GetModelNameFptr)(int);
typedef NewInstanceFptr (*GetNewInstanceFptrFptr)(const char *);
#ifdef _WIN32
// This stops Windows defining a load of macros which clash with FSL
#define WIN32_LEAN_AND_MEAN
#include "windows.h"
#define GETSYMBOL GetProcAddress
#define GETERROR GetLastErrorAsString
// Windows-only helper: return the system error message for the calling
// thread's last Win32 error, or an empty string if no error is recorded.
string GetLastErrorAsString()
{
    // Get the error message, if any.
    DWORD errorMessageID = ::GetLastError();
    if (errorMessageID == 0)
        return std::string(); // No error message has been recorded

    LPSTR messageBuffer = NULL;
    // FormatMessage allocates the buffer itself (ALLOCATE_BUFFER) and
    // returns the message length in characters.
    size_t size = FormatMessageA(
        FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
        NULL, errorMessageID, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&messageBuffer, 0,
        NULL);
    std::string message(messageBuffer, size);

    // Free the buffer.
    LocalFree(messageBuffer);

    return message;
}
#else
// POSIX-style methods for shared libraries
#include <dlfcn.h>
#define GETSYMBOL dlsym
#define GETERROR dlerror
#endif
// Load forward models from a shared library and register them with the
// global FwdModelFactory.
//
// The library must export three C symbols:
//   get_num_models()             - number of models provided
//   get_model_name(int)          - name of the i'th model
//   get_new_instance_func(name)  - factory function for a named model
//
// Throws InvalidOptionValue if the library or any required symbol cannot
// be resolved, or if the library returns inconsistent model information.
void FwdModel::LoadFromDynamicLibrary(const std::string &filename, EasyLog *log)
{
    FwdModelFactory *factory = FwdModelFactory::GetInstance();
    GetNumModelsFptr get_num_models;
    GetModelNameFptr get_model_name;
    GetNewInstanceFptrFptr get_new_instance_fptr;
    if (log)
        log->LogStream() << "Loading dynamic models from " << filename << endl;

// GETSYMBOL/GETERROR abstract over LoadLibrary/dlopen (see #defines above).
#ifdef _WIN32
    HINSTANCE libptr = LoadLibrary(filename.c_str());
#else
    void *libptr = dlopen(filename.c_str(), RTLD_NOW);
#endif
    if (!libptr)
    {
        throw InvalidOptionValue(
            "loadmodels", filename, string("Failed to open library ") + GETERROR());
    }

    get_num_models = (GetNumModelsFptr)GETSYMBOL(libptr, "get_num_models");
    if (!get_num_models)
    {
        throw InvalidOptionValue("loadmodels", filename,
            string("Failed to resolve symbol 'get_num_models' ") + GETERROR());
    }
    get_model_name = (GetModelNameFptr)GETSYMBOL(libptr, "get_model_name");
    if (!get_model_name)
    {
        throw InvalidOptionValue("loadmodels", filename,
            string("Failed to resolve symbol 'get_model_name' ") + GETERROR());
    }
    get_new_instance_fptr = (GetNewInstanceFptrFptr)GETSYMBOL(libptr, "get_new_instance_func");
    if (!get_new_instance_fptr)
    {
        throw InvalidOptionValue("loadmodels", filename,
            string("Failed to resolve symbol 'get_new_instance_func' ") + GETERROR());
    }

    // Register each model's factory function under its reported name.
    int num_models = get_num_models();
    if (log)
        log->LogStream() << "Loading " << num_models << " models" << endl;
    for (int i = 0; i < num_models; i++)
    {
        const char *model_name = get_model_name(i);
        if (!model_name)
        {
            throw InvalidOptionValue("loadmodels", filename,
                "Dynamic library failed to return model name for index " + stringify(i));
        }
        else
        {
            if (log)
                log->LogStream() << "Loading model " << model_name << endl;
            NewInstanceFptr new_instance_fptr = get_new_instance_fptr(model_name);
            if (!new_instance_fptr)
            {
                throw InvalidOptionValue("loadmodels", filename,
                    string("Dynamic library failed to return new instance function for model")
                        + model_name);
            }
            factory->Add(model_name, new_instance_fptr);
        }
    }
}
std::vector<std::string> FwdModel::GetKnown()
{
FwdModelFactory *factory = FwdModelFactory::GetInstance();
return factory->GetNames();
}
// Create a new instance of the named forward model via the factory.
// The caller owns the returned pointer. Throws InvalidOptionValue if
// the name is not registered.
FwdModel *FwdModel::NewFromName(const string &name)
{
    FwdModel *model = FwdModelFactory::GetInstance()->Create(name);
    if (!model)
    {
        throw InvalidOptionValue("model", name, "Unrecognized forward model");
    }
    return model;
}
// Base initialization from run options: stores the run's logger so the
// model can emit log output.
void FwdModel::Initialize(FabberRunData &args)
{
    m_log = args.GetLogger();
}
// Write usage information for the named model to the given stream:
// version, description, structured options (falling back to the legacy
// Usage() method if the model declares none) and any additional outputs.
void FwdModel::UsageFromName(const string &name, std::ostream &stream)
{
    // unique_ptr frees the temporary model instance on all exit paths.
    // (std::auto_ptr was deprecated in C++11 and removed in C++17.)
    std::unique_ptr<FwdModel> model(NewFromName(name));
    stream << name << ": " << model->ModelVersion() << endl << endl;
    stream << model->GetDescription() << endl << endl;
    stream << "Options: " << endl << endl;
    vector<OptionSpec> options;
    model->GetOptions(options);
    if (options.size() > 0)
    {
        for (vector<OptionSpec>::iterator iter = options.begin(); iter != options.end(); ++iter)
        {
            stream << *iter;
        }
    }
    else
    {
        // Legacy free-text usage for models without structured options.
        model->Usage(stream);
    }

    vector<string> outputs;
    model->GetOutputs(outputs);
    if (outputs.size() > 0)
    {
        stream << endl << "Additional outputs: " << endl << endl;
        for (vector<string>::iterator iter = outputs.begin(); iter != outputs.end(); ++iter)
        {
            if (*iter != "")
                stream << " " << *iter << endl;
        }
    }
}
// Human-readable model description; this base implementation is a
// placeholder returned when a model provides none.
string FwdModel::GetDescription() const
{
    return "No description available";
}
// Model version string; this base implementation is a placeholder
// returned when a model provides none.
string FwdModel::ModelVersion() const
{
    return "No version info available.";
}
// Legacy free-text usage output, used by UsageFromName when a model
// declares no structured options.
void FwdModel::Usage(std::ostream &stream) const
{
    stream << "No usage information available" << endl;
}
// Cache the current voxel's context before evaluation: voxel index,
// timeseries data, supplementary data and coordinates. Individual
// coordinates are also unpacked (NEWMAT vectors are 1-based).
void FwdModel::PassData(unsigned int voxel_idx, const NEWMAT::ColumnVector &voxdata, const NEWMAT::ColumnVector &voxcoords,
    const NEWMAT::ColumnVector &voxsuppdata)
{
    voxel = voxel_idx;
    data = voxdata;
    suppdata = voxsuppdata;
    coords = voxcoords;
    coord_x = coords(1);
    coord_y = coords(2);
    coord_z = coords(3);
}
void FwdModel::GetParameters(FabberRunData &rundata, vector<Parameter> ¶ms)
{
GetParameterDefaults(params);
m_params.clear();
for (vector<Parameter>::iterator p = params.begin(); p < params.end(); ++p)
{
// Complexity below is due to there being two ways of specifying
// priors. One is using the param-spatial-priors option which is
// a sequence of chars in model parameter order, one for each
// parameter. A + character means 'use the previous value for all
// remaining parameters'. An 'I' means an image prior and
// the filename is specified separately using an image-prior<n> option
string types = Prior::ExpandPriorTypesString(
rundata.GetStringDefault("param-spatial-priors", ""), params.size());
assert(types.size() == params.size());
if (types[p->idx] != PRIOR_DEFAULT)
{
p->prior_type = types[p->idx];
}
// Record the data key (filename) for an image prior. Note that the index is
// conceptually different from the PSP_byname_image method use below - here
// it is the parameter index in the model's list (starting at 1), below it depends on
// the order in which the names are given in the options.
p->options["image"] = "image-prior" + stringify(p->idx + 1);
// Determine if we have any PSP_byname options for this parameter. These override the
// options above
int psp_idx = 1;
while (true)
{
string name = rundata.GetStringDefault("PSP_byname" + stringify(psp_idx), "stop!");
if (name == "stop!")
break;
else if (name == p->name)
{
string psp_idx_str = stringify(psp_idx);
string transform_code
= rundata.GetStringDefault("PSP_byname" + psp_idx_str + "_transform", "");
if (transform_code != "")
p->transform = GetTransform(transform_code);
char prior_type = convertTo<char>(rundata.GetStringDefault(
"PSP_byname" + psp_idx_str + "_type", stringify(p->prior_type)));
if (prior_type != PRIOR_DEFAULT)
p->prior_type = prior_type;
double mean = rundata.GetDoubleDefault(
"PSP_byname" + psp_idx_str + "_mean", p->prior.mean());
double prec = rundata.GetDoubleDefault(
"PSP_byname" + psp_idx_str + "_prec", p->prior.prec());
p->prior = DistParams(mean, 1 / prec);
p->options["image"] = "PSP_byname" + psp_idx_str + "_image";
}
psp_idx++;
}
if (p->prior.prec() > 1e12) {
WARN_ONCE("Specified precision " + stringify(p->prior.prec()) + " is very high - this can trigger numerical instability. Using 1e12 instead");
p->prior = DistParams(p->prior.mean(), 1e-12);
}
// FIXME do this here, or let the priors do it?
//
// Need to transform mean/precision as specified in the model into Fabber-space
// Note that posterior is transformed in GetInitialPosterior
p->prior = p->transform->ToFabber(p->prior);
// Keep our own list of parameters
m_params.push_back(*p);
}
}
// Build the initial posterior distribution for the current voxel:
// model defaults (or a voxelwise value for image priors), followed by
// model-specific voxel initialization, then transformed to Fabber space.
void FwdModel::GetInitialPosterior(MVNDist &posterior, FabberRunData &rundata) const
{
    posterior.SetSize(m_params.size());
    // Set model defaults
    NEWMAT::SymmetricMatrix cov = posterior.GetCovariance();
    for (size_t p = 0; p < m_params.size(); p++)
    {
        if (m_params[p].prior_type == PRIOR_IMAGE)
        {
            // For an image prior we should initialize the posterior
            // from the voxelwise image
            string filename = m_params[p].options.find("image")->second;
            NEWMAT::RowVector image = rundata.GetVoxelData(filename).AsRow();
            posterior.means(p + 1) = image(voxel);
        }
        else
        {
            posterior.means(p + 1) = m_params[p].post.mean();
        }
        // Diagonal covariance only; off-diagonals are left at their
        // SetSize defaults.
        cov(p + 1, p + 1) = m_params[p].post.var();
    }
    posterior.SetCovariance(cov);

    // Do voxelwise initialization
    InitVoxelPosterior(posterior);

    // Finally, apply transforms
    ToFabber(posterior);
}
// Convert an MVN distribution from model space to Fabber (inference)
// space by applying each parameter's transform to its mean and diagonal
// variance. Off-diagonal covariance entries are not modified.
void FwdModel::ToFabber(MVNDist &mvn) const
{
    NEWMAT::SymmetricMatrix cov = mvn.GetCovariance();
    for (size_t p = 0; p < m_params.size(); p++)
    {
        mvn.means(p + 1) = m_params[p].transform->ToFabber(mvn.means(p + 1));
        cov(p + 1, p + 1) = m_params[p].transform->ToFabberVar(cov(p + 1, p + 1));
    }
    mvn.SetCovariance(cov);
}
// Convert an MVN distribution from Fabber (inference) space back to
// model space, inverse of ToFabber. Each parameter's mean/variance pair
// is transformed together; off-diagonals are not modified.
void FwdModel::ToModel(MVNDist &mvn) const
{
    NEWMAT::SymmetricMatrix cov = mvn.GetCovariance();
    for (size_t p = 0; p < m_params.size(); p++)
    {
        DistParams dp(mvn.means(p + 1), cov(p + 1, p + 1));
        dp = m_params[p].transform->ToModel(dp);
        mvn.means(p + 1) = dp.mean();
        cov(p + 1, p + 1) = dp.var();
    }
    mvn.SetCovariance(cov);
}
// Build the default parameter list from the legacy virtual interface
// (NameParams, HardcodedInitialDists, ardindices), wrapping each entry
// in a Parameter with normal prior type and the identity transform.
void FwdModel::GetParameterDefaults(vector<Parameter> &params) const
{
    params.clear();
    vector<string> names;
    // Old method of naming parameters
    NameParams(names);
    // Old method of specifying default prior and posterior
    MVNDist priors(names.size()), posts(names.size());
    HardcodedInitialDists(priors, posts);

    for (unsigned int i = 0; i < names.size(); i++)
    {
        // Only the diagonal of the hardcoded covariances is used.
        DistParams prior(priors.means(i + 1), priors.GetCovariance()(i + 1, i + 1));
        DistParams post(posts.means(i + 1), posts.GetCovariance()(i + 1, i + 1));
        Parameter p(i, names[i], prior, post, PRIOR_NORMAL, TRANSFORM_IDENTITY());

        // Old method of specifying ARD priors
        if (find(ardindices.begin(), ardindices.end(), i + 1) != ardindices.end())
        {
            p.prior_type = PRIOR_ARD;
        }
        params.push_back(p);
    }
}
// Evaluate the model with parameters given in Fabber (transformed)
// space: each parameter is converted back to model space via its
// transform before delegating to EvaluateModel.
void FwdModel::EvaluateFabber(
    const NEWMAT::ColumnVector &params, NEWMAT::ColumnVector &result, const std::string &key) const
{
    assert((m_params.size() == 0) || (int(m_params.size()) == params.Nrows()));
    if (m_params.size() == 0)
    {
        // No parameter metadata available - no transforms are applied.
        EvaluateModel(params, result, key);
    }
    else
    {
        NEWMAT::ColumnVector tparams(params.Nrows());
        for (int i = 1; i <= params.Nrows(); i++)
        {
            tparams(i) = m_params[i - 1].transform->ToModel(params(i));
        }
        EvaluateModel(tparams, result, key);
    }
}
void FwdModel::DumpParameters(const NEWMAT::ColumnVector ¶ms, const string &indent) const
{
LOG << indent << "Parameters:" << endl;
vector<string> names;
NameParams(names);
assert(int(names.size()) == params.Nrows());
for (size_t i = 1; i <= names.size(); i++)
LOG << indent << " " << names[i - 1] << " = " << params(i) << endl;
LOG << indent << "Total of " << names.size() << " parameters" << endl;
}
|
// Shared production environment settings for the Palindromo apps.
export const commonEnvironment = {
  production: true,
  // Front-end web application endpoint.
  application: {
    protocol: 'https',
    host: 'palindromo-web.herokuapp.com'
  },
  // Back-end REST API endpoint; `source` is the request path prefix.
  api: {
    source: '/api',
    host: 'palindromo-api.herokuapp.com'
  }
};
|
import org.springframework.amqp.rabbit.connection.CachingConnectionFactory;
import org.springframework.amqp.rabbit.connection.ConnectionFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.net.URISyntaxException;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
/**
 * Spring configuration exposing a RabbitMQ {@link ConnectionFactory} built
 * from the broker URI configured under {@code rabbitmq.broker-url}.
 */
@Configuration
public class RabbitMQConfig {

    /**
     * Creates a caching connection factory pointed at the configured broker.
     *
     * @param brokerUrl AMQP(S) broker URI
     * @return a {@link CachingConnectionFactory} for the broker
     * @throws NoSuchAlgorithmException if TLS setup requires an unavailable algorithm
     * @throws KeyManagementException   if TLS initialisation fails
     * @throws URISyntaxException       if the broker URI is malformed
     */
    @Bean
    public ConnectionFactory rabbitConnectionFactory(@Value("${rabbitmq.broker-url}") final String brokerUrl) throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException {
        final CachingConnectionFactory factory = new CachingConnectionFactory();
        factory.setUri(brokerUrl);
        return factory;
    }
}
|
# Install the bundled mysql_run helper onto the PATH by symlinking it from
# this checkout into /usr/local/bin. Uses $(...) instead of legacy backticks
# and quotes the expansion so paths containing spaces work.
VIM_MYSQL=$(pwd)
ln -sf "${VIM_MYSQL}/plugin/mysql_run" /usr/local/bin/mysql_run
|
package com.wanshare.wscomponent.update.contract;

import com.wanshare.wscomponent.update.model.VersionEntity;

/**
 * MVP contract between the main screen and its presenter for the
 * app-update (version check) feature.
 */
public class MainContract {

    /** View side: renders the version information supplied by the presenter. */
    public interface View {
        void showVersion(VersionEntity version);
    }

    /** Presenter side: fetches the latest version for the given equipment type. */
    public interface Presenter {
        void getVersion(Integer equipType);
    }
}
|
#!/bin/sh
# Integration Tests for Simple Menu Twimlet/Funlet
#
# Parameter:
#   $1 - URL of a deployed instance of the Simple Menu Twimlet/Funlet
#
# Uses:
#   * curl - transfer a URL
#   * xmllint - command line XML tool
#

# Base URL of the deployed Twimlet/Funlet under test
url="$1"

# Function: indentXml()
# Read XML on stdin and pretty-print it with consistent indentation.
indentXml(){
  xmllint --format -
}
# Join HTTP parameters with '&'
# and save them into $params.
#
# Parameters:
#   $* - list of key or key=value HTTP parameters
#
# Implementation note: "$*" joins positional parameters with the first
# character of IFS, so IFS is temporarily set to '&' and then restored.
joinParams(){
  regularIFS="$IFS"
  IFS=\&
  params="$*"
  IFS="$regularIFS"
}
# Function: query()
# Query the Twimlet/Funlet using given HTTP parameters
#
# Parameters:
#   $* - list of key or key=value HTTP parameters, properly URL-encoded
#
# Output:
#   The response body, pretty-printed as indented XML on stdout.
query(){
  joinParams "$@"
  curl -s "$url"?"$params" | indentXml
}
# Function: showRedirect()
# Alternative to query() for use when a redirect is expected instead of TwiML.
# If the `location` header is found, displays the redirect URL.
# Otherwise, display the output of the query, indented as XML,
# expecting to display a TwiML <Redirect/> instead.
#
# Parameters:
#   $* - list of key or key=value HTTP parameters, properly URL-encoded
showRedirect()
{
  joinParams "$@"
  # -w appends "Redirect: <url>" after the body; the URL is empty when no
  # HTTP redirect occurred.
  response="$( curl -s -w 'Redirect: %{redirect_url}' "$url"?"$params" )"
  case "$response" in
    # Response is only the trailer (empty body): show the redirect line as-is.
    'Redirect: '*) echo "$response" ;;
    # Otherwise strip the empty trailer and pretty-print the TwiML body.
    *) echo "$response" | sed 's/Redirect: $//' | indentXml
  esac
}
# Require the target URL argument.
if test -z "$url"
then
  echo "Usage: $0 url" >&2
  echo "for example: $0 'https://twimlets.com/menu'"
  exit 1
fi

echo '[SIMPLE-MENU-1-1] Recorded Message'
recordedMessage='https%3A%2F%2Fexample.com%2Frecorded-message.mp3'
query Message="$recordedMessage"
echo

echo '[SIMPLE-MENU-1-2] Text Message'
textMessage='Text%20message'
query Message="$textMessage"
echo

echo '[SIMPLE-MENU-1-3] Multiple Digits to Gather'
# URL-encoded "Options[12345]" mapped to an action URL.
option12345='Options%5B12345%5D'
action12345='https%3A%2F%2Fexample.com%2F12345'
query Message="$textMessage" "$option12345"="$action12345"
echo

echo '[SIMPLE-MENU-2-1] Digits Pressed Match an Option'
showRedirect Digits=12345 "$option12345"="$action12345"
echo

echo '[SIMPLE-MENU-2-2] Digits Pressed Do Not Match Any Option'
query Digits=42
|
package bomberman.server;
import java.util.Random;
import bomberman.server.item.InterfaceItem;
import bomberman.server.enemy.InterfaceEnemy;
import bomberman.Constants;
/**
 * Server-side model of one Bomberman level: a 17x17 grid of squares made of
 * an indestructible border, indestructible pillar walls on even/even
 * coordinates and randomly placed destructible bricks, with the player's
 * starting corner kept clear.
 */
public class Map {

    // Attributes
    private int width;
    private int height;
    private int initialX;
    private int initialY;
    private Random r;
    private int level;
    private InterfaceItem[] items;
    private InterfaceEnemy[] npcs;
    private Square[][] map = new Square[17][17];

    /**
     * Creates a map and immediately generates a random layout.
     *
     * @param width    map width in squares
     * @param height   map height in squares
     * @param initialX player starting column
     * @param initialY player starting row
     * @param r        random source used for layout generation
     * @param level    level number this map belongs to
     */
    public Map(int width, int height, int initialX, int initialY, Random r, int level) {
        setWidth(width);
        setHeight(height);
        setInitialX(initialX);
        setInitialY(initialY);
        setR(r);
        setLevel(level);
        setMap(map);
    }

    // Getters and setters

    public int getWidth() {
        return width;
    }

    public void setWidth(int width) {
        this.width = width;
    }

    public int getHeight() {
        return height;
    }

    public void setHeight(int height) {
        this.height = height;
    }

    public int getInitialX() {
        return initialX;
    }

    public void setInitialX(int initialX) {
        this.initialX = initialX;
    }

    public int getInitialY() {
        return initialY;
    }

    public void setInitialY(int initialY) {
        this.initialY = initialY;
    }

    public Random getR() {
        return r;
    }

    public void setR(Random r) {
        this.r = r;
    }

    public int getLevel() {
        return level;
    }

    public void setLevel(int level) {
        this.level = level;
    }

    public InterfaceItem[] getItems() {
        return items;
    }

    public void setItems(InterfaceItem[] items) {
        this.items = items;
    }

    public InterfaceEnemy[] getNpcs() {
        return npcs;
    }

    public void setNpcs(InterfaceEnemy[] npcs) {
        this.npcs = npcs;
    }

    public Square[][] getMap() {
        return map;
    }

    /**
     * Regenerates this map's grid with a fresh random layout.
     *
     * <p>NOTE: the argument is ignored and a brand new grid is always
     * generated. The original implementation immediately reassigned the
     * parameter; the signature is kept for backward compatibility.</p>
     *
     * @param map ignored
     */
    public void setMap(Square[][] map) {
        this.map = generateGrid();
    }

    /**
     * Builds a new grid: all squares walkable, interior filled with bricks,
     * roughly half of the bricks removed at random, start corner cleared,
     * then pillar and border walls placed on top.
     */
    private Square[][] generateGrid() {
        Square[][] grid = new Square[Constants.M_WIDTH][Constants.M_HEIGHT];
        // Start with every square walkable; non-walkable types are set below.
        for (int i = 0; i < grid.length; i++) {
            for (int j = 0; j < grid[i].length; j++) {
                grid[i][j] = new Square(Constants.S_NONE);
            }
        }
        // Fill the interior with destructible bricks (S_BRICKS)...
        for (int i = 1; i < 16; i++) {
            for (int j = 1; j < 16; j++) {
                grid[i][j].setType(Constants.S_BRICKS);
            }
        }
        // ...then knock roughly half of them out again at random. Uses the
        // injected seeded Random so layouts are reproducible; the original
        // called Math.random() and never used the injected source.
        Random rng = (r != null) ? r : new Random();
        for (int i = 1; i < 16; i++) {
            for (int j = 1; j < 16; j++) {
                if (grid[i][j].getType() == Constants.S_BRICKS && rng.nextInt(2) == 0) {
                    grid[i][j].setType(Constants.S_NONE);
                }
            }
        }
        // Keep the player's starting corner free of bricks.
        grid[1][1].setType(Constants.S_NONE);
        grid[1][2].setType(Constants.S_NONE);
        grid[2][1].setType(Constants.S_NONE);
        // Indestructible pillar walls (S_WALL) on every even/even coordinate.
        for (int i = 0; i < grid.length; i = i + 2) {
            for (int j = 0; j < grid[i].length; j = j + 2) {
                grid[i][j].setType(Constants.S_WALL);
            }
        }
        // Indestructible border all the way around the map.
        for (int i = 0; i < grid.length; i++) {
            grid[0][i].setType(Constants.S_WALL);
            grid[i][0].setType(Constants.S_WALL);
            grid[16][i].setType(Constants.S_WALL);
            grid[i][16].setType(Constants.S_WALL);
        }
        return grid;
    }
}
|
<filename>misc/python/materialize/cli/mzconduct.py
# Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
#
# mzconduct.py - Conduct the runtime behavior of mzcompose compositions
import contextlib
import itertools
import json
import os
import random
import re
import shlex
import subprocess
import time
import webbrowser
from datetime import datetime, timezone
from pathlib import Path
from threading import Thread
from typing import (
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Match,
Optional,
Type,
TypeVar,
Union,
cast,
)
from typing_extensions import Literal
import click
import pg8000 # type: ignore
import pymysql
import yaml
from materialize import spawn
from materialize import ui
from materialize.errors import (
BadSpec,
Failed,
MzRuntimeError,
UnknownItem,
error_handler,
)
T = TypeVar("T")
say = ui.speaker("C>")
_BASHLIKE_ENV_VAR_PATTERN = re.compile(
r"""\$\{
(?P<var>[^:}]+)
(?P<default>:-[^}]+)?
\}""",
re.VERBOSE,
)
@click.group(context_settings=dict(help_option_names=["-h", "--help"]))
def cli() -> None:
    """Conduct composed docker services"""


@cli.command()
@click.option("-w", "--workflow", help="The name of a workflow to run")
@click.argument("composition")
@click.argument("services", nargs=-1)
def up(composition: str, workflow: Optional[str], services: Iterable[str],) -> None:
    """Conduct a docker-composed set of services

    With the --workflow flag, perform all the steps in the workflow together.
    """
    comp = Composition.find(composition)
    if workflow is not None:
        say(f"Executing {comp.name} -> {workflow}")
        if services:
            # Workflows define their own services; an explicit list is ignored.
            serv = " ".join(services)
            say(f"WARNING: services list specified with -w, ignoring: {serv}")
        comp.run_workflow(workflow)
    else:
        comp.up(list(services))
@cli.command()
@click.option("-w", "--workflow", help="The name of a workflow to run")
@click.argument("composition")
@click.argument("services", nargs=-1)
def run(composition: str, workflow: Optional[str], services: Iterable[str],) -> None:
    """Conduct a docker-composed set of services

    With the --workflow flag, perform all the steps in the workflow together.
    """
    # Same shape as `up`, but delegates to `mzcompose run` (one-off
    # containers) rather than `mzcompose up`.
    comp = Composition.find(composition)
    if workflow is not None:
        say(f"Executing {comp.name} -> {workflow}")
        if services:
            serv = " ".join(services)
            say(f"WARNING: services list specified with -w, ignoring: {serv}")
        comp.run_workflow(workflow)
    else:
        comp.run(list(services))
@cli.command()
@click.argument("composition")
def build(composition: str) -> None:
    """Build the images for a composition with `mzcompose build`"""
    Composition.find(composition).build()


@cli.command()
@click.option("-v", "--volumes", is_flag=True, help="Also destroy volumes")
@click.argument("composition")
def down(composition: str, volumes: bool) -> None:
    """Tear down a composition with `mzcompose down`"""
    comp = Composition.find(composition)
    comp.down(volumes)


@cli.command()
@click.argument("composition")
def ps(composition: str) -> None:
    """List a composition's containers with `mzcompose ps`"""
    comp = Composition.find(composition)
    comp.ps()
# Non-mzcompose commands


@cli.command()
@click.argument("composition")
def help_workflows(composition: str) -> None:
    """Help on available workflows in DEMO"""
    comp = Composition.find(composition)
    print("Workflows available:")
    for workflow in comp.workflows():
        print(f"  {workflow.name}")


@cli.command()
@click.argument("composition")
def nuke(composition: str) -> None:
    """Destroy everything docker, stopping composition before trying"""
    comp = Composition.find(composition)
    comp.down()
    # Prune all images/containers/volumes, not just this composition's.
    cmds = ["docker system prune -af".split(), "docker volume prune -f".split()]
    for cmd in cmds:
        spawn.runv(cmd, capture_output=True)


@cli.command()
@click.argument("composition")
@click.argument("service")
def web(composition: str, service: str) -> None:
    """
    Attempt to open a service in a web browser

    This parses the output of `mzconduct ps` and tries to find the right way to open a
    web browser for you.
    """
    comp = Composition.find(composition)
    comp.web(service)


@cli.group()
def show() -> None:
    """Show properties of a composition"""


@show.command()
@click.argument("composition")
def dir(composition: str) -> None:
    """Show the directory that this composition is in"""
    print(str(Composition.find(composition).path))
# Composition Discovery


class Composition:
    """Information about an mzcompose instance

    This includes its location, and all the workflows it knows about.
    """

    # Lazily-populated cache of all discovered compositions, keyed by name.
    _demos: Optional[Dict[str, "Composition"]] = None

    def __init__(self, name: str, path: Path, workflows: "Workflows") -> None:
        self.name = name
        self._path = path
        self._workflows = workflows

    def __str__(self) -> str:
        return (
            f"Composition<{self.name}, {self._path}, {len(self.workflows())} workflows>"
        )

    @property
    def path(self) -> Path:
        return self._path

    def workflow(self, workflow: str) -> "Workflow":
        """Get a workflow by name"""
        return self._workflows[workflow]

    def workflows(self) -> Collection["Workflow"]:
        """All workflows known to this composition"""
        return self._workflows.all_workflows()

    def up(self, services: List[str]) -> None:
        """Run `mzcompose up` for the given services in this composition's directory"""
        with cd(self._path):
            try:
                mzcompose_up(services)
            except subprocess.CalledProcessError:
                raise Failed("error when bringing up all services")

    def build(self) -> None:
        """run mzcompose build in this directory"""
        with cd(self._path):
            spawn.runv(["./mzcompose", "--mz-quiet", "build"])

    def run(self, services: List[str]) -> None:
        """run mzcompose run in this directory"""
        with cd(self._path):
            try:
                mzcompose_run(services)
            except subprocess.CalledProcessError:
                raise Failed("error when bringing up all services")

    def down(self, volumes: bool = False) -> None:
        """run mzcompose down in this directory"""
        with cd(self._path):
            mzcompose_down(volumes)

    def ps(self) -> None:
        """run mzcompose ps in this directory"""
        with cd(self._path):
            spawn.runv(["./mzcompose", "--mz-quiet", "ps"])

    def run_workflow(self, workflow: str) -> None:
        """Run a named workflow, raising `UnknownItem` if it does not exist"""
        with cd(self._path):
            try:
                workflow_ = self._workflows[workflow]
            except KeyError:
                raise UnknownItem("workflow", workflow, self._workflows.names())
            # Top-level invocation: there is no parent workflow.
            workflow_.run(self, None)

    def web(self, service: str) -> None:
        """Best effort attempt to open the service in a web browser"""
        with cd(self._path):
            ports = self.find_host_ports(service)
            if len(ports) == 1:
                webbrowser.open(f"http://localhost:{ports[0]}")
            elif not ports:
                raise MzRuntimeError(f"No running services matched {service}")
            else:
                raise MzRuntimeError(f"Too many ports matched {service}, found: {ports}")

    @classmethod
    def find(cls, comp: str) -> "Composition":
        """Try to find a configured comp

        Raises:
            `UnknownItem`: if the composition cannot be discovered
        """
        if cls._demos is None:
            cls._demos = cls.load()
        try:
            return cls._demos[comp]
        except KeyError:
            raise UnknownItem("composition", comp, Composition.known_compositions())

    @classmethod
    def known_compositions(cls) -> Collection[str]:
        """Names of every composition discovered in the repo"""
        if cls._demos is None:
            cls._demos = cls.load()
        return cls._demos.keys()

    @staticmethod
    def load() -> Dict[str, "Composition"]:
        """Load all demos in the repo"""
        compositions = {}
        compose_files = itertools.chain(
            Path("demo").glob("*/mzcompose.yml"),
            Path("test").glob("*/mzcompose.yml"),
            Path("test/performance").glob("*/mzcompose.yml"),
        )
        for mzcompose in compose_files:
            with mzcompose.open() as fh:
                mzcomp = yaml.safe_load(fh)
            # Default the composition name to its directory name; an
            # "mzconduct" section may override it.
            name = mzcompose.parent.name
            raw_comp = mzcomp.get("mzconduct")
            workflows = {}
            if raw_comp is not None:
                # TODO: move this into the workflow so that it can use env vars that are
                # manually defined.
                raw_comp = _substitute_env_vars(raw_comp)
                name = raw_comp.get("name", name)
                for workflow_name, raw_w in raw_comp["workflows"].items():
                    built_steps = []
                    for raw_step in raw_w["steps"]:
                        # The "step" key selects a registered WorkflowStep
                        # class; the remaining keys (with '-' mapped to '_')
                        # become constructor keyword arguments.
                        step_name = raw_step.pop("step")
                        step_ty = Steps.named(step_name)
                        munged = {k.replace("-", "_"): v for k, v in raw_step.items()}
                        try:
                            step = step_ty(**munged)
                        except TypeError as e:
                            a = " ".join([f"{k}={v}" for k, v in munged.items()])
                            raise BadSpec(
                                f"Unable to construct {step_name} with args {a}: {e}"
                            )
                        built_steps.append(step)
                    env = raw_w.get("env")
                    if not isinstance(env, dict) and env is not None:
                        raise BadSpec(
                            f"Workflow {workflow_name} has wrong type for env: "
                            f"expected mapping, got {type(env).__name__}: {env}",
                        )
                    # ensure that integers (e.g. ports) are treated as env vars
                    if isinstance(env, dict):
                        env = {k: str(v) for k, v in env.items()}
                    workflows[workflow_name] = Workflow(
                        workflow_name,
                        built_steps,
                        env=env,
                        include_compose=raw_w.get("include_compose"),
                    )
            compositions[name] = Composition(
                name, mzcompose.parent, Workflows(workflows)
            )
        return compositions

    def find_host_ports(self, service: str) -> List[str]:
        """Find all ports open on the host for a given service
        """
        # Parsing the output of `docker-compose ps` directly is fraught, as the
        # output depends on terminal width (!). Using the `-q` flag is safe,
        # however, and we can pipe the container IDs into `docker inspect`,
        # which supports machine-readable output.
        containers = spawn.capture(
            ["./mzcompose", "--mz-quiet", "ps", "-q"], unicode=True
        ).splitlines()
        metadata = spawn.capture(
            ["docker", "inspect", "-f", "{{json .}}", *containers,]
        )
        metadata = [json.loads(line) for line in metadata.splitlines()]
        ports = []
        for md in metadata:
            if md["Config"]["Labels"]["com.docker.compose.service"] == service:
                for (name, port_entry) in md["NetworkSettings"]["Ports"].items():
                    for p in port_entry or []:
                        ports.append(p["HostPort"])
        return ports

    def get_container_id(self, service: str, running: bool = False) -> str:
        """Given a service name, tries to find a unique matching container id

        If running is True, only return running containers.
        """
        try:
            if running:
                cmd = f"docker ps".split()
            else:
                cmd = f"docker ps -a".split()
            list_containers = spawn.capture(cmd, unicode=True)
            # Container id is the first column; the service name may appear
            # anywhere later in the line.
            pattern = re.compile(f"^(?P<c_id>[^ ]+).*{service}")
            matches = []
            for line in list_containers.splitlines():
                m = pattern.search(line)
                if m:
                    matches.append(m.group("c_id"))
            if len(matches) != 1:
                raise Failed(
                    f"failed to get a unique container id for service {service}, found: {matches}"
                )
            return matches[0]
        except subprocess.CalledProcessError as e:
            raise Failed(f"failed to get container id for {service}: {e}")

    def docker_inspect(self, format: str, container_id: str) -> str:
        """Return the first output line of `docker inspect -f <format>` for a container"""
        try:
            cmd = f"docker inspect -f '{format}' {container_id}".split()
            output = spawn.capture(cmd, unicode=True, stderr_too=True).splitlines()[0]
        except subprocess.CalledProcessError as e:
            ui.log_in_automation(
                "docker inspect ({}): error running {}: {}, stdout:\n{}\nstderr:\n{}".format(
                    container_id, ui.shell_quote(cmd), e, e.stdout, e.stderr,
                )
            )
            raise Failed(f"failed to inspect Docker container: {e}")
        else:
            return output

    def docker_container_is_running(self, container_id: str) -> bool:
        # The format string is wrapped in literal single quotes by
        # docker_inspect's command construction, so the captured output is
        # quoted too -- hence the comparison against "'true'".
        return self.docker_inspect("{{.State.Running}}", container_id) == "'true'"
def _substitute_env_vars(val: T) -> T:
    """Recursively substitute docker-compose style env vars in a value.

    Strings are interpolated, dicts are updated in place, lists are rebuilt;
    any other type is returned unchanged. This is necessary for mzconduct,
    since its parameters are not handled by docker-compose.
    """
    if isinstance(val, str):
        return cast(T, _BASHLIKE_ENV_VAR_PATTERN.sub(_subst, val))
    if isinstance(val, dict):
        for key in val:
            val[key] = _substitute_env_vars(val[key])
        return val
    if isinstance(val, list):
        return cast(T, [_substitute_env_vars(item) for item in val])
    return val
def _subst(match: Match) -> str:
    """Resolve one ${VAR} / ${VAR:-default} match against the environment.

    Set variables win over defaults; an unset variable with no default is
    left untouched (and a warning is printed).
    """
    var = match.group("var")
    if var is None:
        raise BadSpec(f"Unable to parse environment variable {match.group(0)}")
    # https://github.com/python/typeshed/issues/3902
    default = cast(Optional[str], match.group("default"))
    env_val = os.getenv(var)
    if env_val is not None:
        return env_val
    if default is None:
        say(f"WARNING: unknown env var {var!r}")
        return cast(str, match.group(0))
    # strip the leading ":-"
    return default[2:]
class Workflows:
    """Registry of every workflow defined inside a single Composition."""

    def __init__(self, workflows: Dict[str, "Workflow"]) -> None:
        # Keyed by workflow name, as written in the mzcompose.yml file.
        self._inner = workflows

    def __getitem__(self, workflow: str) -> "Workflow":
        """Look up one workflow by name (raises KeyError if absent)."""
        return self._inner[workflow]

    def names(self) -> Collection[str]:
        """The names of all known workflows."""
        return self._inner.keys()

    def all_workflows(self) -> Collection["Workflow"]:
        """Every known workflow object."""
        return self._inner.values()
class Workflow:
    """A named sequence of WorkflowSteps plus its execution context.

    It is possible to specify additional compose files for specific workflows,
    and all their child workflows will have access to services defined in
    those files.
    """

    def __init__(
        self,
        name: str,
        steps: List["WorkflowStep"],
        env: Optional[Dict[str, str]] = None,
        include_compose: Optional[List[str]] = None,
    ) -> None:
        self.name = name
        self.include_compose = include_compose or []
        self.env = {} if env is None else env
        self._steps = steps
        self._parent: Optional["Workflow"] = None

    def overview(self) -> str:
        """One-line summary: name, step names and compose-file dependencies."""
        step_names = " ".join(step.name for step in self._steps)
        deps = ""
        if self.include_compose:
            deps = " [depends on {}]".format(",".join(self.include_compose))
        return "{} [{}]{}".format(self.name, step_names, deps)

    def __repr__(self) -> str:
        return "Workflow<{}>".format(self.overview())

    def with_parent(self, parent: Optional["Workflow"]) -> "Workflow":
        """Create a new workflow from this one, but with access to the
        properties on the parent (this workflow's env wins on conflicts).
        """
        merged_env: Dict[str, str] = {}
        if parent is not None:
            merged_env.update(parent.env)
        merged_env.update(self.env)
        child = Workflow(self.name, self._steps, merged_env, self.include_compose)
        child._parent = parent
        return child

    def _include_compose(self) -> List[str]:
        """Compose files attached to this workflow and all of its ancestors."""
        files = list(self.include_compose)
        if self._parent is not None:
            files.extend(self._parent._include_compose())
        return files

    # Commands

    def run(self, comp: "Composition", parent_workflow: Optional["Workflow"]) -> None:
        """Execute every step, giving each a context chained to the parent."""
        for step in self._steps:
            step.run(comp, self.with_parent(parent_workflow))

    def mzcompose_up(self, services: List[str]) -> None:
        """Invoke `mzcompose up` with this workflow's env and compose files."""
        mzcompose_up(services, self._docker_extra_args(), extra_env=self.env)

    def mzcompose_run(self, services: List[str], service_ports: bool = True) -> None:
        """Invoke `mzcompose run` with this workflow's env and compose files."""
        mzcompose_run(
            services,
            self._docker_extra_args(),
            service_ports=service_ports,
            extra_env=self.env,
        )

    def _docker_extra_args(self) -> List[str]:
        """Additional docker arguments specified by this workflow context.

        The base mzcompose.yml always comes first so workflow-specific files
        can extend it.
        """
        args = ["-f", "./mzcompose.yml"]
        for compose_file in self._include_compose():
            args.extend(["-f", compose_file])
        return args
class Steps:
    """A registry of named `WorkflowStep`_"""

    _steps: Dict[str, Type["WorkflowStep"]] = {}

    @classmethod
    def named(cls, name: str) -> Type["WorkflowStep"]:
        """Look up a registered step class by its workflow-file name."""
        try:
            return cls._steps[name]
        except KeyError:
            raise UnknownItem("step", name, list(cls._steps))

    @classmethod
    def register(cls, name: str) -> Callable[[Type[T]], Type[T]]:
        """Class decorator that registers a WorkflowStep subclass under `name`."""
        if name in cls._steps:
            raise ValueError(f"Double registration of step name: {name}")

        def reg(to_register: Type[T]) -> Type[T]:
            if not issubclass(to_register, WorkflowStep):
                raise ValueError(
                    f"Registered step must be a WorkflowStep: {to_register}"
                )
            cls._steps[name] = to_register
            # Remember the registration name on the class itself.
            to_register.name = name
            return to_register  # type: ignore

        return reg

    @classmethod
    def print_known_steps(cls) -> None:
        """Print all steps registered with `register`_"""
        for name in sorted(cls._steps):
            print(name)
class WorkflowStep:
    """Perform a single action in a workflow"""

    # populated by Steps.register
    name: str
    """The name used to refer to this step in a workflow file"""

    def __init__(self, **kwargs: Any) -> None:
        # The base class accepts and ignores arbitrary kwargs; subclasses
        # declare the parameters they actually support.
        pass

    def run(self, comp: Composition, workflow: Workflow) -> None:
        """Perform the action specified by this step"""
@Steps.register("print-env")
class PrintEnvStep(WorkflowStep):
    """Prints the `env` `Dict` for this workflow.

    Takes no parameters; useful for debugging workflow configuration.
    """

    def __init__(self) -> None:
        pass

    def run(self, comp: Composition, workflow: Workflow) -> None:
        print("Workflow has environment of", workflow.env)
@Steps.register("start-services")
class StartServicesStep(WorkflowStep):
    """Start the named services with `mzcompose up`.

    Params:
      services: List of service names
    """

    def __init__(self, *, services: Optional[List[str]] = None) -> None:
        self._services = services if services is not None else []
        if not isinstance(self._services, list):
            raise BadSpec(f"services should be a list, got: {self._services}")

    def run(self, comp: Composition, workflow: Workflow) -> None:
        try:
            workflow.mzcompose_up(self._services)
        except subprocess.CalledProcessError:
            services = ", ".join(self._services)
            raise Failed(f"ERROR: services didn't come up cleanly: {services}")
@Steps.register("stop-services")
class StopServicesStep(WorkflowStep):
    """Stop the named services with `mzcompose stop`.

    Params:
      services: List of service names
    """

    def __init__(self, *, services: Optional[List[str]] = None) -> None:
        self._services = services if services is not None else []
        if not isinstance(self._services, list):
            raise BadSpec(f"services should be a list, got: {self._services}")

    def run(self, comp: Composition, workflow: Workflow) -> None:
        try:
            mzcompose_stop(self._services)
        except subprocess.CalledProcessError:
            services = ", ".join(self._services)
            # Fixed: the message previously said "didn't come up cleanly",
            # copy-pasted from StartServicesStep; this step stops services.
            raise Failed(f"ERROR: services didn't stop cleanly: {services}")
@Steps.register("wait-for-postgres")
class WaitForPgStep(WorkflowStep):
    """
    Args:
        dbname: the name of the database to wait for
        host: the host postgres is listening on
        port: the port postgres is listening on (default: discover the host port)
        timeout_secs: How long to wait for postgres to be up before failing (Default: 30)
        query: The query to execute to ensure that it is running (Default: "Select 1")
        user: The chosen user (this is only relevant for postgres)
        expected: Expected query result ([[1]] by default), or "any"
        print_result: If True, print the query result
        service: The service that postgres is running as (Default: postgres)
    """

    def __init__(
        self,
        *,
        dbname: str,
        port: Optional[int] = None,
        host: str = "localhost",
        timeout_secs: int = 30,
        query: str = "SELECT 1",
        user: str = "postgres",
        expected: Union[Iterable[Any], Literal["any"]] = [[1]],
        print_result: bool = False,
        service: str = "postgres",
    ) -> None:
        self._dbname = dbname
        self._host = host
        self._port = port
        self._user = user
        self._timeout_secs = timeout_secs
        self._query = query
        self._expected = expected
        self._print_result = print_result
        self._service = service

    def run(self, comp: Composition, workflow: Workflow) -> None:
        if self._port is None:
            # Discover the single host port published by the service.
            ports = comp.find_host_ports(self._service)
            if len(ports) != 1:
                raise Failed(
                    f"Unable to unambiguously determine port for {self._service}, "
                    f"found ports: {','.join(ports)}"
                )
            port = int(ports[0])
        else:
            port = self._port
        wait_for_pg(
            dbname=self._dbname,
            host=self._host,
            port=port,
            timeout_secs=self._timeout_secs,
            query=self._query,
            user=self._user,
            expected=self._expected,
            print_result=self._print_result,
        )
@Steps.register("wait-for-mz")
class WaitForMzStep(WaitForPgStep):
    """Same thing as wait-for-postgres, but with materialized defaults
    """

    def __init__(
        self,
        *,
        dbname: str = "materialize",
        host: str = "localhost",
        port: Optional[int] = None,
        timeout_secs: int = 10,
        query: str = "SELECT 1",
        expected: Union[Iterable[Any], Literal["any"]] = [[1]],
        print_result: bool = False,
        service: str = "materialized",
    ) -> None:
        # Delegates entirely to WaitForPgStep with materialized-flavored
        # defaults (dbname/service/timeout).
        super().__init__(
            dbname=dbname,
            host=host,
            port=port,
            timeout_secs=timeout_secs,
            query=query,
            expected=expected,
            print_result=print_result,
            service=service,
        )
@Steps.register("wait-for-mysql")
class WaitForMysqlStep(WorkflowStep):
    """
    Params:
        host: The host mysql is running on
        port: The port mysql is listening on (Default: discover host port)
        user: The user to connect as (Default: root)
        password: The password to use (Default: <PASSWORD>)
        timeout_secs: How long to wait before failing (Default: 10)
        service: The name mysql is running as (Default: mysql)
    """

    def __init__(
        self,
        *,
        user: str = "root",
        password: str = "<PASSWORD>",
        host: str = "localhost",
        port: Optional[int] = None,
        timeout_secs: int = 10,
        service: str = "mysql",
    ) -> None:
        self._user = user
        self._password = password
        self._host = host
        self._port = port
        self._timeout_secs = timeout_secs
        self._service = service

    def run(self, comp: Composition, workflow: Workflow) -> None:
        if self._port is None:
            # Discover the single host port published by the service.
            ports = comp.find_host_ports(self._service)
            if len(ports) != 1:
                raise Failed(
                    f"Could not unambiguously determine port for {self._service} "
                    f"found: {','.join(ports)}"
                )
            port = int(ports[0])
        else:
            port = self._port
        wait_for_mysql(
            user=self._user,
            passwd=self._password,
            host=self._host,
            port=port,
            timeout_secs=self._timeout_secs,
        )
@Steps.register("run-mysql")
class RunMysql(WorkflowStep):
    """Execute a SQL query against a MySQL service.

    Params:
        host: The host mysql is running on
        port: The port mysql is listening on (Default: discover host port)
        user: The user to connect as (Default: root)
        password: The password to use (Default: <PASSWORD>)
        service: The name mysql is running as (Default: mysql)
        query: The query to execute
    """

    def __init__(
        self,
        *,
        user: str = "root",
        password: str = "<PASSWORD>",
        host: str = "localhost",
        port: Optional[int] = None,
        service: str = "mysql",
        query: str,
    ) -> None:
        self._user = user
        self._password = password
        self._host = host
        self._port = port
        self._service = service
        self._query = query

    def run(self, comp: Composition, workflow: Workflow) -> None:
        if self._port is None:
            # Discover the single host port published by the service.
            ports = comp.find_host_ports(self._service)
            if len(ports) != 1:
                raise Failed(
                    f"Could not unambiguously determine port for {self._service} "
                    f"found: {','.join(ports)}"
                )
            port = int(ports[0])
        else:
            port = self._port
        conn = pymysql.connect(
            user=self._user,
            passwd=self._password,
            host=self._host,
            port=port,
            # Allow multi-statement scripts in a single execute() call.
            client_flag=pymysql.constants.CLIENT.MULTI_STATEMENTS,
            autocommit=True,
        )
        with conn.cursor() as cur:
            cur.execute(self._query)
@Steps.register("wait-for-tcp")
class WaitForTcpStep(WorkflowStep):
    """Wait for a tcp port to be open inside a container

    Params:
        host: The host that is available inside the docker network
        port: the port to connect to
        timeout_secs: How long to wait (default: 30)
    """

    def __init__(
        self, *, host: str = "localhost", port: int, timeout_secs: int = 30
    ) -> None:
        self._host = host
        self._port = port
        self._timeout_secs = timeout_secs

    def run(self, comp: Composition, workflow: Workflow) -> None:
        ui.progress(
            f"waiting for {self._host}:{self._port}", "C",
        )
        for remaining in ui.timeout_loop(self._timeout_secs):
            # Probe from a throwaway container attached to the composition's
            # network; bash's /dev/tcp pseudo-device performs the connect.
            cmd = f"docker run --rm -t --network {comp.name}_default ubuntu:bionic-20200403".split()
            cmd.extend(
                [
                    "timeout",
                    str(self._timeout_secs),
                    "bash",
                    "-c",
                    f"cat < /dev/null > /dev/tcp/{self._host}/{self._port}",
                ]
            )
            try:
                spawn.capture(cmd, unicode=True, stderr_too=True)
            except subprocess.CalledProcessError as e:
                ui.log_in_automation(
                    "wait-for-tcp ({}:{}): error running {}: {}, stdout:\n{}\nstderr:\n{}".format(
                        self._host,
                        self._port,
                        ui.shell_quote(cmd),
                        e,
                        e.stdout,
                        e.stderr,
                    )
                )
                ui.progress(" {}".format(int(remaining)))
            else:
                ui.progress(" success!", finish=True)
                return
        raise Failed(f"Unable to connect to {self._host}:{self._port}")
@Steps.register("drop-kafka-topics")
class DropKafkaTopicsStep(WorkflowStep):
    """Delete Kafka topics matching a pattern via `kafka-topics` inside the
    broker container.

    Params:
        kafka_container: name of the running Kafka container
        topic_pattern: topic name/pattern to delete
    """

    def __init__(self, *, kafka_container: str, topic_pattern: str) -> None:
        self._container = kafka_container
        self._topic_pattern = topic_pattern

    def run(self, comp: Composition, workflow: Workflow) -> None:
        say(f"dropping kafka topics {self._topic_pattern} from {self._container}")
        try:
            spawn.runv(
                [
                    "docker",
                    "exec",
                    "-t",
                    self._container,
                    "kafka-topics",
                    "--delete",
                    "--bootstrap-server",
                    "localhost:9092",
                    "--topic",
                    self._topic_pattern,
                ]
            )
        except subprocess.CalledProcessError as e:
            # generally this is fine, it just means that the topics already don't exist
            say(f"INFO: error purging topics: {e}")
@Steps.register("random-chaos")
class RandomChaos(WorkflowStep):
"""
Add random chaos to running Docker containers.
:param chaos: List containing types of chaos to add. If not provided,
will default to 'default_chaos'.
:param services: List of target Docker services for chaos. If not provided,
will default to all running Docker services.
:param other_service: Chaos will be randomly added to Docker services as long as
'other_service' is running. If not provided, chaos will be
added forever.
"""
default_chaos = [
"pause",
"stop",
"kill",
"delay",
"rate",
"loss",
"duplicate",
"corrupt",
]
def __init__(
self, chaos: List[str] = [], services: List[str] = [], other_service: str = "",
):
self._chaos = chaos
self._services = services
self._other_service = other_service
@staticmethod
def get_docker_processes(running: bool = False) -> str:
"""
Use 'docker ps' to return all Docker process information.
:param running: If True, only return running processes.
:return: str of processes
"""
try:
if running:
cmd = f"docker ps".split()
else:
cmd = f"docker ps -a".split()
return spawn.capture(cmd, unicode=True)
except subprocess.CalledProcessError as e:
raise Failed(f"failed to get Docker container ids: {e}")
def get_container_ids(
self, services: List[str] = [], running: bool = False
) -> List[str]:
"""
Parse Docker processes for container ids.
:param services: If provided, only return container ids for these services.
:param running: If True, only return container ids of running processes.
:return: Docker container id strs
"""
try:
docker_processes = self.get_docker_processes(running=running)
patterns = []
if services:
for service in services:
patterns.append(f"^(?P<c_id>[^ ]+).*{service}")
else:
patterns.append(f"^(?P<c_id>[^ ]+).*")
matches = []
for pattern in patterns:
compiled_pattern = re.compile(pattern)
for process in docker_processes.splitlines():
m = compiled_pattern.search(process)
if m and m.group("c_id") != "CONTAINER":
matches.append(m.group("c_id"))
return matches
except subprocess.CalledProcessError as e:
raise Failed(f"failed to get Docker container ids: {e}")
def run_cmd(self, cmd: str) -> None:
try:
spawn.runv(cmd.split())
except subprocess.CalledProcessError as e:
say(f"Failed to run command {cmd}: {e}")
def add_and_remove_chaos(self, add_cmd: str, remove_cmd: str = "") -> None:
self.run_cmd(add_cmd)
# todo: Make sleep durations configurable
say(f"sleeping for 60 seconds...")
time.sleep(60)
if remove_cmd:
self.run_cmd(remove_cmd)
def add_and_remove_netem_chaos(self, container_id: str, add_cmd: str) -> None:
    """Apply a netem network fault, then clear the container's netem qdisc."""
    cleanup = f"docker exec -t {container_id} tc qdisc del dev eth0 root netem"
    self.add_and_remove_chaos(add_cmd, cleanup)
def run(self, comp: Composition, workflow: Workflow) -> None:
    """Inject chaos either forever or while ``other_service`` stays up.

    Falls back to all default chaos types and all currently running
    containers when the step was configured without explicit choices.
    """
    if not self._chaos:
        self._chaos = self.default_chaos
    if not self._services:
        self._services = self.get_container_ids(running=True)
    say(
        f"will run these chaos types: {self._chaos} on these containers: {self._services}"
    )

    if not self._other_service:
        # No watched service: inject one random fault at a time, forever.
        # (dropped the pointless f-prefix: the message has no placeholders)
        say("no 'other_service' provided, running chaos forever")
        while True:
            self.add_chaos()
    else:
        # Chaos is bounded by the lifetime of the named service, which must
        # resolve to exactly one container.
        container_ids = self.get_container_ids(services=[self._other_service])
        if len(container_ids) != 1:
            raise Failed(
                f"wrong number of container ids found for service {self._other_service}. expected 1, found: {len(container_ids)}"
            )
        container_id = container_ids[0]
        say(
            f"running chaos as long as {self._other_service} (container {container_id}) is running"
        )
        while comp.docker_container_is_running(container_id):
            self.add_chaos()
def add_chaos(self) -> None:
    """Inject one randomly chosen chaos action into one random container.

    Docker lifecycle faults (pause/stop/kill) get an explicit undo command;
    netem faults are reverted by deleting the container's netem qdisc.
    Each fault is held for ~60 seconds (see add_and_remove_chaos).
    """
    random_container = random.choice(self._services)
    random_chaos = random.choice(self._chaos)
    if random_chaos == "pause":
        self.add_and_remove_chaos(
            add_cmd=f"docker pause {random_container}",
            remove_cmd=f"docker unpause {random_container}",
        )
    elif random_chaos == "stop":
        self.add_and_remove_chaos(
            add_cmd=f"docker stop {random_container}",
            remove_cmd=f"docker start {random_container}",
        )
    elif random_chaos == "kill":
        self.add_and_remove_chaos(
            add_cmd=f"docker kill {random_container}",
            remove_cmd=f"docker start {random_container}",
        )
    elif random_chaos == "delay":
        # Add 100ms +/- 100ms (normal distribution) latency to egress traffic.
        self.add_and_remove_netem_chaos(
            container_id=random_container,
            add_cmd=f"docker exec -t {random_container} tc qdisc add dev eth0 root netem \
            delay 100ms 100ms distribution normal",
        )
    elif random_chaos == "rate":
        # Throttle egress bandwidth to 5kbit/s.
        self.add_and_remove_netem_chaos(
            container_id=random_container,
            add_cmd=f"docker exec -t {random_container} tc qdisc add dev eth0 root netem \
            rate 5kbit 20 100 5",
        )
    elif random_chaos == "loss":
        # Drop 10% of egress packets.
        self.add_and_remove_netem_chaos(
            container_id=random_container,
            add_cmd=f"docker exec -t {random_container} tc qdisc add dev eth0 root netem loss 10",
        )
    elif random_chaos == "duplicate":
        # Duplicate 10% of egress packets.
        self.add_and_remove_netem_chaos(
            container_id=random_container,
            add_cmd=f"docker exec -t {random_container} tc qdisc add dev eth0 root netem duplicate 10",
        )
    elif random_chaos == "corrupt":
        # Corrupt 10% of egress packets.
        self.add_and_remove_netem_chaos(
            container_id=random_container,
            add_cmd=f"docker exec -t {random_container} tc qdisc add dev eth0 root netem corrupt 10",
        )
    else:
        raise Failed(f"unexpected type of chaos: {random_chaos}")
@Steps.register("chaos-confirm")
class ChaosConfirmStep(WorkflowStep):
"""
Confirms the status of a Docker container. Silently succeeds or raises an error.
:param service: Name of Docker service to confirm, will be used to grep for container id.
NOTE: service name must be unique!
:param running: If True, confirm container is currently running.
:param exit_code: If provided, confirm container exit code matches this exit code.
:param wait: If True, wait for target container to exit before confirming its exit code.
"""
def __init__(
self,
service: str,
running: bool = False,
exit_code: int = 0,
wait: bool = False,
) -> None:
self._service = service
self._running = running
self._exit_code = exit_code
self._wait = wait
def run(self, comp: Composition, workflow: Workflow) -> None:
container_id = comp.get_container_id(self._service)
if self._running:
if not comp.docker_container_is_running(container_id):
raise Failed(f"chaos-confirm: container {container_id} is not running")
else:
if self._wait:
while comp.docker_container_is_running(container_id):
say(f"chaos-confirm: waiting for {self._service} to exit")
time.sleep(60)
else:
if comp.docker_container_is_running(container_id):
raise Failed(
f"chaos-confirm: expected {container_id} to have exited, is running"
)
actual_exit_code = comp.docker_inspect("{{.State.ExitCode}}", container_id)
if actual_exit_code != f"'{self._exit_code}'":
raise Failed(
f"chaos-confirm: expected exit code '{self._exit_code}' for {container_id}, found {actual_exit_code}"
)
@Steps.register("workflow")
class WorkflowWorkflowStep(WorkflowStep):
def __init__(self, workflow: str) -> None:
self._workflow = workflow
def run(self, comp: Composition, workflow: Workflow) -> None:
try:
# Run the specified workflow with the context of the parent workflow
sub_workflow = comp.workflow(self._workflow)
sub_workflow.run(comp, workflow)
except KeyError:
raise UnknownItem(
f"workflow in {comp.name}",
self._workflow,
(w.name for w in comp.workflows()),
)
@Steps.register("run")
class RunStep(WorkflowStep):
"""
Run a service using `mzcompose run`
Running a service behaves slightly differently than making it come up, importantly it
is not an _error_ if it ends at all.
Args:
- service: (required) the name of the service, from the mzcompose file
- entrypoint: Overwrite the entrypoint with this
- command: the command to run. These are the arguments to the entrypoint
- daemon: run as a daemon (default: False)
- service_ports: expose and use service ports. (Default: True)
"""
def __init__(
self,
*,
service: str,
command: Optional[str] = None,
daemon: bool = False,
entrypoint: Optional[str] = None,
service_ports: bool = True,
) -> None:
cmd = []
if daemon:
cmd.append("-d")
if entrypoint:
cmd.append(f"--entrypoint={entrypoint}")
cmd.append(service)
if command is not None:
cmd.extend(shlex.split(command))
self._service_ports = service_ports
self._command = cmd
def run(self, comp: Composition, workflow: Workflow) -> None:
try:
workflow.mzcompose_run(self._command, service_ports=self._service_ports)
except subprocess.CalledProcessError:
raise Failed("giving up: {}".format(ui.shell_quote(self._command)))
@Steps.register("ensure-stays-up")
class EnsureStaysUpStep(WorkflowStep):
def __init__(self, *, container: str, seconds: int) -> None:
self._container = container
self._uptime_secs = seconds
def run(self, comp: Composition, workflow: Workflow) -> None:
pattern = f"{comp.name}_{self._container}"
ui.progress(f"Ensuring {self._container} stays up ", "C")
for i in range(self._uptime_secs, 0, -1):
time.sleep(1)
try:
stdout = spawn.capture(
["docker", "ps", "--format={{.Names}}"], unicode=True
)
except subprocess.CalledProcessError as e:
raise Failed(f"{e.stdout}")
found = False
for line in stdout.splitlines():
if line.startswith(pattern):
found = True
break
if not found:
print(f"failed! {pattern} logs follow:")
print_docker_logs(pattern, 10)
raise Failed(f"container {self._container} stopped running!")
ui.progress(f" {i}")
print()
@Steps.register("down")
class DownStep(WorkflowStep):
def __init__(self, *, destroy_volumes: bool = False) -> None:
"""Bring the cluster down"""
self._destroy_volumes = destroy_volumes
def run(self, comp: Composition, workflow: Workflow) -> None:
say("bringing the cluster down")
mzcompose_down(self._destroy_volumes)
# Generic commands
def mzcompose_up(
    services: List[str],
    args: Optional[List[str]] = None,
    extra_env: Optional[Dict[str, str]] = None,
) -> subprocess.CompletedProcess:
    """Start the given services detached via `./mzcompose up -d`."""
    extra_args = args if args is not None else []
    cmd = ["./mzcompose", "--mz-quiet", *extra_args, "up", "-d", *services]
    return spawn.runv(cmd, env=_merge_env(extra_env))
def mzcompose_run(
    command: List[str],
    args: Optional[List[str]] = None,
    service_ports: bool = True,
    extra_env: Optional[Dict[str, str]] = None,
) -> subprocess.CompletedProcess:
    """Invoke `./mzcompose run`, optionally exposing the service's ports."""
    extra_args = args if args is not None else []
    port_flags = ["--service-ports"] if service_ports else []
    cmd = ["./mzcompose", "--mz-quiet", *extra_args, "run", *port_flags, *command]
    return spawn.runv(cmd, env=_merge_env(extra_env))
def _merge_env(extra_env: Optional[Dict[str, str]]) -> Dict[str, str]:
"""Get a mapping that has values from os.environ overwritten by env, if present
"""
env = cast(dict, os.environ)
if extra_env:
env = os.environ.copy()
env.update(extra_env)
return env
def mzcompose_stop(services: List[str]) -> subprocess.CompletedProcess:
    """Stop the given services via `./mzcompose stop`."""
    return spawn.runv(["./mzcompose", "--mz-quiet", "stop", *services])
def mzcompose_down(destroy_volumes: bool = False) -> subprocess.CompletedProcess:
    """Tear the composition down, optionally destroying volumes as well."""
    volume_flags = ["--volumes"] if destroy_volumes else []
    return spawn.runv(["./mzcompose", "--mz-quiet", "down", *volume_flags])
# Helpers
def print_docker_logs(pattern: str, tail: int = 0) -> None:
    """Print the last `tail` log lines of each container whose name starts with `pattern`."""
    names = spawn.capture(
        ["docker", "ps", "-a", "--format={{.Names}}"], unicode=True
    ).splitlines()
    for name in names:
        if name.startswith(pattern):
            spawn.runv(["docker", "logs", "--tail", str(tail), name])
def now() -> datetime:
    """Current wall-clock time as a timezone-aware UTC datetime."""
    return datetime.now(tz=timezone.utc)
def wait_for_pg(
    timeout_secs: int,
    query: str,
    dbname: str,
    port: int,
    host: str,
    user: str,
    print_result: bool,
    expected: Union[Iterable[Any], Literal["any"]],
) -> None:
    """Wait for a pg-compatible database (includes materialized)

    Retries `query` on each timeout-loop tick until its result matches
    `expected` ("any" accepts any outcome); raises Failed with the last
    observed error if the timeout elapses first.
    """
    args = f"dbname={dbname} host={host} port={port} user={user}"
    ui.progress(f"waiting for {args} to handle {query!r}", "C")
    error = None
    for remaining in ui.timeout_loop(timeout_secs):
        try:
            conn = pg8000.connect(
                database=dbname, host=host, port=port, user=user, timeout=1
            )
            cur = conn.cursor()
            cur.execute(query)
            # rowcount == -1 means the statement produced no result set
            # (e.g. DDL); with expected == "any" that still counts as success.
            if expected == "any" and cur.rowcount == -1:
                ui.progress("success!", finish=True)
                return
            result = list(cur.fetchall())
            if expected == "any" or result == expected:
                if print_result:
                    say(f"query result: {result}")
                else:
                    ui.progress("success!", finish=True)
                return
            else:
                say(
                    f"host={host} port={port} did not return rows matching {expected} got: {result}"
                )
        except Exception as e:
            # Keep polling; remember the last error for the failure message.
            ui.progress(" " + str(int(remaining)))
            error = e
    ui.progress(finish=True)
    raise Failed(f"never got correct result for {args}: {error}")
def wait_for_mysql(
    timeout_secs: int, user: str, passwd: str, host: str, port: int
) -> None:
    """Block until a MySQL server at host:port answers `SELECT 1`.

    Retries for up to `timeout_secs` seconds; raises Failed with the last
    connection error observed if the server never responds correctly.
    """
    args = f"mysql user={user} host={host} port={port}"
    ui.progress(f"waiting for {args}", "C")
    error = None
    for _ in ui.timeout_loop(timeout_secs):
        try:
            conn = pymysql.connect(user=user, passwd=passwd, host=host, port=port)
            with conn.cursor() as cur:
                cur.execute("SELECT 1")
                result = cur.fetchone()
                if result == (1,):
                    # (dropped the pointless f-prefix: no placeholders)
                    print("success!")
                    return
                else:
                    print(f"weird, {args} did not return 1: {result}")
        except Exception as e:
            ui.progress(".")
            error = e
    ui.progress(finish=True)
    raise Failed(f"Never got correct result for {args}: {error}")
@contextlib.contextmanager
def cd(path: Path) -> Any:
    """Temporarily chdir into `path`; the previous cwd is restored on exit."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
if __name__ == "__main__":
with error_handler(say):
cli(auto_envvar_prefix="MZ")
|
#!/usr/bin/env bash
# local version: 1.1.0.0
# Bats test suite for meetup.sh: each test asserts the date of a given
# "<nth> <weekday> of <month> <year>" meetup. By convention only this
# first test runs out of the box (its skip guard is commented out); the
# remaining tests call `skip` unless BATS_RUN_SKIPPED=true is exported.

@test "monteenth of May 2013" {
  #[[ $BATS_RUN_SKIPPED = true ]] || skip
  run bash meetup.sh 2013 5 teenth Monday
  [[ $status -eq 0 ]]
  [[ $output == "2013-05-13" ]]
}
@test "monteenth of August 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 8 teenth Monday
[[ $status -eq 0 ]]
[[ $output == "2013-08-19" ]]
}
@test "monteenth of September 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 9 teenth Monday
[[ $status -eq 0 ]]
[[ $output == "2013-09-16" ]]
}
@test "tuesteenth of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 teenth Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-03-19" ]]
}
@test "tuesteenth of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 teenth Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-04-16" ]]
}
@test "tuesteenth of August 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 8 teenth Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-08-13" ]]
}
@test "wednesteenth of January 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 1 teenth Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-01-16" ]]
}
@test "wednesteenth of February 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 2 teenth Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-02-13" ]]
}
@test "wednesteenth of June 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 6 teenth Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-06-19" ]]
}
@test "thursteenth of May 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 5 teenth Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-05-16" ]]
}
@test "thursteenth of June 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 6 teenth Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-06-13" ]]
}
@test "thursteenth of September 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 9 teenth Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-09-19" ]]
}
@test "friteenth of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 teenth Friday
[[ $status -eq 0 ]]
[[ $output == "2013-04-19" ]]
}
@test "friteenth of August 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 8 teenth Friday
[[ $status -eq 0 ]]
[[ $output == "2013-08-16" ]]
}
@test "friteenth of September 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 9 teenth Friday
[[ $status -eq 0 ]]
[[ $output == "2013-09-13" ]]
}
@test "saturteenth of February 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 2 teenth Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-02-16" ]]
}
@test "saturteenth of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 teenth Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-04-13" ]]
}
@test "saturteenth of October 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 10 teenth Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-10-19" ]]
}
@test "sunteenth of May 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 5 teenth Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-05-19" ]]
}
@test "sunteenth of June 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 6 teenth Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-06-16" ]]
}
@test "sunteenth of October 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 10 teenth Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-10-13" ]]
}
@test "first Monday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 first Monday
[[ $status -eq 0 ]]
[[ $output == "2013-03-04" ]]
}
@test "first Monday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 first Monday
[[ $status -eq 0 ]]
[[ $output == "2013-04-01" ]]
}
@test "first Tuesday of May 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 5 first Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-05-07" ]]
}
@test "first Tuesday of June 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 6 first Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-06-04" ]]
}
@test "first Wednesday of July 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 7 first Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-07-03" ]]
}
@test "first Wednesday of August 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 8 first Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-08-07" ]]
}
@test "first Thursday of September 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 9 first Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-09-05" ]]
}
@test "first Thursday of October 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 10 first Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-10-03" ]]
}
@test "first Friday of November 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 11 first Friday
[[ $status -eq 0 ]]
[[ $output == "2013-11-01" ]]
}
@test "first Friday of December 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 12 first Friday
[[ $status -eq 0 ]]
[[ $output == "2013-12-06" ]]
}
@test "first Saturday of January 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 1 first Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-01-05" ]]
}
@test "first Saturday of February 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 2 first Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-02-02" ]]
}
@test "first Sunday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 first Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-03-03" ]]
}
@test "first Sunday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 first Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-04-07" ]]
}
@test "second Monday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 second Monday
[[ $status -eq 0 ]]
[[ $output == "2013-03-11" ]]
}
@test "second Monday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 second Monday
[[ $status -eq 0 ]]
[[ $output == "2013-04-08" ]]
}
@test "second Tuesday of May 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 5 second Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-05-14" ]]
}
@test "second Tuesday of June 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 6 second Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-06-11" ]]
}
@test "second Wednesday of July 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 7 second Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-07-10" ]]
}
@test "second Wednesday of August 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 8 second Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-08-14" ]]
}
@test "second Thursday of September 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 9 second Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-09-12" ]]
}
@test "second Thursday of October 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 10 second Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-10-10" ]]
}
@test "second Friday of November 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 11 second Friday
[[ $status -eq 0 ]]
[[ $output == "2013-11-08" ]]
}
@test "second Friday of December 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 12 second Friday
[[ $status -eq 0 ]]
[[ $output == "2013-12-13" ]]
}
@test "second Saturday of January 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 1 second Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-01-12" ]]
}
@test "second Saturday of February 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 2 second Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-02-09" ]]
}
@test "second Sunday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 second Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-03-10" ]]
}
@test "second Sunday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 second Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-04-14" ]]
}
@test "third Monday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 third Monday
[[ $status -eq 0 ]]
[[ $output == "2013-03-18" ]]
}
@test "third Monday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 third Monday
[[ $status -eq 0 ]]
[[ $output == "2013-04-15" ]]
}
@test "third Tuesday of May 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 5 third Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-05-21" ]]
}
@test "third Tuesday of June 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 6 third Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-06-18" ]]
}
@test "third Wednesday of July 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 7 third Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-07-17" ]]
}
@test "third Wednesday of August 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 8 third Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-08-21" ]]
}
@test "third Thursday of September 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 9 third Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-09-19" ]]
}
@test "third Thursday of October 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 10 third Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-10-17" ]]
}
@test "third Friday of November 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 11 third Friday
[[ $status -eq 0 ]]
[[ $output == "2013-11-15" ]]
}
@test "third Friday of December 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 12 third Friday
[[ $status -eq 0 ]]
[[ $output == "2013-12-20" ]]
}
@test "third Saturday of January 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 1 third Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-01-19" ]]
}
@test "third Saturday of February 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 2 third Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-02-16" ]]
}
@test "third Sunday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 third Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-03-17" ]]
}
@test "third Sunday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 third Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-04-21" ]]
}
@test "fourth Monday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 fourth Monday
[[ $status -eq 0 ]]
[[ $output == "2013-03-25" ]]
}
@test "fourth Monday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 fourth Monday
[[ $status -eq 0 ]]
[[ $output == "2013-04-22" ]]
}
@test "fourth Tuesday of May 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 5 fourth Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-05-28" ]]
}
@test "fourth Tuesday of June 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 6 fourth Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-06-25" ]]
}
@test "fourth Wednesday of July 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 7 fourth Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-07-24" ]]
}
@test "fourth Wednesday of August 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 8 fourth Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-08-28" ]]
}
@test "fourth Thursday of September 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 9 fourth Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-09-26" ]]
}
@test "fourth Thursday of October 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 10 fourth Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-10-24" ]]
}
@test "fourth Friday of November 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 11 fourth Friday
[[ $status -eq 0 ]]
[[ $output == "2013-11-22" ]]
}
@test "fourth Friday of December 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 12 fourth Friday
[[ $status -eq 0 ]]
[[ $output == "2013-12-27" ]]
}
@test "fourth Saturday of January 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 1 fourth Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-01-26" ]]
}
@test "fourth Saturday of February 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 2 fourth Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-02-23" ]]
}
@test "fourth Sunday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 fourth Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-03-24" ]]
}
@test "fourth Sunday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 fourth Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-04-28" ]]
}
@test "last Monday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 last Monday
[[ $status -eq 0 ]]
[[ $output == "2013-03-25" ]]
}
@test "last Monday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 last Monday
[[ $status -eq 0 ]]
[[ $output == "2013-04-29" ]]
}
@test "last Tuesday of May 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 5 last Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-05-28" ]]
}
@test "last Tuesday of June 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 6 last Tuesday
[[ $status -eq 0 ]]
[[ $output == "2013-06-25" ]]
}
@test "last Wednesday of July 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 7 last Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-07-31" ]]
}
@test "last Wednesday of August 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 8 last Wednesday
[[ $status -eq 0 ]]
[[ $output == "2013-08-28" ]]
}
@test "last Thursday of September 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 9 last Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-09-26" ]]
}
@test "last Thursday of October 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 10 last Thursday
[[ $status -eq 0 ]]
[[ $output == "2013-10-31" ]]
}
@test "last Friday of November 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 11 last Friday
[[ $status -eq 0 ]]
[[ $output == "2013-11-29" ]]
}
@test "last Friday of December 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 12 last Friday
[[ $status -eq 0 ]]
[[ $output == "2013-12-27" ]]
}
@test "last Saturday of January 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 1 last Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-01-26" ]]
}
@test "last Saturday of February 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 2 last Saturday
[[ $status -eq 0 ]]
[[ $output == "2013-02-23" ]]
}
@test "last Sunday of March 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 3 last Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-03-31" ]]
}
@test "last Sunday of April 2013" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2013 4 last Sunday
[[ $status -eq 0 ]]
[[ $output == "2013-04-28" ]]
}
@test "last Wednesday of February 2012" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2012 2 last Wednesday
[[ $status -eq 0 ]]
[[ $output == "2012-02-29" ]]
}
@test "last Wednesday of December 2014" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2014 12 last Wednesday
[[ $status -eq 0 ]]
[[ $output == "2014-12-31" ]]
}
@test "last Sunday of February 2015" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2015 2 last Sunday
[[ $status -eq 0 ]]
[[ $output == "2015-02-22" ]]
}
@test "first Friday of December 2012" {
[[ $BATS_RUN_SKIPPED = true ]] || skip
run bash meetup.sh 2012 12 first Friday
[[ $status -eq 0 ]]
[[ $output == "2012-12-07" ]]
}
|
package memento;
import java.util.ArrayList;
import composite.Task;
import setting.PointSetting;
/**
 * Snapshot of application state (memento pattern): a deep copy of the task
 * list plus the current point total. Both fields are now {@code final} and
 * every boundary crossing makes a defensive copy, so a Memento cannot be
 * altered after construction. (Also renames loop variables that shadowed
 * the {@code Task} type name.)
 */
public final class Memento {
    private final ArrayList<Task> saved = new ArrayList<Task>();
    private final PointSetting point;

    /**
     * @param p        point setting to snapshot (copied via its total)
     * @param taskList tasks to snapshot (deep-copied element by element)
     */
    public Memento(PointSetting p, ArrayList<Task> taskList) {
        this.point = new PointSetting(p.getTotal());
        for (Task task : taskList) {
            this.saved.add(new Task(task));
        }
    }

    /** @return a fresh deep copy of the saved task list */
    public final ArrayList<Task> returnList() {
        ArrayList<Task> copies = new ArrayList<Task>();
        for (Task task : this.saved) {
            copies.add(new Task(task));
        }
        return copies;
    }

    /** @return a defensive copy of the saved point setting */
    public final PointSetting returnPoint() {
        return new PointSetting(this.point.getTotal());
    }
}
|
package edu.pdx.cs410J.seung2.client;
import com.google.gwt.user.client.rpc.SerializationException;
import com.google.gwt.user.client.rpc.SerializationStreamReader;
import com.google.gwt.user.client.rpc.SerializationStreamWriter;
import com.google.gwt.user.client.rpc.impl.ReflectionHelper;
@SuppressWarnings("deprecation")
public class PhoneCall_Array_Rank_1_FieldSerializer implements com.google.gwt.user.client.rpc.impl.TypeHandler {
public static void deserialize(SerializationStreamReader streamReader, edu.pdx.cs410J.seung2.client.PhoneCall[] instance) throws SerializationException {
com.google.gwt.user.client.rpc.core.java.lang.Object_Array_CustomFieldSerializer.deserialize(streamReader, instance);
}
public static edu.pdx.cs410J.seung2.client.PhoneCall[] instantiate(SerializationStreamReader streamReader) throws SerializationException {
int size = streamReader.readInt();
return new edu.pdx.cs410J.seung2.client.PhoneCall[size];
}
public static void serialize(SerializationStreamWriter streamWriter, edu.pdx.cs410J.seung2.client.PhoneCall[] instance) throws SerializationException {
com.google.gwt.user.client.rpc.core.java.lang.Object_Array_CustomFieldSerializer.serialize(streamWriter, instance);
}
public Object create(SerializationStreamReader reader) throws SerializationException {
return edu.pdx.cs410J.seung2.client.PhoneCall_Array_Rank_1_FieldSerializer.instantiate(reader);
}
public void deserial(SerializationStreamReader reader, Object object) throws SerializationException {
edu.pdx.cs410J.seung2.client.PhoneCall_Array_Rank_1_FieldSerializer.deserialize(reader, (edu.pdx.cs410J.seung2.client.PhoneCall[])object);
}
public void serial(SerializationStreamWriter writer, Object object) throws SerializationException {
edu.pdx.cs410J.seung2.client.PhoneCall_Array_Rank_1_FieldSerializer.serialize(writer, (edu.pdx.cs410J.seung2.client.PhoneCall[])object);
}
}
|
import {
import {
Logger,
LogLevelDesc,
LoggerProvider,
} from "@hyperledger/cactus-common";
import { Checks, IAsyncProvider } from "@hyperledger/cactus-common";
import { ConsortiumDatabase } from "@hyperledger/cactus-core-api";
import {
DefaultApi as ConsortiumManualApi,
GetConsortiumJwsResponse,
} from "@hyperledger/cactus-plugin-consortium-manual";
/** Constructor options for the default consortium provider. */
export interface IDefaultConsortiumProviderOptions {
  // Optional log verbosity; defaults to "INFO" when omitted.
  logLevel?: LogLevelDesc;
  // API client used to fetch the consortium JWS from a manual-consortium node.
  apiClient: ConsortiumManualApi;
}
/**
 * Async provider that fetches the consortium JWS from a manual-consortium
 * plugin endpoint and decodes the embedded ConsortiumDatabase.
 */
export class DefaultConsortiumProvider
  implements IAsyncProvider<ConsortiumDatabase> {
  public static readonly CLASS_NAME = "DefaultConsortiumProvider";

  private readonly log: Logger;

  public get className() {
    return DefaultConsortiumProvider.CLASS_NAME;
  }

  constructor(public readonly options: IDefaultConsortiumProviderOptions) {
    const fnTag = `${this.className}#constructor()`;
    Checks.truthy(options, `${fnTag} arg options`);

    const level = this.options.logLevel || "INFO";
    const label = this.className;
    this.log = LoggerProvider.getOrCreate({ level, label });
  }

  /**
   * Decodes the base64 JWS payload of a consortium response and returns the
   * embedded ConsortiumDatabase.
   * @throws When the response, its JWS, its payload, or the decoded
   * consortiumDatabase property is missing/falsy.
   */
  parseConsortiumJws(response: GetConsortiumJwsResponse): ConsortiumDatabase {
    const fnTag = `DefaultConsortiumProvider#parseConsortiumJws()`;

    Checks.truthy(response, `${fnTag}::response`);
    Checks.truthy(response.jws, `${fnTag}::response.jws`);
    Checks.truthy(response.jws.payload, `${fnTag}::response.jws.payload`);

    const json = Buffer.from(response.jws.payload, "base64").toString();
    const body = JSON.parse(json);
    const {
      consortiumDatabase,
    }: { consortiumDatabase: ConsortiumDatabase } = body;

    Checks.truthy(consortiumDatabase, `${fnTag}::consortiumDatabase`);

    // FIXME Ideally there would be an option here to validate the JWS based on
    // all the signatures and the corresponding public keys (which the caller
    // would have to be able to supply).
    // We do not yet have this crypto functions available in a cross platform
    // manner so it is omitted for now but much needed prior to any GA release.
    return consortiumDatabase;
  }

  public async get(): Promise<ConsortiumDatabase> {
    try {
      const res = await this.options.apiClient.getConsortiumJws();
      return this.parseConsortiumJws(res.data);
    } catch (ex) {
      // Axios errors expose toJSON(); fall back to the raw value otherwise.
      // Optional chaining fixes a crash when the rejection value is
      // null/undefined (the old `ex.toJSON && ex.toJSON()` would throw).
      const innerException = ex?.toJSON?.() ?? ex;
      this.log.error(`Request for Consortium JWS failed: `, innerException);
      throw ex;
    }
  }
}
|
#!/bin/bash
# Bootstrap/build script: installs composer dependencies, clears caches,
# installs and dumps assets, syncs the DB schema, and creates a test user.
#
# Fixes: removed the stray "./" and "./c" lines, which attempt to execute
# the current directory / a nonexistent file and abort the script under
# `set -e`; composer is now fetched over HTTPS.
set -e

cd "`dirname "$0"`"

#if [ ! -f app/config/parameters.yml ]; then
#    cp app/config/parameters.yml.dist app/config/parameters.yml
#fi

# Fetch composer only if it is not already present.
if [ ! -f composer.phar ]; then
    curl -s https://getcomposer.org/installer | php
fi
php composer.phar install

rm -rf app/cache/* app/logs/*

if [ "$1" == "--symlink" ]; then
    ./app/console assets:install --symlink -v
else
    ./app/console assets:install -v
fi

./app/console fos:js-routing:debug
./app/console assetic:dump --env=prod --no-debug -v
./app/console doctrine:schema:update --force
./app/console cache:clear --env=prod
./app/console fos:user:create testuser test@example.com p@ssword
|
def compute_similarity(s1, s2):
    """Return the fraction of positions where ``s1`` and ``s2`` match.

    Positions are compared element-wise up to the shorter sequence; the
    count of matches is divided by the *longer* sequence's length, so
    length mismatches reduce the score. Works on any sequences (strings,
    lists, tuples).

    Args:
        s1: First sequence.
        s2: Second sequence.

    Returns:
        A float in [0.0, 1.0]. Two empty sequences are treated as
        identical and yield 1.0 (the previous revision raised
        ZeroDivisionError in that case).
    """
    max_len = max(len(s1), len(s2))
    if max_len == 0:
        # Both inputs are empty: define them as a perfect match instead
        # of dividing by zero.
        return 1.0
    # zip() stops at the shorter sequence, replacing the manual
    # index-bounds check of the original loop.
    matches = sum(1 for a, b in zip(s1, s2) if a == b)
    return matches / max_len
|
#!/bin/bash
#
# sub_rdtest_splits.sh
#
# Submits one LSF job (via bsub) per split BED file, running RdTest on
# each split to evaluate read-depth support for candidate SV calls.
# Job stdout goes to split_logs/, RdTest output to split_rdtest/.
set -e
# Shared RdTest inputs: binned coverage matrix, per-sample medians, and
# the pedigree (.fam) file for the CMC cohort.
coveragefile=/data/talkowski/Samples/common-mind/matrices/CMC.all.binCov.bed.gz
medianfile=/data/talkowski/Samples/common-mind/matrices/CMC.all.binCov.median
famfile=/data/talkowski/Samples/common-mind/ref/CMC.fam
# Iterate every (batch, caller, chromosome) combination; currently
# restricted to batch CMC and chromosome 1 (full range commented below).
for batch in CMC; do
for source in delly lumpy manta wham depth; do
for chrom in 1; do
# for chrom in $(seq 1 22) X Y; do
# Split BED files are named <batch>.<source>.<chrom>.<split>
for bed in split_beds/${batch}.${source}.${chrom}.*; do
split=$(basename $bed | cut -d"." -f4)
# echo $bed
# echo $split
# Submit to the "short" queue under the miket_sc SLA; job name encodes
# the bed path so duplicate submissions are visible in bjobs output.
bsub -q short -o split_logs/${batch}.${source}.${chrom}.out -sla miket_sc -J "rdtest_${bed}" "
Rscript scripts/RdTest.R \
-b $bed \
-o split_rdtest \
-n ${batch}.${source}.${chrom}.${split} \
-c $coveragefile \
-m $medianfile \
-f $famfile " > /dev/null
done
done
done
done
|
#pragma once
#include "software/util/time/duration.h"
#include "software/util/time/time.h"
/**
* A simple Timestamp class built around doubles. This Timestamp is intended to represent
* the t_capture timestamps we receive from the SSL Vision system. These t_capture values
* are monotonic (meaning they are always positive and always increase), and are relative
* to the "epoch time" defined by SSL Vision. This "epoch" is when SSL Vision starts up
* and begins streaming data. Therefore, these timestamps are not absolute "wall clock"
* time, but points in time relative to when the SSL Vision program started. They can and
* should be used to timestamp all data received from SSL Vision and propagated throughout
* the system in order to calculate time differences (durations), velocities, and other
* time-dependent values.
*/
class Timestamp : public Time
{
   public:
    /**
     * The default constructor for a Timestamp. Creates a Timestamp at time 0
     */
    Timestamp();

    /**
     * Creates a new Timestamp value from a value in seconds.
     * @param seconds A value >= 0.0, in seconds, from which to create the Timestamp
     * @throws std::invalid_argument if the given value is < 0.0
     * @return A Timestamp created from the given value
     */
    static const Timestamp fromSeconds(double seconds);

    /**
     * Creates a new Timestamp value from a value in milliseconds
     * @param milliseconds A value >= 0.0, in milliseconds, from which to create the
     * Timestamp
     * @throws std::invalid_argument if the given value is < 0.0
     * @return A Timestamp created from the given value
     */
    static const Timestamp fromMilliseconds(double milliseconds);

    /**
     * Compares Timestamps for equality. Timestamps are considered equal if their values
     * in seconds are within EPSILON from one another.
     * (EPSILON is presumably provided by the Time base class — confirm.)
     *
     * @param other the Timestamp to compare with for equality
     * @return true if the Timestamps are equal and false otherwise
     */
    bool operator==(const Timestamp& other) const;

    /**
     * Compares Timestamps for inequality
     *
     * @param other the Timestamp to compare with for inequality
     * @return true if the Timestamps are not equal, and false otherwise
     */
    bool operator!=(const Timestamp& other) const;

    /**
     * Defines the "less than" operator. Returns true if this Timestamp is strictly less
     * than (and not equal to) the other Timestamp
     *
     * @param other the Timestamp to compare with
     * @return true if this Timestamp is strictly less than (and not equal to) the other
     * Timestamp, and false otherwise
     */
    bool operator<(const Timestamp& other) const;

    /**
     * Defines the "less than or equal to" operator. Returns true if this Timestamp is
     * less than or equal to the other Timestamp
     *
     * @param other the Timestamp to compare with
     * @return true if this Timestamp is less than or equal to the other Timestamp, and
     * false otherwise
     */
    bool operator<=(const Timestamp& other) const;

    /**
     * Defines the "greater than" operator. Returns true if this Timestamp is strictly
     * greater than (and not equal to) the other Timestamp
     *
     * @param other the Timestamp to compare with
     * @return true if this Timestamp is strictly greater than (and not equal to) the
     * other Timestamp, and false otherwise
     */
    bool operator>(const Timestamp& other) const;

    /**
     * Defines the "greater than or equal to" operator. Returns true if this Timestamp
     * is greater than or equal to the other Timestamp
     *
     * @param other the Timestamp to compare with
     * @return true if this Timestamp is greater than or equal to the other Timestamp, and
     * false otherwise
     */
    bool operator>=(const Timestamp& other) const;

    /**
     * Defines the addition operator for Timestamps. Allows Durations to be added to
     * Timestamps
     *
     * @param duration the Duration to add to this Timestamp
     * @return A new Timestamp with the given Duration added to this Timestamp
     */
    Timestamp operator+(const Duration& duration) const;

    /**
     * Defines the subtraction operator for Timestamps. Allows Durations to be subtracted
     * from Timestamps
     *
     * @param duration the Duration to subtract from this Timestamp
     * @return A new Timestamp with the given Duration subtracted from to this Timestamp
     */
    Timestamp operator-(const Duration& duration) const;

    /**
     * Defines the subtraction operator for Timestamps. Allows Timestamps to be subtracted
     * from Timestamps
     *
     * @param timestamp The Timestamp to subtract from this Timestamp
     * @return A Duration that is the difference in time between the two timestamps
     */
    Duration operator-(const Timestamp& timestamp) const;

   private:
    /**
     * Constructs a Timestamp value from a value in seconds.
     * Private so that all construction goes through the validating
     * fromSeconds()/fromMilliseconds() factory functions above.
     * @param timestamp_seconds A value >= 0.0, in seconds, from which to create the
     * timestamp
     * @throws std::invalid_argument if the provided value is < 0.0
     */
    explicit Timestamp(double timestamp_seconds);
};
|
<reponame>MarcosFernandez/gemtols-cnv
/*
* PROJECT: GEM-Tools library
* FILE: gt_output_sam.h
* DATE: 01/08/2012
* AUTHOR(S): <NAME> <<EMAIL>>
* DESCRIPTION: // TODO
*/
#ifndef GT_OUTPUT_SAM_H_
#define GT_OUTPUT_SAM_H_
#include "gt_essentials.h"
#include "gt_dna_string.h"
#include "gt_dna_read.h"
#include "gt_alignment_utils.h"
#include "gt_template_utils.h"
#include "gt_sam_attributes.h"
#include "gt_map_score.h"
#include "gt_output_buffer.h"
#include "gt_buffered_output_file.h"
#include "gt_generic_printer.h"
/*
* Error Codes
*/
#define GT_SOE_PRINTING_MISM_STRING 10
/*
* SAM Output attributes
*/
/* Output container format: plain-text SAM or its binary BAM counterpart. */
typedef enum { GT_SAM, GT_BAM } gt_output_sam_format_t;
/*
 * Printer configuration controlling how alignments are rendered as SAM
 * records: which fields are emitted, quality encoding, per-record map
 * limits, and the set of optional-field attributes.
 */
typedef struct {
  /* Format */
  gt_output_sam_format_t format; // TODO
  /* Read/Qualities */
  bool always_output_read__qualities;       // Emit SEQ/QUAL on every record rather than once
  bool output_seq_qual_for_secondary_align; // Also emit SEQ/QUAL for secondary alignments
  gt_qualities_offset_t qualities_offset;   // Offset of the input quality encoding (e.g. Phred+33/+64 — confirm)
  /* Maps */
  uint64_t max_printable_maps;              // Upper bound on maps printed per read
  bool compact_format; // Compact map representation in SAM via XA field
  /* CIGAR/Mismatch string */
  bool print_mismatches;                    // presumably selects mismatch-aware CIGAR output — confirm
  /* SAM Optional Fields */
  bool print_optional_fields;
  gt_sam_attributes* sam_attributes; // Optional fields stored as sam_attributes
  gt_sam_attribute_func_params* attribute_func_params; // Parameters provided to generate functional attributes
} gt_output_sam_attributes;
/*
* BAM record
*/
/*
 * In-memory representation of a single BAM alignment record. Field
 * names and packing mirror the binary BAM layout (apparently following
 * the SAM/BAM specification's alignment-record section — confirm
 * against the spec version targeted by this library).
 */
typedef struct {
  int32_t block_size;   // Length of the remainder of the alignment record
  int32_t refID;        // Reference sequence ID, 1 <= refID < n ref; -1 for a read without a mapping position.
  int32_t pos;          // pos 0-based leftmost coordinate (= POS 1)
  uint32_t bin_mq_nl;   // bin<<16|MAPQ<<8|l read name;
                        // bin is computed by the reg2bin() function in Section 4.3;
                        // l read name is the length of read name below (= length(QNAME) + 1).
  uint32_t flag_mq_nl;  // FLAG<<16|n cigar op; n cigar op is the number of operations in CIGAR.
  int32_t l_seq;        // Length of SEQ
  int32_t next_refID;   // Ref-ID of the next segment (1 <= mate refID < n_ref)
  int32_t next_pos;     // 0-based leftmost pos of the next segment (= PNEXT - 1)
  int32_t tlen;         // Template length (= TLEN)
  gt_string* read_name; // Read name, NULL terminated (QNAME plus a tailing `\0')
  uint32_t* cigar;      // op_len<<4|op. `MIDNSHP=X'!`012345678'
  uint8_t* seq;         // 4-bit encoded read: `=ACMGRSVTWYHKDBN'! [0; 15];
                        // Other characters mapped to `N';
                        // High nybble first (1st base in the highest 4-bit of the 1st byte)
  char* qual;           // Phred base quality (a sequence of 0xFF if absent)
  gt_string* optional_fields; // Optional Fields String. List of auxiliary data (until the end of the alignment block)
} gt_bam_record;
/* Setup */
GT_INLINE gt_output_sam_attributes* gt_output_sam_attributes_new();
GT_INLINE void gt_output_sam_attributes_delete(gt_output_sam_attributes* const attributes);
GT_INLINE void gt_output_sam_attributes_clear(gt_output_sam_attributes* const attributes);
/* Format */
GT_INLINE void gt_output_sam_attributes_set_format(gt_output_sam_attributes* const attributes,gt_output_sam_format_t const format);
/* Read/Qualities */
GT_INLINE void gt_output_sam_attributes_dump_read__qualities_once(gt_output_sam_attributes* const attributes);
GT_INLINE void gt_output_sam_attributes_always_dump_read__qualities(gt_output_sam_attributes* const attributes);
GT_INLINE void gt_output_sam_attributes_set_qualities_offset(gt_output_sam_attributes* const attributes,gt_qualities_offset_t const qualities_offset);
/* Maps */
GT_INLINE void gt_output_sam_attributes_set_max_printable_maps(gt_output_sam_attributes* const attributes,const uint64_t max_printable_maps);
GT_INLINE void gt_output_sam_attributes_set_compact_format(gt_output_sam_attributes* const attributes,const bool compact_format);
/* CIGAR/Mismatch string */
GT_INLINE void gt_output_sam_attributes_set_print_mismatches(gt_output_sam_attributes* const attributes,const bool print_mismatches);
/* SAM Optional Fields */
GT_INLINE void gt_output_sam_attributes_set_print_optional_fields(gt_output_sam_attributes* const attributes,const bool print_optional_fields);
GT_INLINE void gt_output_sam_attributes_set_reference_sequence_archive(gt_output_sam_attributes* const attributes,gt_sequence_archive* const reference_sequence_archive);
GT_INLINE gt_sam_attributes* gt_output_sam_attributes_get_sam_attributes(gt_output_sam_attributes* const attributes);
/*
* SAM Headers
*/
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_headers_sh,gt_sam_headers* const sam_headers);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_headers_sa,gt_sequence_archive* const sequence_archive);
/*
* SAM QNAME (Tag)
*/
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_qname,gt_string* const tag);
/*
* SAM Flag
* Beware of the SAM flags, they might cause severe mind injuries...
*
* 0x1 (Bit 0) => Template having multiple segments in sequencing
* (The read was part of a pair during sequencing) [read paired]
* 0x2 (Bit 1) => Each segment properly aligned according to the aligner
* (The read is mapped in a pair) [read mapped in proper pair]
* 0x4 (Bit 2) => Segment unmapped. The query sequence is unmapped [read unmapped]
* 0x8 (Bit 3) => Next segment in the template unmapped. The mate is unmapped [mate unmapped]
* 0x10 (Bit 4) => SEQ being reverse complemented. Strand of query (0=forward 1=reverse) [read reverse strand]
* 0x20 (Bit 5) => SEQ of the next segment in the template being reversed [mate reverse strand]
* 0x40 (Bit 6) => The first segment in the template [first in pair]
* 0x80 (Bit 7) => The last segment in the template [second in pair]
* 0x100 (Bit 8) => Secondary alignment [not primary alignment]
* 0x200 (Bit 9) => Not passing quality controls [read fails platform/vendor quality checks]
* 0x400 (Bit 10) => PCR or optical duplicate [read is PCR or optical duplicate]
* 0x800 (Bit 11) => Supplementary alignment [second or subsequent segment in a chimeric alignment]
*
* - Bit 0x4 is the only reliable place to tell whether the segment is unmapped. If 0x4 is set,
* no assumptions can be made about RNAME, POS, CIGAR, MAPQ, bits 0x2, 0x10 and 0x100
* and the bit 0x20 of the next segment in the template.
* - If 0x40 and 0x80 are both set, the segment is part of a linear template, but it is neither
* the first nor the last segment. If both 0x40 and 0x80 are unset, the index of the segment
* in the template is unknown. This may happen for a non-linear template or the index is
* lost in data processing.
* - Bit 0x100 marks the alignment not to be used in certain analysis when the tools in use
* are aware of this bit.
* - If 0x1 is unset, no assumptions can be made about 0x2, 0x8, 0x20, 0x40 and 0x80.
*/
GT_INLINE uint16_t gt_output_sam_calculate_flag(
const bool paired_end,const bool read_paired,
const bool read_mapped,const bool mate_mapped,
const gt_strand read_strand,const gt_strand mate_strand,
const bool first_in_pair,const bool last_in_pair,
const bool secondary_alignment,const bool supplementary_alignment,const bool not_passing_QC,const bool PCR_duplicate);
GT_INLINE uint16_t gt_output_sam_calculate_flag_pe(
const bool read_paired,const bool read_mapped,const bool mate_mapped,
const gt_strand read_strand,const gt_strand mate_strand,
const bool first_in_pair,const bool last_in_pair,
const bool secondary_alignment,const bool supplementary_alignment,const bool not_passing_QC,const bool PCR_duplicate);
GT_INLINE uint16_t gt_output_sam_calculate_flag_se(
const bool read_mapped,const gt_strand read_strand,
const bool secondary_alignment,const bool supplementary_alignment,const bool not_passing_QC,const bool PCR_duplicate);
GT_INLINE uint16_t gt_output_sam_calculate_flag_se_map(
gt_map* const map,const bool secondary_alignment,const bool supplementary_alignment,const bool not_passing_QC,const bool PCR_duplicate);
GT_INLINE uint16_t gt_output_sam_calculate_flag_pe_map(
gt_map* const map,gt_map* const mate,const bool is_map_first_in_pair,
const bool secondary_alignment,const bool supplementary_alignment,const bool not_passing_QC,const bool PCR_duplicate,const bool paire);
/*
* SAM CIGAR
*/
GT_INLINE gt_status gt_output_sam_gprint_map_cigar(
gt_generic_printer* const gprinter,gt_map* const map_segment,bool print_mismatches,
const uint64_t hard_left_trim_read,const uint64_t hard_right_trim_read);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_cigar,gt_map* const map_segment,bool print_mismatches);
/*
* SAM CORE fields
* (QNAME,FLAG,RNAME,POS,MAPQ,CIGAR,RNEXT,PNEXT,TLEN,SEQ,QUAL). No EOL is printed
* Don't handle quimeras (just print one record out of the first map segment)
*/
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_core_fields_se,
gt_string* const tag,gt_string* const read,gt_string* const qualities,
gt_map* const map,const uint64_t position,const uint8_t phred_score,
const uint64_t hard_left_trim_read,const uint64_t hard_right_trim_read,
const bool secondary_alignment,const bool supplementary_alignment,const bool not_passing_QC,const bool PCR_duplicate,
gt_output_sam_attributes* const attributes);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_core_fields_pe,
gt_string* const tag,gt_string* const read,gt_string* const qualities,
gt_map* const map,const uint64_t position,const uint8_t phred_score,
gt_map* const mate,const uint64_t mate_position,const int64_t template_length,
const uint64_t hard_left_trim_read,const uint64_t hard_right_trim_read,
const bool is_map_first_in_pair,const bool secondary_alignment,const bool supplementary_alignment,const bool not_passing_QC,const bool PCR_duplicate,const bool paired,
gt_output_sam_attributes* const attributes);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_map_core_fields_se,
gt_string* const tag,gt_string* const read,gt_string* const qualities,gt_map* const map_segment,
const uint64_t hard_left_trim_read,const uint64_t hard_right_trim_read,
const bool secondary_alignment,const bool supplementary_alignment,const bool not_passing_QC,const bool PCR_duplicate,
gt_output_sam_attributes* const attributes);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_map_core_fields_pe,
gt_string* const tag,gt_string* const read,gt_string* const qualities,
gt_map* const map_segment,gt_map* const mate_segment,gt_mmap_attributes* const mmap_attributes,
const uint64_t hard_left_trim_read,const uint64_t hard_right_trim_read,
const bool is_map_first_in_pair,const bool secondary_alignment,const bool supplementary_alignment,const bool not_passing_QC,const bool PCR_duplicate,
gt_output_sam_attributes* const output_attributes);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_map_placeholder,
gt_string* const tag,gt_string* const read,gt_string* const qualities,
gt_map_placeholder* const map_placeholder,gt_output_sam_attributes* const output_attributes);
/*
* SAM Optional fields
* - SAM Attributes is a shash of gt_sam_attribute (gt_sam_data_attributes.h)
* - SAM Attributes (gt_sam_data_attributes.h) can be either a value(int,double,string)
* or a function -> f(gt_sam_attribute_func_params* params) returning a value(int,double,string)
* - gt_output_sam_print_optional_fields_values() prints all the values contained in @sam_attributes
* gt_output_sam_print_optional_fields() prints all attributes.
* Those relying on a function, are generating calling that function with @gt_sam_attribute_func_params
* as argument (some fields can be NULL, so the attribute function must be ready to deal with that)
*/
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_sam_attribute,gt_sam_attribute* const sam_attribute,gt_sam_attribute_func_params* const attribute_func_params);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_optional_fields_values,gt_sam_attributes* const sam_attributes,gt_output_sam_attributes* const output_attributes);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_optional_fields,gt_sam_attributes* const sam_attributes,gt_output_sam_attributes* const output_attributes);
/*
* SAM High-level MMap/Map Printers
*/
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_mmap,
gt_template* const template,gt_map* const map_end1,gt_map* const map_end2,gt_mmap_attributes* const mmap_attributes,
const bool secondary_alignment,const bool not_passing_QC,const bool PCR_duplicate,
gt_output_sam_attributes* const output_attributes);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_map,
gt_template* const template,gt_alignment* const alignment,gt_map* const map,
const bool secondary_alignment,const bool not_passing_QC,const bool PCR_duplicate,
gt_output_sam_attributes* const output_attributes);
/*
* SAM High-level Template/Alignment Printers
* - Optional fields are generated from the first SAM-Attributes object found in the following order:
* 1.- map->attributes{GT_ATTR_ID_SAM_FLAGS} / mmap_attributes->attributes{GT_ATTR_ID_SAM_FLAGS}
* 2.- @output_attributes->sam_attributes
*/
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_alignment,gt_alignment* const alignment,gt_output_sam_attributes* const output_attributes);
GT_GENERIC_PRINTER_PROTOTYPE(gt_output_sam,print_template,gt_template* const template,gt_output_sam_attributes* const output_attributes);
#endif /* GT_OUTPUT_SAM_H_ */
|
package store
import (
"fnd/log"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb"
)
type TxCb func(tx *leveldb.Transaction) error
var logger = log.WithModule("store")
// Open opens (creating it if necessary) the LevelDB database at path.
func Open(path string) (*leveldb.DB, error) {
	database, openErr := leveldb.OpenFile(path, nil)
	if openErr != nil {
		return nil, errors.Wrap(openErr, "error opening database")
	}
	return database, nil
}
// WithTx runs cb inside a LevelDB transaction: the transaction is
// committed when cb succeeds, and discarded when cb returns an error or
// panics (the panic is re-raised after cleanup).
//
// The result parameter MUST be named: the deferred cleanup inspects and
// may overwrite err. The previous revision returned cb(tx) through an
// unnamed result, so the defer always saw the stale err == nil left over
// from OpenTransaction — a failing callback was still committed, and any
// error from Commit was silently dropped.
func WithTx(db *leveldb.DB, cb TxCb) (err error) {
	tx, err := db.OpenTransaction()
	if err != nil {
		return errors.Wrap(err, "error opening transaction")
	}
	defer func() {
		if p := recover(); p != nil {
			// Roll back, then propagate the panic unchanged.
			tx.Discard()
			panic(p)
		} else if err != nil {
			tx.Discard()
		} else {
			// Commit's error (if any) becomes the function's result.
			err = tx.Commit()
		}
	}()
	// Assigning to the named result makes cb's error visible to the defer.
	return cb(tx)
}
|
set -e
# Build a Meteor application into /built_app, then remove the Meteor
# toolchain so the final image stays small.
COPIED_APP_PATH=/copied-app
BUNDLE_DIR=/tmp/bundle-dir
# Make sure copied folder doesn't cause any issues
cp -R /app $COPIED_APP_PATH
cd $COPIED_APP_PATH
# Produce an unpacked server bundle (--directory: no tarball).
meteor build --directory $BUNDLE_DIR
cd $BUNDLE_DIR/bundle/programs/server/
# Install the bundle's production npm dependencies.
npm install
mv $BUNDLE_DIR/bundle /built_app
# Cleanup
rm -rf $COPIED_APP_PATH
rm -rf $BUNDLE_DIR
rm -rf ~/.meteor
rm /usr/local/bin/meteor
|
/*********************************
* import webpack plugins
********************************/
const path = require('path');
const fs = require('fs');
const webpack = require('webpack');
const CopyWebpackPlugin = require('copy-webpack-plugin');
const GasPlugin = require('gas-webpack-plugin');
const TerserPlugin = require('terser-webpack-plugin');
const HtmlWebpackPlugin = require('html-webpack-plugin');
const HtmlWebpackInlineSourcePlugin = require('html-webpack-inline-source-plugin');
const DynamicCdnWebpackPlugin = require('dynamic-cdn-webpack-plugin');
const moduleToCdn = require('module-to-cdn');
/*********************************
* set up environment variables
********************************/
const dotenv = require('dotenv').config();
// dotenv sets .error when no .env file exists; fall back to an empty map.
// (The previous revision went through a redundant `parsed || {}` step.)
const envVars = dotenv.error ? {} : dotenv.parsed || {};
// `||` (not `??`) is deliberate: an empty PORT string should also fall back.
const PORT = envVars.PORT || 3000;
envVars.NODE_ENV = process.env.NODE_ENV;
envVars.PORT = PORT;
const isProd = process.env.NODE_ENV === 'production';
/*********************************
* define entrypoints
********************************/
// our destination directory
const destination = path.resolve(__dirname, 'dist');
// define server paths
const serverEntry = './src/server/index.js';
// define appsscript.json file path
const copyAppscriptEntry = './appsscript.json';
// define live development dialog paths
const devDialogEntry = './dev/index.js';
// define client entry points and output names
const clientEntrypoints = [
{
name: 'CLIENT - Demo Bootstrap',
entry: './src/client/demo-bootstrap/index.js',
filename: 'demo-bootstrap',
template: './src/client/demo-bootstrap/index.html',
},
];
// define certificate locations
// see "npm run setup:https" script in package.json
const keyPath = path.resolve(__dirname, './certs/key.pem');
const certPath = path.resolve(__dirname, './certs/cert.pem');
/*********************************
* Declare settings
********************************/
// webpack settings for copying files to the destination folder
const copyFilesConfig = {
name: 'COPY FILES - appsscript.json',
mode: 'production', // unnecessary for this config, but removes console warning
entry: copyAppscriptEntry,
output: {
path: destination,
},
plugins: [
new CopyWebpackPlugin({
patterns: [
{
from: copyAppscriptEntry,
to: destination,
},
],
}),
],
};
// webpack settings used by both client and server
const sharedClientAndServerConfig = {
context: __dirname,
};
// webpack settings used by all client entrypoints
const clientConfig = {
...sharedClientAndServerConfig,
mode: isProd ? 'production' : 'development',
output: {
path: destination,
// this file will get added to the html template inline
// and should be put in .claspignore so it is not pushed
filename: 'main.js',
},
resolve: {
extensions: ['.ts', '.tsx', '.js', '.jsx', '.json'],
},
module: {
rules: [
// typescript config
{
test: /\.tsx?$/,
exclude: /node_modules/,
use: [
{
loader: 'babel-loader',
},
{
loader: 'ts-loader',
},
],
},
{
test: /\.jsx?$/,
exclude: /node_modules/,
use: {
loader: 'babel-loader',
},
},
// we could add support for scss here
{
test: /\.css$/,
use: ['style-loader', 'css-loader'],
},
],
},
};
// DynamicCdnWebpackPlugin settings
// these settings help us load 'react', 'react-dom' and the packages defined below from a CDN
// see https://github.com/delco97/React-GAS-Web-Template#adding-packages
// DynamicCdnWebpackPlugin settings: resolve known packages to CDN URLs
// instead of bundling them (see project README on adding packages).
const DynamicCdnWebpackPluginConfig = {
  // set "verbose" to true to print console logs on CDN usage while webpack builds
  verbose: false,
  resolver: (packageName, packageVersion, options) => {
    // First consult the module-to-cdn database of well-known packages.
    const knownModule = moduleToCdn(packageName, packageVersion, options);
    if (knownModule) {
      return knownModule;
    }
    // Manual fallbacks. "var" must match the global each UMD bundle
    // exposes (webpack externals semantics); both packages below ship
    // their bundles at dist/<name>(.min).js on unpkg.
    const exposedGlobals = {
      'react-transition-group': 'ReactTransitionGroup',
      'react-bootstrap': 'ReactBootstrap',
    };
    const globalVar = exposedGlobals[packageName];
    if (!globalVar) {
      return null;
    }
    const packageSuffix = isProd ? '.min.js' : '.js';
    return {
      name: packageName,
      var: globalVar,
      version: packageVersion,
      url: `https://unpkg.com/${packageName}@${packageVersion}/dist/${packageName}${packageSuffix}`,
    };
  },
};
// webpack settings used by each client entrypoint defined at top
const clientConfigs = clientEntrypoints.map(clientEntrypoint => {
return {
...clientConfig,
name: clientEntrypoint.name,
entry: clientEntrypoint.entry,
plugins: [
new webpack.DefinePlugin({
'process.env': JSON.stringify(envVars),
}),
new HtmlWebpackPlugin({
template: clientEntrypoint.template,
filename: `${clientEntrypoint.filename}${isProd ? '' : '-impl'}.html`,
inlineSource: '^[^(//)]+.(js|css)$', // embed all js and css inline, exclude packages with '//' for dynamic cdn insertion
}),
// add the generated js code to the html file inline
new HtmlWebpackInlineSourcePlugin(),
// this plugin allows us to add dynamically load packages from a CDN
new DynamicCdnWebpackPlugin(DynamicCdnWebpackPluginConfig),
],
};
});
// Absolute path to the dev-server HTML shim shipped by this package.
const gasWebpackDevServerPath = require.resolve(
  'google-apps-script-webpack-dev-server'
);

// webpack settings for devServer https://webpack.js.org/configuration/dev-server/
const devServer = {
  port: PORT,
  https: true,
  // run our own route to serve the package google-apps-script-webpack-dev-server
  before: app => {
    // this '/gas/' path needs to match the path loaded in the iframe in dev/index.js
    app.get('/gas/*', (req, res) => {
      res.setHeader('Content-Type', 'text/html');
      // Stream the shim straight from node_modules on every request.
      fs.createReadStream(gasWebpackDevServerPath).pipe(res);
    });
  },
};

if (fs.existsSync(keyPath) && fs.existsSync(certPath)) {
  // use key and cert settings only if they are found
  // (otherwise webpack-dev-server falls back to a self-signed cert)
  devServer.https = {
    key: fs.readFileSync(keyPath),
    cert: fs.readFileSync(certPath),
  };
}
// webpack settings for the development client wrapper
const devClientConfigs = clientEntrypoints.map(clientEntrypoint => {
  // Build a per-entrypoint copy instead of mutating the shared envVars:
  // the previous `envVars.FILENAME = ...` leaked the *last* entrypoint's
  // FILENAME into every later JSON.stringify(envVars) call (e.g. the
  // server bundle's DefinePlugin defined below this block).
  const entrypointEnvVars = { ...envVars, FILENAME: clientEntrypoint.filename };
  return {
    ...clientConfig,
    name: `DEVELOPMENT: ${clientEntrypoint.name}`,
    entry: devDialogEntry,
    plugins: [
      new webpack.DefinePlugin({
        'process.env': JSON.stringify(entrypointEnvVars),
      }),
      new HtmlWebpackPlugin({
        template: './dev/index.html',
        // this should match the html files we load in src/server/ui.js
        filename: `${clientEntrypoint.filename}.html`,
        inlineSource: '^[^(//)]+.(js|css)$', // embed all js and css inline, exclude packages with '//' for dynamic cdn insertion
      }),
      new HtmlWebpackInlineSourcePlugin(),
      new DynamicCdnWebpackPlugin({}),
    ],
  };
});
// webpack settings used by the server-side code
const serverConfig = {
...sharedClientAndServerConfig,
name: 'SERVER',
// server config can't use 'development' mode
// https://github.com/fossamagna/gas-webpack-plugin/issues/135
mode: isProd ? 'production' : 'none',
entry: serverEntry,
output: {
filename: 'code.js',
path: destination,
libraryTarget: 'this',
},
resolve: {
extensions: ['.ts', '.js', '.json'],
},
module: {
rules: [
// typescript config
{
test: /\.tsx?$/,
exclude: /node_modules/,
use: [
{
loader: 'babel-loader',
},
{
loader: 'ts-loader',
},
],
},
{
test: /\.js$/,
exclude: /node_modules/,
use: {
loader: 'babel-loader',
},
},
],
},
optimization: {
minimize: true,
minimizer: [
new TerserPlugin({
sourceMap: true,
terserOptions: {
// ecma 5 is needed to support Rhino "DEPRECATED_ES5" runtime
// can use ecma 6 if V8 runtime is used
ecma: 5,
warnings: false,
parse: {},
compress: {
properties: false,
},
mangle: false,
module: false,
output: {
beautify: true,
// support custom function autocompletion
// https://developers.google.com/apps-script/guides/sheets/functions
comments: /@customfunction/,
},
},
}),
],
},
plugins: [
new webpack.DefinePlugin({
// replace any env variables in client-side code like PORT and NODE_ENV with actual values
'process.env': JSON.stringify(envVars),
'process.env.NODE_ENV': JSON.stringify(
isProd ? 'production' : 'development'
),
}),
new GasPlugin(),
],
};
// Multi-config export: webpack builds each entry in order.
module.exports = [
  // 1. Copy appsscript.json to destination,
  // 2. Set up webpack dev server during development
  // Note: devServer settings are only read in the first element when module.exports is an array
  { ...copyFilesConfig, ...(isProd ? {} : { devServer }) },
  // 3. Create the server bundle
  serverConfig,
  // 4. Create one client bundle for each client entrypoint.
  ...clientConfigs,
  // 5. Create a development dialog bundle for each client entrypoint during development.
  ...(isProd ? [] : devClientConfigs),
];
|
<reponame>IslameN/c-
#include "Person.h"
#include "PersonBuilder.h"
// Builder-pattern entry point: returns a fresh PersonBuilder by value,
// which accumulates construction state for a Person.
PersonBuilder Person::create() {
    return PersonBuilder{};
}
|
from flask import Flask, request, jsonify
from flask_login import LoginManager, UserMixin, login_user
from os import getenv
from typing import Dict
app = Flask(__name__)
app.config["SECRET_KEY"] = getenv("SECRET_KEY", default="secret_key_example")
login_manager = LoginManager(app)
users: Dict[str, "User"] = {}
class User(UserMixin):
    """In-memory user record for Flask-Login.

    ``UserMixin`` already supplies ``is_authenticated``, ``is_active``,
    ``is_anonymous`` and ``get_id()``. ``get_id()`` returns
    ``str(self.id)``, so an ``id`` attribute is required — the previous
    revision lacked one, making ``login_user()`` fail. It also shadowed
    the mixin's properties with instance attributes (leaving
    ``is_authenticated`` False even after login, and raising
    AttributeError on Flask-Login versions where these are read-only
    properties).
    """

    def __init__(self, username, password):
        # Flask-Login's UserMixin.get_id() stringifies this attribute.
        self.id = username
        self.username = username
        # NOTE(review): stored in plain text — real deployments should
        # hash this (e.g. werkzeug.security.generate_password_hash).
        self.password = password
@login_manager.user_loader
def load_user(username):
    # Flask-Login callback: map a session-stored user id back to a User
    # object. Returning None invalidates the session.
    return users.get(username)
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user from a JSON body {"username": ..., "password": ...}.

    Returns 200 with a success message on valid credentials, otherwise
    401 (the previous revision returned 200 for failures too, and
    crashed on non-JSON request bodies).
    """
    import hmac

    # silent=True yields None instead of raising on a missing/bad body.
    data = request.get_json(silent=True) or {}
    username = data.get('username')
    password = data.get('password')
    user = users.get(username) if username else None
    # compare_digest avoids the timing side channel of a plain '=='.
    if user and password is not None and hmac.compare_digest(
            str(user.password), str(password)):
        login_user(user)
        return jsonify({'message': 'Login successful'})
    return jsonify({'error': 'Invalid credentials'}), 401
if __name__ == '__main__':
app.run()
|
<reponame>sporting-innovations/FuelSDK-Java<filename>src/test/java/com/exacttarget/fuelsdk/ETAssetTest.java<gh_stars>10-100
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package com.exacttarget.fuelsdk;
import java.util.UUID;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.junit.FixMethodOrder;
import org.junit.runners.MethodSorters;
/*
 * Integration tests for retrieving ETAsset objects via ETClient.
 * These hit the live service configured in fuelsdk.properties.
 *
 * Fix: JUnit's assertEquals takes (expected, actual); the previous
 * revision had the arguments swapped, producing misleading failure
 * messages such as "expected <actualValue> but was <200>".
 */
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ETAssetTest {
    private static ETClient client = null;
    private static String unique = "";
    private ETAsset asset = null;
    // ID of a pre-existing asset expected in the test account.
    private String aid = "90840";

    public ETAssetTest() {
    }

    @BeforeClass
    public static void setUpClass() throws ETSdkException {
        client = new ETClient("fuelsdk.properties");
        unique = UUID.randomUUID().toString();
    }

    /** Retrieves all assets and checks the response envelope. */
    @Test
    public void getAllAsset() throws ETSdkException
    {
        ETResponse<ETAsset> response = client.retrieve(ETAsset.class);
        System.out.println("resp="+ response.toString());
        assertEquals("200", response.getResponseCode());
        assertEquals("OK", response.getResponseMessage());
        assertNotNull(response.getRequestId());
        ETResult<ETAsset> result = response.getResult();
        System.out.println("res="+ result.toString());
        assertNotNull(result.getObject());
    }

    /** Retrieves one asset by id and verifies the returned object id. */
    @Test
    public void getAsset() throws ETSdkException
    {
        ETResponse<ETAsset> response = client.retrieve(ETAsset.class, "id="+aid);
        System.out.println("resp="+ response.toString());
        assertEquals("200", response.getResponseCode());
        assertEquals("OK", response.getResponseMessage());
        assertNotNull(response.getRequestId());
        ETResult<ETAsset> result = response.getResult();
        System.out.println("res="+ result.toString());
        assertEquals(aid, result.getObjectId());
    }
}
|
import os
import torch
import pickle
from tqdm import tqdm
# project imports
from networks import define_G, to_device
from methods.scheduler import LearningRateScheduler
class BaseMethod:
    """Shared training/evaluation scaffolding for generator-based methods.

    Owns the generator network ``self.G``, its storage directory, per-epoch
    loss bookkeeping, the learning-rate schedule, and (de)serialization.
    Subclasses supply the actual optimization logic by overriding
    ``set_input`` / ``optimize_parameters`` (and optionally
    ``get_untrained_loss``).
    """

    def __init__(self, args, loader):
        self.args = args
        self._name = args.model_name
        self.device = args.device
        self.mode = args.mode
        self.model_dir = args.model_dir
        self.model_store_path = os.path.join(args.model_dir, args.model_name)
        if not os.path.exists(self.model_store_path) and args.mode == 'train':
            # makedirs (was mkdir): also creates a missing parent directory
            # instead of raising FileNotFoundError on a fresh machine.
            os.makedirs(self.model_store_path, exist_ok=True)
            # Also store all args in a text_file.
            self.write_args_string_to_file()
        if not os.path.exists(self.model_store_path) and args.mode == 'test':
            raise FileNotFoundError('Model does not exist. Please check if the model has yet been run.')

        # Maps epoch -> {'train': mean_loss, 'val': mean_loss}.
        self.losses = {}
        self.loader = loader
        self.epochs = args.epochs

        # Define the generator network.
        self.G = define_G(args).to(self.device)
        print("[{}] initiated with {} trainable parameters".format(args.backbone, self.num_parameters))

        # Set the optimizer and scheduler, but wait for method-specific parameters.
        self.criterion = None
        self.optimizer = None
        self.learning_rate_scheduler = None
        if args.mode == 'train':
            self.learning_rate_scheduler = LearningRateScheduler(args.adjust_lr, args.lr_mode, args.learning_rate,
                                                                 args.epochs, args.num_iterations)

    def predict(self, data):
        """Run a forward pass of the generator on `data` (moved to device)."""
        data = to_device(data, self.args.device)
        return self.G(data)

    def run_epoch(self, current_epoch):
        """Train for one epoch over ``self.loader`` and record the mean loss."""
        tbar = tqdm(self.loader)
        total_iters = len(self.loader)
        train_loss = 0.0
        for current_iter, data in enumerate(tbar):
            # First, adjust learning rate.
            self.update_learning_rate(current_epoch, current_iter)
            # Then optimize.
            self.set_input(data)
            iter_loss = self.optimize_parameters()
            # Gather running loss, so we can compute the full loss after the epoch.
            train_loss += iter_loss
            tbar.set_description('Train loss: %.6f' % (train_loss / (current_iter + 1)))
        # Record the running loss.
        if current_epoch not in self.losses:
            self.losses[current_epoch] = {}
        self.losses[current_epoch]['train'] = train_loss / total_iters

    def set_input(self, data):
        """Store/prepare a batch for optimization. Subclass hook."""
        pass

    def optimize_parameters(self):
        """Run one optimization step and return the iteration loss. Subclass hook."""
        pass

    def update_learning_rate(self, current_epoch, current_iter):
        """Delegate learning-rate adjustment to the scheduler (train mode only)."""
        self.learning_rate_scheduler(self.optimizer, current_epoch, current_iter)

    def get_untrained_loss(self):
        """Optionally compute the loss before any training. Subclass hook."""
        pass

    def store_val_loss(self, val_loss, epoch):
        """Record a validation loss for `epoch` in the loss bookkeeping."""
        if epoch not in self.losses:
            self.losses[epoch] = {}
        # Set the validation loss.
        self.losses[epoch]['val'] = val_loss

    def save_network(self, name):
        """Save the generator's state dict as '<name>_G.pth'."""
        save_filename = "{}_G.pth".format(name)
        save_path = os.path.join(self.model_store_path, save_filename)
        torch.save(self.G.state_dict(), save_path)

    def load_network(self, name):
        """Load the generator's state dict from '<name>_G.pth'."""
        load_filename = "{}_G.pth".format(name)
        load_path = os.path.join(self.model_store_path, load_filename)
        state_dict = torch.load(load_path, map_location=self.device)
        if hasattr(state_dict, '_metadata'):
            del state_dict._metadata
        self.G.load_state_dict(state_dict)

    def save_losses(self):
        """Pickle the accumulated losses to <model_store_path>/losses.p."""
        # Fixed: the original called 'losses.p'.format(self._name), a no-op
        # .format() with no placeholder; the filename is simply 'losses.p'.
        path = os.path.join(self.model_store_path, 'losses.p')
        with open(path, 'wb') as loss_output_file:
            pickle.dump(self.losses, loss_output_file)

    def to_eval(self):
        """Switch the generator (and criterion, in train mode) to eval mode."""
        self.G.eval()
        if self.mode == 'train':
            self.criterion.to_eval()

    def to_train(self):
        """Switch the generator (and criterion, in train mode) to train mode."""
        self.G.train()
        # Fixed for consistency with to_eval(): the criterion is only created
        # in 'train' mode, so guard against an AttributeError on None.
        if self.mode == 'train':
            self.criterion.to_train()

    def write_args_string_to_file(self):
        """Dump all arguments to a human-readable 'params' file."""
        args_string = '=== ARGUMENTS ===\n'
        for key, val in vars(self.args).items():
            args_string += '{0: <20}: {1}\n'.format(key, val)
        args_string += '=================\n'
        with open(os.path.join(self.model_store_path, 'params'), 'w') as f:
            f.write(args_string)

    @property
    def num_parameters(self):
        """Number of trainable parameters in the generator."""
        return sum(p.numel() for p in self.G.parameters() if p.requires_grad)

    @property
    def method(self):
        return 'Base Method'

    @property
    def name(self):
        return self._name
|
public static boolean isPalindrome(String str) {
    // Normalise: keep only letters and digits, compare case-insensitively.
    String cleaned = str.replaceAll("[^a-zA-Z0-9]", "").toLowerCase();
    // A string is a palindrome iff it equals its own reverse.
    String reversed = new StringBuilder(cleaned).reverse().toString();
    return cleaned.equals(reversed);
}
|
<gh_stars>0
/* Latvian locals for flatpickr */
import { CustomLocale } from "types/locale";
import { FlatpickrFn } from "types/instance";
/* Resolve the global flatpickr instance in a browser environment, or fall
 * back to a minimal stub (empty l10ns map) when no window exists. */
const fp: FlatpickrFn =
  typeof window !== "undefined" && window.flatpickr !== undefined
    ? window.flatpickr
    : {
        l10ns: {},
      } as FlatpickrFn;

export const Latvian: CustomLocale = {
  firstDayOfWeek: 1,
  weekdays: {
    shorthand: ["Sv", "P", "Ot", "Tr", "Ce", "Pk", "Se"],
    longhand: [
      "Svētdiena",
      "Pirmdiena",
      "Otrdiena",
      "Trešdiena",
      "Ceturtdiena",
      "Piektdiena",
      "Sestdiena",
    ],
  },
  months: {
    // Fixed: the original listed "Mai" before "Apr", swapping the April
    // and May shorthands relative to the `longhand` order below.
    shorthand: [
      "Jan",
      "Feb",
      "Mar",
      "Apr",
      "Mai",
      "Jūn",
      "Jūl",
      "Aug",
      "Sep",
      "Okt",
      "Nov",
      "Dec",
    ],
    longhand: [
      "Janvāris",
      "Februāris",
      "Marts",
      "Aprīlis",
      "Maijs",
      "Jūnijs",
      "Jūlijs",
      "Augusts",
      "Septembris",
      "Oktobris",
      "Novembris",
      "Decembris",
    ],
  },
  rangeSeparator: " līdz ",
};

fp.l10ns.lv = Latvian;

export default fp.l10ns;
|
def largest_smallest(array):
    """Return a tuple ``(smallest, largest)`` of the values in `array`.

    Args:
        array: a non-empty sequence of comparable values.

    Returns:
        Tuple of (minimum value, maximum value).

    Raises:
        ValueError: if `array` is empty (the original raised IndexError).
    """
    if not array:
        raise ValueError("largest_smallest() requires a non-empty array")
    smallest = array[0]
    largest = array[0]
    for element in array:
        if element > largest:
            largest = element
        elif element < smallest:
            smallest = element
    return (smallest, largest)


# Example: largest_smallest([7, 9, 5, 4, 8, 1]) == (1, 9)
# (The original trailing line used `=> (1, 9)`, which is not valid Python
# syntax and made the module fail to import.)
|
module.exports = {
upload: function(o) {
var t = getApp();
function r(e) {
"function" == typeof o.start && o.start(e), t.core.uploadFile({
url: o.url || t.api.default.upload_image,
filePath: e.path,
name: o.name || "image",
formData: o.data || {},
success: function(e) {
200 == e.statusCode ? "function" == typeof o.success && (e.data = JSON.parse(e.data),
o.success(e.data)) : "function" == typeof o.error && o.error("上传错误:" + e.statusCode + ";" + e.data),
o.complete();
},
fail: function(e) {
"function" == typeof o.error && o.error(e.errMsg), o.complete();
}
});
}
(o = o || {}).complete = o.complete || function() {}, o.data = o.data || {}, t.core.chooseImage({
count: 1,
success: function(e) {
if (e.tempFiles && 0 < e.tempFiles.length) {
var t = e.tempFiles[0];
r(t);
} else "function" == typeof o.error && o.error("请选择文件"), o.complete();
},
fail: function(e) {
"function" == typeof o.error && (o.error("请选择文件"), o.complete());
}
});
}
};
|
import keras
from keras.models import Sequential
from keras.layers import Dense
def construct_model():
    """Build and compile a small binary classifier.

    Architecture: a 2-unit ReLU layer taking 2 input features, a 4-unit
    ReLU hidden layer, and a single sigmoid output unit. Compiled with the
    Adam optimizer, binary cross-entropy loss, and accuracy metric.

    Returns:
        The compiled keras Sequential model.
    """
    layer_stack = [
        Dense(2, activation='relu', input_dim=2),   # input layer, 2 features
        Dense(4, activation='relu'),                # hidden layer
        Dense(1, activation='sigmoid'),             # output layer
    ]

    model = Sequential()
    for layer in layer_stack:
        model.add(layer)

    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )
    return model
|
// Date: 2014-07-30
// SharpCoder
// This file does the maths for our page.
// It is highly scientific and based on university
// level kerbal physics.

// Declared with `var` (the original assigned to an implicit global, which
// throws a ReferenceError under strict mode / module scope). As a
// top-level script var it still becomes the global `OrbitalMaths`.
var OrbitalMaths = (function() {

    // Craft mass lookup by size index (0..2, anything else -> 30).
    function getMass( size ) {
        if ( size == 0 ) return 3;
        else if ( size == 1 ) return 5;
        else if ( size == 2 ) return 15;
        else return 30;
    }

    // Engine thrust lookup by engine index.
    // NOTE(review): indices outside 0-5 fall through and return undefined —
    // confirm callers only ever pass 0-5.
    function getThrust( engine ) {
        if ( engine == 0 ) return 50; // LVL-909
        else if ( engine == 1 ) return 215; // LV-T30
        else if ( engine == 2 ) return 200; // LV-T45
        else if ( engine == 3 ) return 650; // Skipper
        else if ( engine == 4 ) return 1500; // Mainsail
        else if ( engine == 5 ) return 60; // Atomic Rocket
    }

    // Insert thousands separators into a number string.
    // Code from http://www.mredkj.com/javascript/numberFormat.html#addcommas
    // (fixed: x, x1 and x2 were implicit globals in the original).
    function addCommas(nStr)
    {
        nStr += '';
        var parts = nStr.split('.');
        var intPart = parts[0];
        var fracPart = parts.length > 1 ? '.' + parts[1] : '';
        var rgx = /(\d+)(\d{3})/;
        while (rgx.test(intPart)) {
            intPart = intPart.replace(rgx, '$1' + ',' + '$2');
        }
        return intPart + fracPart;
    }

    // Estimate the distance travelled while burning to cancel `velocity`
    // starting from `altitude` under surface gravity `g`.
    function calculate( g, size, engine, altitude, velocity ) {
        // Fix the numbers.
        altitude = parseFloat( altitude );
        velocity = parseFloat( velocity );

        var sec = 0;
        var tAcc = altitude;
        var tVel = velocity;
        var m = getMass(size);
        var Ft = m * g;
        // NOTE(review): (Ft / m) * g simplifies to g * g — confirm this is
        // the intended acceleration term.
        var a = (Ft / m) * g;
        var twr = (getThrust(engine) / (m * g));

        do {
            sec++;
            tAcc -= a * twr;
        } while ( tAcc > 0 && a != 0 && twr != 0);

        // Calculate how many seconds it takes to reach 0 velocity.
        var eta0 = Math.sqrt( sec );

        // Calculate how far you will travel in that time.
        var dist = 0;
        for ( var i = 0; i < eta0 && tVel > 0; i++ ) {
            dist += tVel;
            tVel -= a * twr;
        }
        return Math.round(dist);
    }

    return {
        // Scan candidate burn points until impact and return the best stopping
        // distance as a comma-formatted string, or -1 when the craft cannot
        // stop in time ("we're dead, jim").
        Calculate: function( g, size, engine, altitude, velocity ) {
            // Fix the numbers.
            altitude = parseFloat( altitude );
            velocity = parseFloat( velocity );

            var eta = altitude / velocity;
            var alt = altitude;
            var best = 0;
            // Declared up front: the original declared `result` inside the
            // loop and read it afterwards via var hoisting.
            var result;

            // Recalculate the information at different altitudes until we find
            // the 'optimum' one. This is very scientific here.
            for ( var i = 0; i < eta; i++ ) {
                // Iterate.
                result = calculate(g, size, engine, altitude, velocity);
                alt -= velocity;
                velocity += Math.sqrt(g);
                if ( result < alt ) best = result;
            }

            // Check if we're dead, jim.
            if ( result > altitude ) return -1;
            return addCommas(result);
        }
    };
})();
|
<filename>commands/Moderation/unban.js
// aoi.js command: unbans a user by ID.
// NOTE(review): aoi.js appears to evaluate the functions in `code` from the
// bottom up — the $onlyPerms / $onlyIf / $let guards at the end run first,
// and the embed ($title/$description/...) at the top is built last. Do not
// reorder the lines of the template. (Behaviour of $unban/$findUser is
// defined by the aoi.js runtime, not visible here — confirm against its docs.)
module.exports = ({
name: "unban",
usage: "unban <user ID>",
description: "Unbans the specified userID",
category: "Moderation",
// The template below is executed by aoi.js at runtime; it is data, not JS.
code: `$title[Unbanned]
$description[Successfully unbanned $userTag[$get[user]].]
$addField[Reason:;$get[reason];no]
$addField[Moderator:;$userTag;no]
$color[RANDOM]
$addTimestamp
$thumbnail[$userAvatar[$get[user]]]
$unban[$get[user];By $userTag[$authorID] Reason: $get[reason]]
$let[reason;$replaceText[$replaceText[$checkCondition[$messageSlice[1]==];true;No reason was provided.];false;$messageSlice[1]]]
$onlyBotPerms[ban;**⛔ I need \`BAN_MEMBERS\` perms to unban users!!**]
$onlyIf[$isBanned[$get[user]]==true;**⛔ The specified user is not a previously banned user!!**]
$onlyIf[$get[user]!=undefined;**⛔ Please specify a valid user to unban!!**]
$let[user;$findUser[$message[1];no]]
$onlyIf[$message[1]!=;**⛔ Please specify someone to unban!!**]
$onlyPerms[ban;**⛔ You must have \`BAN_MEMBERS\` perms to do it!!**]
$let[user;$findUser[$message[1];no]]`
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.