text stringlengths 11 4.05M |
|---|
package main
import (
"errors"
log "github.com/sirupsen/logrus"
"github.com/streadway/amqp"
_ "net/http/pprof"
"time"
)
// MqConn bundles one AMQP connection with the channel it opened and the
// exchange/queue/binding settings it was created with.
type MqConn struct {
	Conn         *amqp.Connection // underlying broker connection
	Channel      *amqp.Channel    // channel used for publish/consume
	Qqueue       *amqp.Queue      // declared queue (field keeps its original spelling)
	exchangeName string           // exchange name
	exchangeType string           // exchange type, e.g. "direct"
	queueName    string           // queue name
	routingKey   string           // binding/routing key
}
// NewMqConn dials the broker at amqpURI, opens a channel, declares a durable
// exchange and a durable queue, and binds them with routingKey.
// It returns nil on any failure.
// Fix: failure paths after the dial previously leaked the established
// connection (and channel); they are now closed before returning nil.
func NewMqConn(amqpURI, exchangeName, exchangeType, queueName, routingKey string) *MqConn {
	defer func() {
		if err := recover(); err != nil {
			log.Error("NewMqConn error : ", exchangeName, queueName, err)
		}
	}()
	// establish the connection
	conn, err := amqp.Dial(amqpURI)
	if err != nil {
		log.Error("Failed to connect to RabbitMQ:", err, amqpURI)
		return nil
	}
	// open a channel
	channel, err := conn.Channel()
	if err != nil {
		log.Error("Failed to open a channel", err)
		conn.Close() // don't leak the TCP connection
		return nil
	}
	// declare the exchange
	err = channel.ExchangeDeclare(
		exchangeName, // name
		exchangeType, // type
		true,         // durable
		false,        // auto-deleted
		false,        // internal
		false,        // noWait
		nil,          // arguments
	)
	if err != nil {
		log.Error("Failed to declare a exchange", err)
		channel.Close()
		conn.Close()
		return nil
	}
	// declare the queue
	qQueue, err := channel.QueueDeclare(
		queueName, // name
		true,      // durable
		false,     // delete when unused
		false,     // exclusive: false so the queue survives consumer disconnects
		false,     // no-wait
		nil,       // arguments
	)
	if err != nil {
		log.Error("Failed to declare a queue", err)
		channel.Close()
		conn.Close()
		return nil
	}
	// bind the queue to the exchange
	err = channel.QueueBind(
		qQueue.Name,  // name of the queue
		routingKey,   // bindingKey
		exchangeName, // sourceExchange
		false,        // noWait
		nil,          // arguments
	)
	if err != nil {
		log.Error("Failed to bind a queue", err)
		channel.Close()
		conn.Close()
		return nil
	}
	log.Info("NewMqConn:", " addr: ", conn.LocalAddr(), " exchangeName: ", exchangeName, " queueName: ", queueName, " routingKey: ", routingKey)
	return &MqConn{conn, channel, &qQueue, exchangeName, exchangeType, queueName, routingKey}
}
// CloseMqConn closes the channel and then the underlying connection.
// Close errors are deliberately ignored, matching the original behavior.
// A nil receiver is tolerated and only logged.
func (conn *MqConn) CloseMqConn() {
	if conn == nil {
		log.Error("CloseMqConn pointer nil")
		return
	}
	_ = conn.Channel.Close()
	_ = conn.Conn.Close()
}
// Publish_mq sends body to the connection's exchange with its routing key.
// The message is marked persistent; persistence is only effective when the
// exchange, the queue and the message are all durable.
func (conn *MqConn) Publish_mq(body []byte) error {
	if conn == nil {
		log.Error("Publish_mq pointer nil")
		return errors.New("Publish_mq pointer nil")
	}
	msg := amqp.Publishing{
		Headers:         amqp.Table{},
		DeliveryMode:    amqp.Persistent, // persistent delivery; lower priority than queue durability
		ContentType:     "text/plain",
		ContentEncoding: "",
		Body:            body,
	}
	if err := conn.Channel.Publish(
		conn.exchangeName, // exchange
		conn.routingKey,   // routing key
		false,             // mandatory
		false,             // immediate
		msg,
	); err != nil {
		log.Error("mq publishing error:", err)
		return err
	}
	log.Info("publishing to mq :", len(body), body)
	return nil
}
// Consumer_mq starts consuming from the connection's queue and returns the
// delivery channel, or nil when the receiver is nil or Consume fails.
// autoack selects automatic acknowledgement mode.
func (conn *MqConn) Consumer_mq(autoack bool) <-chan amqp.Delivery {
	if conn == nil {
		log.Error("Consumer_mq pointer = nil")
		return nil
	}
	// NOTE: with manual acks a channel Qos (prefetch count) could be set
	// here to bound in-flight messages; auto-ack mode ignores that setting.
	deliveries, err := conn.Channel.Consume(
		conn.Qqueue.Name, // queue
		"",               // consumer tag (server-generated)
		autoack,          // auto-ack
		false,            // exclusive
		false,            // no-local
		false,            // no-wait
		nil,              // args
	)
	if err != nil {
		log.Error("mq consume error", err)
		return nil
	}
	return deliveries
}
// MqPool is a fixed-capacity pool of MqConn backed by a buffered channel.
type MqPool struct {
	MqConnChan chan *MqConn
	// The following parameters are kept so callers can rebuild broken connections.
	amqpURI      string // broker URI
	exchangeName string // exchange name
	exchangeType string // exchange type
	queueName    string // queue name
	routingKey   string // binding/routing key
}
// NewMqPool creates capacity connections and stores them in a buffered
// channel. capacity must be positive (log.Fatal otherwise).
// Fix: when a connection fails to build mid-loop, the connections created so
// far are now closed before returning nil instead of being leaked.
func NewMqPool(capacity int, amqpURI, exchangeName, exchangeType, queueName, routingKey string) *MqPool {
	if capacity <= 0 {
		log.Fatal("NewMqPool numConn error", capacity)
	}
	MqConnChan := make(chan *MqConn, capacity)
	for i := 0; i < capacity; i++ {
		conn := NewMqConn(amqpURI, exchangeName, exchangeType, queueName, routingKey)
		if conn == nil {
			log.Error("NewMqPool: NewMqConn create failure !!")
			// release the connections already pooled before bailing out
			close(MqConnChan)
			for c := range MqConnChan {
				c.CloseMqConn()
			}
			return nil
		}
		// the channel holds exactly `capacity` conns, so this never blocks
		MqConnChan <- conn
	}
	return &MqPool{MqConnChan, amqpURI, exchangeName, exchangeType, queueName, routingKey}
}
// OpenClient takes a connection from the pool, waiting up to one second.
// It returns nil when the pool stays empty for the full timeout.
func (mqPool *MqPool) OpenClient() *MqConn {
	if mqPool == nil {
		log.Fatal("OpenClient: mqPool pointer = nil")
	}
	select {
	case c := <-mqPool.MqConnChan:
		return c
	case <-time.After(time.Second * 1):
		return nil
	}
}
// CloseClient returns conn to the pool without blocking; when the pool is
// already full the surplus connection is closed instead. A nil conn is
// logged and ignored.
func (mqPool *MqPool) CloseClient(conn *MqConn) {
	if mqPool == nil {
		log.Fatal("CloseClient: mqPool pointer = nil")
	}
	if conn == nil {
		log.Error("CloseClient: MqConn pointer = nil")
		return
	}
	// The default case is essential: without it a full pool would block
	// the caller forever.
	select {
	case mqPool.MqConnChan <- conn:
	default:
		conn.CloseMqConn()
	}
}
// PutDataToMq publishes body via a pooled connection, retrying up to three
// times; when all pooled attempts fail (timeout taking a connection, or a
// publish error) it dials one brand-new connection and tries once more.
//
// The pool does not test connection liveness: a broken connection taken from
// the pool is closed by the caller, and connections returned beyond the
// pool's capacity are closed automatically by CloseClient.
//
// Known limitation (unchanged): Publish_mq can report success even if the mq
// queue has been deleted.
//
// Fix: the success log after creating a fresh link previously printed a nil
// err value.
func PutDataToMq(mqPool *MqPool, body []byte) error {
	if mqPool == nil {
		log.Fatal("PutDataToMq: mqPool point nil!!")
	}
	// Retry with pooled connections first.
	for i := 0; i < 3; i++ {
		conn := mqPool.OpenClient()
		if conn == nil {
			continue // pool timed out; try again
		}
		if err := conn.Publish_mq(body); err != nil {
			// Broken link: discard it instead of returning it to the pool.
			log.Error("PutDataToMq: publish failure,close link!! ", err)
			conn.CloseMqConn()
			continue
		}
		// Success: hand the connection back and return.
		mqPool.CloseClient(conn)
		log.Info("PutDataToMq: get link from pool and publish success!! ")
		return nil
	}
	// Pool attempts exhausted: build a fresh connection.
	conn := NewMqConn(mqPool.amqpURI, mqPool.exchangeName, mqPool.exchangeType, mqPool.queueName, mqPool.routingKey)
	if conn == nil {
		log.Error("PutDataToMq: create new link failure!!")
		return errors.New(" create new link failure !!")
	}
	if err := conn.Publish_mq(body); err != nil {
		log.Error("publish failure, close link!! ", err)
		conn.CloseMqConn()
		return err
	}
	mqPool.CloseClient(conn)
	log.Info("PutDataToMq: create new link and publish success!! ")
	return nil
}
// GetDataFromMq consumes messages from the queue in an endless loop.
// It takes one connection from the pool exactly once; after that, broken
// connections are re-dialed directly (retrying every second) and never
// returned to the pool. A panic restarts the whole consumer in a new
// goroutine via the deferred recover.
func GetDataFromMq(mqPool *MqPool) {
	if mqPool == nil {
		log.Fatal("GetDataFromMq: mqPool point nil!!")
	}
	// Single pool interaction: all later reconnects bypass the pool.
	conn := mqPool.OpenClient()
	defer func() {
		if err := recover(); err != nil {
			log.Error("GetDataFromMq: defer func, getDataFromMq error : ", err)
			conn.CloseMqConn() // safe on nil receiver (logged and ignored)
			go GetDataFromMq(mqPool)
		}
	}()
HERE:
	// Connection available: consume deliveries forever.
	if conn != nil {
		msgCh := conn.Consumer_mq(true)
		for {
			select {
			case hint, ok := <-msgCh:
				if ok {
					// Decode and process the message.
					log.Info("GetDataFromMq: consume success", hint.Body)
				} else {
					log.Error("GetDataFromMq: get msg from chan failure! disconnect link and reconnect!")
					conn.CloseMqConn()
					// Delivery channel is dead: re-dial once per second
					// until a new connection succeeds, then resume at HERE.
					for {
						conn = NewMqConn(mqPool.amqpURI, mqPool.exchangeName, mqPool.exchangeType, mqPool.queueName, mqPool.routingKey)
						if conn != nil {
							log.Info("GetDataFromMq:, create new link ok, goto here begin work!! chan func")
							goto HERE
						} else {
							log.Info("GetDataFromMq:, create new link failure, sleep!! chan func")
							time.Sleep(time.Second)
						}
					}
				}
			}
		}
	}
	// Pool handed back no connection: re-dial once per second until a new
	// connection succeeds, then resume at HERE.
	for {
		log.Error("GetDataFromMq: get link from pool failure!! pool func")
		conn = NewMqConn(mqPool.amqpURI, mqPool.exchangeName, mqPool.exchangeType, mqPool.queueName, mqPool.routingKey)
		if conn != nil {
			log.Info("GetDataFromMq: create new link ok, goto here begin work!! pool func")
			goto HERE
		} else {
			log.Info("GetDataFromMq:, create new link failure, sleep!! pool func")
			time.Sleep(time.Second)
		}
	}
}
// main wires a 2-connection pool to a demo broker, starts one consumer
// goroutine, and publishes a message every second forever.
// Fixes: the pool is checked for nil before use, and the unreachable
// channel-receive after the infinite loop (flagged by go vet) was removed.
func main() {
	uri := "amqp://root:root@192.168.73.3:5672/"
	pp := NewMqPool(2, uri, "exchange_xu", "direct", "queue_xu", "xu")
	if pp == nil {
		log.Fatal("main: failed to build mq pool")
	}
	go GetDataFromMq(pp)
	for {
		PutDataToMq(pp, []byte("hello boy!"))
		time.Sleep(1 * time.Second)
	}
}
/*
func PutDataToMq2(mqPool *MqPool, body []byte) error {
//从连接池获取连接
conn := mqPool.OpenClient()
//取链接成功
if conn != nil {
err := conn.Publish_mq(body)
// 发送失败
if err !=nil{
log.Error("publish failure!! ", err)
conn.CloseMqConn()
goto HERE
}else{ //发送成功
mqPool.CloseClient(conn)
return nil
}
}else{ //取链接超时
log.Error("get link from pool timeout!!")
goto HERE
}
//重新创建链接,重试put操作.
HERE:
conn = NewMqConn(mqPool.amqpURI, mqPool.exchangeName, mqPool.exchangeType, mqPool.queueName, mqPool.routingKey)
//如果创建链接失败, 直接return
if conn == nil {
log.Error("PutDataToMq: NewMqConn create failure!!")
return errors.New("NewMqConn create failure !!")
}else{
err := conn.Publish_mq(body)
// 发送失败
if err !=nil{
log.Error("publish failure!! ", err)
conn.CloseMqConn()
return err
}else{ //发送成功
mqPool.CloseClient(conn)
return nil
}
}
}
*/
|
package tree
// getValue returns the payload stored in the node.
func (node Node) getValue() int {
	return node.Value
}
// Wrapper-type implementation: MyTreeNode wraps *Node so traversal methods
// can be defined without touching the Node type itself.
type MyTreeNode struct {
	node *Node
}

// postOrder visits the wrapped subtree in post-order (left, right, root),
// printing each node. Nil wrappers and nil nodes are safe no-ops.
func (myNode *MyTreeNode) postOrder() {
	if myNode == nil || myNode.node == nil {
		return
	}
	// Temporaries are needed because a composite literal's method with a
	// pointer receiver cannot be called on the literal directly:
	left := MyTreeNode{myNode.node.Left}
	left.postOrder()
	right := MyTreeNode{myNode.node.Right}
	right.postOrder()
	// MyTreeNode{myNode.node.Left}.postOrder()
	// MyTreeNode{myNode.node.Right}.postOrder()
	myNode.node.print()
}
|
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
// usageMsg is printed by flag.Usage; it documents the CLI arguments and the
// expected config-file schema.
const (
	usageMsg = `usage: jiraattach [-config=path] key path
ARGS
key - The key of the Jira Issue to attach files to.
path - Path to file to attach to Jira Issue.
OPTIONS
-config - Path to config file, defaults to ~/.config/jiraattach/config.json.
CONFIG
The config file must be a JSON formated file and contain the following properties.
jira_url - URL for the Jira instance.
auth - API authentication credentials. The expected format is 'username:password'.
`
)
// main attaches a local file to a Jira issue. It reads the instance URL and
// credentials from a JSON config file, builds a multipart/form-data request
// against the Jira REST API, and exits with status 2 on any failure.
// Fixes: the config-open error used Fprintln with a %v verb (never
// formatted, and the error itself was dropped); the local variable
// `filepath` shadowed the path/filepath package.
func main() {
	configpath := flag.String("config", filepath.Join(os.Getenv("HOME"), ".config", "jiraattach", "config.json"), "path to config file")
	flag.Usage = func() {
		fmt.Fprintln(os.Stderr, usageMsg)
	}
	flag.Parse()

	args := flag.Args()
	if len(args) < 2 {
		fmt.Fprintln(os.Stderr, "key and path are required")
		os.Exit(2)
	}
	key, attachpath := args[0], args[1]

	configfile, err := os.Open(*configpath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to open config file, %v: %v\n", *configpath, err)
		os.Exit(2)
	}
	defer configfile.Close()

	config := &Config{}
	if err := json.NewDecoder(configfile).Decode(config); err != nil {
		fmt.Fprintf(os.Stderr, "failed to read config file, %v: %v\n", *configpath, err)
		os.Exit(2)
	}

	httpclient := &http.Client{
		Timeout: 5 * time.Second,
	}

	file, err := os.Open(attachpath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error reading attachment, %v: %v\n", attachpath, err)
		os.Exit(2)
	}
	defer file.Close()

	// Build the multipart body with the attachment under the "file" field.
	body := &bytes.Buffer{}
	w := multipart.NewWriter(body)
	part, err := w.CreateFormFile("file", attachpath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error attaching file to form: %v\n", err)
		os.Exit(2)
	}
	_, err = io.Copy(part, file)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error copying attachment into request: %v\n", err)
		os.Exit(2)
	}
	err = w.Close()
	if err != nil {
		fmt.Fprintf(os.Stderr, "error writing form body: %v\n", err)
		os.Exit(2)
	}

	req, err := http.NewRequest("POST", config.JiraURL+"/rest/api/2/issue/"+key+"/attachments", body)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error creating request: %v\n", err)
		os.Exit(2)
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("X-Atlassian-Token", "nocheck") // Disable XSRF verification

	var user, pass string
	if strings.Contains(config.Auth, ":") {
		parts := strings.Split(config.Auth, ":")
		user, pass = parts[0], parts[1]
	}
	req.SetBasicAuth(user, pass)

	resp, err := httpclient.Do(req)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error sending request: %v\n", err)
		os.Exit(2)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		// do nothing, request was successful
	default:
		fmt.Fprintf(os.Stderr, "request failed with status code, %d\n", resp.StatusCode)
		respbody, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error reading error-response body: %v\n", err)
		}
		fmt.Fprintln(os.Stderr, string(respbody))
		os.Exit(2)
	}
}
// Config models the JSON config file read at startup (schema documented in
// usageMsg).
type Config struct {
	JiraURL string `json:"jira_url"` // base URL of the Jira instance
	Auth    string `json:"auth"`     // "username:password" basic-auth credentials
}
|
// Web UI package
package webui
import "fmt"
// Common holds the attributes every widget shares: identity, value,
// absolute position, size, and an optional server-side callback.
type Common struct {
	Id, Value     string
	Left, Top     int
	Width, Height int
	Do            func(*Context)
}

// Format renders the shared attributes as an HTML attribute string, offset
// by (l, t). Note the returned string ends with a space so callers can
// append further attributes.
func (a Common) Format(l, t int) string {
	return fmt.Sprintf(`id="%s" value="%s" style="position:absolute; left:%d; top:%d; width:%d; height:%d" `,
		a.Id, a.Value, a.Left+l, a.Top+t, a.Width, a.Height)
}

// ID reports the widget id.
func (a Common) ID() string { return a.Id }

// DO reports the widget callback; it may be nil.
func (a Common) DO() func(*Context) { return a.Do }
// Button is a push-button widget.
type Button struct {
	Common
}

// Format renders the button as an HTML input element at offset (l, t);
// a non-nil Do wires the client-side onClick bridge.
func (a Button) Format(l, t int) string {
	out := `<input type="button" ` + a.Common.Format(l, t)
	if a.Do != nil {
		out += `onClick="javascript:myfunc(this);" `
	}
	return out + "/>\n"
}
// Radio is a radio-button group widget.
type Radio struct {
	Common
	Flag     []string // option labels/values
	Multirow bool     // render each option on its own line
}

// Format renders the group as an HTML form at offset (l, t); a non-nil Do
// wires the client-side onChange bridge.
func (a Radio) Format(l, t int) string {
	out := `<form type="radio" ` + a.Common.Format(l, t)
	if a.Do != nil {
		out += `onChange="javascript:myfunc(this);" `
	}
	out += ">\n"
	sep := ""
	if a.Multirow {
		sep = "<br/>"
	}
	for _, f := range a.Flag {
		out += fmt.Sprintf(`<label><input type="radio" value="%s" />%s</label>`+sep+"\n", f, f)
	}
	return out + "</form>\n"
}
// Check is a checkbox group widget.
type Check struct {
	Common
	Flag     []string // option labels/values
	Multirow bool     // render each option on its own line
}

// Format renders the group as an HTML form at offset (l, t); a non-nil Do
// wires the client-side onChange bridge.
func (a Check) Format(l, t int) string {
	out := `<form type="check" ` + a.Common.Format(l, t)
	if a.Do != nil {
		out += `onChange="javascript:myfunc(this);" `
	}
	out += ">\n"
	sep := ""
	if a.Multirow {
		sep = "<br/>"
	}
	for _, f := range a.Flag {
		out += fmt.Sprintf(`<label><input type="checkbox" value="%s" />%s</label>`+sep+"\n", f, f)
	}
	return out + "</form>\n"
}
// Select is a selection-list widget; Menu switches between a drop-down menu
// and a list box sized to the number of options.
type Select struct {
	Common
	Flag []string
	Menu bool
}

// Format renders the select element at offset (l, t); in menu mode a
// non-nil Do wires the client-side onSelect bridge.
func (a Select) Format(l, t int) string {
	var out string
	if a.Menu {
		out = `<select type="select" ` + a.Common.Format(l, t)
		if a.Do != nil {
			out += `onSelect="javascript:myfunc(this);" `
		}
		out += ">\n"
	} else {
		// List box: no newline follows the opening tag (original behavior).
		out = fmt.Sprintf(`<select size="%d" `, len(a.Flag)) + a.Common.Format(l, t) + `>`
	}
	for _, f := range a.Flag {
		out += fmt.Sprintf(`<option>%s</option>`+"\n", f)
	}
	return out + "</select>\n"
}
// Text is a text-entry widget: single line, multi line, or password.
type Text struct {
	Common
	Password bool // render as a password input (takes precedence)
	Multirow bool // render as a textarea
	Autofold bool // textarea only: wrap long lines
	Readonly bool // single-line only: mark the input read-only
}

// Format renders the text widget at offset (l, t); a non-nil Do wires the
// client-side onChange bridge.
func (a Text) Format(l, t int) string {
	var out string
	switch {
	case a.Password:
		out = `<input type="password" ` + a.Common.Format(l, t)
	case a.Multirow && a.Autofold:
		out = `<textarea type="area" wrap="physical" ` + a.Common.Format(l, t)
	case a.Multirow:
		out = `<textarea type="area" wrap="off" ` + a.Common.Format(l, t)
	default:
		out = `<input type="text" ` + a.Common.Format(l, t)
		if a.Readonly {
			out += `readonly="readonly" `
		}
	}
	if a.Do != nil {
		out += `onChange="javascript:myfunc(this)" `
	}
	return out + "/>\n"
}
// Label is a static text widget.
type Label struct {
	Common
}

// Format renders the label at offset (l, t) with its Value as the text.
func (a Label) Format(l, t int) string {
	return `<label type="label" ` + a.Common.Format(l, t) + ">" + a.Value + "</label>\n"
}
// Image is an image widget rendered with a 1px border.
type Image struct {
	Common
}

// Format renders the image as an HTML input of type image at offset (l, t).
func (a Image) Format(l, t int) string {
	out := `<input type="image" border="1" ` + a.Common.Format(l, t) + " />\n"
	return out
}
// Container groups child widgets inside a positioned div.
type Container struct {
	Common
	Sub []Object
}

// Format renders the div and then each child; children receive the same
// (l, t) offset that the container itself was given.
func (a Container) Format(l, t int) string {
	out := `<div type="container" ` + a.Common.Format(l, t) + ">\n"
	for _, child := range a.Sub {
		out += child.Format(l, t)
	}
	return out + "</div>\n"
}
// Window is the top-level form that hosts all widgets.
type Window struct {
	Width, Height int
	Sub           []Object
}

// String renders the window and all of its children as an HTML form with a
// centered, bordered div of the window's size.
func (a Window) String() string {
	out := fmt.Sprintf("<form>\n"+`<div style="position:relative; margin:auto; width:%d; height:%d; border-style:solid; border-width:1px; border-color:#000">`+"\n", a.Width, a.Height)
	for _, child := range a.Sub {
		out += child.Format(0, 0)
	}
	return out + "</div>\n</form>\n"
}
// Head/Tail wrap every rendered page; Head carries the client-side
// websocket bridge (myfunc collects widget values and sends them to the
// server; incoming messages update widget values by id).
//
// Fix: findset's guard used `e.type!="radio" || e.type!="check"`, which is
// true for every element (no type equals both), so the function always
// returned undefined and radio/check selections were never submitted. The
// condition now uses && (De Morgan), matching the `m!=undefined` check in
// myfunc.
const (
	Head = `<html>
<head>
<script type="text/javascript">
var ws, path;
if(!("WebSocket" in window))
{
alert("unsupport websocket!");
}
else
{
path = location.pathname;
path = path.substring(0, path.lastIndexOf("/"));
ws = new WebSocket("ws://" + location.host + path + "/interact");
ws.onopen = function()
{
// alert("ready to go!");
};
ws.onmessage = function(m)
{
var o, e = JSON.parse(m.data);
if(e.error!=undefined)
{
alert(e.error);
return;
}
e = e.answer;
for(var key in e)
{
o = document.getElementById(key);
if(o.type==undefined)
{
continue;
}
if(o.type=="radio" || o.type=="check")
{
continue;
}
if(o.type=="container")
{
o.innerHTML = e[key];
}
else
{
o.value = e[key];
}
}
};
ws.onclose = function()
{
// alert("connection is closed");
};
}
function findset(e)
{
var n = e.childNodes;
var s = new Array();
if(e.type!="radio" && e.type!="check")
{
return;
}
for(var i=0; i<n.length; i++)
{
if(n[i].checked)
{
s.push(n[i].value)
}
}
return s.join("|");
}
function myfunc(e)
{
var o, m, s = {};
o = document.getElementsByTagName("input");
for(var i=0; i<o.length; i++)
{
if(o[i].type=="text" || o[i].type=="password")
{
s[o[i].id] = o[i].value;
}
}
o = document.getElementsByTagName("textarea");
for(var i=0; i<o.length; i++)
{
s[o[i].id] = o[i].value;
}
o = document.getElementsByTagName("select");
for(var i=0; i<o.length; i++)
{
s[o[i].id] = o[i].value;
}
o = document.getElementsByTagName("form");
for(var i=0; i<o.length; i++)
{
m = findset(o[i]);
if(m!=undefined)
{
s[o[i].id] = m;
}
}
ws.send(JSON.stringify({"call":e.id, "param":s}));
}
</script>
</head>
<body>
`
	Tail = `</body>
</html>`
)
|
package shardkv
// Reply status codes shared by all shardkv RPCs.
const (
	OK            = "OK"
	ErrNoKey      = "ErrNoKey"
	ErrWrongGroup = "ErrWrongGroup"
)

// Err is the status string carried in every RPC reply (one of the constants
// above).
type Err string

// PutAppendArgs carries a Put or Append RPC request.
type PutAppendArgs struct {
	Key   string
	Value string
	Op    string            // "Put" or "Append"
	Impl  PutAppendArgsImpl // extension fields declared elsewhere in the package
}

// PutAppendReply is the response to a Put/Append RPC.
type PutAppendReply struct {
	Err Err
}

// GetArgs carries a Get RPC request.
type GetArgs struct {
	Key  string
	Impl GetArgsImpl // extension fields declared elsewhere in the package
}

// GetReply is the response to a Get RPC.
type GetReply struct {
	Err   Err
	Value string
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package rotatecerts
import (
"fmt"
"math/rand"
"time"
"github.com/Azure/aks-engine/cmd/rotatecerts/internal"
"github.com/Azure/aks-engine/pkg/api/common"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PauseClusterAutoscaler scales to zero the replica count of the cluster autoscaler deployment
// and returns a function that scales back to the original replica count.
//
// It NOPs if the addon is absent or the original replica count is zero.
func PauseClusterAutoscaler(client internal.KubeClient) (func() error, error) {
	name := common.ClusterAutoscalerAddonName
	deploy, err := client.GetDeployment(metav1.NamespaceSystem, name)
	if err != nil && !apierrors.IsNotFound(err) {
		e := errors.Wrapf(err, "getting %s deployment", name)
		return func() error { return e }, e
	}
	if apierrors.IsNotFound(err) || *deploy.Spec.Replicas == 0 {
		// autoscaler not present or no replicas, NOP
		return func() error { return nil }, nil
	}
	// autoscaler present; patch logs msg, then sets the replica count.
	patch := func(msg string, count int32) error {
		// Fix: was log.Infof(msg) — a non-constant format string (go vet
		// printf warning); log.Info does not interpret % verbs.
		log.Info(msg)
		json := fmt.Sprintf(`{"spec":{"replicas": %d}}`, count)
		if _, err = client.PatchDeployment(metav1.NamespaceSystem, name, json); err != nil {
			return errors.Wrapf(err, "applying patch to %s deployment", name)
		}
		return nil
	}
	// pause autoscaler
	if err := patch(fmt.Sprintf("Pausing %s, setting replica count to 0", name), 0); err != nil {
		return func() error { return err }, err
	}
	// resume autoscaler func
	return func() error {
		c := *deploy.Spec.Replicas
		err := patch(fmt.Sprintf("Resuming %s, setting replica count to %d", name, c), c)
		log.Warnln("Run \"aks-engine upgrade\" to refresh the cluster-autoscaler node template")
		return err
	}, nil
}
// RotateServiceAccountTokens deletes all service account tokens and
// triggers a forced rollout of all daemonsets and deployments.
//
// Service account tokens are signed by the cluster CA; deleting them after
// the CA is rotated ensures that KCM will regenerate tokens signed by the
// new CA.
func RotateServiceAccountTokens(client internal.KubeClient) error {
	// Order matters: tokens first, then restarts so replicas pick them up.
	steps := []func(internal.KubeClient) error{
		deleteSATokens,
		rolloutDeployments,
		rolloutDaemonSets,
	}
	for _, step := range steps {
		if err := step(client); err != nil {
			return err
		}
	}
	// TODO rolloutStatefulSets
	return nil
}
// rolloutDeployments force-restarts every deployment in the cluster by
// stamping a random "ca-rotation" pod-template annotation, so the replicas
// remount the regenerated service account tokens.
func rolloutDeployments(client internal.KubeClient) error {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	annotation := fmt.Sprintf(`{"spec":{"template":{"metadata":{"annotations":{"ca-rotation":"%d"}}}}}`, rng.Int31())
	list, err := client.ListDeployments(metav1.NamespaceAll, metav1.ListOptions{})
	if err != nil {
		return errors.Wrapf(err, "listing cluster deployments")
	}
	for _, d := range list.Items {
		if _, err := client.PatchDeployment(d.Namespace, d.Name, annotation); err != nil {
			return errors.Wrapf(err, "patching %s deployment %s", d.Namespace, d.Name)
		}
	}
	return nil
}
// rolloutDaemonSets force-restarts every daemonset in the cluster by
// stamping a random "ca-rotation" pod-template annotation, so the replicas
// remount the regenerated service account tokens.
func rolloutDaemonSets(client internal.KubeClient) error {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	annotation := fmt.Sprintf(`{"spec":{"template":{"metadata":{"annotations":{"ca-rotation":"%d"}}}}}`, rng.Int31())
	list, err := client.ListDaemonSets(metav1.NamespaceAll, metav1.ListOptions{})
	if err != nil {
		return errors.Wrapf(err, "listing cluster daemonsets")
	}
	for _, ds := range list.Items {
		if _, err := client.PatchDaemonSet(ds.Namespace, ds.Name, annotation); err != nil {
			return errors.Wrapf(err, "patching %s daemonset %s", ds.Namespace, ds.Name)
		}
	}
	return nil
}
// deleteSATokens deletes every secret referenced by every service account so
// that the controller manager regenerates tokens signed by the new CA.
// Already-deleted secrets are tolerated.
func deleteSATokens(client internal.KubeClient) error {
	saList, err := client.ListServiceAccounts(metav1.NamespaceAll, metav1.ListOptions{})
	if err != nil {
		return errors.Wrapf(err, "listing cluster service accounts")
	}
	for _, sa := range saList.Items {
		for _, s := range sa.Secrets {
			err := client.DeleteSecret(&v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					// Token secrets live in the service account's namespace.
					Namespace: sa.Namespace,
					Name:      s.Name,
				},
			})
			if err != nil && !apierrors.IsNotFound(err) {
				// Fix: the wrap used s.Namespace, which on a secret
				// ObjectReference is typically empty; report the service
				// account's namespace (the one actually used above).
				return errors.Wrapf(err, "deleting %s secret %s", sa.Namespace, s.Name)
			}
		}
	}
	return nil
}
|
// Copyright © 2018 Inanc Gumus
// Learn Go Programming Course
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
//
// For more tutorials : https://learngoprogramming.com
// In-person training : https://www.linkedin.com/in/inancgumus/
// Follow me on twitter: https://twitter.com/inancgumus
package main
import "fmt"
// main demonstrates a safe widening conversion between integer types.
func main() {
	small := int8(127)
	big := int16(1000)
	fmt.Println(big + int16(small))

	// EXPLANATION
	//
	// Widening int8 -> int16 preserves the value, because every int8 fits
	// in an int16. The reverse conversion, int8(big), would truncate 1000
	// down to 127 (the int8 maximum) and destroy information.
	//
	// You will learn more about this in the "Go Type System" section.
}
|
package fsm
import (
"bytes"
"encoding/json"
"strings"
"encoding/gob"
"fmt"
"reflect"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/swf"
)
// constants used as marker names or signal names
const (
	StateMarker      = "FSM.State"
	CorrelatorMarker = "FSM.Correlator"
	ErrorMarker      = "FSM.Error"
	// NOTE(review): identifier keeps its original (misspelled) name since
	// external callers may reference it; the signal value is "FSM.RepairState".
	RepiarStateSignal = "FSM.RepairState"
	ContinueTimer     = "FSM.ContinueWorkflow"
	ContinueSignal    = "FSM.ContinueWorkflow"
	CompleteState     = "complete"
	CanceledState     = "canceled"
	FailedState       = "failed"
	ErrorState        = "error"
	// the FSM was not configured with a state named in an outcome.
	FSMErrorMissingState = "ErrorMissingFsmState"
	// the FSM encountered an error while serializing stateData.
	FSMErrorStateSerialization = "ErrorStateSerialization"
	// the FSM encountered an error while deserializing stateData.
	FSMErrorStateDeserialization = "ErrorStateDeserialization"
	// the FSM encountered an error while deserializing the event correlator.
	FSMErrorCorrelationDeserialization = "ErrorCorrelationDeserialization"
	// Signal sent when a long-lived worker calls Start().
	ActivityStartedSignal = "FSM.ActivityStarted"
	// Signal sent when a long-lived worker sends an update from Work().
	ActivityUpdatedSignal = "FSM.ActivityUpdated"
)
// Decider decides an Outcome based on an event and the current data for an
// FSM. You can assert the interface{} parameter that is passed to the Decider
// as the type of the DataType field in the FSM. Alternatively, you can use
// TypedFuncs to create a typed decider to avoid having to do the assertion.
type Decider func(*FSMContext, *swf.HistoryEvent, interface{}) Outcome

// Outcome is the result of a Decider processing a HistoryEvent.
type Outcome struct {
	// State is the desired next state in the FSM. The empty string ("") is a signal that you wish decision processing to continue;
	// if the FSM machinery receives the empty string as the state of a final outcome, it will substitute the current state.
	State     string
	Data      interface{}
	Decisions []*swf.Decision
}

// FSMState defines the behavior of one state of an FSM.
type FSMState struct {
	// Name is the name of the state. When returning an Outcome, the NextState should match the Name of an FSMState in your FSM.
	Name string
	// Decider decides an Outcome given the current state, data, and an event.
	Decider Decider
}

// DecisionErrorHandler is the error handling contract for panics that occur in Deciders.
// If your DecisionErrorHandler does not return a non-nil Outcome, any further attempt to process the decisionTask is abandoned and the task will time out.
type DecisionErrorHandler func(ctx *FSMContext, event *swf.HistoryEvent, stateBeforeEvent interface{}, stateAfterError interface{}, err error) (*Outcome, error)

// TaskErrorHandler is the error handling contract for errors that occur
// outside of the Decider machinery when handling receiving incoming tasks,
// sending outgoing decisions for tasks, or replicating state.
// This handler is called when a decision task has been abandoned and the task
// will timeout without any further intervention.
type TaskErrorHandler func(decisionTask *swf.PollForDecisionTaskOutput, err error)

// FSMErrorReporter is the error handling contract for errors in the FSM machinery itself.
// These are generally a misconfiguration of your FSM, or a mismatch between struct and serialized form, and can't be resolved without config/code changes.
// The parameters to each method provide all available info at the time of the error so you can diagnose issues.
// Note that this is a diagnostic interface that basically leaks implementation details, and as such may change from release to release.
type FSMErrorReporter interface {
	ErrorFindingStateData(decisionTask *swf.PollForDecisionTaskOutput, err error)
	ErrorFindingCorrelator(decisionTask *swf.PollForDecisionTaskOutput, err error)
	ErrorMissingFSMState(decisionTask *swf.PollForDecisionTaskOutput, outcome Outcome)
	ErrorDeserializingStateData(decisionTask *swf.PollForDecisionTaskOutput, serializedStateData string, err error)
	ErrorSerializingStateData(decisionTask *swf.PollForDecisionTaskOutput, outcome Outcome, eventCorrelator EventCorrelator, err error)
}

// StateSerializer defines the interface for serializing state to and deserializing state from the workflow history.
type StateSerializer interface {
	Serialize(state interface{}) (string, error)
	Deserialize(serialized string, state interface{}) error
}
// JSONStateSerializer is a StateSerializer backed by encoding/json.
type JSONStateSerializer struct{}

// Serialize encodes state as a JSON string (with the encoder's trailing
// newline) or returns the encoding error.
func (j JSONStateSerializer) Serialize(state interface{}) (string, error) {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(state); err != nil {
		return "", err
	}
	return buf.String(), nil
}

// Deserialize unmarshals the JSON string into state.
func (j JSONStateSerializer) Deserialize(serialized string, state interface{}) error {
	return json.NewDecoder(strings.NewReader(serialized)).Decode(state)
}
// Serialization is the contract for de/serializing state inside an FSM, typically implemented by the FSM itself,
// but serves to break the circular dep between FSMContext and FSM.
type Serialization interface {
	// EventData deserializes the payload of h into data.
	EventData(h *swf.HistoryEvent, data interface{})
	// Serialize returns the serialized form of data.
	Serialize(data interface{}) string
	// StateSerializer exposes the underlying StateSerializer.
	StateSerializer() StateSerializer
	// Deserialize parses serialized into data.
	Deserialize(serialized string, data interface{})
	// InitialState reports the FSM's initial state name.
	InitialState() string
}
// Taggable is implemented by FSM data types that want the resulting tags
// used by FSMClient when starting workflows and by the FSMContext when
// calling ContinueWorkflow(). It is []*string since that is what the SWF
// API takes at the moment.
type Taggable interface {
	Tags() []*string
}

// GetTagsIfTaggable returns data's tags when it implements Taggable, and
// nil otherwise.
func GetTagsIfTaggable(data interface{}) []*string {
	if t, ok := data.(Taggable); ok {
		return t.Tags()
	}
	return nil
}
// SerializedState is a wrapper struct that allows serializing the current state and current data for the FSM in
// a MarkerRecorded event in the workflow history. We also maintain an epoch, which counts the number of times a workflow has
// been continued, and the StartedId of the DecisionTask that generated this state. The epoch + the id provide a total ordering
// of state over the lifetime of different runs of a workflow.
type SerializedState struct {
	StateVersion uint64 `json:"stateVersion"`
	StateName    string `json:"stateName"`
	StateData    string `json:"stateData"`
	WorkflowId   string `json:"workflowId"`
}

// SerializedErrorState is used as the input to a marker that signifies that
// the workflow is in an error state.
type SerializedErrorState struct {
	Details                    string
	EarliestUnprocessedEventId int64
	LatestUnprocessedEventId   int64
	ErrorEvent                 *swf.HistoryEvent
}

// SerializedActivityState is the payload of the ActivityStartedSignal and
// ActivityUpdatedSignal signals.
type SerializedActivityState struct {
	ActivityId string
	Input      *string
}
// StartFSMWorkflowInput should be used to construct the input for any
// StartWorkflowExecutionRequests. It panics on serialization errors, since
// those indicate programmer mistakes rather than runtime conditions.
func StartFSMWorkflowInput(serializer Serialization, data interface{}) *string {
	ss := &SerializedState{
		StateData: serializer.Serialize(data),
	}
	return aws.String(serializer.Serialize(ss))
}
// Stasher takes gob-encoded snapshots of FSM state data between events so a
// deep copy of the data can be restored later.
type Stasher struct {
	dataType interface{} // exemplar value whose concrete type is gob-registered
}

// NewStasher registers dataType's concrete type with gob and returns a
// Stasher for values of that type.
func NewStasher(dataType interface{}) *Stasher {
	gob.Register(dataType)
	return &Stasher{dataType: dataType}
}

// Stash gob-encodes data into a fresh buffer, panicking on encode errors.
func (s *Stasher) Stash(data interface{}) *bytes.Buffer {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(data); err != nil {
		panic(fmt.Sprintf("at=stash type=%s error=%q", reflect.TypeOf(s.dataType), err))
	}
	return &buf
}

// Unstash decodes a stashed buffer into the value pointed to by into,
// panicking on decode errors.
func (s *Stasher) Unstash(stashed *bytes.Buffer, into interface{}) {
	if err := gob.NewDecoder(stashed).Decode(into); err != nil {
		panic(fmt.Sprintf("at=unstash type=%s error=%q", reflect.TypeOf(s.dataType), err))
	}
}
|
package goSolution
import "testing"
// TestNumSubarrayBoundedMax checks a small example: among the subarrays of
// [2 1 4 3], exactly three ([2], [2 1], [3]) have a maximum within [2, 3].
func TestNumSubarrayBoundedMax(t *testing.T) {
	nums := []int{2, 1, 4, 3}
	AssertEqual(t, 3, numSubarrayBoundedMax(nums, 2, 3))
}
|
package ui
import (
"fyne.io/fyne/v2"
"fyne.io/fyne/v2/container"
"fyne.io/fyne/v2/theme"
"fyne.io/fyne/v2/widget"
)
// toolbarView is the application's top toolbar, embedding the shared view
// state (window handle).
type toolbarView struct {
	view
}

// NewToolBarView builds a toolbar bound to the given window.
func NewToolBarView(win fyne.Window) *toolbarView {
	return &toolbarView{
		view: view{
			Win: win,
		},
	}
}

// MakeUI lays out the toolbar: a "new ticket" button anchored to the left
// edge of a border container.
func (t *toolbarView) MakeUI() fyne.CanvasObject {
	newTicketButton := widget.NewButtonWithIcon("Nouvelle requete", theme.DocumentCreateIcon(), func() {})
	//axoneLabel := widget.NewLabel("Axone")
	//axoneLabel.TextStyle = fyne.TextStyle{Bold: true, Italic: true}
	border := container.NewBorder(nil, nil, newTicketButton, nil)
	return border
}
|
package main
// getScore writes a hard-coded score list as a JSONP response, using the
// "callback" query parameter as the wrapping function name.
// Fix: the json.Marshal error was silently swallowed (`if err != nil {}`);
// it is now reported as a 500 and the handler returns.
func getScore(w http.ResponseWriter, r *http.Request) {
	// Set proper content-type header for jsonp
	w.Header().Set("Content-Type", "text/javascript")
	callback := r.FormValue("callback")
	var resp = struct {
		Result []Score
	}{
		Result: []Score{
			{"Mika", 64},
			{"Mikko", 62},
			{"Pekko", 34},
			{"Arimas", 95},
		},
	}
	b, err := json.Marshal(&resp)
	if err != nil {
		http.Error(w, "failed to encode scores", http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, callback+"("+string(b)+")")
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//313. Super Ugly Number
//Write a program to find the nth super ugly number.
//Super ugly numbers are positive numbers whose all prime factors are in the given prime list primes of size k. For example, [1, 2, 4, 7, 8, 13, 14, 16, 19, 26, 28, 32] is the sequence of the first 12 super ugly numbers given primes = [2, 7, 13, 19] of size 4.
//Note:
//(1) 1 is a super ugly number for any given primes.
//(2) The given numbers in primes are in ascending order.
//(3) 0 < k ≤ 100, 0 < n ≤ 106, 0 < primes[i] < 1000.
//(4) The nth super ugly number is guaranteed to fit in a 32-bit signed integer.
//Credits:
//Special thanks to @dietpepsi for adding this problem and creating all test cases.
//func nthSuperUglyNumber(n int, primes []int) int {
//}
// Time Is Money |
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
computepb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/compute_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute"
)
// NetworkFirewallPolicyServer implements the gRPC interface for NetworkFirewallPolicy.
type NetworkFirewallPolicyServer struct{}
// ProtoToNetworkFirewallPolicy converts a NetworkFirewallPolicy resource from its proto representation.
func ProtoToNetworkFirewallPolicy(p *computepb.ComputeNetworkFirewallPolicy) *compute.NetworkFirewallPolicy {
	obj := &compute.NetworkFirewallPolicy{}
	obj.Location = dcl.StringOrNil(p.GetLocation())
	obj.CreationTimestamp = dcl.StringOrNil(p.GetCreationTimestamp())
	obj.Name = dcl.StringOrNil(p.GetName())
	obj.Id = dcl.StringOrNil(p.GetId())
	obj.Description = dcl.StringOrNil(p.GetDescription())
	obj.Fingerprint = dcl.StringOrNil(p.GetFingerprint())
	obj.SelfLink = dcl.StringOrNil(p.GetSelfLink())
	obj.SelfLinkWithId = dcl.StringOrNil(p.GetSelfLinkWithId())
	obj.RuleTupleCount = dcl.Int64OrNil(p.GetRuleTupleCount())
	obj.Region = dcl.StringOrNil(p.GetRegion())
	obj.Project = dcl.StringOrNil(p.GetProject())
	return obj
}
// NetworkFirewallPolicyToProto converts a NetworkFirewallPolicy resource to its proto representation.
func NetworkFirewallPolicyToProto(resource *compute.NetworkFirewallPolicy) *computepb.ComputeNetworkFirewallPolicy {
	proto := &computepb.ComputeNetworkFirewallPolicy{}
	proto.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	proto.SetCreationTimestamp(dcl.ValueOrEmptyString(resource.CreationTimestamp))
	proto.SetName(dcl.ValueOrEmptyString(resource.Name))
	proto.SetId(dcl.ValueOrEmptyString(resource.Id))
	proto.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	proto.SetFingerprint(dcl.ValueOrEmptyString(resource.Fingerprint))
	proto.SetSelfLink(dcl.ValueOrEmptyString(resource.SelfLink))
	proto.SetSelfLinkWithId(dcl.ValueOrEmptyString(resource.SelfLinkWithId))
	proto.SetRuleTupleCount(dcl.ValueOrEmptyInt64(resource.RuleTupleCount))
	proto.SetRegion(dcl.ValueOrEmptyString(resource.Region))
	proto.SetProject(dcl.ValueOrEmptyString(resource.Project))
	return proto
}
// applyNetworkFirewallPolicy handles the gRPC request by passing it to the underlying NetworkFirewallPolicy Apply() method.
func (s *NetworkFirewallPolicyServer) applyNetworkFirewallPolicy(ctx context.Context, c *compute.Client, request *computepb.ApplyComputeNetworkFirewallPolicyRequest) (*computepb.ComputeNetworkFirewallPolicy, error) {
	desired := ProtoToNetworkFirewallPolicy(request.GetResource())
	applied, err := c.ApplyNetworkFirewallPolicy(ctx, desired)
	if err != nil {
		return nil, err
	}
	return NetworkFirewallPolicyToProto(applied), nil
}
// ApplyComputeNetworkFirewallPolicy handles the gRPC request by passing it to the underlying NetworkFirewallPolicy Apply() method.
func (s *NetworkFirewallPolicyServer) ApplyComputeNetworkFirewallPolicy(ctx context.Context, request *computepb.ApplyComputeNetworkFirewallPolicyRequest) (*computepb.ComputeNetworkFirewallPolicy, error) {
	cl, err := createConfigNetworkFirewallPolicy(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyNetworkFirewallPolicy(ctx, cl, request)
}
// DeleteComputeNetworkFirewallPolicy handles the gRPC request by passing it to the underlying NetworkFirewallPolicy Delete() method.
func (s *NetworkFirewallPolicyServer) DeleteComputeNetworkFirewallPolicy(ctx context.Context, request *computepb.DeleteComputeNetworkFirewallPolicyRequest) (*emptypb.Empty, error) {
	cl, err := createConfigNetworkFirewallPolicy(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteNetworkFirewallPolicy(ctx, ProtoToNetworkFirewallPolicy(request.GetResource()))
}
// ListComputeNetworkFirewallPolicy handles the gRPC request by passing it to the underlying NetworkFirewallPolicyList() method.
func (s *NetworkFirewallPolicyServer) ListComputeNetworkFirewallPolicy(ctx context.Context, request *computepb.ListComputeNetworkFirewallPolicyRequest) (*computepb.ListComputeNetworkFirewallPolicyResponse, error) {
	cl, err := createConfigNetworkFirewallPolicy(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	listing, err := cl.ListNetworkFirewallPolicy(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	var protos []*computepb.ComputeNetworkFirewallPolicy
	for _, item := range listing.Items {
		protos = append(protos, NetworkFirewallPolicyToProto(item))
	}
	resp := &computepb.ListComputeNetworkFirewallPolicyResponse{}
	resp.SetItems(protos)
	return resp, nil
}
// createConfigNetworkFirewallPolicy builds a compute client configured
// with the given service account credentials file.
// The parameter was renamed from service_account_file: Go uses
// mixedCaps, never underscores; parameter names are not part of the
// call interface, so callers are unaffected.
func createConfigNetworkFirewallPolicy(ctx context.Context, serviceAccountFile string) (*compute.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return compute.NewClient(conf), nil
}
|
// findPeakElement returns the index of some peak element of nums — an
// element strictly greater than its neighbors (nums[-1] and nums[n]
// are treated as -inf, per the LeetCode problem statement).
//
// Replaced the original O(n) linear scan with the canonical O(log n)
// binary search: at each step, if nums[mid] < nums[mid+1] a peak must
// exist to the right of mid, otherwise one exists at or left of mid.
// The contract (index of a peak) is unchanged; the problem guarantees
// adjacent elements are never equal.
func findPeakElement(nums []int) int {
	lo, hi := 0, len(nums)-1
	for lo < hi {
		mid := lo + (hi-lo)/2
		if nums[mid] < nums[mid+1] {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return lo
}
|
/*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 0.4.1.dev1
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
package swagger
// GetIndustryFacilities200Ok is the 200 OK response object for the
// industry facilities endpoint.
type GetIndustryFacilities200Ok struct {
	// ID of the facility
	FacilityId int64 `json:"facility_id,omitempty"`
	// Owner of the facility
	OwnerId int32 `json:"owner_id,omitempty"`
	// Region ID where the facility is
	RegionId int32 `json:"region_id,omitempty"`
	// Solar system ID where the facility is
	SolarSystemId int32 `json:"solar_system_id,omitempty"`
	// Tax imposed by the facility
	Tax float32 `json:"tax,omitempty"`
	// Type ID of the facility
	TypeId int32 `json:"type_id,omitempty"`
}
|
// Copyright (c) 2017-2018 The qitmeer developers
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btctypes
import (
"encoding/binary"
"io"
"fmt"
)
const (
	// MaxVarIntPayload is the maximum payload size for a variable length integer.
	MaxVarIntPayload = 9
	// MessageHeaderSize is the number of bytes in a bitcoin message header.
	// Bitcoin network (magic) 4 bytes + command 12 bytes + payload length 4 bytes +
	// checksum 4 bytes.
	MessageHeaderSize = 24
	// MaxMessagePayload is the maximum bytes a message can be regardless of other
	// individual limits imposed by messages themselves.
	MaxMessagePayload = (1024 * 1024 * 32) // 32MB
	// MaxBlockPayload is the maximum bytes a block message can be in bytes.
	// After Segregated Witness, the max block payload has been raised to 4MB.
	MaxBlockPayload = 4000000
)

var (
	// littleEndian is a convenience variable since binary.LittleEndian is
	// quite long.
	littleEndian = binary.LittleEndian
)

// MessageEncoding represents the wire message encoding format to be used.
type MessageEncoding uint32

const (
	// BaseEncoding encodes all messages in the default format specified
	// for the Bitcoin wire protocol.
	BaseEncoding MessageEncoding = 1 << iota
	// WitnessEncoding encodes all messages other than transaction messages
	// using the default Bitcoin wire protocol specification. For transaction
	// messages, the new encoding format detailed in BIP0144 will be used.
	WitnessEncoding
)

// LatestEncoding is the most recently specified encoding for the Bitcoin wire
// protocol.
var LatestEncoding = WitnessEncoding
// Message is an interface that describes a bitcoin message. A type that
// implements Message has complete control over the representation of its data
// and may therefore contain additional or fewer fields than those which
// are used directly in the protocol encoded message.
type Message interface {
	// BtcDecode decodes the message from r for the given protocol
	// version and wire encoding.
	BtcDecode(io.Reader, uint32, MessageEncoding) error
	// BtcEncode encodes the message to w for the given protocol
	// version and wire encoding.
	BtcEncode(io.Writer, uint32, MessageEncoding) error
	// Command returns the protocol command string for the message.
	Command() string
	// MaxPayloadLength returns the maximum payload size for the given
	// protocol version.
	MaxPayloadLength(uint32) uint32
}
// Commands used in bitcoin message headers which describe the type of message.
// Each value is the wire command string placed in the 12-byte command
// field of the message header.
const (
	CmdVersion      = "version"
	CmdVerAck       = "verack"
	CmdGetAddr      = "getaddr"
	CmdAddr         = "addr"
	CmdGetBlocks    = "getblocks"
	CmdInv          = "inv"
	CmdGetData      = "getdata"
	CmdNotFound     = "notfound"
	CmdBlock        = "block"
	CmdTx           = "tx"
	CmdGetHeaders   = "getheaders"
	CmdHeaders      = "headers"
	CmdPing         = "ping"
	CmdPong         = "pong"
	CmdAlert        = "alert"
	CmdMemPool      = "mempool"
	CmdFilterAdd    = "filteradd"
	CmdFilterClear  = "filterclear"
	CmdFilterLoad   = "filterload"
	CmdMerkleBlock  = "merkleblock"
	CmdReject       = "reject"
	CmdSendHeaders  = "sendheaders"
	CmdFeeFilter    = "feefilter"
	CmdGetCFilters  = "getcfilters"
	CmdGetCFHeaders = "getcfheaders"
	CmdGetCFCheckpt = "getcfcheckpt"
	CmdCFilter      = "cfilter"
	CmdCFHeaders    = "cfheaders"
	CmdCFCheckpt    = "cfcheckpt"
)
// MessageError describes an issue with a message.
// An example of some potential issues are messages from the wrong bitcoin
// network, invalid commands, mismatched checksums, and exceeding max payloads.
//
// This provides a mechanism for the caller to type assert the error to
// differentiate between general io errors such as io.EOF and issues that
// resulted from malformed messages.
type MessageError struct {
	Func        string // Function name
	Description string // Human readable description of the issue
}
// Error satisfies the error interface, prefixing the description with
// the originating function name when one was recorded.
func (e *MessageError) Error() string {
	if e.Func == "" {
		return e.Description
	}
	return fmt.Sprintf("%v: %v", e.Func, e.Description)
}
// messageError creates a *MessageError for the given function name and
// human-readable description.
func messageError(f string, desc string) *MessageError {
	return &MessageError{
		Func:        f,
		Description: desc,
	}
}
|
package bitmap
// Bitmap is a fixed-capacity bit set backed by a byte slice.
type Bitmap struct {
	data    []byte // backing storage holding the actual bit data
	bitsize uint   // capacity of this Bitmap, in bits
}
// NewBitmap allocates a bitmap capable of holding size bits. A size of
// zero selects a default capacity of 1<<32 bits; any other size is
// rounded up to the next multiple of 8 so the capacity maps onto whole
// bytes. NOTE(review): the 1<<32 default overflows uint on 32-bit
// platforms — confirm the intended targets are 64-bit only.
func NewBitmap(size uint) *Bitmap {
	switch {
	case size == 0:
		size = 0x01 << 32
	case size%8 != 0:
		size += 8 - size%8
	}
	return &Bitmap{
		data:    make([]byte, size>>3),
		bitsize: size,
	}
}
// SetBit sets the bit at offset to value (any non-zero value is
// treated as 1) and reports whether the offset was in range.
func (bitmap *Bitmap) SetBit(offset uint, value uint) bool {
	// BUG FIX: the original check was `bitmap.bitsize < offset`, which
	// accepted offset == bitsize and then indexed data[bitsize/8] —
	// one past the end of the slice — causing a runtime panic. Valid
	// offsets are 0..bitsize-1.
	if offset >= bitmap.bitsize {
		return false
	}
	index, pos := offset/8, offset%8
	if value == 0 {
		// &^ is Go's bit-clear (AND NOT) operator.
		bitmap.data[index] &^= 0x01 << pos
	} else {
		bitmap.data[index] |= 0x01 << pos
	}
	return true
}
// GetBit reports whether the bit at offset is 1. Out-of-range offsets
// return false.
func (bitmap *Bitmap) GetBit(offset uint) bool {
	// BUG FIX: mirror of SetBit — `bitmap.bitsize < offset` allowed
	// offset == bitsize through, indexing one byte past the end of
	// data. Valid offsets are 0..bitsize-1.
	if offset >= bitmap.bitsize {
		return false
	}
	index, pos := offset/8, offset%8
	return (bitmap.data[index] & (0x01 << pos)) != 0
}
|
package mr
//
// RPC definitions.
//
// remember to capitalize all names.
//
import (
"os"
"strconv"
)
//
// example to show how to declare the arguments
// and reply for an RPC.
//

// ExampleArgs is the example RPC request payload.
type ExampleArgs struct {
	X int
}

// ExampleReply is the example RPC response payload.
type ExampleReply struct {
	Y int
}
// RequestTaskArgs is the Request Message for RequestTask
type RequestTaskArgs struct{}

// RequestTaskReply is the Reply Message for RequestTask
type RequestTaskReply struct {
	// True if there is a task assigned.
	HasTask bool
	// True if the task is a map task, otherwise it is a reduce task.
	IsMapTask bool
	// The index of the task.
	Index int
	// Number of mapper of this MapReduce Job.
	NMapper int
	// Number of reducer of this MapReduce Job.
	NReducer int
	// The path of the file to be mapped. Only valid if IsMapTask is True.
	MapFile string
}
// SubmitTaskArgs is the Request Message for SubmitTask
type SubmitTaskArgs struct {
	// True if the task to be submitted is a map task, otherwise is a reduce task.
	IsMapTask bool
	// The index of the task.
	Index int
}

// SubmitTaskReply is the Reply Message for SubmitTask
type SubmitTaskReply struct{}
// DoneArgs is the Request Message for QueryDone
type DoneArgs struct{}

// DoneReply is the Reply Message for QueryDone
type DoneReply struct {
	// True if the map reduce job is done. Worker should exit upon seeing true.
	IsDone bool
}
// Add your RPC definitions here.
// Cook up a unique-ish UNIX-domain socket name
// in /var/tmp, for the master.
// Can't use the current directory since
// Athena AFS doesn't support UNIX-domain sockets.
// masterSock cooks up a unique-ish UNIX-domain socket name in
// /var/tmp for the master, keyed by the caller's UID. /var/tmp is
// used because Athena AFS doesn't support UNIX-domain sockets in the
// current directory.
func masterSock() string {
	return "/var/tmp/824-mr-" + strconv.Itoa(os.Getuid())
}
|
package main
import "fmt"
/*
sliceの長さは要素数。
sliceの容量は、sliceの元となる配列の要素数。
要素数を超えた参照や、容量を超えた拡張などはruntime errorが起きる
*/
// main demonstrates slice length versus capacity: len is the element
// count, cap is the size of the underlying array, and re-slicing is
// allowed within capacity while exceeding it panics at runtime.
func main() {
	s := []int{2, 3, 5, 7, 11, 13}
	printSlice(s)
	// Shrink the slice to zero elements; capacity is retained.
	s = s[:0]
	printSlice(s)
	// Grow back to four elements within the original capacity. Elements
	// beyond the original six can never be referenced.
	s = s[:4]
	printSlice(s)
	// Drop the first two elements, which also reduces the capacity.
	s = s[2:]
	printSlice(s)
	// runtime error: slice bounds out of range [:5] with capacity 4
	// s = s[:5]
}
// printSlice prints the length, capacity and contents of s.
func printSlice(s []int) {
	fmt.Printf("len=%d cap=%d %v\n", len(s), cap(s), s)
}
/*
Inputs:
Two single digits (let's call them m and n) and two chars (let's call them a and b) in your input format of choice.
Output:
For the walkthrough, pretend m=2, n=5, a='a', b='b'.
Your output will be a string built from your four inputs. Let's call the string result, with value "". First, concatenate a onto result m times, so concatenate a onto result 2 times. result now equals aa. Second, concatenate b onto result m times, so concatenate b onto result 2 times. result now equals aabb. Lastly, if result is already longer than n, truncate result so that it has length n. Otherwise, continue alternating with m length runs of a and b until result has length n. The final result is aabba, which has length 5.
Test Cases:
Input: m = 2, n = 4, a = A, b = B
Output: AABB
Input: m = 3, n = 8, a = A, b = B
Output: AAABBBAA
Input: m = 4, n = 3, a = A, b = B
Output: AAA
Input: m = 2, n = 10, a = A, b = B
Output: AABBAABBAA
As all knows, lesser one will rule the world, so the smallest programs, in bytes, win! :)
*/
package main
import (
"strings"
)
// main exercises pattern against the four test cases from the puzzle
// statement, panicking on any mismatch.
func main() {
	cases := []struct {
		m, n int
		want string
	}{
		{2, 4, "AABB"},
		{3, 8, "AAABBBAA"},
		{4, 3, "AAA"},
		{2, 10, "AABBAABBAA"},
	}
	for _, c := range cases {
		assert(pattern(c.m, c.n, 'A', 'B') == c.want)
	}
}
// assert panics with "assertion failed" when x is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// pattern builds an n-character string of alternating m-length runs of
// a then b, starting with a, truncated to exactly n characters.
//
// Improvements over the original: string concatenation in a loop was
// quadratic — strings.Builder makes it linear — and m <= 0 previously
// looped forever; it now returns "".
func pattern(m, n int, a, b rune) string {
	if m <= 0 {
		return ""
	}
	runs := []string{strings.Repeat(string(a), m), strings.Repeat(string(b), m)}
	var sb strings.Builder
	sb.Grow(n + m)
	for i := 0; sb.Len() < n; i ^= 1 {
		sb.WriteString(runs[i])
	}
	return sb.String()[:n]
}
|
package instapi
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/instapi/client-go/internal/csvutil"
"github.com/instapi/client-go/types"
"github.com/tomnomnom/linkheader"
)
// Instapi client constants.
const (
	// DefaultEndpoint is the API base URL used when none is configured.
	DefaultEndpoint = "https://api.instapi.com/v1/"
)

// Client related errors. These are sentinel errors; match them with
// errors.Is.
var (
	ErrNotFound        = errors.New("resource not found")
	ErrUnsupportedType = errors.New("unsupported type")
	ErrForbidden       = errors.New("forbidden")
	ErrUnauthorized    = errors.New("unauthorized")
	ErrStatus          = errors.New("unexpected HTTP status")
)

// Client represents a client implementation.
type Client struct {
	doer      Doer                                    // HTTP transport used for all requests
	debugFunc func(*http.Request, *http.Response, Debug) // optional per-request debug hook
	endpoint  string                                  // API base URL
	token     string                                  // "Bearer ..." authorization value
}

// Doer defines the HTTP Do() interface.
type Doer interface {
	Do(*http.Request) (*http.Response, error)
}

// ClientOption represents a client option.
type ClientOption func(*Client)
// HTTPClient option sets the HTTP transport used for requests.
func HTTPClient(doer Doer) ClientOption {
	return func(client *Client) {
		client.doer = doer
	}
}
// Endpoint option sets the API base URL.
func Endpoint(endpoint string) ClientOption {
	return func(client *Client) {
		client.endpoint = endpoint
	}
}
// Token option sets the bearer token used to authorize requests.
func Token(token string) ClientOption {
	return func(client *Client) {
		client.token = "Bearer " + token
	}
}
// DebugFunc option installs a hook invoked after each request with the
// request, response and debug metadata.
func DebugFunc(f func(*http.Request, *http.Response, Debug)) ClientOption {
	return func(client *Client) {
		client.debugFunc = f
	}
}
// New initializes a new client instance, applying the given options
// and falling back to DefaultEndpoint and http.DefaultClient for any
// setting left unconfigured.
func New(options ...ClientOption) *Client {
	client := &Client{}
	for _, opt := range options {
		opt(client)
	}
	if client.endpoint == "" {
		client.endpoint = DefaultEndpoint
	}
	if client.doer == nil {
		client.doer = http.DefaultClient
	}
	return client
}
// Default returns a client configured from the API_ENDPOINT and TOKEN
// environment variables.
func Default() *Client {
	endpoint := os.Getenv("API_ENDPOINT")
	token := os.Getenv("TOKEN")
	return New(Endpoint(endpoint), Token(token))
}
// newRequest builds an HTTP request bound to ctx with the client's
// Authorization token attached.
func (c *Client) newRequest(ctx context.Context, method, url string, body io.Reader) (*http.Request, error) {
	request, err := http.NewRequestWithContext(ctx, method, url, body)
	if err != nil {
		return nil, err
	}
	request.Header.Add("Authorization", c.token)
	return request, nil
}
// Debug information returned by a debug function.
type Debug struct {
	Payload  []byte        // JSON request payload, if one was marshalled
	Duration time.Duration // wall-clock time of the HTTP round trip
}
// doRequest performs one HTTP round trip against the API.
//
// method, contentType and endpoint describe the request. statusCode is
// the status expected on success (0 relaxes the exact-match check).
// src, when non-nil, supplies the request body: an io.Reader is
// streamed as-is (with an optional CSV line limit), anything else is
// JSON-marshalled. dst, when non-nil, receives the JSON-decoded
// response body. Returns the response, the raw body bytes (nil when
// reading was skipped) and an error.
func (c *Client) doRequest(ctx context.Context, method, contentType, endpoint string, statusCode int, src, dst interface{}, options ...RequestOption) (*http.Response, []byte, error) {
	var (
		r       io.Reader
		payload []byte
	)
	if src != nil {
		if v, ok := src.(io.Reader); ok {
			// Optimize sending large CSV payloads with a record limit
			if contentType == types.CSV {
				limit := 0
				headers := true // Headers are assumed by default
				for _, option := range options {
					switch option.param {
					case "limit":
						limit = option.value.(int)
					case "headers":
						headers = option.value.(bool)
					}
				}
				if limit > 0 {
					if headers {
						// Count the header record too so the caller's
						// limit applies to data rows only.
						limit++
					}
					r = csvutil.NewLineLimitReader(v, limit)
				}
			}
			if r == nil {
				r = v
			}
		} else {
			var err error
			payload, err = json.Marshal(src)
			if err != nil {
				return nil, nil, err
			}
			r = bytes.NewReader(payload)
		}
	}
	req, err := c.newRequest(ctx, method, endpoint, r)
	if err != nil {
		return nil, nil, err
	}
	req.Header.Add("Accept", contentType)
	req.Header.Add("Content-Type", contentType)
	nilDst := dst == nil
	if nilDst {
		switch method {
		case http.MethodPatch, http.MethodPost, http.MethodPut:
			// No destination to decode into: ask the server to omit
			// the response body entirely.
			req.Header.Add("No-Response-Body", "1")
		}
	}
	q := req.URL.Query()
	for _, option := range options {
		option.fn(&q)
	}
	req.URL.RawQuery = q.Encode()
	start := time.Now()
	resp, err := c.doer.Do(req)
	if err != nil {
		return nil, nil, err
	}
	d := time.Since(start)
	defer resp.Body.Close() // nolint: errcheck
	if c.debugFunc != nil {
		c.debugFunc(req, resp, Debug{Payload: payload, Duration: d})
	}
	// Early exit for successful HTTP status code and nil destination
	if nilDst &&
		(resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNoContent) &&
		(statusCode == http.StatusOK || statusCode == http.StatusNoContent) {
		return resp, nil, nil
	}
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, nil, err
	}
	// NOTE(review): && binds tighter than ||, so this condition reads
	// (statusCode > 0 && resp.StatusCode != statusCode) || non-2xx.
	// Presumably intentional — confirm before restructuring.
	if statusCode > 0 &&
		resp.StatusCode != statusCode ||
		(resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices) {
		err := decodeAPIError(resp.StatusCode, b)
		if err != nil {
			return nil, nil, err
		}
		switch resp.StatusCode {
		case http.StatusForbidden:
			return nil, nil, fmt.Errorf("%w: %s %s", ErrForbidden, method, endpoint)
		case http.StatusNotFound:
			return nil, nil, fmt.Errorf("%w: %s", ErrNotFound, endpoint)
		case http.StatusUnauthorized:
			return nil, nil, fmt.Errorf("%w: %s %s", ErrUnauthorized, method, endpoint)
		default:
			return nil, nil, fmt.Errorf("%w: expected %d, got %d", ErrStatus, statusCode, resp.StatusCode)
		}
	}
	if dst != nil {
		return resp, b, json.Unmarshal(b, &dst)
	}
	return resp, b, nil
}
// Error represents a client error decoded from an API error response.
type Error struct {
	StatusCode int    // HTTP status the error arrived with
	Err        string `json:"error"`
}

// Error satisfies the error interface.
func (e Error) Error() string {
	return e.Err
}
// decodeAPIError attempts to decode an API error object from a
// response body. It returns nil when the body is not a JSON object or
// carries no "error" field, the unmarshal error when the body is
// malformed JSON, and otherwise an Error annotated with statusCode.
func decodeAPIError(statusCode int, b []byte) error {
	// Only JSON object payloads can carry an API error.
	if len(b) == 0 || b[0] != '{' {
		return nil
	}
	var apiErr Error
	if err := json.Unmarshal(b, &apiErr); err != nil {
		return err
	}
	if apiErr.Err == "" {
		return nil
	}
	apiErr.StatusCode = statusCode
	return apiErr
}
// nextLink extracts the "offset" query parameter from the rel="next"
// entry of the response's Link header. It returns "" when no next
// link is present.
func nextLink(resp *http.Response) (string, error) {
	header := strings.TrimPrefix(resp.Header.Get("link"), "Link:")
	for _, link := range linkheader.Parse(header) {
		if link.Rel == "next" {
			u, err := url.Parse(link.URL)
			if err != nil {
				return "", err
			}
			return u.Query().Get("offset"), nil
		}
	}
	return "", nil
}
// getContentType maps a filename's extension to its content type.
func getContentType(filename string) (string, error) {
	ext := filepath.Ext(filename)
	return types.TypeFromExt(ext)
}
|
// Copyright (C) Microsoft Corporation.
package mssqlcommon
import (
"fmt"
"testing"
)
// TestDiagnose exercises Diagnose over every combination of the three
// health flags. Only the all-healthy combination must succeed; each
// failing combination must produce a *ServerUnhealthyError whose
// severity and inner message match the first unhealthy flag, checked
// in the order system > resource > queryProcessing.
func TestDiagnose(t *testing.T) {
	t.Parallel()
	for _, system := range []bool{true, false} {
		for _, resource := range []bool{true, false} {
			for _, queryProcessing := range []bool{true, false} {
				// Local copies of loop variables for the closure to capture
				system := system
				resource := resource
				queryProcessing := queryProcessing
				t.Run(fmt.Sprintf("system = %t, resource = %t, queryProcessing = %t", system, resource, queryProcessing), func(t *testing.T) {
					t.Parallel()
					diagnostics := Diagnostics{System: system, Resource: resource, QueryProcessing: queryProcessing}
					err := Diagnose(diagnostics)
					if system && resource && queryProcessing {
						if err != nil {
							t.Fatalf("Expected Diagnose to succeed but it failed: %s", err)
						}
					} else {
						if err == nil {
							t.Fatal("Expected Diagnose to fail but it succeeded")
						}
						switch serverUnhealthyError := err.(type) {
						case *ServerUnhealthyError:
							if !system {
								if serverUnhealthyError.RawValue != ServerCriticalError {
									t.Fatalf("Diagnose did not fail with ServerCriticalError: %d", serverUnhealthyError.RawValue)
								}
								if serverUnhealthyError.Inner.Error() != "sp_server_diagnostics result indicates system error" {
									t.Fatalf("Diagnose did not fail with an error about system error: %s", serverUnhealthyError.Inner.Error())
								}
							} else if !resource {
								if serverUnhealthyError.RawValue != ServerModerateError {
									t.Fatalf("Diagnose did not fail with ServerModerateError: %d", serverUnhealthyError.RawValue)
								}
								if serverUnhealthyError.Inner.Error() != "sp_server_diagnostics result indicates resource error" {
									t.Fatalf("Diagnose did not fail with an error about resource error: %s", serverUnhealthyError.Inner.Error())
								}
							} else if !queryProcessing {
								if serverUnhealthyError.RawValue != ServerAnyQualifiedError {
									t.Fatalf("Diagnose did not fail with ServerAnyQualifiedError: %d", serverUnhealthyError.RawValue)
								}
								if serverUnhealthyError.Inner.Error() != "sp_server_diagnostics result indicates query processing error" {
									t.Fatalf("Diagnose did not fail with an error about query processing error: %s", serverUnhealthyError.Inner.Error())
								}
							} else {
								t.Fatal("Unreachable")
							}
						default:
							t.Fatal("Diagnose did not return an error of type ServerUnhealthyError")
						}
					}
				})
			}
		}
	}
}
|
package none

// Platform stores any global configuration used for generic
// platforms. It is intentionally empty: the "none" platform has no
// platform-specific settings.
type Platform struct{}
|
package main
import (
"encoding/json"
)
// getJson pairs each property name with the data value at the same
// index and returns the mapping as indented JSON. It returns "" when
// marshalling fails or when data holds fewer values than properties
// (the original indexed data[i] unchecked and paniced in that case).
func getJson(properties []string, data []string) (strResponse string) {
	if len(data) < len(properties) {
		return
	}
	fields := make(map[string]string, len(properties))
	for i, field := range properties {
		fields[field] = data[i]
	}
	jsonBytes, err := json.MarshalIndent(fields, "", " ")
	if err == nil {
		strResponse = string(jsonBytes)
	}
	return
}
|
package hcledit
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclwrite"
)
// New constructs a new HCL file with no content which is ready to be mutated.
func New() (*HCLEditor, error) {
	editor := &HCLEditor{writeFile: hclwrite.NewEmptyFile()}
	return editor, nil
}
// ReadFile reads the HCL file at path and returns an operation
// interface for it. The editor records the absolute path so later
// writes go back to the same file.
func ReadFile(path string) (*HCLEditor, error) {
	path, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	editor, err := Read(f, filepath.Base(path))
	if err != nil {
		return nil, err
	}
	editor.path = path
	// err is known to be nil here; return nil explicitly rather than
	// re-returning the checked variable.
	return editor, nil
}
// Read parses HCL from the given io.Reader and returns an operation
// interface for it; filename is used for diagnostics and position
// information.
func Read(r io.Reader, filename string) (*HCLEditor, error) {
	src, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	parsed, diags := hclwrite.ParseConfig(src, filename, hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		return nil, diags
	}
	editor := &HCLEditor{
		filename:  filename,
		writeFile: parsed,
	}
	return editor, nil
}
|
package xtp_wrapper
/*
#cgo CFLAGS: -Wno-error=implicit-function-declaration -I../../C_porting_XTP/include/XTP -I../../C_porting_XTP/include/CXTPApi
#cgo LDFLAGS: -L../../C_porting_XTP/lib/CXTPApi -lCXTPApi -lxtpquoteapi -lxtptraderapi
#include <string.h>
#include "xtp_cmessage.h"
#include "LCxtp_trader_api.h"
*/
import "C"
import (
"os"
"unsafe"
)
// GoCreateLCTraderApi creates the underlying C trader API object,
// storing its logs under <folder>/trader/.
// NOTE(review): the MkdirAll error is ignored — if the directory
// cannot be created, the C layer presumably fails later; confirm
// whether that is acceptable.
func GoCreateLCTraderApi(client_id int8, folder string) unsafe.Pointer {
	folder = folder + "/trader/"
	os.MkdirAll(folder, 0777)
	cs := C.CString(folder)
	defer C.free(unsafe.Pointer(cs))
	return C.CreateLCTraderApi(C.uint8_t(client_id), cs, 0)
}
// GoCreateLCTraderSpi creates the C trader SPI (callback) object.
func GoCreateLCTraderSpi() unsafe.Pointer {
	return C.CreateLCTraderSpi()
}
// Go_trader_apiRegisterSpi registers the SPI callback object with the
// trader API instance.
func Go_trader_apiRegisterSpi(trader_api unsafe.Pointer, trader_spi unsafe.Pointer) {
	C._trader_apiRegisterSpi(trader_api, trader_spi)
}
//_quote_apiLogin(void * pLC_Api, const char* ip, int port, const char* user, const char* password, XTP_PROTOCOL_TYPE sock_type);
// Go_trader_apiLogin configures the trader API (public topic, software
// key and version) and logs in over TCP, returning the session ID from
// the C layer.
func Go_trader_apiLogin(trader_api unsafe.Pointer, ip_addr string, port int, user string, pwd string, softKey string) uint64 {
	C._trader_apiSubscribePublicTopic(trader_api, C.XTP_TERT_QUICK)
	// BUG FIX: the original cast &softKey / &version (pointers to Go
	// string headers) to *C.char. A Go string is not NUL-terminated
	// and its header is not a char array, so the C side received
	// garbage. Use C.CString to pass proper C strings, freeing the
	// copies when done.
	softKey_s := C.CString(softKey)
	defer C.free(unsafe.Pointer(softKey_s))
	C._trader_apiSetSoftwareKey(trader_api, softKey_s)
	version_s := C.CString("1.0.0")
	defer C.free(unsafe.Pointer(version_s))
	C._trader_apiSetSoftwareVersion(trader_api, version_s)
	ip_addr_s := C.CString(ip_addr)
	defer C.free(unsafe.Pointer(ip_addr_s))
	user_s := C.CString(user)
	defer C.free(unsafe.Pointer(user_s))
	pwd_s := C.CString(pwd)
	defer C.free(unsafe.Pointer(pwd_s))
	return uint64(C._trader_apiLogin(trader_api, ip_addr_s, C.int(port), user_s, pwd_s, C.XTP_PROTOCOL_TCP))
}
// GoReleaseLCTraderApi releases the C trader API object.
// NOTE(review): &trader_api is the address of the local parameter
// copy, so if the C function nulls the pointer it nulls only this
// copy, not the caller's variable — confirm that is intended.
func GoReleaseLCTraderApi(trader_api unsafe.Pointer) {
	C.ReleaseLCTraderApi(&trader_api)
}
// GoReleaseLCTraderSpi releases the C trader SPI object.
// NOTE(review): as with GoReleaseLCTraderApi, &trader_api addresses
// the local parameter copy only.
func GoReleaseLCTraderSpi(trader_api unsafe.Pointer) {
	C.ReleaseLCTraderSpi(&trader_api)
}
|
package dispatcher
import (
"fmt"
)
// JsonRequest is the decoded JSON envelope for a dispatched request.
type JsonRequest struct {
	Login      string      `json:"login"`
	Token      string      `json:"token"`
	Method     string      `json:"method"`
	Database   string      `json:"database"`
	Collection string      `json:"collection"`
	Data       interface{} `json:"data"`
}
// String implements fmt.Stringer, summarizing the request by its
// method and collection only.
func (req JsonRequest) String() string {
	return fmt.Sprintf("JsonRequest: %s, %s", req.Method, req.Collection)
}
|
package main
import "fmt"
// float is a distinct named type over float32, used by main to
// demonstrate that named types do not compare with their underlying type.
type float float32
// main prints the value and dynamic type of a variable of the named
// type float, illustrating that it is distinct from float32.
func main() {
	var f float = 52.2
	// var g float32 = 52.2
	fmt.Printf("f has value %v and type %T\n", f, f)
	// Comparing f with g would not compile (mismatched types float
	// and float32):
	// fmt.Println("f == g", f == g)
}
|
package main
import (
"fmt"
"gopkg.in/yaml.v2"
"io/ioutil"
"log"
"github.com/robbiemcmichael/auth-mux/internal/config"
)
// main loads config.yaml, decodes it into the auth-mux configuration
// structure and dumps the result (plus the first input and output
// sub-configs) to stdout.
func main() {
	data, err := ioutil.ReadFile("config.yaml")
	if err != nil {
		log.Fatal(err)
	}
	// Named cfg (not config) so the local does not shadow the imported
	// config package, which made config.Config unreferenceable below
	// the declaration.
	var cfg config.Config
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg)
	fmt.Printf("%+v\n", cfg.Inputs[0].Config)
	fmt.Printf("%+v\n", cfg.Outputs[0].Config)
}
|
/*
Boating season is over for this year, and Theseus has parked his boat on land. Of course, the boat looks nothing like it did as of the beginning of the season; it never does.
You see, Theseus is constantly looking for ways to improve his boat.
At every day of the boating season, Theseus bought exactly one type of item at his local supply store, and replaced the existing part on his boat with it.
Now, as the season has ended, Theseus wonders what day he replaced all the parts from the previous season.
Input
The first line of the input consists of two space-separated integers P
and N, representing the number of parts the boat consists of, and the number of days in the boating season respectively.
Then follows N lines, each line has a single word wi, the type of boat part that Theseus bought on day i.
Output
Output the day Theseus ended up replacing the last existing part from the previous season, or paradox avoided if Theseus never ended up replacing all the different parts.
Limits
1≤P≤N≤1000.
Each word w[i] will consist only of the letters a–z and _ (underscore).
Each word w[i] will be between 1 and 20 characters long.
The number of distinct w[i] will be at most P.
*/
package main
// main checks boating against the two examples from the problem
// statement, panicking on any mismatch.
func main() {
	days := []string{"left_oar", "right_oar", "left_oar", "hull", "right_oar"}
	assert(boating(3, 5, days) == 4)
	days = []string{"motor", "hull", "left_oar", "hull", "motor"}
	assert(boating(4, 5, days) == "paradox avoided")
}
// assert panics with "assertion failed" when x is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// boating returns the 1-based day on which the p-th distinct part type
// was first bought (i.e. the day every original part had been replaced),
// or the string "paradox avoided" if fewer than p distinct types appear
// within the first n purchases.
func boating(p, n int, a []string) interface{} {
	seen := make(map[string]bool, p)
	for day, part := range a {
		if day >= n {
			break
		}
		seen[part] = true
		if len(seen) >= p {
			return day + 1
		}
	}
	return "paradox avoided"
}
|
package sort
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
// TestSort runs every sorting implementation in the package against a
// shared table of cases (duplicates, single element, pair, small
// permutation), copying the input each time so the in-place sorts do
// not contaminate later runs.
func TestSort(t *testing.T) {
	tests := []struct {
		nums []int
		want []int
	}{
		{
			nums: []int{5, 9, 1, 6, 8, 14, 6, 49, 25, 4, 6, 3},
			want: []int{1, 3, 4, 5, 6, 6, 6, 8, 9, 14, 25, 49},
		},
		{
			nums: []int{5},
			want: []int{5},
		},
		{
			nums: []int{3, 6},
			want: []int{3, 6},
		},
		{
			nums: []int{2, 5, 4, 1, 3},
			want: []int{1, 2, 3, 4, 5},
		},
	}
	for idx, f := range []func([]int){
		bubbleSort,
		selectSort,
		selectSort1,
		insertSort,
		shellSort,
		mergeSort,
		mergeSort1,
		mergeSort2,
		quickSort,
		quickSort1,
		quickSort2,
		quickSort3,
		quickSort4,
	} {
		t.Run(fmt.Sprintf("func#%d", idx), func(t *testing.T) {
			for i, tt := range tests {
				t.Run(fmt.Sprintf("case#%d", i), func(t *testing.T) {
					assert := require.New(t)
					// Sort a copy: the implementations mutate their input.
					var nums = make([]int, len(tt.nums))
					copy(nums, tt.nums)
					f(nums)
					assert.Equal(tt.want, nums)
				})
			}
		})
	}
}
|
package utils
import (
"time"
)
// GetMessageId builds a unique-ish 64-bit message ID: the current time
// in milliseconds shifted into the top bits, serverId in bits 11-21 and
// seqId in the low 11 bits (both callers keep their values below 2046,
// i.e. within 11 bits).
func GetMessageId(serverId, seqId uint16) uint64 {
	millis := time.Now().UnixNano() / int64(time.Millisecond)
	return uint64(millis)<<22 | uint64(serverId)<<11 | uint64(seqId)
}
|
# https://leetcode.com/problems/kth-smallest-element-in-a-sorted-matrix/
- heap に一列つっこんで、そこから pop するごとに次の候補も heap に追加していく
- 実際には heap ぽいことを全探索で実現
- キューに入っているものを全て調べて最小を取り出している
- priority がついていないキューと同じ
- go にも標準でヒープパッケージがあったらしい
- https://golang.org/pkg/container/heap/
- インタフェースの実装から必要なので面倒だけど
- 正直ヒープのことは完全に忘れているので、そろそろ体系的にアルゴリズムを復習しても良さそう
- もう一つの解法は二分探索
- 二次元の領域を半分ずつ狭めていく
- 0,0 が low, n,n が high で、その値のちょうど半分を mid に
- mid を得るには何ステップ必要か調べる(全探索で)
- k 以上かかったなら high = mid という具合
- これの計算量どうなんだろう。全部配列に入れてソートかけた方が早そうな
- k の計算をメモしたりするのかな
|
package ca
import (
"io/ioutil"
"reflect"
"testing"
"time"
)
// TestConfig verifies that the root CA test fixture loads and yields
// the expected certificate request fields and signing policy.
// Fix: three t.Errorf messages read "ant" where "want" was intended.
func TestConfig(t *testing.T) {
	conf, err := ioutil.ReadFile("testdata/root_ca.json")
	if err != nil {
		t.Fatal(err)
	}
	cfg, err := LoadConfig(conf)
	if err != nil {
		t.Fatal(err)
	}
	req := cfg.CertificateRequest()
	if req.Name().CommonName != cfg.CN {
		t.Errorf("unexpected common name: got %s, want %s", req.Name().CommonName, cfg.CN)
	}
	if req.Name().Locality[0] != cfg.Name.L {
		t.Errorf("unexpected locality: got %s, want %s", req.Name().Locality[0], cfg.Name.L)
	}
	if req.KeyRequest.Algo() != DefaultConfig.KeyRequest.A {
		t.Errorf("unexpected key request algo: got %s, want %s", req.KeyRequest.Algo(), DefaultConfig.KeyRequest.A)
	}
	if req.KeyRequest.Size() != DefaultConfig.KeyRequest.S {
		t.Errorf("unexpected key request size: got %d, want %d", req.KeyRequest.Size(), DefaultConfig.KeyRequest.S)
	}
	policy, err := cfg.Signing()
	if err != nil {
		t.Fatal(err)
	}
	if !policy.Valid() {
		t.Errorf("signing policy is invalid")
	}
	if !reflect.DeepEqual(policy.Default.Usage, DefaultConfig.Usage) {
		t.Errorf("unexpected usage: got %v, want %v", policy.Default.Usage, DefaultConfig.Usage)
	}
	if expectedExpiry := 87600 * time.Hour; policy.Default.Expiry != expectedExpiry {
		t.Errorf("unexpected expiry: got %s, want %s", policy.Default.Expiry, expectedExpiry)
	}
	if !policy.Default.CAConstraint.IsCA {
		t.Errorf("unexpected ca constraint: got %t, want %t", false, true)
	}
}
|
package schema
import mapset "github.com/deckarep/golang-set"
import "encoding/json"
// SchemaGraph represents the graph of a source: asset types are the
// vertices and relation types the edges.
type SchemaGraph struct {
	Vertices mapset.Set
	Edges    mapset.Set
}

// SchemaGraphJSON is the JSON wire representation of a SchemaGraph.
type SchemaGraphJSON struct {
	Vertices []AssetType    `json:"vertices"`
	Edges    []RelationType `json:"edges"`
}
// NewSchemaGraph returns an empty schema graph ready to accept assets and
// relations.
func NewSchemaGraph() SchemaGraph {
	graph := SchemaGraph{}
	graph.Vertices = mapset.NewSet()
	graph.Edges = mapset.NewSet()
	return graph
}
// AddAsset registers assetType as a vertex of the graph and returns the
// resulting AssetType.
func (sg *SchemaGraph) AddAsset(assetType string) AssetType {
	vertex := AssetType(assetType)
	sg.Vertices.Add(vertex)
	return vertex
}
// Assets returns all the asset types currently stored in the graph.
// Iteration order over the set is unspecified.
func (sg *SchemaGraph) Assets() []AssetType {
	// Pre-size using the set cardinality to avoid repeated slice growth.
	assets := make([]AssetType, 0, sg.Vertices.Cardinality())
	for a := range sg.Vertices.Iter() {
		assets = append(assets, a.(AssetType))
	}
	return assets
}
// AddRelation records a typed edge from fromType to toType in the graph and
// returns it.
func (sg *SchemaGraph) AddRelation(fromType AssetType, relationType string, toType AssetType) RelationType {
	edge := RelationType{
		Type:     RelationKeyType(relationType),
		FromType: fromType,
		ToType:   toType,
	}
	sg.Edges.Add(edge)
	return edge
}
// Relations returns all the relation types currently stored in the graph.
// Iteration order over the set is unspecified.
func (sg *SchemaGraph) Relations() []RelationType {
	// Pre-size using the set cardinality to avoid repeated slice growth.
	relations := make([]RelationType, 0, sg.Edges.Cardinality())
	for r := range sg.Edges.Iter() {
		relations = append(relations, r.(RelationType))
	}
	return relations
}
// Merge copies every vertex and edge of other into the receiver.
func (sg *SchemaGraph) Merge(other SchemaGraph) {
	for v := range other.Vertices.Iter() {
		sg.Vertices.Add(v)
	}
	for e := range other.Edges.Iter() {
		sg.Edges.Add(e)
	}
}
// Equal reports whether both graphs contain exactly the same vertices and
// edges.
func (sg *SchemaGraph) Equal(other SchemaGraph) bool {
	return sg.Vertices.Equal(other.Vertices) && sg.Edges.Equal(other.Edges)
}
// MarshalJSON implements json.Marshaler by flattening the vertex and edge
// sets into SchemaGraphJSON. The slices are pre-sized (and never nil) so an
// empty graph encodes as [] rather than null.
func (sg *SchemaGraph) MarshalJSON() ([]byte, error) {
	schemaJSON := SchemaGraphJSON{
		Vertices: make([]AssetType, 0, sg.Vertices.Cardinality()),
		Edges:    make([]RelationType, 0, sg.Edges.Cardinality()),
	}
	for v := range sg.Vertices.Iter() {
		schemaJSON.Vertices = append(schemaJSON.Vertices, v.(AssetType))
	}
	for e := range sg.Edges.Iter() {
		schemaJSON.Edges = append(schemaJSON.Edges, e.(RelationType))
	}
	return json.Marshal(schemaJSON)
}
// UnmarshalJSON implements json.Unmarshaler: it decodes the slice form and
// rebuilds the vertex and edge sets from scratch.
func (sg *SchemaGraph) UnmarshalJSON(b []byte) error {
	var decoded SchemaGraphJSON
	if err := json.Unmarshal(b, &decoded); err != nil {
		return err
	}
	sg.Vertices = mapset.NewSet()
	sg.Edges = mapset.NewSet()
	for _, vertex := range decoded.Vertices {
		sg.Vertices.Add(vertex)
	}
	for _, edge := range decoded.Edges {
		sg.Edges.Add(edge)
	}
	return nil
}
|
// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package ipcserver
import (
"fmt"
"log"
"strings"
"fidl/bindings"
"syscall/zx"
"syscall/zx/mxerror"
"garnet/amber/api/amber"
"amber/daemon"
"amber/pkg"
)
// ControlSrvr serves the amber Control FIDL interface, delegating real work
// to the package daemon and remembering the stubs of bound connections so
// Quit can close them.
type ControlSrvr struct {
	daemon *daemon.Daemon
	stubs  []*bindings.Stub
}
// NewControlSrvr returns a ControlSrvr backed by the given daemon.
func NewControlSrvr(d *daemon.Daemon) *ControlSrvr {
	srvr := ControlSrvr{daemon: d}
	return &srvr
}
// DoTest echoes the supplied number back in a human-readable string.
func (c *ControlSrvr) DoTest(in int32) (out string, err error) {
	out = fmt.Sprintf("Your number was %d\n", in)
	return out, nil
}
// AddSrc is currently a stub: url, rateLimit and pubKey are ignored and
// success is always reported. TODO(review): wire to the daemon.
func (c *ControlSrvr) AddSrc(url string, rateLimit int32, pubKey string) (bool, error) {
	return true, nil
}
// RemoveSrc is currently a stub: url is ignored and success is always
// reported. TODO(review): wire to the daemon.
func (c *ControlSrvr) RemoveSrc(url string) (bool, error) {
	return true, nil
}
// Check is currently a stub that always reports success.
func (c *ControlSrvr) Check() (bool, error) {
	return true, nil
}
// ListSrcs is currently a stub that always returns an empty list.
func (c *ControlSrvr) ListSrcs() ([]string, error) {
	return []string{}, nil
}
// GetUpdate resolves an update for the named package (optionally pinned to
// version) through the daemon, writes it to pkgfs and returns its Merkle
// root. A nil version is treated as the empty, unpinned version.
func (c *ControlSrvr) GetUpdate(name string, version *string) (*string, error) {
	d := ""
	if version == nil {
		version = &d
	}
	if len(name) == 0 {
		return nil, fmt.Errorf("No name provided")
	}
	// Package names are rooted paths; normalize a missing leading slash.
	if name[0] != '/' {
		name = fmt.Sprintf("/%s", name)
	}
	ps := pkg.NewPackageSet()
	// Named "target" (previously "pkg") so the imported pkg package is not
	// shadowed by the local variable.
	target := pkg.Package{Name: name, Version: *version}
	ps.Add(&target)
	updates := c.daemon.GetUpdates(ps)
	res, ok := updates[target]
	if !ok {
		return nil, fmt.Errorf("No update available")
	}
	if res.Err != nil {
		return nil, res.Err
	}
	_, err := daemon.WriteUpdateToPkgFS(res)
	if err != nil {
		return nil, err
	}
	return &res.Update.Merkle, nil
}
// GetBlob asks the daemon to fetch the blob with the given merkle root,
// rejecting an empty or all-whitespace root up front.
func (c *ControlSrvr) GetBlob(merkle string) error {
	if strings.TrimSpace(merkle) == "" {
		return fmt.Errorf("Supplied merkle root is empty")
	}
	return c.daemon.GetBlob(merkle)
}
// Quit closes every bound stub and resets the connection list.
func (c *ControlSrvr) Quit() {
	for i := range c.stubs {
		c.stubs[i].Close()
	}
	c.stubs = []*bindings.Stub{}
}
// Bind creates a stub for the incoming Control connection, remembers it so
// Quit can close it later, and services its requests on a new goroutine
// until the peer closes or an error occurs.
func (c *ControlSrvr) Bind(req amber.Control_Request) {
	s := req.NewStub(c, bindings.GetAsyncWaiter())
	c.stubs = append(c.stubs, s)
	go func(b *bindings.Stub) {
		for {
			if err := b.ServeRequest(); err != nil {
				// A closed peer is the normal shutdown path; only log
				// unexpected errors.
				if mxerror.Status(err) != zx.ErrPeerClosed {
					log.Printf("Request error %v \n", err)
				}
				break
			}
		}
	}(s)
}
|
package string
import "strings"
// MakeUppercase transforms a string to all caps with an exclamation point.
func MakeUppercase(s string) string {
	upper := strings.ToUpper(s)
	return upper + "!"
}
|
package main
import (
//"database/sql"
"encoding/json"
"fmt"
_ "github.com/go-sql-driver/mysql"
"io/ioutil"
"log"
"net/http"
"strconv"
"time"
)
// userData is the JSON payload sent by the client when updating profile
// information.
type userData struct {
	UserId   int
	NickName string
	HeadUrl  string
	Gender   byte
	Age      string
}

// shareJson is the success response body for the share-app endpoint.
type shareJson struct {
	Code    int    `json:"code,omitempty"`
	Gold    int    `json:"gold,omitempty"`
	Message string `json:"message,omitempty"`
}

// readJson is the success response body for the read-award endpoint.
// NOTE(review): structurally identical to shareJson; could be unified.
type readJson struct {
	Code    int    `json:"code,omitempty"`
	Gold    int    `json:"gold,omitempty"`
	Message string `json:"message,omitempty"`
}
// updateUserData handles the profile-update task (taskId 2): it checks that
// the user exists and has not completed the task yet, then credits the
// reward, marks the event done and queues the gold/cash detail record.
//
// Fix: rows.Close() is now deferred only after the Query error check —
// previously a failed Query left rows nil and the deferred Close panicked.
func updateUserData(w http.ResponseWriter, req *http.Request) {
	var data userData
	result, _ := ioutil.ReadAll(req.Body)
	req.Body.Close()
	log.Println("updateUserData:", string(result))
	if err := json.Unmarshal(result, &data); err != nil {
		// Preserve the old behavior (continue with zero values) but record
		// why the payload could not be decoded.
		log.Println("updateUserData---unmarshal error:", err)
	}
	userId := strconv.Itoa(data.UserId)
	age := data.Age
	flag := checkUserId(userId, Db)
	log.Println(flag)
	if !flag {
		str := `{"code":400,"message":"userId not in db"}`
		fmt.Fprint(w, str, "\n")
		log.Println("用户未注册userId", userId)
		return
	}
	taskId := 2
	rows, err := Db.Query(`SELECT taskStatus, eventId, addGoldCoin, addCash FROM eventTable where
userId=? and taskId=?`, userId, taskId)
	if err != nil {
		log.Println("updateUserData---query error info:", err)
		return
	}
	defer rows.Close()
	var taskStatus int
	var eventId int
	var addGoldCoin int
	var addCash float32
	if rows.Next() {
		err := rows.Scan(&taskStatus, &eventId, &addGoldCoin, &addCash)
		if err != nil {
			log.Println(err)
		}
		log.Println("get userId, taskId, taskStatus, eventId:", userId, taskId, taskStatus, eventId)
	} else {
		retValue := `{"code":400,"message":"userId no register"}`
		fmt.Fprint(w, retValue, "\n")
		log.Println("db no userId:", userId)
		return
	}
	if taskStatus > 0 {
		retValue := `{"code":300,"message":"user had updated the information"}`
		fmt.Fprint(w, retValue, "\n")
		log.Println("用户更新过资料")
	} else {
		retValue := `{"code":200,"gold":400,"message":"success"}`
		fmt.Fprint(w, retValue, "\n")
		// 1. update userInfo  2. update eventTable  3. queue gold/cash detail
		Db.Exec("update userInfo set age = ?, gold = gold + ? where userId = ? ", age, addGoldCoin, userId)
		Db.Exec("update eventTable set taskStatus=? where userId=? and taskId=? and eventId=?",
			1, userId, taskId, eventId)
		var data2 goldData
		data2.userId = userId
		data2.taskId = taskId
		data2.addGoldCoin = addGoldCoin
		data2.addCash = 0
		data2.eventId = eventId
		goldDataChan <- data2
	}
}
// shareApp handles the share-to-WeChat task (taskId 4): once per day the
// user is credited the task's gold reward.
//
// Fixes: the old `if flag { ... } else { ... }` after the early `!flag`
// return was dead code and has been flattened; a missing userId form
// parameter no longer panics on slice indexing.
func shareApp(w http.ResponseWriter, req *http.Request) {
	taskId := 4
	req.ParseForm()
	param_userId := req.Form["userId"]
	if len(param_userId) == 0 {
		// Guard: indexing an absent parameter would panic.
		fmt.Fprint(w, `{"code":400,"message":"userId not in db"}`, "\n")
		return
	}
	userId := param_userId[0]
	flag := checkUserId(userId, Db)
	if !flag {
		str := `{"code":400,"message":"userId not in db"}`
		fmt.Fprint(w, str, "\n")
		log.Println("用户未注册userId", userId)
		return
	}
	today_date := time.Now().Format("2006-01-02 00:00:00")
	eventMap := getEventMap(userId, taskId, today_date, Db)
	taskStatus := eventMap["taskStatus"].(int)
	log.Println("tody_shareApp_status:", taskStatus)
	if taskStatus > 0 {
		retValue := `{"code":300,"message":"user had shared the APP"}`
		fmt.Fprint(w, retValue, "\n")
		log.Println("今天用户已经分享过app了")
		return
	}
	var retValue shareJson
	retValue.Code = 200
	retValue.Gold = eventMap["addGoldCoin"].(int)
	retValue.Message = "success"
	bytes, _ := json.Marshal(retValue)
	fmt.Fprint(w, string(bytes), "\n")
	// 1. credit gold  2. record today's event  3. queue gold/cash detail
	Db.Exec("update userInfo set gold=gold+? where userId=?", eventMap["addGoldCoin"], userId)
	var data1 eventData
	data1.userId = userId
	data1.taskId = taskId // taskId = 4
	data1.date = today_date
	data1.taskStatus = 1
	eventChan <- data1
	var data2 goldData
	data2.userId = userId
	data2.taskId = taskId
	data2.addGoldCoin = eventMap["addGoldCoin"].(int)
	data2.addCash = 0
	data2.eventId = eventMap["eventId"].(int)
	goldDataChan <- data2
}
// readAward handles the daily reading tasks (type/taskId 5-8): once per day
// per task the user is credited the task's gold reward.
//
// Fix: missing userId/type form parameters no longer panic on slice
// indexing; they now fall through to the generic error response.
func readAward(w http.ResponseWriter, req *http.Request) {
	req.ParseForm()
	param_userId := req.Form["userId"]
	param_type := req.Form["type"]
	if len(param_userId) == 0 || len(param_type) == 0 {
		// Guard: indexing an absent parameter would panic.
		fmt.Fprint(w, `{"message":"error: event type is not read task" }`, "\n")
		return
	}
	userId := param_userId[0]
	tab_type := param_type[0]
	taskId, _ := strconv.Atoi(tab_type)
	if tab_type == "5" || tab_type == "6" || tab_type == "7" || tab_type == "8" {
		flag := checkUserId(userId, Db)
		if !flag {
			str := `{"Code":400,"message":"userId not in db"}`
			fmt.Fprint(w, str, "\n")
			log.Println("用户未注册userId", userId)
			return
		}
		today_date := time.Now().Format("2006-01-02 00:00:00")
		eventMap := getEventMap(userId, taskId, today_date, Db)
		taskStatus := eventMap["taskStatus"].(int)
		log.Println("taskId:", taskId, "tody_read_status:", taskStatus)
		if taskStatus > 0 {
			retValue := `{"Code":300,"message":"user had finish read"}`
			fmt.Fprint(w, retValue, "\n")
			log.Println("今天用户已经完成taskId=", taskId, "的阅读任务")
		} else {
			var retValue readJson
			retValue.Code = 200
			retValue.Gold = eventMap["addGoldCoin"].(int)
			retValue.Message = "success"
			bytes, _ := json.Marshal(retValue)
			fmt.Fprint(w, string(bytes), "\n")
			// 1. credit gold  2. record today's event  3. queue gold/cash detail
			Db.Exec("update userInfo set gold=gold+? where userId=?", eventMap["addGoldCoin"], userId)
			var data1 eventData
			data1.userId = userId
			data1.taskId = taskId // taskId = 5
			data1.date = today_date
			data1.taskStatus = 1
			eventChan <- data1
			var data2 goldData
			data2.userId = userId
			data2.taskId = taskId
			data2.addGoldCoin = eventMap["addGoldCoin"].(int)
			data2.addCash = 0
			data2.eventId = eventMap["eventId"].(int)
			goldDataChan <- data2
		}
	} else {
		var retValue string = `{"message":"error: event type is not read task" }`
		fmt.Fprint(w, retValue, "\n")
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//357. Count Numbers with Unique Digits
//Given a non-negative integer n, count all numbers with unique digits, x, where 0 ≤ x < 10n.
//Example:
//Given n = 2, return 91. (The answer should be the total numbers in the range of 0 ≤ x < 100, excluding [11,22,33,44,55,66,77,88,99])
//Credits:
//Special thanks to @memoryless for adding this problem and creating all test cases.
//func countNumbersWithUniqueDigits(n int) int {
//}
// Time Is Money |
// Package rules contains useful pre-defined rego AST rules.
package rules
import "github.com/open-policy-agent/opa/ast"
// GetSession gets the session for the given id: first as a service-account
// record; then as a session record, following impersonate_session_id when
// it is set; otherwise it falls back to an empty object.
func GetSession() *ast.Rule {
	return ast.MustParseRule(`
get_session(id) = v {
	v = get_databroker_record("type.googleapis.com/user.ServiceAccount", id)
	v != null
} else = iv {
	v = get_databroker_record("type.googleapis.com/session.Session", id)
	v != null
	object.get(v, "impersonate_session_id", "") != ""
	iv = get_databroker_record("type.googleapis.com/session.Session", v.impersonate_session_id)
	iv != null
} else = v {
	v = get_databroker_record("type.googleapis.com/session.Session", id)
	v != null
	object.get(v, "impersonate_session_id", "") == ""
} else = {} {
	true
}
`)
}
// GetUser returns the user record for the given session, or an empty
// object when no user is found.
func GetUser() *ast.Rule {
	return ast.MustParseRule(`
get_user(session) = v {
	v = get_databroker_record("type.googleapis.com/user.User", session.user_id)
	v != null
} else = {} {
	true
}
`)
}
// GetUserEmail gets the user email, or "" when unset.
// NOTE(review): despite taking session, the rule only reads user.email —
// the "impersonate email" mentioned previously is not consulted here.
func GetUserEmail() *ast.Rule {
	return ast.MustParseRule(`
get_user_email(session, user) = v {
	v = user.email
} else = "" {
	true
}
`)
}
// GetDeviceCredential gets the device credential for the given session:
// the first session credential whose type matches device_type_id is looked
// up in the databroker; otherwise an empty object is returned.
func GetDeviceCredential() *ast.Rule {
	return ast.MustParseRule(`
get_device_credential(session, device_type_id) = v {
	device_credential_id := [x.Credential.Id|x:=session.device_credentials[_];x.type_id==device_type_id][0]
	v = get_databroker_record("type.googleapis.com/pomerium.device.Credential", device_credential_id)
	v != null
} else = {} {
	true
}
`)
}
// GetDeviceEnrollment gets the device enrollment for the given device
// credential, or an empty object when none is found.
func GetDeviceEnrollment() *ast.Rule {
	return ast.MustParseRule(`
get_device_enrollment(device_credential) = v {
	v = get_databroker_record("type.googleapis.com/pomerium.device.Enrollment", device_credential.enrollment_id)
	v != null
} else = {} {
	true
}
`)
}
// MergeWithAnd merges criterion results using `and`: the merged result is
// true only when every input result is true. Reasons and additional data
// are unioned over the all-true set, or over the false subset otherwise.
func MergeWithAnd() *ast.Rule {
	return ast.MustParseRule(`
merge_with_and(results) = [true, reasons, additional_data] {
	true_results := [x|x:=results[i];x[0]]
	count(true_results) == count(results)
	reasons := union({x|x:=true_results[i][1]})
	additional_data := object_union({x|x:=true_results[i][2]})
} else = [false, reasons, additional_data] {
	false_results := [x|x:=results[i];not x[0]]
	reasons := union({x|x:=false_results[i][1]})
	additional_data := object_union({x|x:=false_results[i][2]})
}
`)
}
// MergeWithOr merges criterion results using `or`: the merged result is
// true when at least one input result is true. Reasons and additional data
// are unioned over the true subset, or over the false subset otherwise.
func MergeWithOr() *ast.Rule {
	return ast.MustParseRule(`
merge_with_or(results) = [true, reasons, additional_data] {
	true_results := [x|x:=results[i];x[0]]
	count(true_results) > 0
	reasons := union({x|x:=true_results[i][1]})
	additional_data := object_union({x|x:=true_results[i][2]})
} else = [false, reasons, additional_data] {
	false_results := [x|x:=results[i];not x[0]]
	reasons := union({x|x:=false_results[i][1]})
	additional_data := object_union({x|x:=false_results[i][2]})
}
`)
}
// InvertCriterionResult changes the criterion result's value from false to
// true, or vice-versa. The reasons/additional-data tail of the result
// array is passed through unchanged.
func InvertCriterionResult() *ast.Rule {
	return ast.MustParseRule(`
invert_criterion_result(in) = out {
	in[0]
	out = array.concat([false], array.slice(in, 1, count(in)))
} else = out {
	not in[0]
	out = array.concat([true], array.slice(in, 1, count(in)))
}
`)
}
// NormalizeCriterionResult converts a criterion result into a standard
// form: a bare boolean becomes [bool, set()], an array passes through
// unchanged, and anything else becomes [false, set()].
func NormalizeCriterionResult() *ast.Rule {
	return ast.MustParseRule(`
normalize_criterion_result(result) = v {
	is_boolean(result)
	v = [result, set()]
} else = v {
	is_array(result)
	v = result
} else = v {
	v = [false, set()]
}
`)
}
// ObjectGet recursively gets a value from an object, treating "/" (or ".")
// in the key as nested lookups. Because rego has no recursion, nesting is
// hard-coded for 2 to 5 segments; a plain object.get is tried first and
// used as the final fallback.
func ObjectGet() *ast.Rule {
	return ast.MustParseRule(`
# object_get is like object.get, but supports converting "/" in keys to separate lookups
# rego doesn't support recursion, so we hard code a limited number of /'s
object_get(obj, key, def) = value {
	undefined := "10a0fd35-0f1a-4e5b-97ce-631e89e1bafa"
	value = object.get(obj, key, undefined)
	value != undefined
} else = value {
	segments := split(replace(key, ".", "/"), "/")
	count(segments) == 2
	o1 := object.get(obj, segments[0], {})
	value = object.get(o1, segments[1], def)
} else = value {
	segments := split(replace(key, ".", "/"), "/")
	count(segments) == 3
	o1 := object.get(obj, segments[0], {})
	o2 := object.get(o1, segments[1], {})
	value = object.get(o2, segments[2], def)
} else = value {
	segments := split(replace(key, ".", "/"), "/")
	count(segments) == 4
	o1 := object.get(obj, segments[0], {})
	o2 := object.get(o1, segments[1], {})
	o3 := object.get(o2, segments[2], {})
	value = object.get(o3, segments[3], def)
} else = value {
	segments := split(replace(key, ".", "/"), "/")
	count(segments) == 5
	o1 := object.get(obj, segments[0], {})
	o2 := object.get(o1, segments[1], {})
	o3 := object.get(o2, segments[2], {})
	o4 := object.get(o3, segments[3], {})
	value = object.get(o4, segments[4], def)
} else = value {
	value = object.get(obj, key, def)
}
`)
}
// ObjectUnion merges objects together. It expects a set of objects; for a
// key present in several objects, the value from the last object in the
// collected list wins (set enumeration order is unspecified).
func ObjectUnion() *ast.Rule {
	return ast.MustParseRule(`
object_union(xs) = merged {
	merged = { k: v |
		some k
		xs[_][k]
		vs := [ xv | xv := xs[_][k] ]
		v := vs[count(vs)-1]
	}
}
`)
}
|
/*
EIGER is a brand-new, made-up computer language.
It’s very exciting, and very simple! EIGER only allows the programmer to do two things: define a name for an integer, and compare two names.
Write a metaprogram – a program which can simulate the EIGER language.
Input
Input consists of one command per line, up to 10000 commands, ending at end of file.
A definition command has the form define i x, where i is an integer in the range [−10000,10000] and x is a string of up to 20 lowercase alphabet characters (a–z).
A comparison command has the form eval x y z, where x and z are strings of the same format as in definitions, and y is one of <, >, or =.
Output
For each definition, use the string as a label for the given integer, but don’t output anything. Redefinitions are allowed.
For each comparison, state whether it is true or false, depending on the current definitions. If the result is not known, output ‘undefined’.
*/
package main
import (
"fmt"
"strconv"
"strings"
)
func main() {
	// Exercise the interpreter with a small scripted session, including
	// redefinitions and a comparison against an initially-undefined name.
	program := []string{
		"define 5 hellothere",
		"define 6 goodbye",
		"eval hellothere < goodbye",
		"eval hellothere > goodbye",
		"eval hellothere = goodbye",
		"eval hellothere = hi",
		"define 5 hi",
		"eval hellothere = hi",
		"define 6 hi",
		"eval hellothere = hi",
	}
	test(program)
}
// test runs each input line through a fresh interpreter, printing every
// result except the silent "success" of definitions, and panicking on the
// first malformed command.
func test(lines []string) {
	e := NewEiger()
	for _, l := range lines {
		r, err := e.Line(l)
		if err != nil {
			panic(fmt.Errorf("%s: %v", l, err))
		}
		if r == "success" {
			continue
		}
		fmt.Println(r)
	}
}
// Eiger is a tiny interpreter for the made-up EIGER language: it can bind
// names to integers ("define") and compare two bound names ("eval").
type Eiger struct {
	vars map[string]int // current name -> integer bindings
}

// NewEiger returns an interpreter with no definitions.
func NewEiger() *Eiger {
	return &Eiger{
		vars: make(map[string]int),
	}
}

// Line executes a single command and returns its textual result:
// "success" for a definition, "true"/"false"/"undefined" for a comparison,
// or "error" together with a non-nil error for malformed input.
//
// Fixes: an empty token (e.g. from a doubled space) previously panicked on
// toks[i+1][0] or slipped through an unchecked fmt.Sscan; operands are now
// validated with strconv.Atoi and strings copied directly.
func (c *Eiger) Line(line string) (string, error) {
	// Argument signature per operation: 'i' = integer, 'c' = single
	// character, 's' = string.
	ftab := map[string]string{
		"define": "is",
		"eval":   "scs",
	}
	toks := strings.Split(line, " ")
	fn := toks[0]
	arg, ok := ftab[fn]
	if !ok {
		return "error", fmt.Errorf("unknown operation %q", fn)
	}
	if len(arg) != len(toks)-1 {
		return "error", fmt.Errorf("invalid number of arguments passed")
	}
	var (
		iv [4]int
		cv [4]rune
		sv [4]string
	)
	for i := range arg {
		tok := toks[i+1]
		if tok == "" {
			return "error", fmt.Errorf("invalid argument %d", i)
		}
		switch arg[i] {
		case 'i':
			n, err := strconv.Atoi(tok)
			if err != nil {
				return "error", fmt.Errorf("invalid argument %d", i)
			}
			iv[i] = n
		case 'c':
			cv[i] = rune(tok[0])
		case 's':
			sv[i] = tok
		}
	}
	r := "success"
	switch fn {
	case "define":
		c.vars[sv[1]] = iv[0]
	case "eval":
		x, xf := c.vars[sv[0]]
		y, yf := c.vars[sv[2]]
		if !xf || !yf {
			// A comparison involving an unbound name has no answer.
			r = "undefined"
			break
		}
		switch cv[1] {
		case '<':
			r = strconv.FormatBool(x < y)
		case '>':
			r = strconv.FormatBool(x > y)
		case '=':
			r = strconv.FormatBool(x == y)
		default:
			return "error", fmt.Errorf("invalid operation %q", cv[1])
		}
	}
	return r, nil
}
|
package enum
// All keys are prefixed with "abana" to avoid colliding with keys written
// by other systems sharing the same Redis instance.
const (
	REDIS_KEY_USER_INFO = "abana_user_info_" // user info cache key prefix
)
|
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/signal"
"runtime"
"strings"
"syscall"
"time"
"github.com/alecthomas/kong"
"github.com/docker/libkv/store"
"github.com/docker/libkv/store/boltdb"
"github.com/docker/libkv/store/consul"
"github.com/docker/libkv/store/etcd"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/metalmatze/alertmanager-bot/pkg/alertmanager"
"github.com/metalmatze/alertmanager-bot/pkg/telegram"
"github.com/oklog/run"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// Identifiers for the supported store backends and log levels, matching the
// enum values accepted on the command line.
const (
	storeBolt   = "bolt"
	storeConsul = "consul"
	storeEtcd   = "etcd"
	levelDebug  = "debug"
	levelInfo   = "info"
	levelWarn   = "warn"
	levelError  = "error"
)

var (
	// Version of alertmanager-bot.
	Version string
	// Revision or Commit this binary was built from.
	Revision string
	// GoVersion running this binary.
	GoVersion = runtime.Version()
	// StartTime has the time this was started.
	StartTime = time.Now()
)

// cli declares the full command-line surface; kong parses flags into it.
var cli struct {
	AlertmanagerURL *url.URL `name:"alertmanager.url" default:"http://localhost:9093/" help:"The URL that's used to connect to the alertmanager"`
	ListenAddr      string   `name:"listen.addr" default:"0.0.0.0:8080" help:"The address the alertmanager-bot listens on for incoming webhooks"`
	LogJSON         bool     `name:"log.json" default:"false" help:"Tell the application to log json and not key value pairs"`
	LogLevel        string   `name:"log.level" default:"info" enum:"error,warn,info,debug" help:"The log level to use for filtering logs"`
	TemplatePaths   []string `name:"template.paths" default:"/templates/default.tmpl" help:"The paths to the template"`
	cliTelegram
	Store       string `required:"true" name:"store" enum:"bolt,consul,etcd" help:"The store to use"`
	StorePrefix string `name:"storeKeyPrefix" default:"telegram/chats" help:"Prefix for store keys"`
	cliBolt
	cliConsul
	cliEtcd
}

// cliBolt holds the bolt-specific store flags.
type cliBolt struct {
	Path string `name:"bolt.path" type:"path" default:"/tmp/bot.db" help:"The path to the file where bolt persists its data"`
}

// cliConsul holds the consul-specific store flags.
type cliConsul struct {
	URL *url.URL `name:"consul.url" default:"localhost:8500" help:"The URL that's used to connect to the consul store"`
}

// cliEtcd holds the etcd-specific store and TLS flags.
type cliEtcd struct {
	URL                   *url.URL `name:"etcd.url" default:"localhost:2379" help:"The URL that's used to connect to the etcd store"`
	TLSInsecure           bool     `name:"etcd.tls.insecure" default:"false" help:"Use TLS or not"`
	TLSInsecureSkipVerify bool     `name:"etcd.tls.insecureSkipVerify" default:"false" help:"Skip server certificates verification"`
	TLSCert               string   `name:"etcd.tls.cert" type:"path" help:"Path to the TLS cert file"`
	TLSKey                string   `name:"etcd.tls.key" type:"path" help:"Path to the TLS key file"`
	TLSCA                 string   `name:"etcd.tls.ca" type:"path" help:"Path to the TLS trusted CA cert file"`
}

// cliTelegram holds the Telegram credentials and admin list.
type cliTelegram struct {
	Admins []int  `required:"true" name:"telegram.admin" help:"The ID of the initial Telegram Admin"`
	Token  string `required:"true" name:"telegram.token" env:"TELEGRAM_TOKEN" help:"The token used to connect with Telegram"`
}
// main wires together the kong CLI, logging, metrics registry, the chosen
// KV store backend, the Telegram bot and the webhook webserver, then runs
// them as one actor group until a signal or fatal error stops them.
//
// Fixes: the signal channel is now buffered (required by signal.Notify so
// a signal is not dropped), and signal.Stop is called before close(sig) so
// a late signal cannot cause a send on a closed channel.
func main() {
	_ = kong.Parse(&cli,
		kong.Name("alertmanager-bot"),
	)
	var err error
	levelFilter := map[string]level.Option{
		levelError: level.AllowError(),
		levelWarn:  level.AllowWarn(),
		levelInfo:  level.AllowInfo(),
		levelDebug: level.AllowDebug(),
	}
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
	if cli.LogJSON {
		logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr))
	}
	logger = level.NewFilter(logger, levelFilter[cli.LogLevel])
	logger = log.With(logger,
		"ts", log.DefaultTimestampUTC,
		"caller", log.DefaultCaller,
	)
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		prometheus.NewGoCollector(),
		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
	)
	var am *alertmanager.Client
	{
		client, err := alertmanager.NewClient(cli.AlertmanagerURL)
		if err != nil {
			level.Error(logger).Log("msg", "failed to create alertmanager client", "err", err)
			os.Exit(1)
		}
		am = client
	}
	var kvStore store.Store
	{
		switch strings.ToLower(cli.Store) {
		case storeBolt:
			kvStore, err = boltdb.New([]string{cli.cliBolt.Path}, &store.Config{Bucket: "alertmanager"})
			if err != nil {
				level.Error(logger).Log("msg", "failed to create bolt store backend", "err", err)
				os.Exit(1)
			}
		case storeConsul:
			kvStore, err = consul.New([]string{cli.cliConsul.URL.String()}, nil)
			if err != nil {
				level.Error(logger).Log("msg", "failed to create consul store backend", "err", err)
				os.Exit(1)
			}
		case storeEtcd:
			tlsConfig := &tls.Config{}
			if cli.cliEtcd.TLSCert != "" {
				cert, err := tls.LoadX509KeyPair(cli.cliEtcd.TLSCert, cli.cliEtcd.TLSKey)
				if err != nil {
					level.Error(logger).Log("msg", "failed to create etcd store backend, could not load certificates", "err", err)
					os.Exit(1)
				}
				tlsConfig.Certificates = []tls.Certificate{cert}
			}
			if cli.cliEtcd.TLSCA != "" {
				caCert, err := ioutil.ReadFile(cli.cliEtcd.TLSCA)
				if err != nil {
					level.Error(logger).Log("msg", "failed to create etcd store backend, could not load ca certificate", "err", err)
					os.Exit(1)
				}
				caCertPool := x509.NewCertPool()
				caCertPool.AppendCertsFromPEM(caCert)
				tlsConfig.RootCAs = caCertPool
			}
			tlsConfig.InsecureSkipVerify = cli.cliEtcd.TLSInsecureSkipVerify
			if !cli.cliEtcd.TLSInsecure {
				kvStore, err = etcd.New([]string{cli.cliEtcd.URL.String()}, &store.Config{TLS: tlsConfig})
			} else {
				kvStore, err = etcd.New([]string{cli.cliEtcd.URL.String()}, nil)
			}
			if err != nil {
				level.Error(logger).Log("msg", "failed to create etcd store backend", "err", err)
				os.Exit(1)
			}
		default:
			level.Error(logger).Log("msg", "please provide one of the following supported store backends: bolt, consul, etcd")
			os.Exit(1)
		}
	}
	defer kvStore.Close()
	ctx, cancel := context.WithCancel(context.Background())
	// TODO Needs fan out for multiple bots
	webhooks := make(chan alertmanager.TelegramWebhook, 32)
	var g run.Group
	{
		tlogger := log.With(logger, "component", "telegram")
		commandCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "alertmanagerbot_commands_total",
			Help: "Number of commands received by command name",
		}, []string{"command"})
		reg.MustRegister(commandCounter)
		commandCount := func(command string) {
			commandCounter.WithLabelValues(command).Inc()
		}
		chats, err := telegram.NewChatStore(kvStore, cli.StorePrefix)
		if err != nil {
			level.Error(logger).Log("msg", "failed to create chat store", "err", err)
			os.Exit(1)
		}
		bot, err := telegram.NewBot(
			chats, cli.cliTelegram.Token, cli.cliTelegram.Admins[0],
			telegram.WithLogger(tlogger),
			telegram.WithCommandEvent(commandCount),
			telegram.WithAddr(cli.ListenAddr),
			telegram.WithAlertmanager(am),
			telegram.WithTemplates(cli.AlertmanagerURL, cli.TemplatePaths...),
			telegram.WithRevision(Revision),
			telegram.WithStartTime(StartTime),
			telegram.WithExtraAdmins(cli.cliTelegram.Admins[1:]...),
		)
		if err != nil {
			level.Error(tlogger).Log("msg", "failed to create bot", "err", err)
			os.Exit(2)
		}
		g.Add(func() error {
			level.Info(tlogger).Log(
				"msg", "starting alertmanager-bot",
				"version", Version,
				"revision", Revision,
				"goVersion", GoVersion,
			)
			// Runs the bot itself communicating with Telegram
			return bot.Run(ctx, webhooks)
		}, func(err error) {
			cancel()
		})
	}
	{
		wlogger := log.With(logger, "component", "webserver")
		// TODO: Use Heptio's healthcheck library
		handleHealth := func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}
		webhooksCounter := prometheus.NewCounter(prometheus.CounterOpts{
			Name: "alertmanagerbot_webhooks_total",
			Help: "Number of webhooks received by this bot",
		})
		reg.MustRegister(webhooksCounter)
		m := http.NewServeMux()
		m.HandleFunc("/webhooks/telegram/", alertmanager.HandleTelegramWebhook(wlogger, webhooksCounter, webhooks))
		m.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
		m.HandleFunc("/health", handleHealth)
		m.HandleFunc("/healthz", handleHealth)
		s := http.Server{
			Addr:    cli.ListenAddr,
			Handler: m,
		}
		g.Add(func() error {
			level.Info(wlogger).Log("msg", "starting webserver", "addr", cli.ListenAddr)
			return s.ListenAndServe()
		}, func(err error) {
			_ = s.Shutdown(context.Background())
		})
	}
	{
		// Buffered: signal.Notify requires a buffered channel, otherwise a
		// signal delivered while the receiver is busy is silently dropped.
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
		g.Add(func() error {
			<-sig
			return nil
		}, func(err error) {
			cancel()
			// Unregister before closing so the signal package can never
			// send on a closed channel.
			signal.Stop(sig)
			close(sig)
		})
	}
	if err := g.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
|
package simulator
import (
"backend/api"
"math/rand"
"time"
)
// Simulator advances a game clock on a background goroutine, randomly
// generating score/assist/attack events on the wrapped game.
type Simulator struct {
	time int       // elapsed simulated game time, in simulated seconds
	game *api.Game // the game mutated by the background goroutine
}
// SimulateGame starts a goroutine that plays out a 48*60-tick game on the
// given game object, sleeping timeScale real seconds per simulated second,
// and returns a Simulator that can be polled for progress.
//
// NOTE(review): the goroutine writes sim.time and game fields while
// GetTime/GetGame may read them from other goroutines with no
// synchronization — this is a data race; confirm whether callers tolerate
// it or add locking.
func SimulateGame(game *api.Game, timeScale float64) *Simulator {
	sim := Simulator{
		time: 0,
		game: game,
	}
	t := time.Now()
	game.StartsAt = &t
	go func() {
		for {
			if sim.time >= 48 * 60 {
				// Finish time is start + the unscaled game length
				// (48*60 real seconds), regardless of timeScale.
				tFinish := t.Add(time.Second * 48 * 60)
				game.FinishedAt = &tFinish
				return
			}
			time.Sleep(time.Duration(float64(time.Second) * timeScale))
			// A single draw decides this tick's events:
			// p < 0.004 -> team A scores; 0.004 <= p < 0.008 -> team B.
			p := rand.Float64()
			if p < 0.004 {
				game.ScoreA += 1
				if p > 0.002 {
					game.Assists += 1
				}
			} else if p >= 0.004 && p < 0.008 {
				game.ScoreB += 1
				if p > 0.006 {
					game.Assists += 1
				}
			}
			if p < 0.016 {
				game.Attacks += 1
			}
			sim.time += 1
		}
	}()
	return &sim
}
// GetTime returns the current simulated time in simulated seconds.
// NOTE(review): reads sim.time unsynchronized while the simulation
// goroutine writes it — racy; confirm acceptable.
func (sim *Simulator) GetTime() int {
	return sim.time
}
// GetGame returns a shallow copy of the simulated game.
// NOTE(review): copies the struct while the simulation goroutine may be
// writing it — racy; confirm acceptable.
func (sim *Simulator) GetGame() api.Game {
	return *sim.game
}
package main
import "fmt"
// https://leetcode-cn.com/problems/permutation-sequence/
// solution
// 基于 nextPermutation 的 solution1
// * 计算k对应的各位置的逆序对数
// * 根据逆序对数得出排列
// getPermutation returns the k-th (1-indexed) permutation of the digits
// 1..n, computed via the factorial number system: at each position the
// quotient by the remaining factorial selects the next unused digit.
func getPermutation(n int, k int) string {
	if n == 0 {
		return ""
	}
	if n == 1 {
		return "1"
	}
	// factorials[i] == i!
	factorials := []int{1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880}
	remaining := []byte("123456789")[:n]
	out := make([]byte, 0, n)
	idx := k - 1
	for pos := n - 1; pos >= 0; pos-- {
		f := factorials[pos]
		pick := idx / f
		idx %= f
		out = append(out, remaining[pick])
		remaining = append(remaining[:pick], remaining[pick+1:]...)
	}
	return string(out)
}
func main() {
	// Run every test case through the solver, printing its index first.
	cases := [][]int{
		{3, 3},
	}
	for i, c := range cases {
		fmt.Println("## case", i)
		fmt.Println(getPermutation(c[0], c[1]))
	}
}
|
package lib
import (
	"math/rand"
	"strconv"
	"strings"
	"time"
)
// Generator produces arithmetic exercise strings ("a+b+...=" or "a-b=")
// with operands drawn uniformly from [0, Range).
type Generator struct {
	Operator string // "+" or "-"
	Range    int    // exclusive upper bound for random operands
}

// Init configures the operator and the operand range.
func (gen *Generator) Init(operator string, limit int) {
	gen.Operator = operator
	gen.Range = limit
}

// swap exchanges the values behind the two pointers.
func swap(a, b *int) {
	*a, *b = *b, *a
}

// Generate returns one exercise ending in "=". For "+" it emits Operand
// random terms joined by "+"; for "-" it emits a two-term subtraction
// ordered so the result is non-negative. Unknown operators yield "".
func (gen *Generator) Generate(Operand int) string {
	// NOTE(review): reseeding on every call is redundant and can repeat
	// sequences when called within the same nanosecond; kept for
	// compatibility with existing behavior.
	rand.Seed(time.Now().UTC().UnixNano())
	switch gen.Operator {
	case "+":
		// strings.Builder avoids the quadratic cost of repeated +=.
		var b strings.Builder
		for i := 0; i < Operand; i++ {
			if i > 0 {
				b.WriteString(gen.Operator)
			}
			b.WriteString(strconv.Itoa(rand.Intn(gen.Range)))
		}
		b.WriteString("=")
		return b.String()
	case "-":
		first := rand.Intn(gen.Range)
		second := rand.Intn(gen.Range)
		if first < second {
			swap(&first, &second)
		}
		return strconv.Itoa(first) + gen.Operator + strconv.Itoa(second) + "="
	default:
		return ""
	}
}
|
package main
import "fmt"
func main() {
	i := 7
	// Passed by value: inc works on a copy, so i is unchanged.
	inc(i)
	fmt.Println(i)
	// Passed by pointer: increase mutates i through its address.
	increase(&i)
	fmt.Println("After accessing through memory")
	fmt.Println(i)
}
// inc increments its local copy only; the caller's argument is unaffected.
func inc(x int) {
	x = x + 1
}
// increase adds one to the int that x points at, so the caller observes
// the change.
func increase(x *int) {
	*x = *x + 1
}
|
package main
import "fmt"
func main() {
	cities := []string{"北京", "上海", "深圳"}
	// cities[3] = "广州" would panic at runtime: index out of range on a
	// length-3 slice.
	cities = append(cities, "武汉")
	fmt.Printf("s1=%v,len(s1)=%d,cap(s1)=%d\n", cities, len(cities), cap(cities))
	more := [...]string{"重庆", "成都"}
	cities = append(cities, more[:]...)
	fmt.Printf("s1=%v,len(s1)=%d,cap(s1)=%d\n", cities, len(cities), cap(cities))
}
|
package cve
import (
"glsamaker/pkg/models/bugzilla"
"glsamaker/pkg/models/gpackage"
"glsamaker/pkg/models/users"
"time"
)
// NVDFeed
type NVDFeed struct {
CVEDataFormat string `json:"CVE_data_format"`
// NVD adds number of CVE in this feed
CVEDataNumberOfCVEs string `json:"CVE_data_numberOfCVEs,omitempty"`
// NVD adds feed date timestamp
CVEDataTimestamp string `json:"CVE_data_timestamp,omitempty"`
CVEDataType string `json:"CVE_data_type"`
CVEDataVersion string `json:"CVE_data_version"`
// NVD feed array of CVE
CVEItems []*DefCveItem `json:"CVE_Items"`
}
// DefConfigurations Defines the set of product configurations for a NVD applicability statement.
type DefConfigurations struct {
CVEDataVersion string `json:"CVE_data_version"`
Nodes []*DefNode `json:"nodes,omitempty"`
}
// DefCpeMatch CPE match string or range
type DefCpeMatch struct {
Cpe22Uri string `json:"cpe22Uri,omitempty"`
Cpe23Uri string `json:"cpe23Uri"`
CpeName []*DefCpeName `json:"cpe_name,omitempty"`
VersionEndExcluding string `json:"versionEndExcluding,omitempty"`
VersionEndIncluding string `json:"versionEndIncluding,omitempty"`
VersionStartExcluding string `json:"versionStartExcluding,omitempty"`
VersionStartIncluding string `json:"versionStartIncluding,omitempty"`
Vulnerable bool `json:"vulnerable"`
}
// DefCpeName CPE name
type DefCpeName struct {
Cpe22Uri string `json:"cpe22Uri,omitempty"`
Cpe23Uri string `json:"cpe23Uri"`
LastModifiedDate string `json:"lastModifiedDate,omitempty"`
}
// DefCveItem Defines a vulnerability in the NVD data feed.
// It is also persisted via go-pg (pg tags) with glsamaker-specific
// additions: State, Comments, Packages, Bugs and ManuallyCreated.
type DefCveItem struct {
	Id               string             `pg:",pk"`
	State            string             `pg:"state"`
	Configurations   *DefConfigurations `json:"configurations,omitempty"`
	Cve              CVE                `json:"cve"`
	Description      string
	Impact           *DefImpact `json:"impact,omitempty"`
	LastModifiedDate string     `json:"lastModifiedDate,omitempty"`
	PublishedDate    string     `json:"publishedDate,omitempty"`
	// ManuallyCreated is never serialized to JSON (local bookkeeping only).
	ManuallyCreated bool `json:"-"`
	Comments        []Comment `pg:",fk:cve_id"`
	Packages        []gpackage.Package
	Bugs            []bugzilla.Bug `pg:"many2many:def_cve_item_to_bugs,joinFK:bug_id"`
}
// DefCveItemToBug is the join table backing the CVE <-> Bug
// many-to-many relation; the pair is unique.
type DefCveItemToBug struct {
	DefCveItemId string `pg:",unique:cve_to_bug"`
	BugId        int64  `pg:",unique:cve_to_bug"`
}
// Comment is a user comment attached to a GLSA and/or a CVE.
type Comment struct {
	Id     int64 `pg:",pk,unique"`
	GlsaId int64
	CVEId  string
	UserId int64
	User   *users.User
	UserBadge users.Badge
	Type      string
	Message   string
	// Date time.Time `pg:"-"`
	Date time.Time
}
// DefNode Defines a node or sub-node in an NVD applicability statement.
// Operator ("AND"/"OR") combines Children/CpeMatch; Negate inverts it.
type DefNode struct {
	Children []*DefNode     `json:"children,omitempty"`
	CpeMatch []*DefCpeMatch `json:"cpe_match,omitempty"`
	Negate   bool           `json:"negate,omitempty"`
	Operator string         `json:"operator,omitempty"`
}
// DefImpact Impact scores for a vulnerability as found on NVD.
type DefImpact struct {
	BaseMetricV3 BaseMetricV3 `json:"baseMetricV3"`
	BaseMetricV2 BaseMetricV2 `json:"baseMetricV2"`
}
// BaseMetricV2 CVSS V2.0 score.
type BaseMetricV2 struct {
	CvssV2              CvssV2  `json:"cvssV2"`
	Severity            string  `json:"severity"`
	ExploitabilityScore float32 `json:"exploitabilityScore"`
	ImpactScore         float32 `json:"impactScore"`
	AcInsufInfo         bool    `json:"acInsufInfo"`
	ObtainAllPrivilege  bool    `json:"obtainAllPrivilege"`
	ObtainUserPrivilege bool    `json:"obtainUserPrivilege"`
	ObtainOtherPrivilege bool   `json:"obtainOtherPrivilege"`
	UserInteractionRequired bool `json:"userInteractionRequired"`
}
// BaseMetricV3 CVSS V3.x score.
type BaseMetricV3 struct {
	CvssV3              CvssV3  `json:"cvssV3"`
	ExploitabilityScore float32 `json:"exploitabilityScore"`
	ImpactScore         float32 `json:"impactScore"`
}
|
package main
// rawHeaderLen is the fixed size, in bytes, of a packet's raw header.
var rawHeaderLen int16 = 16

// Transport protocol identifiers.
const (
	ProtoTCP = iota
	ProtoWebsocket
	ProtoWebsocketTLS
)
|
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package counter
// [START push_queues_and_backends]
import (
"net/http"
"net/url"
"google.golang.org/appengine"
"google.golang.org/appengine/taskqueue"
)
// pushHandler enqueues a POST task aimed at the "backend1" module,
// carrying the submitted form value "key". Errors resolving the backend
// hostname or adding the task now produce an HTTP 500 instead of being
// silently ignored (the old code fell through and used an empty host).
func pushHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	key := r.FormValue("key")

	// Create a task pointed at a backend.
	t := taskqueue.NewPOSTTask("/path/to/my/worker/", url.Values{
		"key": {key},
	})
	host, err := appengine.ModuleHostname(ctx, "backend1", "", "")
	if err != nil {
		http.Error(w, "unable to resolve backend hostname", http.StatusInternalServerError)
		return
	}
	t.Header = http.Header{
		"Host": {host},
	}
	// Add the task to the default queue.
	if _, err := taskqueue.Add(ctx, t, ""); err != nil {
		http.Error(w, "unable to enqueue task", http.StatusInternalServerError)
		return
	}
}
// [END push_queues_and_backends]
|
package model
import "fmt"
// Response is implemented by API response payloads that can report a
// business error code and render it as a string.
type Response interface {
	IsError() bool
	Error() string
}
// BaseResponse carries the common code/message pair shared by API
// responses; embed it to satisfy the Response interface.
type BaseResponse struct {
	Code int    `json:"code,omitempty"`
	Msg  string `json:"msg,omitempty"`
}

// IsError reports whether the response code signals a failure (non-zero).
func (resp BaseResponse) IsError() bool {
	return resp.Code != 0
}

// Error renders the response as "<code>:<msg>".
func (resp BaseResponse) Error() string {
	return fmt.Sprintf("%d:%s", resp.Code, resp.Msg)
}
|
package worker
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/brainly/olowek/config"
"github.com/brainly/olowek/marathon"
"github.com/brainly/olowek/stats"
)
// TestNginxReloaderWorker renders the marathon.json fixture through the
// services template into a temp file, checks the output against the
// expected services.conf, then runs the worker a second time and asserts
// that an unchanged config neither rewrites the file nor re-triggers the
// (stubbed) nginx reload command.
func TestNginxReloaderWorker(t *testing.T) {
	c, server := newFakeMarathonClient(t, "./fixtures/marathon.json")
	defer server.Close()
	tmpFile, err := ioutil.TempFile(".", ".services-test-")
	if err != nil {
		t.Fatalf("Unexpected error creating tmpfile: '%s'", err)
	}
	defer func() {
		if err := tmpFile.Close(); err != nil {
			t.Fatalf("Error closing tmpfile: '%s'", err)
		}
		os.Remove(tmpFile.Name())
	}()
	// Counts invocations of the stubbed reload command.
	reloadFuncCalledTimes := 0
	cfg := &config.Config{
		Marathon:      server.URL,
		NginxConfig:   tmpFile.Name(),
		NginxTemplate: "./fixtures/services.tpl",
		NginxCmd:      "/bin/true",
		NginxReloadFunc: func(cmd string) error {
			reloadFuncCalledTimes++
			return nil
		},
	}
	s := stats.NewStats()
	reloader := NewNginxReloaderWorker(c, cfg, s)
	reloader()
	renderedTemplate, err := ioutil.ReadFile(tmpFile.Name())
	if err != nil {
		t.Fatalf("Unexpected error reading tmpfile: '%s'", err)
	}
	expectedConf, err := ioutil.ReadFile("./fixtures/services.conf")
	if err != nil {
		t.Fatalf("Unexpected error reading services.conf: '%s'", err)
	}
	if string(expectedConf) != string(renderedTemplate) {
		t.Fatalf("Rendered template is not as expected. Got:\n %s", string(renderedTemplate))
	}
	stat, err := os.Stat(tmpFile.Name())
	if err != nil {
		t.Fatalf("Unexpected error while getting stat for tmpfile: '%s'", err)
	}
	modtime := stat.ModTime()
	// Sleep for 1s and try doing another worker call
	// (1s guarantees a ModTime difference on filesystems with coarse
	// timestamp resolution if the file were rewritten).
	time.Sleep(time.Second)
	reloader()
	stat, err = os.Stat(tmpFile.Name())
	if err != nil {
		t.Fatalf("Unexpected error while getting stat for tmpfile: '%s'", err)
	}
	modtimeSecondReload := stat.ModTime()
	if modtime != modtimeSecondReload {
		t.Fatalf("File should not be modified since no configuration changes were made")
	}
	if reloadFuncCalledTimes != 1 {
		t.Fatalf("Reload func should be called only once since no configuration changes were made")
	}
}
// newFakeMarathonClient boots a fake Marathon apps server that serves the
// JSON fixture in file and returns a client wired to it. The caller owns
// the returned server and must Close it.
func newFakeMarathonClient(t *testing.T, file string) (marathon.Marathon, *httptest.Server) {
	fixture, err := ioutil.ReadFile(file)
	if err != nil {
		t.Fatalf("Error reading fixture file: '%s'", err)
	}
	srv := newFakeMarathonAppsServer(string(fixture))
	client, err := marathon.NewMarathonClient(srv.URL)
	if err != nil {
		t.Fatalf("Unexpected error: '%s'", err)
	}
	return client, srv
}
func newFakeMarathonAppsServer(response string) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
if r.URL.Path == "/v2/apps" {
fmt.Fprintln(w, response)
}
}))
}
|
package main
// Solve is a placeholder for the puzzle solution. The original body was
// empty, which does not compile (missing return for a bool result); it
// now returns false until a real implementation lands.
func Solve(string) bool {
	return false
}
// input collects the puzzle input; currently it always yields the empty
// string (the named result str is returned at its zero value).
func input() (str string) {
	return str
}
// main feeds the collected input to Solve; the verdict is discarded.
func main() {
	_ = Solve(input())
}
|
package kuber
import (
"bytes"
"context"
"fmt"
"time"
"github.com/MagalixTechnologies/core/logger"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
)
// Observer wraps a dynamic shared-informer factory together with the
// parents store used to track resource ownership; stopCh shuts the
// factory's informers down.
type Observer struct {
	dynamicinformer.DynamicSharedInformerFactory
	ParentsStore *ParentsStore
	stopCh       chan struct{}
}
// NewObserver builds an Observer whose informer factory resyncs every
// defaultResync and stops when stopCh is closed or signalled.
func NewObserver(client dynamic.Interface, parentsStore *ParentsStore, stopCh chan struct{}, defaultResync time.Duration) *Observer {
	factory := dynamicinformer.NewDynamicSharedInformerFactory(client, defaultResync)
	return &Observer{
		DynamicSharedInformerFactory: factory,
		ParentsStore:                 parentsStore,
		stopCh:                       stopCh,
	}
}
// Watch subscribes to change events for gvrk and starts the underlying
// informer factory, returning the watcher for the resource.
func (observer *Observer) Watch(gvrk GroupVersionResourceKind) *watcher {
	logger.Debugw("subscribed on changes", "resource", gvrk.String())
	w := observer.WatcherFor(gvrk)
	observer.Start()
	return w
}
// WatchAndWaitForSync subscribes to gvrk and blocks until the informer's
// cache has synced, or gives up after a 5-second timeout.
// Fix: error string lowercased per Go convention (staticcheck ST1005).
func (observer *Observer) WatchAndWaitForSync(gvrk GroupVersionResourceKind) (*watcher, error) {
	watcher := observer.Watch(gvrk)
	// Buffered so the goroutine can complete even if we time out first.
	done := make(chan struct{}, 1)
	go func() {
		cache.WaitForCacheSync(observer.stopCh, watcher.informer.Informer().HasSynced)
		done <- struct{}{}
	}()
	timeout := 5 * time.Second
	select {
	case <-done:
		return watcher, nil
	case <-time.After(timeout):
		return nil, fmt.Errorf("timed out waiting for informer sync: (GVRK, %+v), (timeout, %v)", gvrk, timeout)
	}
}
// WatcherFor wraps the factory's generic informer for gvrk in a watcher
// without starting the factory.
func (observer *Observer) WatcherFor(gvrk GroupVersionResourceKind) *watcher {
	return &watcher{
		gvrk:     gvrk,
		informer: observer.ForResource(gvrk.GroupVersionResource),
	}
}
// Start launches all informers registered with the shared factory.
func (observer *Observer) Start() {
	observer.DynamicSharedInformerFactory.Start(observer.stopCh)
}
// Stop signals stopCh to shut the informers down.
// NOTE(review): this is a send on a channel of unknown buffering — if
// stopCh is unbuffered and nobody is receiving, Stop blocks; confirm the
// channel's producer/consumer contract.
func (observer *Observer) Stop() {
	observer.stopCh <- struct{}{}
}
// WaitForCacheSync blocks until every informer started by this factory
// has synced its cache, or returns an error after a one-minute timeout.
// Fixes: the finished channel is buffered so the helper goroutine no
// longer blocks forever (leaks) when we bail out on timeout, and the
// redundant for-loop around a select whose every case returns is gone.
func (observer *Observer) WaitForCacheSync() error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	finished := make(chan struct{}, 1)
	go func() {
		observer.DynamicSharedInformerFactory.WaitForCacheSync(observer.stopCh)
		finished <- struct{}{}
	}()
	select {
	case <-finished:
		return nil
	case <-ctx.Done():
		return fmt.Errorf("timeout waiting for cache sync")
	}
}
// Watcher exposes a single resource's informer: its lister, event
// subscription, and sync state.
type Watcher interface {
	// GetGroupVersionResourceKind identifies the resource being watched.
	GetGroupVersionResourceKind() GroupVersionResourceKind
	// Lister returns a lister over the informer's local cache.
	Lister() cache.GenericLister
	// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
	// period. Events to a single handler are delivered sequentially, but there is no coordination
	// between different handlers.
	AddEventHandler(handler ResourceEventHandler)
	// AddEventHandlerWithResyncPeriod adds an event handler to the
	// shared informer using the specified resync period. The resync
	// operation consists of delivering to the handler a create
	// notification for every object in the informer's local cache; it
	// does not add any interactions with the authoritative storage.
	AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
	// HasSynced returns true if the shared informer's store has been
	// informed by at least one full LIST of the authoritative state
	// of the informer's object collection. This is unrelated to "resync".
	HasSynced() bool
	// LastSyncResourceVersion is the resource version observed when last synced with the underlying
	// store. The value returned is not synchronized with access to the underlying store and is not
	// thread-safe.
	LastSyncResourceVersion() string
}
// watcher is the concrete Watcher implementation, binding a GVRK to its
// generic informer.
type watcher struct {
	gvrk     GroupVersionResourceKind
	informer informers.GenericInformer
}
// GetGroupVersionResourceKind returns the watched resource's identity.
func (w *watcher) GetGroupVersionResourceKind() GroupVersionResourceKind {
	return w.gvrk
}
// Lister returns a lister over the informer's local cache.
func (w *watcher) Lister() cache.GenericLister {
	return w.informer.Lister()
}
// AddEventHandler registers handler (masked/adapted via wrapHandler) with
// the informer at its default resync period.
func (w *watcher) AddEventHandler(handler ResourceEventHandler) {
	w.informer.Informer().AddEventHandler(wrapHandler(handler, w.gvrk))
}
// AddEventHandlerWithResyncPeriod registers handler with an explicit
// resync period.
func (w *watcher) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) {
	w.informer.Informer().AddEventHandlerWithResyncPeriod(wrapHandler(handler, w.gvrk), resyncPeriod)
}
// HasSynced reports whether the informer completed its initial LIST.
func (w *watcher) HasSynced() bool {
	return w.informer.Informer().HasSynced()
}
// LastSyncResourceVersion returns the resource version at last sync.
func (w *watcher) LastSyncResourceVersion() string {
	return w.informer.Informer().LastSyncResourceVersion()
}
// wrapHandler adapts a ResourceEventHandler to client-go's
// cache.ResourceEventHandler for a single GVRK. Objects are masked via
// maskUnstructured before delivery, and update events whose JSON is
// byte-identical at the same resource version are suppressed.
func wrapHandler(wrapped ResourceEventHandler, gvrk GroupVersionResourceKind) cache.ResourceEventHandler {
	return cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			objUn, ok := obj.(*unstructured.Unstructured)
			if !ok {
				logger.Error("unable to cast obj to *Unstructured")
			}
			if objUn != nil {
				objUn, err := maskUnstructured(objUn)
				if err != nil {
					logger.Errorw("unable to mask Unstructured", "error", err)
					return
				}
				wrapped.OnAdd(gvrk, *objUn)
			}
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			// TODO: can we have a better way to suppress update events when
			// a resync is forced because of a network error
			oldUn, oldOk := oldObj.(*unstructured.Unstructured)
			if !oldOk {
				logger.Error("unable to cast oldObj to *Unstructured")
			}
			newUn, newOk := newObj.(*unstructured.Unstructured)
			if !newOk {
				logger.Error("unable to cast newObj to *Unstructured")
			}
			if oldOk && newOk &&
				oldUn.GetResourceVersion() == newUn.GetResourceVersion() {
				// deep check that nothing has changed
				oldJson, err := oldUn.MarshalJSON()
				if err != nil {
					logger.Errorw("unable to marshal oldUn to json", "error", err)
				}
				// Fix: was logger.Errorf with key/value pairs, which treats
				// "error"/err as stray printf arguments; Errorw is the
				// structured form used everywhere else in this function.
				newJson, err := newUn.MarshalJSON()
				if err != nil {
					logger.Errorw("unable to marshal newUn to json", "error", err)
				}
				if err == nil {
					if bytes.Equal(oldJson, newJson) {
						return
					}
				}
			}
			if oldUn != nil && newUn != nil {
				oldUn, err := maskUnstructured(oldUn)
				if err != nil {
					logger.Errorw("unable to mask Unstructured", "error", err)
					// Fix: return on failure like the newUn branch below —
					// continuing with a possibly-nil masked oldUn would
					// panic on the dereference in OnUpdate.
					return
				}
				newUn, err := maskUnstructured(newUn)
				if err != nil {
					logger.Errorw("unable to mask Unstructured", "error", err)
					return
				}
				wrapped.OnUpdate(gvrk, *oldUn, *newUn)
			}
		},
		DeleteFunc: func(obj interface{}) {
			objUn, ok := obj.(*unstructured.Unstructured)
			if !ok {
				logger.Error("unable to cast obj to *Unstructured")
			}
			if objUn != nil {
				objUn, err := maskUnstructured(objUn)
				if err != nil {
					logger.Errorw("unable to mask Unstructured", "error", err)
					return
				}
				wrapped.OnDelete(gvrk, *objUn)
			}
		},
	}
}
// ResourceEventHandler receives masked add/update/delete notifications
// for a watched resource, tagged with its GVRK.
type ResourceEventHandler interface {
	OnAdd(gvrk GroupVersionResourceKind, obj unstructured.Unstructured)
	OnUpdate(gvrk GroupVersionResourceKind, oldObj, newObj unstructured.Unstructured)
	OnDelete(gvrk GroupVersionResourceKind, obj unstructured.Unstructured)
}
// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
// as few of the notification functions as you want while still implementing
// ResourceEventHandler.
// Observer, when set, lets OnDelete clean the object's entry out of the
// parents store.
type ResourceEventHandlerFuncs struct {
	Observer   *Observer
	AddFunc    func(gvrk GroupVersionResourceKind, obj unstructured.Unstructured)
	UpdateFunc func(gvrk GroupVersionResourceKind, oldObj, newObj unstructured.Unstructured)
	DeleteFunc func(gvrk GroupVersionResourceKind, obj unstructured.Unstructured)
}
// OnAdd calls AddFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnAdd(gvrk GroupVersionResourceKind, obj unstructured.Unstructured) {
	if r.AddFunc == nil {
		return
	}
	r.AddFunc(gvrk, obj)
}

// OnUpdate calls UpdateFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnUpdate(gvrk GroupVersionResourceKind, oldObj, newObj unstructured.Unstructured) {
	if r.UpdateFunc == nil {
		return
	}
	r.UpdateFunc(gvrk, oldObj, newObj)
}
// OnDelete calls DeleteFunc if it's not nil, then removes the object's
// entry from the parents store. The store cleanup is now guarded: the
// old code dereferenced r.Observer unconditionally and panicked whenever
// the adaptor was built without an Observer.
func (r ResourceEventHandlerFuncs) OnDelete(gvrk GroupVersionResourceKind, obj unstructured.Unstructured) {
	if r.DeleteFunc != nil {
		r.DeleteFunc(gvrk, obj)
	}
	if r.Observer != nil && r.Observer.ParentsStore != nil {
		r.Observer.ParentsStore.Delete(obj.GetNamespace(), obj.GetKind(), obj.GetName())
	}
}
|
package commands
import (
	"os"

	"github.com/spf13/cobra"
)
// Execute builds the root "bro" command, registers its subcommands, and
// runs it. The error from Execute (previously ignored) now drives a
// non-zero exit status; cobra itself has already printed the failure.
func Execute() {
	rootCmd := &cobra.Command{Use: "bro"}
	rootCmd.AddCommand(getEchoCommand())
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
|
package subscription
import (
"github.com/dennor/go-paddle/events/types"
"github.com/dennor/phpserialize"
)
// PaymentRefundedAlertName is the alert_name value Paddle sends for this event.
const PaymentRefundedAlertName = "subscription_payment_refunded"
// PaymentRefunded refer to https://paddle.com/docs/subscriptions-event-reference/#subscription_payment_refunded
// Numeric fields arrive as strings in the webhook payload, hence the
// ",string" JSON options; PSignature is excluded from PHP serialization
// (php:"-") because the signature covers the other fields.
type PaymentRefunded struct {
	AlertID                 int                      `json:"alert_id,string"`
	AlertName               string                   `json:"alert_name"`
	Amount                  *types.CurrencyValue     `json:"amount,string"`
	BalanceCurrency         string                   `json:"balance_currency"`
	BalanceEarningsDecrease *types.CurrencyValue     `json:"balance_earnings_decrease,string"`
	BalanceFeeRefund        *types.CurrencyValue     `json:"balance_fee_refund,string"`
	BalanceGrossRefund      *types.CurrencyValue     `json:"balance_gross_refund,string"`
	BalanceTaxRefund        *types.CurrencyValue     `json:"balance_tax_refund,string"`
	CheckoutID              string                   `json:"checkout_id"`
	Currency                string                   `json:"currency"`
	EarningsDecrease        *types.CurrencyValue     `json:"earnings_decrease,string"`
	Email                   string                   `json:"email"`
	EventTime               *types.Datetime          `json:"event_time,string"`
	FeeRefund               *types.CurrencyValue     `json:"fee_refund,string"`
	GrossRefund             *types.CurrencyValue     `json:"gross_refund,string"`
	InitialPayment          int                      `json:"initial_payment,string"`
	Instalments             int                      `json:"instalments,string"`
	MarketingConsent        *types.MarketingConsent  `json:"marketing_consent,string"`
	OrderID                 string                   `json:"order_id"`
	Passthrough             string                   `json:"passthrough"`
	Quantity                int                      `json:"quantity,string"`
	RefundReason            string                   `json:"refund_reason"`
	RefundType              string                   `json:"refund_type"`
	Status                  string                   `json:"status"`
	SubscriptionID          int                      `json:"subscription_id,string"`
	SubscriptionPaymentID   int                      `json:"subscription_payment_id,string"`
	SubscriptionPlanID      int                      `json:"subscription_plan_id,string"`
	TaxRefund               *types.CurrencyValue     `json:"tax_refund,string"`
	UnitPrice               *types.CurrencyValue     `json:"unit_price,string"`
	UserID                  int                      `json:"user_id,string"`
	PSignature              string                   `json:"p_signature" php:"-"`
}
// Serialize PHP-serializes the event (excluding PSignature) so the
// payload can be verified against Paddle's signature.
func (s *PaymentRefunded) Serialize() ([]byte, error) {
	return phpserialize.Marshal(s)
}
// Signature returns the raw p_signature bytes from the webhook; the
// error is always nil and exists to satisfy the verifier interface.
func (s *PaymentRefunded) Signature() ([]byte, error) {
	return []byte(s.PSignature), nil
}
|
package map_slice_array
import (
"fmt"
"gengine/builder"
"gengine/context"
"gengine/engine"
"testing"
"time"
)
// MapArray bundles a map, a fixed-size array, and a slice so the rule
// below can exercise gengine's indexing/assignment support on all three.
type MapArray struct {
	Mx map[string]bool
	Ax [3]int
	Sx []string
}
// ma_rule is a gengine DSL rule (not Go) that reads and writes Ma's map,
// array, and slice elements; the string content must stay byte-identical
// because the engine parses it at runtime.
const ma_rule = `
rule "测试规则" "rule desc"
begin
x = Ma.Mx["hello"]
PrintName(x)
Ma.Mx["hello"] = false
b = "your"
Ma.Mx[b]= true
y = Ma.Mx
PrintName("------",y["hello"])
if x {
PrintName("Single data")
}
if 2 == 2 {
PrintName("true == true")
}
if x == true {
PrintName("haha")
}
if !x {
PrintName("haha")
}else{
PrintName("!x")
}
xx = Ma.Ax[2]
PrintName(xx)
Ma.Ax[2] = 300011111
PrintName(Ma.Ax[2])
yy = Ma.Ax
PrintName(yy[1])
if yy[2] == 20000 {
PrintName("20000")
}
z = Ma.Sx[1]
PrintName("z--1--->",z)
//you can read data from zz,but you can set value to zz
zz = Ma.Sx
if zz[2] == "kkkk"{
PrintName("z--2--->","kkkk")
}
a = 2
Ma.Sx[a] = "MMMM"
PrintName("z--3-->", Ma.Sx[a])
end
`
// Test_map_array loads ma_rule into a fresh engine and executes it
// against a populated MapArray, timing both the build and the run.
func Test_map_array(t *testing.T) {
	Ma := &MapArray{
		Mx: map[string]bool{"hello": true},
		Ax: [3]int{1000, 20000, 300},
		Sx: []string{"jjj", "lll", "kkkk"},
	}
	dataContext := context.NewDataContext()
	dataContext.Add("PrintName", fmt.Println)
	dataContext.Add("Ma", Ma)
	//init rule engine
	ruleBuilder := builder.NewRuleBuilder(dataContext)
	// load the rules (was: 读取规则)
	start1 := time.Now().UnixNano()
	err := ruleBuilder.BuildRuleFromString(ma_rule)
	end1 := time.Now().UnixNano()
	println(fmt.Sprintf("rules num:%d, load rules cost time:%d ns", len(ruleBuilder.Kc.RuleEntities), end1-start1))
	if err != nil {
		panic(err)
	}
	eng := engine.NewGengine()
	start := time.Now().UnixNano()
	// true: means when there are many rules, if one rule execute error,continue to execute rules after the occur error rule
	err = eng.Execute(ruleBuilder, true)
	end := time.Now().UnixNano()
	if err != nil {
		panic(err)
	}
	println(fmt.Sprintf("execute rule cost %d ns", end-start))
}
// Test_unptr_map checks that a non-pointer map injected into the data
// context can still be written by a rule (maps are reference types).
func Test_unptr_map(t *testing.T) {
	Ma := make(map[int]string)
	dataContext := context.NewDataContext()
	dataContext.Add("PrintName", fmt.Println)
	dataContext.Add("Ma", Ma)
	//init rule engine
	ruleBuilder := builder.NewRuleBuilder(dataContext)
	err := ruleBuilder.BuildRuleFromString(`
rule "1"
begin
a = 1
Ma[a] = "xxx"
end
`)
	if err != nil {
		panic(err)
	}
	eng := engine.NewGengine()
	// true: means when there are many rules, if one rule execute error,continue to execute rules after the occur error rule
	err = eng.Execute(ruleBuilder, true)
	if err != nil {
		panic(err)
	}
	println(Ma[1])
}
|
package main
import "testing"
// Test drives QuickSort over a table of inputs and checks the number of
// comparisons it reports for each slice.
func Test(t *testing.T) {
	cases := []struct {
		arr  []int
		want int
	}{
		{[]int{}, 0},
		{[]int{8, 4}, 1},
		{[]int{8, 12, 4}, 2},
		{[]int{8, 6, 1, 16, 4}, 6},
		{[]int{2148, 9058, 7742, 3153, 6324, 609, 7628, 5469, 7017, 50}, 21},
	}
	for _, tc := range cases {
		if got := QuickSort(tc.arr, 0, len(tc.arr)-1); got != tc.want {
			t.Errorf("QuickSort(%v) == %d comparisons, want %d", tc.arr, got, tc.want)
		}
	}
}
|
package random
import (
"math/rand"
"time"
)
// GenerateRandomIntInRange returns a pseudo-random int in [min, max).
// Fixes: the old code re-seeded the global source with second-granularity
// time.Now().Unix() on every call, so calls within the same second all
// returned the same value; it also panicked when max <= min (rand.Intn
// requires a positive argument). A degenerate range now yields min.
func GenerateRandomIntInRange(min, max int) int {
	if max <= min {
		return min
	}
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return rng.Intn(max-min) + min
}
// GenerateRandomFloat returns a pseudo-random float64 in [0.0, 1.0).
// Uses a nanosecond-seeded local source instead of re-seeding the global
// generator with second-granularity time, which made calls within the
// same second return identical values.
func GenerateRandomFloat() float64 {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return rng.Float64()
}
func SleepWithDefaultRange() {
time.Sleep(time.Millisecond * time.Duration(GenerateRandomIntInRange(1000, 20000)))
} |
package wxgamevp
import (
"fmt"
"github.com/birjemin/wxgamevp/utils"
"github.com/spf13/cast"
"log"
)
// Balance model: identifies a player/app for a WeChat mini-game
// virtual-currency balance query. Secret signs the request, AccessToken
// authenticates it, and Debug switches to the sandbox endpoint.
type Balance struct {
	OpenID      string
	AppID       string
	OfferID     string
	Ts          int
	ZoneID      string
	Pf          string
	UserIP      string
	AccessToken string
	Secret      string
	HTTPRequest *utils.HTTPClient
	Debug       bool
}
// RespBalance response: balance figures returned by the API, with the
// embedded CommonError carrying the API-level error code/message.
type RespBalance struct {
	CommonError
	Balance    int `json:"balance"`
	GenBalance int `json:"gen_balance"`
	FirstSave  int `json:"first_save"`
	SaveAmt    int `json:"save_amt"`
	SaveSum    int `json:"save_sum"`
	CostSum    int `json:"cost_sum"`
	PresentSum int `json:"present_sum"`
}
// GetBalance get balance against the production domain (wechatDomain,
// declared elsewhere in this package).
func (b *Balance) GetBalance() (*RespBalance, error) {
	return b.doGetBalance(wechatDomain)
}
// doGetBalance marshals the signed query parameters to JSON, POSTs them
// to domain + balance URI (access_token in the query string), and decodes
// the response into a RespBalance.
func (b *Balance) doGetBalance(domain string) (*RespBalance, error) {
	params := b.getQueryParams()
	jsonStr, err := jsonIter.Marshal(params)
	if err != nil {
		log.Println("[balance]doGetBalance, json marshal failed", err, string(jsonStr))
		return nil, err
	}
	url := fmt.Sprintf("%s%s?access_token=%s", domain, b.getBalanceURI(), b.AccessToken)
	// log.Println("post url: ", url)
	// log.Println("post str: ", string(jsonStr))
	if err := b.HTTPRequest.HTTPPostJSON(url, string(jsonStr)); err != nil {
		log.Println("[balance]doGetBalance, post failed", err)
		return nil, err
	}
	var respBalance = new(RespBalance)
	if err = b.HTTPRequest.GetResponseJSON(respBalance); err != nil {
		log.Println("[balance]doGetBalance, response json failed", err)
		return nil, err
	}
	return respBalance, nil
}
// getQueryParams assembles the balance-query parameters and signs them.
// user_ip is attached only when set; the signature covers every
// parameter added before it.
func (b *Balance) getQueryParams() map[string]string {
	params := map[string]string{
		"openid":   b.OpenID,
		"appid":    b.AppID,
		"offer_id": b.OfferID,
		"ts":       cast.ToString(b.Ts),
		"zone_id":  b.ZoneID,
		"pf":       b.Pf,
	}
	if b.UserIP != "" {
		params["user_ip"] = b.UserIP
	}
	params["sig"] = GenerateSign(b.getBalanceURI(), "POST", "secret", b.Secret, params)
	return params
}
// getBalanceURI picks the sandbox endpoint in Debug mode, otherwise the
// production one.
func (b *Balance) getBalanceURI() string {
	uri := getBalanceURI
	if b.Debug {
		uri = getSandboxBalanceURI
	}
	return uri
}
|
package cli
import (
"github.com/ronaudinho/dot/api"
)
// App is the CLI application shell, holding its API-service backend.
type App struct {
	// printer
	be api.Service
}
// NewApp constructs an App backed by the given API service.
func NewApp(svc api.Service) *App {
	app := App{be: svc}
	return &app
}
|
package utils
// NestedEntityError pairs an underlying error with a numeric code so the
// code can travel alongside the original failure.
type NestedEntityError struct {
	InnerError error
	Code       int
}

// Error returns the wrapped error's message, satisfying the error
// interface. A nil InnerError now yields "" instead of panicking on the
// nil-interface method call.
func (e NestedEntityError) Error() string {
	if e.InnerError == nil {
		return ""
	}
	return e.InnerError.Error()
}
|
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
import (
"bytes"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pmetric/internal/pmetricjson"
)
// delegate performs the actual OTLP/JSON encoding of the proto form.
var delegate = pmetricjson.JSONMarshaler
// Compile-time check that JSONMarshaler satisfies Marshaler.
var _ Marshaler = (*JSONMarshaler)(nil)
// JSONMarshaler serializes pmetric.Metrics to OTLP/JSON bytes.
type JSONMarshaler struct{}
// MarshalMetrics converts md to its protobuf representation and encodes
// it as OTLP/JSON via the package-level delegate.
func (*JSONMarshaler) MarshalMetrics(md Metrics) ([]byte, error) {
	buf := bytes.Buffer{}
	pb := internal.MetricsToProto(internal.Metrics(md))
	err := delegate.Marshal(&buf, &pb)
	return buf.Bytes(), err
}
// JSONUnmarshaler parses OTLP/JSON bytes back into pmetric.Metrics.
type JSONUnmarshaler struct{}
// UnmarshalMetrics decodes buf as OTLP/JSON MetricsData and wraps it in
// the pdata Metrics type; a decode failure returns an empty Metrics.
func (*JSONUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) {
	var md otlpmetrics.MetricsData
	if err := pmetricjson.UnmarshalMetricsData(buf, &md); err != nil {
		return Metrics{}, err
	}
	return Metrics(internal.MetricsFromProto(md)), nil
}
|
package config
import (
"errors"
"fmt"
"time"
)
// System configuration defaults (translated from: 一些系统变量).
// Each dXxx constant is the fallback used by the matching CXxx getter
// when the key is absent from the configuration source.
const (
	dZipkinAddr = ""
	dConsulAddr = ""
	dFileServerStaticPath = ""
	dLocalIP = ""
	dFileserverIP = ""
	dFileserverPort = 0
	dCIDir = ""
	dLocalSSHPort = 0
	dLocalSSHUser = ""
	dLocalSSHPass = ""
	dRedisHost = ""
	dRedisPort = 0
	dNatsAddr = ""
	dOsExitSignal = -1
	dConfigPrefix = ""
	dRegisterTtL = 0
	dRegisterInterval = 0
	dManagementTimeOutSecond = 0
	dClientRequestTimeOutSecond = 0
	dClientRequestPoolNum = 0
	dClientDialTimeOutSecond = 0
	dCloudMysqlHost = ""
	dCloudMysqlPort = 0
	dCloudMysqlUserName = ""
	dCloudMysqlPasswd = ""
	dCloudMysqlDatabase = ""
	dMysqlHost = ""
	dMysqlPort = 0
	dMysqlUserName = ""
	dMysqlPasswd = ""
	dMysqlDatabase = ""
	dMysqlMaxOpen = 0
	dMysqlMaxIdle = 0
	dMaxLifeTime = 0
	dExecLogLocationPrefix = ""
	dSftpAddr = ""
	dSftpPort = 0
	dSftpUserName = ""
	dSftpPassword = ""
	dSftpPath = ""
	// dUserServiceAddr = ""
	// dUserServicePrefix = ""
	// dCloudServiceAddr = ""
	// dVmServicePrefix = ""
	dGrafanaPrefix = ""
	dGraylogPrefix = ""
	dAlertManagerPrefix = ""
	dFileServerPrefix = ""
	dFileServiceAddr = ""
)
// Per-service defaults: LDAP, K8s, ELK, cluster tooling (Jenkins/Sonar),
// and agent install/download settings.
const (
	dDefaultHost = ""
	dAppName = ""
	dLogServiceHost = dDefaultHost
	dLogServicePort = 0
	dLdapHost = ""
	dLdapPort = 0
	dLdapUser = ""
	dLdapPassword = ""
	dLdapSearchdn = ""
	dK8sUser = ""
	dK8sPassword = ""
	dMasterip = ""
	dELKHost = ""
	dELKPort = 0
	dELKUser = ""
	dELKPassword = ""
	dELKIndexMaxResultWindow = 0
	dClusterServiceControllerURL = ""
	dClusterExternalNetworkPort = 0
	dClusterHarbor = ""
	dVMEtcdHost = ""
	dVMEtcdPort = 0
	dVMJenkinsHost = ""
	dVMJenkinsPort = 0
	dVMJenkinsUser = ""
	dVMJenkinsPassword = ""
	dVMSonarHost = ""
	dVMSonarPort = 0
	dVMSonarUser = ""
	dVMSonarPassword = ""
	dVMSonarEMailHost = ""
	dVMSonarEMailPort = 0
	dVMSonarEMailUser = ""
	dVMSonarEMailPassword = ""
	dVMSonarEMailDefaultTitle = ""
	dSonarAccessHost = ""
	dSonarAccessPort = 0
	dSonarUser = ""
	dSonarPassword = ""
	dwarURL = ""
	dConsulHost = ""
	dConsulPort = 0
	dToolServerHost = ""
	//dAgentInstallCMD string = ""
	dLinuxAgentInstallCMD string = ""
	dLinuxAgentDownloadURL string = ""
	dWindowsAgentDownloadURL string = ""
	dWindowsAgentDownloadURLShow string = ""
	dWindowsAgentInstallCMD string = ""
	dLinuxAgentLogPath string = ""
	dWindowsAgentLogPath string = ""
	dLinuxAgentPath string = ""
	dWindowsAgentPath string = ""
	dSuperPassword string = ""
)
// Error/reporting constants (translated from: 一些错误变量).
// NOTE(review): most of these are table names and sentinel row values,
// not errors — the group comment looks historical; verify before relying
// on it.
const (
	dNoneRowsAffect = ""
	ZeroRows = 0
	ZeroId = 0
	HostInfoLogTable = ""
	HostInfoTable = ""
	SoftwareInfoTable = ""
	SoftwareInfoLogTable = ""
	SoftwareConfigTable = ""
	TClusterNodeTable = ""
	TClusterNodeGroupTable = ""
	TClusterNodeGroupLinkTable = ""
	TClusterNodeJobTable = ""
	ManageTable = ""
	ManageJobListTable = ""
	ManageTableItemsColoum = ""
	UnkownColoumIndex = 0
	Deleted = 0
	NoDeleted = 0
	EmptyStr = ""
)
// KIBANA_ADDR is the Kibana endpoint.
// NOTE(review): ALL_CAPS deviates from Go naming (MixedCaps); renaming
// would touch callers, so only flagged here.
const KIBANA_ADDR = ""
// Core getters: each CXxx reads its config key via cStr/cInt (defined
// elsewhere in this package) and falls back to the matching dXxx default.
func CDefaultHost() string {
	return cStr("default_host", dDefaultHost)
}
func CAppName() string {
	return cStr("app_name", dAppName)
}
// NOTE(review): key "log_service_Host" has an unusual capital H — it
// must match the config source exactly, so it is left untouched.
func CLogServiceHost() string {
	return cStr("log_service_Host", dLogServiceHost)
}
func CLogServicePort() int {
	return cInt("log_service_port", dLogServicePort)
}
func CLdapHost() string {
	return cStr("ldap_host", dLdapHost)
}
func CLdapPort() int {
	return cInt("ldap_port", dLdapPort)
}
func CLdapUser() string {
	return cStr("ldap_user", dLdapUser)
}
func CLdapPassword() string {
	return cStr("ldap_password", dLdapPassword)
}
func CLdapSearchdn() string {
	return cStr("ldap_searchdn", dLdapSearchdn)
}
func CK8sUser() string {
	return cStr("k8s_user", dK8sUser)
}
func CK8sMasterip() string {
	return cStr("k8s_masterip", dMasterip)
}
func CK8sPassword() string {
	return cStr("k8s_password", dK8sPassword)
}
// NOTE(review): key "elk_Host" — same capitalization caveat as above.
func CELKHost() string {
	return cStr("elk_Host", dELKHost)
}
func CRedisHost() string {
	return cStr("redis_host", dRedisHost)
}
func CRedisPort() int {
	return cInt("redis_port", dRedisPort)
}
func CELKPort() int {
	return cInt("elk_port", dELKPort)
}
func CELKUser() string {
	return cStr("elk_user", dELKUser)
}
func CELKPassword() string {
	return cStr("elk_password", dELKPassword)
}
func CELKIndexMaxResultWindow() int {
	return cInt("elk_index_max_result_window", dELKIndexMaxResultWindow)
}
// Infrastructure getters: tracing, service discovery, file server, SSH,
// messaging, and process-control settings.
func CZipkinAddr() string {
	return cStr("zipkin_addr", dZipkinAddr)
}
func CConsulAddr() string {
	return cStr("consul_addr", dConsulAddr)
}
func CLocalIP() string {
	return cStr("local_ip", dLocalIP)
}
func CFileserverIP() string {
	return cStr("fileserver_ip", dFileserverIP)
}
func CFileserverPort() int {
	return cInt("fileserver_port", dFileserverPort)
}
func CCIDir() string {
	return cStr("ci_dir", dCIDir)
}
func CLocalSSHPort() int {
	return cInt("local_ssh_port", dLocalSSHPort)
}
func CFileServerStaticPath() string {
	return cStr("file_server_static_path", dFileServerStaticPath)
}
func CLocalSSHUser() string {
	return cStr("local_ssh_user", dLocalSSHUser)
}
func CLocalSSHPass() string {
	return cStr("local_ssh_pass", dLocalSSHPass)
}
func CNatsAddr() string {
	return cStr("nats_addr", dNatsAddr)
}
// COsExitSignal returns the configured exit signal (default -1 = unset).
func COsExitSignal() int {
	return cInt("os_exit_signal", dOsExitSignal)
}
func CConfigPrefix() string {
	return cStr("config_prefix", dConfigPrefix)
}
// Storage and client-tuning getters: cloud/local MySQL, SFTP, and
// client request-pool/timeout settings.
func CCloudMysqlHost() string {
	return cStr("cloud_mysql_host", dCloudMysqlHost)
}
func CCloudMysqlPort() int {
	return cInt("cloud_mysql_port", dCloudMysqlPort)
}
func CCloudMysqlUserName() string {
	return cStr("cloud_mysql_user_name", dCloudMysqlUserName)
}
func CCloudMysqlPasswd() string {
	return cStr("cloud_mysql_passwd", dCloudMysqlPasswd)
}
func CCloudMysqlDatabase() string {
	return cStr("cloud_mysql_database", dCloudMysqlDatabase)
}
func CMysqlHost() string {
	return cStr("mysql_host", dMysqlHost)
}
func CMysqlPort() int {
	return cInt("mysql_port", dMysqlPort)
}
func CMysqlUserName() string {
	return cStr("mysql_user_name", dMysqlUserName)
}
func CMysqlPasswd() string {
	return cStr("mysql_passwd", dMysqlPasswd)
}
func CMysqlDatabase() string {
	return cStr("mysql_database", dMysqlDatabase)
}
func CMysqlMaxOpen() int {
	return cInt("mysql_max_open", dMysqlMaxOpen)
}
func CMysqlMaxIdle() int {
	return cInt("mysql_max_idle", dMysqlMaxIdle)
}
// CMysqlMaxLifeTime returns the connection max lifetime in seconds.
func CMysqlMaxLifeTime() time.Duration {
	return time.Duration(cInt("mysql_max_life_time", dMaxLifeTime)) * time.Second
}
func CExecLogLocationPrefix() string {
	return cStr("exec_log_location_prefix", dExecLogLocationPrefix)
}
func CSftpAddr() string {
	return cStr("sftp_addr", dSftpAddr)
}
func CSftpPort() int {
	return cInt("sftp_port", dSftpPort)
}
// NOTE(review): key is "sftp_name", not "sftp_user_name" — confirm
// against the config source before "fixing" it.
func CSftpUserName() string {
	return cStr("sftp_name", dSftpUserName)
}
func CSftpPassword() string {
	return cStr("sftp_password", dSftpPassword)
}
func CSftpPrefixPath() string {
	return cStr("sftp_path", dSftpPath)
}
func CManagementTimeOutSecond() int {
	return cInt("management_time_out_second", dManagementTimeOutSecond)
}
// CClientRequestTimeOutSecond returns the request timeout in seconds.
func CClientRequestTimeOutSecond() time.Duration {
	return time.Duration(cInt("client_request_time_out_second", dClientRequestTimeOutSecond)) * time.Second
}
func CClientRequestPoolNum() int {
	return cInt("client_request_pool_num", dClientRequestPoolNum)
}
// CClientDialTimeOutSecond returns the client dial timeout in seconds.
// Fix: the raw cInt value was returned as bare nanoseconds — unlike the
// sibling CClientRequestTimeOutSecond and contrary to the "Second" in
// this function's name — making configured timeouts ~1e9x shorter than
// intended.
func CClientDialTimeOutSecond() time.Duration {
	return time.Duration(cInt("client_dial_time_out_second", dClientDialTimeOutSecond)) * time.Second
}
// Retired getters kept for reference (their dXxx defaults are also
// commented out above).
// func CUserServiceAddr() *url.URL {
// if addrUrl, err := url.Parse(cStr("user_service_addr", dUserServiceAddr)); err != nil {
// return nil
// } else {
// return addrUrl
// }
// }
// func CCloudServiceAddr() *url.URL {
// if addrUrl, err := url.Parse(cStr("cloud_service_addr", dCloudServiceAddr)); err != nil {
// return nil
// } else {
// return addrUrl
// }
// }
// func CVmServicePrefix() string {
// return cStr("vm_service_prefix", dVmServicePrefix)
// }
// func CUserServicePrefix() string {
// return cStr("user_service_prefix", dUserServicePrefix)
// }
// URL-prefix getters for the dashboard/file services.
func CGrafanaPrefix() string {
	return cStr("grafana_prefix", dGrafanaPrefix)
}
func CGraylogPrefix() string {
	return cStr("graylog_prefix", dGraylogPrefix)
}
func CAlertManagerPrefix() string {
	return cStr("alert_manager_prefix", dAlertManagerPrefix)
}
func CFileServerPrefix() string {
	return cStr("file_server_prefix", dFileServerPrefix)
}
func CFileServiceAddr() string {
	return cStr("file_service_addr", dFileServiceAddr)
}
//一些额外的Get
func CNoneRowsAffect(tableName string) error {
return errors.New(fmt.Sprintf("table:%s %s", tableName, dNoneRowsAffect))
}
// CNotFind builds the error reported when no row matches condition in
// tableName. Fixes: fmt.Errorf replaces errors.New(fmt.Sprintf(...)),
// and the message's unbalanced "(" now has its closing parenthesis.
func CNotFind(tableName string, condition interface{}) error {
	return fmt.Errorf("can not find item(table:%s condition:%+v)", tableName, condition)
}
// VM/cluster tooling getters (etcd, Jenkins, Sonar, Consul, Harbor).
// NOTE(review): these keys use MixedCaps ("VMEtcdHost") unlike the
// snake_case keys earlier in this file — must match the config source.
func CVMEtcdHost() string {
	return cStr("VMEtcdHost", dVMEtcdHost)
}
func CVMEtcdPort() int {
	return cInt("VMEtcdPort", dVMEtcdPort)
}
func CVMJenkinsHost() string {
	return cStr("VMJenkinsHost", dVMJenkinsHost)
}
func CVMJenkinsPort() int {
	return cInt("VMJenkinsPort", dVMJenkinsPort)
}
func CVMJenkinsUser() string {
	return cStr("VMJenkinsUser", dVMJenkinsUser)
}
func CVMJenkinsPassword() string {
	return cStr("VMJenkinsPassword", dVMJenkinsPassword)
}
func CVMSonarHost() string {
	return cStr("VMSonarHost", dVMSonarHost)
}
func CVMSonarPort() int {
	return cInt("VMSonarPort", dVMSonarPort)
}
func CVMSonarUser() string {
	return cStr("VMSonarUser", dVMSonarUser)
}
func CVMSonarPassword() string {
	return cStr("VMSonarPassword", dVMSonarPassword)
}
func CVMSonarEMailHost() string {
	return cStr("VMSonarEMailHost", dVMSonarEMailHost)
}
func CVMSonarEMailPort() int {
	return cInt("VMSonarEMailPort", dVMSonarEMailPort)
}
func CVMSonarEMailUser() string {
	return cStr("VMSonarEMailUser", dVMSonarEMailUser)
}
func CVMSonarEMailPassword() string {
	return cStr("VMSonarEMailPassword", dVMSonarEMailPassword)
}
func CVMSonarEMailDefaultTitle() string {
	return cStr("VMSonarEMailDefaultTitle", dVMSonarEMailDefaultTitle)
}
// NOTE(review): SonarAccessHost/SonarAccessPort lack the C prefix used
// by every other getter here; renaming would break callers, so flagged
// only.
func SonarAccessHost() string {
	return cStr("SonarAccessHost", dSonarAccessHost)
}
func SonarAccessPort() int {
	return cInt("SonarAccessPort", dSonarAccessPort)
}
func CSonarUser() string {
	return cStr("SonarUser", dSonarUser)
}
func CSonarPassword() string {
	return cStr("SonarPassword", dSonarPassword)
}
func WarURL() string {
	return cStr("war_url", dwarURL)
}
func CConsulHost() string {
	return cStr("ConsulHost", dConsulHost)
}
func CConsulPort() int {
	return cInt("ConsulPort", dConsulPort)
}
func ServiceControllerURL() string {
	return cStr("ClusterServiceControllerURL", dClusterServiceControllerURL)
}
func ExternalNetworkPort() int {
	return cInt("ClusterExternalNetworkPort", dClusterExternalNetworkPort)
}
func Harbor() string {
	return cStr("ClusterHarbor", dClusterHarbor)
}
// Unused:
//func CAgentInstallCMD() string {
// return cStr("agent_install_cmd", dAgentInstallCMD)
//}
// Agent download URLs, install commands and filesystem paths, per OS.
func CLinuxAgentDownloadURL() string {
	return cStr("linux_agent_download_url", dLinuxAgentDownloadURL)
}
func CLinuxAgentInstallCMD() string {
	return cStr("linux_agent_install_cmd", dLinuxAgentInstallCMD)
}
func CWindowsAgentInstallCMD() string {
	return cStr("windows_agent_install_cmd", dWindowsAgentInstallCMD)
}
func CLinuxAgentLogPath() string {
	return cStr("linux_agent_log_path", dLinuxAgentLogPath)
}
func CWindowsAgentLogPath() string {
	return cStr("windows_agent_log_path", dWindowsAgentLogPath)
}
func CLinuxAgentPath() string {
	return cStr("linux_agent_path", dLinuxAgentPath)
}
func CWindowsAgentPath() string {
	return cStr("windows_agent_path", dWindowsAgentPath)
}
//dWindowsAgentDownloadURL
func CWindowsAgentDownloadURL() string {
	return cStr("windows_agent_download_url", dWindowsAgentDownloadURL)
}
func CWindowsAgentDownloadURLShow() string {
	return cStr("windows_agent_download_url_show", dWindowsAgentDownloadURLShow)
}
// Service-registry TTL and heartbeat interval, converted from integer
// seconds in the config to time.Duration.
func CRegisterTtL() time.Duration {
	return time.Duration(cInt("register_ttL", dRegisterTtL)) * time.Second
}
func CRegisterInterval() time.Duration {
	return time.Duration(cInt("register_interval", dRegisterInterval)) * time.Second
}
// Kibana addresses; both fall back to the same KIBANA_ADDR default.
func CKibanaAddr() string {
	return cStr("kibana_addr", KIBANA_ADDR)
}
func CKibanaAddrExternal() string {
	return cStr("kibana_addr_external", KIBANA_ADDR)
}
// SuperPassword returns the administrative override password ("super_password").
func SuperPassword() string {
	return cStr("super_password", dSuperPassword)
}
// Terraform binary and template locations; both default to "".
func TerraformPath() string {
	return cStr("terraform_path", "")
}
func TerraformTPLPath() string {
	return cStr("terraform_tpl_path", "")
}
// CToolServerHost returns the tool-server host ("tool_server_host").
func CToolServerHost() string {
	return cStr("tool_server_host", dToolServerHost)
}
|
package main
import (
"log"
"net/smtp"
)
// var (
// subject = flag.String("s", "", "subject of the mail")
// body = flag.String("b", "", "body of themail")
// reciMail = flag.String("m", "", "recipient mail address")
// )
// func main() {
// // Set up authentication information.
// flag.Parse()
// sub := fmt.Sprintf("subject: %s\r\n\r\n", *subject)
// content := *body
// mailList := strings.Split(*reciMail, ",")
// auth := smtp.PlainAuth(
// "",
// "1518522971@qq.com",
// "logudhmiyzqtiffc",
// "smtp.qq.com",
// //"smtp.gmail.com",
// )
// // Connect to the server, authenticate, set the sender and recipient,
// // and send the email all in one step.
// err := smtp.SendMail(
// "smtp.qq.com:25",
// auth,
// "1518522971@qq.com",
// mailList,
// []byte(sub+content),
// )
// if err != nil {
// log.Fatal(err)
// }
// }
// main sends a short test mail through QQ's SMTP server.
//
// NOTE(review): the account password is hard-coded in source — move it to
// an environment variable or secrets store before real use.
func main() {
	// BUG FIX: the hostname contained a stray space ("smtp. qq.com"),
	// which made the DNS lookup (and TLS hostname check) fail.
	hostname := "smtp.qq.com"
	to := []string{"2593929657@qq.com"}
	from := "1518522971@qq.com"
	passwd := "logudhmiyzqtiffc"
	auth := smtp.PlainAuth("", from, passwd, hostname)
	// Port 587 speaks STARTTLS, which net/smtp.SendMail supports; the old
	// target, port 465, expects implicit TLS and SendMail would hang on it.
	err := smtp.SendMail(hostname+":587", auth, from, to, []byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
}
|
package listener
import (
"app/base/utils"
"context"
"github.com/gin-gonic/gin"
"github.com/segmentio/kafka-go"
ginprometheus "github.com/zsais/go-gin-prometheus"
)
var (
uploadReader *kafka.Reader
eventsReader *kafka.Reader
)
// configure reads the Kafka settings from the environment (failing hard
// when any variable is missing) and builds one reader per topic, both in
// the same consumer group.
func configure() {
	uploadTopic := utils.GetenvOrFail("UPLOAD_TOPIC")
	eventsTopic := utils.GetenvOrFail("EVENTS_TOPIC")
	kafkaAddress := utils.GetenvOrFail("KAFKA_ADDRESS")
	kafkaGroup := utils.GetenvOrFail("KAFKA_GROUP")
	utils.Log("KafkaAddress", kafkaAddress).Info("Connecting to kafka")
	uploadConfig := kafka.ReaderConfig{
		Brokers: []string{kafkaAddress},
		Topic: uploadTopic,
		GroupID: kafkaGroup,
		MinBytes: 1,
		MaxBytes: 10e6, // 10MB (the previous "1MB" comment was wrong)
	}
	uploadReader = kafka.NewReader(uploadConfig)
	// The events reader shares every setting except the topic.
	eventsConfig := uploadConfig
	eventsConfig.Topic = eventsTopic
	eventsReader = kafka.NewReader(eventsConfig)
}
func shutdown() {
err := uploadReader.Close()
if err != nil {
utils.Log("err", err.Error()).Error("unable to shutdown Kafka reader")
}
err = eventsReader.Close()
if err != nil {
utils.Log("err", err.Error()).Error("unable to shutdown Kafka reader")
}
}
// baseListener pumps messages from reader into handler forever. Any read
// error is logged and escalated to a panic, killing the process so the
// orchestrator can restart it.
func baseListener(reader *kafka.Reader, handler func(message kafka.Message)) {
	for {
		m, err := reader.ReadMessage(context.Background())
		if err != nil {
			utils.Log("err", err.Error()).Error("unable to read message from Kafka reader")
			panic(err)
		}
		handler(m)
	}
}
// logHandler logs a received message's topic and raw payload.
func logHandler(m kafka.Message) {
	utils.Log().Info("Received message [", m.Topic,"] ", string(m.Value))
}
// runMetrics serves Prometheus metrics (and thereby the readiness probe)
// on :8081; it panics when the web app fails to start.
func runMetrics() {
	// create web app
	app := gin.New()
	prometheus := ginprometheus.NewPrometheus("gin")
	prometheus.Use(app)
	err := app.Run(":8081")
	if err != nil {
		utils.Log("err", err.Error()).Error()
		panic(err)
	}
}
// RunListener wires everything together: the metrics endpoint, the Kafka
// readers and one consumer goroutine per topic, then blocks forever.
func RunListener() {
	utils.Log().Info("listener starting")
	// Start a web server for handling metrics so that readiness probe works
	go runMetrics()
	configure()
	defer shutdown()
	go baseListener(uploadReader, logHandler)
	go baseListener(eventsReader, logHandler)
	// Just block. Any error will panic and kill the process.
	<- make(chan bool)
}
|
/*
* @lc app=leetcode.cn id=4 lang=golang
*
* [4] 寻找两个正序数组的中位数
*/
package solution
import "math"
// @lc code=start

// findMedianSortedArrays returns the median of the two sorted slices as a
// float64: for an even combined length the mean of the two middle elements,
// otherwise the single middle element (both target positions coincide).
//
// It walks the virtual merged sequence with two cursors in O((m+n)/2) time
// and O(1) extra space. BUG FIX: the previous version appended MaxInt64
// sentinels to nums1/nums2, which could write into the callers' backing
// arrays when spare capacity existed; this version never mutates its inputs.
func findMedianSortedArrays(nums1, nums2 []int) float64 {
	totalLen := len(nums1) + len(nums2)
	var pos1, pos2 int
	if totalLen%2 == 0 {
		pos1, pos2 = totalLen/2-1, totalLen/2
	} else {
		pos1, pos2 = totalLen/2, totalLen/2
	}
	// valAt acts as the old sentinel: an exhausted cursor compares as
	// "infinitely large" so the other slice keeps being consumed.
	valAt := func(s []int, i int) int {
		if i < len(s) {
			return s[i]
		}
		return math.MaxInt64
	}
	var target1, target2 int
	p, q := 0, 0
	for acc := 0; acc <= pos2; acc++ {
		var v int
		if valAt(nums1, p) <= valAt(nums2, q) {
			v = valAt(nums1, p)
			p++
		} else {
			v = valAt(nums2, q)
			q++
		}
		if acc == pos1 {
			target1 = v
		}
		if acc == pos2 {
			target2 = v
		}
	}
	return float64(target1+target2) / 2
}
// @lc code=end
|
package action
import (
"context"
"log"
"github.com/artemrys/go-all-repos/internal/config"
"github.com/artemrys/go-all-repos/internal/helpers"
"github.com/artemrys/go-all-repos/internal/repo"
"github.com/google/go-github/github"
)
// GoFmtAction declares "go fmt" action.
// It bundles everything Do needs: the cloned repo, an authenticated GitHub
// client, and whether to skip the push/pull-request step.
type GoFmtAction struct {
	Repo *repo.Repo
	GithubClient *github.Client
	dryRun bool
}
// NewGoFmtAction returns a new instance of GoFmtAction.
// dryRun is copied out of the global config; repo and githubClient are
// stored as-is.
func NewGoFmtAction(repo *repo.Repo, githubClient *github.Client, config *config.Config) *GoFmtAction {
	return &GoFmtAction{
		Repo: repo,
		GithubClient: githubClient,
		dryRun: config.DryRun,
	}
}
// Do does "go fmt" action for a particular repo.
//
// It creates a work branch in the cloned repo, runs go fmt, stages and
// commits everything, and — unless dryRun is set — pushes the branch and
// opens a pull request against master.
//
// NOTE(review): commit and push run unconditionally, even when go fmt
// changed nothing; errors from the git/run helpers are not surfaced here —
// confirm the helpers log-and-continue as assumed.
func (a GoFmtAction) Do() {
	runPath := a.Repo.ClonedPath
	// Work on a dedicated branch so master is never touched directly.
	helpers.RunGit(
		[]string{
			"checkout",
			"-b",
			"go-all-repos-update",
		},
		runPath)
	helpers.Run(
		"/usr/local/go/bin/go",
		[]string{
			"/usr/local/go/bin/go",
			"fmt",
		},
		runPath,
	)
	helpers.RunGit(
		[]string{
			"add",
			".",
		},
		runPath,
	)
	helpers.RunGit(
		[]string{
			"commit",
			"-m",
			"[go-all-repos] update",
		},
		runPath,
	)
	if !a.dryRun {
		helpers.RunGit(
			[]string{
				"push",
				"origin",
				"-u",
				"go-all-repos-update",
			},
			runPath,
		)
		pullRequest := &github.NewPullRequest{
			Title: github.String("[go-all-repos] update"),
			Head: github.String("go-all-repos-update"),
			Base: github.String("master"),
		}
		resp, _, err := a.GithubClient.PullRequests.Create(context.Background(), "artemrys", a.Repo.Name, pullRequest)
		if err != nil {
			log.Printf("Error while creating pull request for %v: %v\n", a.Repo, err)
		} else {
			log.Printf("Created pull request for %v: %v\n", a.Repo, resp)
		}
	}
}
|
package configuration
import (
"flag"
"reflect"
)
// NewFlagProvider registers one string command-line flag per (possibly
// nested) field of the struct pointed to by ptrToCfg, then parses the
// command line.
//
// NOTE(review): calling flag.Parse here means every provider must be
// constructed before any other package registers flags — confirm the
// initialization order callers rely on.
func NewFlagProvider(ptrToCfg interface{}) flagProvider {
	fp := flagProvider{flags: map[string]func() *string{}}
	fp.initFlagProvider(ptrToCfg)
	flag.Parse()
	return fp
}
// flagProvider maps a config key (taken from a field's "flag" or "json"
// tag) to a getter for the parsed flag's value.
type flagProvider struct {
	flags map[string]func() *string
}
// initFlagProvider walks the struct pointed to by i, recursing into nested
// structs and registering a flag for every leaf field. It panics when i is
// not a pointer (to a struct).
func (fp flagProvider) initFlagProvider(i interface{}) {
	var (
		t = reflect.TypeOf(i)
		v = reflect.ValueOf(i)
	)
	switch t.Kind() {
	case reflect.Ptr:
		t = t.Elem()
		v = v.Elem()
	default:
		panic("not a pointer to a struct")
	}
	for i := 0; i < t.NumField(); i++ {
		// Nested structs are flattened: recurse with the field's address.
		if t.Field(i).Type.Kind() == reflect.Struct {
			fp.initFlagProvider(v.Field(i).Addr().Interface())
			continue
		}
		fp.getValFromFlags(t.Field(i))
	}
}
// getValFromFlags registers a string flag for the given struct field —
// keyed by its "flag" tag, falling back to the "json" tag — and stores a
// getter for the flag's value. Untagged fields and already-registered keys
// are skipped.
func (fp flagProvider) getValFromFlags(field reflect.StructField) {
	key := getFlagTag(field)
	if len(key) == 0 {
		logf("flagProvider: getFlagTag returns empty value")
		// if "flag" is not set try to use regular json tag
		key = getJSONTag(field)
	}
	if len(key) == 0 {
		logf("flagProvider: key [%s] is empty", key)
		// field doesn't have a proper tag
		return
	}
	if _, ok := fp.flags[key]; ok {
		// BUG FIX: this branch means the flag is ALREADY registered; the
		// old message ("cannot find value for key") described the opposite
		// case and was swapped with the one in Provide.
		logf("flagProvider: flag for key [%s] is already registered", key)
		return
	}
	valStr := flag.String(key, "", "")
	fp.flags[key] = func() *string {
		return valStr
	}
}
// Provide looks up the flag registered for field and writes its value into
// v. It reports whether a non-empty value was applied.
func (fp flagProvider) Provide(field reflect.StructField, v reflect.Value) bool {
	key := getFlagTag(field)
	if len(key) == 0 {
		logf("flagProvider: getFlagTag returns empty value")
		// if "flag" is not set try to use regular json tag
		key = getJSONTag(field)
	}
	if len(key) == 0 {
		logf("flagProvider: key is empty")
		// field doesn't have proper tags
		return false
	}
	if len(fp.flags) == 0 {
		logf("flagProvider: map of flags is empty, nothing to fetch")
		return false
	}
	fn, ok := fp.flags[key]
	if !ok {
		// BUG FIX: reaching here means NO flag was registered for the key;
		// the old message ("already exists") described the opposite case
		// and was swapped with the one in getValFromFlags.
		logf("flagProvider: cannot find flag for key [%s]", key)
		return false
	}
	val := fn()
	setField(field, v, *val)
	logf("flagProvider: set [%s] to field [%s] with tags [%v]", *val, field.Name, field.Tag)
	return len(*val) > 0
}
|
package main
import (
"log"
"math/rand"
"reflect"
)
// color enumerates the palette; Generate only ever picks red/green/blue,
// and IsCorrect rejects black and white.
type color int
const (
	red color = iota
	green
	blue
	black
	white
)
// person is a random-testing fixture whose invariants (name length 1-32,
// age 0-100, favorite color not black/white) are produced by Generate and
// re-checked by IsCorrect.
type person struct {
	name string
	age int
	favoriteColor color
}
// Generate builds a random person satisfying the IsCorrect invariants and
// logs it. The signature matches testing/quick's Generator interface;
// helpers stringWithLength/intBetween/oneOfColor are defined elsewhere in
// the package.
func (p person) Generate(rand *rand.Rand, size int) reflect.Value {
	randomP := person{
		name: stringWithLength(rand, intBetween(rand, 1, 32)),
		age: intBetween(rand, 0, 100),
		favoriteColor: oneOfColor(red, green, blue),
	}
	log.Printf("person: %#v", randomP)
	return reflect.ValueOf(randomP)
}
// IsCorrect reports whether p satisfies the generator's invariants:
// a 1-32 byte name, an age in [0, 100], and a favorite color that is
// neither black nor white.
func (p person) IsCorrect() bool {
	nameOK := len(p.name) >= 1 && len(p.name) <= 32
	ageOK := p.age >= 0 && p.age <= 100
	colorOK := p.favoriteColor != black && p.favoriteColor != white
	return nameOK && ageOK && colorOK
}
|
package endpoints
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/go-kit/kit/endpoint"
kithttp "github.com/go-kit/kit/transport/http"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/sumelms/microservice-course/internal/matrix/domain"
"github.com/sumelms/microservice-course/pkg/validator"
)
// updateSubjectRequest is the inbound payload for the update-subject
// endpoint. UUID is filled from the URL path; the remaining fields come
// from the JSON body and are checked by the validator tags.
type updateSubjectRequest struct {
	UUID uuid.UUID `json:"uuid" validate:"required"`
	Code string `json:"code" validate:"required,max=45"`
	Name string `json:"name" validate:"required,max=100"`
	Objective string `json:"objective" validate:"max=245"`
	Credit float32 `json:"credit"`
	Workload float32 `json:"workload"`
}
// updateSubjectResponse mirrors the updated domain.Subject sent back to the
// client, including the server-maintained timestamps.
type updateSubjectResponse struct {
	UUID uuid.UUID `json:"uuid"`
	Code string `json:"code" validate:"required,max=45"`
	Name string `json:"name" validate:"required,max=100"`
	Objective string `json:"objective,omitempty" validate:"max=245"`
	Credit float32 `json:"credit,omitempty"`
	Workload float32 `json:"workload,omitempty"`
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
}
// NewUpdateSubjectHandler assembles the go-kit HTTP server for the
// update-subject endpoint from its decode/endpoint/encode triple.
func NewUpdateSubjectHandler(s domain.ServiceInterface, opts ...kithttp.ServerOption) *kithttp.Server {
	return kithttp.NewServer(
		makeUpdateSubjectEndpoint(s),
		decodeUpdateSubjectRequest,
		encodeUpdateSubjectResponse,
		opts...,
	)
}
// makeUpdateSubjectEndpoint builds the endpoint that validates an
// updateSubjectRequest, maps it onto a domain.Subject and delegates the
// update to the service.
//nolint:dupl
func makeUpdateSubjectEndpoint(s domain.ServiceInterface) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		req, ok := request.(updateSubjectRequest)
		if !ok {
			return nil, fmt.Errorf("invalid argument")
		}
		v := validator.NewValidator()
		if err := v.Validate(req); err != nil {
			return nil, err
		}
		// Round-trip through JSON to copy the request onto the domain type;
		// the struct tags on both sides line up.
		// BUG FIX: the Marshal error was previously discarded with `_`.
		c := domain.Subject{}
		data, err := json.Marshal(req)
		if err != nil {
			return nil, err
		}
		if err := json.Unmarshal(data, &c); err != nil {
			return nil, err
		}
		if err := s.UpdateSubject(ctx, &c); err != nil {
			return nil, err
		}
		return updateSubjectResponse{
			UUID: c.UUID,
			Code: c.Code,
			Name: c.Name,
			Objective: c.Objective,
			Credit: c.Credit,
			Workload: c.Workload,
			CreatedAt: c.CreatedAt,
			UpdatedAt: c.UpdatedAt,
		}, nil
	}
}
// decodeUpdateSubjectRequest parses the JSON body into an
// updateSubjectRequest and overrides its UUID with the {uuid} path variable.
func decodeUpdateSubjectRequest(_ context.Context, r *http.Request) (interface{}, error) {
	vars := mux.Vars(r)
	id, ok := vars["uuid"]
	if !ok {
		return nil, fmt.Errorf("invalid argument")
	}
	var req updateSubjectRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		return nil, err
	}
	// BUG FIX: uuid.MustParse panicked the server on a malformed UUID in
	// the URL — client input must produce an error, not a panic.
	uid, err := uuid.Parse(id)
	if err != nil {
		return nil, fmt.Errorf("invalid uuid: %w", err)
	}
	req.UUID = uid
	return req, nil
}
// encodeUpdateSubjectResponse writes the endpoint's response as JSON using
// go-kit's default encoder.
func encodeUpdateSubjectResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {
	return kithttp.EncodeJSONResponse(ctx, w, response)
}
|
// Copyright 2018 Andrew Bates
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plm
import "github.com/abates/insteon"
// connection adapts the insteon packet-level send/receive channel API onto
// the PLM command stream: outgoing requests are wrapped in sendCmd before
// going upstream, and only incoming packets whose command appears in
// matches (or all packets, when matches is empty) are forwarded to recvCh.
type connection struct {
	sendCmd Command
	matches []Command
	sendCh chan *insteon.PacketRequest
	upstreamSendCh chan<- *CommandRequest
	recvCh chan []byte
	upstreamRecvCh <-chan *Packet
}
// newConnection builds a connection bridging the given upstream channels
// and starts its event loop goroutine. recvCmds, when supplied, act as a
// receive filter (see connection.receive).
func newConnection(sendCh chan<- *CommandRequest, recvCh <-chan *Packet, sendCmd Command, recvCmds ...Command) *connection {
	conn := &connection{
		sendCmd: sendCmd,
		matches: recvCmds,
		sendCh: make(chan *insteon.PacketRequest, 1),
		upstreamSendCh: sendCh,
		recvCh: make(chan []byte, 1),
		upstreamRecvCh: recvCh,
	}
	go conn.process()
	return conn
}
// process is the connection's event loop: it forwards send requests
// upstream and filters received packets, tearing everything down when
// either side's channel closes.
//
// NOTE(review): it closes upstreamSendCh, a channel this type only sends
// on; if the upstream owner ever closes it too this double-close would
// panic — confirm the ownership convention.
func (conn *connection) process() {
	for {
		select {
		case request, open := <-conn.sendCh:
			if !open {
				close(conn.upstreamSendCh)
				close(conn.recvCh)
				return
			}
			conn.send(request)
		case packet, open := <-conn.upstreamRecvCh:
			if !open {
				close(conn.upstreamSendCh)
				close(conn.recvCh)
				return
			}
			conn.receive(packet)
		}
	}
}
// send wraps the packet request in a CommandRequest, waits synchronously
// for the upstream side to complete it, and propagates the result back to
// the original requester.
func (conn *connection) send(request *insteon.PacketRequest) {
	doneCh := make(chan *CommandRequest)
	payload := request.Payload
	// PLM expects that the payload begins with the
	// destinations address so we have to slice off
	// the src address
	if conn.sendCmd == CmdSendInsteonMsg && len(payload) > 3 {
		payload = payload[3:]
	}
	conn.upstreamSendCh <- &CommandRequest{Command: conn.sendCmd, Payload: payload, DoneCh: doneCh}
	upstreamRequest := <-doneCh
	request.Err = upstreamRequest.Err
	request.DoneCh <- request
}
// receive forwards the packet's payload to recvCh when either no command
// filter is configured or the packet's command matches one of the filters;
// everything else is dropped.
func (conn *connection) receive(packet *Packet) {
	if len(conn.matches) == 0 {
		conn.recvCh <- packet.Payload
		return
	}
	for _, match := range conn.matches {
		if match == packet.Command {
			conn.recvCh <- packet.Payload
			return
		}
	}
}
|
package offer_merge
import "testing"
// TestSolve exercises merge in place: a has three real values followed by
// two slots of spare room (marked -1). The result is only logged, never
// asserted — presumably the third argument is the count of real elements
// in a; confirm against merge's signature.
func TestSolve(t *testing.T) {
	a := []int{1, 2, 5, -1, -1}
	merge(a, []int{3, 8}, 3)
	t.Log(a)
}
|
package main
import (
"archive/zip"
"encoding/xml"
"fmt"
"io"
// "strings"
)
// main opens Document.docx as a ZIP archive, locates the main document part
// (word/document.xml) and hands it to ProcessDocument.
func main() {
	r, err := zip.OpenReader("Document.docx")
	if err != nil {
		panic(err)
	}
	defer r.Close()
	// Iterate through the files in the archive
	for _, f := range r.File {
		//fmt.Printf("File %s\n", f.Name)
		switch {
		case f.Name == "word/document.xml":
			rc, err := f.Open()
			if err != nil {
				panic(err)
			}
			// defer in a loop only fires at function exit — harmless here
			// since at most one entry matches this name.
			defer rc.Close()
			ProcessDocument(rc)
		}
	}
}
type Body struct {
Paragraphs []Paragraph `xml:"p"`
}
type Paragraph struct {
Runs []Run `xml:"r"`
}
type Run struct {
Texts []string `xml:"t"`
Properties RunProperties `xml:"rPr"`
}
// Run Properties for the Paragraph Mark
type RunProperties struct {
Fonts struct {
ASCII string `xml:"ascii,attr"`
HighANSI string `xml:"hAnsi,attr"`
ComplexScript string `xml:"cs,attr"`
ASCIIThemeFont string `xml:"asciiTheme,attr"`
ComplexScriptThemeFont string `xml:"cstheme,attr"`
} `xml:"rFonts"`
Color struct {
Val string `xml:"val,attr"`
} `xml:"color"`
}
func ProcessDocument(in io.Reader) bool {
d := xml.NewDecoder(in)
//s := -1
//var txt string
//var tag string
body := Body{}
for {
t, _ := d.Token()
if t == nil {
break
}
switch se := t.(type) {
case xml.StartElement:
if se.Name.Local == "p" {
p := Paragraph{}
d.DecodeElement(&p, &se)
body.Paragraphs = append(body.Paragraphs, p)
}
}
}
BodyPrinter(body)
return true
}
func BodyPrinter(body Body) {
fmt.Printf("Body {\n")
for _, p := range body.Paragraphs {
fmt.Printf(" P {\n")
for _, r := range p.Runs {
fmt.Printf(" R {\n")
fmt.Printf(" %+v", r)
fmt.Printf(" }\n")
}
fmt.Printf(" }\n")
}
fmt.Printf(" }\n")
}
|
package main
import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strings"

	"github.com/gorilla/mux"
)
var (
outputDir = "files"
)
// main mounts the mux router on the default serve mux and serves on :8080.
func main() {
	http.Handle("/", handlers())
	log.Printf("Listening on port 8080 ...")
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// handlers builds the router: POST /upload stores an image on disk,
// GET /images/{img} serves it back.
func handlers() *mux.Router {
	r := mux.NewRouter().StrictSlash(true)
	r.HandleFunc("/upload", uploadHandler).Methods("POST")
	r.HandleFunc("/images/{img}", imageHandler).Methods("GET")
	return r
}
// uploadHandler stores the multipart "image" field under outputDir and
// replies 201 with the URL at which the image can be fetched.
func uploadHandler(w http.ResponseWriter, r *http.Request) {
	image, header, err := r.FormFile("image")
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to get the image: %s", err.Error()), http.StatusBadRequest)
		return
	}
	defer image.Close()
	// SECURITY FIX: header.Filename is attacker-controlled; using it
	// verbatim allows path traversal ("../../..."). Keep only the base name.
	name := filepath.Base(header.Filename)
	// O_TRUNC so re-uploading a smaller file doesn't leave stale trailing
	// bytes from a previous, larger upload.
	f, err := os.OpenFile(outputDir+"/"+name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer f.Close()
	_, err = io.Copy(f, image)
	if err != nil {
		http.Error(w, "failed to copy the image to the file: "+err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusCreated)
	w.Write([]byte(fmt.Sprintf("%s://%s/images/%s", r.URL.Scheme, r.Host, name)))
}
// imageHandler serves a previously uploaded image from outputDir.
func imageHandler(w http.ResponseWriter, r *http.Request) {
	/** vars from gorilla mux empty, in test case, we do not execute the router */
	// BUG FIX: indexing hash[2] unconditionally panicked on short paths
	// such as "/"; guard the split result before using it.
	parts := strings.Split(r.URL.Path, "/")
	if len(parts) < 3 || parts[2] == "" {
		http.Error(w, "image not found", http.StatusNotFound)
		return
	}
	// filepath.Base keeps traversal names ("..", encoded slashes) from
	// escaping outputDir.
	filename := outputDir + "/" + filepath.Base(parts[2])
	image, err := ioutil.ReadFile(filename)
	if err != nil {
		http.Error(w, "image not found : "+err.Error(), http.StatusNotFound)
		return
	}
	w.Write(image)
}
|
package main
import (
"fmt"
"os"
"os/signal"
"strings"
suggest "github.com/picatz/suggest/core"
)
// init installs a SIGINT handler so Ctrl-C exits the process cleanly with
// status 0.
func init() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		for range c {
			os.Exit(0)
		}
	}()
}
var (
StatusNoArgs = 1
StatusGetErr = 2
StatusNoSuggestions = 3
)
// main joins all CLI arguments into one query, fetches suggestions for it
// via the suggest package and prints them one per line, exiting with a
// distinct status code per failure mode (see the Status* vars above).
func main() {
	args := os.Args[1:]
	if len(args) <= 0 {
		fmt.Println("no arguments given to get suggestions for")
		os.Exit(StatusNoArgs)
	}
	suggestions, err := suggest.Get(strings.Join(args, " "))
	if err != nil {
		fmt.Println(err)
		os.Exit(StatusGetErr)
	}
	if len(suggestions) == 0 {
		os.Exit(StatusNoSuggestions)
	}
	for _, suggestion := range suggestions {
		fmt.Println(suggestion)
	}
}
|
package mclock
import (
"time"
"github.com/aristanetworks/goarista/monotime"
)
// AbsTime represents absolute monotonic time.
// It is a time.Duration measured from an arbitrary, process-local origin,
// so values are only meaningful relative to one another.
type AbsTime time.Duration
// Now returns the current absolute monotonic time.
func Now() AbsTime {
	return AbsTime(monotime.Now())
}
|
package routes
import (
"github.com/labstack/echo"
"github.com/prometheus/client_golang/prometheus/promhttp"
// HOFSTADTER_START import
// HOFSTADTER_END import
)
// HOFSTADTER_START start
// HOFSTADTER_END start
// addPrometheusHandlers mounts the Prometheus scrape endpoint at
// GET /metrics under the given group. It always returns nil.
func addPrometheusHandlers(G *echo.Group) (err error) {
	group := G.Group("")
	group.GET("/metrics", echo.WrapHandler(promhttp.Handler()))
	return nil
}
// HOFSTADTER_BELOW
|
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"math"
"os"
"regexp"
"strconv"
"strings"
"github.com/christopherL91/Parser/toki"
)
// instruction is one parsed drawing command: moves/turns carry their
// argument in val, COLOR carries the hex code in color, and REP holds the
// repeated body in instructions.
type instruction struct {
	name string
	val int
	color string
	instructions []instruction
}
// Token kinds produced by the scanner, starting at 1.
const (
	NUMBER toki.Token = iota + 1
	STRING
	DOWN
	FORW
	LEFT
	BACK
	RIGHT
	UP
	COLORKEYWORD
	REP
	DOT
	COLOR
	OTHER
)
// Scanner definitions: patterns are tried in order, with OTHER as the
// catch-all. ("defintions" is misspelled but referenced by name in main.)
var (
	debug bool
	removeComments = regexp.MustCompile("%[^\\n]*")
	defintions = []toki.Def{
		{Token: STRING, Pattern: `\"`},
		{Token: FORW, Pattern: `FORW\s+`},
		{Token: LEFT, Pattern: `LEFT\s+`},
		{Token: DOWN, Pattern: "DOWN"},
		{Token: BACK, Pattern: "BACK"},
		{Token: REP, Pattern: `REP\s+`},
		{Token: NUMBER, Pattern: `[0-9]+`},
		{Token: COLORKEYWORD, Pattern: `COLOR\s+`},
		{Token: RIGHT, Pattern: "RIGHT"},
		{Token: UP, Pattern: "UP"},
		{Token: COLOR, Pattern: `\#[A-F 0-9]{6}`},
		{Token: DOT, Pattern: `\.`},
		{Token: OTHER, Pattern: `.*`},
	}
)
// init registers the -debug flag before main runs.
func init() {
	flag.BoolVar(&debug, "debug", false, "Debug")
	flag.Parse()
}
// main reads a program from stdin, strips %-comments, tokenizes it, parses
// the token stream and prints the evaluated drawing to stdout.
func main() {
	input, err := ioutil.ReadAll(os.Stdin) // Read input from stdin
	if err != nil {
		log.Fatal(err)
	}
	s := toki.NewScanner(defintions)
	noComments := removeComments.ReplaceAllString(string(input), "")
	s.SetInput(noComments)
	buffer := []*toki.Result{} // Holds all the tokens.
	for r := s.Next(); r.Token != toki.EOF; r = s.Next() {
		buffer = append(buffer, r) // Append new token to list.
	}
	if debug {
		prettyPrint(buffer)
		fmt.Println("Length of buffer:", len(buffer))
	}
	inst, _, err := parse(buffer, 0, false, false)
	if err != nil {
		log.Fatalln(err)
		os.Exit(-1) // NOTE(review): unreachable — log.Fatalln already exits
	}
	var buf bytes.Buffer
	pen := newPen()
	evaluateProgram(inst, &buf, pen)
	fmt.Print(buf.String())
}
// parse turns the token stream in buffer, starting at index i, into a list
// of instructions.
//
// parseone makes parse return after one complete instruction (used for an
// unquoted REP body); inrep means we are inside a quoted REP body, so a
// STRING token terminates the block instead of being an error.
//
// It returns the parsed instructions, the index of the first unconsumed
// token, and a syntax error carrying the offending line number on bad input.
func parse(buffer []*toki.Result, i int, parseone bool, inrep bool) ([]instruction, int, error) {
	instructions := []instruction{}
	for i < len(buffer) {
		switch buffer[i].Token {
		case STRING:
			// A closing quote is only legal while inside a REP body.
			if inrep {
				return instructions, i, nil
			}
			return nil, 0, syntaxError(buffer[i].Pos.Line)
		case UP:
			// NOTE(review): unlike DOWN, this does not verify that the next
			// token is a DOT before skipping it — confirm whether "UP"
			// without a trailing "." should really be accepted.
			instructions = append(instructions, instruction{name: "UP"})
			i += 2
		case DOWN:
			if len(buffer) <= i+1 {
				return nil, 0, syntaxError(buffer[i].Pos.Line)
			}
			// Next token must be a DOT
			if buffer[i+1].Token != DOT {
				return nil, 0, syntaxError(buffer[i+1].Pos.Line)
			}
			instructions = append(instructions, instruction{name: "DOWN"})
			i += 2
		case FORW, LEFT, BACK, RIGHT:
			// All four movement/turn commands share the same
			// "<KEYWORD> <NUMBER> <DOT>" shape; the previous four
			// copy-pasted cases are folded into one helper.
			inst, next, err := parseNumberArg(buffer, i, moveName(buffer[i].Token))
			if err != nil {
				return nil, 0, err
			}
			instructions = append(instructions, inst)
			i = next
		case COLORKEYWORD:
			if len(buffer) <= i+2 {
				return nil, 0, syntaxError(buffer[i].Pos.Line)
			}
			// Next token must be COLOR, then a DOT.
			if buffer[i+1].Token != COLOR {
				return nil, 0, syntaxError(buffer[i+1].Pos.Line)
			}
			if buffer[i+2].Token != DOT {
				return nil, 0, syntaxError(buffer[i+2].Pos.Line)
			}
			instructions = append(instructions, instruction{
				name:  "COLOR",
				color: strings.ToUpper(string(buffer[i+1].Value)),
			})
			i += 3
		case REP:
			if len(buffer) <= i+3 {
				return nil, 0, syntaxError(buffer[i].Pos.Line)
			}
			if buffer[i+1].Token != NUMBER {
				return nil, 0, syntaxError(buffer[i+1].Pos.Line)
			}
			if buffer[i+2].Token == STRING {
				// Quoted body: recurse until the closing quote shows up.
				subinstructions, nextpos, err := parse(buffer, i+3, false, true)
				if err != nil {
					return nil, 0, err
				}
				numberLine := buffer[i+1].Pos.Line
				stringLine := buffer[i+2].Pos.Line
				numberCol := buffer[i+1].Pos.Column
				stringCol := buffer[i+2].Pos.Column
				// They are on the same line
				if numberLine == stringLine {
					// 5" <- illegal: count and quote must not touch
					if stringCol-numberCol == 1 {
						return nil, 0, syntaxError(buffer[i+1].Pos.Line)
					}
				}
				num, _ := strconv.Atoi(string(buffer[i+1].Value))
				inst := instruction{
					name:         "REP",
					val:          num,
					instructions: subinstructions,
				}
				// No tokens left, and no `"` has shown up before.
				if len(buffer[nextpos:]) < 1 {
					return nil, 0, syntaxError(buffer[nextpos-1].Pos.Line)
				}
				if buffer[nextpos].Token != STRING {
					return nil, 0, syntaxError(buffer[nextpos].Pos.Line)
				}
				instructions = append(instructions, inst)
				i = nextpos + 1
			} else {
				// Unquoted body: exactly one following instruction repeats.
				subinstructions, nextpos, err := parse(buffer, i+2, true, false)
				if err != nil {
					return nil, 0, err
				}
				num, _ := strconv.Atoi(string(buffer[i+1].Value))
				instructions = append(instructions, instruction{
					name:         "REP",
					val:          num,
					instructions: subinstructions,
				})
				i = nextpos
			}
		default:
			// Found token that's invalid
			return nil, 0, syntaxError(buffer[i].Pos.Line)
		}
		if parseone {
			return instructions, i, nil
		}
	}
	return instructions, i, nil // Validation complete. Everything seems to work!
}

// parseNumberArg parses a "<KEYWORD> <NUMBER> <DOT>" command starting at
// index i, returning the instruction and the index just past the DOT.
func parseNumberArg(buffer []*toki.Result, i int, name string) (instruction, int, error) {
	if len(buffer) <= i+2 {
		return instruction{}, 0, syntaxError(buffer[i].Pos.Line)
	}
	if buffer[i+1].Token != NUMBER {
		return instruction{}, 0, syntaxError(buffer[i+1].Pos.Line)
	}
	if buffer[i+2].Token != DOT {
		return instruction{}, 0, syntaxError(buffer[i+2].Pos.Line)
	}
	num, _ := strconv.Atoi(string(buffer[i+1].Value))
	return instruction{name: name, val: num}, i + 3, nil
}

// moveName maps a movement/turn token to its instruction name.
func moveName(t toki.Token) string {
	switch t {
	case FORW:
		return "FORW"
	case LEFT:
		return "LEFT"
	case BACK:
		return "BACK"
	default:
		return "RIGHT"
	}
}
// evaluateProgram interprets the parsed instructions with the given pen,
// appending one "<color> <from> <to>" line to buffer per visible stroke,
// and returns the buffer's accumulated contents.
func evaluateProgram(program []instruction, buffer *bytes.Buffer, pen *Pen) string {
	for _, inst := range program {
		switch inst.name {
		case "FORW":
			drawMove(pen, buffer, float64(inst.val))
		case "BACK":
			drawMove(pen, buffer, -float64(inst.val))
		case "UP":
			pen.down = false
		case "DOWN":
			pen.down = true
		case "LEFT":
			pen.direction.rotateLeft(float64(inst.val) * math.Pi / 180.0)
		case "RIGHT":
			pen.direction.rotateRight(float64(inst.val) * math.Pi / 180.0)
		case "COLOR":
			pen.color = inst.color
		case "REP":
			for n := inst.val; n > 0; n-- {
				evaluateProgram(inst.instructions, buffer, pen)
			}
		}
	}
	return buffer.String()
}

// drawMove advances the pen dist units along its heading (negative dist
// moves backwards); when the pen is down, the stroke's color, start and end
// points are written to buffer.
func drawMove(pen *Pen, buffer *bytes.Buffer, dist float64) {
	if pen.down {
		buffer.WriteString(pen.color)
		buffer.WriteRune(' ')
		buffer.WriteString(pen.String())
		buffer.WriteRune(' ')
	}
	pen.currentVector.X += pen.direction.X * dist
	pen.currentVector.Y += pen.direction.Y * dist
	if pen.down {
		buffer.WriteString(pen.String())
		buffer.WriteRune('\n')
	}
}
// syntaxError reports a (Swedish, "syntax error on line N") syntax-error
// message for the given 1-based source line.
func syntaxError(line int) error {
	err := fmt.Errorf("Syntaxfel på rad %d", line)
	return err
}
|
package main
// main is a minimal snippet: it declares an empty-interface variable and
// discards it so the "declared and not used" compile error is avoided.
// The program performs no work.
func main() {
	var v interface{}
	_ = v
}
|
package util
import (
"unicode"
"github.com/Nv7-Github/Nv7Haven/eod/types"
)
// IsASCII reports whether every byte of s lies in the 7-bit ASCII range
// (<= unicode.MaxASCII). Any multi-byte UTF-8 sequence therefore fails.
func IsASCII(s string) bool {
	for _, b := range []byte(s) {
		if b > unicode.MaxASCII {
			return false
		}
	}
	return true
}
// Wildcards is the set of runes treated as wildcard/metacharacters by
// IsWildcard (SQL LIKE and glob-style specials, plus a few extras).
var Wildcards = map[rune]types.Empty{
	'%': {},
	'*': {},
	'?': {},
	'[': {},
	']': {},
	'!': {},
	'-': {},
	'#': {},
	'^': {},
	'_': {},
}
// IsWildcard reports whether s contains at least one rune from the
// Wildcards set.
func IsWildcard(s string) bool {
	for _, char := range s {
		if _, exists := Wildcards[char]; exists {
			return true
		}
	}
	return false
}
|
package dump
import (
"strings"
"testing"
)
type (
	// Named types exercising how Serialize renders definitions vs aliases.
	Integer int
	String string
	StringPtr *string
	StringAlias = string
	// car is an unexported struct with an interface-typed field.
	car struct {
		Speed int
		Owner interface{}
	}
	// Person mixes exported/unexported, slice, array, pointer and func
	// fields to stress the serializer.
	Person struct {
		Name String
		age int
		Interests []string
		friends [4]*Person
		Cars []*car
		action []func() string
	}
)
// TestSerialize is a table-driven check of Serialize's output for scalars,
// strings, pointers, byte slices, maps and slices; comparisons ignore all
// whitespace via equalNoSpace.
func TestSerialize(t *testing.T) {
	type args struct {
		originValue interface{}
	}
	tests := []struct {
		name string
		originValue interface{}
		wantSerialized string
	}{
		// TODO: Add test cases.
		{"byte", byte('a'), "a"},
		{"uint8", uint8('a'), "a"},
		{"int", 3, "3"},
		{"Integer", Integer(3), "3"},
		{"float", 0.3, "0.3"},
		{"string", "abc", `"abc"`},
		{"*string", ptrString("abc"), `"abc"`},
		{"String", String("abc"), `"abc"`},
		{"[]byte", []byte("abc"), `[]uint8 (len=3) "abc"`},
		{"map", map[string]int{"a": 1, "b": 2, "c": 3}, `map[string]int(len=3){"a"=>1"b"=>2"c"=>3}`},
		{"slice", []int{1, 3, 2}, "[]int(len=3)[0=>1\n1=>3\n2=>2]"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if gotSerialized := Serialize(tt.originValue); !equalNoSpace(gotSerialized, tt.wantSerialized) {
				t.Errorf("Serialize() = %v want %v", gotSerialized, tt.wantSerialized)
				// t.Errorf("Serialize() = %v escape = %v want %v", gotSerialized, escapeSpace(gotSerialized, " \t\n\r"), tt.wantSerialized)
			}
		})
	}
}
// ptrString returns a pointer to a copy of s — handy for pointer-typed
// literals in table-driven tests.
func ptrString(s string) *string {
	v := s
	return &v
}
// equalNoSpace reports whether a and b are equal once all whitespace
// (space, tab, newline, carriage return) has been stripped from both.
func equalNoSpace(a, b string) bool {
	const charset = " \t\n\r"
	return escapeSpace(a, charset) == escapeSpace(b, charset)
}
// escapeSpace returns a with every rune that occurs in charset removed.
func escapeSpace(a string, charset string) string {
	return strings.Map(func(r rune) rune {
		if strings.ContainsRune(charset, r) {
			return -1 // -1 drops the rune from the output
		}
		return r
	}, a)
}
|
package models
// Employees is one employee record, bound to both form fields and JSON
// keys via struct tags.
type Employees struct {
	ID int64 `form:"id" json:"id"`
	Name string `form:"name" json:"name"`
	City string `form:"city" json:"city"`
	Phone string `form:"phone" json:"phone"`
}
// EmployeeResponse is the API envelope: a status code, a human-readable
// message and the employee rows themselves.
type EmployeeResponse struct {
	Status int `json:"status"`
	Message string `json:"message"`
	Data []Employees
}
|
package extract
import (
"log"
"os"
"os/exec"
"path/filepath"
"sync"
"testing"
"time"
"github.com/gamejolt/joltron/test"
)
const (
xzFile = ".gj-bigTempFile.tar.xz"
xzURL = test.AWS + xzFile
xzChecksum = "ca292a1cfa2d93f6e07feffa6d53e836"
gzipFile = ".gj-bigTempFile.tar.gz"
gzipURL = test.AWS + gzipFile
gzipChecksum = "9c48bcb8e17b16e835b1b7c051ce4ef0"
)
// getFixtureOrDie downloads the fixture at url in the background, tracking
// completion via wg and panicking when the download or checksum fails.
//
// NOTE(review): nothing in this file calls it — TestMain uses test.DoOrDie
// instead; confirm whether this helper is still needed.
func getFixtureOrDie(wg *sync.WaitGroup, url, checksum string) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := <-test.DownloadFixture(url, checksum); err != nil {
			panic(err.Error())
		}
	}()
}
// TestMain fetches both archive fixtures in parallel before running the
// benchmark tests.
//
// NOTE(review): DownloadFixture is given the bare file names here, while
// getFixtureOrDie passes full URLs — confirm which form it expects.
func TestMain(m *testing.M) {
	// Parallelize fixture downloading
	wg := &sync.WaitGroup{}
	test.DoOrDie(test.DownloadFixture(xzFile, xzChecksum), wg)
	test.DoOrDie(test.DownloadFixture(gzipFile, gzipChecksum), wg)
	wg.Wait()
	os.Exit(m.Run())
}
// TestBenchmarkGzip times 100 in-process extractions of the gzip fixture
// and logs the elapsed seconds.
func TestBenchmarkGzip(t *testing.T) {
	_, dir := test.PrepareNextTest(t)
	now := time.Now()
	benchmarkExtract(t, dir, gzipFile)
	// time.Since is the idiomatic spelling of time.Now().Sub(now).
	delta := time.Since(now).Seconds()
	log.Printf("Delta extractor gzip: %f\n", delta)
}
// TestBenchmarkOsGzip times 100 extractions of the gzip fixture with the
// system tar binary, as a baseline for the in-process extractor.
func TestBenchmarkOsGzip(t *testing.T) {
	_, dir := test.PrepareNextTest(t)
	test.RequireFixture(t, gzipFile, dir, ".tempDownload")
	now := time.Now()
	for i := 0; i < 100; i++ {
		cmd := exec.Command("tar", "-xzf", filepath.Join(dir, ".tempDownload"), "--warning=none", "-C", dir)
		output, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatal(err.Error() + ":" + string(output))
		}
		test.OS.RemoveAll(dir)
		test.RequireFixture(t, gzipFile, dir, ".tempDownload")
	}
	// time.Since is the idiomatic spelling of time.Now().Sub(now).
	delta := time.Since(now).Seconds()
	log.Printf("Delta os gzip: %f\n", delta)
}
// TestBenchmarkXz times 100 in-process extractions of the xz fixture and
// logs the elapsed seconds.
func TestBenchmarkXz(t *testing.T) {
	_, dir := test.PrepareNextTest(t)
	now := time.Now()
	benchmarkExtract(t, dir, xzFile)
	// time.Since is the idiomatic spelling of time.Now().Sub(now).
	delta := time.Since(now).Seconds()
	log.Printf("Delta extractor xz: %f\n", delta)
}
// TestBenchmarkOsXz times 100 extractions of the xz fixture with the system
// tar binary, as a baseline for the in-process extractor.
func TestBenchmarkOsXz(t *testing.T) {
	_, dir := test.PrepareNextTest(t)
	test.RequireFixture(t, xzFile, dir, ".tempDownload")
	now := time.Now()
	for i := 0; i < 100; i++ {
		cmd := exec.Command("tar", "-xJf", filepath.Join(dir, ".tempDownload"), "--warning=none", "-C", dir)
		output, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatal(err.Error() + ":" + string(output))
		}
		test.OS.RemoveAll(dir)
		test.RequireFixture(t, xzFile, dir, ".tempDownload")
	}
	// time.Since is the idiomatic spelling of time.Now().Sub(now).
	delta := time.Since(now).Seconds()
	log.Printf("Delta os xz: %f\n", delta)
}
// benchmarkExtract runs 100 extract/cleanup cycles of the given fixture
// inside dir, failing the test on any extraction error.
func benchmarkExtract(t *testing.T, dir, fixture string) {
	test.RequireFixture(t, fixture, dir, ".tempDownload")
	for i := 0; i < 100; i++ {
		extract, err := NewExtraction(nil, filepath.Join(dir, ".tempDownload"), dir, test.OS, nil)
		if err != nil {
			t.Fatal(err.Error())
		}
		// Block until the asynchronous extraction signals completion.
		<-extract.Done()
		if err := extract.Result().Err; err != nil {
			t.Fatal(err.Error())
		}
		// Reset the working dir so the next iteration starts clean.
		test.OS.RemoveAll(dir)
		test.RequireFixture(t, fixture, dir, ".tempDownload")
	}
}
|
package mytest
import (
"fmt"
"regexp"
"testing"
"time"
)
// TestSplit demonstrates splitting a comma-separated string while trimming
// whitespace around each comma via the regexp separator.
func TestSplit(t *testing.T) {
	ucmValue := "TH,CN"
	parts := regexp.MustCompile(`\s*,\s*`).Split(ucmValue, -1)
	fmt.Println(parts)
}
// TestArrayDefault shows that an empty slice literal is NOT nil, so the
// comparison below never fires and nothing is printed.
func TestArrayDefault(t *testing.T) {
	input := []interface{}{}
	if input == nil {
		fmt.Println("is nil")
	}
}
// TestFloatDivide shows that converting to int64 truncates: 24.9/24.0 is
// ~1.0375, so 1 is printed.
func TestFloatDivide(t *testing.T) {
	hours := float64(24.9)
	days := int64(hours / 24.0)
	fmt.Println(days)
}
// TestTime prints assorted components of the current wall-clock time.
func TestTime(t *testing.T) {
	now := time.Now()
	fmt.Println(now.Year())
	fmt.Println(now.Format("20060102"))
	fmt.Println(now.Day())
	fmt.Println(now.Date())
	fmt.Println(now.Weekday())
	fmt.Println(now.YearDay())
}
// TestReplace strips backtick characters from a string via a regexp.
func TestReplace(t *testing.T) {
	str := "`name_pic`"
	fmt.Println(str)
	backtick := regexp.MustCompile("`")
	// ReplaceAllLiteralString with an empty replacement is equivalent to
	// ReplaceAllString here (no $-expansion in an empty template).
	fmt.Println(backtick.ReplaceAllLiteralString(str, ""))
}
// TestValueConvert prints the default string form of several value types,
// exercising convert with the same inputs and order as before.
func TestValueConvert(t *testing.T) {
	samples := []interface{}{
		"20",
		float64(20.21),
		float32(32.32),
		int64(20),
		int(21),
		"20",
	}
	for _, s := range samples {
		fmt.Println(convert(s))
	}
}
// convert renders any value using its default (%v) string representation.
func convert(val interface{}) string {
	return fmt.Sprint(val)
}
// MapOne wraps a name-lookup map. The zero value leaves EntityNames nil,
// so it must be allocated (e.g. with make) before any write.
type MapOne struct {
	EntityNames map[string]string
}
func TestMap(t *testing.T) {
ev := &MapOne{
}
ev.EntityNames["test"] = "hello"
fmt.Println(ev)
} |
package util
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"reflect"
"strings"
)
// CDNJS_API_URL is the cdnjs search/metadata API endpoint.
const CDNJS_API_URL = "https://api.cdnjs.com/libraries"
// CDNJS_AJAX_URL is the cdnjs asset host used to build download links.
const CDNJS_AJAX_URL = "http://cdnjs.cloudflare.com/ajax/libs"
// GenerateLink - Generate download link of the lib
func GenerateLink(lib, ver, file string) string {
return fmt.Sprintf("%s/%s/%s/%s", CDNJS_AJAX_URL, lib, ver, file)
}
// SearchWithName - search libs with lib names.
// It returns an empty SearchResults on any network or decode failure.
func SearchWithName(name string) SearchResults {
	var sr SearchResults
	resp, err := http.Get(fmt.Sprintf("%s?search=%s", CDNJS_API_URL, name))
	if err != nil {
		// resp is nil when err != nil; the original deferred
		// resp.Body.Close() before this check, which panics on error.
		return SearchResults{}
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return SearchResults{}
	}
	if err := json.Unmarshal(bytes, &sr); err != nil {
		return SearchResults{}
	}
	return sr
}
// InfoPackage - get info of one lib.
// It returns a zero Package on any network, read or decode failure.
func InfoPackage(name string) Package {
	var p Package
	resp, err := http.Get(fmt.Sprintf("%s/%s", CDNJS_API_URL, name))
	if err != nil {
		// resp is nil when err != nil; the original deferred
		// resp.Body.Close() before this check, which panics on error.
		return Package{}
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return Package{}
	}
	if err := json.Unmarshal(bytes, &p); err != nil {
		return Package{}
	}
	return p
}
// PackageURLWithVersion - get the urls with specific version, latest version default
func PackageURLWithVersion(name, version string) []string {
	p := InfoPackage(name)
	if reflect.DeepEqual(Package{}, p) {
		return []string{}
	}
	if version == "" {
		// Fall back to the library's latest version.
		version = p.Version
	}
	var rs []string
	for _, asset := range p.Assets {
		if asset.Version != version {
			continue
		}
		for _, f := range asset.Files {
			rs = append(rs, GenerateLink(name, version, f))
		}
		break
	}
	return rs
}
// PackageTagWithVersion - get lib tag with specific version , latest version default
func PackageTagWithVersion(name, version string) []string {
	urls := PackageURLWithVersion(name, version)
	var tags []string
	for _, u := range urls {
		if u == "" {
			continue
		}
		if strings.HasSuffix(u, "js") {
			tags = append(tags, fmt.Sprintf(`<script src="%s"></script>`, u))
		}
		if strings.HasSuffix(u, "css") {
			tags = append(tags, fmt.Sprintf(`<link href="%s" rel="stylesheet">`, u))
		}
	}
	return tags
}
// DownloadFileWithVersion - download the files with specific version, latest version default
func DownloadFileWithVersion(name, version string) {
	p := InfoPackage(name)
	if reflect.DeepEqual(Package{}, p) {
		return
	}
	if version == "" {
		// Fall back to the library's latest version.
		version = p.Version
	}
	for _, asset := range p.Assets {
		if asset.Version != version {
			continue
		}
		for _, f := range asset.Files {
			link := GenerateLink(name, version, f)
			log.Printf("Downloading……%s\n", f)
			if err := DownloadFile(f, link); err != nil {
				log.Println(err)
			}
		}
	}
}
// DownloadFile - download one file to current folder.
// It creates any missing parent directories of filename first.
func DownloadFile(filename, url string) error {
	resp, err := http.Get(url)
	if err != nil {
		// resp is nil when err != nil; the original deferred
		// resp.Body.Close() before this check, which panics on error.
		return err
	}
	defer resp.Body.Close()
	// solve the case when filename has nested folder
	basedir := filepath.Dir(filename)
	if _, err := os.Stat(basedir); os.IsNotExist(err) {
		if err := os.MkdirAll(basedir, os.ModePerm); err != nil {
			return err
		}
	}
	file, err := os.Create(filename)
	if err != nil {
		// Defer the close only once the file handle is valid.
		return err
	}
	defer file.Close()
	_, err = io.Copy(file, resp.Body)
	return err
}
|
package html
import (
"fmt"
"io"
)
// Row colors: the default text/background, the heading-row background,
// and the alternate background used for zebra striping in openRow.
const (
	defaultBackground = "white"
	defaultForeground = "black"
	headingBackground = "#f0f0f0"
	highlightBackground = "#fafafa"
)
// newTableWriter writes the table heading row (when columns are given) to
// writer and returns a TableWriter for emitting body rows.
func newTableWriter(writer io.Writer, doHighlighting bool,
	columns []string) (*TableWriter, error) {
	if len(columns) > 0 {
		if doHighlighting {
			fmt.Fprintf(writer, " <tr style=\"background-color:%s\">\n",
				headingBackground)
		} else {
			fmt.Fprintln(writer, " <tr>")
		}
		for _, column := range columns {
			fmt.Fprintf(writer, " <th>%s</th>\n", column)
		}
		// Close the heading row only when one was opened. The original
		// emitted a stray </tr> even when columns was empty.
		fmt.Fprintln(writer, " </tr>")
	}
	return &TableWriter{
		doHighlighting: doHighlighting,
		writer: writer,
	}, nil
}
// closeRow terminates the currently open table row.
func (tw *TableWriter) closeRow() error {
	_, err := fmt.Fprintln(tw.writer, " </tr>")
	return err
}
// openRow starts a <tr> with the given colors, defaulting empty values.
// To zebra-stripe, a default-background row following another
// default-background row is switched to highlightBackground, tracked via
// tw.lastBackground.
func (tw *TableWriter) openRow(foreground, background string) error {
	if foreground == "" {
		foreground = defaultForeground
	}
	if background == "" {
		background = defaultBackground
	}
	// Two consecutive default-background rows: highlight the second.
	if background == defaultBackground &&
		tw.lastBackground == defaultBackground {
		background = highlightBackground
	}
	var err error
	if background == defaultBackground {
		if foreground == defaultForeground {
			_, err = fmt.Fprintln(tw.writer, " <tr>")
		} else {
			_, err = fmt.Fprintf(tw.writer, " <tr style=\"color:%s\">\n",
				foreground)
		}
	} else {
		if foreground == defaultForeground {
			_, err = fmt.Fprintf(tw.writer,
				" <tr style=\"background-color:%s\">\n", background)
		} else {
			_, err = fmt.Fprintf(tw.writer,
				" <tr style=\"background-color:%s;color:%s\">\n",
				background, foreground)
		}
	}
	// Remember the effective background for the next row's striping.
	tw.lastBackground = background
	return err
}
// writeData emits one <td> cell, wrapping the text in a <font> tag when a
// non-default foreground color is requested.
func (tw *TableWriter) writeData(foreground, data string) error {
	if foreground == "" {
		foreground = defaultForeground
	}
	var err error
	if foreground != defaultForeground {
		_, err = fmt.Fprintf(tw.writer,
			" <td><font color=\"%s\">%s</font></td>\n",
			foreground, data)
	} else {
		_, err = fmt.Fprintf(tw.writer, " <td>%s</td>\n", data)
	}
	return err
}
// writeRow emits a complete row of plain <td> cells with the given row
// colors.
// NOTE(review): this calls the exported OpenRow/CloseRow, while this file
// defines unexported openRow/closeRow — the exported wrappers are
// presumably defined elsewhere in the package; confirm.
func (tw *TableWriter) writeRow(foreground, background string,
	columns []string) error {
	if err := tw.OpenRow(foreground, background); err != nil {
		return err
	}
	for _, column := range columns {
		fmt.Fprintf(tw.writer, " <td>%s</td>\n", column)
	}
	return tw.CloseRow()
}
|
package doc
import (
"fmt"
)
// swaggerInfo carries the fields exposed to clients via SwaggerInfo for
// customizing the generated API documentation.
type swaggerInfo struct {
	Version string
	Host string
	BasePath string
	Schemes []string
	Title string
	Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it
// before the documentation is rendered.
var SwaggerInfo = swaggerInfo{
	Version: "1.0",
	Host: "",
	BasePath: "",
	Schemes: []string{},
	Title: "Golang Gin API",
	Description: "An example of gin",
}
type s struct{}
// ReadDoc returns the rendered Swagger document. The template-based
// implementation is currently commented out, so an empty string is
// returned unconditionally.
func (s *s) ReadDoc() string {
	// sInfo := SwaggerInfo
	// sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
	//
	// t, err := template.New("swagger_info").Funcs(template.FuncMap{
	// "marshal": func(v interface{}) string {
	// a, _ := json.Marshal(v)
	// return string(a)
	// },
	// }).Parse(doc)
	// if err != nil {
	// return doc
	// }
	//
	// var tpl bytes.Buffer
	// if err := t.Execute(&tpl, sInfo); err != nil {
	// return doc
	// }
	//
	// return tpl.String()
	return ""
}
// init would normally register this doc provider with swag; registration
// is currently disabled, so it only prints the package-level SwaggerInfo.
func init() {
	//swag.Register(swag.Name, &s{})
	fmt.Println(SwaggerInfo)
}
|
package main
import (
"InkaTry/warehouse-storage-be/cmd/webservice"
"InkaTry/warehouse-storage-be/internal/pkg/config"
"InkaTry/warehouse-storage-be/internal/pkg/logger"
"gopkg.in/ini.v1"
"log"
"math/rand"
"os"
"os/signal"
"syscall"
"time"
)
// main seeds the RNG, loads configuration, wires the custom logger,
// starts the web service, and blocks until a termination signal arrives.
func main() {
	// if in the future some random number is needed, it needs to be seeded
	// especially for security issue
	rand.Seed(time.Now().UnixNano())
	// load config
	cfg := loadAppConfig()
	// custom log message
	location, err := time.LoadLocation(cfg.Location)
	if err != nil {
		panic(err)
	}
	log.SetOutput(&logger.LogWriter{
		AppName: cfg.AppName,
		Loc: location,
		Env: cfg.Env,
	})
	log.SetFlags(0)
	// signal.Notify requires a buffered channel so a signal delivered
	// while main is not yet receiving is not dropped (go vet flags the
	// original unbuffered channel).
	ch := make(chan os.Signal, 1)
	// SIGKILL and SIGSTOP can never be caught by a process, so
	// registering them (as the original did) has no effect.
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT)
	go webservice.Start(&cfg)
	<-ch
}
// loadAppConfig reads ./config/config.ini into a Config, panicking when
// the file is missing or malformed (the service cannot run unconfigured).
func loadAppConfig() config.Config {
	const source = "./config/config.ini"
	var appConfig config.Config
	if err := ini.MapTo(&appConfig, source); err != nil {
		panic(err)
	}
	return appConfig
}
|
package config
import (
"github.com/iikmaulana/gateway/libs/helper/serror"
"github.com/iikmaulana/uzzeet-api/controller"
"github.com/iikmaulana/uzzeet-api/service/handler"
"github.com/iikmaulana/uzzeet-api/service/repository/core"
)
// InitService wires the API: it builds the core repositories, constructs
// the usecase controllers on top of them, and registers the gateway
// handler. It returns the first repository-construction error, if any.
func (cfg Config) InitService() serror.SError {
	vehiclesRepo, serr := core.NewVehiclesRepository(cfg.Registry)
	if serr != nil {
		return serr
	}
	deviceRepo, serr := core.NewDeviceRepository(cfg.Registry)
	if serr != nil {
		return serr
	}
	deviceUsecase := controller.NewDeviceUsecase(deviceRepo)
	gpstypeUsecase := controller.NewGpsTypeUsecase(deviceRepo)
	historyUsecase := controller.NewHistoryUsecase(deviceRepo)
	vehicleUsecase := controller.NewVehicleUsecase(vehiclesRepo)
	handler.NewGatewayHandler(cfg.Gateway, deviceUsecase, vehicleUsecase, gpstypeUsecase, historyUsecase)
	return nil
}
|
package ircserver
// init aliases the server-side PING command to the client-side handler.
func init() {
	// These just use exactly the same code as clients. We can directly assign
	// the contents of Commands[x] because cmd_ping.go is sorted lexically
	// before scmd_ping.go. For details, see
	// http://golang.org/ref/spec#Package_initialization.
	Commands["server_PING"] = Commands["PING"]
}
|
package main
import "fmt"
// Customer is a demo record holding a person's name, address and age.
type Customer struct {
	Name, Address string
	Age int
}
// main demonstrates three ways to initialize a struct: zero value plus
// field assignment, a keyed literal, and a positional literal.
func main() {
	var customer Customer
	customer.Name = "Nabil"
	customer.Address = "Jakarta"
	customer.Age = 25
	fmt.Println(customer)
	// Struct Literal 1
	joko := Customer{
		Name: "Joko",
		Address: "Bandung",
		Age: 22,
	}
	fmt.Println(joko)
	// Struct Literal 2
	budi := Customer{"Budi", "Bekasi", 23}
	fmt.Println(budi)
}
|
package ibmcloud
import (
"context"
"errors"
"fmt"
"github.com/IBM/vpc-go-sdk/vpcv1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/openshift/installer/pkg/types"
"github.com/openshift/installer/pkg/types/ibmcloud"
)
// Validate executes platform-specific validation.
// It aggregates errors from the platform section, the control-plane
// machine pool, and each compute machine pool, preserving that order.
func Validate(client API, ic *types.InstallConfig) error {
	allErrs := field.ErrorList{}
	platformPath := field.NewPath("platform").Child("ibmcloud")
	allErrs = append(allErrs, validatePlatform(client, ic, platformPath)...)
	if ic.ControlPlane != nil && ic.ControlPlane.Platform.IBMCloud != nil {
		machinePool := ic.ControlPlane.Platform.IBMCloud
		fldPath := field.NewPath("controlPlane").Child("platform").Child("ibmcloud")
		allErrs = append(allErrs, validateMachinePool(client, ic.Platform.IBMCloud, machinePool, fldPath)...)
	}
	for idx, compute := range ic.Compute {
		machinePool := compute.Platform.IBMCloud
		fldPath := field.NewPath("compute").Index(idx).Child("platform").Child("ibmcloud")
		if machinePool != nil {
			allErrs = append(allErrs, validateMachinePool(client, ic.Platform.IBMCloud, machinePool, fldPath)...)
		}
	}
	return allErrs.ToAggregate()
}
// validatePlatform checks the platform-level IBM Cloud settings: the
// resource group, any pre-existing VPC/network resource group pair, and
// the default machine platform, when each is specified.
func validatePlatform(client API, ic *types.InstallConfig, path *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if ic.Platform.IBMCloud.ResourceGroupName != "" {
		allErrs = append(allErrs, validateResourceGroup(client, ic.IBMCloud.ResourceGroupName, "resourceGroupName", path)...)
	}
	// Either field being set triggers the paired VPC validation, which
	// itself rejects the case where only one of the two is provided.
	if ic.Platform.IBMCloud.NetworkResourceGroupName != "" || ic.Platform.IBMCloud.VPCName != "" {
		allErrs = append(allErrs, validateExistingVPC(client, ic, path)...)
	}
	if ic.Platform.IBMCloud.DefaultMachinePlatform != nil {
		allErrs = append(allErrs, validateMachinePool(client, ic.IBMCloud, ic.Platform.IBMCloud.DefaultMachinePlatform, path)...)
	}
	return allErrs
}
// validateMachinePool validates the optional machine-pool fields that are
// set: instance type, zones, boot volume and dedicated hosts.
func validateMachinePool(client API, platform *ibmcloud.Platform, machinePool *ibmcloud.MachinePool, path *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if machinePool.InstanceType != "" {
		allErrs = append(allErrs, validateMachinePoolType(client, machinePool.InstanceType, path.Child("type"))...)
	}
	if len(machinePool.Zones) > 0 {
		allErrs = append(allErrs, validateMachinePoolZones(client, platform.Region, machinePool.Zones, path.Child("zones"))...)
	}
	if machinePool.BootVolume != nil {
		allErrs = append(allErrs, validateMachinePoolBootVolume(client, *machinePool.BootVolume, path.Child("bootVolume"))...)
	}
	if len(machinePool.DedicatedHosts) > 0 {
		allErrs = append(allErrs, validateMachinePoolDedicatedHosts(client, machinePool.DedicatedHosts, machinePool.InstanceType, machinePool.Zones, platform.Region, path.Child("dedicatedHosts"))...)
	}
	return allErrs
}
// validateMachinePoolDedicatedHosts validates each dedicated host entry:
// a named host must exist, be provisionable, sit in the matching zone and
// support the machine type; a host given by profile must use a profile
// supported in the region that also supports the machine type.
// NOTE(review): dhosts[i] is matched against zones[i] positionally — this
// assumes len(zones) >= len(dhosts); confirm that is validated upstream,
// otherwise the index can panic.
func validateMachinePoolDedicatedHosts(client API, dhosts []ibmcloud.DedicatedHost, machineType string, zones []string, region string, path *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	// Get list of supported profiles in region
	dhostProfiles, err := client.GetDedicatedHostProfiles(context.TODO(), region)
	if err != nil {
		allErrs = append(allErrs, field.InternalError(path, err))
	}
	for i, dhost := range dhosts {
		if dhost.Name != "" {
			// Check if host with name exists
			dh, err := client.GetDedicatedHostByName(context.TODO(), dhost.Name, region)
			if err != nil {
				allErrs = append(allErrs, field.InternalError(path.Index(i).Child("name"), err))
			}
			if dh != nil {
				// Check if instance is provisionable on host
				if !*dh.InstancePlacementEnabled || !*dh.Provisionable {
					allErrs = append(allErrs, field.Invalid(path.Index(i).Child("name"), dhost.Name, "dedicated host is unable to provision instances"))
				}
				// Check if host is in zone
				if *dh.Zone.Name != zones[i] {
					allErrs = append(allErrs, field.Invalid(path.Index(i).Child("name"), dhost.Name, fmt.Sprintf("dedicated host not in zone %s", zones[i])))
				}
				// Check if host profile supports machine type
				if !isInstanceProfileInList(machineType, dh.SupportedInstanceProfiles) {
					allErrs = append(allErrs, field.Invalid(path.Index(i).Child("name"), dhost.Name, fmt.Sprintf("dedicated host does not support machine type %s", machineType)))
				}
			}
		} else {
			// Check if host profile is supported in region
			if !isDedicatedHostProfileInList(dhost.Profile, dhostProfiles) {
				allErrs = append(allErrs, field.Invalid(path.Index(i).Child("profile"), dhost.Profile, fmt.Sprintf("dedicated host profile not supported in region %s", region)))
			}
			// Check if host profile supports machine type
			for _, profile := range dhostProfiles {
				if *profile.Name == dhost.Profile {
					if !isInstanceProfileInList(machineType, profile.SupportedInstanceProfiles) {
						allErrs = append(allErrs, field.Invalid(path.Index(i).Child("profile"), dhost.Profile, fmt.Sprintf("dedicated host profile does not support machine type %s", machineType)))
						break
					}
				}
			}
		}
	}
	return allErrs
}
// isInstanceProfileInList reports whether profile appears (by name) in
// the given instance-profile references.
func isInstanceProfileInList(profile string, list []vpcv1.InstanceProfileReference) bool {
	for i := range list {
		if *list[i].Name == profile {
			return true
		}
	}
	return false
}
// isDedicatedHostProfileInList reports whether profile appears (by name)
// in the given dedicated-host profiles.
func isDedicatedHostProfileInList(profile string, list []vpcv1.DedicatedHostProfile) bool {
	for i := range list {
		if *list[i].Name == profile {
			return true
		}
	}
	return false
}
// validateMachinePoolType checks that machineType is one of the available
// VSI profiles; it returns a NotFound error otherwise.
func validateMachinePoolType(client API, machineType string, path *field.Path) field.ErrorList {
	vsiProfiles, err := client.GetVSIProfiles(context.TODO())
	if err != nil {
		return field.ErrorList{field.InternalError(path, err)}
	}
	for i := range vsiProfiles {
		if *vsiProfiles[i].Name == machineType {
			return nil
		}
	}
	return field.ErrorList{field.NotFound(path, machineType)}
}
// validateMachinePoolZones checks that every requested zone exists in the
// given region, returning on the first invalid zone.
func validateMachinePoolZones(client API, region string, zones []string, path *field.Path) field.ErrorList {
	regionalZones, err := client.GetVPCZonesForRegion(context.TODO(), region)
	if err != nil {
		return field.ErrorList{field.InternalError(path, err)}
	}
	// Build the membership set once; the original rebuilt it on every
	// loop iteration (loop-invariant work).
	validZones := sets.NewString(regionalZones...)
	for idx, zone := range zones {
		if !validZones.Has(zone) {
			return field.ErrorList{field.Invalid(path.Index(idx), zone, fmt.Sprintf("zone must be in region %q", region))}
		}
	}
	return nil
}
// validateMachinePoolBootVolume verifies that the configured encryption
// key, when set, can be resolved; an unset key is valid.
func validateMachinePoolBootVolume(client API, bootVolume ibmcloud.BootVolume, path *field.Path) field.ErrorList {
	if bootVolume.EncryptionKey == "" {
		return field.ErrorList{}
	}
	// Make sure the encryptionKey exists
	key, err := client.GetEncryptionKey(context.TODO(), bootVolume.EncryptionKey)
	switch {
	case err != nil:
		return field.ErrorList{field.InternalError(path.Child("encryptionKey"), err)}
	case key == nil:
		return field.ErrorList{field.NotFound(path.Child("encryptionKey"), bootVolume.EncryptionKey)}
	default:
		return field.ErrorList{}
	}
}
// validateResourceGroup checks that resourceGroupName matches an existing
// resource group by ID or name. An empty name is accepted as "unset".
func validateResourceGroup(client API, resourceGroupName string, platformField string, path *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if resourceGroupName == "" {
		return allErrs
	}
	resourceGroups, err := client.GetResourceGroups(context.TODO())
	if err != nil {
		return append(allErrs, field.InternalError(path.Child(platformField), err))
	}
	found := false
	for _, rg := range resourceGroups {
		if *rg.ID == resourceGroupName || *rg.Name == resourceGroupName {
			found = true
			// Stop scanning once a match is found (the original kept
			// iterating needlessly).
			break
		}
	}
	if !found {
		return append(allErrs, field.NotFound(path.Child(platformField), resourceGroupName))
	}
	return allErrs
}
// validateExistingVPC validates a user-supplied VPC: both vpcName and
// networkResourceGroupName must be set together, the resource group must
// exist, the VPC must exist in the region and belong to that resource
// group, and its subnets are then validated.
func validateExistingVPC(client API, ic *types.InstallConfig, path *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if ic.IBMCloud.VPCName == "" {
		return append(allErrs, field.Invalid(path.Child("vpcName"), ic.IBMCloud.VPCName, fmt.Sprintf("vpcName cannot be empty when providing a networkResourceGroupName: %s", ic.IBMCloud.NetworkResourceGroupName)))
	}
	if ic.IBMCloud.NetworkResourceGroupName == "" {
		return append(allErrs, field.Invalid(path.Child("networkResourceGroupName"), ic.IBMCloud.NetworkResourceGroupName, fmt.Sprintf("networkResourceGroupName cannot be empty when providing a vpcName: %s", ic.IBMCloud.VPCName)))
	}
	allErrs = append(allErrs, validateResourceGroup(client, ic.IBMCloud.NetworkResourceGroupName, "networkResourceGroupName", path)...)
	vpcs, err := client.GetVPCs(context.TODO(), ic.IBMCloud.Region)
	if err != nil {
		return append(allErrs, field.InternalError(path.Child("vpcName"), err))
	}
	found := false
	for _, vpc := range vpcs {
		if *vpc.Name == ic.IBMCloud.VPCName {
			// The VPC's resource group may be referenced by ID or name.
			if *vpc.ResourceGroup.ID != ic.IBMCloud.NetworkResourceGroupName && *vpc.ResourceGroup.Name != ic.IBMCloud.NetworkResourceGroupName {
				return append(allErrs, field.Invalid(path.Child("vpcName"), ic.IBMCloud.VPCName, fmt.Sprintf("vpc is not in provided Network ResourceGroup: %s", ic.IBMCloud.NetworkResourceGroupName)))
			}
			found = true
			allErrs = append(allErrs, validateExistingSubnets(client, ic, path, *vpc.ID)...)
			break
		}
	}
	if !found {
		allErrs = append(allErrs, field.NotFound(path.Child("vpcName"), ic.IBMCloud.VPCName))
	}
	return allErrs
}
// validateExistingSubnets validates the user-supplied control plane and
// compute subnets: each must exist, live in the expected VPC and network
// resource group, and together they must cover exactly the zones required
// by the respective machine pools (or the region's zones by default).
func validateExistingSubnets(client API, ic *types.InstallConfig, path *field.Path, vpcID string) field.ErrorList {
	allErrs := field.ErrorList{}
	// regionalZones caches the region's zone list so it is fetched at
	// most once across the control-plane and compute checks.
	var regionalZones []string
	if len(ic.IBMCloud.ControlPlaneSubnets) == 0 {
		allErrs = append(allErrs, field.Invalid(path.Child("controlPlaneSubnets"), ic.IBMCloud.ControlPlaneSubnets, fmt.Sprintf("controlPlaneSubnets cannot be empty when providing a vpcName: %s", ic.IBMCloud.VPCName)))
	} else {
		controlPlaneSubnetZones := make(map[string]int)
		for _, controlPlaneSubnet := range ic.IBMCloud.ControlPlaneSubnets {
			subnet, err := client.GetSubnetByName(context.TODO(), controlPlaneSubnet, ic.IBMCloud.Region)
			if err != nil {
				if errors.Is(err, &VPCResourceNotFoundError{}) {
					allErrs = append(allErrs, field.NotFound(path.Child("controlPlaneSubnets"), controlPlaneSubnet))
				} else {
					allErrs = append(allErrs, field.InternalError(path.Child("controlPlaneSubnets"), err))
				}
			} else {
				if *subnet.VPC.ID != vpcID {
					allErrs = append(allErrs, field.Invalid(path.Child("controlPlaneSubnets"), controlPlaneSubnet, fmt.Sprintf("controlPlaneSubnets contains subnet: %s, not found in expected vpcID: %s", controlPlaneSubnet, vpcID)))
				}
				if *subnet.ResourceGroup.ID != ic.IBMCloud.NetworkResourceGroupName && *subnet.ResourceGroup.Name != ic.IBMCloud.NetworkResourceGroupName {
					allErrs = append(allErrs, field.Invalid(path.Child("controlPlaneSubnets"), controlPlaneSubnet, fmt.Sprintf("controlPlaneSubnets contains subnet: %s, not found in expected networkResourceGroupName: %s", controlPlaneSubnet, ic.IBMCloud.NetworkResourceGroupName)))
				}
				controlPlaneSubnetZones[*subnet.Zone.Name]++
			}
		}
		var controlPlaneActualZones []string
		// Verify the supplied ControlPlane Subnets cover the provided ControlPlane Zones, or default Regional Zones if not provided
		if zones := getMachinePoolZones(*ic.ControlPlane); zones != nil {
			controlPlaneActualZones = zones
		} else {
			// Assign to the outer regionalZones so the compute loop below
			// can reuse it. The original shadowed it with ":=", which left
			// the cache nil and forced a second identical API call.
			var err error
			regionalZones, err = client.GetVPCZonesForRegion(context.TODO(), ic.IBMCloud.Region)
			if err != nil {
				allErrs = append(allErrs, field.InternalError(path.Child("controlPlaneSubnets"), err))
			}
			controlPlaneActualZones = regionalZones
		}
		// If length of found zones doesn't match actual or if an actual zone was not found from provided subnets, that is an invalid configuration
		if len(controlPlaneSubnetZones) != len(controlPlaneActualZones) {
			allErrs = append(allErrs, field.Invalid(path.Child("controlPlaneSubnets"), ic.IBMCloud.ControlPlaneSubnets, fmt.Sprintf("number of zones (%d) covered by controlPlaneSubnets does not match number of provided or default zones (%d) for control plane in %s", len(controlPlaneSubnetZones), len(controlPlaneActualZones), ic.IBMCloud.Region)))
		} else {
			for _, actualZone := range controlPlaneActualZones {
				if _, okay := controlPlaneSubnetZones[actualZone]; !okay {
					allErrs = append(allErrs, field.Invalid(path.Child("controlPlaneSubnets"), ic.IBMCloud.ControlPlaneSubnets, fmt.Sprintf("%s zone does not have a provided control plane subnet", actualZone)))
				}
			}
		}
	}
	if len(ic.IBMCloud.ComputeSubnets) == 0 {
		allErrs = append(allErrs, field.Invalid(path.Child("computeSubnets"), ic.IBMCloud.ComputeSubnets, fmt.Sprintf("computeSubnets cannot be empty when providing a vpcName: %s", ic.IBMCloud.VPCName)))
	} else {
		computeSubnetZones := make(map[string]int)
		for _, computeSubnet := range ic.IBMCloud.ComputeSubnets {
			subnet, err := client.GetSubnetByName(context.TODO(), computeSubnet, ic.IBMCloud.Region)
			if err != nil {
				if errors.Is(err, &VPCResourceNotFoundError{}) {
					allErrs = append(allErrs, field.NotFound(path.Child("computeSubnets"), computeSubnet))
				} else {
					allErrs = append(allErrs, field.InternalError(path.Child("computeSubnets"), err))
				}
			} else {
				if *subnet.VPC.ID != vpcID {
					allErrs = append(allErrs, field.Invalid(path.Child("computeSubnets"), computeSubnet, fmt.Sprintf("computeSubnets contains subnet: %s, not found in expected vpcID: %s", computeSubnet, vpcID)))
				}
				if *subnet.ResourceGroup.ID != ic.IBMCloud.NetworkResourceGroupName && *subnet.ResourceGroup.Name != ic.IBMCloud.NetworkResourceGroupName {
					allErrs = append(allErrs, field.Invalid(path.Child("computeSubnets"), computeSubnet, fmt.Sprintf("computeSubnets contains subnet: %s, not found in expected networkResourceGroupName: %s", computeSubnet, ic.IBMCloud.NetworkResourceGroupName)))
				}
				computeSubnetZones[*subnet.Zone.Name]++
			}
		}
		// Verify the supplied Compute(s) Subnets cover the provided Compute Zones, or default Region Zones if not specified, for each Compute block
		for index, compute := range ic.Compute {
			var computeActualZones []string
			if zones := getMachinePoolZones(compute); zones != nil {
				computeActualZones = zones
			} else {
				if regionalZones == nil {
					var err error
					regionalZones, err = client.GetVPCZonesForRegion(context.TODO(), ic.IBMCloud.Region)
					if err != nil {
						allErrs = append(allErrs, field.InternalError(path.Child("computeSubnets"), err))
					}
				}
				computeActualZones = regionalZones
			}
			// If length of found zones doesn't match actual or if an actual zone was not found from provided subnets, that is an invalid configuration
			if len(computeSubnetZones) != len(computeActualZones) {
				allErrs = append(allErrs, field.Invalid(path.Child("computeSubnets"), ic.IBMCloud.ComputeSubnets, fmt.Sprintf("number of zones (%d) covered by computeSubnets does not match number of provided or default zones (%d) for compute[%d] in %s", len(computeSubnetZones), len(computeActualZones), index, ic.IBMCloud.Region)))
			} else {
				for _, actualZone := range computeActualZones {
					if _, okay := computeSubnetZones[actualZone]; !okay {
						allErrs = append(allErrs, field.Invalid(path.Child("computeSubnets"), ic.IBMCloud.ComputeSubnets, fmt.Sprintf("%s zone does not have a provided compute subnet", actualZone)))
					}
				}
			}
		}
	}
	return allErrs
}
// validateSubnetZone checks that the subnet identified by subnetID exists
// and resides in one of validZones.
func validateSubnetZone(client API, subnetID string, validZones sets.String, subnetPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	subnet, err := client.GetSubnet(context.TODO(), subnetID)
	if err != nil {
		if errors.Is(err, &VPCResourceNotFoundError{}) {
			allErrs = append(allErrs, field.NotFound(subnetPath, subnetID))
		} else {
			allErrs = append(allErrs, field.InternalError(subnetPath, err))
		}
		return allErrs
	}
	zoneName := *subnet.Zone.Name
	if !validZones.Has(zoneName) {
		allErrs = append(allErrs, field.Invalid(subnetPath, subnetID, fmt.Sprintf("subnet is not in expected zones: %s", validZones.List())))
	}
	return allErrs
}
// ValidatePreExistingPublicDNS ensure no pre-existing DNS record exists in the CIS
// DNS zone for cluster's Kubernetes API.
func ValidatePreExistingPublicDNS(client API, ic *types.InstallConfig, metadata *Metadata) error {
	// If this is an internal cluster, this check is not necessary
	if ic.Publish == types.InternalPublishingStrategy {
		return nil
	}
	// Get CIS CRN
	crn, err := metadata.CISInstanceCRN(context.TODO())
	if err != nil {
		return err
	}
	// Get CIS zone ID by name
	zoneID, err := client.GetDNSZoneIDByName(context.TODO(), ic.BaseDomain, ic.Publish)
	if err != nil {
		return field.InternalError(field.NewPath("baseDomain"), err)
	}
	// Get CIS DNS record by name
	recordName := fmt.Sprintf("api.%s", ic.ClusterDomain())
	records, err := client.GetDNSRecordsByName(context.TODO(), crn, zoneID, recordName)
	if err != nil {
		return field.InternalError(field.NewPath("baseDomain"), err)
	}
	// DNS record exists
	if len(records) != 0 {
		return fmt.Errorf("record %s already exists in CIS zone (%s) and might be in use by another cluster, please remove it to continue", recordName, zoneID)
	}
	return nil
}
// getMachinePoolZones will return the zones if they have been specified or
// return nil if the MachinePoolPlatform or values are not specified.
func getMachinePoolZones(mp types.MachinePool) []string {
	if ibm := mp.Platform.IBMCloud; ibm != nil {
		// Zones may itself be nil, which callers treat as "unspecified".
		return ibm.Zones
	}
	return nil
}
|
package xbase
import (
"bytes"
"crypto/md5"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
)
// On-disk locations shared by the tests in this file. The tests are
// order-dependent: tmpPath carries the hex-encoded root hash of the most
// recent Commit from one test to the next; tmpPath2 snapshots an older
// root for the old-branch test.
const (
	dbPath = "/tmp/xbase"
	tmpPath = "/tmp/xbase_tmp.txt"
	tmpPath2 = "/tmp/xbase_tmp2.txt"
)
// TestPut builds a fresh tree (nil root), inserts three keys, commits,
// and persists the resulting root hash for the follow-up tests.
func TestPut(t *testing.T) {
	xb := NewXBase(dbPath, nil)
	defer xb.Close()
	k := []byte("a")
	v := []byte("1")
	if err := xb.Put(k, v); err != nil {
		panic(err)
	}
	// xb.PrettyPrint(xb.root, 0, 1)
	k = []byte("b")
	v = []byte("1")
	if err := xb.Put(k, v); err != nil {
		panic(err)
	}
	// xb.PrettyPrint(xb.root, 0, 1)
	// "ab" shares a prefix with "a", exercising node splitting.
	k = []byte("ab")
	v = []byte("1")
	if err := xb.Put(k, v); err != nil {
		panic(err)
	}
	// xb.PrettyPrint(xb.root, 0, 1)
	rootByte := xb.Commit()
	t.Logf("root.seq: %x", rootByte)
	// Hand the committed root to the next test via tmpPath.
	ioutil.WriteFile(tmpPath, []byte(fmt.Sprintf("%x", rootByte)), os.ModePerm)
}
// Test2Put reopens the tree at the root committed by TestPut, inserts one
// more key, and persists the new root hash.
func Test2Put(t *testing.T) {
	//root := "be69daa92c5e771de594f98fb266309be9af203fefa922a6134a21c851dc9aa1"
	root, err := ioutil.ReadFile(tmpPath)
	if err != nil {
		t.Fatal(err)
	}
	xb := NewXBase(dbPath, MustHexDecode([]byte(root)))
	defer xb.Close()
	k := []byte("abc")
	v := []byte("1")
	if err := xb.Put(k, v); err != nil {
		panic(err)
	}
	// xb.PrettyPrint(xb.root, 0, 1)
	rootByte := xb.Commit()
	t.Logf("root.seq: %x", rootByte)
	ioutil.WriteFile(tmpPath, []byte(fmt.Sprintf("%x", rootByte)), os.ModePerm)
}
// TestGet reopens the tree at the last committed root and verifies every
// key written by the preceding Put tests still maps to "1".
func TestGet(t *testing.T) {
	//root := "3b0c14023a5bd3bd9b1c905694904430088b427e3c274ca0a4772d456cc7bda0"
	root, err := ioutil.ReadFile(tmpPath)
	if err != nil {
		t.Fatal(err)
	}
	xb := NewXBase(dbPath, MustHexDecode([]byte(root)))
	defer xb.Close()
	k := []byte("a")
	v := []byte("1")
	if vInDb, err := xb.Get(k); err != nil {
		panic(err)
	} else if !bytes.Equal(v, vInDb) {
		t.Fatalf("unequal : get %s", vInDb)
	}
	k = []byte("b")
	if vInDb, err := xb.Get(k); err != nil {
		panic(err)
	} else if !bytes.Equal(v, vInDb) {
		t.Fatalf("unequal : get %s", vInDb)
	}
	k = []byte("ab")
	if vInDb, err := xb.Get(k); err != nil {
		panic(err)
	} else if !bytes.Equal(v, vInDb) {
		t.Fatalf("unequal : get %s", vInDb)
	}
	k = []byte("abc")
	if vInDb, err := xb.Get(k); err != nil {
		panic(err)
	} else if !bytes.Equal(v, vInDb) {
		t.Fatalf("unequal : get %s", vInDb)
	}
	rootByte := xb.Commit()
	t.Logf("root.seq: %x", rootByte)
	ioutil.WriteFile(tmpPath, []byte(fmt.Sprintf("%x", rootByte)), os.ModePerm)
}
// MD5 returns the 32-character hexadecimal MD5 digest of text.
func MD5(text string) string {
	sum := md5.Sum([]byte(text))
	return hex.EncodeToString(sum[:])
}
// BenchmarkPutPerf bulk-inserts 100000 MD5-derived keys, spot-checking a
// third of them with Get, then commits.
// NOTE(review): this ignores t.N, so `go test -bench` timing per-op is
// meaningless; it behaves as a fixed-size load test.
func BenchmarkPutPerf(t *testing.B) {
	//root := "3b0c14023a5bd3bd9b1c905694904430088b427e3c274ca0a4772d456cc7bda0"
	root, err := ioutil.ReadFile(tmpPath)
	if err != nil {
		t.Fatal(err)
	}
	xb := NewXBase(dbPath, MustHexDecode([]byte(root)))
	defer xb.Close()
	rounds := 100000
	var lastKey []byte
	for i := 0; i < rounds; i++ {
		k := []byte(MD5(strings.Repeat(fmt.Sprintf("%d", i), 10)))
		// lastKey always equals k here; the read-back below could use k
		// directly.
		lastKey = k
		v := []byte("1")
		if err := xb.Put(k, v); err != nil {
			t.Fatalf("k=%s,v=%s,error=%s", k, v, err.Error())
		}
		if i%3 == 1 {
			if vInDb, err := xb.Get(lastKey); err != nil {
				panic(err)
			} else if !bytes.Equal(v, vInDb) {
				t.Fatalf("unequal: get %s", vInDb)
			}
		}
	}
	rootByte := xb.Commit()
	t.Logf("root.seq: %x", rootByte)
	ioutil.WriteFile(tmpPath, []byte(fmt.Sprintf("%x", rootByte)), os.ModePerm)
}
// TestDel deletes key "a", verifies it is gone, and persists the new root
// to both tmpPath (for the next test) and tmpPath2 (old-branch snapshot).
func TestDel(t *testing.T) {
	root, err := ioutil.ReadFile(tmpPath)
	if err != nil {
		t.Fatal(err)
	}
	xb := NewXBase(dbPath, MustHexDecode([]byte(root)))
	defer xb.Close()
	k := []byte("a")
	if err := xb.Delete(k); err != nil {
		panic(err)
	}
	// A nil error here would mean the deleted key is still readable.
	if v, err := xb.Get(k); err == nil {
		panic(fmt.Errorf("should be not found, %s", v))
	}
	// xb.PrettyPrint(xb.root, 0, 1)
	rootByte := xb.Commit()
	t.Logf("root.seq: %x", rootByte)
	ioutil.WriteFile(tmpPath, []byte(fmt.Sprintf("%x", rootByte)), os.ModePerm)
	ioutil.WriteFile(tmpPath2, []byte(fmt.Sprintf("%x", rootByte)), os.ModePerm)
}
// Test2Del confirms "a" is still absent after reopening at the post-delete
// root, re-inserts it, and persists the new root.
func Test2Del(t *testing.T) {
	root, err := ioutil.ReadFile(tmpPath)
	if err != nil {
		t.Fatal(err)
	}
	xb := NewXBase(dbPath, MustHexDecode([]byte(root)))
	defer xb.Close()
	k := []byte("a")
	if v, err := xb.Get(k); err == nil {
		panic(fmt.Errorf("should be not found, %s", v))
	}
	v := []byte("1")
	if err := xb.Put(k, v); err != nil {
		t.Fatal(err)
	}
	rootByte := xb.Commit()
	t.Logf("root.seq: %x", rootByte)
	ioutil.WriteFile(tmpPath, []byte(fmt.Sprintf("%x", rootByte)), os.ModePerm)
}
// Test3DelFromOldBranch reopens at the older snapshot root (tmpPath2,
// taken right after the delete), verifies "a" is absent on that branch,
// and re-inserts it without persisting the root.
func Test3DelFromOldBranch(t *testing.T) {
	root, err := ioutil.ReadFile(tmpPath2)
	if err != nil {
		t.Fatal(err)
	}
	xb := NewXBase(dbPath, MustHexDecode([]byte(root)))
	defer xb.Close()
	k := []byte("a")
	if v, err := xb.Get(k); err == nil {
		panic(fmt.Errorf("should be not found, %s", v))
	}
	v := []byte("1")
	if err := xb.Put(k, v); err != nil {
		t.Fatal(err)
	}
	rootByte := xb.Commit()
	t.Logf("root.seq: %x", rootByte)
}
|
package model
import (
"strings"
"regexp"
)
// Manifest is the root of a pipeline definition: the owning team, the
// source repository, and an ordered list of tasks.
type Manifest struct {
	Team string `yaml:"team"`
	Repo Repo `yaml:"repo"`
	Tasks []Task `yaml:"tasks"`
}
// Repo identifies a git repository and the private key used to access it.
type Repo struct {
	Uri string `yaml:"uri"`
	PrivateKey string `yaml:"private_key"`
}
// RepoName derives the repository name from the URI: for SSH GitHub URIs
// it is the last path segment with the ".git" suffix stripped; for HTTPS
// GitHub URIs it is the last path segment as-is. Other URIs yield "".
func (r Repo) RepoName() string {
	switch {
	case strings.HasPrefix(r.Uri, "git@github.com"):
		re, _ := regexp.Compile(`.*/(.*).git$`)
		return re.FindAllStringSubmatch(r.Uri, -1)[0][1]
	case strings.HasPrefix(r.Uri, "https://github.com/"):
		re, _ := regexp.Compile(`.*/(.*)$`)
		return re.FindAllStringSubmatch(r.Uri, -1)[0][1]
	default:
		return ""
	}
}
// Task is a marker interface for the concrete task kinds that can appear
// in Manifest.Tasks (RunTask, DockerTask, DeployTask).
type Task interface{}

// RunTask describes a script to execute, the docker image to run it in,
// and the environment variables to supply.
type RunTask struct {
	Script string
	Image  string
	Vars   map[string]string
}

// DockerTask holds registry credentials and a target repository —
// presumably for building/pushing an image; confirm against the task runner.
type DockerTask struct {
	Username string
	Password string
	Repository string
}

// DeployTask holds deployment settings (api/org/space suggest a Cloud
// Foundry-style target — NOTE(review): confirm against the task runner),
// plus credentials, a manifest path, and extra variables.
type DeployTask struct {
	Api      string
	Org      string
	Space    string
	Username string
	Password string
	Manifest string
	Vars     map[string]string
}
|
package personinfor
import "fmt"
// Personinfor models a person with a public name and two encapsulated
// fields (age, salary) that can only be changed through validating setters.
type Personinfor struct {
	Name   string  // exported: freely accessible from other packages
	age    int     // unexported: mutate via SetAge only
	salary float64 // unexported: mutate via SetSalary only
}

// NewPerson builds a Personinfor with the given name; age and salary
// start at their zero values.
func NewPerson(name string) *Personinfor {
	p := &Personinfor{}
	p.Name = name
	return p
}

// SetAge stores age when it lies strictly between 0 and 150; otherwise it
// reports the invalid input and leaves the field untouched.
func (p *Personinfor) SetAge(age int) {
	if age <= 0 || age >= 150 {
		fmt.Println("输入的年龄有误~~")
		return
	}
	p.age = age
}

// GetAge exposes the encapsulated age field to other packages.
func (p *Personinfor) GetAge() int {
	return p.age
}

// SetSalary stores sal when 3000 < sal <= 30000; otherwise it reports the
// invalid amount and leaves the field untouched.
func (p *Personinfor) SetSalary(sal float64) {
	if sal <= 3000 || sal > 30000 {
		fmt.Println("输入的金额范围有误~~")
		return
	}
	p.salary = sal
}

// GetSalary exposes the encapsulated salary field to other packages.
func (p *Personinfor) GetSalary() float64 {
	return p.salary
}
|
package main
import (
"encoding/json"
"fmt"
"os"
)
// Config holds every runtime setting for the service: HTTP listener port,
// base URI, Redis connection details, the Redis key templates used for each
// entity (the %v placeholders are filled in with ids/categories), pagination,
// and the Bugsnag error-reporting key.
type Config struct {
	WebServerPort         int    `json:"web_server_port"`
	BaseUri               string `json:"base_uri"` // without a trailing slash
	RedisEndpoint         string `json:"redis_endpoint"`
	RedisPassword         string `json:"redis_password"`
	KeyCategories         string `json:"key_categories"`
	KeyProductCounter     string `json:"key_product_counter"`
	KeyImageCounter       string `json:"key_image_counter"`
	KeyImage              string `json:"key_image"`
	KeyImages             string `json:"key_images"`
	KeyProduct            string `json:"key_product"`
	KeyProductImages      string `json:"key_product_images"`
	KeyAllProducts        string `json:"key_all_products"`
	KeyProductsInCategory string `json:"key_products_in_category"`
	ResultsPerPage        int    `json:"results_per_page"`
	BugsnagKey            string `json:"bugsnag_key"`
}

// getConfiguration loads conf.json from the working directory. If the file
// cannot be opened or parsed it reports the problem and falls back to
// getDefaultConfiguration so the service can always start.
func getConfiguration() Config {
	configFile, err := os.Open("conf.json")
	if err != nil {
		// Previously this error was discarded with `_` and Close was
		// deferred on a nil file; report it and use the defaults directly.
		fmt.Println("❌ Config file error: ", err)
		fmt.Println("Reading default configuration.")
		return getDefaultConfiguration()
	}
	defer configFile.Close()

	config := Config{}
	if err := json.NewDecoder(configFile).Decode(&config); err != nil {
		fmt.Println("❌ Config file error: ", err)
		fmt.Println("Reading default configuration.")
		return getDefaultConfiguration()
	}
	return config
}

// getDefaultConfiguration returns the built-in defaults used when conf.json
// is missing or malformed: a local server on :8080 talking to a local,
// password-less Redis.
func getDefaultConfiguration() Config {
	return Config{
		WebServerPort:         8080,
		BaseUri:               "http://localhost:8080",
		RedisEndpoint:         "localhost:6379",
		RedisPassword:         "",
		KeyCategories:         "categories",
		KeyProductCounter:     "product_counter",
		KeyImageCounter:       "image_counter",
		KeyProduct:            "product:%v",
		KeyImage:              "image:%v",
		KeyImages:             "images",
		KeyProductImages:      "product:%v:images",
		KeyAllProducts:        "products",
		KeyProductsInCategory: "products:cat:%v",
		ResultsPerPage:        20,
		BugsnagKey:            "",
	}
}
|
package middleware_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestEnvoyMiddlewareSuite wires Ginkgo into the standard `go test` runner:
// it registers Gomega's Fail as the failure handler and then executes every
// spec registered in this package under the given suite description.
func TestEnvoyMiddlewareSuite(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Envoy Middleware Suite")
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"sort"
"time"
)
// main prints the VMs of a BOSH deployment, oldest first, as indented JSON.
//
// Usage: <prog> <bosh-environment> <deployment>
//
// It shells out to `bosh vms --vitals --json`, parses each VM's creation
// timestamp, sorts ascending, and emits [{"vm": ..., "created_at": ...}].
func main() {
	// Validate argv up front so a missing argument fails with a clear
	// message instead of an index-out-of-range panic.
	if len(os.Args) < 3 {
		panic("usage: " + os.Args[0] + " <bosh-environment> <deployment>")
	}
	target := os.Args[1]
	deployment := os.Args[2]

	cmd := exec.Command("bosh", "-e", target, "-d", deployment, "vms", "--vitals", "--json")
	j, err := cmd.Output()
	if err != nil {
		panic("Could not run command: " + err.Error())
	}

	// Minimal projection of the bosh --json table output.
	type response struct {
		Tables []struct {
			Rows []struct {
				Instance  string `json:"instance"`
				CreatedAt string `json:"vm_created_at"`
			} `json:"rows"`
		} `json:"tables"`
	}
	r := &response{}
	// Pass r directly: it is already a pointer (the original passed &r,
	// a **response, which json.Unmarshal tolerates but is not intended).
	err = json.Unmarshal(j, r)
	if err != nil {
		panic("Could not unmarshal json: " + err.Error())
	}
	// Guard so r.Tables[0] below cannot panic on unexpected output.
	if len(r.Tables) == 0 {
		panic("Unexpected bosh output: no tables in response")
	}

	// Convert timestamps to Unix seconds so they sort numerically.
	type intermediate struct {
		Instance  string
		CreatedAt int64
	}
	inters := make([]intermediate, 0, len(r.Tables[0].Rows))
	for _, v := range r.Tables[0].Rows {
		t, err := time.Parse(time.UnixDate, v.CreatedAt)
		if err != nil {
			panic(fmt.Sprintf("Could not parse date `%s': %s", v.CreatedAt, err))
		}
		inters = append(inters, intermediate{
			Instance:  v.Instance,
			CreatedAt: t.Unix(),
		})
	}
	sort.Slice(inters, func(i, j int) bool { return inters[i].CreatedAt < inters[j].CreatedAt })

	// Re-shape for output, formatting timestamps back to UnixDate.
	type final struct {
		VM        string `json:"vm"`
		CreatedAt string `json:"created_at"`
	}
	finals := make([]final, 0, len(inters))
	for _, v := range inters {
		finals = append(finals, final{
			VM:        v.Instance,
			CreatedAt: time.Unix(v.CreatedAt, 0).Format(time.UnixDate),
		})
	}
	j, err = json.Marshal(&finals)
	if err != nil {
		panic("Could not marshal into json: " + err.Error())
	}
	jInd := &bytes.Buffer{}
	// Check the Indent error (previously ignored) so malformed JSON cannot
	// silently produce empty output.
	if err := json.Indent(jInd, j, "", " "); err != nil {
		panic("Could not indent json: " + err.Error())
	}
	fmt.Println(jInd.String())
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.