text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"net/http"
"time"
)
const numJobs = 3
// main fans eight URL fetches out to a fixed pool of numJobs workers,
// prints each response status line as it arrives, then reports the total
// elapsed time.
func main() {
	urls := []string{
		"http://ozon.ru",
		"https://ozon.ru",
		"http://google.com",
		"http://somesite.com",
		"http://non-existent.domain.tld",
		"https://ya.ru",
		"http://ya.ru",
		"http://ёёёё",
	}
	// Buffered channels sized to the job count so neither producers nor
	// workers ever block on channel capacity.
	statuses := make(chan string, len(urls))
	jobs := make(chan string, len(urls))
	started := time.Now()
	httpClient := http.Client{Timeout: 5 * time.Second}
	for i := 0; i < numJobs; i++ {
		go workerPool(httpClient, statuses, jobs)
	}
	for _, u := range urls {
		jobs <- u
	}
	close(jobs) // lets the workers' range loops terminate
	for range urls {
		fmt.Println(<-statuses)
	}
	close(statuses)
	fmt.Println(time.Since(started))
}
// workerPool consumes URLs from urlsChan until it is closed, issuing a GET
// for each and sending the HTTP status text (or a fallback marker when the
// request fails) on result.
//
// The client is passed by value so every worker shares the same timeout
// configuration without sharing mutable state.
func workerPool(client http.Client, result, urlsChan chan string) {
	for j := range urlsChan {
		resp, err := client.Get(j)
		if err != nil || resp == nil {
			// Request failed (DNS error, timeout, ...): report the same
			// fallback marker the original used.
			result <- "404 Not found"
			continue
		}
		result <- resp.Status
		// Close the body so the underlying connection can be reused;
		// the original code leaked every response body.
		resp.Body.Close()
	}
}
|
package user
import (
"encoding/json"
"log"
"net/http"
"strings"
"github.com/dgrijalva/jwt-go"
"github.com/jmoiron/sqlx"
)
// Update returns an HTTP handler that authenticates the request via a
// Bearer JWT in the Authorization header, decodes a User from the JSON
// body, and persists the changed fields to the user table, echoing the
// updated user back as JSON.
func Update(db *sqlx.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		auth := r.Header.Get("Authorization")
		if auth == "" {
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		// Header is "Bearer <token>"; guard the index — the original
		// strings.Fields(c)[1] panicked on a malformed header.
		fields := strings.Fields(auth)
		if len(fields) < 2 {
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		tknStr := fields[1]
		claims := &Claims{}
		tkn, err := jwt.ParseWithClaims(tknStr, claims, func(token *jwt.Token) (interface{}, error) {
			return JwtKey, nil
		})
		// Check err BEFORE tkn.Valid: on a parse error tkn may be nil and
		// the original order dereferenced a nil pointer.
		if err != nil {
			if err == jwt.ErrSignatureInvalid {
				w.WriteHeader(http.StatusUnauthorized)
				return
			}
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		if !tkn.Valid {
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		w.Header().Set("Content-type", "application/json")
		var user User
		if err := json.NewDecoder(r.Body).Decode(&user); err != nil {
			// A bad request body must not take the whole server down
			// (the original called log.Fatal here).
			log.Println(err)
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		query := "Update user SET last_name = ?, first_name = ?, email = ?, username= ?, role = ?, number_of_reservations = ?, password = ? where id = ?"
		if _, err := db.Exec(query, user.LastName, user.FirstName, user.Email, user.Username, user.Role, user.NumberOfReservations, user.Password, user.ID); err != nil {
			// DB failures are a server-side problem, not fatal to the process.
			log.Println(err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		json.NewEncoder(w).Encode(user)
	}
}
|
// https://blog.golang.org/error-handling-and-go
//
// In Go it's idiomatic to communicate errors via an explicit, separate
// return value. This makes it easy to see which functions return
// errors and to handle them using the same language constructs
// employed for any other, non-error tasks.
//
// A common example of the use of the error type involves the os.Open function
//
// built-in:
// func Open(name, string) (file *File, err error)
//
// sample code:
// f, err := os.Open("filename.ext")
// if err != nil {
// log.Fatal(err)
// }
//
// the built-in error type:
// type error interface {
// Error() string
// }
package main
import (
"errors"
"fmt"
)
// f1 demonstrates the idiomatic (value, error) return convention: errors
// are the last return value and have the built-in interface type error.
func f1(arg int) (int, error) {
	// errors.New builds a basic error value carrying only a message.
	if arg == 42 {
		return -1, errors.New("can't work with 42")
	}
	// A nil error in the error position signals success.
	return arg + 3, nil
}
// It's possible to use custom types as errors by implementing the Error()
// method on them; argError carries the offending argument alongside a
// human-readable problem description.
type argError struct {
	arg  int
	prob string
}

// Error satisfies the built-in error interface for *argError, rendering
// the value as "<arg> - <prob>".
func (e *argError) Error() string {
	return fmt.Sprintf("%d - %s", e.arg, e.prob)
}
// f2 mirrors f1 but fails with the custom *argError type instead of a
// plain errors.New value.
func f2(arg int) (int, error) {
	if arg != 42 {
		return arg + 3, nil
	}
	// &argError{...} builds the custom error struct in place, supplying
	// values for the two fields arg and prob.
	return -1, &argError{arg, "can't work with it"}
}
// main exercises both error-returning functions; note the common Go idiom
// of checking an error inline on the if line.
func main() {
	for _, n := range []int{7, 42} {
		r, err := f1(n)
		if err != nil {
			fmt.Println("f1 failed:", err)
			continue
		}
		fmt.Println("f1 worked:", r)
	}
	for _, n := range []int{7, 42} {
		r, err := f2(n)
		if err != nil {
			// Printing an error value invokes its Error() method.
			fmt.Println("f2 failed:", err)
			continue
		}
		fmt.Println("f2 worked:", r)
	}
	// To use the data inside a custom error programmatically, recover the
	// concrete type with a type assertion.
	_, e := f2(42)
	if ae, ok := e.(*argError); ok {
		fmt.Println(ae.arg)
		fmt.Println(ae.prob)
	}
}
|
package main
import (
"fmt"
)
/*
Задача 2. Три числа
Напишите программу, которая запрашивает у пользователя три числа и сообщает, есть ли среди них число, большее, чем 5.
*/
// main reads three integers from standard input and reports every entered
// number that is greater than the control number 5. The original carried
// two dead variables (total, examScore) and mutated the range index; both
// removed here without changing the printed output.
func main() {
	const cntNumber = 3
	const contolNumber = 5
	arr := make([]int, cntNumber)
	fmt.Println("Программа Три числа")
	for i := 0; i < cntNumber; i++ {
		fmt.Printf("Введите %v число:\n", i+1)
		fmt.Scan(&arr[i])
	}
	// found tracks whether at least one number exceeded the control value
	// (the original used an inverted "flag" for the same purpose).
	found := false
	for i, n := range arr {
		if n > contolNumber {
			fmt.Printf("Число %v, которое ввели %v, больше 5:\n", n, i+1)
			found = true
		}
	}
	if !found {
		fmt.Printf("Среде веденных чисел %v нет ниодного больше 5:\n", arr)
	}
}
|
package cache
import (
"container/list"
"errors"
"sync"
)
// A cache holds a bounded number of ID:Data pairs for quick lookup, like a
// map, but drops the oldest pair when full. Every read or write promotes
// the touched pair to "newest". ID may be any map-key-compatible type;
// Data is any interface{} value.
type CacheProvider struct {
	cacheLimitNum int                           // maximum number of entries
	count         int                           // current number of entries
	idxs          map[interface{}]*list.Element // id -> list node, O(1) lookup
	list          *list.List                    // front = most recently used
	lock          sync.Mutex                    // guards all fields above
}
// cacheData is the payload stored in each list element: the id is kept so
// the map index can be removed when the element is evicted.
type cacheData struct {
	id   interface{}
	data interface{}
}
// NewCache creates a CacheProvider holding at most cacheLimitNum entries.
// A non-positive limit is rejected with an error.
func NewCache(cacheLimitNum int) (*CacheProvider, error) {
	if cacheLimitNum <= 0 {
		return nil, errors.New("cache num is <=0")
	}
	return &CacheProvider{
		cacheLimitNum: cacheLimitNum,
		idxs:          make(map[interface{}]*list.Element, cacheLimitNum),
		list:          list.New(),
	}, nil
}
// Set stores data under id, making it the newest entry. If id already
// exists its data is replaced in place; otherwise the pair is added,
// evicting the oldest entry when the cache is full.
func (c *CacheProvider) Set(id, data interface{}) (err error) {
	// Take the lock before touching count: the original read c.count
	// before locking, which is a data race with concurrent Set/Del.
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.count > c.cacheLimitNum {
		// Defensive invariant check; addIDData never lets count exceed
		// the limit.
		err = errors.New("cache count is over limit, something is wrong")
		return
	}
	e, ok := c.idxs[id]
	if ok {
		// id already cached: update directly and mark as newest.
		e.Value.(*cacheData).data = data
		c.list.MoveToFront(e)
		return
	}
	err = c.addIDData(id, data)
	return
}
// Get returns the data cached under id; ok reports whether id was present.
// A hit promotes the entry to newest.
func (c *CacheProvider) Get(id interface{}) (interface{}, bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	e, found := c.idxs[id]
	if !found {
		return nil, false
	}
	c.list.MoveToFront(e) // promote to most recently used
	return e.Value.(*cacheData).data, true
}
// GetSet returns the data stored under id; if id is absent, newData is
// inserted (evicting the oldest entry when full) and returned instead.
func (c *CacheProvider) GetSet(id, newData interface{}) (interface{}, error) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if e, found := c.idxs[id]; found {
		// Existing entry: a read also counts as a use, so promote it.
		c.list.MoveToFront(e)
		return e.Value.(*cacheData).data, nil
	}
	if err := c.addIDData(id, newData); err != nil {
		return nil, err
	}
	return newData, nil
}
// Del removes id from the cache, reporting whether it was present.
func (c *CacheProvider) Del(id interface{}) bool {
	c.lock.Lock()
	defer c.lock.Unlock()
	e, found := c.idxs[id]
	if found {
		c.list.Remove(e)
		delete(c.idxs, id)
		c.count--
	}
	return found
}
// Usage reports the current entry count and the configured limit.
// NOTE(review): both fields are read without taking c.lock, so under
// concurrent use the values are only advisory — confirm callers treat
// them as such.
func (c *CacheProvider) Usage() (count, limit int) {
	return c.count, c.cacheLimitNum
}
// addIDData inserts a new id/data pair as the newest entry. When the cache
// is at capacity the oldest (back) element is recycled in place rather
// than allocating a new node. The caller must hold c.lock.
func (c *CacheProvider) addIDData(id, data interface{}) error {
	if c.count != c.cacheLimitNum {
		// Room left: push a fresh node to the front.
		c.idxs[id] = c.list.PushFront(&cacheData{id: id, data: data})
		c.count++
		return nil
	}
	// Cache full: reuse the back (oldest) element for the new pair.
	e := c.list.Back()
	if e == nil {
		return errors.New("last element data is nil, something is wrong")
	}
	payload := e.Value.(*cacheData)
	delete(c.idxs, payload.id)
	payload.id = id
	payload.data = data
	c.idxs[id] = e
	c.list.MoveToFront(e)
	return nil
}
|
package db
import "github.com/boltdb/bolt"
import "strconv"
// openDB opens (creating if necessary) the tasks.db bolt database file in
// the working directory, panicking on failure. Callers are responsible
// for closing the returned DB.
// NOTE(review): file mode 0777 is unusually permissive for a data file —
// confirm whether 0600 would break any deployment before tightening.
func openDB() *bolt.DB {
	db, err := bolt.Open("tasks.db", 0777, nil)
	if err != nil {
		panic(err)
	}
	return db
}
// Init creates the "Tasks" bucket in the bolt database if it does not yet
// exist, panicking on any storage error.
func Init() {
	db := openDB()
	defer db.Close()
	err := db.Update(func(tx *bolt.Tx) error {
		// Return the error instead of panicking inside the transaction
		// (as the original did) so bolt can roll back cleanly; the outer
		// check still panics.
		_, err := tx.CreateBucketIfNotExists([]byte("Tasks"))
		return err
	})
	if err != nil {
		panic(err)
	}
}
// CreateNewTask appends a task to the "Tasks" bucket, keyed by the
// bucket's auto-incrementing sequence number encoded as a decimal string.
func CreateNewTask(task string) {
	db := openDB()
	defer db.Close()
	err := db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("Tasks"))
		id, err := b.NextSequence()
		if err != nil {
			return err
		}
		// Encode the id as a decimal string. The original used
		// string(id + '0'), which only works for ids 0-9 and yields
		// non-numeric runes beyond that.
		return b.Put([]byte(strconv.FormatUint(id, 10)), []byte(task))
	})
	if err != nil {
		// The original silently discarded Update errors.
		panic(err)
	}
}
// GetIncompleteTasks returns all pending tasks keyed by their stored id.
func GetIncompleteTasks() map[string]string {
	db := openDB()
	defer db.Close()
	tasks := make(map[string]string)
	// A read-only View transaction is sufficient (and cheaper) than the
	// original read-write Update for a pure scan.
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("Tasks"))
		c := b.Cursor()
		for k, task := c.First(); k != nil; k, task = c.Next() {
			tasks[string(k)] = string(task)
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
	return tasks
}
// MarkComplete removes the task stored under taskKey and renumbers every
// subsequent task down by one so the keys stay contiguous. Both steps run
// in a single transaction (the original used two, leaving a window where
// the bucket was inconsistent).
//
// NOTE(review): keys are compared lexicographically by bolt, so "10"
// sorts before "2"; the renumbering scheme assumes same-width ids —
// confirm against how CreateNewTask generates keys.
func MarkComplete(taskKey string) {
	db := openDB()
	defer db.Close()
	err := db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("Tasks"))
		if err := b.Delete([]byte(taskKey)); err != nil {
			return err
		}
		// Shift every remaining task at or after taskKey down one slot.
		// The original computed the new key as string(index + '0' - 1),
		// which is only correct for single-digit ids.
		c := b.Cursor()
		for k, task := c.Seek([]byte(taskKey)); k != nil; k, task = c.Next() {
			index, err := strconv.Atoi(string(k))
			if err != nil {
				return err
			}
			if err := b.Put([]byte(strconv.Itoa(index-1)), task); err != nil {
				return err
			}
		}
		// The highest key is now duplicated; drop it.
		if k, _ := c.Last(); k != nil {
			return b.Delete(k)
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}
|
package user
import (
"fmt"
"gopkg.in/mgo.v2/bson"
)
// User is the account model persisted in MongoDB. The plaintext Password
// and the HashedPassword bytes are excluded from bson (bson:"-") so
// credential material is never written to the database.
type User struct {
	ID             bson.ObjectId `bson:"_id,omitempty" json:"id"`
	Name           string        `json:"name"`
	Email          string        `json:"email"`
	Password       string        `json:"password,omitempty" bson:"-"`
	HashedPassword []byte        `json:"hashedpassword,omitempty" bson:"-"`
}
// String renders the user as "&{Name: ..., Email: ...}", deliberately
// omitting the credential fields.
func (u User) String() string {
	return fmt.Sprintf("&{Name: %v, Email: %v}", u.Name, u.Email)
}
|
// Package gb11643 GB 11643-1999 公民身份号码 / Citizen identification number
package gb11643
|
package main
import "sort"
//1894. 找到需要补充粉笔的学生编号
//一个班级里有n个学生,编号为 0到 n - 1。每个学生会依次回答问题,编号为 0的学生先回答,然后是编号为 1的学生,以此类推,直到编号为 n - 1的学生,然后老师会重复这个过程,重新从编号为 0的学生开始回答问题。
//
//给你一个长度为 n且下标从 0开始的整数数组chalk和一个整数k。一开始粉笔盒里总共有k支粉笔。当编号为i的学生回答问题时,他会消耗 chalk[i]支粉笔。如果剩余粉笔数量 严格小于chalk[i],那么学生 i需要 补充粉笔。
//
//请你返回需要 补充粉笔的学生 编号。
//
//
//
//示例 1:
//
//输入:chalk = [5,1,5], k = 22
//输出:0
//解释:学生消耗粉笔情况如下:
//- 编号为 0 的学生使用 5 支粉笔,然后 k = 17 。
//- 编号为 1 的学生使用 1 支粉笔,然后 k = 16 。
//- 编号为 2 的学生使用 5 支粉笔,然后 k = 11 。
//- 编号为 0 的学生使用 5 支粉笔,然后 k = 6 。
//- 编号为 1 的学生使用 1 支粉笔,然后 k = 5 。
//- 编号为 2 的学生使用 5 支粉笔,然后 k = 0 。
//编号为 0 的学生没有足够的粉笔,所以他需要补充粉笔。
//示例 2:
//
//输入:chalk = [3,4,1,2], k = 25
//输出:1
//解释:学生消耗粉笔情况如下:
//- 编号为 0 的学生使用 3 支粉笔,然后 k = 22 。
//- 编号为 1 的学生使用 4 支粉笔,然后 k = 18 。
//- 编号为 2 的学生使用 1 支粉笔,然后 k = 17 。
//- 编号为 3 的学生使用 2 支粉笔,然后 k = 15 。
//- 编号为 0 的学生使用 3 支粉笔,然后 k = 12 。
//- 编号为 1 的学生使用 4 支粉笔,然后 k = 8 。
//- 编号为 2 的学生使用 1 支粉笔,然后 k = 7 。
//- 编号为 3 的学生使用 2 支粉笔,然后 k = 5 。
//- 编号为 0 的学生使用 3 支粉笔,然后 k = 2 。
//编号为 1 的学生没有足够的粉笔,所以他需要补充粉笔。
//
//
//提示:
//
//chalk.length == n
//1 <= n <= 10^5
//1 <= chalk[i] <= 10^5
//1 <= k <= 10^9
// chalkReplacer returns the index of the first student who cannot answer:
// students 0..n-1 consume chalk[i] pieces per turn in repeating rounds,
// and student i must restock when the remaining chalk k is strictly less
// than chalk[i].
//
// Unlike the original, the caller's slice is left untouched (it used to be
// overwritten in place with prefix sums).
func chalkReplacer(chalk []int, k int) int {
	// Build prefix sums in a scratch slice.
	prefix := make([]int, len(chalk))
	sum := 0
	for i, v := range chalk {
		sum += v
		prefix[i] = sum
	}
	// Only the remainder after whole rounds matters.
	k %= sum
	// First student whose cumulative requirement exceeds k.
	return sort.SearchInts(prefix, k+1)
}
// main demonstrates chalkReplacer: five students each consuming one piece,
// k=10 wraps two full rounds and lands back on student 0.
func main() {
	println(chalkReplacer([]int{1, 1, 1, 1, 1}, 10))
}
|
package priority_queue
import (
"container/heap"
"errors"
)
// Package pq implements a priority queue data structure on top of container/heap.
// As an addition to regular operations, it allows an update of an items priority,
// allowing the queue to be used in graph search algorithms like Dijkstra's algorithm.
// Computational complexities of operations are mainly determined by container/heap.
// In addition, a map of items is maintained, allowing O(1) lookup needed for priority updates,
// which themselves are O(log n).
// RPriorityQueue represents the queue. It pairs a binary heap (ordered by
// ascending price, then ascending timestamp — see itemHeapR.Less) with a
// lookup map giving O(1) access to items for priority updates.
type RPriorityQueue struct {
	itemHeap *itemHeapR
	lookup   map[interface{}]*item
}
// NewRPriorityQueue initializes an empty priority queue.
func NewRPriorityQueue() RPriorityQueue {
	q := RPriorityQueue{itemHeap: &itemHeapR{}}
	q.lookup = make(map[interface{}]*item)
	return q
}
// Len reports how many elements are currently queued.
func (p *RPriorityQueue) Len() int {
	return len(*p.itemHeap)
}
// Insert adds a new element with the given timestamp and price. Values
// already present in the queue are ignored.
func (p *RPriorityQueue) Insert(v interface{}, time_stamp int64, price float64) {
	if _, exists := p.lookup[v]; exists {
		return
	}
	it := &item{value: v, time_stamp: time_stamp, price: price}
	p.lookup[v] = it
	heap.Push(p.itemHeap, it)
}
// Remove deletes value from the queue by first raising it to the highest
// possible priority (the time_stamp_min/price_min sentinels declared
// elsewhere in this package) and then popping the top element.
// NOTE(review): if another item already sits at the same sentinel
// priority, Pop may return that item instead of value — confirm ties
// cannot occur in practice.
func (p *RPriorityQueue) Remove(value interface{}) (interface{}, error) {
	p.UpdatePriority(value, time_stamp_min, price_min)
	return p.Pop()
}
// Pop removes and returns the highest-priority element (lowest price,
// earliest timestamp). An empty queue yields an error.
func (p *RPriorityQueue) Pop() (interface{}, error) {
	if len(*p.itemHeap) == 0 {
		return nil, errors.New("empty queue")
	}
	top := heap.Pop(p.itemHeap).(*item)
	delete(p.lookup, top.value)
	return top.value, nil
}
// UpdatePriority changes the priority of a queued item; values not present
// in the queue are ignored.
func (p *RPriorityQueue) UpdatePriority(x interface{}, time_stamp int64, price float64) {
	it, found := p.lookup[x]
	if !found {
		return
	}
	it.time_stamp = time_stamp
	it.price = price
	// Re-establish heap order starting from the item's cached position.
	heap.Fix(p.itemHeap, it.index)
}
// itemHeapR is the container/heap implementation backing RPriorityQueue.
type itemHeapR []*item

// Len returns the number of items on the heap.
func (ih *itemHeapR) Len() int {
	return len(*ih)
}
// Less orders items by ascending price, breaking price ties by ascending
// timestamp (cheaper first, then earlier first).
func (ih *itemHeapR) Less(i, j int) bool {
	a, b := (*ih)[i], (*ih)[j]
	if a.price != b.price {
		return a.price < b.price
	}
	return a.time_stamp < b.time_stamp
}
// Swap exchanges two items and keeps their cached heap indices in sync.
func (ih *itemHeapR) Swap(i, j int) {
	h := *ih
	h[i], h[j] = h[j], h[i]
	h[i].index = i
	h[j].index = j
}
// Push appends x to the heap, recording its index for later heap.Fix use.
func (ih *itemHeapR) Push(x interface{}) {
	it := x.(*item)
	it.index = len(*ih)
	*ih = append(*ih, it)
}
// Pop removes and returns the last element (container/heap has already
// swapped the root into that slot before calling this).
func (ih *itemHeapR) Pop() interface{} {
	last := len(*ih) - 1
	it := (*ih)[last]
	*ih = (*ih)[:last]
	return it
}
|
package main
import (
"fmt"
"time"
)
// User is a demo type whose Close method lets main illustrate the LIFO
// ordering of deferred calls.
type User struct {
	username string
}
// Close announces which user is being closed; invoked via defer in main.
func (u *User) Close() {
	fmt.Println(u.username, "Closed!!!")
}
// main defers two Close calls; after the sleep and the final print they
// run in LIFO order (user2 closes before user), demonstrating defer
// semantics.
func main() {
	first := &User{"liuruichao"}
	defer first.Close()
	second := &User{"liuruichao2"}
	defer second.Close()
	time.Sleep(10 * time.Second)
	fmt.Println("done")
}
|
package solutions
// searchInsert returns the index at which target occurs in the sorted
// slice nums, or the index where it would be inserted to keep the order.
// Runs in O(log n) and, unlike the original, does not panic on an empty
// slice (the original indexed nums[0] unconditionally).
func searchInsert(nums []int, target int) int {
	lo, hi := 0, len(nums)
	for lo < hi {
		mid := lo + (hi-lo)/2
		if nums[mid] < target {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return lo
}
package gosqs
import (
"fmt"
"reflect"
"testing"
"github.com/aws/aws-sdk-go/service/sns"
"github.com/aws/aws-sdk-go/service/sqs"
)
// sample is a minimal fixture used as the published payload throughout
// these tests.
type sample struct {
	Val string `json:"val"`
}
// ModelName returns the model identifier used to build routing keys such
// as "sample_created" (see TestCreate below).
func (s *sample) ModelName() string {
	return "sample"
}
// TestNewPublisher verifies publisher construction: an explicit TopicARN
// is used verbatim, and without one the ARN is assembled from region,
// account id, topic prefix and environment.
func TestNewPublisher(t *testing.T) {
	t.Run("with_arn", func(t *testing.T) {
		// Explicit ARN supplied: construction must simply succeed.
		conf := Config{
			Region:   "us-west-1",
			Key:      "key",
			Secret:   "secret",
			Hostname: "http://localhost:4100",
			TopicARN: "arn:aws:sns:local:000000000000:todolist-dev",
		}
		_, err := NewPublisher(conf)
		if err != nil {
			t.Fatalf("error creating publisher, got %v", err)
		}
	})
	t.Run("without_arn", func(t *testing.T) {
		// No ARN: expect arn:aws:sns:<region>:<account>:<prefix>-<env>.
		conf := Config{
			Region:       "local",
			Key:          "key",
			Secret:       "secret",
			Env:          "dev",
			Hostname:     "http://localhost:4100",
			AWSAccountID: "000000000000",
			TopicPrefix:  "todolist",
		}
		pub, err := NewPublisher(conf)
		if err != nil {
			t.Fatalf("error creating publisher, got %v", err)
		}
		arn := pub.(*publisher).arn
		if arn != "arn:aws:sns:local:000000000000:todolist-dev" {
			t.Errorf("did not properly create the arn name, expected %s, got %s", "arn:aws:sns:local:000000000000:todolist-dev", arn)
		}
	})
}
// retrievePubMessage pulls exactly one message from the "<env>-<queue>"
// SQS queue, deletes it so the queue is clean for the next test, and
// wraps it in a Message.
// NOTE(review): "all" is a package-level variable declared elsewhere,
// presumably the message-attribute wildcard — confirm.
func retrievePubMessage(t *testing.T, p *publisher, queue string) Message {
	name := fmt.Sprintf("%s-%s", p.env, queue)
	output, err := p.sqs.ReceiveMessage(&sqs.ReceiveMessageInput{QueueUrl: &name, MessageAttributeNames: []*string{&all}})
	if err != nil {
		t.Fatalf("unable to retrieve message, got: %v", err)
	}
	// Exactly one message must be waiting; anything else means the
	// publisher under test misbehaved.
	if len(output.Messages) != 1 {
		t.Fatalf("expected 1 message, got %d", len(output.Messages))
	}
	_, err = p.sqs.DeleteMessage(&sqs.DeleteMessageInput{QueueUrl: &name, ReceiptHandle: output.Messages[0].ReceiptHandle})
	if err != nil {
		t.Errorf("could not delete published message, got %v", err)
	}
	return newMessage(output.Messages[0])
}
// getPublisher builds a publisher wired directly to the local test stack
// (http://localhost:4100) with static dummy credentials, bypassing
// NewPublisher so tests can inspect the concrete struct.
func getPublisher(t *testing.T) *publisher {
	conf := Config{
		Region:   "us-west-1",
		Key:      "key",
		Env:      "dev",
		Secret:   "secret",
		Hostname: "http://localhost:4100",
		TopicARN: "arn:aws:sns:local:000000000000:todolist-dev",
	}
	sess, err := newSession(conf)
	if err != nil {
		t.Fatalf("could not create session, got %v", err)
	}
	return &publisher{
		sqs: sqs.New(sess),
		sns: sns.New(sess),
		arn: conf.TopicARN,
		env: conf.Env,
	}
}
// TestCreate checks that publishing a create event routes it as
// "<model>_created".
func TestCreate(t *testing.T) {
	p := getPublisher(t)
	p.Create(&sample{})
	msg := retrievePubMessage(t, p, "post-worker")
	expected := "sample_created"
	if msg.Route() != expected {
		t.Fatalf("did not create correct route, expected %s, got %s", expected, msg.Route())
	}
}
// TestDelete checks that publishing a delete event routes it as
// "<model>_deleted".
func TestDelete(t *testing.T) {
	p := getPublisher(t)
	p.Delete(&sample{})
	msg := retrievePubMessage(t, p, "post-worker")
	expected := "sample_deleted"
	if msg.Route() != expected {
		t.Fatalf("did not create correct route, expected %s, got %s", expected, msg.Route())
	}
}
// TestUpdate checks that publishing an update event routes it as
// "<model>_updated".
func TestUpdate(t *testing.T) {
	p := getPublisher(t)
	p.Update(&sample{})
	msg := retrievePubMessage(t, p, "post-worker")
	expected := "sample_updated"
	if msg.Route() != expected {
		t.Fatalf("did not create correct route, expected %s, got %s", expected, msg.Route())
	}
}
// TestModify publishes a modification event with a change-set and checks
// both the routing key ("<model>_modified") and that the payload plus
// change-set survive a decode round trip.
func TestModify(t *testing.T) {
	p := getPublisher(t)
	changes := map[string]string{
		"oldName": "newName",
	}
	p.Modify(&sample{Val: "val"}, &changes)
	msg := retrievePubMessage(t, p, "post-worker")
	expected := "sample_modified"
	if msg.Route() != expected {
		t.Fatalf("did not create correct route, expected %s, got %s", expected, msg.Route())
	}
	// Decode the body back into the struct and the change map.
	dch := map[string]string{}
	var res sample
	if err := msg.DecodeModified(&res, &dch); err != nil {
		t.Errorf("could not decode modified content, got %v", err)
	}
	if res.Val != "val" {
		t.Errorf("did not properly return struct value, expected val got %s", res.Val)
	}
	if v, ok := dch["oldName"]; !ok {
		t.Errorf("changes did not retain values, expected newName, got %s", v)
	}
}
// TestDispatch checks that a custom event name is routed as
// "<model>_<event>".
func TestDispatch(t *testing.T) {
	p := getPublisher(t)
	p.Dispatch(&sample{}, "some_event")
	msg := retrievePubMessage(t, p, "post-worker")
	expected := "sample_some_event"
	if msg.Route() != expected {
		t.Fatalf("did not create correct route, expected %s, got %s", expected, msg.Route())
	}
}
// TestDirectMessage checks that a queue-targeted Message keeps the bare
// event name as its route (no model prefix).
func TestDirectMessage(t *testing.T) {
	p := getPublisher(t)
	p.Message("post-worker", "some_event", &sample{})
	msg := retrievePubMessage(t, p, "post-worker")
	expected := "some_event"
	if msg.Route() != expected {
		t.Fatalf("did not create correct route, expected %s, got %s", expected, msg.Route())
	}
}
// TestDefaultSNSAttributs checks that the default SNS attribute map
// carries the event name under the "route" key as a String attribute.
// NOTE(review): "Attributs" looks like a typo for "Attributes"; left
// as-is because renaming a Test function changes what `go test -run`
// matches.
func TestDefaultSNSAttributs(t *testing.T) {
	st := "String"
	event := "some_event"
	att := defaultSNSAttributes(event)
	expected := map[string]*sns.MessageAttributeValue{
		"route": &sns.MessageAttributeValue{DataType: &st, StringValue: &event},
	}
	if !reflect.DeepEqual(expected, att) {
		t.Fatalf("unexpected results,\nexpected %+v,\ngot: %+v", expected, att)
	}
}
// TestDefaultSQSAttributs is the SQS counterpart of the SNS test above:
// the default attribute map must carry the event name under "route" as a
// String attribute.
func TestDefaultSQSAttributs(t *testing.T) {
	st := "String"
	event := "some_event"
	att := defaultSQSAttributes(event)
	expected := map[string]*sqs.MessageAttributeValue{
		"route": &sqs.MessageAttributeValue{DataType: &st, StringValue: &event},
	}
	if !reflect.DeepEqual(expected, att) {
		t.Fatalf("unexpected results,\nexpected %+v,\ngot: %+v", expected, att)
	}
}
|
package quark
import (
"fmt"
"net/http"
)
// defaultRecovery is the fallback error handler: it renders hc.Error()
// according to its dynamic type. An int is treated as an HTTP status
// code, error and string values become 500 responses with the message as
// body, Handler and handler-func values are invoked to produce the
// response themselves, and anything else yields a generic 500. A context
// that has already written a response is left untouched.
func defaultRecovery(hc *Context) {
	if hc.Written() {
		return
	}
	switch err := hc.Error().(type) {
	case int:
		// The int serves both as status code and in the "ERROR %d" body.
		hc.WriteText(err, fmt.Sprintf("ERROR %d", err))
	case error:
		hc.WriteText(http.StatusInternalServerError, err.Error())
	case string:
		hc.WriteText(http.StatusInternalServerError, err)
	case Handler:
		// Delegate the whole response to the error value itself.
		err.HandleRequest(hc)
	case func(hc *Context):
		err(hc)
	default:
		hc.WriteText(http.StatusInternalServerError, "Unknown error")
	}
}
|
package user_active_record
import (
"context"
"github.com/rs/xid"
"hero/database/ent"
tableUserActiveRecord "hero/database/ent/useractiverecord"
"hero/pkg/db/mysql"
"hero/pkg/logger"
"time"
)
// SelectScoreCounts aggregates user ids returned by score-count queries.
// NOTE(review): not referenced by the visible code in this file — confirm
// external callers before removing.
type SelectScoreCounts struct {
	ScoreCounts []struct {
		UserID string `json:"user_id"`
	}
}
// Create persists a new user-active record, generating a "UAR_"-prefixed
// xid as its primary key. Start/end times and created/updated timestamps
// are optional (nillable) and copied from the input record.
func Create(ctx context.Context, userActiveRecord *ent.UserActiveRecord) (*ent.UserActiveRecord, error) {
	userActiveRecordXid := xid.New()
	userActiveRecordID := "UAR_" + userActiveRecordXid.String()
	return mysql.Client().UserActiveRecord.Create().
		SetID(userActiveRecordID).
		SetUserID(userActiveRecord.UserID).
		SetActiveType(userActiveRecord.ActiveType).
		SetNillableStartedAt(userActiveRecord.StartedAt).
		SetNillableEndedAt(userActiveRecord.EndedAt).
		SetNillableCreatedAt(userActiveRecord.CreatedAt).
		SetNillableUpdatedAt(userActiveRecord.UpdatedAt).
		Save(ctx)
}
// FindByID loads a user-active record by its primary key.
func FindByID(ctx context.Context, ID string) (*ent.UserActiveRecord, error) {
	return mysql.Client().UserActiveRecord.Get(ctx, ID)
}
// CountRecord counts records of the given activeType created within the
// inclusive [startAt, endAt] window.
func CountRecord(ctx context.Context, activeType string, startAt, endAt time.Time) (int, error) {
	return mysql.Client().UserActiveRecord.Query().
		Where(
			tableUserActiveRecord.And(
				tableUserActiveRecord.ActiveType(activeType),
				tableUserActiveRecord.CreatedAtGTE(startAt),
				tableUserActiveRecord.CreatedAtLTE(endAt),
			),
		).Count(ctx)
}
// CountScore returns the number of distinct users with the given score
// whose activity window lies within [startAt, endAt]. On a query error it
// logs and returns 0.
func CountScore(ctx context.Context, score int, startAt, endAt string) int {
	rows, err := mysql.DB().QueryContext(ctx, "select count(distinct(user_id)) from user_active_records where score = ? and started_at >= ? and ended_at <= ? ", score, startAt, endAt)
	if err != nil {
		// The original fell through here and then dereferenced the nil
		// rows (rows.Close / rows.Next), panicking on any query error.
		logger.Print("err", err.Error())
		return 0
	}
	defer rows.Close()
	userIDCount := 0
	for rows.Next() {
		if err := rows.Scan(&userIDCount); err != nil {
			logger.Print("err", err.Error())
		}
	}
	// Surface iteration errors that Next swallowed.
	if err := rows.Err(); err != nil {
		logger.Print("err", err.Error())
	}
	return userIDCount
}
|
package utils
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"net/http"
)
// TlsStruct bundles the file paths needed to build a mutual-TLS HTTP
// client. When TlsVerify is false the path fields are left empty.
type TlsStruct struct {
	TlsVerify bool   // whether client TLS material was provided
	TlsCaCert string // path to the CA certificate (PEM)
	TlsCert   string // path to the client certificate (PEM)
	TlsKey    string // path to the client private key (PEM)
}

// NewTlsStruct builds a TlsStruct from a generic params map. When
// params["tlsverify"] is true, the tlscacert/tlscert/tlskey entries are
// required and copied in; otherwise an all-zero struct is returned.
// Error messages are in Spanish, matching the original API.
func NewTlsStruct(params map[string]interface{}) (*TlsStruct, error) {
	verify := false
	if raw, ok := params["tlsverify"]; ok {
		b, isBool := raw.(bool)
		if !isBool {
			return nil, fmt.Errorf("El parametro tlsverify debe ser un boolean")
		}
		verify = b
	}
	if !verify {
		// Verification disabled: no certificate material required.
		return new(TlsStruct), nil
	}
	caCert, ok := params["tlscacert"]
	if !ok || fmt.Sprint(caCert) == "" {
		return nil, errors.New("Parametro tlscacert no existe")
	}
	cert, ok := params["tlscert"]
	if !ok || fmt.Sprint(cert) == "" {
		return nil, errors.New("Parametro tlscert no existe")
	}
	key, ok := params["tlskey"]
	if !ok || fmt.Sprint(key) == "" {
		return nil, errors.New("Parametro tlskey no existe")
	}
	return &TlsStruct{
		TlsVerify: verify,
		TlsCaCert: fmt.Sprint(caCert),
		TlsCert:   fmt.Sprint(cert),
		TlsKey:    fmt.Sprint(key)}, nil
}
// BuildHttpClient constructs an *http.Client performing mutual TLS using
// the certificate, key and CA bundle referenced by tlsStruct.
func BuildHttpClient(tlsStruct *TlsStruct) (*http.Client, error) {
	// Load the client certificate/key pair.
	cert, err := tls.LoadX509KeyPair(tlsStruct.TlsCert, tlsStruct.TlsKey)
	if err != nil {
		// Wrap instead of discarding the underlying cause (the original
		// returned a fixed string and lost err).
		return nil, fmt.Errorf("loading ssl files: %w", err)
	}
	caCert, err := ioutil.ReadFile(tlsStruct.TlsCaCert)
	if err != nil {
		return nil, fmt.Errorf("loading CA cert: %w", err)
	}
	caCertPool := x509.NewCertPool()
	if !caCertPool.AppendCertsFromPEM(caCert) {
		// The original silently ignored an unparsable CA file, producing
		// a client that fails every handshake.
		return nil, errors.New("no valid certificates found in CA file")
	}
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      caCertPool,
	}
	// Note: the original called tlsConfig.BuildNameToCertificate(), which
	// is deprecated since Go 1.14 — crypto/tls now selects the
	// certificate automatically.
	transport := &http.Transport{TLSClientConfig: tlsConfig}
	return &http.Client{Transport: transport}, nil
}
|
package day5
import (
"bufio"
"fmt"
"os"
)
// main prints the highest seat ID and then the missing (your) seat ID.
// GetMaxSeatID and FindMissingSeatID are defined elsewhere in this
// package; note this is package day5, so this main is presumably invoked
// or copied from an actual main package — confirm.
func main() {
	result := GetMaxSeatID()
	fmt.Println("Highest seat ID is :", result)
	missingID := FindMissingSeatID()
	fmt.Println("Your seat ID is :", missingID)
}
// parseInput reads fileName line by line and returns the lines (one seat
// specification per line). Open and read errors panic via handleError.
func parseInput(fileName string) []string {
	file, err := os.Open(fileName)
	handleError(err)
	// The original leaked the file handle; release it when done.
	defer file.Close()
	var seats []string
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		seats = append(seats, scanner.Text())
	}
	// Surface read errors that Scan swallowed (the original ignored them).
	handleError(scanner.Err())
	return seats
}
// handleError panics on any non-nil error and is a no-op on nil.
func handleError(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
package routers
import (
"HeartBolg/controllers"
"HeartBolg/models"
"HeartBolg/models/utils"
"encoding/json"
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/context"
"github.com/astaxie/beego/orm"
"io"
"log"
"os"
"strconv"
"time"
)
// init wires every HTTP route for the blog: static redirects, the public
// front-end pages, the admin pages, and the JSON CRUD endpoints written
// as closures below.
// NOTE(review): the closures ignore json.Unmarshal errors and several
// orm errors, so malformed request bodies silently fall through with
// zero-valued models; the log.Fatal calls terminate the whole process on
// a marshalling error — confirm both are intended.
func init() {
	// /admin and /index is static resource path
	beego.Get("/admin", func(context *context.Context) {
		context.Redirect(302, "/static/login.html")
	})
	beego.Get("/index", func(context *context.Context) {
		context.Redirect(302, "/static/index.html")
	})
	//this is Front end part path
	beego.Router("/", &controllers.IndexController{})
	beego.Router("/share", &controllers.ShareController{})
	beego.Router("/about", &controllers.AboutController{})
	beego.Router("/info", &controllers.InfoController{})
	beego.Router("/tagcloud", &controllers.TagCloudController{})
	beego.Router("/articletype", &controllers.ArticleTypeController{})
	beego.Router("/search", &controllers.SearchController{})
	beego.Router("/comment", &controllers.CommentController{})
	// Increment a reader's "appreciate" counter and echo the record back.
	beego.Put("/praise", func(i *context.Context) {
		var reader models.Reader
		json.Unmarshal(i.Input.RequestBody, &reader)
		o := orm.NewOrm()
		if o.Read(&reader) == nil {
			reader.Appreciate = reader.Appreciate + 1
			if num, err := o.Update(&reader); err == nil {
				fmt.Println(num)
			}
		}
		bytes, e := json.Marshal(reader)
		if e != nil {
			// NOTE(review): log.Fatal exits the entire server process.
			log.Fatal(e)
			return
		}
		fmt.Println(string(bytes))
		i.Output.Body(bytes)
	})
	// Increment a recommendation's click counter and echo the record back.
	beego.Put("/recommendclick", func(i *context.Context) {
		var recommend models.Recommend
		json.Unmarshal(i.Input.RequestBody, &recommend)
		o := orm.NewOrm()
		if o.Read(&recommend) == nil {
			recommend.Clicks = recommend.Clicks + 1
			if num, err := o.Update(&recommend); err == nil {
				fmt.Println(num)
			}
		}
		bytes, e := json.Marshal(recommend)
		if e != nil {
			log.Fatal(e)
			return
		}
		i.Output.Body(bytes)
	})
	//===================== admin page initialization ===========================
	beego.Router("/dashboard", &controllers.DashboardController{})
	beego.Router("/article", &controllers.ArticleController{})
	beego.Router("/category", &controllers.CategoryController{})
	beego.Router("/link", &controllers.LinkController{})
	beego.Router("/album", &controllers.AlbumController{})
	beego.Router("/file", &controllers.FileController{})
	beego.Router("/setme", &controllers.SetMeController{})
	beego.Router("/tagcloud_manager", &controllers.ManagerTabCloudController{})
	beego.Router("/comments", &controllers.ManagerCommentController{})
	//======================== category management ==============================
	beego.Post("/categorys", func(i *context.Context) {
		var categorys models.Category
		json.Unmarshal(i.Input.RequestBody, &categorys)
		o := orm.NewOrm()
		insert, e := o.Insert(&categorys)
		if e != nil {
			fmt.Println(insert, e)
		} else {
			categorys.Id = int32(insert)
			bytes, _ := json.Marshal(categorys)
			i.Output.Body(bytes)
		}
	})
	beego.Delete("/categorys", func(i *context.Context) {
		var categorys models.Category
		json.Unmarshal(i.Input.RequestBody, &categorys)
		o := orm.NewOrm()
		_, e := o.Delete(&categorys)
		if e != nil {
			fmt.Println(e)
		} else {
			bytes, _ := json.Marshal(categorys)
			i.Output.Body(bytes)
		}
	})
	beego.Put("/categorys", func(i *context.Context) {
		var categorys models.Category
		json.Unmarshal(i.Input.RequestBody, &categorys)
		o := orm.NewOrm()
		insert, e := o.Update(&categorys)
		if e != nil {
			fmt.Println(insert, e)
		} else {
			// NOTE(review): Update returns the affected-row count, not an
			// id; assigning it to categorys.Id looks wrong — confirm.
			categorys.Id = int32(insert)
			bytes, _ := json.Marshal(categorys)
			i.Output.Body(bytes)
		}
	})
	beego.Get("/categorys", func(i *context.Context) {
		var categorys models.Category
		id := i.Input.Query("id")
		atoi, _ := strconv.Atoi(id)
		categorys.Id = int32(atoi)
		o := orm.NewOrm()
		err := o.Read(&categorys)
		if err != nil {
			fmt.Println(err)
		} else {
			bytes, _ := json.Marshal(categorys)
			i.Output.Body(bytes)
		}
	})
	beego.Post("/cate_pageables", func(i *context.Context) {
		var pageables utils.CategoryPageables
		json.Unmarshal(i.Input.RequestBody, &pageables)
		o := orm.NewOrm()
		// Fetch the total row count for pagination.
		count, _ := o.QueryTable("category").Count()
		// Pagination bookkeeping.
		//pageables.TotalNumber = 6
		//pageables.CurrentPage = 1
		pageables.SetInitialNumber()
		pageables.TotalCount = int(count)
		pageables.SetPageCount()
		pageables.SetPageableData()
		bytes, _ := json.Marshal(pageables)
		i.Output.Body(bytes)
	})
	//======================== article management ===============================
	beego.Post("/art_pageables", func(i *context.Context) {
		var pageables utils.ArticlePageables
		json.Unmarshal(i.Input.RequestBody, &pageables)
		o := orm.NewOrm()
		// Fetch the total row count for pagination.
		count, _ := o.QueryTable("article").Count()
		// Pagination bookkeeping.
		//pageables.TotalNumber = 6
		//pageables.CurrentPage = 1
		pageables.SetInitialNumber()
		pageables.TotalCount = int(count)
		pageables.SetPageCount()
		pageables.SetPageableData()
		bytes, _ := json.Marshal(pageables)
		i.Output.Body(bytes)
	})
	beego.Delete("/articles", func(i *context.Context) {
		var article models.Article
		json.Unmarshal(i.Input.RequestBody, &article)
		o := orm.NewOrm()
		_, e := o.Delete(&article)
		if e != nil {
			fmt.Println(e)
		} else {
			bytes, _ := json.Marshal(article)
			i.Output.Body(bytes)
		}
	})
	// Upload an article cover image into a year/month/day directory tree.
	beego.Post("/cover_file", func(i *context.Context) {
		file, header, err := i.Request.FormFile("cover")
		if err != nil {
			// NOTE(review): on error, header may be nil here — confirm.
			fmt.Println(header.Filename, ":", err)
		}
		path := beego.AppConfig.String("filepath") + "/" + strconv.Itoa(time.Now().Year()) + "/" + time.Now().Month().String() + "/" + strconv.Itoa(time.Now().Day())
		all := os.MkdirAll(path, 0777)
		if all != nil {
			fmt.Println(all)
		}
		// Prefix with the unix timestamp to avoid filename collisions.
		filepath := path + "/" + strconv.FormatInt(time.Now().Unix(), 10) + header.Filename
		defer file.Close()
		f, e := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
		if e != nil {
			fmt.Println(header.Filename, ":", e)
		}
		defer f.Close()
		io.Copy(f, file)
		var article models.Article
		// Strip the leading "." so the stored path is server-relative.
		article.Cover = filepath[1:len(filepath)]
		bytes, _ := json.Marshal(article)
		i.Output.Body(bytes)
	})
	beego.Get("/categoryall", func(i *context.Context) {
		var categoryall []models.Category
		o := orm.NewOrm()
		all, err := o.QueryTable("category").All(&categoryall)
		if err != nil {
			fmt.Println(err, all)
		} else {
			bytes, _ := json.Marshal(categoryall)
			i.Output.Body(bytes)
		}
	})
	// Create an article together with its (initially empty) reader stats.
	beego.Post("/articles", func(i *context.Context) {
		var a models.Article
		json.Unmarshal(i.Input.RequestBody, &a)
		var r models.Reader
		o := orm.NewOrm()
		i2, i3 := o.Insert(&r)
		if i3 != nil {
			fmt.Println(i2, i3)
		}
		a.Readers = &r
		fmt.Println(a.Category, a, a.Readers)
		insert, e := o.Insert(&a)
		if e != nil {
			fmt.Println(insert, e)
		}
		bytes, _ := json.Marshal(a)
		i.Output.Body(bytes)
	})
}
|
package odoo
import (
"fmt"
)
// BaseLanguageImport represents the base.language.import odoo model.
// NOTE(review): every xmlrpc tag spells "omptempty" — if "omitempty" was
// intended, empty fields are currently still marshalled. The typo is
// repeated across the generated code, so confirm against the generator
// before correcting it anywhere.
type BaseLanguageImport struct {
	LastUpdate  *Time     `xmlrpc:"__last_update,omptempty"`
	Code        *String   `xmlrpc:"code,omptempty"`
	CreateDate  *Time     `xmlrpc:"create_date,omptempty"`
	CreateUid   *Many2One `xmlrpc:"create_uid,omptempty"`
	Data        *String   `xmlrpc:"data,omptempty"`
	DisplayName *String   `xmlrpc:"display_name,omptempty"`
	Filename    *String   `xmlrpc:"filename,omptempty"`
	Id          *Int      `xmlrpc:"id,omptempty"`
	Name        *String   `xmlrpc:"name,omptempty"`
	Overwrite   *Bool     `xmlrpc:"overwrite,omptempty"`
	WriteDate   *Time     `xmlrpc:"write_date,omptempty"`
	WriteUid    *Many2One `xmlrpc:"write_uid,omptempty"`
}
// BaseLanguageImports represents an array of base.language.import models.
type BaseLanguageImports []BaseLanguageImport

// BaseLanguageImportModel is the odoo model name used in RPC calls.
const BaseLanguageImportModel = "base.language.import"
// Many2One converts a BaseLanguageImport to a *Many2One reference.
// NOTE(review): panics if bli.Id is nil — callers must ensure the record
// was read with its id populated.
func (bli *BaseLanguageImport) Many2One() *Many2One {
	return NewMany2One(bli.Id.Get(), "")
}
// CreateBaseLanguageImport creates a new base.language.import record and
// returns its id (-1 on error or when the server returns no id).
func (c *Client) CreateBaseLanguageImport(bli *BaseLanguageImport) (int64, error) {
	ids, err := c.CreateBaseLanguageImports([]*BaseLanguageImport{bli})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateBaseLanguageImports creates new base.language.import records and
// returns their ids. (The original comment repeated the singular name.)
func (c *Client) CreateBaseLanguageImports(blis []*BaseLanguageImport) ([]int64, error) {
	var vv []interface{}
	for _, v := range blis {
		vv = append(vv, v)
	}
	return c.Create(BaseLanguageImportModel, vv)
}
// UpdateBaseLanguageImport updates an existing base.language.import
// record, identified by bli.Id.
func (c *Client) UpdateBaseLanguageImport(bli *BaseLanguageImport) error {
	return c.UpdateBaseLanguageImports([]int64{bli.Id.Get()}, bli)
}
// UpdateBaseLanguageImports updates existing base.language.import records.
// All records (represented by ids) will be updated with the same bli values.
func (c *Client) UpdateBaseLanguageImports(ids []int64, bli *BaseLanguageImport) error {
	return c.Update(BaseLanguageImportModel, ids, bli)
}
// DeleteBaseLanguageImport deletes an existing base.language.import record by id.
func (c *Client) DeleteBaseLanguageImport(id int64) error {
	return c.DeleteBaseLanguageImports([]int64{id})
}
// DeleteBaseLanguageImports deletes existing base.language.import records by ids.
func (c *Client) DeleteBaseLanguageImports(ids []int64) error {
	return c.Delete(BaseLanguageImportModel, ids)
}
// GetBaseLanguageImport gets an existing base.language.import record by id.
// It returns an error when the id does not exist.
func (c *Client) GetBaseLanguageImport(id int64) (*BaseLanguageImport, error) {
	blis, err := c.GetBaseLanguageImports([]int64{id})
	if err != nil {
		return nil, err
	}
	// GetBaseLanguageImports never returns a nil pointer on success, so the
	// previous `blis != nil` guard was dead; only the length matters.
	if len(*blis) > 0 {
		return &((*blis)[0]), nil
	}
	return nil, fmt.Errorf("id %v of base.language.import not found", id)
}
// GetBaseLanguageImports gets existing base.language.import records by ids.
func (c *Client) GetBaseLanguageImports(ids []int64) (*BaseLanguageImports, error) {
	records := &BaseLanguageImports{}
	err := c.Read(BaseLanguageImportModel, ids, nil, records)
	if err != nil {
		return nil, err
	}
	return records, nil
}
// FindBaseLanguageImport finds the first base.language.import record matching
// the criteria. It returns an error when no record matches.
func (c *Client) FindBaseLanguageImport(criteria *Criteria) (*BaseLanguageImport, error) {
	blis := &BaseLanguageImports{}
	if err := c.SearchRead(BaseLanguageImportModel, criteria, NewOptions().Limit(1), blis); err != nil {
		return nil, err
	}
	// blis is always non-nil here; the previous nil check was dead code.
	if len(*blis) > 0 {
		return &((*blis)[0]), nil
	}
	return nil, fmt.Errorf("base.language.import was not found with criteria %v", criteria)
}
// FindBaseLanguageImports finds base.language.import records by querying it
// and filtering it with criteria and options.
func (c *Client) FindBaseLanguageImports(criteria *Criteria, options *Options) (*BaseLanguageImports, error) {
	found := &BaseLanguageImports{}
	err := c.SearchRead(BaseLanguageImportModel, criteria, options, found)
	if err != nil {
		return nil, err
	}
	return found, nil
}
// FindBaseLanguageImportIds finds ids of records matching criteria, filtered
// by options. On failure it returns an empty (non-nil) slice plus the error.
func (c *Client) FindBaseLanguageImportIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(BaseLanguageImportModel, criteria, options)
	if err == nil {
		return ids, nil
	}
	return []int64{}, err
}
// FindBaseLanguageImportId finds the id of the first record matching criteria
// and options; -1 plus an error when nothing matches.
func (c *Client) FindBaseLanguageImportId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(BaseLanguageImportModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	}
	return -1, fmt.Errorf("base.language.import was not found with criteria %v and options %v", criteria, options)
}
|
package middlewares
import (
"fmt"
"net/http"
"time"
"github.com/fatih/color"
"github.com/juliotorresmoreno/unravel-server/helper"
)
// Cors wraps handler and short-circuits CORS preflight (OPTIONS) requests:
// it answers them with the CORS headers and 200 OK instead of forwarding
// them to the wrapped handler.
func Cors(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "OPTIONS" {
			handler.ServeHTTP(w, r)
			return
		}
		helper.Cors(w, r)
		w.WriteHeader(http.StatusOK)
	})
}
// logger is an http.ResponseWriter wrapper that records the status code the
// wrapped handler writes, so it can be reported after the request finishes.
type logger struct {
	*data
	http.ResponseWriter
}
// data holds the recorded state; it is a pointer member so that value copies
// of logger (passed into ServeHTTP) still share the same status code.
type data struct {
	statusCode int
}
// Write forwards the body bytes to the underlying ResponseWriter.
func (el logger) Write(p []byte) (int, error) {
	return el.ResponseWriter.Write(p)
}
// Header forwards to the underlying ResponseWriter's header map.
func (el logger) Header() http.Header {
	return el.ResponseWriter.Header()
}
// WriteHeader records the status code before forwarding it.
func (el logger) WriteHeader(statusCode int) {
	el.data.statusCode = statusCode
	el.ResponseWriter.WriteHeader(statusCode)
}
// Logger wraps handler and prints, for every request, the response status
// (colored by status class), the HTTP method, the path and the elapsed
// time in milliseconds.
func Logger(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		log := logger{
			data:           &data{},
			ResponseWriter: w,
		}
		handler.ServeHTTP(log, r)
		elapsedMs := int(time.Since(start) / time.Millisecond)
		// If the handler never called WriteHeader, net/http sends an
		// implicit 200; report that instead of 0. This also fixes the
		// nil-pointer panic the old code hit at d.Printf when the status
		// stayed below 100 and d was never assigned.
		status := log.data.statusCode
		if status == 0 {
			status = http.StatusOK
		}
		var d *color.Color
		switch {
		case status >= 400:
			d = color.New(color.FgRed, color.Bold)
		case status >= 300:
			d = color.New(color.FgYellow, color.Bold)
		case status >= 200:
			d = color.New(color.FgGreen, color.Bold)
		default:
			d = color.New(color.FgBlue, color.Bold)
		}
		d.Printf("%v ", status)
		fmt.Println(r.Method, r.URL.Path, elapsedMs, "ms")
	})
}
|
package ffmpeg
import "C"
// Rational is a rational number (Num/Den), mirroring ffmpeg's AVRational.
type Rational struct {
	Num,
	Den int
}
// ctype converts the Go Rational into the equivalent C AVRational value.
func (rational Rational) ctype() C.struct_AVRational {
	return C.struct_AVRational{C.int(rational.Num), C.int(rational.Den)}
}
|
package interfaces
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"time"
"pocket-organize-app/entity"
)
// Get_access_token exchanges the OAuth request code (form value "code") for a
// Pocket access token and writes the raw authorize response body to w.
func Get_access_token(w http.ResponseWriter, r *http.Request) {
	access_token_param := new(entity.AccessTokenParam)
	// NOTE(review): "COSUMER" looks misspelled — confirm this matches the
	// variable name actually set in the deployment environment before fixing.
	access_token_param.ConsumerKey = os.Getenv("POCKET_COSUMER_KEY")
	access_token_param.Code = r.FormValue("code")
	access_token_json, err := json.Marshal(access_token_param)
	if err != nil {
		http.Error(w, "request encoding error", http.StatusInternalServerError)
		return
	}
	fmt.Printf("[access_token] %s\n", string(access_token_json))
	req, err := http.NewRequest("POST", "https://getpocket.com/v3/oauth/authorize", bytes.NewBuffer(access_token_json))
	if err != nil {
		// The old code ignored this error and would have dereferenced a nil req.
		http.Error(w, "request build error", http.StatusInternalServerError)
		return
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Accept", "application/json")
	client := &http.Client{Timeout: time.Duration(180) * time.Second}
	res, err := client.Do(req)
	if err != nil {
		fmt.Println("http response error")
		log.Fatal(err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		// Check the read error BEFORE parsing the body (old code parsed first).
		fmt.Println("http request error")
		log.Fatal(err)
	}
	var token_response entity.AccessTokenResponse
	if err := json.Unmarshal(body, &token_response); err != nil {
		log.Fatal(err)
	}
	fmt.Println("http request success")
	fmt.Println(token_response.AccessToken)
	w.Write(body)
}
|
package main
import (
"fmt"
"log"
"net/http"
"github.com/JieeiroSst/LapTRWeb/controllers/admin"
"github.com/gorilla/mux"
)
// main wires the static file server and every admin route, then serves on :9000.
func main() {
	route := mux.NewRouter()
	route.Handle("/", http.FileServer(http.Dir("./public")))
	// Route table: path -> handler. Registration order is preserved, which is
	// what gorilla/mux uses to resolve matches.
	adminRoutes := []struct {
		path    string
		handler func(http.ResponseWriter, *http.Request)
	}{
		{"/admin", admin.HomeAdmin},
		{"/admin/auth", admin.IndexAuthor},
		{"/admin/auth/show", admin.IndexSignleauthor},
		{"/admin/auth/delete", admin.DeleteAuth},
		{"/admin/auth/edit", admin.EditAuthor},
		{"/admin/auth/create", admin.CreateAuth},
		{"/admin/auth/insert", admin.InsertAuth},
		{"/admin/auth/update", admin.UpdateAuth},
		{"/admin/book", admin.ShowListBook},
		{"/admin/book/show", admin.ShowSingleBook},
		{"/admin/book/delete", admin.DeleteBook},
		{"/admin/book/edit", admin.EditBook},
		{"/admin/book/create", admin.CreateBook},
		{"/admin/book/insert", admin.InsertBook},
		{"/admin/book/update", admin.UpdateBook},
		{"/admin/buy-sell", admin.ShowListBuySell},
		{"/admin/buy-sell/show", admin.ShowSingleSell},
		{"/admin/buy-sell/delete", admin.DeleteSell},
		{"/admin/buy-sell/edit", admin.EditBuySell},
		{"/admin/buy-sell/create", admin.CreateBuysell},
		{"/admin/buy-sell/insert", admin.InsertBuySell},
		{"/admin/course", admin.ShowListcourse},
		{"/admin/course/show", admin.ShowSigleCourse},
		{"/admin/course/delete", admin.DeleteCourse},
		{"/admin/course/create", admin.CreateCourse},
		{"/admin/course/insert", admin.InsertCourse},
		{"/admin/course/update", admin.UpdateCourse},
		{"/admin/library", admin.ShowListLibrary},
		{"/admin/library/show", admin.ShowSingleLibrary},
		{"/admin/library/delete", admin.DeleteLibrary},
		{"/admin/library/edit", admin.EditLibrary},
		{"/admin/library/create", admin.CreateLibrary},
		{"/admin/library/update", admin.UpdateLibrary},
		{"/admin/profile", admin.ShowListProfile},
		{"/admin/profile/show", admin.ShowSingleProfile},
		{"/admin/profile/delete", admin.DeleteProfile},
		{"/admin/profile/edit", admin.EditProfile},
		{"/admin/profile/update", admin.UpdateProdile},
		{"/admin/selle", admin.ShowListSeller},
		{"/admin/selle/show", admin.ShowSingleSell},
		{"/admin/selle/delete", admin.DeleteSell},
	}
	for _, rt := range adminRoutes {
		route.HandleFunc(rt.path, rt.handler)
	}
	fmt.Println("server running port 9000...")
	log.Fatal(http.ListenAndServe(":9000", route))
}
|
package config
// TCPport is the listen address (port part only) for the TCP service.
var TCPport = ":54809"
// UDPport is the listen address (port part only) for the UDP service.
var UDPport = ":54810"
|
package main
import "fmt"
import "os"
import "bufio"
import "strings"
// Name holds one person's first and last name as read from the input file.
// (The stray C-style semicolons were removed; gofmt rejects them.)
type Name struct {
	fname string
	lname string
}
func main() {
fmt.Printf("Please enter the name file: ")
name_slice := make([]Name, 0, 0)
var file_path string
_, err := fmt.Scan(&file_path)
file, err := os.Open(file_path)
if err != nil {
fmt.Printf("Exiting cz %v", err)
os.Exit(3)
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
name := strings.Split(line, " ")
name_slice = append(name_slice, Name{fname: name[0], lname: name[1]})
}
for _, name := range name_slice {
fmt.Printf("First name: %s Last Name: %s \n", name.fname, name.lname)
}
} |
package main
import (
"path/filepath"
"os"
"fmt"
"strings"
)
// getCurrentDirectory returns the absolute directory containing the running
// executable, with backslashes normalized to forward slashes.
func getCurrentDirectory() string {
	executableDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		panic(err)
	}
	normalized := strings.Replace(executableDir, "\\", "/", -1)
	return normalized
}
// main prints the normalized directory that contains the executable.
func main() {
	fmt.Println(getCurrentDirectory())
}
|
package backend
import (
"fmt"
"os"
"github.com/lidouf/gst"
)
// Names given to the gst elements when they are created in NewPipeline.
const (
	srcName = "src"
	decoderName = "decoder"
	converterName = "converter"
	sinkName = "sink"
)
// Pipeline wraps a gst pipeline: filesrc -> decodebin -> videoconvert -> aasink.
type Pipeline struct {
	*gst.Pipeline
	src *gst.Element       // "filesrc" reading the input file
	decoder *gst.Element   // "decodebin" with dynamic pads
	converter *gst.Element // "videoconvert"
	sink *gst.Element      // "aasink" ASCII-art output
}
// NewPipeline builds the filesrc -> decodebin -> videoconvert -> aasink
// pipeline. The (error, *Pipeline) return order is kept for compatibility
// with existing callers. decodebin's pads appear dynamically, so they are
// linked to the converter via the "pad-added" signal.
func NewPipeline() (error, *Pipeline) {
	p := &Pipeline{}
	p.Pipeline = gst.NewPipeline("main")
	if p.Pipeline == nil {
		return fmt.Errorf("can not create pipeline"), nil
	}
	// newElement creates a gst element, reporting which factory failed.
	newElement := func(factory, name string) (*gst.Element, error) {
		e := gst.ElementFactoryMake(factory, name)
		if e == nil {
			return nil, fmt.Errorf("can not create %s", factory)
		}
		return e, nil
	}
	var err error
	if p.src, err = newElement("filesrc", srcName); err != nil {
		return err, nil
	}
	if p.decoder, err = newElement("decodebin", decoderName); err != nil {
		return err, nil
	}
	if p.converter, err = newElement("videoconvert", converterName); err != nil {
		return err, nil
	}
	if p.sink, err = newElement("aasink", sinkName); err != nil {
		return err, nil
	}
	p.Pipeline.Add(p.src, p.decoder, p.converter, p.sink)
	p.src.Link(p.decoder)
	p.converter.Link(p.sink)
	// Link decodebin's dynamically created pads to the converter on demand.
	p.decoder.ConnectNoi("pad-added", p.connectDynPad, p.converter.GetStaticPad("sink"))
	return nil, p
}
// SetSource points the pipeline's filesrc element at the given media file.
func (p *Pipeline) SetSource(file string) {
	p.src.SetProperty("location", file)
}
// connectDynPad handles decodebin's "pad-added" signal: it links the newly
// created pad to targetPad when the two are compatible, logging to stderr
// otherwise.
func (p *Pipeline) connectDynPad(targetPad, createdPad *gst.Pad) {
	if createdPad.CanLink(targetPad) {
		if createdPad.Link(targetPad) != gst.PAD_LINK_OK {
			fmt.Fprintln(os.Stderr, "link error:", createdPad.GetName(), targetPad.GetName())
		}
		return
	}
	fmt.Fprintln(os.Stderr, "can't link:", createdPad.GetName(), targetPad.GetName())
}
|
package main
import "fmt"
var a = "包级别的变量a" // package scope
var b, c string = "包级别的变量b", "包级别变量c" // package scope
var d string // declared here, assigned in main; package scope
// main demonstrates variable scopes and the various literal forms in Go.
func main() {
	d = "函数级别变量d" // declared above at package level; assigned here
	var e = 42 // function scope — all variables below have function scope:
	f := 42
	g := "函数变量g"
	h, i := "函数变量h", "函数变量i"
	j, k, l, m := 1, true, 22.2, 'm' // single quotes: rune literal
	n := "n" // double quotes: interpreted string literal
	o := `o` // back ticks: raw string literal
	fmt.Println("a - ", a)
	fmt.Println("b - ", b)
	fmt.Println("c - ", c)
	fmt.Println("d - ", d)
	fmt.Println("e - ", e)
	fmt.Println("f - ", f)
	fmt.Println("g - ", g)
	fmt.Println("h - ", h)
	fmt.Println("i - ", i)
	fmt.Println("j - ", j)
	fmt.Println("k - ", k)
	fmt.Println("l - ", l)
	fmt.Println("m - ", m)
	fmt.Println("n - ", n)
	fmt.Println("o - ", o)
	fmt.Println(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o)
}
|
package main
import (
"bufio"
"fmt"
"os"
"runtime/pprof"
"sort"
)
// Buffered stdin/stdout used by scan/printf for fast contest-style I/O.
// main must flush stdout before exiting (it defers stdout.Flush()).
var stdin *bufio.Reader
var stdout *bufio.Writer
func init() {
	stdin = bufio.NewReader(os.Stdin)
	stdout = bufio.NewWriter(os.Stdout)
}
// scan reads whitespace-separated values from buffered stdin.
func scan(args ...interface{}) (int, error) {
	return fmt.Fscan(stdin, args...)
}
// printf writes formatted output to buffered stdout (flushed at exit).
func printf(format string, args ...interface{}) {
	fmt.Fprintf(stdout, format, args...)
}
// MinInt returns the smallest of its arguments, or 0 when given none.
func MinInt(args ...int) int {
	if len(args) == 0 {
		return 0
	}
	smallest := args[0]
	for _, v := range args[1:] {
		if v < smallest {
			smallest = v
		}
	}
	return smallest
}
// MaxInt returns the largest of its arguments, or 0 when given none.
func MaxInt(args ...int) int {
	if len(args) == 0 {
		return 0
	}
	largest := args[0]
	for _, v := range args[1:] {
		if v > largest {
			largest = v
		}
	}
	return largest
}
// log2 returns floor(log2(n)) for n >= 1, and 0 for n < 2.
func log2(n int) uint {
	var exp uint
	for pow := 2; pow <= n; pow *= 2 {
		exp++
	}
	return exp
}
// Student pairs an input position (id) with the student's score (value).
type Student struct {
	id, value int
}
// StudentArray implements sort.Interface, ordering by descending value.
type StudentArray []Student
func (a StudentArray) Len() int { return len(a) }
// Less uses > so sort.Sort yields highest score first.
func (a StudentArray) Less(i, j int) bool {
	return a[i].value > a[j].value
}
func (a StudentArray) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}
// Wrong answer1: last diffCD
// Wrong answer2: isAllowed(, maxC)
// Wrong answer3: a <= 2 * min(b, c).
// Wrong answer4: maxA.
var n int
var s [3000]Student
// diff[i] = s[i].value - s[i+1].value (s sorted descending); diff[n-1] = s[n-1].value
var diff [3000]int
// rangeMax[j][i] = pos of max(diff[i]..diff[i+(1<<j) - 1])
// (sparse table; the old comment said "rangeMin" but it stores max positions)
var rangeMax [12][3000]int
var res[3000]int
// preprocessRangeMax fills the rangeMax sparse table over diff[0..n-1] in
// O(n log n) so getRangeMaxPos can answer range-maximum queries in O(1).
func preprocessRangeMax() {
	// Level 0: each single-element range is its own maximum.
	for i := 0; i < n; i++ {
		rangeMax[0][i] = i
	}
	maxJ := log2(n)
	// Level j combines two adjacent level j-1 ranges of length 1<<(j-1).
	for j := uint(1); j <= maxJ; j++ {
		for i := 0; i < n; i++ {
			t := rangeMax[j-1][i]
			if i + (1<<(j-1)) < n {
				t2 := rangeMax[j-1][i + (1<<(j-1))]
				if diff[t2] > diff[t] {
					t = t2
				}
			}
			rangeMax[j][i] = t
		}
	}
}
// getRangeMaxPos returns the position of the maximum diff value in the
// inclusive range [start, end], using two overlapping sparse-table lookups.
// preprocessRangeMax must have been called first.
func getRangeMaxPos(start, end int) int {
	level := log2(end - start + 1)
	t1 := rangeMax[level][start]
	t2 := rangeMax[level][end - (1<<level)+1]
	if diff[t1] > diff[t2] {
		return t1
	}
	return t2
}
// isAllowed reports whether group sizes a and b are valid: neither may
// exceed twice the smallest of a, b and maxC.
func isAllowed(a, b, maxC int) bool {
	limit := 2 * MinInt(a, b, maxC)
	return a <= limit && b <= limit
}
// getMaxB binary-searches the largest feasible second-group size b for a
// given first-group size a (feasibility per isAllowed with c = n - a - b).
func getMaxB(a int) int {
	low := ceilDiv(a, 2)
	// b may be at most 2a, and must leave room for a valid third group.
	high := MinInt(a * 2, 2 * (n - a) / 3)
	for low + 1 < high {
		mid := (low + high) / 2
		if isAllowed(a, mid, n - a - mid) {
			low = mid
		} else {
			high = mid - 1
		}
	}
	// The loop can terminate with high == low+1 untested; check it explicitly.
	if low + 1 == high && isAllowed(a, high, n - a - high) {
		return high
	}
	return low
}
// isAllowedA reports whether a first-group size of a is feasible for the
// current n: half of the remaining students (rounded down), doubled, must
// still cover a.
func isAllowedA(a int) bool {
	remaining := n - a
	return (remaining / 2) * 2 >= a
}
// getMaxA binary-searches the largest feasible first-group size in [1, n/2]
// (feasibility per isAllowedA).
func getMaxA() int {
	low := 1
	high := n / 2
	for low + 1 < high {
		mid := (low + high) / 2
		if isAllowedA(mid) {
			low = mid
		} else {
			high = mid - 1
		}
	}
	// The loop can terminate with high == low+1 untested; check it explicitly.
	if low + 1 == high && isAllowedA(high) {
		return high
	}
	return low
}
// ceilDiv returns the ceiling of a divided by b.
func ceilDiv(a, b int) int {
	quotient := a / b
	if quotient*b < a {
		return quotient + 1
	}
	return quotient
}
// main solves each test case: sort students by score descending, then pick
// group sizes a/b/c (levels 1/2/3) maximizing the score gaps at the group
// boundaries, lexicographically by (diffAB, diffBC, diffCD). Students left
// over get level -1.
func main() {
	defer stdout.Flush()
	// Flip to true locally to capture a CPU profile.
	if false {
		f, _ := os.Create("profile")
		defer f.Close()
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}
	for true {
		if _, err := scan(&n); err != nil {
			break
		}
		for i := 0; i < n; i++ {
			s[i].id = i
			scan(&s[i].value)
		}
		sort.Sort(StudentArray(s[0:n]))
		// diff[i] is the score gap after position i; diff[n-1] closes the list.
		for i := 0; i < n - 1; i++ {
			diff[i] = s[i].value - s[i+1].value
		}
		diff[n-1] = s[n-1].value
		preprocessRangeMax()
		// a is length of level 1
		// b is length of level 2
		// c is length of level 3
		bestA := -1
		bestB := -1
		bestC := -1
		bestDiffAB := -1
		bestDiffBC := -1
		bestDiffCD := -1
		maxA := getMaxA()
		// The best achievable A/B gap is the max diff over all legal a.
		bestDiffAB = diff[getRangeMaxPos(0, maxA - 1)]
		for a := 1; a <= maxA; a++ {
			diffAB := diff[a-1]
			// Only a values achieving the best A/B gap can win.
			if diffAB < bestDiffAB {
				continue
			}
			minB := ceilDiv(a, 2)
			maxB := getMaxB(a)
			//printf("a = %d, n = %d, maxB = %d\n", a, n, maxB)
			tmpBestDiffBC := diff[getRangeMaxPos(a + minB - 1, a + maxB - 1)]
			/*
			printf("a = %d, tmpBestDiffBC = %d, bestDiffBC = %d\n", a, tmpBestDiffBC,
			bestDiffBC)
			printf("minB %d, maxB %d, pos = %d\n", minB, maxB,
			getRangeMaxPos(a + minB - 1, a + maxB - 1))
			*/
			// Prune: this a cannot improve the B/C gap.
			if bestDiffBC > tmpBestDiffBC {
				continue
			}
			for b := minB; b <= maxB; b++ {
				diffBC := diff[a + b - 1]
				if diffBC < tmpBestDiffBC {
					continue
				}
				minC := MaxInt(ceilDiv(a, 2), ceilDiv(b, 2))
				maxC := MinInt(a * 2, b * 2, n - a - b)
				//printf("minC %d, maxC %d\n", minC, maxC)
				// Best c for this (a, b) is the position of the max C/D gap.
				pos := getRangeMaxPos(a + b + minC - 1, a + b + maxC - 1)
				c := pos + 1 - a - b
				diffCD := diff[pos]
				if diffBC > bestDiffBC || diffCD > bestDiffCD {
					bestA = a
					bestB = b
					bestC = c
					bestDiffBC = diffBC
					bestDiffCD = diffCD
				}
			}
		}
		/*
		printf("diff: ")
		for i := 0; i < n; i++ {
			printf("%d ", diff[i])
		}
		printf("\n")
		printf("bestA %d, b %d c %d, diffAB %d BC %d CD %d\n",
		bestA, bestB, bestC, bestDiffAB, bestDiffBC, bestDiffCD)
		*/
		// Map each sorted position back to its original student id and level.
		for i := 0; i < n; i++ {
			id := s[i].id
			level := -1
			if i < bestA {
				level = 1
			} else if i < bestA + bestB {
				level = 2
			} else if i < bestA + bestB + bestC {
				level = 3
			}
			res[id] = level
		}
		for i := 0; i < n; i++ {
			if i > 0 {
				printf(" ")
			}
			printf("%d", res[i])
		}
		printf("\n")
	}
}
// Copyright (C) 2018 Satoshi Konno. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package echonet
const (
	// NodeManufacturerUnknown is the unknown-manufacturer code, aliased from
	// the object-level constant.
	NodeManufacturerUnknown = ObjectManufacturerUnknown
)
// Node is an interface for Echonet node.
type Node interface {
	// GetObjects returns all objects.
	GetObjects() []*Object
	// GetObject returns the specified object.
	GetObject(code ObjectCode) (*Object, error)
	// AddDevice adds a new device into the node.
	AddDevice(dev *Device) error
	// GetDevices returns all device objects.
	GetDevices() []*Device
	// GetDevice returns the specified device object.
	GetDevice(code ObjectCode) (*Device, error)
	// AddProfile adds a new profile object into the node.
	AddProfile(prof *Profile) error
	// GetProfiles returns all profile objects.
	GetProfiles() []*Profile
	// GetProfile returns the specified profile object.
	GetProfile(code ObjectCode) (*Profile, error)
	// GetAddress returns the bound address.
	GetAddress() string
	// GetPort returns the bound port.
	GetPort() int
	// Equals returns true whether the specified node is same, otherwise false.
	Equals(Node) bool
}
// nodeEquals returns true whether the specified node is same, otherwise
// false. Two nodes are the same when both port and address match.
func nodeEquals(node1, node2 Node) bool {
	samePort := node1.GetPort() == node2.GetPort()
	sameAddr := node1.GetAddress() == node2.GetAddress()
	return samePort && sameAddr
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"context"
"strings"
"time"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/arc/storage"
"chromiumos/tast/local/chrome/mtp"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/testing"
)
// mtpURIPrefix is the expected prefix of the Content URI for the Android
// device under test. The full URI would contain the device's serial number,
// which would be different for different devices, so tests only match on
// this prefix.
const (
	mtpURIPrefix = "content://org.chromium.arc.chromecontentprovider/externalfile%3Afileman-mtp-mtp"
)
// arc.Mtp / arc.Mtp.vm tast tests depend on the use of actual Android device in the lab.
// As part of the test, a file will be pushed and read from it. Therefore, these tests have
// the following constraints:
// 1. It can only be run on a special lab setup.
// 2. The device folder names etc being used are hard-coded for the setup.
// init registers the MTP test (p and vm variants) with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: MTP,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc: "ARC++/ARCVM Android app can read files on external Android device (with MTP) via FilesApp",
		Contacts: []string{
			"youkichihosoi@chromium.org",
			"arc-storage@google.com",
			"cros-arc-te@google.com",
		},
		// Runs only on the dedicated MTP lab setup (see comment above).
		Attr: []string{"group:mtp"},
		SoftwareDeps: []string{"chrome"},
		Timeout: 5 * time.Minute,
		Fixture: "mtpWithAndroid",
		Params: []testing.Param{
			{
				ExtraSoftwareDeps: []string{"android_p"},
			}, {
				Name: "vm",
				ExtraSoftwareDeps: []string{"android_vm"},
			},
		},
	})
}
// MTP verifies that an Android app can open a file exposed over MTP via the
// Files app, checking the action, the content URI prefix/suffix and the
// file content reported by the test app.
func MTP(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(*mtp.FixtData).Chrome
	tconn := s.FixtValue().(*mtp.FixtData).TestConn
	a, err := arc.New(ctx, s.OutDir())
	if err != nil {
		s.Fatal("Failed to start ARC: ", err)
	}
	//TODO(b/187740535): Investigate and reserve time for cleanup.
	defer a.Close(ctx)
	d, err := a.NewUIDevice(ctx)
	if err != nil {
		s.Fatal("Failed initializing UI Automator: ", err)
	}
	defer d.Close(ctx)
	defer faillog.DumpUITreeOnError(ctx, s.OutDir(), s.HasError, tconn)
	// Folder names below are hard-coded for the dedicated lab device.
	config := storage.TestConfig{DirName: "Nexus/Pixel (MTP+ADB)", DirTitle: "Files - Nexus/Pixel (MTP+ADB)",
		SubDirectories: []string{"Download"}, FileName: "storage.txt"}
	expectations := []storage.Expectation{
		{LabelID: storage.ActionID, Value: storage.ExpectedAction},
		{LabelID: storage.URIID, Predicate: func(actual string) bool {
			// Serial number in the middle of the URI varies per device.
			return strings.HasPrefix(actual, mtpURIPrefix) &&
				strings.HasSuffix(actual, "%2FDownload%2Fstorage.txt")
		}},
		{LabelID: storage.FileContentID, Value: storage.ExpectedFileContent}}
	storage.TestOpenWithAndroidApp(ctx, s, a, cr, d, config, expectations)
}
|
package utils
import (
"fmt"
"io"
"net/http"
"os"
log "github.com/sirupsen/logrus"
)
// iPXEURL is the static upstream URL for retrieving the iPXE bootloader.
const iPXEURL = "https://boot.ipxe.org/undionly.kpxe"
// iPXEHeader is prepended to every generated iPXE script: it requests a DHCP
// lease and prints a banner with the lease details.
const iPXEHeader = `#!ipxe
dhcp
echo .
echo .
echo .
echo .
echo +-------------------- Plunder -------------------------------
echo |
echo | address.: ${net0/ip}
echo | mac.....: ${net0/mac}
echo | gateway.: ${net0/gateway}
echo +------------------------------------------------------------
echo .
echo .
echo .
echo .`
//////////////////////////////
//
// Helper Functions
//
//////////////////////////////
// IPXEReboot returns an iPXE script that warns for five seconds and then
// reboots the machine.
func IPXEReboot() string {
	const script = `
echo MAC ADDRESS is set to reboot, plunder will reboot the server in 5 seconds
sleep 5
reboot
`
	return iPXEHeader + script
}
// IPXEAutoBoot returns an iPXE script that retries autoboot indefinitely,
// used for machines with no known configuration.
func IPXEAutoBoot() string {
	const script = `
echo Unknown MAC address, PXE boot will keep retrying until configuration changes
:retry_boot
autoboot || goto retry_boot
`
	return iPXEHeader + script
}
// IPXEPreeseed builds the iPXE boot script for Debian/Ubuntu preseed
// installs served from webserverAddress.
func IPXEPreeseed(webserverAddress, kernel, initrd, cmdline string) string {
	const script = `
kernel http://%s/%s auto=true url=http://%s/${mac:hexhyp}.cfg priority=critical %s netcfg/choose_interface=${netX/mac}
initrd http://%s/%s
boot
`
	return iPXEHeader + fmt.Sprintf(script, webserverAddress, kernel, webserverAddress, cmdline, webserverAddress, initrd)
}
// IPXEKickstart builds the iPXE boot script for RHEL/CentOS kickstart
// installs served from webserverAddress.
func IPXEKickstart(webserverAddress, kernel, initrd, cmdline string) string {
	const script = `
kernel http://%s/%s auto=true url=http://%s/${mac:hexhyp}.cfg priority=critical %s
initrd http://%s/%s
boot
`
	return iPXEHeader + fmt.Sprintf(script, webserverAddress, kernel, webserverAddress, cmdline, webserverAddress, initrd)
}
// IPXEVSphere builds the iPXE boot script for VMware vSphere/ESXi installs
// served from webserverAddress.
func IPXEVSphere(webserverAddress, kernel, cmdline string) string {
	const script = `
kernel http://%s/%s -c http://%s/${mac:hexhyp}.cfg ks=http://%s/${mac:hexhyp}.ks %s
boot
`
	return iPXEHeader + fmt.Sprintf(script, webserverAddress, kernel, webserverAddress, webserverAddress, cmdline)
}
// IPXEBOOTy builds the iPXE boot script for the BOOTy boot loader served
// from webserverAddress.
func IPXEBOOTy(webserverAddress, kernel, initrd, cmdline string) string {
	const script = `
kernel http://%s/%s BOOTYURL=http://%s %s
initrd http://%s/%s
boot
`
	return iPXEHeader + fmt.Sprintf(script, webserverAddress, kernel, webserverAddress, cmdline, webserverAddress, initrd)
}
// IPXEAnyBoot builds a generic iPXE boot script for anything wanting to PXE
// boot from webserverAddress.
func IPXEAnyBoot(webserverAddress string, kernel, initrd, cmdline string) string {
	const script = `
kernel http://%s/%s auto=true url=http://%s/${mac:hexhyp}.cfg %s
initrd http://%s/%s
boot
`
	return iPXEHeader + fmt.Sprintf(script, webserverAddress, kernel, webserverAddress, cmdline, webserverAddress, initrd)
}
// PullPXEBooter downloads the iPXE bootloader (undionly.kpxe) into the
// current working directory.
func PullPXEBooter() error {
	log.Infoln("Beginning of iPXE download... ")
	// Fetch first, so a failed request doesn't leave behind an empty file
	// (the old code created the file before issuing the request).
	resp, err := http.Get(iPXEURL)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Reject non-200 responses; otherwise an HTML error page would be
	// written to disk as if it were the bootloader.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("downloading %s: unexpected status %s", iPXEURL, resp.Status)
	}
	// Create the file
	out, err := os.Create("undionly.kpxe")
	if err != nil {
		return err
	}
	defer out.Close()
	// Write the body to file
	if _, err := io.Copy(out, resp.Body); err != nil {
		return err
	}
	log.Infoln("Completed")
	return nil
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iio
import (
"context"
"encoding/binary"
"os"
"path"
"reflect"
"sync"
"testing"
"time"
"golang.org/x/sys/unix"
)
// TestNewBuffer verifies that NewBuffer parses the scan_elements sysfs files
// into ChannelSpecs sorted by index, with enable flags and le/be type
// descriptors decoded correctly.
func TestNewBuffer(t *testing.T) {
	defer setupTestFiles(t, map[string]string{
		"iio:device0/name": "cros-ec-accel",
		"iio:device0/location": "lid",
		"iio:device0/buffer/enable": "0",
		"iio:device0/buffer/length": "2",
		"iio:device0/scan_elements/el_a_en": "0",
		"iio:device0/scan_elements/el_a_index": "0",
		"iio:device0/scan_elements/el_a_type": "le:s8/8>>0",
		"iio:device0/scan_elements/el_b_en": "0",
		"iio:device0/scan_elements/el_b_index": "1",
		"iio:device0/scan_elements/el_b_type": "be:u14/16>>2",
		"iio:device0/scan_elements/el_c_en": "0",
		"iio:device0/scan_elements/el_c_index": "3",
		"iio:device0/scan_elements/el_c_type": "le:s29/32>>3",
		"iio:device0/scan_elements/el_d_en": "0",
		"iio:device0/scan_elements/el_d_index": "2",
		"iio:device0/scan_elements/el_d_type": "le:u64/64>>0",
	})()
	sensors, err := GetSensors(context.Background())
	if err != nil {
		t.Fatal("Error getting sensors: ", err)
	}
	buffer, err := sensors[0].NewBuffer()
	if err != nil {
		t.Fatal("Error getting buffer: ", err)
	}
	// Channels must come back ordered by their index files (a, b, d, c).
	expected := &Buffer{
		sensors[0], []*ChannelSpec{
			{0, "el_a", true, 8, 8, 0, LE, binary.LittleEndian, 1},
			{1, "el_b", false, 14, 16, 2, BE, binary.BigEndian, 2},
			{2, "el_d", false, 64, 64, 0, LE, binary.LittleEndian, 8},
			{3, "el_c", true, 29, 32, 3, LE, binary.LittleEndian, 4},
		}, nil, nil,
	}
	if !reflect.DeepEqual(expected, buffer) {
		t.Errorf("Unexpected buffer: got %v; want %v", buffer, expected)
	}
}
// TestOpenBuffer verifies that Buffer.Open decodes framed sample data.
// A named pipe stands in for the /dev/iio:device0 character device; a writer
// goroutine pushes five 6-byte frames (le:s16 + be:u32 shifted by 2) and the
// test checks each decoded channel value.
func TestOpenBuffer(t *testing.T) {
	defer setupTestFiles(t, map[string]string{
		"iio:device0/name": "cros-ec-accel",
		"iio:device0/location": "lid",
		"iio:device0/buffer/enable": "0",
		"iio:device0/buffer/length": "2",
		"iio:device0/scan_elements/el_a_en": "0",
		"iio:device0/scan_elements/el_a_index": "0",
		"iio:device0/scan_elements/el_a_type": "le:s16/16>>0",
		"iio:device0/scan_elements/el_b_en": "0",
		"iio:device0/scan_elements/el_b_index": "1",
		"iio:device0/scan_elements/el_b_type": "be:u30/32>>2",
	})()
	sensors, err := GetSensors(context.Background())
	if err != nil {
		t.Fatal("Error getting sensors: ", err)
	}
	buffer, err := sensors[0].NewBuffer()
	if err != nil {
		t.Fatal("Error getting buffer: ", err)
	}
	if err := os.MkdirAll(path.Join(basePath, "dev"), 0755); err != nil {
		t.Fatal("Error making dev dir: ", err)
	}
	fifoFile := path.Join(basePath, "dev/iio:device0")
	// Use mkfifo to simulate an iio buffer
	if err := unix.Mkfifo(fifoFile, 0600); err != nil {
		t.Fatal("Error making buffer fifo: ", err)
	}
	var wg sync.WaitGroup
	defer wg.Wait()
	wg.Add(1)
	// Writer goroutine: produces the five frames the reader below expects.
	go func() {
		defer wg.Done()
		var s16 int16
		var u32 uint32
		bytes := make([]byte, 6)
		sBytes := bytes[0:2]
		uBytes := bytes[2:6]
		// Opening the fifo for writing blocks until the buffer opens it
		// for reading.
		f, err := os.OpenFile(fifoFile, os.O_WRONLY, 0)
		if err != nil {
			t.Error("Error opening named pipe for writing: ", err)
			return
		}
		defer f.Close()
		for i := 0; i < 5; i++ {
			s16 = -10 - int16(i)
			binary.LittleEndian.PutUint16(sBytes, uint16(s16))
			// Channel 1 is declared ">>2", so pre-shift the raw value.
			u32 = (20 + uint32(i)) << 2
			binary.BigEndian.PutUint32(uBytes, u32)
			_, err = f.Write(bytes)
			if err != nil {
				t.Errorf("Error writing to named pipe %v: %v", i, err)
				return
			}
		}
	}()
	var recvData []BufferData
	data, err := buffer.Open()
	if err != nil {
		t.Fatal("Error opening buffer: ", err)
	}
	defer buffer.Close()
	timeout := time.After(5 * time.Second)
l:
	for {
		select {
		case d, ok := <-data:
			recvData = append(recvData, d)
			if len(recvData) == 5 || !ok {
				break l
			}
		case <-timeout:
			t.Fatal("Timeout reading from buffer")
		}
	}
	if len(recvData) != 5 {
		t.Fatalf("Error reading buffer: got %v; want 5 elements", recvData)
	}
	for i := 0; i < 5; i++ {
		s, _ := recvData[i].Int16(0)
		if s != int16(-10-i) {
			t.Errorf("Wrong data value at index %v channel 0: got %v; want %v",
				i, s, -10-i)
		}
		u, _ := recvData[i].Uint32(1)
		if u != uint32(20+i) {
			t.Errorf("Wrong data value at index %v channel 1: got %v; want %v",
				i, u, 20+i)
		}
	}
}
|
package controller
import (
"net/http"
"github.com/Oxynger/JournalApp/httputils"
"github.com/Oxynger/JournalApp/model"
"github.com/gin-gonic/gin"
)
// GetItemSchemes returns all item schemes.
// @Summary Список схем объектов
// @Description Метод, который получает все списки объектов
// @Tags ItemScheme
// @Accept json
// @Produce json
// @Success 200 {array} model.ItemScheme
// @Failure 404 {object} httputils.HTTPError
// @Failure 500 {object} httputils.HTTPError
// @Router /scheme/item [get]
func (c *Controller) GetItemSchemes(ctx *gin.Context) {
	schemes, err := model.ItemSchemeAll()
	if err == nil {
		ctx.JSON(http.StatusOK, schemes)
		return
	}
	httputils.NewError(ctx, http.StatusNotFound, err)
}
// GetItemScheme returns the item scheme with the given id.
// @Summary Схему объекта с id
// @Description Метод, который получает схему объекта с заданным id
// @Tags ItemScheme
// @Accept json
// @Produce json
// @Param itemscheme_id path string true "ItemSheme id"
// @Success 200 {object} model.ItemScheme
// @Failure 400 {object} httputils.HTTPError
// @Failure 404 {object} httputils.HTTPError
// @Failure 500 {object} httputils.HTTPError
// @Router /scheme/item/{itemscheme_id} [get]
func (c *Controller) GetItemScheme(ctx *gin.Context) {
	scheme, err := model.ItemSchemeOne(ctx.Param("itemscheme_id"))
	if err == nil {
		ctx.JSON(http.StatusOK, scheme)
		return
	}
	httputils.NewError(ctx, http.StatusNotFound, err)
}
// NewItemScheme creates a new item scheme from the request body.
// @Summary Новая схема объектов
// @Description Метод, который создает новую схему объектов
// @Tags ItemScheme
// @Accept json
// @Produce json
// @Param NewItemScheme body model.NewItemScheme true "New Item Scheme"
// @Success 200 {object} model.NewItemScheme
// @Failure 400 {object} httputils.HTTPError
// @Failure 404 {object} httputils.HTTPError
// @Failure 500 {object} httputils.HTTPError
// @Router /scheme/item [post]
func (c *Controller) NewItemScheme(ctx *gin.Context) {
	var scheme model.NewItemScheme
	// Bind, validate, then persist — any failure is the client's fault (400).
	if err := ctx.ShouldBindJSON(&scheme); err != nil {
		httputils.NewError(ctx, http.StatusBadRequest, err)
		return
	}
	if err := scheme.Validation(); err != nil {
		httputils.NewError(ctx, http.StatusBadRequest, err)
		return
	}
	if err := scheme.Insert(); err != nil {
		httputils.NewError(ctx, http.StatusBadRequest, err)
		return
	}
	ctx.JSON(http.StatusOK, scheme)
}
// UpdateItemScheme updates the item scheme with the given id.
// @Summary Изменить схему объектов с id
// @Description Метод, который изменяет схему объекта с заданным id
// @Tags ItemScheme
// @Accept json
// @Produce json
// @Param itemscheme_id path string true "ItemSheme id"
// @Param UpdateItemScheme body model.ItemScheme true "Update Item Scheme"
// @Success 200 {object} model.ItemScheme
// @Failure 400 {object} httputils.HTTPError
// @Failure 404 {object} httputils.HTTPError
// @Failure 500 {object} httputils.HTTPError
// @Router /scheme/item/{itemscheme_id} [put]
func (c *Controller) UpdateItemScheme(ctx *gin.Context) {
	id := ctx.Param("itemscheme_id")
	var scheme model.UpdateItemScheme
	// Bad payloads are 400; a missing record surfaces from Update as 404.
	if err := ctx.ShouldBindJSON(&scheme); err != nil {
		httputils.NewError(ctx, http.StatusBadRequest, err)
		return
	}
	if err := scheme.Validation(); err != nil {
		httputils.NewError(ctx, http.StatusBadRequest, err)
		return
	}
	if err := scheme.Update(id); err != nil {
		httputils.NewError(ctx, http.StatusNotFound, err)
		return
	}
	ctx.JSON(http.StatusOK, scheme)
}
// DeleteItemScheme Удалить схему объектов с id
// @Summary Удалить схему объектов с id
// @Description Метод, который удаляет схему объектов с заданным id
// @Tags ItemScheme
// @Accept json
// @Produce json
// @Param itemscheme_id path string true "ItemSheme id"
// @Success 200 {object} model.ItemScheme
// @Failure 400 {object} httputils.HTTPError
// @Failure 404 {object} httputils.HTTPError
// @Failure 500 {object} httputils.HTTPError
// @Router /scheme/item/{itemscheme_id} [delete]
func (c *Controller) DeleteItemScheme(ctx *gin.Context) {
	// Delete the scheme identified by the path parameter; a failed delete
	// is reported as 404, otherwise the id is echoed back to the caller.
	schemeID := ctx.Param("itemscheme_id")
	if err := model.DeleteSchemeOne(schemeID); err != nil {
		httputils.NewError(ctx, http.StatusNotFound, err)
		return
	}
	ctx.JSON(http.StatusOK, schemeID)
}
|
package kafka
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/Shopify/sarama"
"github.com/project-flogo/core/data/metadata"
"github.com/project-flogo/core/support/log"
"github.com/project-flogo/core/trigger"
)
// triggerMd describes this trigger's settings, per-handler settings and
// output schema so the flogo engine can validate trigger configuration.
var triggerMd = trigger.NewMetadata(&Settings{}, &HandlerSettings{}, &Output{})

func init() {
	// Register the trigger with the flogo runtime at package load time.
	// The registration error is deliberately discarded here.
	_ = trigger.Register(&Trigger{}, &Factory{})
}
// Factory is a kafka trigger factory
type Factory struct {
}

// Metadata implements trigger.Factory.Metadata and returns the shared,
// package-level trigger metadata.
func (*Factory) Metadata() *trigger.Metadata {
	return triggerMd
}
// New implements trigger.Factory.New: it decodes the generic settings map
// into a typed Settings struct and returns a Trigger bound to it.
func (*Factory) New(config *trigger.Config) (trigger.Trigger, error) {
	settings := &Settings{}
	if err := metadata.MapToStruct(config.Settings, settings, true); err != nil {
		return nil, err
	}
	return &Trigger{settings: settings}, nil
}
// Trigger is a kafka trigger
type Trigger struct {
	settings      *Settings        // decoded trigger-level settings
	conn          *KafkaConnection // shared broker connection, created in Initialize
	kafkaHandlers []*Handler       // one handler per configured flogo handler/topic
}
// Initialize initializes the trigger: it opens the kafka connection and
// builds one Handler per configured flogo handler.
//
// Fix: the error from getKafkaConnection was previously ignored until the
// end of the function, so a failed connection led to a nil-pointer
// dereference at t.conn.Connection(). We now fail fast.
func (t *Trigger) Initialize(ctx trigger.InitContext) error {
	var err error
	t.conn, err = getKafkaConnection(ctx.Logger(), t.settings)
	if err != nil {
		return err
	}
	for _, handler := range ctx.GetHandlers() {
		kafkaHandler, err := NewKafkaHandler(ctx.Logger(), handler, t.conn.Connection())
		if err != nil {
			return err
		}
		t.kafkaHandlers = append(t.kafkaHandlers, kafkaHandler)
	}
	return nil
}
// Start starts the kafka trigger by starting every topic handler.
func (t *Trigger) Start() error {
	for _, h := range t.kafkaHandlers {
		_ = h.Start() // Handler.Start currently never returns an error
	}
	return nil
}
// Stop implements ext.Trigger.Stop: it stops every topic handler and then
// tears down the shared kafka connection.
func (t *Trigger) Stop() error {
	for _, h := range t.kafkaHandlers {
		_ = h.Stop() // best-effort shutdown; errors intentionally ignored
	}
	_ = t.conn.Stop()
	return nil
}
// NewKafkaHandler creates a new kafka handler to handle a topic.
// It resolves the topic, starting offset and partition list from the
// handler settings and creates one PartitionConsumer per subscribed
// partition.
//
// Fix: the "partition does not exist" error used to be logged inside the
// scan over validPartitions, so it fired once for every valid partition
// that did not match — even when a later partition DID match. It is now
// logged exactly once, and only when the configured partition is truly
// absent.
func NewKafkaHandler(logger log.Logger, handler trigger.Handler, consumer sarama.Consumer) (*Handler, error) {
	kafkaHandler := &Handler{logger: logger, shutdown: make(chan struct{}), handler: handler}
	handlerSetting := &HandlerSettings{}
	err := metadata.MapToStruct(handler.Settings(), handlerSetting, true)
	if err != nil {
		return nil, err
	}
	if handlerSetting.Topic == "" {
		return nil, fmt.Errorf("topic string was not provided for handler: [%s]", handler)
	}
	logger.Debugf("Subscribing to topic [%s]", handlerSetting.Topic)
	// Default to the newest offset unless one is configured explicitly.
	offset := sarama.OffsetNewest
	if handlerSetting.Offset != 0 {
		offset = handlerSetting.Offset
	}
	var partitions []int32
	validPartitions, err := consumer.Partitions(handlerSetting.Topic)
	if err != nil {
		return nil, err
	}
	logger.Debugf("Valid partitions for topic [%s] detected as: [%v]", handlerSetting.Topic, validPartitions)
	if handlerSetting.Partitions != "" {
		// Explicit comma-separated partition list: keep only entries that
		// parse as numbers AND exist on the broker.
		parts := strings.Split(handlerSetting.Partitions, ",")
		for _, p := range parts {
			n, err := strconv.Atoi(p)
			if err != nil {
				logger.Warnf("Partition [%s] specified for handler [%s] is not a valid number and was discarded", p, handler)
				continue
			}
			found := false
			for _, validPartition := range validPartitions {
				if int32(n) == validPartition {
					partitions = append(partitions, int32(n))
					found = true
					break
				}
			}
			if !found {
				logger.Errorf("Configured partition [%d] on topic [%s] does not exist and will not be subscribed", n, handlerSetting.Topic)
			}
		}
	} else {
		// No explicit list: subscribe to every partition of the topic.
		partitions = validPartitions
	}
	for _, partition := range partitions {
		logger.Debugf("Creating PartitionConsumer for partition: [%s:%d]", handlerSetting.Topic, partition)
		partitionConsumer, err := consumer.ConsumePartition(handlerSetting.Topic, partition, offset)
		if err != nil {
			logger.Errorf("Creating PartitionConsumer for valid partition: [%s:%d] failed for reason: %s", handlerSetting.Topic, partition, err)
			return nil, err
		}
		kafkaHandler.consumers = append(kafkaHandler.consumers, partitionConsumer)
	}
	return kafkaHandler, nil
}
// Handler is a kafka topic handler
type Handler struct {
	shutdown  chan struct{}             // closed by Stop to signal all partition goroutines
	logger    log.Logger                // trigger-scoped logger
	handler   trigger.Handler           // flogo handler invoked for each message
	consumers []sarama.PartitionConsumer // one consumer per subscribed partition
}
// consumePartition pumps one partition: it forwards each received message
// to the flogo handler and exits on shutdown or when the consumer's
// channels are closed.
//
// Fix: the Messages() receive now uses the two-value form. When the
// consumer is closed (Handler.Stop), sarama closes the Messages channel
// and the old code received a nil *ConsumerMessage, panicking on
// msg.Value/msg.Topic. A closed channel now simply ends the goroutine.
func (h *Handler) consumePartition(consumer sarama.PartitionConsumer) {
	for {
		select {
		case err := <-consumer.Errors():
			if err == nil {
				// nil error means the Errors channel was closed: shutdown
				return
			}
			// transient consumer error: back off briefly and keep going
			time.Sleep(time.Millisecond * 100)
		case <-h.shutdown:
			return
		case msg, ok := <-consumer.Messages():
			if !ok {
				// Messages channel closed: the consumer was shut down
				return
			}
			if h.logger.DebugEnabled() {
				h.logger.Debugf("Kafka subscriber triggering action from topic [%s] on partition [%d] with key [%s] at offset [%d]",
					msg.Topic, msg.Partition, msg.Key, msg.Offset)
				h.logger.Debugf("Kafka message: '%s'", string(msg.Value))
			}
			out := &Output{}
			out.Message = string(msg.Value)
			_, err := h.handler.Handle(context.Background(), out)
			if err != nil {
				h.logger.Errorf("Run action for handler [%s] failed for reason [%s] message lost", h.handler.Name(), err)
			}
		}
	}
}
// Start starts the handler: one consuming goroutine per partition consumer.
func (h *Handler) Start() error {
	for _, pc := range h.consumers {
		go h.consumePartition(pc)
	}
	return nil
}
// Stop stops the handler: it signals every partition goroutine to exit and
// closes the underlying partition consumers.
func (h *Handler) Stop() error {
	close(h.shutdown)
	for _, pc := range h.consumers {
		_ = pc.Close() // best effort; close errors are ignored on shutdown
	}
	return nil
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package profiler
import (
"context"
"os"
"path/filepath"
"strings"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
func init() {
	// Register the "profilerRunning" fixture: it starts the profilers named
	// by the profiler.AccessVars.mode runtime variable before each test and
	// stops them afterwards (see PreTest/PostTest).
	testing.AddFixture(&testing.Fixture{
		Name:            "profilerRunning",
		Desc:            "Started profilers specified by profiler.AccessVars.mode variable",
		Contacts:        []string{"jacobraz@google.com"},
		Impl:            newProfilerFixture(),
		SetUpTimeout:    10 * time.Second,
		ResetTimeout:    10 * time.Second,
		TearDownTimeout: 10 * time.Second,
		PreTestTimeout:  10 * time.Second,
		PostTestTimeout: 10 * time.Second,
	})
}
// mode names one profiler that the fixture can run; values correspond to
// the comma-separated entries accepted by the runtime variable.
type mode string

const (
	modeStat       mode = "stat"       // perf stat
	modeSched      mode = "sched"      // perf sched
	modeStatRecord mode = "statrecord" // perf stat record
	modeRecord     mode = "record"     // perf record
)
// profilerFixture runs the requested profilers around each test.
type profilerFixture struct {
	// modes holds the profiler modes parsed from the runtime variable.
	modes []mode
	// Store outDir to keep results from profiler in fixture specific dir
	// ie use /tast/results/..../fixtures/profilerRunning instead of /tast/results/..../tests
	// This is done to maintain consistency with keeping logs gathered by fixtures independent of those gathered by tests
	outDir string
	// runningProfs tracks the profilers started in PreTest so PostTest can end them.
	runningProfs *RunningProf
}
// newProfilerFixture creates new profilerFixture struct with zero values;
// all state is filled in by SetUp and PreTest.
func newProfilerFixture() *profilerFixture {
	return &profilerFixture{}
}
// newProfilers builds one Profiler per comma-separated entry of the
// profilerMode runtime variable, recording each recognized entry in
// f.modes. An unrecognized entry aborts the whole build with an error.
// Note that statOut/schedOut are shared output structs referenced by the
// created profilers, matching the original behavior.
func (f *profilerFixture) newProfilers() ([]Profiler, error) {
	var (
		result   []Profiler
		statOut  PerfStatOutput
		schedOut PerfSchedOutput
	)
	for _, name := range strings.Split(profilerMode.Value(), ",") {
		m := mode(name)
		switch m {
		case modeStat:
			result = append(result, Perf(PerfStatOpts(&statOut, 0)))
		case modeSched:
			result = append(result, Perf(PerfSchedOpts(&schedOut, "")))
		case modeRecord:
			result = append(result, Perf(PerfRecordOpts("", nil, PerfRecordCallgraph)))
		case modeStatRecord:
			result = append(result, Perf(PerfStatRecordOpts()))
		default:
			return nil, errors.Errorf("Unidentified profiler: %s not recognized, cannot start profiler", name)
		}
		f.modes = append(f.modes, m)
	}
	return result, nil
}
// filePaths gets paths to all files written by the configured profilers,
// one path per entry of f.modes, rooted at outDir.
func (f *profilerFixture) filePaths(outDir string) []string {
	fileNames := map[mode]string{
		modeStat:       perfStatFileName,
		modeSched:      perfSchedFileName,
		modeRecord:     perfRecordFileName,
		modeStatRecord: perfStatRecordFileName,
	}
	var paths []string
	for _, m := range f.modes {
		if name, ok := fileNames[m]; ok {
			paths = append(paths, filepath.Join(outDir, name))
		}
	}
	return paths
}
// filesExist reports whether every file in files currently exists.
// A missing file yields (false, nil); any stat error other than
// "not exist" is returned to the caller. An empty list yields (true, nil).
func filesExist(files []string) (bool, error) {
	allPresent := true
	for _, path := range files {
		_, statErr := os.Stat(path)
		switch {
		case statErr == nil:
			// file present, keep checking the rest
		case errors.Is(statErr, os.ErrNotExist):
			allPresent = false
		default:
			return false, statErr
		}
	}
	return allPresent, nil
}
// SetUp records the fixture's output directory; profilers themselves are
// started per-test in PreTest. Returns nil: the fixture exposes no value.
func (f *profilerFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	// TODO(jacobraz): handle aarch64 devices that cant run perf
	f.outDir = s.OutDir()
	return nil
}
// TearDown is intentionally empty.
func (f *profilerFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	// All necessary work for tearing down the profiler is done in PostTest
}

// Reset is intentionally a no-op.
func (f *profilerFixture) Reset(ctx context.Context) error {
	// All necessary work for resetting the profiler state is done in pre/post test
	return nil
}
// PreTest starts the configured profilers before each test and then polls
// until every expected output file exists, so the test body runs with
// profiling demonstrably active.
//
// Fix: the "failed waiting for file(s)" message previously concatenated
// the file list directly onto the text with no separator; a ": " is added.
func (f *profilerFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	profs, err := f.newProfilers()
	if err != nil {
		s.Error("Failure in starting the profiler: ", err)
		return
	}
	// No profiler modes requested: nothing to start or to wait for.
	if profs == nil {
		return
	}
	rp, err := Start(s.TestContext(), f.outDir, profs...)
	if err != nil {
		s.Error("Failure in starting the profiler: ", err)
		return
	}
	f.runningProfs = rp
	outFiles := f.filePaths(f.outDir)
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if ok, err := filesExist(outFiles); err != nil {
			return testing.PollBreak(errors.Wrap(err, "failed to check for file"))
		} else if !ok {
			return errors.New("failed waiting for file(s): " + strings.Join(outFiles, " "))
		}
		return nil
	}, &testing.PollOptions{Timeout: 30 * time.Second}); err != nil {
		s.Error("Failed to wait for profiler file creation: ", err)
	}
}
// PostTest stops the profilers started in PreTest, if any were started.
func (f *profilerFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
	rp := f.runningProfs
	if rp == nil {
		return
	}
	if err := rp.End(s.TestContext()); err != nil {
		s.Error("Failure in ending the profiler: ", err)
	}
}
|
package kafka2
// NewClient is currently an empty stub.
// NOTE(review): it neither constructs nor returns anything — presumably a
// placeholder for a future kafka client constructor; confirm intent.
func NewClient() {
}
|
package commonservice
import (
"errors"
"fmt"
//"github.com/astaxie/beego"
"github.com/astaxie/beego/validation"
"net/http"
"tripod/convert"
"webserver/common"
"webserver/controllers"
"webserver/models/maccount"
)
// PublishListController serves paginated lists of a user's published
// articles (see Post / getPublishList).
type PublishListController struct {
	controllers.BaseController
	orderType int // NOTE(review): set nowhere in this file — possibly dead; confirm against other methods
	count     int // page size, parsed from the "page_num" request parameter
	offset    int // row offset computed from "page" and count
}
// Post handles the publish-list request: parse body, validate paging
// parameters, then return the caller's published articles. Parse failures
// answer with HTTP 400; validation and query failures answer HTTP 200
// with an error payload, matching the project's response convention.
func (c *PublishListController) Post() {
	defer c.Recover()
	if err := c.ParseJsonBody(); err != nil {
		c.WriteCommonResponse(http.StatusBadRequest, 0, err)
		return
	}
	if err := c.validParam(); err != nil {
		c.WriteCommonResponse(http.StatusOK, 0, err)
		return
	}
	respBody, err := c.getPublishList()
	if err != nil {
		c.WriteCommonResponse(http.StatusOK, 0, err)
		return
	}
	c.WriteBodyResponse(respBody)
}
// validParam parses the paging parameters ("page", "page_num") and derives
// the row offset used by getPublishList.
func (c *PublishListController) validParam() error {
	// NOTE(review): no validation rules are ever registered on `valid`, so
	// HasErrors() below can never be true with the code visible here —
	// confirm whether rules were intended (e.g. page/count > 0). A page of 0
	// currently produces a negative offset.
	valid := validation.Validation{}
	page := convert.Atoi(c.GetParam("page"))
	c.count = convert.Atoi(c.GetParam("page_num"))
	c.offset = (page - 1) * c.count
	if valid.HasErrors() {
		var reason string
		for _, err := range valid.Errors {
			reason += err.Key + err.Message
		}
		return errors.New(reason)
	}
	return nil
}
// getPublishList fetches the current user's published articles (status = 1)
// for the page window computed by validParam and wraps them in the common
// response envelope.
func (c *PublishListController) getPublishList() (interface{}, error) {
	data := make([]map[string]interface{}, 0, 20)
	// The only interpolated value is the numeric user id (%d), so this
	// Sprintf-built filter is not injectable by request input.
	query := fmt.Sprintf("user_id = %d and status = 1", c.User.Id)
	serviceList, err := maccount.FindArticlesByFilter(query, c.offset, c.count)
	// NOTE(review): CheckError presumably panics/aborts on error (recovered
	// by c.Recover in Post); err is not otherwise handled here — confirm.
	controllers.CheckError("FindArticlesByFilter", err)
	for _, service := range serviceList {
		item := make(map[string]interface{})
		controllers.GetArticleInfo(item, &service)
		data = append(data, item)
	}
	var resp common.GetResponse
	resp.Code = 1
	resp.Data = data
	return resp, nil
}
|
package satokencerts
import (
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/operatorclient"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/openshift/library-go/pkg/operator/configobserver"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/configobservation"
)
// ObserveSATokenCerts checks which configmaps exist and bases the configuration for verifying sa tokens on them.
// There are two possible sources: the installer and the kube-controller-manger-operator, but we wire to the target namespace
// to avoid setting a config we cannot fulfill which would crash the kube-apiserver.
//
// Refactor: the two previously duplicated configmap switch blocks are
// collapsed into the saTokenCertDir helper; behavior is unchanged.
func ObserveSATokenCerts(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {
	listers := genericListers.(configobservation.Listers)
	errs := []error{}
	prevObservedConfig := map[string]interface{}{}
	// copy non-empty .serviceAccountPublicKeyFiles from existingConfig to prevObservedConfig
	saTokenCertsPath := []string{"serviceAccountPublicKeyFiles"}
	existingSATokenCerts, _, err := unstructured.NestedStringSlice(existingConfig, saTokenCertsPath...)
	if err != nil {
		return prevObservedConfig, append(errs, err)
	}
	if len(existingSATokenCerts) > 0 {
		if err := unstructured.SetNestedStringSlice(prevObservedConfig, existingSATokenCerts, saTokenCertsPath...); err != nil {
			errs = append(errs, err)
		}
	}
	observedConfig := map[string]interface{}{}
	saTokenCertDirs := []string{}
	// Check both possible signing-cert sources; each contributes its static
	// pod mount path only when its configmap exists and is non-empty.
	for _, name := range []string{"initial-sa-token-signing-certs", "kube-controller-manager-sa-token-signing-certs"} {
		dir, err := saTokenCertDir(listers, name)
		if err != nil {
			// we had an error, return what we had before and exit. this really shouldn't happen
			return prevObservedConfig, append(errs, err)
		}
		if dir != "" {
			saTokenCertDirs = append(saTokenCertDirs, dir)
		}
	}
	if len(saTokenCertDirs) > 0 {
		if err := unstructured.SetNestedStringSlice(observedConfig, saTokenCertDirs, saTokenCertsPath...); err != nil {
			errs = append(errs, err)
		}
	}
	if !equality.Semantic.DeepEqual(existingSATokenCerts, saTokenCertDirs) && len(errs) == 0 {
		recorder.Eventf("ObserveSATokenCerts", "serviceAccountPublicKeyFiles changed to %v", saTokenCertDirs)
	}
	return observedConfig, errs
}

// saTokenCertDir returns the static-pod mount directory for the named
// signing-certs configmap in the target namespace, "" when the configmap is
// missing or empty, or an unexpected lookup error.
func saTokenCertDir(listers configobservation.Listers, name string) (string, error) {
	cm, err := listers.ConfigmapLister.ConfigMaps(operatorclient.TargetNamespace).Get(name)
	switch {
	case errors.IsNotFound(err):
		// do nothing because we aren't going to add a path to a missing configmap
		return "", nil
	case err != nil:
		return "", err
	case len(cm.Data) > 0:
		// the configmap exists and has values, so wire up the directory
		return "/etc/kubernetes/static-pod-resources/configmaps/" + name, nil
	default:
		// do nothing because we aren't going to add a path to a configmap with no files
		return "", nil
	}
}
|
package main
import (
"flag"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
)
// addr is the listen address, configurable via the -addr flag.
var addr string

// main runs a tiny reverse proxy in front of the ECS task-metadata (v4)
// endpoint advertised through the ECS_CONTAINER_METADATA_URI_V4 variable.
func main() {
	flag.StringVar(&addr, "addr", ":9090", "The address to listen on for HTTP requests.")
	flag.Parse()
	const endpointEnv = "ECS_CONTAINER_METADATA_URI_V4"
	endpoint := os.Getenv(endpointEnv)
	if endpoint == "" {
		log.Fatalf("%q environmental variable is not set, are you running this on ECS?", endpointEnv)
	}
	target, err := url.Parse(endpoint)
	if err != nil {
		log.Fatalf("Failed to parse endpoint: %v", err)
	}
	reverseProxy := httputil.NewSingleHostReverseProxy(target)
	handler := func(w http.ResponseWriter, r *http.Request) {
		log.Printf("Serving %v", r.URL)
		reverseProxy.ServeHTTP(w, r)
	}
	http.HandleFunc("/", handler)
	log.Printf("Starting server on %s", addr)
	log.Fatal(http.ListenAndServe(addr, nil))
}
|
/*
* Copyright 1999-2020 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package exec
import (
"context"
"fmt"
"path"
"strconv"
"github.com/chaosblade-io/chaosblade-spec-go/channel"
"github.com/chaosblade-io/chaosblade-spec-go/spec"
"github.com/chaosblade-io/chaosblade-spec-go/util"
"github.com/chaosblade-io/chaosblade-exec-os/exec/category"
)
// AppendFileBin is the helper binary that performs the actual file append.
const AppendFileBin = "chaos_appendfile"

// FileAppendActionSpec describes the "file append" chaos action.
type FileAppendActionSpec struct {
	spec.BaseExpActionCommandSpec
}
// NewFileAppendActionSpec builds the command spec for the file-append
// action: its flags (content, count, interval, escape, enable-base64),
// usage examples, the helper binary it runs, and its executor.
func NewFileAppendActionSpec() spec.ExpActionCommandSpec {
	return &FileAppendActionSpec{
		spec.BaseExpActionCommandSpec{
			ActionMatchers: fileCommFlags,
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name:     "content",
					Desc:     "append content",
					Required: true,
				},
				&spec.ExpFlag{
					Name: "count",
					Desc: "the number of append count, must be a positive integer, default 1",
				},
				&spec.ExpFlag{
					Name: "interval",
					Desc: "append interval, must be a positive integer, default 1s",
				},
				&spec.ExpFlag{
					Name:   "escape",
					Desc:   "symbols to escape, use --escape, at this --count is invalid",
					NoArgs: true,
				},
				&spec.ExpFlag{
					Name:   "enable-base64",
					Desc:   "append content enable base64 encoding",
					NoArgs: true,
				},
			},
			ActionExecutor: &FileAppendActionExecutor{},
			ActionExample: `
# Appends the content "HELLO WORLD" to the /home/logs/nginx.log file
blade create file append --filepath=/home/logs/nginx.log --content="HELL WORLD"
# Appends the content "HELLO WORLD" to the /home/logs/nginx.log file, interval 10 seconds
blade create file append --filepath=/home/logs/nginx.log --content="HELL WORLD" --interval 10
# Appends the content "HELLO WORLD" to the /home/logs/nginx.log file, enable base64 encoding
blade create file append --filepath=/home/logs/nginx.log --content=SEVMTE8gV09STEQ=
# mock interface timeout exception
blade create file append --filepath=/home/logs/nginx.log --content="@{DATE:+%Y-%m-%d %H:%M:%S} ERROR invoke getUser timeout [@{RANDOM:100-200}]ms abc mock exception"
`,
			ActionPrograms:   []string{AppendFileBin},
			ActionCategories: []string{category.SystemFile},
		},
	}
}
// Name returns the action's command name.
func (*FileAppendActionSpec) Name() string {
	return "append"
}

// Aliases returns alternate names for the action (none).
func (*FileAppendActionSpec) Aliases() []string {
	return []string{}
}

// ShortDesc returns the one-line description shown in listings.
func (*FileAppendActionSpec) ShortDesc() string {
	return "File content append"
}

// LongDesc returns the long-form description.
func (f *FileAppendActionSpec) LongDesc() string {
	return "File content append. "
}
// FileAppendActionExecutor runs the file-append experiment through the
// injected channel (set via SetChannel).
type FileAppendActionExecutor struct {
	channel spec.Channel
}

// Name returns the executor's name, matching the action name.
func (*FileAppendActionExecutor) Name() string {
	return "append"
}
// Exec validates the action flags and dispatches to start (create) or stop
// (destroy) of the append experiment identified by uid.
func (f *FileAppendActionExecutor) Exec(uid string, ctx context.Context, model *spec.ExpModel) *spec.Response {
	// The helper script relies on these shell commands being present.
	commands := []string{"echo", "kill"}
	if response, ok := channel.NewLocalChannel().IsAllCommandsAvailable(commands); !ok {
		return response
	}
	if f.channel == nil {
		util.Errorf(uid, util.GetRunFuncName(), spec.ResponseErr[spec.ChannelNil].ErrInfo)
		return spec.ResponseFail(spec.ChannelNil, spec.ResponseErr[spec.ChannelNil].ErrInfo)
	}
	filepath := model.ActionFlags["filepath"]
	// Destroy phase: stop an already-running append experiment.
	if _, ok := spec.IsDestroy(ctx); ok {
		return f.stop(filepath, ctx)
	}
	// default 1
	count := 1
	// 1000 ms
	interval := 1
	content := model.ActionFlags["content"]
	countStr := model.ActionFlags["count"]
	intervalStr := model.ActionFlags["interval"]
	// Both count and interval must be positive integers when supplied.
	if countStr != "" {
		var err error
		count, err = strconv.Atoi(countStr)
		if err != nil || count < 1 {
			util.Errorf(uid, util.GetRunFuncName(), fmt.Sprintf("`%s` value must be a positive integer", "count"))
			return spec.ResponseFailWaitResult(spec.ParameterIllegal, fmt.Sprintf(spec.ResponseErr[spec.ParameterIllegal].Err, "count"),
				fmt.Sprintf(spec.ResponseErr[spec.ParameterIllegal].ErrInfo, "count"))
		}
	}
	if intervalStr != "" {
		var err error
		interval, err = strconv.Atoi(intervalStr)
		if err != nil || interval < 1 {
			util.Errorf(uid, util.GetRunFuncName(), fmt.Sprintf("`%s` value must be a positive integer", "interval"))
			return spec.ResponseFailWaitResult(spec.ParameterIllegal, fmt.Sprintf(spec.ResponseErr[spec.ParameterIllegal].Err, "interval"),
				fmt.Sprintf(spec.ResponseErr[spec.ParameterIllegal].ErrInfo, "interval"))
		}
	}
	escape := model.ActionFlags["escape"] == "true"
	enableBase64 := model.ActionFlags["enable-base64"] == "true"
	// Target file must already exist; this action appends, it does not create.
	if !util.IsExist(filepath) {
		util.Errorf(uid, util.GetRunFuncName(), fmt.Sprintf("`%s`: file does not exist", filepath))
		return spec.ResponseFailWaitResult(spec.ParameterInvalid, fmt.Sprintf(spec.ResponseErr[spec.ParameterInvalid].Err, "filepath"),
			fmt.Sprintf(spec.ResponseErr[spec.ParameterInvalid].ErrInfo, "filepath"))
	}
	return f.start(filepath, content, count, interval, escape, enableBase64, ctx)
}
// start launches the helper binary with the assembled --start flag string.
func (f *FileAppendActionExecutor) start(filepath string, content string, count int, interval int, escape bool, enableBase64 bool, ctx context.Context) *spec.Response {
	flags := fmt.Sprintf(`--start --filepath "%s" --content "%s" --count %d --interval %d --debug=%t`, filepath, content, count, interval, util.Debug)
	if escape {
		flags = fmt.Sprintf("%s --escape=true", flags)
	}
	if enableBase64 {
		flags = fmt.Sprintf("%s --enable-base64=true", flags)
	}
	return f.channel.Run(ctx, path.Join(f.channel.GetScriptPath(), AppendFileBin), flags)
}
// stop invokes the helper binary with --stop for the given file.
// NOTE(review): unlike start, filepath is not quoted here, so paths with
// spaces may mis-split — confirm whether that is intentional.
func (f *FileAppendActionExecutor) stop(filepath string, ctx context.Context) *spec.Response {
	return f.channel.Run(ctx, path.Join(f.channel.GetScriptPath(), AppendFileBin),
		fmt.Sprintf(`--stop --filepath %s --debug=%t`, filepath, util.Debug))
}
// SetChannel injects the execution channel used by start/stop.
func (f *FileAppendActionExecutor) SetChannel(channel spec.Channel) {
	f.channel = channel
}
|
package binfiles
import (
"bytes"
"net/source/proto/repe"
"errors"
"fmt"
"net/source/proto/pools"
"sync"
bytes2 "net/source/utils/bytes"
"net/source/userapi"
)
// ProtoBinPack is a pooled, versioned binary protocol packet.
type ProtoBinPack struct {
	Ver          int32
	len          int32  // intended send length
	Rlen         int32  // actual received length
	Bytes        []byte
	refPoolBytes []byte // pooled backing storage for the Bytes field
	S_cliconnId  int64  // server-side client connection id
	Buf          *bytes.Buffer
}
// ProtobinPool recycles ProtoBinPack packets; fresh instances carry no
// buffer — CreateProtoBin attaches pooled bytes and a Buffer on checkout.
var ProtobinPool = sync.Pool{
	New: func() interface{} {
		p := &ProtoBinPack{
			Ver:         0,
			len:         0,
			S_cliconnId: -1,
			Rlen:        0,
		}
		p.Bytes = nil
		p.Buf = nil
		return p
	},
}
// Pools for the Modbus request packets, one per function code
// (0x03 read, 0x06 write single register, 0x10 write multiple registers).
var ModBusProtobinPool_03 = sync.Pool{
	New: func() interface{} {
		p := &Mod03_ProtoBinPack{
		}
		return p
	},
}
var ModBusProtobinPool_06 = sync.Pool{
	New: func() interface{} {
		p := &Mod06_ProtoBinPack{
		}
		return p
	},
}
var ModBusProtobinPool_10 = sync.Pool{
	New: func() interface{} {
		p := &Mod10_ProtoBinPack{
		}
		return p
	},
}
// reset returns the pooled backing bytes (if still attached) and clears
// all packet state so the ProtoBinPack can be reused.
// NOTE(review): proto.Buf.Reset() panics if Buf is nil; callers appear to
// only reset packets produced by CreateProtoBin (which sets Buf) — confirm.
func (proto *ProtoBinPack) reset() {
	proto.Ver = 0
	proto.len = 0
	proto.Rlen = 0
	proto.S_cliconnId = -1
	if proto.refPoolBytes != nil {
		pools.CtlBytesSlicePool.Put(proto.refPoolBytes)
		proto.refPoolBytes = nil
	}
	proto.Buf.Reset()
	proto.Bytes = nil
	proto.Buf = nil
}
// SetClientId records the server-side client connection id on the packet.
func (p *ProtoBinPack) SetClientId(id int64) {
	p.S_cliconnId = id
}

// GetClientId returns the server-side client connection id of the packet.
func (p *ProtoBinPack) GetClientId() int64 {
	return p.S_cliconnId
}
// Release returns the packet's pooled resources.
//
// Fix: the previous implementation called pools.CtlBytesSlicePool.Put on
// refPoolBytes AND then called reset(), which puts the same slice into the
// pool again — a double-put that hands the same backing array to two
// future borrowers. reset() alone performs the Put exactly once.
func (proto *ProtoBinPack) Release() {
	proto.reset()
}
// CreateProtoBin checks a packet out of ProtobinPool, attaches sizel bytes
// of pooled backing storage plus a Buffer over them, and stamps version and
// connection id.
// NOTE(review): sizel must not exceed the pooled slice capacity provided by
// CtlBytesSlicePool — the slicing below panics otherwise; confirm bounds.
func CreateProtoBin(ver, sizel int32, cliconnId int64) *ProtoBinPack {
	tmp := ProtobinPool.Get().(*ProtoBinPack)
	tmp.refPoolBytes = pools.CtlBytesSlicePool.Get().([]byte)
	tmp.Bytes = tmp.refPoolBytes[0:sizel]
	tmp.Buf = bytes.NewBuffer(tmp.Bytes)
	tmp.Ver = ver
	tmp.len = sizel
	tmp.S_cliconnId = cliconnId
	return tmp
}
// Mod03_ProtoBinPack is a Modbus function-code 0x03 (read holding
// registers) request frame.
type Mod03_ProtoBinPack struct {
	Modbase_ProtoBinPack
	StartRegH8 byte // start register address, high byte
	StartRegL8 byte // start register address, low byte
	ReadRegH   byte // register count, high byte
	ReadRegL   byte // register count, low byte
	Crc16      int16
}
/*
Modbus 0x03 response layout:
[device address] + [function code 03H] + <frame header>
[returned byte count] +
[data 1] +
[data 2] +
... + [data n] +
[CRC low 8 bits] + [CRC high 8 bits]*/
type Mod03_ProtoBinPackResp struct {
	Modbase_ProtoBinPack
	DataFieldLength byte   // number of data bytes that follow
	DataFields      []byte // register payload
	Crc16           int16
}
// bytesize is the preallocated payload capacity for pooled 0x03 responses.
var bytesize =255

// ModBusProtobinRespPool_03 recycles 0x03 response frames with their
// payload slice preallocated to bytesize.
var ModBusProtobinRespPool_03 = sync.Pool{
	New: func() interface{} {
		p := &Mod03_ProtoBinPackResp{
			DataFields:make([]byte,bytesize),
			DataFieldLength:0,
		}
		return p
	},
}
// Mod06_ProtoBinPack is a Modbus function-code 0x06 (write single
// register) request frame.
type Mod06_ProtoBinPack struct {
	Modbase_ProtoBinPack `json:"-"`
	RegSetterH8          byte   `json:"reg_setter_h_8"` // target register, high byte
	RegSetterL8          byte   `json:"reg_setter_l_8"` // target register, low byte
	DataH8               byte   `json:"data_h_8"`       // value, high byte
	DataL8               byte   `json:"data_l_8"`       // value, low byte
	Crc16                uint16 `json:"-"`
}

// SetFnCode overrides the embedded base setter for 0x06 frames.
func (this *Mod06_ProtoBinPack) SetFnCode(code byte) {
	this.FnCode = code
}

// SetMac overrides the embedded base setter for 0x06 frames.
func (this *Mod06_ProtoBinPack) SetMac(mac byte) {
	this.Mac = mac
}

// Reset is currently a no-op; pooled frames are re-stamped field by field
// by CreateModBusProtoBin after checkout.
func (this *Mod06_ProtoBinPack) Reset() {
	//this.Mac = mac
}
// Modbase_ProtoBinPack carries the fields common to every Modbus frame and
// provides the default IModBusProtoBinPack accessor implementations.
type Modbase_ProtoBinPack struct {
	userapi.IModBusProtoBinPack `json:"-"`
	// NOTE(review): clientId is unexported, so the json tag below has no
	// effect — encoding/json ignores unexported fields; confirm intent.
	clientId     int64 `json:"client_id"`
	Mac          byte  `json:"mac"`     // device (slave) address
	FnCode       byte  `json:"fn_code"` // Modbus function code
	EndTimestamp int64
}

// GetTimestamp returns the frame's end timestamp.
func (this *Modbase_ProtoBinPack) GetTimestamp() int64{
	return this.EndTimestamp
}

// GetData returns the frame itself as a generic payload.
func (this *Modbase_ProtoBinPack) GetData()interface{}{
	return this
}

// GetFnCode returns the Modbus function code.
func (this *Modbase_ProtoBinPack) GetFnCode() byte {
	return this.FnCode
}

// SetClientId records the owning client connection id.
func (this *Modbase_ProtoBinPack) SetClientId(id int64) {
	this.clientId = id
}

// GetClientId returns the owning client connection id.
func (this *Modbase_ProtoBinPack) GetClientId() int64 {
	return this.clientId
}

// SetFnCode sets the Modbus function code.
func (this *Modbase_ProtoBinPack) SetFnCode(code byte) {
	this.FnCode = code
}

// SetMac sets the device (slave) address.
func (this *Modbase_ProtoBinPack) SetMac(mac byte) {
	this.Mac = mac
}

// GetMac returns the device (slave) address.
func (this *Modbase_ProtoBinPack) GetMac() byte {
	return this.Mac
}

// Reset is currently a no-op; see CreateModBusProtoBin for re-stamping.
func (this *Modbase_ProtoBinPack) Reset() {
	//this.Mac = mac
}
// Mod10_ProtoBinPack is a Modbus function-code 0x10 (write multiple
// registers) request frame.
type Mod10_ProtoBinPack struct {
	Modbase_ProtoBinPack
	StartRegH8   byte // start register address, high byte
	StartRegL8   byte // start register address, low byte
	RegNum       byte // number of registers to write
	DataFields16 []int16
	Crc16        uint16
}
// creator maps a Modbus function code to the pool of request frames;
// creator_resp does the same for response frames (only 0x03 has one).
var creator map[byte]*sync.Pool
var code03 byte = 0x03
var code06 byte = 0x06
var code10 byte = 0x10
var creator_resp map[byte]*sync.Pool

func init() {
	creator = make(map[byte]*sync.Pool, 5)
	creator[code03] = &ModBusProtobinPool_03
	creator[code06] = &ModBusProtobinPool_06
	creator[code10] = &ModBusProtobinPool_10
	creator_resp = make(map[byte]*sync.Pool, 5)
	creator_resp[code03] = &ModBusProtobinRespPool_03
}
// CreateModBusProtoBin checks a request frame for the given function code
// out of its pool and stamps it with the function code, device address and
// client connection id. Returns nil for an unknown function code.
func CreateModBusProtoBin(mac, fnCode byte, cliconnId int64) userapi.IModBusProtoBinPack {
	pool, ok := creator[fnCode]
	if !ok || pool == nil {
		return nil
	}
	pack := pool.Get().(userapi.IModBusProtoBinPack)
	pack.Reset()
	pack.SetFnCode(fnCode)
	pack.SetMac(mac)
	pack.SetClientId(cliconnId)
	return pack
}
// CreateModBusProtoBinResp checks a response frame for the given function
// code out of its pool and stamps it; nil when no response pool exists for
// that code (currently only 0x03 is registered).
func CreateModBusProtoBinResp(mac, fnCode byte, cliconnId int64) userapi.IModBusProtoBinPack {
	pool, ok := creator_resp[fnCode]
	if !ok || pool == nil {
		return nil
	}
	pack := pool.Get().(userapi.IModBusProtoBinPack)
	pack.Reset()
	pack.SetFnCode(fnCode)
	pack.SetMac(mac)
	pack.SetClientId(cliconnId)
	return pack
}
// translate proto to app msg
// dstBytes is a package-level scratch buffer used by Parse.
// NOTE(review): sharing one global buffer makes Parse unsafe to call from
// multiple goroutines concurrently — confirm callers serialize it.
var dstBytes = [1024]byte{0}

// Len returns the intended send length of the packet.
func (proto *ProtoBinPack) Len() int32 {
	return proto.len
}

// GetBytes returns the packet's payload slice.
func (proto *ProtoBinPack) GetBytes() []byte {
	return proto.Bytes
}

// SetRLen records the actually received length.
func (proto *ProtoBinPack) SetRLen(rl int32) {
	proto.Rlen = rl
}
// Parse looks up the processor registered for the packet's version, copies
// the received bytes into the shared scratch buffer and hands them to the
// processor. Returns an error when no processor is registered; panics
// inside processing are recovered and logged.
//
// Fix: the error was built with fmt.Sprintln, which performs no verb
// substitution — the message contained a literal "%d" and never included
// proto.Ver. fmt.Sprintf is used instead (kept inside errors.New so the
// file's "errors" import stays in use).
func (proto *ProtoBinPack) Parse() error {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println("parse proto bin err:", err)
		}
	}()
	processor := repe.RepertoryGet(proto.Ver)
	if processor != nil {
		processor.Ver = proto.Ver
		if proto.Rlen > 0 {
			//bytesEntity_Ref := pools.CtlBytesSlicePool.Get().([]byte)
			useBytes := dstBytes[0:proto.Rlen]
			var bt = bytes2.NewByteArray(useBytes)
			bt.Write(proto.Bytes)
			processor.DepartDomain(processor.Ver, proto.S_cliconnId, bt.Bytes())
			//pools.CtlBytesSlicePool.Put(bytesEntity_Ref)
		}
		return nil
	}
	return errors.New(fmt.Sprintf("err: processor := repe.RepertoryGet(proto.Ver) :processor==nil not exist %d", proto.Ver))
}
|
package main
import (
"fmt"
"net/http"
"strings"
"time"
_ "net/http/pprof"
"github.com/uber/makisu/lib/log"
)
// main44 is an unused pprof/channel experiment (not the entry point).
// NOTE(review): the inner range goroutine never terminates because the
// channel is never closed (close is commented out) — goroutine leak by
// design in this scratch code.
func main44() {
	go func() {
		ch := make(chan string, 0)
		go func() {
			fmt.Println("pp")
			time.Sleep(2 * time.Second)
			ch <- "wwwww"
			// close(ch)
		}()
		go func() {
			for e := range ch {
				if e == "" {
					fmt.Println("ch nil")
				}
				fmt.Println("====", e)
			}
			fmt.Println("end")
		}()
	}()
	// Serve pprof on :6060; ListenAndServe blocks until it fails.
	log.Error(http.ListenAndServe(":6060", nil))
}
// main demonstrates ParseLikeSql on a sample string.
func main() {
	fmt.Println(ParseLikeSql("gssfsfdgsfj%_"))
}
// ParseLikeSql escapes the SQL LIKE wildcards '%' and '_' in s with a
// backslash so the string can be embedded verbatim in a LIKE pattern.
//
// Fix: the escape character itself is now escaped first. Previously an
// input already containing a backslash produced a pattern in which that
// backslash combined with a following (escaped) wildcard incorrectly.
func ParseLikeSql(s string) string {
	// Order matters: escape `\` before the wildcards, otherwise the
	// backslashes we add would be doubled again.
	var escape, keywords = `\`, []string{`\`, "%", "_"}
	for _, keyword := range keywords {
		if strings.Contains(s, keyword) {
			s = strings.Replace(s, keyword, escape+keyword, -1)
		}
	}
	return s
}
|
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raftconsensus
import (
"context"
"encoding/json"
"errors"
"fmt"
"kto/block"
"kto/server"
"kto/txpool"
"kto/until/logger"
"net/http"
"net/rpc"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/coreos/etcd/etcdserver/stats"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/etcd/snap"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
)
// A key-value stream backed by raft
// newRaftNode initiates a raft instance from the given config and server,
// configures logging, and starts the node in a background goroutine.
// It returns an error channel on which fatal raft errors are delivered
// (see writeError). Note: unlike the upstream etcd example this variant
// takes no propose/commit channels; proposals flow through the server.
func newRaftNode(cf *cfgInfo, s *server.Server) <-chan error {
	errorC := make(chan error)
	rn := &raftNode{
		clusterid:      0x1000,
		id:             cf.id,
		addr:           cf.addr,
		addrPort:       cf.peer,
		peers:          cf.peers,
		join:           cf.join,
		waldir:         cf.waldir,
		snapdir:        cf.snapdir,
		DS:             cf.Ds,
		Cm:             cf.Cm,
		QTJ:            cf.QTJ,
		srv:            s,
		snapCount:      uint64(cf.snapCount),
		errorC:         errorC,
		stopc:          make(chan struct{}),
		raftStorage:    raft.NewMemoryStorage(),
		idPeer:         make(map[int]string),
		logFile:        cf.logFile,
		logSaveDays:    cf.logSaveDays,
		logLevel:       cf.logLevel,
		logSaveMode:    cf.logSaveMode,
		logFileSize:    cf.logFileSize,
		appliedIndex:   1,
		lastHeight:     1,
		committedIndex: 0,
		snapshotIndex:  0,
		snapshot:       new(raftpb.Snapshot),
	}
	// logger initialization
	logger.Config(rn.logFile, rn.logLevel)
	logger.SetSaveMode(rn.logSaveMode)
	// save mode 3 rotates by size; convert MiB to bytes
	if rn.logSaveMode == 3 && rn.logFileSize > 0 {
		logger.SetSaveSize(rn.logFileSize * 1024 * 1024)
	}
	logger.SetSaveDays(rn.logSaveDays)
	logger.Infof("start raft...\n")
	go rn.startNode()
	return errorC
}
// committedEntriesToApply trims ents down to the suffix of entries that
// have not yet been applied (those after rn.committedIndex). It fatals if
// the first entry is ahead of the node's committed index, because that
// would mean a gap in the log.
func (rn *raftNode) committedEntriesToApply(ents []raftpb.Entry) (entrs []raftpb.Entry) {
	if len(ents) <= 0 {
		return nil
	}
	firstIdx := ents[0].Index
	if firstIdx > rn.committedIndex {
		logger.Fatalf("Fatal error:first index of committedEntries[%d] should <= progress appliedIndex[%d]+1\n", firstIdx, rn.committedIndex)
		panic("Fatal error:first index of committedEntries should <= progress appliedIndex+1")
	}
	// Only the tail past the already-committed index remains to be applied.
	if rn.committedIndex-firstIdx < uint64(len(ents)) {
		entrs = ents[rn.committedIndex-firstIdx:]
	}
	logger.Infof("committedEntriesToApply:[firstIdx = %v,rn.committedIndex+1 = %v],len(entrs) = %v.\n", firstIdx, rn.committedIndex+1, len(entrs))
	return entrs
}
// EntriesToApply filters out committed entries that carry no payload
// (e.g. raft-internal empty entries) and returns the remainder in order.
//
// Fix: the previous implementation removed elements from ents while
// ranging over it (append(ents[:i], ents[i+1:]...) inside the loop), which
// skips the element that slides into the removed slot — consecutive empty
// entries were only partially dropped. This version filters in place with
// a single pass over a stable snapshot of the slice.
func (rn *raftNode) EntriesToApply(ents []raftpb.Entry) []raftpb.Entry {
	if len(ents) <= 0 {
		return nil
	}
	// In-place filter: reuse the backing array, keep only entries with data.
	filtered := ents[:0]
	for _, e := range ents {
		if len(e.Data) > 0 {
			filtered = append(filtered, e)
		}
	}
	ents = filtered
	logger.Infof("EntriesToApply: len(ents) = %v\n{commitentries: %v}.\n\r", len(ents), ents)
	return ents
}
// replayWAL replays WAL entries into the raft instance: it ensures the WAL
// directory exists, loads the latest snapshot, then reads the log.
func (rn *raftNode) replayWAL() *wal.WAL {
	rn.createWal()
	if err := rn.loadSnapshot(); err != nil {
		panic("Raft loadSnapshot error!")
	}
	return rn.readWAL()
}
// readWAL opens the WAL positioned after the loaded snapshot, replays the
// stored snapshot/state/entries into raftStorage, and returns the open WAL
// for further appends.
func (rn *raftNode) readWAL() *wal.WAL {
	logger.Infof("replaying WAL of member %d", rn.id)
	walsnap := walpb.Snapshot{}
	// Resume the WAL from the snapshot position, if one was loaded.
	if rn.snapshot != nil {
		walsnap.Index, walsnap.Term = rn.snapshot.Metadata.Index, rn.snapshot.Metadata.Term
	}
	logger.Infof("loading WAL at term %d and index %d", walsnap.Term, walsnap.Index)
	w, err := wal.Open(rn.waldir, walsnap)
	if err != nil {
		logger.Fatalf("raftconsensus: error loading wal (%v)", err)
	}
	_, st, ents, err := w.ReadAll()
	if err != nil {
		logger.Fatalf("raftconsensus: failed to read WAL (%v)", err)
	}
	if rn.snapshot != nil {
		// NOTE(review): ApplySnapshot/SetHardState/Append errors are ignored
		// throughout this function — confirm that is acceptable on startup.
		rn.raftStorage.ApplySnapshot(*rn.snapshot)
		rn.confState = &rn.snapshot.Metadata.ConfState
		rn.snapshotIndex = rn.snapshot.Metadata.Index
		logger.Infof("Start: [snapshotIndex = %v,term = %v]\n", rn.snapshot.Metadata.Index, rn.snapshot.Metadata.Term)
	}
	rn.committedIndex = st.Commit
	rn.raftStorage.SetHardState(st)
	logger.Infof("Start: [committedIndex = %v,len(ents) = %v]\n", st.Commit, len(ents))
	// append to storage so raft starts at the right place in log
	rn.raftStorage.Append(ents)
	return w
}
// createWal lazily creates the WAL directory and an empty WAL file the
// first time the node starts; it is a no-op when a WAL already exists.
func (rn *raftNode) createWal() {
	if wal.Exist(rn.waldir) {
		return
	}
	if err := os.MkdirAll(rn.waldir, 0750); err != nil {
		logger.Fatalf("raftconsensus error: cannot create dir for wal (%v)", err)
	}
	w, err := wal.Create(rn.waldir, nil)
	if err != nil {
		logger.Fatalf("raftconsensus error: create wal error (%v)", err)
	}
	w.Close()
}
// writeError tears the node down after a fatal transport error: it stops
// the raft HTTP layer, forwards err to the errorC consumer, closes errorC
// to signal the owner that no more errors will come, and halts raft.
func (rn *raftNode) writeError(err error) {
	rn.stopHTTP()
	rn.errorC <- err
	close(rn.errorC)
	rn.node.Stop()
}
// startNode bootstraps the raft instance: it prepares the snapshot
// directory, replays any existing WAL, starts (or restarts) the raft
// state machine, wires the rafthttp transport to every peer, and spawns
// the background goroutines (raft HTTP server, block packing, the
// ready-loop, and the node-management RPC service).
func (rn *raftNode) startNode() {
	if !fileutil.Exist(rn.snapdir) {
		if err := os.MkdirAll(rn.snapdir, 0750); err != nil {
			logger.Fatalf("raftconsensus error: cannot create dir for snapshot (%v)", err)
		}
	}
	rn.snapshotter = snap.New(rn.snapdir)
	// Whether a WAL already existed decides StartNode vs RestartNode below;
	// check before replayWAL, which creates the WAL when missing.
	oldwal := wal.Exist(rn.waldir)
	w := rn.replayWAL()
	if w == nil {
		logger.Fatalf("error: cannot create wal !!!")
	}
	rn.wal = w
	// Peer IDs are 1-based positions in the configured peer list.
	rpeers := make([]raft.Peer, len(rn.peers))
	for i := range rpeers {
		rpeers[i] = raft.Peer{ID: uint64(i + 1)}
		rn.idPeer[i+1] = rn.peers[i]
	}
	c := &raft.Config{
		ID:              uint64(rn.id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         rn.raftStorage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
	if oldwal {
		logger.Infof("raft->RestartNode=======")
		rn.node = raft.RestartNode(c)
	} else {
		// A node joining an existing cluster must not seed initial peers.
		startPeers := rpeers
		if rn.join {
			startPeers = nil
		}
		logger.Infof("raft->StartNode=======")
		rn.node = raft.StartNode(c, startPeers)
	}
	rn.transport = &rafthttp.Transport{
		ID:          types.ID(rn.id),
		ClusterID:   types.ID(rn.clusterid),
		ServerStats: stats.NewServerStats("", ""),
		LeaderStats: stats.NewLeaderStats(strconv.Itoa(rn.id)),
		ErrorC:      make(chan error),
		Raft:        rn,
	}
	rn.transport.Start()
	// Register every peer except ourselves with the transport.
	for i := range rn.peers {
		if i+1 != rn.id {
			rn.transport.AddPeer(types.ID(i+1), []string{rn.peers[i]})
			fmt.Println("Transport start: ID = ", i+1, "address = ", rn.peers[i])
		}
	}
	go rn.serveRaft()
	go rn.packBlock()
	go rn.handleChanel()
	nm := &NodeManage{rn: rn}
	go nm.RunNodeManage()
}
// stop closes http, closes all channels, and stops raft.
// Order matters: the HTTP layer is torn down first so no new raft
// messages arrive while the state machine is being stopped.
func (rn *raftNode) stop() {
	rn.stopHTTP()
	close(rn.errorC)
	rn.node.Stop()
}
// stopHTTP stops the rafthttp transport, asks the raft HTTP server to
// shut down via httpstopc, and blocks until serveRaft acknowledges the
// shutdown by closing httpdonec.
func (rn *raftNode) stopHTTP() {
	rn.transport.Stop()
	close(rn.httpstopc)
	<-rn.httpdonec
}
// handleChanel is the raft ready-loop: it drives ticks, applies committed
// entries, persists ready state (WAL, snapshot, in-memory storage), sends
// outbound messages, and triggers periodic snapshots, until stopped or a
// transport error occurs. It owns rn.wal and closes it on exit.
func (rn *raftNode) handleChanel() {
	defer rn.wal.Close()
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	// Seed lastHeight from the persisted chain so block commits can be
	// validated against the current database height.
	lh, err := rn.srv.Bc.Height()
	if err != nil {
		panic("handleChanel error: get last block height failed!")
	}
	rn.lastHeight = lh
	// event loop on raft state machine updates
	for {
		select {
		case <-ticker.C:
			rn.node.Tick()
		// store raft entries to wal, then publish over commit channel
		case rd := <-rn.node.Ready():
			//save snap:
			if !raft.IsEmptySnap(rd.Snapshot) {
				rn.recoverSnapshotData(rd.Snapshot)
				rn.raftStorage.ApplySnapshot(rd.Snapshot)
			}
			// Apply committed entries before persisting the new state.
			rn.commitEntries(rd.CommittedEntries)
			err := rn.wal.Save(rd.HardState, rd.Entries)
			if err != nil {
				logger.Errorf("Save wal: %v\n", err)
			}
			if rd.HardState.Commit != 0 {
				logger.Infof("Save wal successfully.\n")
			}
			if len(rd.Entries) != 0 {
				rn.raftStorage.Append(rd.Entries)
			}
			if !raft.IsEmptyHardState(rd.HardState) {
				// Never record a Commit beyond what has actually been applied
				// locally, so a restart replays the missing entries.
				if rn.committedIndex < rd.HardState.Commit {
					logger.Infof("SetHardState Info: set rd.HardState.Commit to committedIndex:[%v ,%v] \n", rn.committedIndex, rd.HardState.Commit)
					rd.HardState.Commit = rn.committedIndex
				}
				rn.raftStorage.SetHardState(rd.HardState)
			}
			if len(rd.Messages) != 0 {
				rn.transport.Send(rd.Messages)
			}
			// Snapshot every snapCount applied entries, aligned down to a
			// multiple of snapCount.
			if rn.committedIndex > rn.snapshotIndex {
				if rn.committedIndex-rn.snapshotIndex >= rn.snapCount {
					offset := rn.committedIndex % rn.snapCount
					err := rn.maybeTriggerSnapshot(rn.committedIndex - offset)
					if err != nil {
						logger.Infof("Snapshot error: %v; new snap Index = %v,last snap Index = %v\n", err, rn.committedIndex, rn.snapshotIndex)
					}
				}
			}
			rn.node.Advance()
		case err := <-rn.transport.ErrorC:
			rn.writeError(err)
			return
		case <-rn.stopc:
			rn.stop()
			return
		}
	}
}
// commitEntries applies committed raft entries to the blockchain state.
// Normal entries carry a JSON-encoded block which is validated and
// committed; conf-change entries update cluster membership and the
// transport. Entries at or below rn.committedIndex are skipped (already
// applied), and rn.committedIndex advances as entries are handled.
func (rn *raftNode) commitEntries(ents []raftpb.Entry) {
	// NOTE(review): the `ents == nil` arm is unreachable — len(nil) == 0
	// already satisfies the first condition.
	if len(ents) <= 0 || ents == nil {
		return
	}
	for _, e := range ents {
		logger.Infof("commitEntries: e.Index=%v,last committedIndex = %v,e.term =%v.\n", e.Index, rn.committedIndex, e.Term)
		// Idempotence: never re-apply an index we have already committed.
		if e.Index <= rn.committedIndex {
			logger.Infof("Continue warning: log e.Index=%v already commited. Committed index = %v,term =%v.\n", e.Index, rn.committedIndex, e.Term)
			continue
		}
		if len(e.Data) > 0 {
			if e.Type == raftpb.EntryNormal {
				//commit block
				b := &block.Blocks{}
				err := json.Unmarshal(e.Data, b)
				if err != nil {
					// Undecodable payload: skip this entry. Note committedIndex
					// is NOT advanced in this case (the `continue` below skips it).
					logger.Infof("commitEntries Unmarshal warning: %v\n", err)
					continue
				}
				// Remove the block's transactions from the local tx pool.
				p := rn.srv.Gettxpool()
				p.Filter(*b)
				logger.Infof("Start: commit block b.height=%v,last block height = %v\n", b.Height, rn.lastHeight)
				// Stale block: count the entry as applied and move on.
				if b.Height <= rn.lastHeight {
					logger.Infof("Commit warning: new block height [%v <= %v] last Height!\n", b.Height, rn.lastHeight)
					rn.committedIndex++
					continue
				}
				// Heights must advance by exactly one; a gap is unrecoverable.
				if b.Height-rn.lastHeight != 1 {
					logger.Errorf("Commit block fatal error:b.Height %v - rn.lastHeight %v = %v.\n", b.Height, rn.lastHeight, b.Height-rn.lastHeight)
					panic("Commit block error: b.Height - lastHeight >1")
				}
				// Validate the block's results and transactions before commit;
				// a validation failure on a committed entry is fatal.
				if !rn.checkBlock(b) {
					logger.Infof("Follow checkBlock error: block height = %v !\n", b.Height)
					panic("CheckBlock ERROR!")
				}
				if !txpool.VerifyBlcok(*b, rn.srv.Bc) {
					logger.Infof("Follow verifyBlcok error: block height = %v !\n", b.Height)
					panic("VerifyBlcok ERROR!")
				}
				//start to commit
				err = rn.srv.CommitBlock(b, []byte(rn.addr))
				if err != nil {
					logger.Fatalf("Fatal error: commit block failed, height = %v,e.Index = %v,e.term = %v,txnum = %v\n", b.Height, e.Index, e.Term, len(b.Txs))
					fmt.Printf("Fatal error: commit block failed, Height =%v,e.index = %v,e.term = %v,txnum = %v\n", b.Height, e.Index, e.Term, len(b.Txs))
					panic("Commit block failed!")
				}
				//update lastHeight.
				rn.lastHeight = b.Height
				fmt.Printf("End: commited Block Height =%v,e.index = %v,e.term = %v,txnum = %v\n", b.Height, e.Index, e.Term, len(b.Txs))
			}
			if e.Type == raftpb.EntryConfChange {
				var cc raftpb.ConfChange
				cc.Unmarshal(e.Data)
				rn.confState = rn.node.ApplyConfChange(cc)
				switch cc.Type {
				case raftpb.ConfChangeAddNode:
					// Context carries the new peer's URL.
					if len(cc.Context) > 0 {
						fmt.Println("Add node into cluster :", cc.NodeID, string(cc.Context))
						rn.transport.AddRemote(types.ID(cc.NodeID), []string{string(cc.Context)})
						rn.transport.AddPeer(types.ID(cc.NodeID), []string{string(cc.Context)})
						rn.updateClusterInfo(true, types.ID(cc.NodeID), string(cc.Context))
					}
				case raftpb.ConfChangeRemoveNode:
					fmt.Println("Removed node from the cluster and Shutting down.", cc.NodeID)
					if rn.id != int(cc.NodeID) {
						rn.transport.RemovePeer(types.ID(cc.NodeID))
					}
					// Removing ourselves: shut down transport and raft.
					if rn.id == int(cc.NodeID) {
						rn.transport.Stop()
						rn.node.Stop()
					}
					rn.updateClusterInfo(false, types.ID(cc.NodeID), "")
				}
			}
		}
		// after commit, update committedIndex.
		rn.committedIndex = e.Index
	}
}
// maybeTriggerSnapshot creates a snapshot covering the log up to appIndex:
// it serializes all entries since the previous snapshot, records the
// snapshot in the WAL and on disk, compacts the in-memory log, and
// advances rn.snapshotIndex. Returns a non-nil error when any step fails.
func (rn *raftNode) maybeTriggerSnapshot(appIndex uint64) error {
	logger.Infof("Snapshot info: appliedIndex = %v,last snapshotIndex = %v.\n", rn.committedIndex, rn.snapshotIndex)
	// Snapshot payload: JSON of entries (snapshotIndex, appIndex].
	data, err := rn.getSnapData(appIndex + 1)
	if err != nil {
		logger.Infof("getSnapData error!")
		return err
	}
	snap, err := rn.raftStorage.CreateSnapshot(appIndex, rn.confState, data)
	if err != nil {
		logger.Infof("CreateSnapshot error!")
		return err
	}
	if err := rn.saveSnap(snap); err != nil {
		logger.Infof("saveSnap error!")
		return err
	}
	//Compact discards all log entries prior to compactIndex.
	if err := rn.raftStorage.Compact(appIndex); err != nil {
		logger.Infof("Compact error!")
		return err
	}
	logger.Infof("Compacted log at commited index %d", appIndex)
	//update rn.snapshotIndex
	rn.snapshotIndex = appIndex
	rn.snapshot = &snap
	logger.Infof("End: Finish snapshot at Index = %v\n", appIndex)
	return nil
}
// unMarshalSnapData decodes a snapshot payload (a JSON-encoded slice of
// raft entries) and validates that it is non-empty.
//
// Returns the decoded entries, or an error when the payload is empty,
// malformed, or decodes to zero entries.
//
// Fixed: the former `len(snapData) <= 0 || snapData == nil` made the nil
// branch unreachable (len of a nil slice is already 0); a single length
// check covers both cases.
func (rn *raftNode) unMarshalSnapData(snapData []byte) ([]raftpb.Entry, error) {
	if len(snapData) == 0 {
		logger.Errorf("snapData error: %v\n", snapData)
		return nil, errors.New("snapData error.")
	}
	var ents []raftpb.Entry
	if err := json.Unmarshal(snapData, &ents); err != nil {
		logger.Errorf("recoverSnapshotData error: %v\n", err)
		return nil, err
	}
	if len(ents) == 0 {
		logger.Infof("Warning: Snap entries length should > 0, return.\n")
		return nil, errors.New("Snap entries length should > 0, return.")
	}
	return ents, nil
}
// recoverSnapshotData brings a lagging follower up to date after
// receiving snapshotToSave. If the local state is more than one snapshot
// interval behind, the missing intervals are fetched from the leader over
// RPC (NodeManage.HandleSnap) and applied one snapshot at a time; finally
// the entries of snapshotToSave itself are applied and persisted.
func (rn *raftNode) recoverSnapshotData(snapshotToSave raftpb.Snapshot) {
	if raft.IsEmptySnap(snapshotToSave) {
		return
	}
	logger.Infof("publishing snapshot at index %d", rn.snapshotIndex)
	defer logger.Infof("finished publishing snapshot at index %d", rn.snapshotIndex)
	// Ignore snapshots we have already caught up past.
	if snapshotToSave.Metadata.Index <= rn.committedIndex {
		logger.Infof("snapshot index [%d] should > progress committedIndex [%d]", snapshotToSave.Metadata.Index, rn.committedIndex)
		return
	}
	ents, err := rn.unMarshalSnapData(snapshotToSave.Data)
	if err != nil {
		logger.Errorf("recoverSnapshotData unMarshalSnapData: %v.", err)
		return
	}
	l := len(ents)
	if l <= 0 {
		logger.Errorf("Warning: Snap entries length should > 0, return.\n")
		return
	}
	// Nothing new relative to our last snapshot.
	if rn.snapshotIndex >= ents[l-1].Index {
		logger.Infof("Don't need to save snap.[rn.snapshotIndex %v >= %v ents[l-1].Index]\n", rn.snapshotIndex, ents[l-1].Index)
		return
	}
	//missing snap data. so get snap data from leader by rpc then commit.
	ci, _, err := rn.raftStorage.InitialState()
	if err != nil {
		logger.Errorf("commitEntries error: Get HardState failed , %v\n", err)
		return
	}
	rn.committedIndex = ci.Commit
	// The snapshot's entries start beyond both our applied index and our
	// last snapshot: whole snapshot intervals are missing and must be
	// requested from the leader.
	if rn.committedIndex < ents[0].Index && rn.snapshotIndex < ents[0].Index {
		var term uint64 = 1
		if rn.snapshot != nil {
			term = rn.snapshot.Metadata.Term
		}
		logger.Infof("recover missing SnapshotData: from last commited index = %v to ents[0].Index = %v,snapshotIndex = %v,term = %v,ents[l-1].Index = %v.\n", rn.committedIndex, ents[0].Index, rn.snapshotIndex, term, ents[l-1].Index)
		// Walk forward one snapshot interval per successful RPC round.
		// NOTE(review): the `continue` statements below do not change
		// nextReqSnapIndex, so a persistent leader/URL failure spins this
		// loop without backoff — confirm this is acceptable.
		for nextReqSnapIndex := rn.snapshotIndex + rn.snapCount; (nextReqSnapIndex < snapshotToSave.Metadata.Index) && (rn.snapshotIndex%rn.snapCount == 0); {
			req := ReqSnaprpc{
				MaxTerm:           snapshotToSave.Metadata.Term,
				Term:              term,
				NextSnapshotIndex: nextReqSnapIndex,
			}
			res := ReSSnaprpc{}
			st := rn.node.Status()
			// No leader known yet: retry until one is elected.
			if st.Lead == 0 {
				continue
			}
			add, err := url.Parse(rn.idPeer[int(st.Lead)])
			if err != nil {
				continue
			}
			leaderIP := add.Hostname()
			if len(leaderIP) <= 0 {
				continue
			}
			// The RPC port is this node's own management port on the
			// leader's host.
			port := rn.addrPort
			p := strings.Split(port, ":")
			if len(p[1]) <= 0 {
				continue
			}
			addport := leaderIP + ":" + p[1]
			logger.Infof("ResquestSnapData gets ip:port = %v\n", addport)
			logger.Infof("recoverSnapshotData: Start to request snap data by rpc from term [%v-%v].\n", term, snapshotToSave.Metadata.Term)
			er := rn.ResquestSnapData(addport, &req, &res)
			if er != nil {
				logger.Errorf("ResquestSnapData error: %v.term = %v,snapshotindex = %v\n", er, res.Term, res.NextSnapshotIndex)
				panic("ResquestSnapData error!")
			}
			if res.Done {
				/***********************************/
				logger.Infof("recoverSnapshotData: Request snap data by rpc successfully.\n")
				data := res.Data
				snap := new(raftpb.Snapshot)
				err := json.Unmarshal(data, snap)
				if err != nil {
					logger.Errorf("Unmarshal error: %v\n", err)
					break
				}
				if raft.IsEmptySnap(*snap) {
					logger.Errorf("recoverSnapshotData saveSnap warning: Empty snap data.\n")
					break
				}
				//commit snap data
				if len(snap.Data) > 0 {
					en, err := rn.unMarshalSnapData(snap.Data)
					if err != nil {
						logger.Errorf("recover snap unMarshalSnapData error: %v!\n", err)
						break
					}
					// Trim entries already applied locally.
					entries, err := rn.snapEntriesToApply(rn.committedIndex+1, en)
					if err != nil {
						logger.Errorf("snapEntriesToApply error: %v!\n", err)
						break
					}
					logger.Infof("recoverSnapshotData: Start recover missing snap data from index %v to index %v,term = %v,snapshotindex = %v.\n", rn.committedIndex, entries[len(entries)-1].Index, res.Term, res.NextSnapshotIndex)
					rn.commitEntries(entries)
					//store snap data to disk
					if err := rn.saveSnap(*snap); err != nil {
						logger.Errorf("recoverSnapshotData saveSnap error1: %v!\n", err)
						break
					}
					// Advance all local markers to this snapshot and move to
					// the next interval.
					rn.confState = &snap.Metadata.ConfState
					rn.snapshotIndex = snap.Metadata.Index
					rn.committedIndex = snap.Metadata.Index
					rn.snapshot = snap
					term = snap.Metadata.Term
					nextReqSnapIndex = rn.snapshotIndex + rn.snapCount
				}
			}
		}
		// After the catch-up loop we must be exactly one interval behind
		// snapshotToSave; anything else means recovery failed.
		if rn.snapshotIndex+rn.snapCount != snapshotToSave.Metadata.Index {
			logger.Errorf("recoverSnapshotData error: recover missing snap data failed![snapIndex = %v,term = %v]\n", rn.snapshotIndex, term)
			panic("recoverSnapshotData error: recover missing snap data failed\n")
		}
		logger.Infof("recoverSnapshotData: Finish recover missing snap data.[snapIndex = %v,term = %v]\n", rn.snapshotIndex, term)
	}
	//no missing snap data, so recover data from newest sanp.
	logger.Infof("recoverSnapshotData: rn.snapshotIndex+rn.snapCount[%v + %v] [<=>] snapshotToSave.Metadata.Index [%v].\n", rn.snapshotIndex, rn.snapCount, snapshotToSave.Metadata.Index)
	if (rn.snapshotIndex+rn.snapCount == snapshotToSave.Metadata.Index) && (rn.snapshotIndex%rn.snapCount == 0) {
		//store snap data
		entries, err := rn.snapEntriesToApply(rn.committedIndex+1, ents)
		if err != nil {
			logger.Errorf("snapEntriesToApply error: %v!\n", err)
			return
		}
		logger.Infof("recoverSnapshotData: Start recover snap data.\n")
		rn.commitEntries(entries)
		//store snap data to disk
		if err := rn.saveSnap(snapshotToSave); err != nil {
			logger.Errorf("recoverSnapshotData saveSnap error2: %v!\n", err)
			panic("recoverSnapshotData saveSnap error2")
		}
		rn.confState = &snapshotToSave.Metadata.ConfState
		rn.snapshotIndex = snapshotToSave.Metadata.Index
		rn.committedIndex = snapshotToSave.Metadata.Index
		rn.snapshot = &snapshotToSave
		logger.Infof("recoverSnapshotData: Finish recover snap data.[snapIndex = %v,term = %v]\n", rn.snapshotIndex, snapshotToSave.Metadata.Term)
		// //discard log before committedIndex
		// fst, _ := rn.raftStorage.FirstIndex()
		// last, _ := rn.raftStorage.LastIndex()
		// if rn.committedIndex >= fst && rn.committedIndex <= last {
		// 	if err := rn.raftStorage.Compact(rn.committedIndex); err != nil {
		// 		logger.Infof("recoverSnapshotData compact error!")
		// 		return
		// 	}
		// 	logger.Infof("recoverSnapshotData compacted log at commited index %d", rn.committedIndex)
		// }
	}
}
// snapEntriesToApply returns the suffix of ents beginning at committed
// index ci (entries before ci are already applied locally). It errors
// when ci falls outside the index range covered by ents.
//
// Fixed: the original indexed ents[0] before checking for an empty
// slice, which panics when called with no entries.
func (rn *raftNode) snapEntriesToApply(ci uint64, ents []raftpb.Entry) ([]raftpb.Entry, error) {
	le := len(ents)
	if le == 0 {
		return nil, errors.New("Snap entries length should > 0.")
	}
	if ci < ents[0].Index {
		logger.Infof("Commited index = %v ,ents[0].index = %v\n", ci, ents[0].Index)
		return nil, errors.New("Commited index should >= ents[0].index.")
	}
	if ci > ents[le-1].Index {
		return nil, errors.New("Already Commited, index should < ents[len - 1].index.")
	}
	return ents[ci-ents[0].Index:], nil
}
// ResquestSnapData dials the leader's node-management RPC endpoint at
// address and asks for the snapshot described by req; the reply is
// written into res.
//
// Fixes:
//   - the error log passed err to Errorf with no format verb (a go vet
//     error that dropped the error text from the output);
//   - the RPC client connection was never closed (connection leak).
func (rn *raftNode) ResquestSnapData(address string, req *ReqSnaprpc, res *ReSSnaprpc) error {
	client, err := rpc.DialHTTP("tcp", address)
	if err != nil {
		fmt.Println(err)
		return err
	}
	defer client.Close()
	if err := client.Call("NodeManage.HandleSnap", req, res); err != nil {
		logger.Errorf("Handle snap error: %v", err)
		return err
	}
	return nil
}
// loadSnapshot loads the newest on-disk snapshot into rn.snapshot.
// A missing snapshot (snap.ErrNoSnapshot) is not an error — rn.snapshot
// is simply set to nil. Any other load failure is fatal.
func (rn *raftNode) loadSnapshot() error {
	snapshot, err := rn.snapshotter.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		logger.Fatalf("raftconsensus: error loading snapshot (%v)", err)
		return err
	}
	rn.snapshot = snapshot
	return nil
}
// checkBlock validates a block's results against local chain state via
// Bc.Checkresults. The "End" banner is only logged on success, matching
// the original control flow.
func (rn *raftNode) checkBlock(b *block.Blocks) bool {
	logger.Infof("====================Start checkBlock====================")
	ok := rn.srv.Bc.Checkresults(b, []byte(rn.DS), []byte(rn.Cm), []byte(rn.QTJ))
	if !ok {
		return false
	}
	logger.Infof("=====================End checkBlock====================")
	return true
}
// packBlock loops forever: once per second, if this node is the raft
// leader and its log is fully committed, it packs a new block from the
// tx pool, computes its results, and proposes it to raft. Followers just
// log the current leader and keep polling.
func (rn *raftNode) packBlock() {
	for {
		time.Sleep(time.Second * 1)
		st := rn.node.Status()
		fmt.Printf("Leader ID:%v; SelfNode Id:%v\n", st.Lead, rn.id)
		logger.Infof("Leader ID:%v; SelfNode Id:%v\n", st.Lead, rn.id)
		if st.SoftState.RaftState == raft.StateLeader {
			li, err := rn.raftStorage.LastIndex()
			if err != nil {
				logger.Infof("Before package Warning: Get last Index failed !\n")
				continue
			}
			ci, _, err := rn.raftStorage.InitialState()
			if err != nil {
				logger.Infof("Warning: Get commit Index failed, %v.\n", err)
				continue
			}
			// Only propose when every appended entry has been committed.
			// NOTE(review): the condition is `>` but the message reads "!=" —
			// the message is slightly misleading; behavior unchanged.
			if li > ci.Commit {
				logger.Infof("Warning: Last Index [%v != %v] commited Index !\n", li, ci.Commit)
				continue
			}
			logger.Infof("Start to pack Block: Last log Index [%v] , Commited Index [%v].\n", li, ci.Commit)
			b, err := rn.srv.PackBlock([]byte(rn.addr), []byte(rn.DS), []byte(rn.Cm), []byte(rn.QTJ))
			if err != nil {
				logger.Infof("Leader: PackBlock failed,do it again.\n")
				continue
			}
			logger.Infof("Leader: Pack new Block height = %v successfully!\n", b.Height)
			b.Miner = []byte(rn.addr)
			b = rn.srv.Bc.Calculationresults(b)
			// Convert to the wire form before proposing.
			bs := block_change(b)
			pb, err := json.Marshal(bs)
			if err != nil {
				logger.Infof("Marshal error: package again.\n")
				continue
			}
			// Skip blocks that are already behind the persisted chain.
			currentHeight, _ := rn.srv.Bc.Height()
			if bs.Height <= currentHeight {
				logger.Infof("Warning: New pack Height [%v <= %v] current db height.\n", bs.Height, currentHeight)
				continue
			}
			er := rn.node.Propose(context.TODO(), []byte(pb))
			if er != nil {
				logger.Errorf("Raft Propose: %v,continue...", er)
				continue
			}
			logger.Infof("End: Finish Propose new block height = %v.\n", bs.Height)
		}
	}
}
// serveRaft listens on this node's own peer URL and serves the rafthttp
// transport until httpstopc is closed; any other serve failure is fatal.
// httpdonec is closed on exit so stopHTTP can wait for shutdown.
func (rn *raftNode) serveRaft() {
	url, err := url.Parse(rn.idPeer[rn.id])
	if err != nil {
		logger.Fatalf("raftconsensus error: Failed parsing URL (%v)", err)
	}
	// Stoppable listener: unblocks Serve when httpstopc closes.
	ln, err := newStoppableListener(url.Host, rn.httpstopc)
	if err != nil {
		logger.Fatalf("raftconsensus error: Failed to listen rafthttp (%v)", err)
	}
	err = (&http.Server{Handler: rn.transport.Handler()}).Serve(ln)
	// Serve always returns a non-nil error; it is only fatal when we were
	// not asked to stop.
	select {
	case <-rn.httpstopc:
	default:
		logger.Fatalf("raftconsensus error: Failed to serve rafthttp (%v)", err)
	}
	close(rn.httpdonec)
}
// Process forwards a raft message received over the transport into the
// local raft state machine (part of the transport's Raft interface —
// rn is registered as Raft in startNode).
func (rn *raftNode) Process(ctx context.Context, m raftpb.Message) error {
	return rn.node.Step(ctx, m)
}

// IsIDRemoved reports whether a peer id has been removed; this node
// never blacklists peers.
func (rn *raftNode) IsIDRemoved(id uint64) bool { return false }

// ReportUnreachable is a no-op hook for unreachable peers.
func (rn *raftNode) ReportUnreachable(id uint64) {}

// ReportSnapshot is a no-op hook for snapshot-send status reports.
func (rn *raftNode) ReportSnapshot(id uint64, status raft.SnapshotStatus) {}
// block_change converts an internal *block.Block into the serializable
// *block.Blocks form: the Txres map is flattened into a slice of
// Block_Res entries and the remaining fields are copied through.
//
// Improvements: a fresh Block_Res value is built per result (the
// original reused one *Block_Res across iterations, which only worked
// because the value is copied on append), and FirstTx is copied with a
// single variadic append instead of an element-by-element loop.
func block_change(b *block.Block) *block.Blocks {
	var rst block.Blocks
	// NOTE(review): map iteration order is random, so the ordering of Res
	// is nondeterministic — unchanged from the original; confirm consumers
	// do not rely on ordering.
	for addr, bal := range b.Txres {
		rst.Res = append(rst.Res, block.Block_Res{Address: addr, Balance: bal})
	}
	rst.FirstTx = append(rst.FirstTx, b.FirstTx...)
	return &block.Blocks{
		Height:        b.Height,
		PrevBlockHash: b.PrevBlockHash,
		Txs:           b.Txs,
		Root:          b.Root,
		Version:       b.Version,
		Timestamp:     b.Timestamp,
		Hash:          b.Hash,
		Miner:         b.Miner,
		Res:           rst.Res,
		FirstTx:       rst.FirstTx,
	}
}
// saveSnap persists a snapshot in the order required by the WAL
// invariant: WAL record first, then the snapshot file, and finally a
// release of WAL locks up to the snapshot index so old segments can be
// reclaimed.
func (rn *raftNode) saveSnap(snap raftpb.Snapshot) error {
	// must save the snapshot index to the WAL before saving the
	// snapshot to maintain the invariant that we only Open the
	// wal at previously-saved snapshot indexes.
	walSnap := walpb.Snapshot{
		Index: snap.Metadata.Index,
		Term:  snap.Metadata.Term,
	}
	if err := rn.wal.SaveSnapshot(walSnap); err != nil {
		return err
	}
	if err := rn.snapshotter.SaveSnap(snap); err != nil {
		return err
	}
	return rn.wal.ReleaseLockTo(snap.Metadata.Index)
}
// getSnapData returns the JSON encoding of the raft log entries in
// [snapshotIndex+1, hi) — i.e. everything applied since the previous
// snapshot — for use as a snapshot payload.
func (rn *raftNode) getSnapData(hi uint64) ([]byte, error) {
	lo := rn.snapshotIndex + 1
	// 5 GiB cap on the total byte size of the returned entries.
	ents, err := rn.raftStorage.Entries(lo, hi, 5*1024*1024*1024)
	if err != nil {
		logger.Infof("Get entries error from index %v to %v。", lo, hi)
		return nil, err
	}
	logger.Infof("getSnapData: Get entries from index %v to %v, get length = %v.\n", lo, hi, len(ents))
	data, err := json.Marshal(ents)
	if err != nil {
		logger.Infof("get Entries Marshal error!")
		return nil, err
	}
	return data, nil
}
// updateClusterInfo keeps rn.peers and rn.idPeer in sync after a raft
// configuration change. confchange true means node id was added with
// peer URL ps; false means node id was removed.
//
// Fixed: removal previously deleted elements from rn.peers while
// ranging over the same slice, which can skip or mis-index elements
// after the first removal; the peer is now looked up once and a single
// matching entry is removed.
func (rn *raftNode) updateClusterInfo(confchange bool, id types.ID, ps string) {
	fmt.Println("Update cluster info when config changed.")
	if confchange {
		if ps != "" {
			rn.peers = append(rn.peers, ps)
			if _, ok := rn.idPeer[int(id)]; !ok {
				rn.idPeer[int(id)] = ps
				fmt.Println("Clustermanage add node :", int(id), ps)
			}
		}
		return
	}
	fmt.Println("Clustermanage delete node id =", int(id))
	removed := rn.idPeer[int(id)]
	for indx, p := range rn.peers {
		if p == removed {
			rn.peers = append(rn.peers[:indx], rn.peers[indx+1:]...)
			break
		}
	}
	// delete is a no-op when the id is absent, so no existence check needed.
	delete(rn.idPeer, int(id))
}
// NodeManagement handles add/remove-node requests. Only the current
// leader proposes the corresponding raft configuration change; failed
// validations are reported to the caller through res.Result.
//
// Fixes:
//   - duplicate-node detection used `break`, which only exited the scan
//     loop and then proposed the ConfChange anyway; it now returns;
//   - the Infof calls passed extra arguments without format verbs (a go
//     vet error); the format strings now contain verbs.
func (rn *raftNode) NodeManagement(req *Request, res *NodeInfo) {
	st := rn.node.Status()
	if st.SoftState.RaftState != raft.StateLeader {
		return
	}
	switch req.RequestType {
	case RaftAddNode:
		// 如果已经存在,添加失败返回并带回结果信息。
		// Reject when either the id or the peer address already exists.
		for k, v := range rn.idPeer {
			if k == req.RequestId {
				res.Result = fmt.Sprintf("Failed: node %v is alrady exist!", req.RequestId)
				return
			}
			if v == req.RequestPeer {
				res.Result = fmt.Sprintf("Node %v is alrady exist!", req.RequestPeer)
				return
			}
		}
		logger.Infof("Leader ADDs NODE ====== %v ++++ %v", req.RequestId, req.RequestPeer)
		data, _ := json.Marshal(req.RequestPeer)
		cc := raftpb.ConfChange{
			Type:    0,
			NodeID:  uint64(req.RequestId),
			Context: data,
		}
		rn.node.ProposeConfChange(context.TODO(), cc)
	case RaftRemoveNode:
		// 如果要删除的id不存在,删除失败返回并带回结果信息
		// Reject when the id to remove is unknown.
		if _, ok := rn.idPeer[req.RequestId]; !ok {
			res.Result = fmt.Sprintf("Failed: node id %v is not existing!!", req.RequestId)
			return
		}
		logger.Infof("Leader Removes NODE ====== %v", req.RequestId)
		cc := raftpb.ConfChange{
			Type:   1,
			NodeID: uint64(req.RequestId),
		}
		rn.node.ProposeConfChange(context.TODO(), cc)
	}
}
// GetClusterInfo assembles a view of the cluster membership as seen by
// this node: peer list, current leader, this node's identity, total
// node count, and the highest node id in use.
//
// Cleanups: `for k, _ := range` replaced by the idiomatic
// `for k := range`, and a trailing block of dead commented-out code
// removed.
func (rn *raftNode) GetClusterInfo() *clusterInfo {
	// Highest assigned node id (ids start at 1).
	maxid := 1
	for k := range rn.idPeer {
		if maxid < k {
			maxid = k
		}
	}
	num := len(rn.peers)
	st := rn.node.Status()
	cls := &clusterInfo{
		ClusterID:    rn.clusterid,
		Peers:        rn.peers,
		LeaderID:     int(st.Lead),
		LeaderPeer:   rn.idPeer[int(st.Lead)],
		NodeID:       rn.id,
		NodePeer:     rn.idPeer[rn.id],
		TotalNodeNum: num,
		MaxNodeId:    maxid,
	}
	return cls
}
|
package stormpathweb
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"net/http"
"github.com/jarias/stormpath-sdk-go"
)
// loginHandler serves the login page (GET) and processes login
// submissions (POST), optionally delegating to Stormpath ID Site.
type loginHandler struct {
	// preLoginHandler, when set, runs before authentication; returning
	// false halts the request.
	preLoginHandler UserHandler
	// postLoginHandler, when set, runs after a successful login with the
	// authenticated account; returning false halts the request.
	postLoginHandler UserHandler
	// application is the Stormpath application used to authenticate.
	application *stormpath.Application
}
// serveHTTP routes login requests: already-authenticated users are
// redirected away, ID Site deployments are redirected to Stormpath's
// hosted login, and otherwise GET renders the form while POST processes
// a submission. Any other method gets a 405.
func (h loginHandler) serveHTTP(w http.ResponseWriter, r *http.Request, ctx webContext) {
	ctx.next = r.URL.Query().Get(NextKey)

	// Authenticated accounts skip the login flow entirely.
	if ctx.account != nil {
		http.Redirect(w, r, Config.LoginNextURI, http.StatusFound)
		return
	}

	// With ID Site enabled, the whole login UI lives on Stormpath's side.
	if Config.IDSiteEnabled {
		options := stormpath.IDSiteOptions{
			Path:        Config.IDSiteLoginURI,
			CallbackURL: baseURL(r) + Config.CallbackURI,
		}
		idSiteURL, err := h.application.CreateIDSiteURL(options)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		http.Redirect(w, r, idSiteURL, http.StatusFound)
		return
	}

	switch r.Method {
	case http.MethodPost:
		h.doPOST(w, r, ctx)
	case http.MethodGet:
		h.doGET(w, r, ctx)
	default:
		methodNotAllowed(w, r, ctx)
	}
}
// doGET renders the login form. JSON clients receive the form definition
// and account stores; HTML clients additionally get registration/verify/
// forgot-password links, social-login settings, previously posted data,
// and a status banner resolved from the ?status query parameter.
func (h loginHandler) doGET(w http.ResponseWriter, r *http.Request, ctx webContext) {
	contentType := ctx.contentType
	model := map[string]interface{}{
		"form":          Config.LoginForm,
		"accountStores": getApplicationAccountStores(h.application),
	}
	if contentType == stormpath.ApplicationJSON {
		respondJSON(w, model, http.StatusOK)
		return
	}
	if contentType == stormpath.TextHTML {
		model["registerURI"] = Config.RegisterURI
		if isVerifyEnabled(h.application) {
			model["verifyURI"] = Config.VerifyURI
		}
		if isForgotPasswordEnabled(h.application) {
			model["forgotURI"] = Config.ForgotPasswordURI
		}
		//Social
		model["googleCallbackUri"] = Config.GoogleCallbackURI
		model["googleScope"] = Config.GoogleScope
		model["githubCallbackUri"] = Config.GithubCallbackURI
		model["githubScope"] = Config.GithubScope
		model["facebookCallbackUri"] = Config.FacebookCallbackURI
		model["facebookScope"] = Config.FacebookScope
		model["linkedinCallbackUri"] = Config.LinkedinCallbackURI
		model["linkedinScope"] = Config.LinkedinScope
		//End Social
		model["postedData"] = ctx.postedData
		// NOTE(review): baseURL is hard-coded to the http:// scheme here —
		// confirm HTTPS deployments are handled elsewhere.
		model["baseURL"] = fmt.Sprintf("http://%s/%s", r.Host, Config.BasePath)
		model["status"] = resolveLoginStatus(r.URL.Query().Get("status"))
		model["error"] = ctx.webError
		respondHTML(w, model, Config.LoginView)
	}
}
// doPOST processes a login submission. Payloads carrying "providerData"
// are registered as social accounts; everything else is form-validated
// and authenticated with the OAuth password grant. On success the
// authentication result is saved and the client either receives the
// account model (JSON) or a redirect (HTML).
func (h loginHandler) doPOST(w http.ResponseWriter, r *http.Request, ctx webContext) {
	var authenticationResult stormpath.AuthResult
	if h.preLoginHandler != nil {
		pre := h.preLoginHandler(w, r, nil)
		if !pre {
			//User halted execution so we return
			return
		}
	}
	contentType := ctx.contentType
	postedData, originalData := getPostedData(r)
	if _, exists := postedData["providerData"]; exists {
		//Social account
		socialAccount := &stormpath.SocialAccount{}
		// NOTE(review): the Decode error is ignored; a malformed payload
		// yields a zero-valued socialAccount and the registration call
		// below is left to fail — confirm this is intended.
		json.NewDecoder(bytes.NewBuffer(originalData)).Decode(socialAccount)
		account, err := h.application.RegisterSocialAccount(socialAccount)
		if err != nil {
			handleError(w, r, ctx.withError(postedData, err), h.doGET)
			return
		}
		authenticationResult = transientAuthenticationResult(account)
	} else {
		err := validateForm(Config.LoginForm, postedData)
		if err != nil {
			handleError(w, r, ctx.withError(postedData, err), h.doGET)
			return
		}
		authenticationResult, err = stormpath.NewOAuthPasswordAuthenticator(h.application).Authenticate(postedData["login"], postedData["password"])
		if err != nil {
			handleError(w, r, ctx.withError(postedData, err), h.doGET)
			return
		}
	}
	// Persist the session/tokens for subsequent requests.
	err := saveAuthenticationResult(w, r, authenticationResult, h.application)
	if err != nil {
		handleError(w, r, ctx.withError(postedData, err), h.doGET)
		return
	}
	account := authenticationResult.GetAccount()
	if account == nil {
		handleError(w, r, ctx.withError(postedData, fmt.Errorf("can't get account from authentication result")), h.doGET)
		return
	}
	if h.postLoginHandler != nil {
		post := h.postLoginHandler(w, r, account)
		if !post {
			//User halted execution so we return
			return
		}
	}
	if contentType == stormpath.ApplicationJSON {
		respondJSON(w, accountModel(account), http.StatusOK)
		return
	}
	// Prefer the ?next= target captured in serveHTTP over the default.
	redirectURL := Config.LoginNextURI
	if ctx.next != "" {
		redirectURL = ctx.next
	}
	http.Redirect(w, r, redirectURL, http.StatusFound)
}
// resolveLoginStatus maps a login-page status code (from the ?status
// query parameter) to the HTML banner shown above the login form.
// Unknown statuses yield an empty message.
//
// Cleanup: removed the redundant `break` statements — Go switch cases
// never fall through.
func resolveLoginStatus(status string) template.HTML {
	statusMessage := ""
	switch status {
	case "unverified":
		statusMessage = fmt.Sprintf("Your account verification email has been sent! Before you can log into your account, you need to activate your account by clicking the link we sent to your inbox. Didn't get the email? <a href=\"%s\">Click Here</a>", Config.VerifyURI)
	case "verified":
		statusMessage = "Your Account Has Been Verified. You may now login."
	case "created":
		statusMessage = "Your Account Has Been Created. You may now login."
	case "forgot":
		statusMessage = "Password Reset Requested. If an account exists for the email provided, you will receive an email shortly."
	case "reset":
		statusMessage = "Password Reset Successfully. You can now login with your new password."
	}
	return template.HTML(statusMessage)
}
|
package dbschedules
// A ConflictGraph is a transaction conflict graph.
// An entry in a ConflictGraph stores, for a given node n,
// all of the nodes with edges pointing to n.
// The inner map is used as a set: keys are source transactions and the
// bool values are always true.
type ConflictGraph map[string]map[string]bool
// BuildConflictGraph builds a conflict graph for a schedule. An edge
// t1 -> t2 (stored as graph[t1][t2]) is added when transaction t1
// performs an action on an object that conflicts with an earlier action
// by t2: a read conflicts with prior writes, a write conflicts with any
// prior access.
func BuildConflictGraph(s Schedule) ConflictGraph {
	graph := ConflictGraph{}
	// lastAccess[object][transaction] records the strongest action each
	// transaction has performed on the object so far.
	lastAccess := map[string]map[string]ActionType{}
	for _, act := range s {
		graph[act.Transaction] = map[string]bool{}
		if act.Object != "" {
			lastAccess[act.Object] = map[string]ActionType{}
		}
	}
	for _, act := range s {
		if act.Object == "" {
			continue
		}
		seen := lastAccess[act.Object]
		switch act.Type {
		case Read:
			// A read conflicts only with earlier writes by other transactions.
			for other, t := range seen {
				if t == Write && other != act.Transaction {
					graph[act.Transaction][other] = true
				}
			}
			// Record the read, but never downgrade a recorded write.
			if _, ok := seen[act.Transaction]; !ok {
				seen[act.Transaction] = Read
			}
		case Write:
			// A write conflicts with every earlier access by other transactions.
			for other := range seen {
				if other != act.Transaction {
					graph[act.Transaction][other] = true
				}
			}
			seen[act.Transaction] = Write
		}
	}
	return graph
}
// Copy creates a deep copy of the graph: both the outer map and every
// inner edge set are duplicated, so mutations of the copy never affect
// the original.
func (c ConflictGraph) Copy() ConflictGraph {
	dup := make(ConflictGraph, len(c))
	for node, edges := range c {
		inner := make(map[string]bool, len(edges))
		for src, present := range edges {
			inner[src] = present
		}
		dup[node] = inner
	}
	return dup
}
// Cyclic reports whether the graph contains a cycle, by repeatedly
// stripping nodes with no incoming edges (topological-sort style) from
// a working copy; any nodes left over must lie on a cycle.
func (c ConflictGraph) Cyclic() bool {
	work := c.Copy()
	// sinks returns the nodes of work that currently have no in-edges.
	sinks := func() map[string]bool {
		found := map[string]bool{}
		for node, in := range work {
			if len(in) == 0 {
				found[node] = true
			}
		}
		return found
	}
	// An N^2 algorithm for topological sort is sub-optimal
	// but, for our purposes, perfectly acceptable.
	for removable := sinks(); len(removable) > 0; removable = sinks() {
		for n := range removable {
			delete(work, n)
		}
		for _, in := range work {
			for n := range removable {
				delete(in, n)
			}
		}
	}
	return len(work) > 0
}
|
package lib
import (
"bufio"
"os"
"strings"
)
type Word struct {
Value string
Part string
}
func streamWords(path string, max int, stream chan Word) {
file, err := os.Open(path)
if err != nil {
panic("Couldn't open " + path)
}
reader := bufio.NewReader(file)
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanWords)
for i := 0; scanner.Scan(); i++ {
if max > 0 && i >= max {
break
}
d := strings.Split(scanner.Text(), "_")
stream <- Word{d[0], d[1]}
}
close(stream)
}
// WordStream opens file and returns a buffered channel on which up to
// max words (all words when max <= 0) are delivered asynchronously; the
// channel is closed once the file is exhausted.
func WordStream(file string, max int) chan Word {
	words := make(chan Word, 100)
	go streamWords(file, max, words)
	return words
}
|
package lbclient
import (
"encoding/json"
"fmt"
)
// updatePart is one operation inside an update expression. Every part
// can render itself as a string (fmt.Stringer) and as a map suitable
// for serialization.
type updatePart interface {
	fmt.Stringer
	GetMap() map[string]interface{}
}
// SetOperation represents a {$set:{field:rvalue}} operation.
type SetOperation struct {
	field string
	value RValue
}

// GetMap returns the operation as {"$set": {field: value}}.
func (s SetOperation) GetMap() map[string]interface{} {
	inner := map[string]interface{}{s.field: s.value}
	return map[string]interface{}{"$set": inner}
}

// String renders the operation as {"$set":{"field":value}}.
func (s SetOperation) String() string {
	return fmt.Sprintf(`{"$set":{"%s":%s}}`, s.field, s.value)
}
// UnsetOperation represents an {$unset:field} operation.
type UnsetOperation struct {
	field string
}

// GetMap returns the operation as {"$unset": field}.
func (s UnsetOperation) GetMap() map[string]interface{} {
	return map[string]interface{}{"$unset": s.field}
}

// String renders the operation as {"$unset":"field"}.
func (s UnsetOperation) String() string {
	return fmt.Sprintf(`{"$unset":"%s"}`, s.field)
}
// AddOperation represents a {$add:{field:rvalue}} operation.
type AddOperation struct {
	field string
	value RValue
}

// GetMap returns the operation as {"$add": {field: value}}.
func (s AddOperation) GetMap() map[string]interface{} {
	inner := map[string]interface{}{s.field: s.value}
	return map[string]interface{}{"$add": inner}
}

// String renders the operation as {"$add":{"field":value}}.
func (s AddOperation) String() string {
	return fmt.Sprintf(`{"$add":{"%s":%s}}`, s.field, s.value)
}
// AppendOperation represents an array append operation
// {$append:{field:[values]}}.
type AppendOperation struct {
	field  string
	values []RValue
}

// GetMap returns the operation as {"$append": {field: [values]}}.
func (s AppendOperation) GetMap() map[string]interface{} {
	inner := map[string]interface{}{s.field: s.values}
	return map[string]interface{}{"$append": inner}
}

// String renders the operation as {"$append":{"field":[values]}}.
func (s AppendOperation) String() string {
	return fmt.Sprintf(`{"$append":{"%s":%s}}`, s.field, s.values)
}
// InsertOperation represents an array insert operation
// {$insert:{field:[values]}}.
type InsertOperation struct {
	field  string
	values []RValue
}

// GetMap returns the operation as {"$insert": {field: [values]}}.
func (s InsertOperation) GetMap() map[string]interface{} {
	inner := map[string]interface{}{s.field: s.values}
	return map[string]interface{}{"$insert": inner}
}

// String renders the operation as {"$insert":{"field":[values]}}.
func (s InsertOperation) String() string {
	return fmt.Sprintf(`{"$insert":{"%s":%s}}`, s.field, s.values)
}
// ForEachOperation represents a $foreach operation that applies an update
// (or removal) to the array elements of field selected by a query
// (or to all of them).
type ForEachOperation struct {
	field string // name of the array field iterated over
	q     struct {
		q     Query // element selection query; ignored when isAll is true
		isAll bool  // when true, the operation targets every element ("$all")
	}
	u struct {
		u        Update // update applied to each selected element; ignored when isRemove is true
		isRemove bool   // when true, selected elements are removed ("$remove")
	}
}
// GetMap returns the operation as a
// {"$foreach": {<field>: <query or "$all">, "$update": <update or "$remove">}} map.
//
// Fix: the inner map previously used the literal key "field", which
// silently dropped s.field — inconsistent with every other operation
// (they key on the actual field name) and with the documented
// {$foreach:{field:q, $update:u}} layout.
func (s ForEachOperation) GetMap() map[string]interface{} {
	m := make(map[string]interface{})
	if s.q.isAll {
		m[s.field] = "$all"
	} else {
		m[s.field] = s.q.q
	}
	if s.u.isRemove {
		m["$update"] = "$remove"
	} else {
		m["$update"] = s.u.u
	}
	return map[string]interface{}{
		"$foreach": m}
}
// String renders the operation as {$foreach:{field:q, $update:u}} using
// the map representation's default formatting.
func (s ForEachOperation) String() string {
	m := s.GetMap()
	return fmt.Sprintf("%s", m)
}
// Update is an opaque structure accumulating the parts of an update
// expression; build it via the Set/Unset/Add/Append/Insert/ForEach methods.
type Update struct {
	u []updatePart // ordered list of operations making up the expression
}
// Empty reports whether the update expression contains no operations.
func (u Update) Empty() bool {
	// len of a nil slice is 0, so the former explicit nil check was redundant.
	return len(u.u) == 0
}
// add appends op to the expression parts, lazily allocating the backing
// slice on first use, and returns u to allow call chaining.
func (u *Update) add(op updatePart) *Update {
	if u.u == nil {
		u.u = make([]updatePart, 0, 4)
	}
	u.u = append(u.u, op)
	return u
}
// Set appends a $set operation for fld to the update expression.
func (u *Update) Set(fld string, val RValue) *Update {
	op := SetOperation{field: fld, value: val}
	return u.add(op)
}
// Unset appends an $unset operation for fld to the update expression.
func (u *Update) Unset(fld string) *Update {
	op := UnsetOperation{field: fld}
	return u.add(op)
}
// Add appends a $add operation for fld to the update expression.
func (u *Update) Add(fld string, val RValue) *Update {
	op := AddOperation{field: fld, value: val}
	return u.add(op)
}
// Append appends an $append operation for fld with the given values
// to the update expression.
func (u *Update) Append(fld string, val ...RValue) *Update {
	op := AppendOperation{field: fld, values: val}
	return u.add(op)
}
// AppendList appends an $append operation for fld with the given value
// slice to the update expression.
func (u *Update) AppendList(fld string, val []RValue) *Update {
	op := AppendOperation{field: fld, values: val}
	return u.add(op)
}
// Insert appends an $insert operation for fld with the given values
// to the update expression.
func (u *Update) Insert(fld string, val ...RValue) *Update {
	op := InsertOperation{field: fld, values: val}
	return u.add(op)
}
// InsertList appends an $insert operation for fld with the given value
// slice to the update expression.
func (u *Update) InsertList(fld string, val []RValue) *Update {
	op := InsertOperation{field: fld, values: val}
	return u.add(op)
}
// ForEach appends a $foreach operation to the update expression: it
// iterates over array field fld, selecting elements by query (or all of
// them when all is true) and applying update (or removing them when
// remove is true).
func (u *Update) ForEach(fld string, query Query, all bool, update Update, remove bool) *Update {
	op := ForEachOperation{field: fld}
	op.q.q = query
	op.q.isAll = all
	op.u.u = update
	op.u.isRemove = remove
	return u.add(op)
}
// MarshalJSON encodes the update expression as JSON: "[]" when empty,
// a single operation object when there is exactly one part, and an
// array of operation objects otherwise.
func (u Update) MarshalJSON() ([]byte, error) {
	if len(u.u) == 0 {
		return []byte("[]"), nil
	}
	if len(u.u) == 1 {
		return json.Marshal(u.u[0].GetMap())
	}
	parts := make([]map[string]interface{}, len(u.u))
	for i, part := range u.u {
		parts[i] = part.GetMap()
	}
	return json.Marshal(parts)
}
// String renders the update expression as JSON; a marshal error yields
// an empty string.
func (u *Update) String() string {
	b, _ := u.MarshalJSON()
	return string(b)
}
|
package main
// Entry is a votable item together with the votes cast on it.
type Entry struct {
	Id     uint    // entry identifier
	Status string  // current status of the entry
	Value  string  // entry content
	Votes  []*Vote // votes cast on this entry
}
// Vote links a user to an entry with a weighting.
type Vote struct {
	EntryID uint  // entry the vote belongs to
	UserID  uint  // user who cast the vote
	User    *User // resolved user, if loaded
	Weight  int   // vote weight
}
// User is a voter together with the votes they have cast.
type User struct {
	Id    uint    // user identifier
	Votes []*Vote // votes cast by this user
	Name  string  // display name
} |
package main
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
// Sentinel errors returned by the lexer and parser.
var (
	ErrUnrecognizedToken    = errors.New("Lex: unrecognized token")      // no token class matched the input
	ErrIncompleteExpression = errors.New("Parse: incomplete expression")  // unmatched "("
	ErrOvercompleteExpression = errors.New("Parse: overcomplete expression") // unmatched ")"
)
// Lex scans src left to right and splits it into tokens: boolean
// literals, parens, integer literals, operators, identifiers,
// single-line comments and whitespace runs (the latter two are removed
// later by Preprocess). It returns ErrUnrecognizedToken when no token
// class matches at the current position.
func Lex(src string) ([]string, error) {
	// Token classes, tried in order at each position.
	//
	// FIX: Go's regexp alternation is leftmost-first (Perl semantics),
	// not longest-match, so the two-character operators <= and >= must
	// come before < and > — previously "<=" was always lexed as the two
	// tokens "<" and "=", breaking evaluation of (<= ...) forms.
	reStrings := []string{
		`(#t)|(#f)`,                  // boolean literals
		`[(]|[)]`,                    // parens
		`[1-9]\d*`,                   // integer literals (a bare "0" falls through to the identifier class and still evaluates to the integer 0)
		`(<=)|(>=)|\+|\-|\*|/|<|>|=`, // operators, multi-char alternatives first
		`(\w|\-|\?)+`,                // identifiers
		`;.*`,                        // single-line comments
		`((?s)[[:space:]]+)`,         // whitespace
	}
	// Compile the classes once up front.
	regexes := make([]*regexp.Regexp, len(reStrings))
	for i, v := range reStrings {
		regexes[i] = regexp.MustCompile(v)
	}
	// Advance through src, emitting the first class that matches at the
	// current offset.
	tokens := []string{}
	for i := 0; i < len(src); {
		reMatched := false
		for _, re := range regexes {
			loc := re.FindStringIndex(src[i:])
			if loc != nil && loc[0] == 0 {
				tokens = append(tokens, src[i:][loc[0]:loc[1]])
				reMatched = true
				i += loc[1] - loc[0]
				break
			}
		}
		// Error if no class can match the current input.
		if !reMatched {
			return nil, ErrUnrecognizedToken
		}
	}
	return tokens, nil
}
// Preprocess filters whitespace and comment tokens out of the token
// stream, keeping all other tokens in their original order.
func Preprocess(tokens []string) []string {
	kept := make([]string, 0, len(tokens))
	for _, tok := range tokens {
		first := rune(tok[0])
		// Whitespace tokens begin with an ASCII whitespace byte.
		if strings.ContainsRune("\t\n\v\f\r ", first) {
			continue
		}
		// Comment tokens begin with a semicolon.
		if first == ';' {
			continue
		}
		kept = append(kept, tok)
	}
	return kept
}
// Parse builds nested expression lists from a flat token stream by
// matching parentheses. It returns ErrOvercompleteExpression on an
// unmatched ")" and ErrIncompleteExpression on an unmatched "(".
func Parse(tokens []string) ([]interface{}, error) {
	stk := NewStack()
	stk.Push(NewStack())
	for _, token := range tokens {
		switch token {
		case "(":
			// Open a fresh sub-expression.
			stk.Push(NewStack())
		case ")":
			// Close the current sub-expression and attach it to its parent.
			child := stk.Pop().(Stack)
			if stk.Len() == 0 {
				return nil, ErrOvercompleteExpression
			}
			parent := stk.Pop().(Stack)
			parent.Push(child)
			stk.Push(parent)
		default:
			// Atoms are appended to the current sub-expression.
			top := stk.Pop().(Stack)
			top.Push(token)
			stk.Push(top)
		}
	}
	// Anything beyond the root stack means an unclosed "(".
	if stk.Len() > 1 {
		return nil, ErrIncompleteExpression
	}
	return stk.Pop().(Stack).ToSlice(), nil
}
// Eval evaluates a parsed expression against env and returns its value.
// Lists are function applications (special forms receive their arguments
// unevaluated); strings are literals (#t, #f, integers) or identifiers
// looked up in env.
//
// Improvement: uses the idiomatic bound type switch (switch e := expr.(type))
// instead of a bare type switch followed by repeated type assertions.
func Eval(expr interface{}, env map[string]interface{}) (interface{}, error) {
	switch e := expr.(type) {
	case []interface{}:
		// Must be a function application.
		if len(e) == 0 {
			return nil, fmt.Errorf("Eval: cannot evaluate empty expression ()")
		}
		// Evaluate the operator position first.
		function, err := Eval(e[0], env)
		if err != nil {
			return nil, err
		}
		switch fn := function.(type) {
		case specialForm:
			// Special forms control evaluation of their own arguments.
			return fn(e[1:], env)
		case proc:
			args := e[1:]
			if len(args) != len(fn.params) {
				return nil, fmt.Errorf("Eval: wrong number of params")
			}
			procEnv := copyEnv(env)
			// Arguments are evaluated in the caller's environment.
			evaluatedArgs := make([]interface{}, len(args))
			for i := range args {
				evaluatedArg, err := Eval(args[i], env)
				evaluatedArgs[i] = evaluatedArg
				if err != nil {
					return nil, err
				}
			}
			// Bind evaluated arguments to the procedure's parameters.
			for i := range evaluatedArgs {
				procEnv[fn.params[i]] = evaluatedArgs[i]
			}
			return fn.body(procEnv)
		case variadicProc:
			args := e[1:]
			procEnv := copyEnv(env)
			evaluatedArgs := make([]interface{}, len(args))
			for i := range args {
				evaluatedArg, err := Eval(args[i], env)
				evaluatedArgs[i] = evaluatedArg
				if err != nil {
					return nil, err
				}
			}
			// All arguments are bound, as a slice, to the single parameter.
			procEnv[fn.param] = evaluatedArgs
			return fn.body(procEnv)
		default:
			return nil, fmt.Errorf(
				"Eval: expected special form or procedure but received type '%T'",
				function)
		}
	case string:
		// Must be either a literal or a binding.
		if e == "#t" {
			// true literal
			return true, nil
		}
		if e == "#f" {
			// false literal
			return false, nil
		}
		if i, err := strconv.Atoi(e); err == nil {
			// integer literal
			return i, nil
		}
		// identifier
		val, ok := env[e]
		if !ok {
			return nil, fmt.Errorf("Eval: identifier not found: '%s'", e)
		}
		return val, nil
	default:
		return nil, fmt.Errorf(`Eval: received invalid expression
type: '%T'
value: '%v'`,
			e, e)
	}
}
// Exec lexes, preprocesses, parses and evaluates src in a fresh copy of
// the default environment, returning the value of the last top-level
// expression.
func Exec(src string) (interface{}, error) {
	// Each execution gets its own environment.
	env := copyEnv(defaultEnv)
	// Lex source into tokens.
	tokens, err := Lex(src)
	if err != nil {
		return nil, err
	}
	// Strip comments/whitespace and parse into an AST.
	exprs, err := Parse(Preprocess(tokens))
	if err != nil {
		return nil, err
	}
	// Evaluate each top-level expression in order.
	var retval interface{}
	for _, expr := range exprs {
		if retval, err = Eval(expr, env); err != nil {
			return nil, err
		}
	}
	return retval, nil
}
|
package cmd
import (
"os"
"strings"
"github.com/pkg/errors"
)
// exportedEnvVar splits a KEY=VALUE environment entry into its name and
// value; only the first '=' separates them, so values may contain '='.
// An entry without '=' yields an error.
func exportedEnvVar(envVar string) (string, string, error) {
	export := strings.SplitN(envVar, "=", 2)
	if len(export) != 2 {
		// Fix: error message said "splitted" (not an English word).
		return "", "", errors.Errorf("environment variable %q cannot be split", envVar)
	}
	return export[0], export[1], nil
}
// computeWasmEnv builds the environment handed to the Wasm guest and
// returns it as parallel key and value slices (keys[i] pairs with
// values[i]).
//
// Two sources are merged: host environment variables carrying the
// exported prefix (forwarded with the prefix trimmed), and variables set
// explicitly via --env, which win on key collision.
//
// NOTE: map iteration order is random, so the ordering of the returned
// slices is unspecified beyond the index pairing.
func computeWasmEnv() ([]string, []string) {
	merged := map[string]string{}
	// Inherited envvars with the CHIMERA_EXPORT_ prefix: trim and forward.
	for _, entry := range os.Environ() {
		name, value, err := exportedEnvVar(entry)
		if err != nil {
			continue // malformed entry, skip
		}
		trimmed := strings.TrimPrefix(name, exportedEnvVarPrefix)
		if trimmed != name {
			merged[trimmed] = value
		}
	}
	// Explicitly set envvars (--env): set on the Wasm guest directly.
	for _, entry := range wasmEnvVars.Value() {
		name, value, err := exportedEnvVar(entry)
		if err != nil {
			continue
		}
		merged[name] = value
	}
	keys := make([]string, 0, len(merged))
	values := make([]string, 0, len(merged))
	for k, v := range merged {
		keys = append(keys, k)
		values = append(values, v)
	}
	return keys, values
}
|
package tokens
// TokenType identifies the lexical class of a Token.
type TokenType int

// Token is a single lexeme: its class and raw text.
type Token struct {
	Typ   TokenType // lexical class
	Value string    // raw text of the lexeme
}
// Token type identifiers. The order must stay in sync with typeNames below.
const (
	NONE TokenType = iota // zero value: no token
	NUMBER
	NAME
	SYMBOL
	STRING
	DOT
	EOF
	COMMENT
	LPAREN
	RPAREN
	LBRACKET
	RBRACKET
	LBRACER
	RBRACER
)
// New constructs a Token with the given type and lexeme text.
func New(typ TokenType, value string) Token {
	return Token{Typ: typ, Value: value}
}
// typeNames maps each TokenType to its display name; indexed by the
// TokenType value, so it must stay in sync with the const block above.
var typeNames = []string{
	"TOKEN_NONE",
	"TOKEN_NUMBER",
	"TOKEN_NAME",
	"TOKEN_SYMBOL",
	"TOKEN_STRING",
	"TOKEN_DOT",
	"TOKEN_EOF",
	"TOKEN_COMMENT",
	"TOKEN_LPAREN",
	"TOKEN_RPAREN",
	"TOKEN_LBRACKET",
	"TOKEN_RBRACKET",
	"TOKEN_LBRACER",
	"TOKEN_RBRACER",
}
// String returns the display name of the token type.
// Robustness fix: values outside the known range previously caused an
// index-out-of-range panic; they now render as "TOKEN_UNKNOWN".
func (typ TokenType) String() string {
	if typ < 0 || int(typ) >= len(typeNames) {
		return "TOKEN_UNKNOWN"
	}
	return typeNames[int(typ)]
}
|
package events
import (
"github.com/bwmarrin/discordgo"
)
// ChannelUpdate reacts to a Discord channel update: when the updated
// channel belongs to the guild's widget, the widget's stored channel
// name is refreshed.
func (config *Events) ChannelUpdate(session *discordgo.Session, event *discordgo.ChannelUpdate) {
	log := config.Log.WithField("GuildID", event.GuildID)
	widget, ok := config.Widgets[event.GuildID]
	if !ok {
		log.Errorln("Could not find widget for guild")
		return
	}
	switch {
	case widget.IsListenChannel(event.Channel.ID):
		widget.RenameListenChannel(event.Channel.Name)
	case widget.IsMemberChannel(event.Channel.ID):
		widget.RenameMemberChannel(event.Channel.ID, event.Channel.Name)
	}
}
|
// SPDX-License-Identifier: MIT OR Unlicense
package main
import (
"bytes"
str "github.com/boyter/go-string"
"sort"
"unicode"
)
// Tuning constants for snippet extraction and ranking.
const (
	SnipSideMax int = 10 // Defines the maximum bytes either side of the match we are willing to return
	// The below are used for adding boosts to match conditions of snippets to hopefully produce the best match
	PhraseHeavyBoost = 20 // boost when matched words are adjacent (phrase-like)
	SpaceBoundBoost  = 5  // boost when the match is bounded by whitespace
	ExactMatchBoost  = 5  // boost when the match's case equals the search term
	// Below is used to control CPU burn time trying to find the most relevant snippet
	RelevanceCutoff = 10_000 // maximum number of match locations considered
)
// bestMatch is a candidate snippet window with its ranking score and
// the match locations that fall inside it.
type bestMatch struct {
	StartPos int         // byte offset where the window starts
	EndPos   int         // byte offset where the window ends
	Score    float64     // ranking score; higher is better
	Relevant []relevantV3 // matches contained in the window
}
// relevantV3 is an internal structure used just for matching things
// together: one match location of a word in the content.
type relevantV3 struct {
	Word  string // matched word
	Start int    // byte offset of the match start
	End   int    // byte offset of the match end
}
// Snippet is an extracted, scored fragment of the file content.
type Snippet struct {
	Content   string  // the snippet text itself
	StartPos  int     // byte offset of the snippet start in the file
	EndPos    int     // byte offset of the snippet end in the file
	Score     float64 // ranking score; higher is better
	LineStart int     // 1-based line number where the snippet starts
	LineEnd   int     // 1-based line number where the snippet ends
}
// Looks through the locations using a sliding window style algorithm
// where it "brute forces" the solution by iterating over every location we have
// and look for all matches that fall into the supplied length and ranking
// based on how many we have.
//
// This algorithm ranks using document frequencies that are kept for
// TF/IDF ranking with various other checks. Look though the source
// to see how it actually works as it is a constant work in progress.
// Some examples of what it can produce which I consider good results,
//
// corpus: Jane Austens Pride and Prejudice
// searchtext: ten thousand a year
// result: before. I hope he will overlook
//
// it. Dear, dear Lizzy. A house in town! Every thing that is
// charming! Three daughters married! Ten thousand a year! Oh, Lord!
// What will become of me. I shall go distracted.”
//
// This was enough to prove that her approbation need not be
//
// searchtext: poor nerves
// result: your own children in such a way?
//
// You take delight in vexing me. You have no compassion for my poor
// nerves.”
//
// “You mistake me, my dear. I have a high respect for your nerves.
// They are my old friends. I have heard you mention them with
// consideration these last
//
// The above are captured in the tests for this method along with extractions from rhyme of the ancient mariner
// and generally we do not want them to regress for any other gains.
//
// Please note that testing this is... hard. This is because what is considered relevant also happens
// to differ between people. Heck a few times I have been disappointed with results that I was previously happy with.
// As such this is not tested as much as other methods and you should not rely on the results being static over time
// as the internals will be modified to produce better results where possible
// extractRelevantV3 slides a window over the sorted match locations of res,
// scores candidate windows of roughly relLength bytes using document
// frequencies plus phrase/space/exact-match boosts, and returns up to 20
// non-overlapping snippets sorted best-first. See the comment block above
// for worked examples and caveats.
func extractRelevantV3(res *FileJob, documentFrequencies map[string]int, relLength int) []Snippet {
	wrapLength := relLength / 2
	var bestMatches []bestMatch
	rv3 := convertToRelevant(res)
	// if we have a huge amount of matches we want to reduce it because otherwise it takes forever
	// to return something if the search has many matches.
	if len(rv3) > RelevanceCutoff {
		rv3 = rv3[:RelevanceCutoff]
	}
	// Slide around looking for matches that fit in the length
	for i := 0; i < len(rv3); i++ {
		m := bestMatch{
			StartPos: rv3[i].Start,
			EndPos: rv3[i].End,
			Relevant: []relevantV3{rv3[i]},
		}
		// Slide left
		j := i - 1
		for {
			// Ensure we never step outside the bounds of our slice
			if j < 0 {
				break
			}
			// How close is the matches start to our end?
			diff := rv3[i].End - rv3[j].Start
			// If the diff is greater than the target then break out as there is no
			// more reason to keep looking as the slice is sorted
			if diff > wrapLength {
				break
			}
			// If we didn't break this is considered a larger match
			m.StartPos = rv3[j].Start
			m.Relevant = append(m.Relevant, rv3[j])
			j--
		}
		// Slide right
		j = i + 1
		for {
			// Ensure we never step outside the bounds of our slice
			if j >= len(rv3) {
				break
			}
			// How close is the matches end to our start?
			diff := rv3[j].End - rv3[i].Start
			// If the diff is greater than the target then break out as there is no
			// more reason to keep looking as the slice is sorted
			if diff > wrapLength {
				break
			}
			m.EndPos = rv3[j].End
			m.Relevant = append(m.Relevant, rv3[j])
			j++
		}
		// If the match around this isn't long enough expand it out
		// roughly based on how large a context we need to add
		l := m.EndPos - m.StartPos
		if l < relLength {
			add := (relLength - l) / 2
			m.StartPos -= add
			m.EndPos += add
			if m.StartPos < 0 {
				m.StartPos = 0
			}
			if m.EndPos > len(res.Content) {
				m.EndPos = len(res.Content)
			}
		}
		// Now we see if there are any nearby spaces to avoid us cutting in the
		// middle of a word if we can avoid it
		sf := false
		ef := false
		m.StartPos, sf = findSpaceLeft(res, m.StartPos, SnipSideMax)
		m.EndPos, ef = findSpaceRight(res, m.EndPos, SnipSideMax)
		// Check if we are cutting in the middle of a multibyte char and if so
		// go looking till we find the start. We only do so if we didn't find a space,
		// and if we aren't at the start or very end of the content
		for !sf && m.StartPos != 0 && m.StartPos != len(res.Content) && !str.StartOfRune(res.Content[m.StartPos]) {
			m.StartPos--
		}
		for !ef && m.EndPos != 0 && m.EndPos != len(res.Content) && !str.StartOfRune(res.Content[m.EndPos]) {
			m.EndPos--
		}
		// If we are very close to the start, just push it out so we get the actual start
		if m.StartPos <= SnipSideMax {
			m.StartPos = 0
		}
		// As above, but against the end so we just include the rest if we are close
		// NOTE(review): the literal 10 below looks like it should be SnipSideMax
		// (same value today, but they would drift apart if the constant changes) — confirm.
		if len(res.Content)-m.EndPos <= 10 {
			m.EndPos = len(res.Content)
		}
		// Now that we have the snippet start to rank it to produce a score indicating
		// how good a match it is and hopefully display to the user what they
		// were actually looking for
		m.Score += float64(len(m.Relevant)) // Factor in how many matches we have
		// NB the below is commented out because it seems to make things worse generally
		//m.Score += float64(m.EndPos - m.StartPos) // Factor in how large the snippet is
		// Apply higher score where the words are near each other
		//mid := rv3[i].Start + (rv3[i].End-rv3[i].End)/2 // match word midpoint
		mid := rv3[i].Start
		for _, v := range m.Relevant {
			p := v.Start + (v.End-v.Start)/2 // comparison word midpoint
			// If the word is within a reasonable distance of this word boost the score
			// weighted by how common that word is so that matches like 'a' impact the rank
			// less than something like 'cromulent' which in theory should not occur as much
			if abs(mid-p) < (relLength / 3) {
				m.Score += 100 / float64(documentFrequencies[v.Word])
			}
		}
		// Try to make it phrase heavy such that if words line up next to each other
		// it is given a much higher weight
		for _, v := range m.Relevant {
			// Use 2 here because we want to avoid punctuation such that a search for
			// cat dog will still be boosted if we find cat. dog
			if abs(rv3[i].Start-v.End) <= 2 || abs(rv3[i].End-v.Start) <= 2 {
				m.Score += PhraseHeavyBoost
			}
		}
		// If the match is bounded by a space boost it slightly
		// because its likely to be a better match
		if rv3[i].Start >= 1 && unicode.IsSpace(rune(res.Content[rv3[i].Start-1])) {
			m.Score += SpaceBoundBoost
		}
		if rv3[i].End < len(res.Content)-1 && unicode.IsSpace(rune(res.Content[rv3[i].End+1])) {
			m.Score += SpaceBoundBoost
		}
		// If the word is an exact match to what the user typed boost it
		// So while the search may be case insensitive the ranking of
		// the snippet does consider case when boosting ever so slightly
		if string(res.Content[rv3[i].Start:rv3[i].End]) == rv3[i].Word {
			m.Score += ExactMatchBoost
		}
		// This mod applies over the whole score because we want to most unique words to appear in the middle
		// of the snippet over those where it is on the edge which this should achieve even if it means
		// we may miss out on a slightly better match
		m.Score = m.Score / float64(documentFrequencies[rv3[i].Word]) // Factor in how unique the word is
		bestMatches = append(bestMatches, m)
	}
	// Sort our matches by score such that tbe best snippets are at the top
	sort.Slice(bestMatches, func(i, j int) bool {
		return bestMatches[i].Score > bestMatches[j].Score
	})
	// Now what we have it sorted lets get just the ones that don't overlap so we have all the unique snippets
	var bestMatchesClean []bestMatch
	var ranges [][]int
	for _, b := range bestMatches {
		isOverlap := false
		for _, r := range ranges {
			if b.StartPos >= r[0] && b.StartPos <= r[1] {
				isOverlap = true
			}
			if b.EndPos >= r[0] && b.EndPos <= r[1] {
				isOverlap = true
			}
		}
		if !isOverlap {
			ranges = append(ranges, []int{b.StartPos, b.EndPos})
			bestMatchesClean = append(bestMatchesClean, b)
		}
	}
	// Limit to the 20 best matches
	if len(bestMatchesClean) > 20 {
		bestMatchesClean = bestMatchesClean[:20]
	}
	var snippets []Snippet
	for _, b := range bestMatchesClean {
		// NOTE(review): bytes.Index locates the FIRST occurrence of the snippet
		// text in the file; if identical text appears before b.StartPos the
		// reported start line comes from that earlier occurrence — confirm intended.
		index := bytes.Index(res.Content, res.Content[b.StartPos:b.EndPos])
		startLineOffset := 1
		for i := 0; i < index; i++ {
			if res.Content[i] == '\n' {
				startLineOffset++
			}
		}
		contentLineOffset := startLineOffset
		for _, i := range res.Content[b.StartPos:b.EndPos] {
			if i == '\n' {
				contentLineOffset++
			}
		}
		snippets = append(snippets, Snippet{
			Content: string(res.Content[b.StartPos:b.EndPos]),
			StartPos: b.StartPos,
			EndPos: b.EndPos,
			Score: b.Score,
			LineStart: startLineOffset,
			LineEnd: contentLineOffset,
		})
	}
	return snippets
}
// convertToRelevant flattens res.MatchLocations into relevantV3 records
// sorted by start offset, so the sliding-window pass can scan in order.
func convertToRelevant(res *FileJob) []relevantV3 {
	var flat []relevantV3
	for word, locations := range res.MatchLocations {
		for _, loc := range locations {
			flat = append(flat, relevantV3{
				Word: word,
				Start: loc[0],
				End: loc[1],
			})
		}
	}
	// Order by start offset for the window scan.
	sort.Slice(flat, func(a, b int) bool {
		return flat[a].Start < flat[b].Start
	})
	return flat
}
// findSpaceRight scans forward from pos, up to distance bytes, for a
// whitespace character sitting on a rune boundary. It returns the index
// and true when found, otherwise the original pos and false.
func findSpaceRight(res *FileJob, pos int, distance int) (int, bool) {
	if len(res.Content) == 0 {
		return pos, false
	}
	// Clamp the scan window to the end of the content.
	limit := pos + distance
	if last := len(res.Content) - 1; limit > last {
		limit = last
	}
	for i := pos; i <= limit; i++ {
		c := res.Content[i]
		if str.StartOfRune(c) && unicode.IsSpace(rune(c)) {
			return i, true
		}
	}
	return pos, false
}
// findSpaceLeft scans backward from pos, up to distance bytes, for a
// whitespace character sitting on a rune boundary. It returns the index
// and true when found, otherwise the original pos and false.
func findSpaceLeft(res *FileJob, pos int, distance int) (int, bool) {
	if len(res.Content) == 0 {
		return pos, false
	}
	// A position past the end cannot be inspected safely.
	if pos >= len(res.Content) {
		return pos, false
	}
	// Clamp the scan window to the start of the content.
	limit := pos - distance
	if limit < 0 {
		limit = 0
	}
	for i := pos; i >= limit; i-- {
		c := res.Content[i]
		if str.StartOfRune(c) && unicode.IsSpace(rune(c)) {
			return i, true
		}
	}
	return pos, false
}
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
|
package dashboard
import (
"fmt"
"github.com/keptn-contrib/dynatrace-service/internal/adapter"
"github.com/keptn-contrib/dynatrace-service/internal/common"
"github.com/keptn-contrib/dynatrace-service/internal/dynatrace"
"github.com/keptn-contrib/dynatrace-service/internal/sli/metrics"
keptnv2 "github.com/keptn/go-utils/pkg/lib/v0_2_0"
log "github.com/sirupsen/logrus"
"strings"
"time"
)
// DataExplorerTileProcessing turns Dynatrace data explorer dashboard
// tiles into SLI/SLO tile results for a given time window.
type DataExplorerTileProcessing struct {
	client        dynatrace.ClientInterface   // Dynatrace API client
	eventData     adapter.EventContentAdapter // Keptn event context for query building
	customFilters []*keptnv2.SLIFilter        // additional SLI filters from the event
	startUnix     time.Time                   // start of the evaluation window
	endUnix       time.Time                   // end of the evaluation window
}
// NewDataExplorerTileProcessing creates a processor for data explorer
// dashboard tiles over the given evaluation time window.
func NewDataExplorerTileProcessing(client dynatrace.ClientInterface, eventData adapter.EventContentAdapter, customFilters []*keptnv2.SLIFilter, startUnix time.Time, endUnix time.Time) *DataExplorerTileProcessing {
	processing := DataExplorerTileProcessing{
		client:        client,
		eventData:     eventData,
		customFilters: customFilters,
		startUnix:     startUnix,
		endUnix:       endUnix,
	}
	return &processing
}
// Process turns one data explorer tile into SLI/SLO tile results: tiles
// whose name carries an "sli=<name>" marker have each of their queries
// converted into a metrics query and executed; queries that fail to
// convert are skipped with a warning.
func (p *DataExplorerTileProcessing) Process(tile *dynatrace.Tile, dashboardFilter *dynatrace.DashboardFilter) []*TileResult {
	// A tile-level management zone filter overrides the dashboard-level one.
	zoneFilter := NewManagementZoneFilter(dashboardFilter, tile.TileFilter.ManagementZone)
	// Only tiles whose title carries "sli=<name>" take part in SLI validation.
	sloDefinition := common.ParsePassAndWarningWithoutDefaultsFrom(tile.Name)
	if sloDefinition.SLI == "" {
		log.WithField("tileName", tile.Name).Debug("Data explorer tile not included as name doesnt include sli=SLINAME")
		return nil
	}
	var results []*TileResult
	for _, query := range tile.Queries {
		log.WithField("metric", query.Metric).Debug("Processing data explorer query")
		// Build the metrics query and gather the metric metadata needed for SLIs & SLOs.
		metricQuery, err := p.generateMetricQueryFromDataExplorerQuery(query, zoneFilter, p.startUnix, p.endUnix)
		if err != nil {
			log.WithError(err).Warn("generateMetricQueryFromDataExplorerQuery returned an error, SLI will not be used")
			continue
		}
		queryResults := NewMetricsQueryProcessing(p.client).Process(len(query.SplitBy), sloDefinition, metricQuery)
		results = append(results, queryResults...)
	}
	return results
}
// Looks at the DataExplorerQuery configuration of a data explorer chart and generates the Metrics Query.
//
// Returns a queryComponents object
// - metricId, e.g: built-in:mymetric
// - metricUnit, e.g: MilliSeconds
// - metricQuery, e.g: metricSelector=metric&filter...
// - fullMetricQuery, e.g: metricQuery&from=123213&to=2323
// - entitySelectorSLIDefinition, e.g: ,entityid(FILTERDIMENSIONVALUE)
// - filterSLIDefinitionAggregator, e.g: , filter(eq(Test Step,FILTERDIMENSIONVALUE))
// generateMetricQueryFromDataExplorerQuery builds the Dynatrace metrics
// query (selector, merge/filter aggregators, entity selector and SLI
// placeholders) for one data explorer query; see the comment above for
// the queryComponents fields it fills in.
func (p *DataExplorerTileProcessing) generateMetricQueryFromDataExplorerQuery(dataQuery dynatrace.DataExplorerQuery, tileManagementZoneFilter *ManagementZoneFilter, startUnix time.Time, endUnix time.Time) (*queryComponents, error) {
	// TODO 2021-08-04: there are too many return values and they are have the same type
	// Lets query the metric definition as we need to know how many dimension the metric has
	metricDefinition, err := dynatrace.NewMetricsClient(p.client).GetByID(dataQuery.Metric)
	if err != nil {
		log.WithError(err).WithField("metric", dataQuery.Metric).Debug("Error retrieving metric description")
		return nil, err
	}
	// building the merge aggregator string, e.g: merge("dt.entity.disk"):merge("dt.entity.host") - or merge("dt.entity.service")
	// TODO: 2021-09-20: Check for redundant code after update to use dimension keys rather than indexes
	metricDimensionCount := len(metricDefinition.DimensionDefinitions)
	metricAggregation := metricDefinition.DefaultAggregation.Type
	mergeAggregator := ""
	filterAggregator := ""
	filterSLIDefinitionAggregator := ""
	entitySelectorSLIDefinition := ""
	entityFilter := ""
	// we need to merge all those dimensions based on the metric definition that are not included in the "splitBy"
	// so - we iterate through the dimensions based on the metric definition from the back to front - and then merge those not included in splitBy
	for metricDimIx := metricDimensionCount - 1; metricDimIx >= 0; metricDimIx-- {
		log.WithField("metricDimIx", metricDimIx).Debug("Processing Dimension Ix")
		doMergeDimension := true
		for _, splitDimension := range dataQuery.SplitBy {
			log.WithFields(
				log.Fields{
					"dimension1": splitDimension,
					"dimension2": metricDefinition.DimensionDefinitions[metricDimIx].Key,
				}).Debug("Comparing Dimensions %")
			if strings.Compare(splitDimension, metricDefinition.DimensionDefinitions[metricDimIx].Key) == 0 {
				doMergeDimension = false
			}
		}
		if doMergeDimension {
			// this is a dimension we want to merge as it is not split by in the chart
			log.WithField("dimension", metricDefinition.DimensionDefinitions[metricDimIx].Key).Debug("merging dimension")
			mergeAggregator = mergeAggregator + fmt.Sprintf(":merge(\"%s\")", metricDefinition.DimensionDefinitions[metricDimIx].Key)
		}
	}
	// Create the right entity Selectors for the queries execute
	// TODO: we currently only support a single filter - if we want to support more we need to build this in
	if dataQuery.FilterBy != nil && len(dataQuery.FilterBy.NestedFilters) > 0 {
		if len(dataQuery.FilterBy.NestedFilters[0].Criteria) == 1 {
			// Entity-dimension filters become an entity selector; all other
			// dimensions become metric-selector :filter(...) clauses.
			if strings.HasPrefix(dataQuery.FilterBy.NestedFilters[0].Filter, "dt.entity.") {
				entitySelectorSLIDefinition = ",entityId(FILTERDIMENSIONVALUE)"
				entityFilter = fmt.Sprintf("&entitySelector=entityId(%s)", dataQuery.FilterBy.NestedFilters[0].Criteria[0].Value)
			} else {
				filterSLIDefinitionAggregator = fmt.Sprintf(":filter(eq(%s,FILTERDIMENSIONVALUE))", dataQuery.FilterBy.NestedFilters[0].Filter)
				filterAggregator = fmt.Sprintf(":filter(%s(%s,%s))", dataQuery.FilterBy.NestedFilters[0].Criteria[0].Evaluator, dataQuery.FilterBy.NestedFilters[0].Filter, dataQuery.FilterBy.NestedFilters[0].Criteria[0].Value)
			}
		} else {
			log.Debug("Code only supports a single filter for data explorer")
		}
	}
	// TODO: we currently only support one split dimension
	// but - if we split by a dimension we need to include that dimension in our individual SLI query definitions - thats why we hand this back in the filter clause
	if dataQuery.SplitBy != nil {
		if len(dataQuery.SplitBy) == 1 {
			filterSLIDefinitionAggregator = fmt.Sprintf("%s:filter(eq(%s,FILTERDIMENSIONVALUE))", filterSLIDefinitionAggregator, dataQuery.SplitBy[0])
		} else {
			log.Debug("Code only supports a single splitby dimension for data explorer")
		}
	}
	// lets create the metricSelector and entitySelector
	// ATTENTION: adding :names so we also get the names of the dimensions and not just the entities. This means we get two values for each dimension
	metricQuery := fmt.Sprintf("metricSelector=%s%s%s:%s:names%s%s",
		dataQuery.Metric, mergeAggregator, filterAggregator, strings.ToLower(metricAggregation),
		entityFilter, tileManagementZoneFilter.ForEntitySelector())
	// lets build the Dynatrace API Metric query for the proposed timeframe and additonal filters!
	fullMetricQuery, metricID, err := metrics.NewQueryBuilder(p.eventData, p.customFilters).Build(metricQuery, startUnix, endUnix)
	if err != nil {
		return nil, err
	}
	return &queryComponents{
		metricID: metricID,
		metricUnit: metricDefinition.Unit,
		metricQuery: metricQuery,
		fullMetricQueryString: fullMetricQuery,
		entitySelectorSLIDefinition: entitySelectorSLIDefinition,
		filterSLIDefinitionAggregator: filterSLIDefinitionAggregator,
	}, nil
}
|
package models
import (
"github.com/jinzhu/gorm"
"time"
)
// Poll is a time-bounded poll created by a user, persisted via GORM.
type Poll struct {
	gorm.Model
	Title  string    `json:"title"`   // poll title
	Start  time.Time `json:"start"`   // when voting opens
	End    time.Time `json:"end"`     // when voting closes
	UserID int       `json:"user_id"` // owner of the poll
}
|
package response_factory
// defaultResponse is the success response: never a server or client
// error, status statusOk, with an optional data payload.
type defaultResponse struct {
	data interface{} // payload returned to the caller; nil means no data
}
// IsServerError always reports false: a defaultResponse is a success.
func (r defaultResponse) IsServerError() bool {
	return false
}
// IsClientError always reports false: a defaultResponse is a success.
func (r defaultResponse) IsClientError() bool {
	return false
}
// GetStatus returns the success status constant.
func (r defaultResponse) GetStatus() string {
	return statusOk
}
// HasData reports whether the response carries a payload.
func (r defaultResponse) HasData() bool {
	return r.data != nil
}
// GetData returns the payload (nil when the response has no data).
func (r defaultResponse) GetData() interface{} {
	return r.data
}
|
package main
import (
"fmt"
"strconv"
"sync"
)
// Shared memory or a mutex-guarded queue can suffer lock contention under
// goroutine competition; channels provide FIFO (pipe-like) hand-off instead.
// A channel's buffer length is fixed at creation; once closed it cannot be
// sent to, but buffered values can still be received.
var wg sync.WaitGroup
var ar01 []int //declared slice (reference type, must be initialized before use)
var ch01 chan int //channel variable (reference type, must be initialized before use)
var ch02 chan *string
func main() {
	// channel01() //unbuffered channel demo
	channel02() //buffered channel demo: capacity 100, produce and consume concurrently
}
// channel02 demonstrates a buffered channel: 50 string pointers are sent
// into a 100-slot channel while goroutines drain it concurrently.
func channel02() {
	// make initializes the channel with a 100-slot buffer and returns a reference.
	ch02 = make(chan *string, 100)
	fmt.Println(ch02)
	// The runtime reclaims channels when the program exits; close explicitly
	// in long-running programs once no more sends will happen.
	defer close(ch02)
	for i := 0; i < 50; i++ {
		str := strconv.Itoa(i)
		ch02 <- &str
		wg.Add(1)
		go func() {
			// FIX: Done was previously called at the START of the goroutine,
			// so wg.Wait() could return before the receives/prints finished.
			// Deferring it marks the task done only after the work completes.
			defer wg.Done()
			x := <-ch02
			fmt.Printf("从水管中取出地址:%v,值:%v \n", x, *x)
		}()
	}
	wg.Wait()
	fmt.Println("main 还活着")
}
// channel01 demonstrates an unbuffered channel: a send blocks until a
// receiver is ready, so a receiving goroutine is started first.
func channel01(){
	ch01 = make(chan int) //unbuffered: a send blocks until another goroutine receives
	wg.Add(1)
	go func(){
		defer wg.Done()
		x:=<-ch01 //receive from the channel
		fmt.Println("匿名协程接收到:",x)
	}()
	ch01<-10 //send into the channel
	wg.Wait()
	fmt.Println("main 还活着")
} |
/*
Copyright 2019 Baidu, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package edgehandler maintenances the websocket connection with cloud server
// and process the receive messages.
package edgehandler
import (
"fmt"
"net/http"
"time"
"github.com/golang/protobuf/proto"
"k8s.io/klog"
"github.com/baidu/ote-stack/pkg/clustermessage"
clusterrouter "github.com/baidu/ote-stack/pkg/clusterrouter"
"github.com/baidu/ote-stack/pkg/clusterselector"
"github.com/baidu/ote-stack/pkg/clustershim"
"github.com/baidu/ote-stack/pkg/config"
"github.com/baidu/ote-stack/pkg/tunnel"
)
// Tunables and shared state for forwarding messages to the parent cluster.
var (
	subtreeReportDuration = 1 * time.Second        // interval for subtree reporting
	sendToParentTimeout   = 1 * time.Second        // timeout when queueing a message for the parent
	sendToParentChan      = make(chan []byte, 10000) // buffered queue of serialized messages for the parent
	sendToClusterHandlerTimeout = 1 * time.Second  // timeout when handing a message to the cluster handler
)
// EdgeHandler is the edgehandler interface that processes messages from
// the tunnel and transmits them to the shim.
type EdgeHandler interface {
	// Start will start edgehandler.
	Start() error
}
// edgeHandler processes messages from the tunnel and transmits them to the shim.
type edgeHandler struct {
	conf              *config.ClusterControllerConfig // cluster controller configuration
	edgeTunnel        tunnel.EdgeTunnel               // connection to the parent cluster (nil for root)
	shimClient        clustershim.ShimServiceClient   // local or remote shim client
	stopReportSubtree chan struct{}                   // signals the subtree reporter to stop
	rootClusterEnable bool                            // set when a root cc with remote shim acts as a single root cluster
}
// NewEdgeHandler returns an EdgeHandler backed by the given cluster
// controller configuration.
func NewEdgeHandler(c *config.ClusterControllerConfig) EdgeHandler {
	h := &edgeHandler{
		conf:              c,
		stopReportSubtree: make(chan struct{}, 1),
		rootClusterEnable: false,
	}
	return h
}
// valid checks the configuration for a usable combination of cluster
// name, parent cluster and shim settings. As a side effect it enables
// single-root-cluster mode when a root cc connects to a remote shim.
func (e *edgeHandler) valid() error {
	switch {
	case e.conf.ClusterUserDefineName == "":
		return fmt.Errorf("cluster name is empty")
	case e.conf.K8sClient == nil && !e.isRemoteShim():
		return fmt.Errorf("k8s client is unavailable or remoteshim not set")
	case e.conf.ParentCluster == "" && !e.isRoot():
		return fmt.Errorf("parent cluster is empty")
	case e.conf.ParentCluster != "" && e.isRoot():
		return fmt.Errorf("root cc should not have parent cluster")
	}
	if e.isRoot() && e.isRemoteShim() {
		// A root cc connected to a shim can act as a single root cluster.
		e.rootClusterEnable = true
	}
	return nil
}
// isRoot reports whether this cluster controller is configured as the root.
func (e *edgeHandler) isRoot() bool {
	return config.IsRoot(e.conf.ClusterUserDefineName)
}
// isRemoteShim reports whether a remote shim address has been configured.
func (e *edgeHandler) isRemoteShim() bool {
	return e.conf.RemoteShimAddr != ""
}
// Start validates the configuration, initialises the shim client and edge
// tunnel, and launches the background goroutines that shuttle messages
// between the cluster handler, the shim and the parent cluster.
func (e *edgeHandler) Start() error {
	if err := e.valid(); err != nil {
		return err
	}
	if e.isRemoteShim() {
		klog.Infof("init remote shim client")
		e.shimClient = clustershim.NewRemoteShimClient(e.conf.ClusterName, e.conf.RemoteShimAddr)
	} else if !e.isRoot() {
		klog.Infof("init local shim client")
		e.shimClient = clustershim.NewlocalShimClient(e.conf)
	}
	// A shim client is mandatory unless this is a root cc that is not
	// acting as a single root cluster.
	if e.shimClient == nil && (e.rootClusterEnable || !e.isRoot()) {
		return fmt.Errorf("fail to init shim client")
	}
	// Non-root clusters connect upwards through the edge tunnel.
	if !e.isRoot() {
		e.edgeTunnel = tunnel.NewEdgeTunnel(e.conf)
		e.edgeTunnel.RegistReceiveMessageHandler(e.receiveMessageFromTunnel)
		e.edgeTunnel.RegistAfterConnectToHook(e.afterConnect)
		e.edgeTunnel.RegistAfterDisconnectHook(e.afterDisconnect)
		if err := e.edgeTunnel.Start(); err != nil {
			return err
		}
		// send the msg from cluster handler and shim.
		go e.sendMessageToParent()
		// handle the msg from cluster handler.
		go e.sendMessageToTunnel()
	}
	// single cluster's rootcc should handle msg from rootcm.
	if e.rootClusterEnable {
		go func() {
			for {
				msg := <-e.conf.RootClusterToEdgeChan
				e.handleMessage(msg)
			}
		}()
	}
	go e.handleRespFromShimClient()
	return nil
}
// sendMessageToParent drains sendToParentChan forever, forwarding every
// serialized payload to the parent cluster over the edge tunnel. Multiple
// instances may run concurrently to increase send throughput.
func (e *edgeHandler) sendMessageToParent() {
	for {
		e.edgeTunnel.Send(<-sendToParentChan)
	}
}
// sendMessageToTunnel forwards messages from the cluster handler to the
// parent cluster. Each received value is copied into a local variable so
// its address can be handed to sendToParent.
func (e *edgeHandler) sendMessageToTunnel() {
	for {
		msg := <-e.conf.ClusterToEdgeChan
		e.sendToParent(&msg)
	}
}
// receiveMessageFromTunnel is the tunnel receive hook. It deserializes the
// raw payload, always forwards the message to the cluster handler, and
// additionally processes it locally when this cluster matches the message's
// cluster selector. Returns an error only when deserialization fails.
//
// Rewritten without the named result + naked returns, which hid the actual
// control flow.
func (e *edgeHandler) receiveMessageFromTunnel(client string, data []byte) error {
	msg := &clustermessage.ClusterMessage{}
	if err := proto.Unmarshal(data, msg); err != nil {
		err = fmt.Errorf("can not deserialize message, error: %s", err.Error())
		klog.Error(err)
		return err
	}
	// Hand the message to the cluster handler unconditionally.
	e.conf.EdgeToClusterChan <- *msg
	// Process locally only if the selector targets this cluster.
	selector := clusterselector.NewSelector(msg.Head.ClusterSelector)
	if selector.Has(e.conf.ClusterName) {
		e.handleMessage(msg)
	}
	return nil
}
// responseErrorStatus builds a serialized ControllerTaskResponse that
// carries the given error as an internal-server-error payload. Returns nil
// when marshalling fails.
func responseErrorStatus(err error) []byte {
	resp := clustermessage.ControllerTaskResponse{
		Timestamp:  time.Now().Unix(),
		Body:       []byte(err.Error()),
		StatusCode: http.StatusInternalServerError,
	}
	data, marshalErr := proto.Marshal(&resp)
	if marshalErr != nil {
		klog.Errorf("marshal controller task resp failed: %v", marshalErr)
		return nil
	}
	return data
}
// handleMessage dispatches a cluster message to the shim according to its
// command type. ControlReq responses are returned synchronously and
// forwarded to the parent; ControlMultiReq responses come back
// asynchronously via the shim's return channel (see
// handleRespFromShimClient). Unknown commands are logged and ignored.
func (e *edgeHandler) handleMessage(msg *clustermessage.ClusterMessage) error {
	switch msg.Head.Command {
	case clustermessage.CommandType_ControlReq:
		klog.V(1).Infof("dispatch message %v to shim", msg.Head.MessageID)
		resp, err := e.shimClient.Do(msg)
		if resp != nil {
			// sync return
			if err != nil {
				// Convert the failure into an error-status body so the
				// parent still receives a response for this request.
				resp.Body = responseErrorStatus(err)
				klog.Errorf("handleTask error: %s", err.Error())
			}
			resp.Head.ClusterName = e.conf.ClusterName
			// send to cloudtunnel.
			err = e.sendToParent(resp)
		} else {
			if err != nil {
				klog.Errorf("handleTask error: %v", err)
			}
		}
		return err
	case clustermessage.CommandType_ControlMultiReq:
		klog.V(3).Infof("dispatch ControlMultiReq message to shim")
		_, err := e.shimClient.Do(msg)
		if err != nil {
			klog.Errorf("handleTask error: %s", err.Error())
		}
		return err
	default:
		klog.Errorf("command %s is not supported by edge handler", msg.Head.Command.String())
		return nil
	}
}
// handleRespFromShimClient consumes asynchronous responses from the shim's
// return channel, stamps them with this cluster's name, and routes them to
// the cluster handler (single-root mode) or up to the parent cluster.
//
// Fixes: the nil check on the return channel was duplicated, and the final
// "channel closed" warning was unreachable behind an infinite receive loop
// (which would also have spun on zero values after close); ranging over the
// channel exits cleanly when the shim closes it.
func (e *edgeHandler) handleRespFromShimClient() {
	// async return
	if e.shimClient == nil || e.shimClient.ReturnChan() == nil {
		klog.Warningf("shim client or return chan is nil, cannot handle resp")
		return
	}
	respChan := e.shimClient.ReturnChan()
	for resp := range respChan {
		resp.Head.ClusterName = e.conf.ClusterName
		if e.rootClusterEnable {
			// send to clusterhandler
			select {
			case e.conf.RootEdgeToClusterChan <- resp:
				klog.V(5).Info("send report msg to cluster handler success")
			case <-time.After(sendToClusterHandlerTimeout):
				klog.V(5).Info("send report msg to cluster handler timeout")
			}
		} else {
			// send to cloudtunnel.
			e.sendToParent(resp)
		}
	}
	klog.Warningf("async return channel from shim client closed")
}
// afterConnect is invoked by the tunnel once a connection to the parent is
// established; it (re)starts the periodic subtree reporting.
func (e *edgeHandler) afterConnect() {
	// start subtree report goroutine
	go e.reportSubTreeTimer()
}
// afterDisconnect is invoked by the tunnel when the parent connection drops;
// it signals the subtree-report goroutine to stop (buffered channel, cap 1).
func (e *edgeHandler) afterDisconnect() {
	// stop subtree report goroutine
	e.stopReportSubtree <- struct{}{}
}
// reportSubTreeTimer reports the routing subtree once immediately, then
// periodically every subtreeReportDuration until a stop signal arrives on
// stopReportSubtree.
func (e *edgeHandler) reportSubTreeTimer() {
	klog.Info("start reporting subtree")
	// call report once and start timer
	e.reportSubTree()
	ticker := time.NewTicker(subtreeReportDuration)
	// Stop the ticker on return so its resources are released; the original
	// leaked a running ticker every time this goroutine exited.
	defer ticker.Stop()
	for {
		select {
		case <-e.stopReportSubtree:
			klog.Info("stop reporting subtree")
			return
		case <-ticker.C:
			e.reportSubTree()
		}
	}
}
// reportSubTree pushes the current routing subtree to the parent cluster,
// doing nothing when there is no subtree to report.
func (e *edgeHandler) reportSubTree() {
	subtree := clusterrouter.Router().SubTreeMessage()
	if subtree == nil {
		return
	}
	subtree.Head.ClusterName = e.conf.ClusterName
	e.sendToParent(subtree)
}
// sendToParent serializes the message and queues it for delivery to the
// parent cluster. If the queue stays full for sendToParentTimeout the
// message is dropped (logged only at V(5)); the return value therefore only
// reflects marshalling success, not delivery.
func (e *edgeHandler) sendToParent(msg *clustermessage.ClusterMessage) error {
	data, err := proto.Marshal(msg)
	if err != nil {
		klog.Errorf("marshal cluster message error: %s", err.Error())
		return err
	}
	select {
	case sendToParentChan <- data:
		klog.V(5).Info("send msg to parent success")
	case <-time.After(sendToParentTimeout):
		klog.V(5).Info("send msg to parent timeout")
	}
	return nil
}
|
package golang_blockchain
import sha "crypto/sha256"
// bytes serializes a block candidate into a single byte slice laid out as:
// difficulty byte | parent hash | nonce | block data.
func bytes(block *Block, difficulty byte, nonce Nonce) []byte {
	parentsum := Hash{}
	if block.Parent != nil {
		parentsum = block.Parent.Sum
	}
	total := 1 + len(parentsum) + len(nonce) + len(block.Data)
	buf := make([]byte, 0, total)
	buf = append(buf, difficulty)
	buf = append(buf, parentsum[:]...)
	buf = append(buf, nonce...)
	buf = append(buf, block.Data...)
	if len(buf) != total {
		panic("lengths dont match")
	}
	return buf
}
// hash computes the SHA-256 digest of the serialized block candidate.
func hash(block *Block, difficulty byte, nonce Nonce) Hash {
	return sha.Sum256(bytes(block, difficulty, nonce))
}
// MatchesDifficulty reports whether the hash starts with at least
// `difficulty` zero bytes. A difficulty larger than the hash length can
// never be satisfied, so it returns false instead of panicking on the
// out-of-range slice as the previous version did.
func (hash *Hash) MatchesDifficulty(difficulty byte) bool {
	if int(difficulty) > len(hash) {
		return false
	}
	for _, b := range hash[0:difficulty] {
		if b != 0 {
			return false
		}
	}
	return true
}
|
package FindCoordinator
// Response is the decoded body of a Kafka FindCoordinator response.
type Response struct {
	ThrottleTimeMs int32  // how long the request was throttled, in ms
	ErrorCode      int16  // broker error code; 0 means no error
	ErrorMessage   string // human-readable error detail, if any
	NodeId         int32  // id of the coordinator broker
	Host           string // coordinator host
	Port           int32  // coordinator port
}
|
package main
import "fmt"
// main demonstrates firstUniqChar on a sample string.
func main() {
	sample := "abcaaaa"
	fmt.Println(firstUniqChar(sample))
}
// firstUniqChar returns the index of the first byte in s that occurs
// exactly once, or -1 when there is none (including the empty string).
// It counts raw bytes, not runes.
func firstUniqChar(s string) int {
	// A fixed-size array covers every possible byte value and avoids the
	// map allocation of the previous version; the explicit length guard
	// was redundant since both loops are no-ops on an empty string.
	var counts [256]int
	for i := 0; i < len(s); i++ {
		counts[s[i]]++
	}
	for i := 0; i < len(s); i++ {
		if counts[s[i]] == 1 {
			return i
		}
	}
	return -1
}
|
// reference and dereferece operator
// check pointer type
// declare a pointer
package main
import "fmt"
// main demonstrates taking addresses, dereferencing, pointer types and
// declaring an uninitialised (nil) pointer.
func main() {
	ans := 42
	fmt.Println(&ans) // memory address of ans

	// Take the address, then dereference to recover the value.
	ptr := &ans
	fmt.Println(*ptr)

	// The pointer's own type.
	fmt.Printf("address is a %T\n", ptr)

	// A declared-but-unassigned pointer is nil until it points somewhere.
	canada := "Canada"
	var home *string
	fmt.Printf("home is a %T\n", home)
	home = &canada
	fmt.Println(*home)
}
// 0xc000012088
// 42
// address is a *int
// home is a *string
// Canada
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package usm
import (
"testing"
apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common"
apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1"
"github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1"
"github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
"github.com/DataDog/datadog-operator/controllers/datadogagent/component/agent"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature/fake"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature/test"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
)
// Test_usmFeature_Configure verifies that the USM feature adds the expected
// annotations, capabilities, volumes, volume mounts and env vars to the
// agent pod for both the v1alpha1 and v2alpha1 DatadogAgent APIs, and that
// configuration is skipped entirely when the feature is disabled.
func Test_usmFeature_Configure(t *testing.T) {
	ddav1USMDisabled := v1alpha1.DatadogAgent{
		Spec: v1alpha1.DatadogAgentSpec{
			Agent: v1alpha1.DatadogAgentSpecAgentSpec{
				SystemProbe: &v1alpha1.SystemProbeSpec{
					Enabled: apiutils.NewBoolPointer(false),
				},
			},
		},
	}
	// v1: USM is enabled via the system-probe env var.
	ddav1USMEnabled := ddav1USMDisabled.DeepCopy()
	{
		ddav1USMEnabled.Spec.Agent.SystemProbe.Enabled = apiutils.NewBoolPointer(true)
		ddav1USMEnabled.Spec.Agent.SystemProbe.Env = append(
			ddav1USMEnabled.Spec.Agent.SystemProbe.Env,
			corev1.EnvVar{
				Name:  apicommon.DDSystemProbeServiceMonitoringEnabled,
				Value: "true",
			},
		)
	}
	ddav2USMDisabled := v2alpha1.DatadogAgent{
		Spec: v2alpha1.DatadogAgentSpec{
			Features: &v2alpha1.DatadogFeatures{
				USM: &v2alpha1.USMFeatureConfig{
					Enabled: apiutils.NewBoolPointer(false),
				},
			},
		},
	}
	// v2: USM has a first-class feature toggle.
	ddav2USMEnabled := ddav2USMDisabled.DeepCopy()
	{
		ddav2USMEnabled.Spec.Features.USM.Enabled = apiutils.NewBoolPointer(true)
	}
	// Shared want-func asserting everything USM should add to the node agent.
	usmAgentNodeWantFunc := func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
		mgr := mgrInterface.(*fake.PodTemplateManagers)
		// check annotations
		wantAnnotations := make(map[string]string)
		wantAnnotations[apicommon.SystemProbeAppArmorAnnotationKey] = apicommon.SystemProbeAppArmorAnnotationValue
		annotations := mgr.AnnotationMgr.Annotations
		assert.True(t, apiutils.IsEqualStruct(annotations, wantAnnotations), "Annotations \ndiff = %s", cmp.Diff(annotations, wantAnnotations))
		// check security context capabilities
		sysProbeCapabilities := mgr.SecurityContextMgr.CapabilitiesByC[apicommonv1.SystemProbeContainerName]
		assert.True(t, apiutils.IsEqualStruct(sysProbeCapabilities, agent.DefaultCapabilitiesForSystemProbe()), "System Probe security context capabilities \ndiff = %s", cmp.Diff(sysProbeCapabilities, agent.DefaultCapabilitiesForSystemProbe()))
		// check volume mounts: system-probe gets proc, cgroups, debugfs and its socket dir
		wantVolumeMounts := []corev1.VolumeMount{
			{
				Name:      apicommon.ProcdirVolumeName,
				MountPath: apicommon.ProcdirMountPath,
				ReadOnly:  true,
			},
			{
				Name:      apicommon.CgroupsVolumeName,
				MountPath: apicommon.CgroupsMountPath,
				ReadOnly:  true,
			},
			{
				Name:      apicommon.DebugfsVolumeName,
				MountPath: apicommon.DebugfsPath,
				ReadOnly:  false,
			},
			{
				Name:      apicommon.SystemProbeSocketVolumeName,
				MountPath: apicommon.SystemProbeSocketVolumePath,
				ReadOnly:  false,
			},
		}
		sysProbeMounts := mgr.VolumeMountMgr.VolumeMountsByC[apicommonv1.SystemProbeContainerName]
		assert.True(t, apiutils.IsEqualStruct(sysProbeMounts, wantVolumeMounts), "System Probe volume mounts \ndiff = %s", cmp.Diff(sysProbeMounts, wantVolumeMounts))
		// core agent mounts only the system-probe socket, read-only
		coreWantVolumeMounts := []corev1.VolumeMount{
			{
				Name:      apicommon.SystemProbeSocketVolumeName,
				MountPath: apicommon.SystemProbeSocketVolumePath,
				ReadOnly:  true,
			},
		}
		coreAgentMounts := mgr.VolumeMountMgr.VolumeMountsByC[apicommonv1.CoreAgentContainerName]
		assert.True(t, apiutils.IsEqualStruct(coreAgentMounts, coreWantVolumeMounts), "Core Agent volume mounts \ndiff = %s", cmp.Diff(coreAgentMounts, coreWantVolumeMounts))
		// process agent likewise mounts only the socket, read-only
		processWantVolumeMounts := []corev1.VolumeMount{
			{
				Name:      apicommon.SystemProbeSocketVolumeName,
				MountPath: apicommon.SystemProbeSocketVolumePath,
				ReadOnly:  true,
			},
		}
		processAgentMounts := mgr.VolumeMountMgr.VolumeMountsByC[apicommonv1.ProcessAgentContainerName]
		assert.True(t, apiutils.IsEqualStruct(processAgentMounts, processWantVolumeMounts), "Process Agent volume mounts \ndiff = %s", cmp.Diff(processAgentMounts, processWantVolumeMounts))
		// check volumes
		wantVolumes := []corev1.Volume{
			{
				Name: apicommon.ProcdirVolumeName,
				VolumeSource: corev1.VolumeSource{
					HostPath: &corev1.HostPathVolumeSource{
						Path: apicommon.ProcdirHostPath,
					},
				},
			},
			{
				Name: apicommon.CgroupsVolumeName,
				VolumeSource: corev1.VolumeSource{
					HostPath: &corev1.HostPathVolumeSource{
						Path: apicommon.CgroupsHostPath,
					},
				},
			},
			{
				Name: apicommon.DebugfsVolumeName,
				VolumeSource: corev1.VolumeSource{
					HostPath: &corev1.HostPathVolumeSource{
						Path: apicommon.DebugfsPath,
					},
				},
			},
			{
				Name: apicommon.SystemProbeSocketVolumeName,
				VolumeSource: corev1.VolumeSource{
					EmptyDir: &corev1.EmptyDirVolumeSource{},
				},
			},
		}
		volumes := mgr.VolumeMgr.Volumes
		assert.True(t, apiutils.IsEqualStruct(volumes, wantVolumes), "Volumes \ndiff = %s", cmp.Diff(volumes, wantVolumes))
		// check env vars
		wantEnvVars := []*corev1.EnvVar{
			{
				Name:  apicommon.DDSystemProbeServiceMonitoringEnabled,
				Value: "true",
			},
			{
				Name:  apicommon.DDSystemProbeSocket,
				Value: apicommon.DefaultSystemProbeSocketPath,
			},
		}
		systemProbeEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.SystemProbeContainerName]
		assert.True(t, apiutils.IsEqualStruct(systemProbeEnvVars, wantEnvVars), "System Probe envvars \ndiff = %s", cmp.Diff(systemProbeEnvVars, wantEnvVars))
	}
	tests := test.FeatureTestSuite{
		///////////////////////////
		// v1alpha1.DatadogAgent //
		///////////////////////////
		{
			Name:          "v1alpha1 USM not enabled",
			DDAv1:         ddav1USMDisabled.DeepCopy(),
			WantConfigure: false,
		},
		{
			Name:          "v1alpha1 USM enabled",
			DDAv1:         ddav1USMEnabled,
			WantConfigure: true,
			Agent:         test.NewDefaultComponentTest().WithWantFunc(usmAgentNodeWantFunc),
		},
		// ///////////////////////////
		// // v2alpha1.DatadogAgent //
		// ///////////////////////////
		{
			Name:          "v2alpha1 USM not enabled",
			DDAv2:         ddav2USMDisabled.DeepCopy(),
			WantConfigure: false,
		},
		{
			Name:          "v2alpha1 USM enabled",
			DDAv2:         ddav2USMEnabled,
			WantConfigure: true,
			Agent:         test.NewDefaultComponentTest().WithWantFunc(usmAgentNodeWantFunc),
		},
	}
	tests.Run(t, buildUSMFeature)
}
|
package dao
import (
"github.com/luxingwen/secret-game/model"
)
// AddWxUser inserts the given wx user record into the wx-user table.
func (d *Dao) AddWxUser(user *model.WxUser) (err error) {
	return d.DB.Table(TableWxUser).Create(user).Error
}
// GetByOpenId looks up a wx user by its open id. The returned error comes
// straight from gorm (e.g. record-not-found).
func (d *Dao) GetByOpenId(openId string) (wxUser *model.WxUser, err error) {
	wxUser = new(model.WxUser)
	// wxUser is already a *model.WxUser; the previous code passed &wxUser,
	// handing gorm a **model.WxUser instead of the expected struct pointer.
	err = d.DB.Table(TableWxUser).Where("open_id = ?", openId).First(wxUser).Error
	return
}
// GetWxUser looks up a wx user by primary key id.
func (d *Dao) GetWxUser(id int) (r *model.WxUser, err error) {
	r = new(model.WxUser)
	// r is already a pointer; pass it directly instead of &r (**model.WxUser).
	err = d.DB.Table(TableWxUser).Where("id = ?", id).First(r).Error
	return
}
// AddWxCode inserts the given wx code record into the wx-code table.
func (d *Dao) AddWxCode(wxCode *model.WxCode) (err error) {
	return d.DB.Table(TableWxCode).Create(wxCode).Error
}
// GetWxCode looks up a wx code record by its code string.
func (d *Dao) GetWxCode(code string) (res *model.WxCode, err error) {
	res = new(model.WxCode)
	// res is already a pointer; pass it directly instead of &res (**model.WxCode).
	err = d.DB.Table(TableWxCode).Where("code = ?", code).First(res).Error
	return
}
|
package main
import (
"log"
"os"
"path/filepath"
"sync"
)
// traverseDir walks every root concurrently, streaming discovered regular
// files through fileInfoCh and "pushing" each one via handlerHTTP as it
// arrives. It returns nil early when the package-level done channel is
// closed, draining fileInfoCh first so walker goroutines can finish their
// sends. NOTE(review): filesAmount, done and handlerHTTP are declared
// elsewhere in this file.
func traverseDir(roots []string) []os.FileInfo {
	var wg sync.WaitGroup
	var fileInfoCh = make(chan os.FileInfo)
	var filesInfo = make([]os.FileInfo, 0, filesAmount)
	for _, root := range roots {
		wg.Add(1)
		go walkDir(root, &wg, fileInfoCh)
	}
	// Close the channel once every walker goroutine has finished.
	go func() {
		wg.Wait()
		close(fileInfoCh)
	}()
loop:
	for {
		select {
		case <-done:
			// Cancelled: drain the channel so blocked walkers can exit.
			for range fileInfoCh {
			}
			return nil
		case finfo, ok := <-fileInfoCh:
			if !ok {
				break loop
			}
			log.Printf("file %s added", finfo.Name())
			// filesInfo = append(filesInfo, finfo)
			// let's suppose we'd like to push every found file, not batch, emulating some real system
			handlerHTTP(finfo)
		}
	}
	return filesInfo
}
// walkDir recursively walks dir, sending every non-empty regular file's
// FileInfo to fileInfoCh and spawning a new goroutine (tracked via wg) for
// each subdirectory. It aborts early when the done channel is closed.
// NOTE(review): an error from dirEntries currently panics the program.
func walkDir(dir string, wg *sync.WaitGroup, fileInfoCh chan<- os.FileInfo) {
	defer wg.Done()
	if isDone() {
		return
	}
	entries, err := dirEntries(dir)
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		// name := entry.Name()
		// if entry.IsDir() && (name != "." && name != "..") {
		if entry.IsDir() {
			log.Printf("directory: %s", entry.Name())
			wg.Add(1)
			subdir := filepath.Join(dir, entry.Name())
			go walkDir(subdir, wg, fileInfoCh)
		} else if entry.Size() > 0 {
			log.Printf("regular file: %s", entry.Name())
			fileInfoCh <- entry
		}
	}
}
// dirEntries lists the directory's entries, best-effort: failures are
// logged and reported as an empty (nil, nil) result so callers keep
// walking (walkDir panics on a non-nil error). Concurrency is bounded by
// the package-level semaphore; a closed done channel aborts immediately.
//
// Fixes: the previous version deferred file.Close() *before* checking the
// os.Open error and then went on to call Readdir on the nil file.
func dirEntries(dir string) ([]os.FileInfo, error) {
	// blocking semaphore with 10 flows
	select {
	case semaphore <- struct{}{}:
	case <-done:
		return nil, nil
	}
	defer func() {
		<-semaphore
	}()
	file, err := os.Open(dir)
	if err != nil {
		log.Printf("error opening file: %v", err)
		return nil, nil
	}
	defer file.Close()
	files, err := file.Readdir(0)
	if err != nil {
		log.Printf("error readdir: %v", err)
	}
	return files, nil
}
// isDone reports, without blocking, whether the package-level done channel
// has been closed, i.e. whether the traversal has been cancelled.
func isDone() bool {
	select {
	case <-done:
		return true
	default:
		return false
	}
}
|
package temple
import (
"errors"
"html/template"
"os"
"sync"
)
var (
	// ErrNotADirectory is returned by NewTemple when the supplied path
	// exists but is not a directory. Error strings are lowercase per Go
	// convention (staticcheck ST1005).
	ErrNotADirectory = errors.New("not a directory")
)
// Temple allows you to read a directory of templates and either cache them (in production mode) or discard them
// to be re-read (in development mode).
type Temple struct {
	Dir   string // directory containing the template files
	Base  string // filename of the base template parsed alongside each page
	Cache bool   // when true, parsed templates are kept in the cache map
	// protect the cache map
	mu    sync.RWMutex
	cache map[string]*template.Template
}
// NewTemple returns an initialised Temple. The directory should exist, the baseFilename should exist and cache should
// be true (for each template to be cached) or false (to be discarded and re-read on each invocation).
func NewTemple(dir, baseFilename string, cache bool) (*Temple, error) {
	// firstly, check that this directory exists. Bail out on *any* stat
	// error: the previous code only handled os.IsNotExist and would
	// dereference a nil FileInfo on e.g. a permission error.
	stat, err := os.Stat(dir)
	if err != nil {
		return nil, err
	}
	// check that this is a directory
	if !stat.IsDir() {
		return nil, ErrNotADirectory
	}
	// all okay
	tmpl := Temple{
		Dir:   dir,
		Base:  baseFilename,
		Cache: cache,
		cache: make(map[string]*template.Template),
	}
	return &tmpl, nil
}
// Get will return the html/template you asked for. Note: you should supply the full filename such as "index.html",
// rather than just "index". First time through, the template is read from disk. Second time through depends on whether
// you asked for the templates to be cached or not.
//
// The receiver must be a pointer: the previous value receiver copied the
// sync.RWMutex (and the whole struct) on every call, so the lock never
// actually guarded the shared cache map across goroutines.
func (t *Temple) Get(name string) (*template.Template, error) {
	// take a read lock to start off
	t.mu.RLock()
	tmpl, ok := t.cache[name]
	t.mu.RUnlock()
	if ok {
		return tmpl, nil
	}
	// doesn't yet exist, so let's read the templates in and store them
	tmpl, err := template.ParseFiles(t.Dir+"/"+t.Base, t.Dir+"/"+name)
	if err != nil {
		return nil, err
	}
	// if we don't want to cache, then just forget about the template immediately
	if !t.Cache {
		return tmpl, err
	}
	// now store this in the cache
	t.mu.Lock()
	t.cache[name] = tmpl
	t.mu.Unlock()
	return tmpl, err
}
// MustGet will return the html/template you asked for. The same notes apply as Get. However, in this case, we'll call
// panic if the template doesn't load or parse correctly.
//
// Pointer receiver for the same reason as Get: a value receiver would copy
// the struct's sync.RWMutex on every call.
func (t *Temple) MustGet(name string) *template.Template {
	tmpl, err := t.Get(name)
	if err != nil {
		panic(err)
	}
	return tmpl
}
|
package helm
import (
"fmt"
"io"
"os"
"path/filepath"
"github.com/loft-sh/devspace/pkg/devspace/config/versions"
"github.com/loft-sh/devspace/pkg/devspace/config/loader/variable/legacy"
runtimevar "github.com/loft-sh/devspace/pkg/devspace/config/loader/variable/runtime"
"github.com/loft-sh/devspace/pkg/devspace/config/remotecache"
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
"github.com/loft-sh/devspace/pkg/devspace/context/values"
"github.com/loft-sh/devspace/pkg/util/stringutil"
"github.com/loft-sh/devspace/pkg/devspace/helm/types"
yaml "gopkg.in/yaml.v3"
"github.com/loft-sh/devspace/pkg/devspace/deploy/deployer/helm/merge"
"github.com/loft-sh/devspace/pkg/devspace/helm"
hashpkg "github.com/loft-sh/devspace/pkg/util/hash"
"github.com/loft-sh/devspace/pkg/util/yamlutil"
"github.com/pkg/errors"
)
// Deploy deploys the given deployment with helm. It computes hashes over the
// chart directory, values files, deployment config and resolved values;
// redeploys only when forceDeploy is set, something changed, or the cached
// release revision no longer exists in the namespace. Returns true when a
// deploy actually happened.
func (d *DeployConfig) Deploy(ctx devspacecontext.Context, forceDeploy bool) (bool, error) {
	// Release name falls back to the deployment name when not set explicitly.
	var releaseName string
	if d.DeploymentConfig.Helm.ReleaseName != "" {
		releaseName = d.DeploymentConfig.Helm.ReleaseName
	} else {
		releaseName = d.DeploymentConfig.Name
	}
	var (
		chartPath = d.DeploymentConfig.Helm.Chart.Name
		hash      = ""
	)
	// Namespace falls back to the kube client's current namespace.
	releaseNamespace := ctx.KubeClient().Namespace()
	if d.DeploymentConfig.Namespace != "" {
		releaseNamespace = d.DeploymentConfig.Namespace
	}
	// Remote chart sources are downloaded first.
	if d.DeploymentConfig.Helm.Chart.Source != nil {
		downloadPath, err := d.Helm.DownloadChart(ctx, d.DeploymentConfig.Helm)
		if err != nil {
			return false, errors.Wrap(err, "download chart")
		}
		chartPath = downloadPath
	}
	// Hash the chart directory if there is any
	_, err := os.Stat(ctx.ResolvePath(chartPath))
	if err == nil {
		chartPath = ctx.ResolvePath(chartPath)
		// Check if the chart directory has changed
		hash, err = hashpkg.DirectoryExcludes(chartPath, []string{
			".git/",
			".devspace/",
		}, true)
		if err != nil {
			return false, errors.Errorf("Error hashing chart directory: %v", err)
		}
	}
	// Ensure deployment config is there
	deployCache, _ := ctx.Config().RemoteCache().GetDeployment(d.DeploymentConfig.Name)
	// Check values files for changes
	helmOverridesHash := ""
	if d.DeploymentConfig.Helm.ValuesFiles != nil {
		for _, override := range d.DeploymentConfig.Helm.ValuesFiles {
			override = ctx.ResolvePath(override)
			// Note: this `hash` deliberately shadows the chart hash above;
			// the per-file hashes are concatenated into helmOverridesHash.
			hash, err := hashpkg.Directory(override)
			if err != nil {
				return false, errors.Errorf("Error stating override file %s: %v", override, err)
			}
			helmOverridesHash += hash
		}
	}
	// Check deployment config for changes
	configStr, err := yaml.Marshal(d.DeploymentConfig)
	if err != nil {
		return false, errors.Wrap(err, "marshal deployment config")
	}
	deploymentConfigHash := hashpkg.String(string(configStr))
	// Get HelmClient if necessary
	if d.Helm == nil {
		d.Helm, err = helm.NewClient(ctx.Log())
		if err != nil {
			return false, errors.Errorf("Error creating helm client: %v", err)
		}
	}
	// Get deployment values
	redeploy, deployValues, err := d.getDeploymentValues(ctx)
	if err != nil {
		return false, err
	}
	// Check deployment values for changes
	deployValuesBytes, err := yaml.Marshal(deployValues)
	if err != nil {
		return false, errors.Wrap(err, "marshal deployment values")
	}
	deployValuesHash := hashpkg.String(string(deployValuesBytes))
	// Check if redeploying is necessary
	helmCache := deployCache.Helm
	if helmCache == nil {
		helmCache = &remotecache.HelmCache{}
	}
	// Redeploy when anything relevant changed: config, values, overrides or chart.
	forceDeploy = forceDeploy || redeploy || deployCache.DeploymentConfigHash != deploymentConfigHash || helmCache.ValuesHash != deployValuesHash || helmCache.OverridesHash != helmOverridesHash || helmCache.ChartHash != hash
	if !forceDeploy {
		releases, err := d.Helm.ListReleases(ctx, releaseNamespace)
		if err != nil {
			return false, err
		}
		// Also redeploy when the cached release revision no longer exists.
		forceDeploy = true
		for _, release := range releases {
			if release.Name == releaseName && release.Revision == helmCache.ReleaseRevision {
				forceDeploy = false
				break
			}
		}
	}
	// Deploy
	if forceDeploy {
		release, err := d.internalDeploy(ctx, deployValues, nil)
		if err != nil {
			return false, err
		}
		// Record the new state so the next run can skip unchanged deploys.
		deployCache.DeploymentConfigHash = deploymentConfigHash
		helmCache.Release = releaseName
		helmCache.ReleaseNamespace = releaseNamespace
		helmCache.ChartHash = hash
		helmCache.ValuesHash = deployValuesHash
		helmCache.OverridesHash = helmOverridesHash
		if release != nil {
			helmCache.ReleaseRevision = release.Revision
		}
		deployCache.Helm = helmCache
		if rootName, ok := values.RootNameFrom(ctx.Context()); ok && !stringutil.Contains(deployCache.Projects, rootName) {
			deployCache.Projects = append(deployCache.Projects, rootName)
		}
		ctx.Config().RemoteCache().SetDeployment(d.DeploymentConfig.Name, deployCache)
		return true, nil
	}
	// Nothing deployed: still record that this project uses the deployment.
	if rootName, ok := values.RootNameFrom(ctx.Context()); ok && !stringutil.Contains(deployCache.Projects, rootName) {
		deployCache.Projects = append(deployCache.Projects, rootName)
	}
	ctx.Config().RemoteCache().SetDeployment(d.DeploymentConfig.Name, deployCache)
	return false, nil
}
// internalDeploy installs or upgrades the helm release with the given
// values. When out is non-nil the chart is only rendered (helm template)
// and written to out, no deployment happens and the returned release is nil.
func (d *DeployConfig) internalDeploy(ctx devspacecontext.Context, overwriteValues map[string]interface{}, out io.Writer) (*types.Release, error) {
	// Release name falls back to the deployment name when not set explicitly.
	var releaseName string
	if d.DeploymentConfig.Helm.ReleaseName != "" {
		releaseName = d.DeploymentConfig.Helm.ReleaseName
	} else {
		releaseName = d.DeploymentConfig.Name
	}
	releaseNamespace := ctx.KubeClient().Namespace()
	if d.DeploymentConfig.Namespace != "" {
		releaseNamespace = d.DeploymentConfig.Namespace
	}
	// Template-only mode: render the chart, write it out, and stop.
	if out != nil {
		str, err := d.Helm.Template(ctx, releaseName, releaseNamespace, overwriteValues, d.DeploymentConfig.Helm)
		if err != nil {
			return nil, err
		}
		_, _ = out.Write([]byte("\n" + str + "\n"))
		return nil, nil
	}
	ctx.Log().Infof("Deploying chart %s (%s) with helm...", d.DeploymentConfig.Helm.Chart.Name, d.DeploymentConfig.Name)
	valuesOut, _ := yaml.Marshal(overwriteValues)
	ctx.Log().Debugf("Deploying chart with values:\n %v\n", string(valuesOut))
	// Deploy chart
	appRelease, err := d.Helm.InstallChart(ctx, releaseName, releaseNamespace, overwriteValues, d.DeploymentConfig.Helm)
	if err != nil {
		return nil, errors.Errorf("unable to deploy helm chart: %v", err)
	}
	// Print revision
	if appRelease != nil {
		ctx.Log().Donef("Deployed helm chart (Release revision: %s)", appRelease.Revision)
	} else {
		ctx.Log().Done("Deployed helm chart")
	}
	return appRelease, nil
}
// getDeploymentValues assembles the final helm values for this deployment by
// layering, in order: the chart's own values.yaml (when local), every
// configured values file, then inline config values. The returned bool
// reports whether image-tag replacement or runtime variables require a
// redeploy.
func (d *DeployConfig) getDeploymentValues(ctx devspacecontext.Context) (bool, map[string]interface{}, error) {
	var (
		chartPath       = d.DeploymentConfig.Helm.Chart.Name
		chartValuesPath = ctx.ResolvePath(filepath.Join(chartPath, "values.yaml"))
		overwriteValues = map[string]interface{}{}
		shouldRedeploy  = false
	)
	// Check if its a local chart
	_, err := os.Stat(chartValuesPath)
	if err == nil {
		err := yamlutil.ReadYamlFromFile(chartValuesPath, overwriteValues)
		if err != nil {
			return false, nil, errors.Errorf("Couldn't deploy chart, error reading from chart values %s: %v", chartValuesPath, err)
		}
		// Replace image references with the freshly built tags, if enabled.
		if d.DeploymentConfig.UpdateImageTags == nil || *d.DeploymentConfig.UpdateImageTags {
			redeploy, err := legacy.ReplaceImageNames(overwriteValues, ctx.Config(), ctx.Dependencies(), nil)
			if err != nil {
				return false, nil, err
			}
			shouldRedeploy = shouldRedeploy || redeploy
		}
	}
	// Load override values from path
	if d.DeploymentConfig.Helm.ValuesFiles != nil {
		for _, overridePath := range d.DeploymentConfig.Helm.ValuesFiles {
			overwriteValuesPath := ctx.ResolvePath(overridePath)
			overwriteValuesFromPath := map[string]interface{}{}
			err = yamlutil.ReadYamlFromFile(overwriteValuesPath, overwriteValuesFromPath)
			if err != nil {
				return false, nil, fmt.Errorf("error reading from chart dev overwrite values %s: %v", overwriteValuesPath, err)
			}
			// Replace image names
			if d.DeploymentConfig.UpdateImageTags == nil || *d.DeploymentConfig.UpdateImageTags {
				redeploy, err := legacy.ReplaceImageNames(overwriteValuesFromPath, ctx.Config(), ctx.Dependencies(), nil)
				if err != nil {
					return false, nil, err
				}
				shouldRedeploy = shouldRedeploy || redeploy
			}
			// Later files win over earlier ones and over chart defaults.
			merge.Values(overwriteValues).MergeInto(overwriteValuesFromPath)
		}
	}
	// Load override values from data and merge them
	if d.DeploymentConfig.Helm.Values != nil {
		enableLegacy := false
		if d.DeploymentConfig.UpdateImageTags == nil || *d.DeploymentConfig.UpdateImageTags {
			enableLegacy = true
		}
		// Resolve runtime variables embedded in the inline values.
		redeploy, _, err := runtimevar.NewRuntimeResolver(ctx.WorkingDir(), enableLegacy).FillRuntimeVariablesWithRebuild(ctx.Context(), d.DeploymentConfig.Helm.Values, ctx.Config(), ctx.Dependencies())
		if err != nil {
			return false, nil, err
		}
		shouldRedeploy = shouldRedeploy || redeploy
		merge.Values(overwriteValues).MergeInto(d.DeploymentConfig.Helm.Values)
	}
	// Validate deployment values
	err = versions.ValidateComponentConfig(d.DeploymentConfig, overwriteValues)
	if err != nil {
		return false, nil, err
	}
	return shouldRedeploy, overwriteValues, nil
}
|
package main
import (
"fmt"
)
// main demonstrates three array walks: summing all elements, then finding
// index pairs whose values add up to 6 using two different loop styles.
func main() {
	a1 := [5]int{1, 2, 3, 4, 5}
	sum := 0
	// Sum all elements of the array.
	for _, v := range a1 {
		sum += v
	}
	fmt.Println("sum=", sum)
	// Print index pairs (key1 < key2) whose elements sum to 6.
	// `for key1, _ := range` is redundant; a single range variable already
	// yields just the index.
	for key1 := range a1 {
		for key2 := range a1 {
			if a1[key1]+a1[key2] == 6 && key1 < key2 {
				fmt.Printf("key1=%v,key2=%v\n", key1, key2)
			}
		}
	}
	// Same search with classic index loops: the outer loop starts at 0,
	// the inner loop starts at the outer index, so each unordered pair is
	// considered once.
	for i := 0; i < len(a1); i++ {
		for j := i; j < len(a1); j++ {
			if a1[i]+a1[j] == 6 && i != j {
				fmt.Printf("i=%v,j=%v\n", i, j)
			}
		}
	}
}
|
package app
import (
"encoding/json"
"errors"
"html/template"
"io/ioutil"
"math"
"net/http"
"time"
"github.com/gorilla/mux"
"muun-paradise/model"
)
// App holds the HTTP server as well as the trained model.
type App struct {
	server http.Server   // NOTE(review): unused here — Start builds its own *http.Server
	model  model.Modeler // trained model used to classify uploaded documents
}
// New generates a new web app with a trained model passed in as the
// sole argument. Call Start on the result to begin serving.
func New(m model.Modeler) *App {
	return &App{model: m}
}
// result is the payload returned (as JSON or template data) for a
// successful prediction.
type result struct {
	File       string  `json:"file"`       // original filename of the upload
	Prediction string  `json:"prediction"` // predicted class label
	Confidence float64 `json:"confidence"` // model confidence score
}
// errorMsg is the view data rendered into the error page template.
type errorMsg struct {
	Status int    // HTTP status code shown to the user
	Error  string // human-readable error description
}
// homeHandler renders the landing page template. The previous version
// silently discarded the Execute error.
func homeHandler(w http.ResponseWriter, r *http.Request) {
	tmpl := template.Must(template.ParseFiles("app/site/index.html"))
	if err := tmpl.Execute(w, nil); err != nil {
		// Headers (and possibly part of the body) may already be written;
		// reporting the failure is the best we can do at this point.
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// parseInput extracts the uploaded plain-text file from the request, runs
// the model over its contents and returns the prediction result. It rejects
// requests without an "uploadfile" form part or whose declared Content-Type
// is not text/plain.
func (a *App) parseInput(r *http.Request) (*result, error) {
	file, handle, err := r.FormFile("uploadfile")
	if err != nil {
		return nil, errors.New("error retrieving file from request")
	}
	defer file.Close()
	if handle.Header.Get("Content-Type") != "text/plain" {
		return nil, errors.New("incorrect file type submit plain text only")
	}
	contents, err := ioutil.ReadAll(file)
	if err != nil {
		return nil, errors.New("error reading submitted file")
	}
	class, score := a.model.Predict(string(contents))
	// math.Exp(x) is the idiomatic (and more accurate) equivalent of the
	// previous math.Pow(math.E, x).
	score = 1.0 - math.Exp(score)
	return &result{
		File:       handle.Filename,
		Prediction: class,
		Confidence: score,
	}, nil
}
// uploadHandler handles form submissions from the home page. On success it
// re-renders the index page with the prediction result; on failure it
// renders the error page with a 400-style message. Non-POST requests get an
// empty 200 response. NOTE(review): template Execute errors are ignored.
func (a *App) uploadHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method == http.MethodPost {
		res, err := a.parseInput(r)
		if err != nil {
			tmpl := template.Must(template.ParseFiles("app/site/error.html"))
			e := errorMsg{
				Error:  err.Error(),
				Status: http.StatusBadRequest,
			}
			tmpl.Execute(w, e)
			return
		}
		tmpl := template.Must(template.ParseFiles("app/site/index.html"))
		tmpl.Execute(w, res)
	}
}
// apiHandler is the JSON counterpart of uploadHandler: it accepts the same
// multipart upload via POST and writes the prediction result as JSON.
// Parse failures become a plain-text 400. Non-POST requests get an empty
// 200 response.
func (a *App) apiHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method == http.MethodPost {
		res, err := a.parseInput(r)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(res)
	}
}
// Start initializes the HTTP server and begins listening for requests.
// It blocks until the server exits and panics on listen/serve errors.
func (a *App) Start() {
	r := mux.NewRouter()
	r.HandleFunc("/", homeHandler)
	r.HandleFunc("/upload", a.uploadHandler)
	r.HandleFunc("/api", a.apiHandler)
	// Serve the stylesheet straight from disk.
	r.HandleFunc("/css/muun-paradise.css", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "./app/site/css/muun-paradise.css")
	})
	// NOTE(review): the bind address is hard-coded to a specific EC2 host;
	// consider making it configurable.
	s := &http.Server{
		Handler:      r,
		Addr:         "ec2-34-207-224-228.compute-1.amazonaws.com:8000",
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
	}
	if err := s.ListenAndServe(); err != nil {
		panic(err)
	}
}
|
package main
import (
"io"
"os"
"net"
"fmt"
"os/exec"
"strings"
"net/http"
"strconv"
"github.com/robfig/cron"
"sync"
"flag"
)
var (
	// Name is the canonical name of this exporter binary.
	Name          = "rabbitmq_exporter"
	listenAddress = flag.String("unix-sock", "/dev/shm/rabbitmq_exporter.sock", "Address to listen on for unix sock access and telemetry.")
	users         = flag.String("users", "", "Rabbitmq users list, multi split with ,.")
	vhosts        = flag.String("vhosts", "", "Rabbitmq vhost list, multi split with ,.")
	metricsPath   = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
)

// Shared scrape state. g_lock guards g_ret, the most recently collected
// metrics payload served to scrapers.
var g_lock sync.RWMutex
var g_ret string
var g_users []string
var g_usersMap map[string]string
var g_vhosts []string
var g_vhostsMap map[string]string

// doing is a best-effort re-entrancy guard for doWork.
// NOTE(review): it is read and written without synchronization — racy if
// doWork can be triggered concurrently; confirm the cron setup serializes it.
var doing bool
// userConnInfo aggregates per-user connection counts by state
// (field names suggest rabbitmq connection states — blocked/running/closed).
type userConnInfo struct {
	User    string
	Blocked float64
	Running float64
	Closed  float64
}
// vhostQueueInfo aggregates per-vhost queue message counts
// (unacknowledged and ready totals).
type vhostQueueInfo struct {
	Vhost string
	UnAck float64
	Ready float64
}
// execImpl runs cmdStr through /bin/sh -c and returns its stdout, or ""
// when the command fails to run or exits non-zero.
//
// The previous version called cmd.Wait() *before* the command was started:
// Wait on an unstarted command immediately returns an (ignored) error, so
// the call was a no-op. cmd.Output() itself starts the process and waits.
func execImpl(cmdStr string) string {
	cmd := exec.Command("/bin/sh", "-c", cmdStr)
	out, err := cmd.Output()
	if err != nil {
		return ""
	}
	return string(out)
}
// up returns the number of listening sockets owned by beam.smp (the Erlang
// VM), used as a coarse liveness signal for the rabbitmq node.
func up() string {
	return execImpl("ss -ntpl | grep beam.smp | grep -v grep | wc -l")
}
// status returns the raw output of `rabbitmqctl status`.
func status() string {
	return execImpl("/usr/local/rabbitmq/sbin/rabbitmqctl status")
}
// listConnections returns one "user,state" line per client connection,
// as reported by `rabbitmqctl list_connections`.
func listConnections() string {
	return execImpl("/usr/local/rabbitmq/sbin/rabbitmqctl list_connections user state | grep -v Listing | grep -v done | awk -v OFS=',' '{print $1,$2}'")
}
// messages_unacknowledged,messages_ready
// listQueues returns the requested per-queue counter (msg, e.g.
// "messages_ready") for every queue in the given vhost, one value per line.
// Returns "" when either argument is empty.
func listQueues(msg string, vhost string) string {
	if len(msg) == 0 || len(vhost) == 0 {
		return ""
	}
	// The previous version passed the format string with literal "%s"
	// placeholders straight to the shell — vhost and msg were never
	// substituted. Build the command with fmt.Sprintf instead.
	return execImpl(fmt.Sprintf("/usr/local/rabbitmq/sbin/rabbitmqctl list_queues -p %s %s | grep -v Listing | grep -v done | awk '{print $NF}'", vhost, msg))
}
// doWork collects all metrics by shelling out to ss/rabbitmqctl, renders
// them in Prometheus text format, and publishes the result into g_ret
// under g_lock. It runs once at startup and then on the cron schedule.
// NOTE(review): the `doing` guard is a plain bool mutated without
// synchronization; overlapping invocations could race — confirm.
func doWork() {
	if doing {
		return
	}
	doing = true
	// Liveness check: healthy when beam.smp holds exactly 4 listeners.
	alive := up()
	alive = strings.TrimRight(alive, "\n")
	if !strings.EqualFold(alive, "4") {
		g_lock.Lock()
		g_ret = "rabbitmq_up 0"
		g_lock.Unlock()
		doing = false
		return
	}
	status := status()
	statusList := strings.Split(status, "\n")
	/*if len(statusList) < 45 {
		g_lock.Lock()
		g_ret = "rabbitmq_up 0"
		g_lock.Unlock()
		doing = false
		return
	}*/
	ret := "rabbitmq_up 1\n"
	nameSpace := "rabbitmq"
	// Remembered for the derived *_pct gauges emitted after the loop.
	total_memory_used_v := 0.0
	vm_memory_limit_v := 0.0
	processes_limit_v := 0.0
	processes_used_v := 0.0
	// Scrape the Erlang-term status output line by line; the comment in
	// each branch shows the line shape being parsed.
	for _, s := range statusList {
		if strings.Contains(s,"[{total,") {
			// [{total,448090960},
			tmp := strings.TrimRight(s, "},")
			l := strings.Split(tmp, ",")
			total_memory_used, _ := strconv.ParseFloat(l[1], 64)
			total_memory_used_v = total_memory_used
			ret += fmt.Sprintf("%s_total_memory_used %g\n", nameSpace, total_memory_used)
		} else if strings.Contains(s, "{vm_memory_high_watermark,") {
			// {vm_memory_high_watermark,0.8},
			tmp := strings.TrimRight(s, "},")
			l := strings.Split(tmp, ",")
			vm_memory_high_watermark, _ := strconv.ParseFloat(l[1],64)
			ret += fmt.Sprintf("%s_vm_memory_high_watermark %g\n", nameSpace, vm_memory_high_watermark)
		} else if strings.Contains(s, "{vm_memory_limit,") {
			// {vm_memory_limit,6576822681},
			tmp := strings.TrimRight(s, "},")
			l := strings.Split(tmp, ",")
			vm_memory_limit, _ := strconv.ParseFloat(l[1],64)
			vm_memory_limit_v = vm_memory_limit
			ret += fmt.Sprintf("%s_vm_memory_limit %g\n", nameSpace, vm_memory_limit)
		} else if strings.Contains(s,"{processes,") {
			// {processes,[{limit,1048576},{used,12596}]},
			tmp := strings.TrimRight(s, "}]},")
			// {processes,[{limit,1048576},{used,12596
			tmp = strings.Replace(tmp, " ", "", -1)
			//{processes,[{limit,1048576},{used,12596
			tmp = strings.Replace(tmp, "{processes,[{limit,", "", -1)
			//1048576},{used,12596
			tmp = strings.Replace(tmp, "},{used", "", -1)
			//1048576,12596
			l := strings.Split(tmp, ",")
			processes_limit, _ := strconv.ParseFloat(l[0], 64)
			processes_limit_v = processes_limit
			ret += fmt.Sprintf("%s_processes_limit %g\n", nameSpace, processes_limit)
			processes_used, _ := strconv.ParseFloat(l[1], 64)
			processes_used_v = processes_used
			ret += fmt.Sprintf("%s_processes_used %g\n", nameSpace, processes_used)
		}
	}
	// Derived percentage gauges; the > 1 guard avoids dividing by a
	// zero/unset limit when parsing found nothing.
	if vm_memory_limit_v > 1 {
		ret += fmt.Sprintf("%s_total_memory_used_pct %g\n", nameSpace,
			(total_memory_used_v / vm_memory_limit_v) * 100)
	} else {
		ret += fmt.Sprintf("%s_total_memory_used_pct %g\n", nameSpace, 0.0)
	}
	if processes_limit_v > 1 {
		ret += fmt.Sprintf("%s_processes_limit_pct %g\n", nameSpace,
			(processes_used_v / processes_limit_v) * 100)
	} else {
		ret += fmt.Sprintf("%s_processes_limit_pct %g\n", nameSpace, 0.0)
	}
	// Per-user connection-state counters from "user,state" lines.
	connections := listConnections()
	connections = strings.TrimRight(connections, "\n")
	connectionsList := strings.Split(connections,"\n")
	var cm map[string]*userConnInfo
	cm = make(map[string]*userConnInfo)
	// Pre-seed every configured user so absent users still report zeros.
	for _, u := range g_users {
		uci := &userConnInfo{
			User:u,
			Blocked:0.0,
			Running:0.0,
			Closed:0.0,
		}
		cm[u] = uci
	}
	for _, c := range connectionsList {
		l := strings.Split(c, ",")
		if len(l) != 2 {
			continue
		}
		// Only count users named on the command line.
		if _, ok := g_usersMap[l[0]]; ok {
			if strings.HasPrefix(l[1], "blocked") {
				cm[l[0]].Blocked += 1
			} else if strings.HasPrefix(l[1], "running") {
				cm[l[0]].Running += 1
			} else if strings.HasPrefix(l[1], "closed") {
				cm[l[0]].Closed += 1
			} else {
			}
		}
	}
	for _, val := range cm {
		ret += fmt.Sprintf("%s_connections{user=\"%s\",type=\"blocked\"} %g\n",
			nameSpace, val.User, val.Blocked)
		ret += fmt.Sprintf("%s_connections{user=\"%s\",type=\"running\"} %g\n",
			nameSpace, val.User, val.Running)
		ret += fmt.Sprintf("%s_connections{user=\"%s\",type=\"closed\"} %g\n",
			nameSpace, val.User, val.Closed)
	}
	// Per-vhost ready/unacknowledged message totals.
	var vm map[string]*vhostQueueInfo
	vm = make(map[string]*vhostQueueInfo)
	for _, v := range g_vhostsMap {
		vqi := &vhostQueueInfo{
			Vhost:v,
			UnAck:0.0,
			Ready:0.0,
		}
		vm[v] = vqi
	}
	for _, vhost := range g_vhosts {
		queues := listQueues("messages_ready", vhost)
		queues = strings.TrimRight(queues, "\n")
		queuesList := strings.Split(queues,"\n")
		for _, q := range queuesList {
			// Skip zero counts (lines starting with "0").
			if strings.HasPrefix(q, "0") {
				continue
			}
			val, _ := strconv.ParseFloat(q, 64)
			vm[vhost].Ready += val
		}
		queues2 := listQueues("messages_unacknowledged", vhost)
		queues2 = strings.TrimRight(queues2, "\n")
		queues2List := strings.Split(queues2,"\n")
		for _, q := range queues2List {
			if strings.HasPrefix(q, "0") {
				continue
			}
			val, _ := strconv.ParseFloat(q, 64)
			vm[vhost].UnAck += val
		}
	}
	for _, val := range vm {
		ret += fmt.Sprintf("%s_queues_messages{vhost=\"%s\",type=\"ready\"} %g\n",
			nameSpace, val.Vhost, val.Ready)
		ret += fmt.Sprintf("%s_queues_messages{vhost=\"%s\",type=\"unacknowledged\"} %g\n",
			nameSpace, val.Vhost, val.UnAck)
	}
	// Publish the rendered payload for the /metrics handler.
	g_lock.Lock()
	g_ret = ret
	g_lock.Unlock()
	doing = false
}
// metrics serves the most recently collected metrics payload under a
// read lock.
func metrics(w http.ResponseWriter, req *http.Request) {
	g_lock.RLock()
	defer g_lock.RUnlock()
	io.WriteString(w, g_ret)
}
// main parses flags, primes the user/vhost lookup tables, performs an
// initial collection, schedules doWork every 2 minutes, and serves
// /metrics over a unix domain socket.
func main() {
	flag.Parse()
	addr := ""
	// NOTE(review): flag.String never returns nil, so the nil checks on
	// listenAddress/users/vhosts below are dead code — confirm before
	// simplifying.
	if listenAddress != nil {
		addr = *listenAddress
	} else {
		addr = "/dev/shm/rabbitmq_exporter.sock"
	}
	if users == nil {
		panic("error users")
	}
	g_users = strings.Split(*users, ",")
	if len(g_users) == 0 {
		panic("no users")
	}
	g_usersMap = make(map[string]string)
	for _, u := range g_users {
		g_usersMap[u] = u
	}
	if vhosts == nil {
		panic("error vhosts")
	}
	g_vhosts = strings.Split(*vhosts, ",")
	if len(g_vhosts) == 0 {
		panic("no vhosts")
	}
	g_vhostsMap = make(map[string]string)
	for _, v := range g_vhosts {
		g_vhostsMap[v] = v
	}
	doing = false
	// Prime g_ret synchronously so the first scrape never sees an empty body.
	doWork()
	// Re-collect every 2 minutes (6-field cron spec including seconds).
	c := cron.New()
	c.AddFunc("0 */2 * * * ?", doWork)
	c.Start()
	mux := http.NewServeMux()
	mux.HandleFunc("/metrics", metrics)
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
             <head><title>Rabbitmq Exporter</title></head>
             <body>
             <h1>Rabbitmq Exporter</h1>
             <p><a href='` + "/metrics" + `'>Metrics</a></p>
             </body>
             </html>`))
	})
	server := http.Server{
		Handler: mux, // http.DefaultServeMux,
	}
	// Remove a stale socket left by a previous run before binding.
	os.Remove(addr)
	listener, err := net.Listen("unix", addr)
	if err != nil {
		panic(err)
	}
	server.Serve(listener)
}
package sort
// BubbleSort sorts slices of interface{} values with the bubble-sort
// algorithm, exiting early once a full pass performs no swaps.
type BubbleSort struct {
}

// Sort orders data in place in ascending order as defined by comparable.
// Slices shorter than two elements are returned untouched.
func (s BubbleSort) Sort(data []interface{}, comparable Comparable) {
	size := len(data)
	if size < 2 {
		return
	}
	for limit := size - 1; limit >= 0; limit-- {
		swapped := false
		// Bubble the largest remaining element up to position `limit`.
		for idx := 0; idx < limit; idx++ {
			if comparable.Compare(data[idx], data[idx+1]) > 0 {
				data[idx], data[idx+1] = data[idx+1], data[idx]
				swapped = true
			}
		}
		// A pass without swaps means the slice is already sorted.
		if !swapped {
			return
		}
	}
}
|
package com
//在文章或产品的下面有文章和产品标签
//不同文章产品显示它自己的标签,
//点击标签跳转到列表页面,文章或产品列表
import (
"JsGo/JsHttp"
"JsGo/JsLogger"
"fmt"
)
// Find registers the HTTP route for tag-based lookups of articles and
// products.
func Find() {
	JsHttp.Http("/tagquerylinkap", TagQueryLinkAP) // query articles or products by tag
}
//由前端查询调用网络接口
//by tag query article product标签查询对应的文章或产品链接返回内容或列表
// TagQueryLinkAP handles the /tagquerylinkap endpoint: given a tag it walks
// the linked chain of article or product IDs attached to that tag, skipping
// (and unlinking) soft-deleted entries, and returns either the IDs (list
// mode) or the full records (detail mode).
func TagQueryLinkAP(s *JsHttp.Session) {
	type Para struct {
		Tag          string
		AP           bool   // false = articles, true = products
		Num          int    // number of records requested
		startID      string // start ID; empty means start from the chain head. NOTE(review): unexported, so GetPara likely cannot populate it from the request — confirm.
		ListOrDetail bool   // false = return details, true = return ID list
	}
	idList := []string{}              // collected live IDs
	artCommonList := []*XM_Contents{} // collected article details
	proCommonList := []*XM_Product{}  // collected product details
	var Needid string                 // ID currently being visited
	var RecordIdArt string            // head article ID; the head is never unlinked
	var RecordIdPro string            // head product ID; the head is never unlinked
	var upidt string                  // previous ID in the chain
	st := &Para{}
	if err := s.GetPara(st); err != nil {
		s.Forward("1", err.Error(), nil)
		return
	}
	// Both a tag name and a positive count are required.
	if st.Num <= 0 || st.Tag == "" {
		info := fmt.Sprintf("TagQueryLinkAP,tag=%s,Num=%d\n", st.Tag, st.Num)
		JsLogger.Error(info)
		s.Forward("1", info, nil)
		return
	}
	if st.AP == false { // article lookup
		if st.startID == "" {
			d, eb := GetOneTag(st.Tag)
			if eb != nil {
				JsLogger.Error("tag detail unfild")
				s.Forward("5", eb.Error(), nil)
				return
			}
			Needid = d.TagFinArt
			RecordIdArt = d.TagFinArt
		} else {
			upidt = st.startID
			Needid = TagNextA(st.Tag, st.startID)
		}
		for i := 0; i <= st.Num; { // NOTE(review): "<=" collects Num+1 records — confirm intended
			if Needid == "" {
				// End of chain. Bug fix: this previously returned without
				// sending any response; break instead so the collected
				// results are forwarded below (matches the product branch).
				break
			}
			dataart, err := Getcontent(Needid)
			if err != nil {
				JsLogger.Error("detail fild error")
				s.Forward("6", err.Error(), nil)
				return
			}
			if dataart.DelTag && Needid != RecordIdArt {
				// Soft-deleted entry: unlink it from the tag chain.
				err := DelteTageConnectA(st.Tag, upidt, Needid)
				if err != nil {
					JsLogger.Info("detail failure Article")
					return
				}
			} else {
				i++ // live entry: count and collect it
				idList = append(idList, Needid)
				artCommonList = append(artCommonList, dataart)
			}
			upidt = Needid
			Needid = TagNextA(st.Tag, upidt)
		}
		if !st.ListOrDetail {
			s.Forward("0", "success", artCommonList)
		}
	}
	if st.AP == true { // product lookup
		if st.startID == "" {
			d, eb := GetOneTag(st.Tag)
			if eb != nil {
				JsLogger.Error("tag detail unfild")
				s.Forward("5", eb.Error(), nil)
				return
			}
			Needid = d.TagFinPro
			RecordIdPro = d.TagFinPro
		} else {
			// Consistency fix: also remember the previous ID here (the
			// article branch did; this branch left upidt empty).
			upidt = st.startID
			Needid = TagNextA(st.Tag, st.startID)
		}
		for i := 0; i <= st.Num; {
			if Needid == "" {
				break
			}
			dataPro, err := GetProductInfo(Needid)
			if err != nil {
				JsLogger.Error("detail fild error")
				s.Forward("6", err.Error(), nil)
				return
			}
			// Bug fix: the braces here previously nested the cursor advance
			// (upidt/Needid) inside the deleted-entry branch and counted an
			// entry only when an unlink succeeded, so a live entry never
			// advanced the cursor and the loop never terminated. Mirror the
			// article branch instead.
			if dataPro.DelTag && Needid != RecordIdPro {
				err := DelteTageConnectP(st.Tag, upidt, Needid)
				if err != nil {
					JsLogger.Info("detail failure Product")
					return
				}
			} else {
				i++
				idList = append(idList, Needid)
				proCommonList = append(proCommonList, dataPro)
			}
			upidt = Needid
			Needid = TagNextA(st.Tag, upidt)
		}
		if !st.ListOrDetail {
			s.Forward("0", "success", proCommonList)
		}
	}
	if st.ListOrDetail {
		s.Forward("0", "success", idList)
	}
}
|
// Package pangram implements a solution for the exercise titled `Pangram'.
package pangram
// IsPangram reports whether sentence contains every letter of the English
// alphabet at least once, ignoring case. Non-letter runes are skipped.
func IsPangram(sentence string) bool {
	const all = 1<<26 - 1 // one bit per letter, all set
	var mask uint32
	for _, r := range sentence {
		switch {
		case 'a' <= r && r <= 'z':
			mask |= 1 << uint(r-'a')
		case 'A' <= r && r <= 'Z':
			mask |= 1 << uint(r-'A')
		}
		if mask == all {
			return true
		}
	}
	return false
}
|
package domain
import (
"errors"
"time"
"github.com/rpagliuca/serverless-book-reading-tracker/pkg/entity"
"github.com/rpagliuca/serverless-book-reading-tracker/pkg/persistence"
)
// ListOneEntry returns the single entry identified by UUID for username.
func ListOneEntry(username, UUID string) (entity.Entry, error) {
	return persistence.ListOneEntry(username, UUID)
}
// ListEntries returns every entry belonging to username.
func ListEntries(username string) ([]entity.Entry, error) {
	return persistence.ListEntries(username)
}
// InsertEntry stamps ownership, creation/modification timestamps and the
// initial version (1) on e, then persists it.
func InsertEntry(username string, e entity.Entry) error {
	now := time.Now()
	initialVersion := int64(1)
	e.Username = &username
	e.DateCreated = &now
	e.DateModified = &now
	e.Version = &initialVersion
	return persistence.InsertEntry(e)
}
// DeleteOneEntry removes the entry identified by UUID for username.
func DeleteOneEntry(username, UUID string) error {
	return persistence.DeleteOneEntry(username, UUID)
}
// PatchEntry persists only the properties of e that are set (non-nil),
// always bumping date_modified. Callers must supply a version, and at
// least one substantive property must be present.
func PatchEntry(username string, e entity.Entry) error {
	e.Username = &username
	now := time.Now()
	e.DateModified = &now
	if e.Version == nil {
		return errors.New("Property 'version' must be incremented for PATCH operations")
	}
	// Collect the patched properties in a fixed order.
	props := []entity.Property{}
	for _, candidate := range []struct {
		set  bool
		prop entity.Property
	}{
		{e.StartLocation != nil, entity.StartLocation},
		{e.StartTime != nil, entity.StartTime},
		{e.EndLocation != nil, entity.EndLocation},
		{e.EndTime != nil, entity.EndTime},
		{e.BookID != nil, entity.BookID},
	} {
		if candidate.set {
			props = append(props, candidate.prop)
		}
	}
	props = append(props, entity.Version, entity.DateModified)
	// version + date_modified alone are not a meaningful patch.
	if len(props) < 3 {
		return errors.New("At least one other property besides 'version' and 'date_modified' must be patched")
	}
	return persistence.PatchEntry(e, props)
}
|
package spanner
import (
"cloud.google.com/go/spanner"
"context"
"errors"
"fmt"
"os"
"time"
)
// healthCheckIntervalMins is the session health-check period in minutes.
const healthCheckIntervalMins = 50
// numChannels scales the session-pool ceiling (MaxOpened = numChannels * 100).
const numChannels = 4
// NewClient builds a Spanner client for
// projects/<projectID>/instances/<instance>/databases/<db> with an
// aggressively pre-warmed, write-prepared session pool.
func NewClient(ctx context.Context, projectID, instance, db string) (*spanner.Client, error) {
	dbPath := fmt.Sprintf("projects/%s/instances/%s/databases/%s", projectID, instance, db)
	client, err := spanner.NewClientWithConfig(ctx, dbPath,
		spanner.ClientConfig{
			SessionPoolConfig: spanner.SessionPoolConfig{
				MinOpened: 100,
				MaxOpened: numChannels * 100,
				MaxBurst: 10,
				WriteSessions: 1.0, // keep every session prepared for writes
				HealthCheckWorkers: 10,
				HealthCheckInterval: healthCheckIntervalMins * time.Minute,
			},
		})
	if err != nil {
		// Bug fix: the original discarded err entirely and returned a bare
		// static string, losing the underlying cause. Include it.
		return nil, errors.New("Failed to Create Spanner Client: " + err.Error())
	}
	return client, nil
}
// ConcreteNewClient builds a Spanner client from the SPANNER_PROJECT_ID,
// SPANNER_INSTANCE and SPANNER_DB environment variables, panicking when
// configuration or client creation fails.
func ConcreteNewClient(ctx context.Context) *spanner.Client {
	fmt.Println("client create start")
	SpannerProjectID, ok1 := os.LookupEnv("SPANNER_PROJECT_ID")
	SpannerInstance, ok2 := os.LookupEnv("SPANNER_INSTANCE")
	SpannerDB, ok3 := os.LookupEnv("SPANNER_DB")
	if !(ok1 && ok2 && ok3) {
		panic("環境変数がないね。")
	}
	c, err := NewClient(ctx, SpannerProjectID, SpannerInstance, SpannerDB)
	if err != nil {
		fmt.Println("client create NG")
		fmt.Printf("%v", err)
		// Bug fix: the original fell through here, printed "client create OK"
		// and returned a nil client, deferring the failure to first use.
		// Fail fast instead, consistent with the env-var handling above.
		panic(err)
	}
	fmt.Println("client create OK")
	return c
}
|
package server
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/go-chi/chi/v5/middleware"
"github.com/sirupsen/logrus"
)
// structuredLogger holds our application's instance of our logger and
// adapts it to chi's middleware.LogFormatter via NewLogEntry.
type structuredLogger struct {
	logger *logrus.Logger
}
// NewLogEntry returns a log entry scoped to the given http.Request,
// pre-populated with the request ID (when present), the HTTP method, the
// URI, and the GraphQL query/variables extracted from the body.
func (l *structuredLogger) NewLogEntry(r *http.Request) middleware.LogEntry {
	fields := logrus.Fields{
		"application": "pillbox-api",
		"http_method": r.Method,
		"uri":         r.RequestURI,
	}
	if reqID := middleware.GetReqID(r.Context()); reqID != "" {
		fields["req_id"] = reqID
	}
	printQueryAndVariables(r, fields)
	// remote_addr and user_agent are intentionally not logged.
	entry := &structuredLoggerEntry{logger: logrus.NewEntry(l.logger)}
	entry.logger = entry.logger.WithFields(fields)
	return entry
}
// printQueryAndVariables parses the request body as a GraphQL payload and,
// when present, adds a compacted one-line "query" plus the raw "variables"
// to logFields. The body is restored so downstream handlers can re-read it.
// Unreadable or non-JSON bodies are silently ignored.
func printQueryAndVariables(r *http.Request, logFields logrus.Fields) {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		return
	}
	// Put the consumed body back for the actual handler.
	r.Body = io.NopCloser(bytes.NewBuffer(body))
	var payload struct {
		Query     string          `json:"query"`
		Variables json.RawMessage `json:"variables"`
	}
	if err := json.Unmarshal(body, &payload); err != nil {
		return
	}
	if q := payload.Query; q != "" {
		// Strip all spaces, collapse newlines, then re-space separators so
		// the query reads as a single tidy line.
		q = strings.ReplaceAll(q, " ", "")
		q = strings.ReplaceAll(q, "\n", " ")
		q = strings.ReplaceAll(q, ":", ": ")
		q = strings.ReplaceAll(q, ",", ", ")
		logFields["query"] = q
	}
	if len(payload.Variables) > 0 {
		logFields["variables"] = string(payload.Variables)
	}
}
// structuredLoggerEntry holds our FieldLogger entry
type structuredLoggerEntry struct {
	// logger carries the request-scoped fields; handlers may append more
	// via LogEntrySetField / LogEntrySetFields.
	logger logrus.FieldLogger
}
// Write records the response status, body size and latency once the
// http.Request is complete, satisfying middleware.LogEntry.
func (l *structuredLoggerEntry) Write(status, bytes int, _ http.Header, elapsed time.Duration, extra interface{}) {
	l.logger.WithFields(logrus.Fields{
		"resp_status":       status,
		"resp_bytes_length": bytes,
		// Bug fix: field key was misspelled "resp_elasped_ms".
		// NOTE(review): confirm no dashboards/alerts query the old key.
		"resp_elapsed_ms": float64(elapsed.Nanoseconds()) / 1000000.0,
	}).Infoln("request complete")
}
// Panic attaches the panic stack and text to the log entry
// (middleware.LogEntry interface); chi's Recoverer invokes it.
func (l *structuredLoggerEntry) Panic(v interface{}, stack []byte) {
	l.logger.WithFields(logrus.Fields{
		"stack": string(stack),
		// %+v keeps any extended detail the panic value's formatter provides.
		"panic": fmt.Sprintf("%+v", v),
	}).Errorln("request panic'd")
}
// Helper methods used by the application to get the request-scoped
// logger entry and set additional fields between handlers.
//
// This is a useful pattern to use to set state on the entry as it
// passes through the handler chain, which at any point can be logged
// with a call to .Print(), .Info(), etc.
// GetLogEntry returns the request-scoped logger off of the http request.
// Note: the unchecked type assertion panics if the request did not pass
// through this package's logging middleware.
func GetLogEntry(r *http.Request) logrus.FieldLogger {
	entry := middleware.GetLogEntry(r).(*structuredLoggerEntry)
	return entry.logger
}
// LogEntrySetField will set a new field on a log entry. It is a no-op when
// the context carries no structuredLoggerEntry.
func LogEntrySetField(ctx context.Context, key string, value interface{}) {
	if entry, ok := ctx.Value(middleware.LogEntryCtxKey).(*structuredLoggerEntry); ok {
		entry.logger = entry.logger.WithField(key, value)
	}
}
// LogEntrySetFields will set a map of key/value pairs on a log entry.
// It is a no-op when the context carries no structuredLoggerEntry.
func LogEntrySetFields(ctx context.Context, fields map[string]interface{}) {
	if entry, ok := ctx.Value(middleware.LogEntryCtxKey).(*structuredLoggerEntry); ok {
		entry.logger = entry.logger.WithFields(fields)
	}
}
|
package main
import (
"io"
"os"
"time"
"strings"
"testing"
"io/ioutil"
"path/filepath"
)
// TestFile describes a fixture file: its slash-separated relative Name,
// its Contents, and its modification Date in "2006-01-02" layout ("" lets
// createTestFiles substitute the default date).
type TestFile struct {
	Name, Contents, Date string
}
// testFiles is the baseline fixture tree: a .ssync-test state file listing
// known paths, plus three content files dated 2018-01-01.
var testFiles = []*TestFile{
	{".ssync-test", ".ssync-test\ndir1\ndir1/dir2\ndir1/dir2/file3\n", ""},
	{"file1", "file1Contents", "2018-01-01"},
	{"dir1/file2", "file2Contents", "2018-01-01"},
	{"dir1/dir2/file3", "file3Contents", "2018-01-01"},
}
// testFiles2 is a diverged fixture tree: file1 is older with different
// contents, file4 and dir3/file5 are new, and file3 is newer with changed
// contents.
var testFiles2 = []*TestFile{
	{".ssync-test", ".ssync-test\ndir1\ndir1/dir2\ndir1/dir2/file3\n", ""},
	{"file1", "file1Contents2", "2017-01-01"},
	{"file4", "file4Contents", "2018-01-01"},
	{"dir3/file5", "file2Contents", "2018-01-01"},
	{"dir1/dir2/file3", "file3Contents2", "2018-02-01"},
}
// createTestFiles materializes files under a fresh temp directory,
// defaulting each blank Date to "2006-01-02", and returns the directory
// plus the absolute paths created. Entries with empty names are skipped.
// The caller is responsible for removing the directory.
func createTestFiles(t *testing.T, files []*TestFile) (string, []string) {
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatal(err)
	}
	paths := []string{}
	for _, file := range files {
		if file.Name == "" {
			continue
		}
		// Fall back to the default date when none is given.
		if file.Date == "" {
			file.Date = "2006-01-02"
		}
		writeFile(t, dir, file)
		paths = append(paths, filepath.Join(dir, file.Name))
	}
	return dir, paths
}
func tmpFile(t *testing.T, input string, f func(in *os.File)) {
in, err := ioutil.TempFile("", "")
if err != nil {
t.Fatal(err)
}
defer os.Remove(in.Name())
defer in.Close()
_, err = io.WriteString(in, input)
if err != nil {
t.Fatal(err)
}
_, _ = in.Seek(0, os.SEEK_SET)
f(in)
}
// writeFile creates testFile under path, making any parent directories,
// writing its contents with mode 0644, and setting both atime and mtime
// from its Date (layout "2006-01-02"). Blank filenames are ignored.
func writeFile(t *testing.T, path string, testFile *TestFile) {
	parts := strings.Split(testFile.Name, "/")
	// Nothing to do for a blank filename.
	if len(parts) == 0 || parts[len(parts)-1] == "" {
		return
	}
	// Ensure parent directories exist.
	dir := filepath.Join(path, filepath.Join(parts[:len(parts)-1]...))
	if len(parts) > 1 {
		if err := os.MkdirAll(dir, 0777); err != nil {
			t.Fatal(err)
		}
	}
	// Write the file itself.
	full := filepath.Join(dir, parts[len(parts)-1])
	if err := ioutil.WriteFile(full, []byte(testFile.Contents), 0644); err != nil {
		t.Fatal(err)
	}
	// Parse and apply the modification timestamp.
	mod, err := time.Parse("2006-01-02", testFile.Date)
	if err != nil {
		t.Fatal(err)
	}
	if err := os.Chtimes(full, mod, mod); err != nil {
		t.Fatal(err)
	}
}
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package gossip
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/metric"
)
// TestNodeSetMaxSize verifies a one-slot node set reports space before an
// insert and none after.
func TestNodeSetMaxSize(t *testing.T) {
	defer leaktest.AfterTest(t)()
	set := makeNodeSet(1, metric.NewGauge(metric.Metadata{Name: ""}))
	if !set.hasSpace() {
		t.Error("set should have space")
	}
	set.addNode(roachpb.NodeID(1))
	if set.hasSpace() {
		t.Error("set should have no space")
	}
}
// TestNodeSetHasNode verifies hasNode is false before addNode and true
// afterwards.
func TestNodeSetHasNode(t *testing.T) {
	defer leaktest.AfterTest(t)()
	nodes := makeNodeSet(2, metric.NewGauge(metric.Metadata{Name: ""}))
	node := roachpb.NodeID(1)
	if nodes.hasNode(node) {
		t.Error("node wasn't added and should not be valid")
	}
	// Add node and verify it's valid.
	nodes.addNode(node)
	if !nodes.hasNode(node) {
		// Bug fix: the previous message ("node wasn't added and should not
		// be valid") described the opposite of the failing condition.
		t.Error("node was added and should be valid")
	}
}
// TestNodeSetAddAndRemoveNode verifies removals affect only the targeted
// node.
func TestNodeSetAddAndRemoveNode(t *testing.T) {
	defer leaktest.AfterTest(t)()
	set := makeNodeSet(2, metric.NewGauge(metric.Metadata{Name: ""}))
	first, second := roachpb.NodeID(1), roachpb.NodeID(2)
	set.addNode(first)
	set.addNode(second)
	if !set.hasNode(first) || !set.hasNode(second) {
		t.Error("failed to locate added nodes")
	}
	set.removeNode(first)
	if set.hasNode(first) || !set.hasNode(second) {
		t.Error("failed to remove node0", set)
	}
	set.removeNode(second)
	if set.hasNode(first) || set.hasNode(second) {
		t.Error("failed to remove node1", set)
	}
}
// TestNodeSetFilter verifies filter keeps exactly the nodes matching the
// predicate (here: the set difference nodes1 \ nodes2).
func TestNodeSetFilter(t *testing.T) {
	defer leaktest.AfterTest(t)()
	base := makeNodeSet(2, metric.NewGauge(metric.Metadata{Name: ""}))
	first, second := roachpb.NodeID(1), roachpb.NodeID(2)
	base.addNode(first)
	base.addNode(second)
	exclude := makeNodeSet(1, metric.NewGauge(metric.Metadata{Name: ""}))
	exclude.addNode(second)
	filtered := base.filter(func(id roachpb.NodeID) bool {
		return !exclude.hasNode(id)
	})
	if filtered.len() != 1 || filtered.hasNode(second) || !filtered.hasNode(first) {
		t.Errorf("expected filter to leave node0: %+v", filtered)
	}
}
// TestNodeSetAsSlice verifies asSlice returns both members, in either order.
func TestNodeSetAsSlice(t *testing.T) {
	defer leaktest.AfterTest(t)()
	set := makeNodeSet(2, metric.NewGauge(metric.Metadata{Name: ""}))
	first, second := roachpb.NodeID(1), roachpb.NodeID(2)
	set.addNode(first)
	set.addNode(second)
	members := set.asSlice()
	if len(members) != 2 {
		t.Error("expected slice of length 2:", members)
	}
	if (members[0] != first && members[0] != second) ||
		(members[1] != second && members[1] != first) {
		t.Error("expected slice to contain both node0 and node1:", members)
	}
}
|
package datasource
// WritableDataSource is a DataSource that can also persist values.
type WritableDataSource interface {
	DataSource
	// Store saves value under key, returning any persistence error.
	Store(key string, value interface{}) error
}
|
package math
import "fmt"
// Add prints the sum of a and b to stdout.
// NOTE(review): the result is printed rather than returned, so callers
// cannot obtain the value programmatically — confirm intended.
func Add(a int64, b int64) {
	fmt.Println(a + b)
}
// Sub prints the difference a - b to stdout.
// NOTE(review): prints rather than returns, mirroring Add — confirm intended.
func Sub(a int64, b int64) {
	fmt.Println(a - b)
}
|
package tests
import (
"testing"
"reflect"
. "github.com/go-dash/slice/tests/types"
"github.com/go-dash/slice/_string"
"github.com/go-dash/slice/_int"
"github.com/go-dash/slice/_Person" // github.com/go-dash/slice/tests/types
"strings"
)
// tableFilterString drives TestFilterString: output keeps only the inputs
// that do NOT start with "a". A nil input must yield an empty (non-nil)
// slice.
var tableFilterString = []struct {
	input []string
	output []string
}{
	{nil, []string{}},
	{[]string{}, []string{}},
	{[]string{"aaa", "aaa", "aaa"}, []string{}},
	{[]string{"aa", "bb", "aa", "cc", "bb"}, []string{"bb", "cc", "bb"}},
}
// TestFilterString exercises both the direct and the chained string
// Filter, dropping values that start with "a".
func TestFilterString(t *testing.T) {
	keepNonA := func(s string, index int) bool {
		return !strings.HasPrefix(s, "a")
	}
	for _, tc := range tableFilterString {
		direct := _string.Filter(tc.input, keepNonA)
		if !reflect.DeepEqual(direct, tc.output) {
			t.Fatalf("Expected %v received %v", tc.output, direct)
		}
		chained := _string.Chain(tc.input).Filter(keepNonA).Value()
		if !reflect.DeepEqual(chained, tc.output) {
			t.Fatalf("Expected %v received %v", tc.output, chained)
		}
	}
}
// tableFilterInt drives TestFilterInt: output keeps only the even inputs.
// A nil input must yield an empty (non-nil) slice.
var tableFilterInt = []struct {
	input []int
	output []int
}{
	{nil, []int{}},
	{[]int{}, []int{}},
	{[]int{1, 1, 1}, []int{}},
	{[]int{1, 2, 1, 3, 2}, []int{2, 2}},
}
// TestFilterInt exercises both the direct and the chained int Filter,
// keeping only even values.
func TestFilterInt(t *testing.T) {
	isEven := func(n int, index int) bool {
		return n % 2 == 0
	}
	for _, tc := range tableFilterInt {
		direct := _int.Filter(tc.input, isEven)
		if !reflect.DeepEqual(direct, tc.output) {
			t.Fatalf("Expected %v received %v", tc.output, direct)
		}
		chained := _int.Chain(tc.input).Filter(isEven).Value()
		if !reflect.DeepEqual(chained, tc.output) {
			t.Fatalf("Expected %v received %v", tc.output, chained)
		}
	}
}
// tableFilterPerson drives TestFilterPerson: output keeps only persons
// with Age >= 18. A nil input must yield an empty (non-nil) slice.
var tableFilterPerson = []struct {
	input []Person
	output []Person
}{
	{nil, []Person{}},
	{[]Person{}, []Person{}},
	// Idiom (gofmt -s): the redundant inner Person type is elided from
	// the composite literals.
	{[]Person{{"aa", 18}, {"aa", 18}}, []Person{{"aa", 18}, {"aa", 18}}},
	{[]Person{{"aa", 18}, {"aa", 17}, {"aa", 18}, {"bb", 16}, {"aa", 17}}, []Person{{"aa", 18}, {"aa", 18}}},
}
func TestFilterPerson(t *testing.T) {
for _, tt := range tableFilterPerson {
res := _Person.Filter(tt.input, func (p Person, index int) bool {
return p.Age >= 18
})
if !reflect.DeepEqual(res, tt.output) {
t.Fatalf("Expected %v received %v", tt.output, res)
}
res = _Person.Chain(tt.input).Filter(func (p Person, index int) bool {
return p.Age >= 18
}).Value()
if !reflect.DeepEqual(res, tt.output) {
t.Fatalf("Expected %v received %v", tt.output, res)
}
}
} |
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowexec
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/backfill"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
)
// indexBackfiller is a processor that backfills new indexes.
type indexBackfiller struct {
	backfill.IndexBackfiller
	// adder buffers built index entries for bulk ingestion; assigned in
	// ingestIndexEntries.
	adder kvserverbase.BulkAdder
	desc catalog.TableDescriptor
	spec execinfrapb.BackfillerSpec
	// out produces no rows (OutputTypes returns nil); initialized in Run.
	out execinfra.ProcOutputHelper
	flowCtx *execinfra.FlowCtx
	output execinfra.RowReceiver
	// filter selects which mutations this processor backfills
	// (backfill.IndexMutationFilter).
	filter backfill.MutationFilter
}
// Compile-time assertion that indexBackfiller implements execinfra.Processor.
var _ execinfra.Processor = &indexBackfiller{}
// Cluster settings controlling BulkAdder buffering during index backfills.
var backfillerBufferSize = settings.RegisterByteSizeSetting(
	"schemachanger.backfiller.buffer_size", "the initial size of the BulkAdder buffer handling index backfills", 32<<20,
)
var backfillerMaxBufferSize = settings.RegisterByteSizeSetting(
	"schemachanger.backfiller.max_buffer_size", "the maximum size of the BulkAdder buffer handling index backfills", 512<<20,
)
var backfillerBufferIncrementSize = settings.RegisterByteSizeSetting(
	"schemachanger.backfiller.buffer_increment", "the size by which the BulkAdder attempts to grow its buffer before flushing", 32<<20,
)
// NOTE(review): "backiller" is missing an "f"; the rename is deferred since
// the var may be referenced elsewhere in this package.
var backillerSSTSize = settings.RegisterByteSizeSetting(
	"schemachanger.backfiller.max_sst_size", "target size for ingested files during backfills", 16<<20,
)
// newIndexBackfiller constructs an indexBackfiller processor for the given
// spec and initializes its embedded backfill.IndexBackfiller for
// distributed use with a dedicated memory monitor.
// NOTE(review): processorID and post are currently unused here — presumably
// required by the processor-constructor signature; confirm.
func newIndexBackfiller(
	ctx context.Context,
	flowCtx *execinfra.FlowCtx,
	processorID int32,
	spec execinfrapb.BackfillerSpec,
	post *execinfrapb.PostProcessSpec,
	output execinfra.RowReceiver,
) (*indexBackfiller, error) {
	indexBackfillerMon := execinfra.NewMonitor(ctx, flowCtx.Cfg.BackfillerMonitor,
		"index-backfill-mon")
	ib := &indexBackfiller{
		desc: spec.BuildTableDescriptor(),
		spec: spec,
		flowCtx: flowCtx,
		output: output,
		filter: backfill.IndexMutationFilter,
	}
	if err := ib.IndexBackfiller.InitForDistributedUse(ctx, flowCtx, ib.desc,
		indexBackfillerMon); err != nil {
		return nil, err
	}
	return ib, nil
}
// OutputTypes implements execinfra.Processor; the backfiller emits no rows,
// only progress metadata.
func (ib *indexBackfiller) OutputTypes() []*types.T {
	// No output types.
	return nil
}
// indexEntryBatch represents a "batch" of index entries which are constructed
// and sent for ingestion. Breaking up the index entries into these batches
// serves for better progress reporting as explained in the ingestIndexEntries
// method.
type indexEntryBatch struct {
	indexEntries []rowenc.IndexEntry
	// completedSpan is the portion of the source span covered by this batch;
	// reported as progress once the batch is ingested.
	completedSpan roachpb.Span
	// memUsedBuildingBatch is the memory accounted while building the batch,
	// released after its KVs are handed to the BulkAdder.
	memUsedBuildingBatch int64
}
// constructIndexEntries is responsible for constructing the index entries of
// all the spans assigned to the processor. It streams batches of constructed
// index entries over the indexEntriesCh.
func (ib *indexBackfiller) constructIndexEntries(
	ctx context.Context, indexEntriesCh chan indexEntryBatch,
) error {
	var memUsedBuildingBatch int64
	var err error
	var entries []rowenc.IndexEntry
	for i := range ib.spec.Spans {
		log.VEventf(ctx, 2, "index backfiller starting span %d of %d: %s",
			i+1, len(ib.spec.Spans), ib.spec.Spans[i].Span)
		todo := ib.spec.Spans[i].Span
		// buildIndexEntryBatch advances todo.Key; a nil key means the span
		// has been fully processed.
		for todo.Key != nil {
			startKey := todo.Key
			todo.Key, entries, memUsedBuildingBatch, err = ib.buildIndexEntryBatch(ctx, todo,
				ib.spec.ReadAsOf)
			if err != nil {
				return err
			}
			// Identify the Span for which we have constructed index entries. This is
			// used for reporting progress and updating the job details.
			completedSpan := ib.spec.Spans[i].Span
			// When the span is only partially done, narrow the reported span
			// to the [startKey, resume-key) portion covered by this batch.
			if todo.Key != nil {
				completedSpan.Key = startKey
				completedSpan.EndKey = todo.Key
			}
			log.VEventf(ctx, 2, "index entries built for span %s", completedSpan)
			indexBatch := indexEntryBatch{completedSpan: completedSpan, indexEntries: entries,
				memUsedBuildingBatch: memUsedBuildingBatch}
			// Send index entries to be ingested into storage.
			select {
			case indexEntriesCh <- indexBatch:
			case <-ctx.Done():
				return ctx.Err()
			}
			knobs := ib.flowCtx.Cfg.TestingKnobs
			// Block until the current index entry batch has been ingested. Ingested
			// does not mean written to storage, unless we force a flush after every
			// batch.
			if knobs.SerializeIndexBackfillCreationAndIngestion != nil {
				<-knobs.SerializeIndexBackfillCreationAndIngestion
			}
		}
	}
	return nil
}
// ingestIndexEntries adds the batches of built index entries to the buffering
// adder and reports progress back to the coordinator node.
func (ib *indexBackfiller) ingestIndexEntries(
	ctx context.Context,
	indexEntryCh <-chan indexEntryBatch,
	progCh chan execinfrapb.RemoteProducerMetadata_BulkProcessorProgress,
) error {
	ctx, span := tracing.ChildSpan(ctx, "ingestIndexEntries")
	defer span.Finish()
	// Buffer sizing comes from cluster settings; max/sst sizes are closures
	// so later setting changes are picked up by the adder.
	minBufferSize := backfillerBufferSize.Get(&ib.flowCtx.Cfg.Settings.SV)
	maxBufferSize := func() int64 { return backfillerMaxBufferSize.Get(&ib.flowCtx.Cfg.Settings.SV) }
	sstSize := func() int64 { return backillerSSTSize.Get(&ib.flowCtx.Cfg.Settings.SV) }
	stepSize := backfillerBufferIncrementSize.Get(&ib.flowCtx.Cfg.Settings.SV)
	opts := kvserverbase.BulkAdderOptions{
		SSTSize: sstSize,
		MinBufferSize: minBufferSize,
		MaxBufferSize: maxBufferSize,
		StepBufferSize: stepSize,
		// Duplicates are tolerated only when an inverted index is present.
		SkipDuplicates: ib.ContainsInvertedIndex(),
	}
	adder, err := ib.flowCtx.Cfg.BulkAdder(ctx, ib.flowCtx.Cfg.DB, ib.spec.ReadAsOf, opts)
	if err != nil {
		return err
	}
	ib.adder = adder
	defer ib.adder.Close(ctx)
	// Synchronizes read and write access on completedSpans which is updated on a
	// BulkAdder flush, but is read when progress is being sent back to the
	// coordinator.
	mu := struct {
		syncutil.Mutex
		completedSpans []roachpb.Span
		addedSpans []roachpb.Span
	}{}
	// When the bulk adder flushes, the spans which were previously marked as
	// "added" can now be considered "completed", and be sent back to the
	// coordinator node as part of the next progress report.
	adder.SetOnFlush(func() {
		mu.Lock()
		defer mu.Unlock()
		mu.completedSpans = append(mu.completedSpans, mu.addedSpans...)
		mu.addedSpans = nil
	})
	// pushProgress drains the completed spans into a progress message and
	// sends it to the coordinator (blocking send on progCh).
	pushProgress := func() {
		mu.Lock()
		var prog execinfrapb.RemoteProducerMetadata_BulkProcessorProgress
		prog.CompletedSpans = append(prog.CompletedSpans, mu.completedSpans...)
		mu.completedSpans = nil
		mu.Unlock()
		progCh <- prog
	}
	// stopProgress will be closed when there is no more progress to report.
	stopProgress := make(chan struct{})
	g := ctxgroup.WithContext(ctx)
	// Goroutine 1: periodically report progress until ingestion finishes.
	g.GoCtx(func(ctx context.Context) error {
		tick := time.NewTicker(ib.getProgressReportInterval())
		defer tick.Stop()
		done := ctx.Done()
		for {
			select {
			case <-done:
				return ctx.Err()
			case <-stopProgress:
				return nil
			case <-tick.C:
				pushProgress()
			}
		}
	})
	// Goroutine 2: drain indexEntryCh into the bulk adder.
	g.GoCtx(func(ctx context.Context) error {
		defer close(stopProgress)
		for indexBatch := range indexEntryCh {
			for _, indexEntry := range indexBatch.indexEntries {
				if err := ib.adder.Add(ctx, indexEntry.Key, indexEntry.Value.RawBytes); err != nil {
					return ib.wrapDupError(ctx, err)
				}
			}
			// Once ALL the KVs for an indexBatch have been added, we can consider the
			// span representing this indexBatch as "added". This span will be part of
			// the set of completed spans on the next bulk adder flush.
			mu.Lock()
			mu.addedSpans = append(mu.addedSpans, indexBatch.completedSpan)
			mu.Unlock()
			// After the index KVs have been copied to the underlying BulkAdder, we can
			// free the memory which was accounted when building the index entries of the
			// current chunk.
			indexBatch.indexEntries = nil
			ib.ShrinkBoundAccount(ctx, indexBatch.memUsedBuildingBatch)
			knobs := &ib.flowCtx.Cfg.TestingKnobs
			if knobs.BulkAdderFlushesEveryBatch {
				if err := ib.adder.Flush(ctx); err != nil {
					return ib.wrapDupError(ctx, err)
				}
				pushProgress()
			}
			if knobs.RunAfterBackfillChunk != nil {
				knobs.RunAfterBackfillChunk()
			}
			// Unblock the index creation of the next batch once it has been ingested.
			if knobs.SerializeIndexBackfillCreationAndIngestion != nil {
				knobs.SerializeIndexBackfillCreationAndIngestion <- struct{}{}
			}
		}
		return nil
	})
	if err := g.Wait(); err != nil {
		return err
	}
	// Final flush so every added span becomes completed before the last report.
	if err := ib.adder.Flush(ctx); err != nil {
		return ib.wrapDupError(ctx, err)
	}
	// Push the final set of completed spans as progress.
	pushProgress()
	return nil
}
// runBackfill constructs index entries for the processor's assigned spans
// and ingests them into storage, reporting progress on progCh. It returns
// only after both the producer and consumer goroutines have finished.
func (ib *indexBackfiller) runBackfill(
	ctx context.Context, progCh chan execinfrapb.RemoteProducerMetadata_BulkProcessorProgress,
) error {
	// Used to send index entries to the KV layer. Buffered so the producer
	// can run slightly ahead of ingestion.
	indexEntriesCh := make(chan indexEntryBatch, 10)
	// This group holds the go routines that are responsible for producing index
	// entries and ingesting the KVs into storage.
	group := ctxgroup.WithContext(ctx)
	// Construct index entries for the spans.
	group.GoCtx(func(ctx context.Context) error {
		// Closing the channel signals the ingesting goroutine that no more
		// batches are coming; it must happen even on error, hence the defer.
		defer close(indexEntriesCh)
		ctx, span := tracing.ChildSpan(ctx, "buildIndexEntries")
		defer span.Finish()
		err := ib.constructIndexEntries(ctx, indexEntriesCh)
		if err != nil {
			return errors.Wrap(err, "failed to construct index entries during backfill")
		}
		return nil
	})
	// Ingest the index entries that are emitted to the chan.
	group.GoCtx(func(ctx context.Context) error {
		err := ib.ingestIndexEntries(ctx, indexEntriesCh, progCh)
		if err != nil {
			return errors.Wrap(err, "failed to ingest index entries during backfill")
		}
		return nil
	})
	if err := group.Wait(); err != nil {
		return err
	}
	return nil
}
// Run is the processor entry point. It starts the backfill in a goroutine,
// forwards every progress update received on progCh to the output, and
// finally pushes the backfill error, if any, as producer metadata.
func (ib *indexBackfiller) Run(ctx context.Context) {
	opName := "indexBackfillerProcessor"
	ctx = logtags.AddTag(ctx, opName, int(ib.spec.Table.ID))
	ctx, span := execinfra.ProcessorSpan(ctx, opName)
	defer span.Finish()
	defer ib.output.ProducerDone()
	defer execinfra.SendTraceData(ctx, ib.output)
	defer ib.Close(ctx)
	progCh := make(chan execinfrapb.RemoteProducerMetadata_BulkProcessorProgress)
	semaCtx := tree.MakeSemaContext()
	if err := ib.out.Init(&execinfrapb.PostProcessSpec{}, nil, &semaCtx, ib.flowCtx.NewEvalCtx(),
		ib.output); err != nil {
		ib.output.Push(nil, &execinfrapb.ProducerMetadata{Err: err})
		return
	}
	var err error
	// We don't have to worry about this go routine leaking because next we loop
	// over progCh which is closed only after the go routine returns.
	// The same close also orders the write to err before the read below.
	go func() {
		defer close(progCh)
		err = ib.runBackfill(ctx, progCh)
	}()
	for prog := range progCh {
		// Take a copy so that we can send the progress address to the output processor.
		p := prog
		if p.CompletedSpans != nil {
			log.VEventf(ctx, 2, "sending coordinator completed spans: %+v", p.CompletedSpans)
		}
		ib.output.Push(nil, &execinfrapb.ProducerMetadata{BulkProcessorProgress: &p})
	}
	if err != nil {
		ib.output.Push(nil, &execinfrapb.ProducerMetadata{Err: err})
		return
	}
}
// wrapDupError converts a bulk-adder duplicate-key error into a user-facing
// uniqueness-constraint violation. A nil error, or any error that is not a
// DuplicateKeyError, is returned unchanged.
func (ib *indexBackfiller) wrapDupError(ctx context.Context, orig error) error {
	var dup *kvserverbase.DuplicateKeyError
	// errors.As returns false for a nil error, so both pass-through cases
	// collapse into one guard.
	if orig == nil || !errors.As(orig, &dup) {
		return orig
	}
	desc, err := ib.desc.MakeFirstMutationPublic(catalog.IncludeConstraints)
	if err != nil {
		return err
	}
	value := &roachpb.Value{RawBytes: dup.Value}
	return row.NewUniquenessConstraintViolationError(ctx, desc, dup.Key, value)
}
// indexBackfillProgressReportInterval is the default cadence at which the
// backfiller pushes completed-span progress to the coordinator.
const indexBackfillProgressReportInterval = 10 * time.Second

// getProgressReportInterval returns the progress-report cadence, preferring
// a positive testing-knob override when one is configured.
func (ib *indexBackfiller) getProgressReportInterval() time.Duration {
	knobs := &ib.flowCtx.Cfg.TestingKnobs
	if knobs.IndexBackfillProgressReportInterval > 0 {
		return knobs.IndexBackfillProgressReportInterval
	}
	return indexBackfillProgressReportInterval
}
// buildIndexEntryBatch constructs the index entries for a single indexBatch.
// It returns the resume key for the chunk, the entries built, and the memory
// (bytes) accounted while building them.
func (ib *indexBackfiller) buildIndexEntryBatch(
	tctx context.Context, sp roachpb.Span, readAsOf hlc.Timestamp,
) (roachpb.Key, []rowenc.IndexEntry, int64, error) {
	knobs := &ib.flowCtx.Cfg.TestingKnobs
	var memUsedBuildingBatch int64
	if knobs.RunBeforeBackfillChunk != nil {
		if err := knobs.RunBeforeBackfillChunk(sp); err != nil {
			return nil, nil, 0, err
		}
	}
	var key roachpb.Key
	ctx, traceSpan := tracing.ChildSpan(tctx, "indexBatch")
	defer traceSpan.Finish()
	start := timeutil.Now()
	var entries []rowenc.IndexEntry
	if err := ib.flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		// Pin the read to readAsOf so every chunk of the backfill observes a
		// consistent snapshot of the table.
		txn.SetFixedTimestamp(ctx, readAsOf)
		// TODO(knz): do KV tracing in DistSQL processors.
		var err error
		entries, key, memUsedBuildingBatch, err = ib.BuildIndexEntriesChunk(ctx, txn, ib.desc, sp,
			ib.spec.ChunkSize, false /*traceKV*/)
		return err
	}); err != nil {
		return nil, nil, 0, err
	}
	prepTime := timeutil.Now().Sub(start)
	log.VEventf(ctx, 3, "index backfill stats: entries %d, prepare %+v",
		len(entries), prepTime)
	return key, entries, memUsedBuildingBatch, nil
}
|
package main
import (
"os"
gobzip "github.com/shaban/kengal/gobzip"
"time"
)
// Collection types: each is a slice of pointers to one of the content
// entities defined later in this file. They implement the gobzip
// serialization interfaces via the Kind/New/All/NewKey/At/Insert/Replace/
// Init/Keys methods below.
type Articles []*Article
type Rubrics []*Rubric
type Blogs []*Blog
type Themes []*Theme
type Resources []*Resource
type Globals []*Global
// Len reports the number of articles (part of the sort interface).
func (ser Articles) Len() int {
	return len(ser)
}

// Less orders articles by Date, newest first.
// NOTE(review): time.Parse errors are ignored, so malformed dates compare
// as the zero time. Time.Seconds is a pre-Go1 API — this file predates
// modern Go; do not modernize without updating the whole file.
func (ser Articles) Less(i, j int) bool {
	it, _ := time.Parse("02.01.2006 15:04:05", ser[i].Date)
	jt, _ := time.Parse("02.01.2006 15:04:05", ser[j].Date)
	return jt.Seconds() < it.Seconds()
}
// Swap exchanges the articles at positions i and j in place (part of the
// sort interface).
func (ser Articles) Swap(i, j int) {
	// Parallel assignment swaps the two pointers directly; the original
	// version allocated a one-element temporary slice to do the same thing.
	ser[i], ser[j] = ser[j], ser[i]
}
// Key returns the entity's unique ID.
func (s *Article) Key() int {
	return s.ID
}

// Key returns the entity's unique ID.
func (s *Blog) Key() int {
	return s.ID
}

// Key returns the entity's unique ID.
func (s *Global) Key() int {
	return s.ID
}

// Key returns the entity's unique ID.
func (s *Resource) Key() int {
	return s.ID
}

// Key returns the entity's unique ID.
func (s *Rubric) Key() int {
	return s.ID
}

// Key returns the entity's unique ID.
func (s *Theme) Key() int {
	return s.ID
}

// Kind names the collection an Article belongs to.
func (s *Article) Kind() string {
	return "articles"
}

// Kind names the collection a Blog belongs to.
func (s *Blog) Kind() string {
	return "blogs"
}

// Kind names the collection a Global belongs to.
func (s *Global) Kind() string {
	return "globals"
}

// Kind names the collection a Resource belongs to.
func (s *Resource) Kind() string {
	return "resources"
}

// Kind names the collection a Rubric belongs to.
func (s *Rubric) Kind() string {
	return "rubrics"
}

// Kind names the collection a Theme belongs to.
func (s *Theme) Kind() string {
	return "themes"
}

// Kind names this collection.
func (ser Articles) Kind() string {
	return "articles"
}

// Kind names this collection.
func (ser Blogs) Kind() string {
	return "blogs"
}

// Kind names this collection.
func (ser Globals) Kind() string {
	return "globals"
}

// Kind names this collection.
func (ser Resources) Kind() string {
	return "resources"
}

// Kind names this collection.
func (ser Rubrics) Kind() string {
	return "rubrics"
}

// Kind names this collection.
func (ser Themes) Kind() string {
	return "themes"
}
// New allocates an empty element of this collection's entity type.
func (ser Articles) New() gobzip.Serial {
	return new(Article)
}

// New allocates an empty element of this collection's entity type.
func (ser Blogs) New() gobzip.Serial {
	return new(Blog)
}

// New allocates an empty element of this collection's entity type.
func (ser Globals) New() gobzip.Serial {
	return new(Global)
}

// New allocates an empty element of this collection's entity type.
func (ser Resources) New() gobzip.Serial {
	return new(Resource)
}

// New allocates an empty element of this collection's entity type.
func (ser Rubrics) New() gobzip.Serial {
	return new(Rubric)
}

// New allocates an empty element of this collection's entity type.
func (ser Themes) New() gobzip.Serial {
	return new(Theme)
}

// All installs the deserialized collection into the package-level View.
func (ser Articles) All(ins gobzip.Serializer) {
	View.Articles = ins.(Articles)
}

// All installs the deserialized collection into the package-level View.
func (ser Blogs) All(ins gobzip.Serializer) {
	View.Blogs = ins.(Blogs)
}

// All installs the deserialized collection into the package-level View.
func (ser Globals) All(ins gobzip.Serializer) {
	View.Globals = ins.(Globals)
}

// All installs the deserialized collection into the package-level View.
func (ser Resources) All(ins gobzip.Serializer) {
	View.Resources = ins.(Resources)
}

// All installs the deserialized collection into the package-level View.
func (ser Rubrics) All(ins gobzip.Serializer) {
	View.Rubrics = ins.(Rubrics)
}

// All installs the deserialized collection into the package-level View.
func (ser Themes) All(ins gobzip.Serializer) {
	View.Themes = ins.(Themes)
}
// NewKey returns the highest existing ID plus one (1 for an empty collection).
func (ser Articles) NewKey() int {
	id := 0
	for _, v := range ser {
		if v.ID > id {
			id = v.ID
		}
	}
	return id + 1
}

// NewKey returns the highest existing ID plus one (1 for an empty collection).
func (ser Blogs) NewKey() int {
	id := 0
	for _, v := range ser {
		if v.ID > id {
			id = v.ID
		}
	}
	return id + 1
}

// NewKey returns the highest existing ID plus one (1 for an empty collection).
func (ser Globals) NewKey() int {
	id := 0
	for _, v := range ser {
		if v.ID > id {
			id = v.ID
		}
	}
	return id + 1
}

// NewKey returns the highest existing ID plus one (1 for an empty collection).
func (ser Resources) NewKey() int {
	id := 0
	for _, v := range ser {
		if v.ID > id {
			id = v.ID
		}
	}
	return id + 1
}

// NewKey returns the highest existing ID plus one (1 for an empty collection).
func (ser Rubrics) NewKey() int {
	id := 0
	for _, v := range ser {
		if v.ID > id {
			id = v.ID
		}
	}
	return id + 1
}

// NewKey returns the highest existing ID plus one (1 for an empty collection).
func (ser Themes) NewKey() int {
	id := 0
	for _, v := range ser {
		if v.ID > id {
			id = v.ID
		}
	}
	return id + 1
}
// At returns the element whose ID equals key, or nil when absent.
func (ser Articles) At(key int) gobzip.Serial {
	for k, v := range ser {
		if v.ID == key {
			return ser[k]
		}
	}
	return nil
}

// At returns the element whose ID equals key, or nil when absent.
func (ser Blogs) At(key int) gobzip.Serial {
	for k, v := range ser {
		if v.ID == key {
			return ser[k]
		}
	}
	return nil
}

// At returns the element whose ID equals key, or nil when absent.
func (ser Globals) At(key int) gobzip.Serial {
	for k, v := range ser {
		if v.ID == key {
			return ser[k]
		}
	}
	return nil
}

// At returns the element whose ID equals key, or nil when absent.
func (ser Resources) At(key int) gobzip.Serial {
	for k, v := range ser {
		if v.ID == key {
			return ser[k]
		}
	}
	return nil
}

// At returns the element whose ID equals key, or nil when absent.
func (ser Rubrics) At(key int) gobzip.Serial {
	for k, v := range ser {
		if v.ID == key {
			return ser[k]
		}
	}
	return nil
}

// At returns the element whose ID equals key, or nil when absent.
func (ser Themes) At(key int) gobzip.Serial {
	for k, v := range ser {
		if v.ID == key {
			return ser[k]
		}
	}
	return nil
}
// Insert appends s and returns the grown collection; append may reallocate,
// so callers must keep the returned value.
func (ser Articles) Insert(s gobzip.Serial) gobzip.Serializer {
	ser = append(ser, s.(*Article))
	return ser
}

// Insert appends s and returns the grown collection.
func (ser Blogs) Insert(s gobzip.Serial) gobzip.Serializer {
	ser = append(ser, s.(*Blog))
	return ser
}

// Insert appends s and returns the grown collection.
func (ser Globals) Insert(s gobzip.Serial) gobzip.Serializer {
	ser = append(ser, s.(*Global))
	return ser
}

// Insert appends s and returns the grown collection.
func (ser Resources) Insert(s gobzip.Serial) gobzip.Serializer {
	ser = append(ser, s.(*Resource))
	return ser
}

// Insert appends s and returns the grown collection.
func (ser Rubrics) Insert(s gobzip.Serial) gobzip.Serializer {
	ser = append(ser, s.(*Rubric))
	return ser
}

// Insert appends s and returns the grown collection.
func (ser Themes) Insert(s gobzip.Serial) gobzip.Serializer {
	ser = append(ser, s.(*Theme))
	return ser
}
// Replace overwrites the element whose ID matches s.Key(); it returns
// os.ENOENT when no element matches. (os.Error/os.ENOENT are pre-Go1 APIs.)
func (ser Articles) Replace(s gobzip.Serial) os.Error {
	for k, v := range ser {
		if v.ID == s.Key() {
			ser[k] = s.(*Article)
			return nil
		}
	}
	return os.ENOENT
}

// Replace overwrites the element whose ID matches s.Key().
func (ser Blogs) Replace(s gobzip.Serial) os.Error {
	for k, v := range ser {
		if v.ID == s.Key() {
			ser[k] = s.(*Blog)
			return nil
		}
	}
	return os.ENOENT
}

// Replace overwrites the element whose ID matches s.Key().
func (ser Globals) Replace(s gobzip.Serial) os.Error {
	for k, v := range ser {
		if v.ID == s.Key() {
			ser[k] = s.(*Global)
			return nil
		}
	}
	return os.ENOENT
}

// Replace overwrites the element whose ID matches s.Key().
func (ser Resources) Replace(s gobzip.Serial) os.Error {
	for k, v := range ser {
		if v.ID == s.Key() {
			ser[k] = s.(*Resource)
			return nil
		}
	}
	return os.ENOENT
}

// Replace overwrites the element whose ID matches s.Key().
func (ser Rubrics) Replace(s gobzip.Serial) os.Error {
	for k, v := range ser {
		if v.ID == s.Key() {
			ser[k] = s.(*Rubric)
			return nil
		}
	}
	return os.ENOENT
}

// Replace overwrites the element whose ID matches s.Key().
func (ser Themes) Replace(s gobzip.Serial) os.Error {
	for k, v := range ser {
		if v.ID == s.Key() {
			ser[k] = s.(*Theme)
			return nil
		}
	}
	return os.ENOENT
}
// Init returns a fresh, empty collection of this type.
func (ser Articles) Init() gobzip.Serializer {
	s := make([]*Article, 0)
	var o Articles = s
	return o
}

// Init returns a fresh, empty collection of this type.
func (ser Blogs) Init() gobzip.Serializer {
	s := make([]*Blog, 0)
	var o Blogs = s
	return o
}

// Init returns a fresh, empty collection of this type.
func (ser Globals) Init() gobzip.Serializer {
	s := make([]*Global, 0)
	var o Globals = s
	return o
}

// Init returns a fresh, empty collection of this type.
func (ser Resources) Init() gobzip.Serializer {
	s := make([]*Resource, 0)
	var o Resources = s
	return o
}

// Init returns a fresh, empty collection of this type.
func (ser Rubrics) Init() gobzip.Serializer {
	s := make([]*Rubric, 0)
	var o Rubrics = s
	return o
}

// Init returns a fresh, empty collection of this type.
func (ser Themes) Init() gobzip.Serializer {
	s := make([]*Theme, 0)
	var o Themes = s
	return o
}
// Keys lists the IDs of all elements, in collection order.
func (ser Articles) Keys() []int {
	keys := make([]int, 0)
	for _, v := range ser {
		keys = append(keys, v.ID)
	}
	return keys
}

// Keys lists the IDs of all elements, in collection order.
func (ser Blogs) Keys() []int {
	keys := make([]int, 0)
	for _, v := range ser {
		keys = append(keys, v.ID)
	}
	return keys
}

// Keys lists the IDs of all elements, in collection order.
func (ser Globals) Keys() []int {
	keys := make([]int, 0)
	for _, v := range ser {
		keys = append(keys, v.ID)
	}
	return keys
}

// Keys lists the IDs of all elements, in collection order.
func (ser Resources) Keys() []int {
	keys := make([]int, 0)
	for _, v := range ser {
		keys = append(keys, v.ID)
	}
	return keys
}

// Keys lists the IDs of all elements, in collection order.
func (ser Rubrics) Keys() []int {
	keys := make([]int, 0)
	for _, v := range ser {
		keys = append(keys, v.ID)
	}
	return keys
}

// Keys lists the IDs of all elements, in collection order.
func (ser Themes) Keys() []int {
	keys := make([]int, 0)
	for _, v := range ser {
		keys = append(keys, v.ID)
	}
	return keys
}
// Delegate maps a kind name (as returned by the Kind methods above) to the
// corresponding collection on the page; it returns nil for an unknown kind.
func (p *Page) Delegate(kind string) gobzip.Serializer {
	switch kind {
	case "articles":
		return p.Articles
	case "blogs":
		return p.Blogs
	case "globals":
		return p.Globals
	case "resources":
		return p.Resources
	case "rubrics":
		return p.Rubrics
	case "themes":
		return p.Themes
	}
	return nil
}
// Blog is one hosted weblog with its display metadata.
type Blog struct {
	ID          int
	Title       string
	Url         string
	Template    int
	Keywords    string
	Description string
	Slogan      string
	Server      int
}

// Rubric is a category within a blog (Blog holds the owning blog's ID).
type Rubric struct {
	ID          int
	Title       string
	Url         string
	Keywords    string
	Description string
	Blog        int
}

// Article is a single post; Blog and Rubric reference owning entities by ID,
// and Date is stored as a "02.01.2006 15:04:05" formatted string.
type Article struct {
	ID          int
	Date        string
	Title       string
	Keywords    string
	Description string
	Text        string
	Teaser      string
	Blog        int
	Rubric      int
	Url         string
}

// Resource is a named binary asset tied to a template.
type Resource struct {
	ID       int
	Name     string
	Template int
	Data     []byte
}

// Global is a named binary asset shared across templates.
type Global struct {
	ID   int
	Name string
	Data []byte
}

// Page aggregates every collection plus per-request rendering state.
type Page struct {
	HeadMeta  string
	Rubrics   Rubrics
	Articles  Articles
	Blogs     Blogs
	Blog      int
	Themes    Themes
	Resources Resources
	Globals   Globals
	Index     int
	Rubric    int
	Article   int
	Server    string
	Imprint   bool
	Host      string
	Master    string
}
type Theme struct {
ID int
Index string
Style string
Title string
FromUrl string
} |
package main
import (
"context"
"os"
"github.com/chromedp/cdproto/page"
"github.com/chromedp/chromedp"
)
// SetDownloadTask returns a chromedp task list that enables ad blocking and
// directs browser downloads into the given directory path.
func SetDownloadTask(path string) chromedp.Tasks {
	return chromedp.Tasks{
		page.SetAdBlockingEnabled(true),
		page.SetDownloadBehavior(page.SetDownloadBehaviorBehaviorAllow).WithDownloadPath(path),
	}
}
// DownloadSong drives a headless browser through 2conv.com: it submits
// song.URL to the conversion form, waits for the download button, and clicks
// it so the result lands in the directory derived from os.Args[1].
// Errors from the browser run are handed to HandleError.
func DownloadSong(song SearchMatch, ct *context.Context) {
	ctx, cancel := chromedp.NewContext(*ct)
	// Deferring cancel guarantees the browser context is released even if
	// HandleError panics; the original only cancelled on the fall-through
	// path, leaking the context on a panic.
	defer cancel()
	inputFieldSelector := "#layout > header > div.container.header__container > div.convert-form > div.container > div.convert-form__input-container > label > input"
	submitURLSelector := "div.format_selection > button"
	downloadButtonSelector := "#layout > header > div.container.header__container > div.convert-form > div > div.download__buttons > button"
	err := chromedp.Run(
		ctx,
		chromedp.Navigate("https://2conv.com/"),
		chromedp.WaitReady(inputFieldSelector),
		chromedp.SendKeys(inputFieldSelector, song.URL),
		chromedp.Click(submitURLSelector),
		chromedp.WaitVisible(downloadButtonSelector),
		SetDownloadTask(MakeDownloadPath(song, os.Args[1])),
		chromedp.Click(downloadButtonSelector),
	)
	HandleError(err)
}
|
package bannerController
import (
"github.com/gin-gonic/gin"
"hd-mall-ed/packages/admin/models/staticModel"
"hd-mall-ed/packages/common/pkg/adminApp"
"hd-mall-ed/packages/common/pkg/e"
)
// GetBannerList returns every banner record.
func GetBannerList(c *gin.Context) {
	api := adminApp.ApiInit(c)
	model := &staticModel.Static{}
	query := map[string]string{
		"type": "3", // static-resource type 3 = banner main image
	}
	list, err := model.GetListByQuery(query)
	if err != nil {
		api.ResFail(e.Fail)
		return
	}
	api.Response(list)
}
|
package flapjack
import "testing"
// TestDialFails verifies Dial returns an error when nothing is listening
// at the target address.
func TestDialFails(t *testing.T) {
	const (
		address  = "localhost:55555" // no Redis server listens here
		database = 0
	)
	if _, err := Dial(address, database); err == nil {
		t.Error("Dial should fail")
	}
}
// TODO(auxesis): add test for sending and receiving Events
|
package transparent
import "errors"
// layerSource adapts a BackendStorage to the Layer interface.
type layerSource struct {
	Storage BackendStorage
}

// NewLayerSource returns LayerSource.
// LayerSource wraps BackendStorage.
// It Get/Set key-value to BackendStorage.
// This layer must be the bottom of Stack, so setNext always fails.
// It returns an error when storage is nil.
func NewLayerSource(storage BackendStorage) (Layer, error) {
	if storage == nil {
		return nil, errors.New("empty storage")
	}
	return &layerSource{Storage: storage}, nil
}
// Set stores value under key in the wrapped BackendStorage.
func (s *layerSource) Set(key interface{}, value interface{}) (err error) {
	// Storage.Add already yields the error (or nil); forward it directly
	// instead of branching on it.
	return s.Storage.Add(key, value)
}
// Get value from storage.
func (s *layerSource) Get(key interface{}) (value interface{}, err error) {
	return s.Storage.Get(key)
}

// Remove deletes the value stored under key.
func (s *layerSource) Remove(key interface{}) (err error) {
	return s.Storage.Remove(key)
}

// Sync does nothing: the source layer has no lower layer to flush to.
func (s *layerSource) Sync() error {
	return nil
}

// setNext always fails because the source layer must be the bottom of the
// stack.
func (s *layerSource) setNext(next Layer) error {
	return errors.New("don't set next layer")
}

// start is a no-op; the source layer has no background work.
func (s *layerSource) start() error {
	return nil
}

// stop is a no-op; the source layer has no background work.
func (s *layerSource) stop() error {
	return nil
}
|
package graph
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
import (
"context"
"github.com/vrppaul/training-app/graph/generated"
"github.com/vrppaul/training-app/graph/model"
)
// CreateExercise persists a new exercise via the CRUD database layer.
func (r *mutationResolver) CreateExercise(ctx context.Context, input model.NewExercise) (*model.Exercise, error) {
	return r.CRUDDB.InsertExercise(&input)
}

// Exercise looks up a single exercise by its ID.
func (r *queryResolver) Exercise(ctx context.Context, id string) (*model.Exercise, error) {
	return r.CRUDDB.GetExerciseById(id)
}

// Exercises returns all stored exercises.
func (r *queryResolver) Exercises(ctx context.Context) ([]*model.Exercise, error) {
	return r.CRUDDB.GetExercises()
}

// Mutation returns generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }

// Query returns generated.QueryResolver implementation.
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }

type mutationResolver struct{ *Resolver }
type queryResolver struct{ *Resolver }
|
package config
// Package-wide protocol constants.
// NOTE(review): KEY and ServerKEY look like hardcoded secrets committed to
// source control — consider loading them from the environment instead.
var (
	KEY          string = "cHNta15AJioxMTA1IygpSw=="
	ServerKEY    string = "K8Ff3KY4gSjGstf%"
	SEPARATE_CHAR string = "_"
	LF           byte   = 10 // '\n'
	CR           byte   = 13 // '\r'
	SecretLen           = 17
	VersionSplit string = "."
)
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colinfo
import (
"fmt"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
"github.com/lib/pq/oid"
"golang.org/x/text/language"
)
// ColTypeInfo is a type that allows multiple representations of column type
// information (to avoid conversions and allocations).
type ColTypeInfo struct {
	// Only one of these fields can be set.
	resCols  ResultColumns
	colTypes []*types.T
}

// ColTypeInfoFromResCols creates a ColTypeInfo from ResultColumns.
func ColTypeInfoFromResCols(resCols ResultColumns) ColTypeInfo {
	return ColTypeInfo{resCols: resCols}
}

// ColTypeInfoFromColTypes creates a ColTypeInfo from []ColumnType.
func ColTypeInfoFromColTypes(colTypes []*types.T) ColTypeInfo {
	return ColTypeInfo{colTypes: colTypes}
}

// ColTypeInfoFromColDescs creates a ColTypeInfo from []ColumnDescriptor.
func ColTypeInfoFromColDescs(colDescs []descpb.ColumnDescriptor) ColTypeInfo {
	colTypes := make([]*types.T, len(colDescs))
	for i, colDesc := range colDescs {
		colTypes[i] = colDesc.Type
	}
	return ColTypeInfoFromColTypes(colTypes)
}

// NumColumns returns the number of columns in the type.
func (ti ColTypeInfo) NumColumns() int {
	if ti.resCols != nil {
		return len(ti.resCols)
	}
	return len(ti.colTypes)
}

// Type returns the datum type of the i-th column.
func (ti ColTypeInfo) Type(idx int) *types.T {
	if ti.resCols != nil {
		return ti.resCols[idx].Typ
	}
	return ti.colTypes[idx]
}
// ValidateColumnDefType returns an error if the type of a column definition is
// not valid. It is checked when a column is created or altered.
// Array element types are validated recursively.
func ValidateColumnDefType(t *types.T) error {
	switch t.Family() {
	case types.StringFamily, types.CollatedStringFamily:
		if t.Family() == types.CollatedStringFamily {
			// A collated string must carry a parseable locale tag.
			if _, err := language.Parse(t.Locale()); err != nil {
				return pgerror.Newf(pgcode.Syntax, `invalid locale %s`, t.Locale())
			}
		}
	case types.DecimalFamily:
		switch {
		case t.Precision() == 0 && t.Scale() > 0:
			// TODO (seif): Find right range for error message.
			return errors.New("invalid NUMERIC precision 0")
		case t.Precision() < t.Scale():
			return fmt.Errorf("NUMERIC scale %d must be between 0 and precision %d",
				t.Scale(), t.Precision())
		}
	case types.ArrayFamily:
		if t.ArrayContents().Family() == types.ArrayFamily {
			// Nested arrays are not supported as a column type.
			return errors.Errorf("nested array unsupported as column type: %s", t.String())
		}
		if err := types.CheckArrayElementType(t.ArrayContents()); err != nil {
			return err
		}
		// Recurse so element-specific rules (e.g. collated-string locales)
		// are enforced too.
		return ValidateColumnDefType(t.ArrayContents())
	case types.BitFamily, types.IntFamily, types.FloatFamily, types.BoolFamily, types.BytesFamily, types.DateFamily,
		types.INetFamily, types.IntervalFamily, types.JsonFamily, types.OidFamily, types.TimeFamily,
		types.TimestampFamily, types.TimestampTZFamily, types.UuidFamily, types.TimeTZFamily,
		types.GeographyFamily, types.GeometryFamily, types.EnumFamily, types.Box2DFamily:
		// These types are OK.
	default:
		return pgerror.Newf(pgcode.InvalidTableDefinition,
			"value type %s cannot be used for table columns", t.String())
	}
	return nil
}
// ColumnTypeIsIndexable returns whether the type t is valid as an indexed column.
func ColumnTypeIsIndexable(t *types.T) bool {
	// Some inverted index types also have a key encoding, but we don't
	// want to support those yet. See #50659.
	return !MustBeValueEncoded(t) && !ColumnTypeIsInvertedIndexable(t)
}
// ColumnTypeIsInvertedIndexable returns whether the type t is valid to be
// indexed using an inverted index.
func ColumnTypeIsInvertedIndexable(t *types.T) bool {
	switch t.Family() {
	case types.JsonFamily, types.ArrayFamily, types.GeographyFamily, types.GeometryFamily:
		return true
	default:
		return false
	}
}
// MustBeValueEncoded returns true if columns of the given kind can only be value
// encoded. Array columns inherit the property from their element type, except
// for the int2vector/oidvector pseudo-types which are always value encoded.
func MustBeValueEncoded(semanticType *types.T) bool {
	switch semanticType.Family() {
	case types.ArrayFamily:
		switch semanticType.Oid() {
		case oid.T_int2vector, oid.T_oidvector:
			return true
		default:
			return MustBeValueEncoded(semanticType.ArrayContents())
		}
	case types.JsonFamily, types.TupleFamily, types.GeographyFamily, types.GeometryFamily:
		return true
	}
	return false
}
// GetColumnTypes populates the types of the columns with the given IDs into the
// outTypes slice, returning it. You must use the returned slice, as this
// function might allocate a new slice. Non-public columns produce an error.
func GetColumnTypes(
	desc catalog.TableDescriptor, columnIDs []descpb.ColumnID, outTypes []*types.T,
) ([]*types.T, error) {
	// Reuse the caller's slice when it has enough capacity.
	if cap(outTypes) < len(columnIDs) {
		outTypes = make([]*types.T, len(columnIDs))
	} else {
		outTypes = outTypes[:len(columnIDs)]
	}
	for i, id := range columnIDs {
		col, err := desc.FindColumnWithID(id)
		if err != nil {
			return nil, err
		}
		if !col.Public() {
			return nil, fmt.Errorf("column-id \"%d\" does not exist", id)
		}
		outTypes[i] = col.GetType()
	}
	return outTypes, nil
}
// GetColumnTypesFromColDescs populates the types of the columns with the given
// IDs into the outTypes slice, returning it. You must use the returned slice,
// as this function might allocate a new slice. IDs with no matching descriptor
// leave their slot untouched (possibly nil).
func GetColumnTypesFromColDescs(
	cols []descpb.ColumnDescriptor, columnIDs []descpb.ColumnID, outTypes []*types.T,
) []*types.T {
	// Reuse the caller's slice when it has enough capacity.
	if cap(outTypes) < len(columnIDs) {
		outTypes = make([]*types.T, len(columnIDs))
	} else {
		outTypes = outTypes[:len(columnIDs)]
	}
	for i, id := range columnIDs {
		for j := range cols {
			if id == cols[j].ID {
				outTypes[i] = cols[j].Type
				break
			}
		}
	}
	return outTypes
}
|
package main
import (
"encoding/json"
"fmt"
"log"
Maps "api/maps"
Services "api/services"
)
const ratingFileGz = "./rating.gz"
const titlesFileGz = "./titles.gz"
const ratingUrl = "https://datasets.imdbws.com/title.ratings.tsv.gz"
const titlesUrl = "https://datasets.imdbws.com/title.basics.tsv.gz"
const ratingFileTsv = "./rating.tsv"
const titlesFileTsv = "./titles.tsv"
const ratingFileJson = "./rating.json"
const titlesFileJson = "./titles.json"
// main downloads the IMDb ratings/titles datasets, converts them from TSV to
// JSON on disk, and removes the intermediate files.
func main() {
	// download files
	// NOTE(review): return values of DownloadFile/UnGzip (if any) are not
	// checked here — verify the Services API before relying on this.
	Services.DownloadFile(ratingFileGz, ratingUrl)
	Services.DownloadFile(titlesFileGz, titlesUrl)
	// unzip files
	Services.UnGzip(ratingFileGz, ratingFileTsv)
	Services.UnGzip(titlesFileGz, titlesFileTsv)
	fmt.Println("Download finished")
	// open file
	dataTitles, errReadTitles := Services.ReadTsv(titlesFileTsv)
	dataRatings, errReadRating := Services.ReadTsv(ratingFileTsv)
	if errReadRating != nil || errReadTitles != nil {
		log.Fatal(errReadTitles, errReadRating)
	}
	fmt.Println("read files finished")
	titles := Maps.CreateTitles(dataTitles)
	ratings := Maps.CreateRatings(dataRatings)
	// 4. Convert an array of structs to JSON using marshaling functions from the encoding/json package
	jsonDataTitles, errJsonTitles := json.MarshalIndent(titles, "", " ")
	if errJsonTitles != nil {
		log.Fatal(errJsonTitles)
	}
	fmt.Println(string(jsonDataTitles))
	Services.SaveJsonFile(titlesFileJson, string(jsonDataTitles))
	// 4. Convert an array of structs to JSON using marshaling functions from the encoding/json package
	jsonDataRatings, errJsonRatings := json.MarshalIndent(ratings, "", " ")
	if errJsonRatings != nil {
		log.Fatal(errJsonRatings)
	}
	fmt.Println(string(jsonDataRatings))
	Services.SaveJsonFile(ratingFileJson, string(jsonDataRatings))
	// Clean up the downloaded archives and intermediate TSVs.
	Services.RemoveFile(ratingFileGz)
	Services.RemoveFile(titlesFileGz)
	Services.RemoveFile(ratingFileTsv)
	Services.RemoveFile(titlesFileTsv)
}
|
package validator
import (
"encoding/json"
"fmt"
"html/template"
"io"
"os"
"strings"
"github.com/go-wyvern/leego"
)
// AppApis is the global registry of every Api added via Module.Use.
var AppApis []Api

// codeTag is the Markdown fenced-code-block delimiter used in the template.
const codeTag = "```"

// Api describes a single documented endpoint: its route, handler, validator,
// and example success/failure payloads for the generated Markdown.
type Api struct {
	Description   string
	Method        string
	Path          string
	Handler       leego.HandlerFunc
	SuccessStdOut interface{}
	SuccessFormat []byte
	FailStdOut    interface{}
	FailFormat    []byte
	StdFormat     string
	CodeTag       string
	Validator     *Validator
}

// Module groups related Apis under a name.
type Module struct {
	ModuleName string
	Apis       []Api
}

// Project is the top-level document: a named set of modules.
type Project struct {
	ProjectName string
	Modules     []Module
}
// Find returns the Validator registered for the given method/path pair,
// or nil when no matching Api exists in AppApis.
func Find(method, path string) *Validator {
	for i := range AppApis {
		if AppApis[i].Method == method && AppApis[i].Path == path {
			return AppApis[i].Validator
		}
	}
	return nil
}
// NewProject returns a Project with the given name and no modules.
func NewProject(name string) *Project {
	p := new(Project)
	p.ProjectName = name
	return p
}

// NewModule returns an empty Module with the given name.
func NewModule(module_name string) *Module {
	module := new(Module)
	module.ModuleName = module_name
	return module
}

// NewApi builds an Api for the given route, description, handler and
// validator; CodeTag is pre-filled with the Markdown fence delimiter.
func NewApi(method, path, d string, h leego.HandlerFunc, v *Validator) *Api {
	r := new(Api)
	r.Method = method
	r.Description = d
	r.Path = path
	r.Handler = h
	r.Validator = v
	r.CodeTag = codeTag
	return r
}
// SetSuccessStdOut records the example success payload; when StdFormat is
// "json" it also caches the pretty-printed form.
// NOTE(review): the MarshalIndent error is silently discarded, leaving
// SuccessFormat nil on failure.
func (c *Api) SetSuccessStdOut(s interface{}) {
	c.SuccessStdOut = s
	if c.StdFormat == "json" {
		c.SuccessFormat, _ = json.MarshalIndent(s, "", " ")
	}
}

// SetFailStdOut records the example failure payload; when StdFormat is
// "json" it also caches the pretty-printed form (marshal errors discarded).
func (c *Api) SetFailStdOut(s interface{}) {
	c.FailStdOut = s
	if c.StdFormat == "json" {
		c.FailFormat, _ = json.MarshalIndent(s, "", " ")
	}
}

// Use registers a on both the module and the global AppApis registry.
func (c *Module) Use(a Api) *Module {
	c.Apis = append(c.Apis, a)
	AppApis = append(AppApis, a)
	return c
}

// Use adds m to the project and returns the project for chaining.
func (c *Project) Use(m Module) *Project {
	c.Modules = append(c.Modules, m)
	return c
}
// RenderMarkdown writes the project's API documentation to filename using
// MarkdownTemplate.
func (c *Project) RenderMarkdown(filename string) error {
	f, err := os.Create(filename)
	if err != nil {
		fmt.Println(err.Error())
		return err
	}
	// Close the file even when template rendering fails; the original
	// version never closed it and leaked the descriptor.
	defer f.Close()
	return tmpl(f, MarkdownTemplate, c)
}
// MarkdownTemplate renders a Project into Markdown: one section per module,
// one subsection per Api with a parameter table and example responses.
var MarkdownTemplate = `{{with .}}# {{.ProjectName}}
{{range .Modules}}
## {{.ModuleName}}
{{range .Apis}}
### {{.Method}} {{.Path}} {{.Description}}
请求参数:
| 名称 | 类型 | 说明 | 是否必须 |
| -----|:-----:|:---------:|:-----:|
{{range $name, $params :=.Validator.ApiParams}}|**{{$name}}**|{{$params.Type}}|{{$params.Description}}|{{$params.Require}}|
{{end}}
请求正确返回:
{{.CodeTag}}
{{.SuccessFormat|printf "%s"|unescaped}}
{{.CodeTag}}
请求错误返回:
{{.CodeTag}}
{{.FailFormat | printf "%s"|unescaped}}
{{.CodeTag}}
{{end}}{{end}}{{end}}
`

// tmpl parses text with the "trim" and "unescaped" helper functions and
// executes it against data, writing the result to w.
// Note: template.Must panics if text fails to parse.
func tmpl(w io.Writer, text string, data interface{}) error {
	t := template.New("top")
	t.Funcs(template.FuncMap{"trim": func(s template.HTML) template.HTML {
		return template.HTML(strings.TrimSpace(string(s)))
	}})
	// unescaped injects raw bytes into the HTML-escaping template engine.
	t.Funcs(template.FuncMap{"unescaped": func(x string) interface{} {
		return template.HTML(x)
	}})
	template.Must(t.Parse(text))
	if err := t.Execute(w, data); err != nil {
		return err
	}
	return nil
}
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"github.com/koderover/zadig/lib/microservice/cron/core/service"
"github.com/koderover/zadig/lib/setting"
"github.com/koderover/zadig/lib/tool/xlog"
"github.com/koderover/zadig/lib/util"
)
// ListColliePipelines fetches every pipeline resource from the collie API,
// authenticating with the client's root API key.
func (c *CollieClient) ListColliePipelines(log *xlog.Logger) ([]*service.PipelineResource, error) {
	collieApiAddress := c.ApiAddress
	if collieApiAddress == "" {
		return nil, errors.New("collieApiAddress cannot be empty")
	}
	header := http.Header{}
	header.Add("authorization", fmt.Sprintf("%s %s", setting.ROOTAPIKEY, c.ApiRootKey))
	url := collieApiAddress + "/api/collie/api/pipelines"
	resp, err := util.SendRequest(url, http.MethodGet, header, nil)
	if err != nil {
		return nil, err
	}
	pipelineList := make([]*service.PipelineResource, 0)
	err = json.Unmarshal(resp, &pipelineList)
	if err != nil {
		return nil, err
	}
	return pipelineList, nil
}
// RunColliePipelineTask triggers a run of the given collie pipeline build
// and logs the raw response on success.
func (c *CollieClient) RunColliePipelineTask(args *service.CreateBuildRequest, log *xlog.Logger) error {
	collieApiAddress := c.ApiAddress
	if collieApiAddress == "" {
		return errors.New("collieApiAddress cannot be empty")
	}
	// The collie pipeline-list API returns pipeline names in the form
	// "<project>/<pipelineName>", so the name must be URL-encoded.
	encodePipelineName := url.QueryEscape(args.PipelineName)
	uri := fmt.Sprintf("/api/collie/api/builds/%s/%s/run", encodePipelineName, args.ProductName)
	// Renamed from "url" to avoid shadowing the net/url package used above.
	reqURL := collieApiAddress + uri
	header := http.Header{}
	header.Add("authorization", fmt.Sprintf("%s %s", setting.ROOTAPIKEY, c.ApiRootKey))
	// The original discarded this error; surface it instead of sending a
	// potentially empty body.
	body, err := json.Marshal(args)
	if err != nil {
		return err
	}
	resp, err := util.SendRequest(reqURL, http.MethodPost, header, body)
	if err != nil {
		return err
	}
	log.Infof("run collie pipeline %s result %s", args.PipelineName, string(resp))
	return nil
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package utility
import (
"strings"
"github.com/mattermost/mattermost-cloud/internal/tools/aws"
"github.com/mattermost/mattermost-cloud/model"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// rtcd manages the mattermost-rtcd Helm utility for a cluster.
type rtcd struct {
	environment    string
	kubeconfigPath string
	cluster        *model.Cluster
	logger         log.FieldLogger
	desiredVersion *model.HelmUtilityVersion
	actualVersion  *model.HelmUtilityVersion
}

// newRtcdOrUnmanagedHandle returns an unmanaged handle when the cluster's
// rtcd utility is marked unmanaged; otherwise it builds and validates a
// managed rtcd handle.
func newRtcdOrUnmanagedHandle(cluster *model.Cluster, kubeconfigPath string, awsClient aws.AWS, logger log.FieldLogger) (Utility, error) {
	desired := cluster.DesiredUtilityVersion(model.RtcdCanonicalName)
	actual := cluster.ActualUtilityVersion(model.RtcdCanonicalName)
	if model.UtilityIsUnmanaged(desired, actual) {
		return newUnmanagedHandle(model.RtcdCanonicalName, logger), nil
	}
	rtcd := newRtcdHandle(cluster, desired, kubeconfigPath, awsClient, logger)
	err := rtcd.validate()
	if err != nil {
		return nil, errors.Wrap(err, "rtcd utility config is invalid")
	}
	return rtcd, nil
}

// newRtcdHandle constructs an rtcd handle, recording the cluster's actual
// rtcd version from its utility metadata.
func newRtcdHandle(cluster *model.Cluster, desiredVersion *model.HelmUtilityVersion, kubeconfigPath string, awsClient aws.AWS, logger log.FieldLogger) *rtcd {
	return &rtcd{
		environment:    awsClient.GetCloudEnvironmentName(),
		kubeconfigPath: kubeconfigPath,
		cluster:        cluster,
		logger:         logger.WithField("cluster-utility", model.RtcdCanonicalName),
		desiredVersion: desiredVersion,
		actualVersion:  cluster.UtilityMetadata.ActualVersions.Rtcd,
	}
}

// validate checks the handle's required configuration.
func (r *rtcd) validate() error {
	if r.kubeconfigPath == "" {
		return errors.New("kubeconfig path cannot be empty")
	}
	return nil
}
// updateVersion refreshes the cached actual version from the deployment.
func (r *rtcd) updateVersion(h *helmDeployment) error {
	actualVersion, err := h.Version()
	if err != nil {
		return err
	}
	r.actualVersion = actualVersion
	return nil
}

// ValuesPath returns the values file of the desired version, or "" when no
// desired version is set.
func (r *rtcd) ValuesPath() string {
	if r.desiredVersion == nil {
		return ""
	}
	return r.desiredVersion.Values()
}
// CreateOrUpgrade deploys or upgrades the rtcd Helm release and refreshes
// the cached actual version on success.
func (r *rtcd) CreateOrUpgrade() error {
	h := r.newHelmDeployment()
	if err := h.Update(); err != nil {
		return err
	}
	// Return updateVersion's result directly instead of the original
	// assign-then-return of the same error value.
	return r.updateVersion(h)
}
func (r *rtcd) DesiredVersion() *model.HelmUtilityVersion {
return r.desiredVersion
}
func (r *rtcd) ActualVersion() *model.HelmUtilityVersion {
if r.actualVersion == nil {
return nil
}
return &model.HelmUtilityVersion{
Chart: strings.TrimPrefix(r.actualVersion.Version(), "mattermost-rtcd-"),
ValuesPath: r.actualVersion.Values(),
}
}
func (r *rtcd) Destroy() error {
helm := r.newHelmDeployment()
return helm.Delete()
}
func (r *rtcd) Migrate() error {
// if anything needs to be migrated can be added here
return nil
}
// newHelmDeployment assembles the helm deployment descriptor used for all
// rtcd install/upgrade/delete operations.
func (r *rtcd) newHelmDeployment() *helmDeployment {
	return newHelmDeployment(
		"mattermost/mattermost-rtcd", // chart reference
		"mattermost-rtcd",            // presumably the release name — verify against newHelmDeployment's signature
		"mattermost-rtcd",            // presumably the namespace — verify against newHelmDeployment's signature
		r.kubeconfigPath,
		r.desiredVersion,
		defaultHelmDeploymentSetArgument,
		r.logger,
	)
}
// Name returns the canonical name of the rtcd utility.
func (r *rtcd) Name() string {
	return model.RtcdCanonicalName
}
|
package commands
import "strings"
// gitLineEnding returns the line ending that matches the user's
// core.autocrlf setting.
//
// With autocrlf=true (or the boolean synonyms "t"/"1"), git converts text
// files to CRLF on checkout, so generated text should use "\r\n". With
// autocrlf=input, git only converts CRLF to LF at commit time and the
// working tree keeps LF, so "\n" is correct — the original code returned
// "\r\n" here as well, which contradicts git's documented semantics.
// Any other value falls back to the OS default.
func gitLineEnding(git env) string {
	value, _ := git.Get("core.autocrlf")
	switch strings.ToLower(value) {
	case "true", "t", "1":
		return "\r\n"
	case "input":
		// Working-tree files stay LF under autocrlf=input.
		return "\n"
	default:
		return osLineEnding()
	}
}
// env abstracts read access to git configuration: Get returns the value for
// a key and whether the key was set.
type env interface {
	Get(string) (string, bool)
}
|
package main
import (
"fmt"
"os"
)
// main demonstrates panicking on an error: creating a file under /etc
// typically fails for non-root users, so panic(err) fires.
func main() {
	// The commented-out block shows that code after a panic never runs.
	/*
		panic("Some error happened")
		fmt.Println("It will not get printed")
	*/

	// Permission denied error expected for non-root users.
	res, err := os.Create("/etc/akilan.txt")
	if err != nil {
		panic(err)
	}
	// Close the handle on the success path; the original leaked it.
	defer res.Close()
	fmt.Println(res)
}
|
package zxcrpc
import (
"context"
"net"
"google.golang.org/grpc"
log "github.com/sirupsen/logrus"
)
// SimpleZxcRPCServer is a stateless stub implementation of the ZxcRPC
// gRPC service.
type SimpleZxcRPCServer struct{}
// DidStartJob logs the incoming job notification and replies with a fixed
// server identity.
func (server *SimpleZxcRPCServer) DidStartJob(ctx context.Context, job *JobMessage) (*Server, error) {
	log.Info("DidStartJob", job)
	return &Server{
		Name: "foo",
	}, nil
}
// DidEndJob logs the job-completion notification and replies with a fixed
// server identity.
func (server *SimpleZxcRPCServer) DidEndJob(ctx context.Context, jobResult *JobResultMessage) (*Server, error) {
	log.Info("DidEndJob", jobResult)
	return &Server{
		Name: "foo",
	}, nil
}
// NewZxcRPCServer returns a fresh SimpleZxcRPCServer.
func NewZxcRPCServer() *SimpleZxcRPCServer {
	return &SimpleZxcRPCServer{}
}
// Serve listens on the given TCP address and serves the gRPC service until
// the server stops or the listener fails.
func (server *SimpleZxcRPCServer) Serve(address string) error {
	grpcServer := grpc.NewServer()

	lis, err := net.Listen("tcp", address)
	if err != nil {
		return err
	}

	// Register the receiver itself rather than a brand-new instance
	// (the original called NewZxcRPCServer() here), so any state carried
	// by this server is the state the RPC handlers observe.
	RegisterZxcRPCServer(grpcServer, server)

	return grpcServer.Serve(lis)
}
|
package main
import "fmt"
// main0301 demonstrates reading and writing a variable through a pointer.
func main0301() {
	var a int = 10
	//fmt.Printf("%p\n", &a)
	// Declare a pointer variable holding the address of a.
	var p *int = &a
	//fmt.Printf("%p\n", p)
	// Modify the variable indirectly through the pointer:
	// write operation.
	*p = 123
	//fmt.Println(a)
	// Read operation.
	fmt.Println(*p)
}
// main0302 demonstrates the nil pointer zero value and allocation via new.
func main0302() {
	// A declared pointer variable defaults to 0x0 (nil).
	// Address 0 (and the 0-255 range) is reserved by the system and is not
	// accessible (read or write) to user code.
	// Nil pointer:
	var p *int = nil
	fmt.Println(p)
	// new(T) allocates memory for type T and returns a pointer of type *T.
	// Go's garbage collector reclaims it once unreachable.
	p = new(int)
	*p = 123
	fmt.Println(p)
	fmt.Println(*p)
	//fmt.Printf("%p\n", p)
	//fmt.Println(*p)
	//*p = 123
	//
	//fmt.Println(*p)
}
// main illustrates (in comments only) why wild pointers are forbidden.
func main() {
	// Wild pointer: a pointer aimed at an arbitrary, unknown address.
	// Dereferencing it crashes the program.
	// var p *int = *int(0xc042058088)
	// A pointer must always have a valid target: nil pointers are allowed
	// in a program, wild pointers are not.
	// fmt.Println(*p)
}
|
/*
Package shuffler shuffles arrays of integers with anchoring.
Array entries can be free or anchored.
The former type is shuffled and the latter type is not.
An entry anchored by position retains the same position after shuffling.
An entry anchored relative its previous or next entry retains the same relative position
to the anchor, which can be shuffled.
It is possible to create multiple chains of anchors.
Terminology
"A > B" describes a list of two items where A is anchored to its successor, B.
"A B <" describes the inverse, where B is anchored to its predecessor, A.
"A B . C" describes a list where B is anchored by position.
Edge Cases:
In "A > B <" A and B are mutually anchored. This will be converted into "A > B".
In "A < B C" and "A B C >" The endpoints are anchored to non-existent neighbors. The anchors are removed.
Chains of references are handled correctly.
*/
package shuffler
import (
crand "crypto/rand"
"math"
"math/big"
"math/rand"
)
// A shuffler shuffles integers (generally representing positions) while maintaining anchored positions.
type shuffler struct {
	items    []int        // values to shuffle, in insertion order
	position map[int]bool // indices anchored to their original position
	before   map[int]int  // before[j] = v: item v is placed immediately before item j
	after    map[int]int  // after[j] = v: item v is placed immediately after item j
	skip     map[int]bool // indices emitted via before/after chains; skipped by Shuffle's main loop
}
// New returns a shuffler with all internal collections initialized and no
// items added yet.
func New() *shuffler {
	s := shuffler{}
	s.items = make([]int, 0)
	s.position = make(map[int]bool)
	s.before = make(map[int]int)
	s.after = make(map[int]int)
	s.skip = make(map[int]bool)
	return &s
}
// Add appends a new item to be shuffled.
// The anchor argument specifies how the item is anchored.
func (s *shuffler) Add(slot int, anchor Anchor) {
	// The item's index is its position in insertion order.
	index := len(s.items)
	s.items = append(s.items, slot)
	switch anchor {
	case Position:
		// Keep this exact index after shuffling.
		s.position[index] = true
	case ToPrevious:
		// Follow the previously added item wherever it lands; the main
		// shuffle loop skips this index and emits it via the after chain.
		s.after[index-1] = index
		s.skip[index] = true
	case ToNext:
		// Precede the item that will be added next (index+1) wherever it
		// lands; emitted via the before chain.
		s.before[index+1] = index
		s.skip[index] = true
	}
}
// resolve dangling references and break mutual anchors
// A < B C becomes A B C
// A B C > becomes A B C
// A > B < becomes A > B
func (s *shuffler) resolve() {
	// First item anchored to a non-existent predecessor (stored under key
	// -1 by Add): drop the anchor so the item is shuffled normally.
	if index, ok := s.after[-1]; ok {
		delete(s.after, -1)
		delete(s.skip, index)
	}
	// Last item anchored to a non-existent successor: same treatment.
	if index, ok := s.before[len(s.items)]; ok {
		delete(s.before, len(s.items))
		delete(s.skip, index)
	}
	// Mutual anchors ("A > B <"): keep the forward anchor (to follows
	// from), drop the backward one, and let from be shuffled normally.
	for from, to := range s.after {
		if s.before[to] == from {
			delete(s.before, to)
			delete(s.skip, from)
		}
	}
}
// Shuffle shuffles and returns the list of items added with Add,
// using seed to initialize the random number generator.
func (s *shuffler) Shuffle(seed int64) []int {
	l := len(s.items)
	r := rand.New(rand.NewSource(seed))
	// p is a random visit order over item indices.
	p := r.Perm(l)
	out := make([]int, l)
	// k is the next free output slot for non-position-anchored items.
	k := 0
	s.resolve()
	for _, j := range p {
		// previous and next anchored items are established by their anchors.
		if s.skip[j] {
			continue
		}
		// position anchored items stay in place.
		if s.position[j] {
			out[j] = s.items[j]
			continue
		}
		// skip reserved position anchored slots
		for s.position[k] {
			k++
		}
		// insert "ToNext" anchored items before item.
		// follow chains of references
		for v, ok := s.before[j]; ok; v, ok = s.before[v] {
			out[k] = s.items[v]
			k++
		}
		// place the item
		out[k] = s.items[j]
		k++
		// insert "ToPrevious" anchored items after item.
		// follow chains of references
		for v, ok := s.after[j]; ok; v, ok = s.after[v] {
			out[k] = s.items[v]
			k++
		}
	}
	return out
}
// shuffle is the old shuffle version: it only handles position anchors,
// given as a list of indices that must keep their place. All other indices
// are permuted into the remaining slots in random order.
func shuffle(seed int64, items []int, anchored []int) []int {
	rng := rand.New(rand.NewSource(seed))

	// Set of indices that must not move.
	pinned := make(map[int]bool, len(anchored))
	for _, idx := range anchored {
		pinned[idx] = true
	}

	n := len(items)
	result := make([]int, n)
	next := 0 // next free (non-pinned) output slot
	for _, src := range rng.Perm(n) {
		if pinned[src] {
			// Anchored indices map to themselves.
			result[src] = src
			continue
		}
		// Advance past slots reserved for anchored indices.
		for pinned[next] {
			next++
		}
		result[next] = src
		next++
	}
	return result
}
// Seed, a convenience function, produces a 64bit int value read from
// crypto/rand.Reader. The result lies in [0, math.MaxInt64); the error is
// non-nil only if the system's secure random source fails.
func Seed() (int64, error) {
	limit := big.NewInt(math.MaxInt64)
	v, err := crand.Int(crand.Reader, limit)
	if err != nil {
		return 0, err
	}
	return v.Int64(), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.