text stringlengths 11 4.05M |
|---|
package uploadcard
import (
"archive/zip"
"bytes"
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"path/filepath"
"text/template"
"github.com/HanYu1983/gomod/lib/db2"
tool "github.com/HanYu1983/gomod/lib/tool"
"google.golang.org/appengine"
)
// Serve_ParseResult renders the parse-result page for an uploaded extension
// zip: it loads the stored zip by id, verifies it, base64-encodes every
// contained .jpg/.png image, and executes the parseResult template with the
// manifest data. Errors are funneled through tool.Recover / tool.Output.
func Serve_ParseResult(w http.ResponseWriter, r *http.Request) {
	defer tool.Recover(func(err error) {
		tool.Output(w, nil, err.Error())
	})
	ctx := appengine.NewContext(r)
	r.ParseForm()
	tool.Assert(tool.ParameterIsNotExist(r.Form, "id"))
	id := r.Form["id"][0]
	path := fmt.Sprintf("root/tcg/extensionZip/%s", id)
	fileList, err := db2.GetFileList(ctx, path, true)
	tool.Assert(tool.IfError(err))
	if len(fileList) == 0 {
		panic("no file")
	}
	file := fileList[0]
	zipReader, err := zip.NewReader(bytes.NewReader(file.Content), int64(len(file.Content)))
	tool.Assert(tool.IfError(err))
	manifest, err := VerifyZip(ctx, zipReader)
	tool.Assert(tool.IfError(err))
	imgs := map[string]string{}
	for _, zipFile := range zipReader.File {
		isJpg, err := filepath.Match("*/*.jpg", zipFile.Name)
		tool.Assert(tool.IfError(err))
		isPng, err := filepath.Match("*/*.png", zipFile.Name)
		tool.Assert(tool.IfError(err))
		if !isJpg && !isPng {
			// Skip non-image entries instead of reading them needlessly.
			continue
		}
		// Read via a helper so each entry is closed promptly; the previous
		// version deferred every Close until the whole handler returned,
		// accumulating open handles while iterating the archive.
		content, err := readZipEntry(zipFile)
		tool.Assert(tool.IfError(err))
		imgs[filepath.Base(zipFile.Name)] = base64.StdEncoding.EncodeToString(content)
	}
	model := map[string]interface{}{
		"id":                id,
		"fileName":          filepath.Base(file.Name),
		"game":              manifest.Game,
		"extensionName":     manifest.ExtensionName,
		"extensionDescribe": manifest.ExtensionDescribe,
		"imgs":              imgs,
		"infos":             manifest.CardInfo,
	}
	t, err := template.ParseFiles("app/uploadcard/parseResult.html", "app/uploadcard/header.html", "app/uploadcard/htmlHeader.html")
	tool.Assert(tool.IfError(err))
	err = t.Execute(w, model)
	tool.Assert(tool.IfError(err))
}

// readZipEntry opens, fully reads, and closes a single zip archive entry.
func readZipEntry(zipFile *zip.File) ([]byte, error) {
	subFile, err := zipFile.Open()
	if err != nil {
		return nil, err
	}
	defer subFile.Close()
	return ioutil.ReadAll(subFile)
}
|
package freee
import (
"fmt"
"net/url"
)
func SetCompanyID(v *url.Values, companyID uint32) {
v.Set("company_id", fmt.Sprintf("%d", companyID))
}
|
/**
* Copyright (c) 2018-present, MultiVAC Foundation.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
package consensus
import (
"github.com/stretchr/testify/assert"
"math/rand"
"testing"
"github.com/multivactech/MultiVAC/model/chaincfg/chainhash"
"github.com/multivactech/MultiVAC/model/wire"
)
// nextBAndByzValue pairs a binary vote value b with the ByzAgreementValue it
// was cast for; used by these tests to pre-build pools of votes.
type nextBAndByzValue struct {
	b byte
	v *wire.ByzAgreementValue
}
// newByzValueForTest generates a random b (0 or 1) and a corresponding fake
// ByzValue: a random-hash value when b == 0, the shared empty value when
// b == 1. On a hash-construction error it returns (0, nil).
// NOTE(review): callers never check for the nil value — presumably SetBytes
// cannot fail for a 32-byte input; confirm.
func newByzValueForTest() (byte, *wire.ByzAgreementValue) {
	emptyValue := &wire.ByzAgreementValue{}
	b := (byte)(rand.Int() % 2)
	if b == 0 {
		// Build a pseudo-random 32-byte block hash (digits 0-9 per byte).
		hash := *new(chainhash.Hash)
		randBytes := make([]byte, 32)
		for i := range randBytes {
			randBytes[i] = byte(rand.Int() % 10)
		}
		if err := hash.SetBytes(randBytes); err != nil {
			return 0, nil
		}
		value := &wire.ByzAgreementValue{BlockHash: hash, Leader: ""}
		return b, value
	}
	return b, emptyValue
}
// TestBinaryBAVoter_numOfFinNodesBeforeStep adds 50 votes at random steps in
// [4, 8) and checks numOfFinNodesBeforeStep(…, 10, …) counts all of them,
// split by binary value.
func TestBinaryBAVoter_numOfFinNodesBeforeStep(t *testing.T) {
	// count all votes which have finished and step after 4
	voter := newBinaryBAVoter(0, 100, nil)
	voter.setCurrentRound(1)
	bCount := make(map[byte]int)
	for i := 0; i < 50; i++ {
		b, v := newByzValueForTest()
		bCount[b]++
		step := (rand.Int() % 4) + 4
		// NOTE(review): the step is negated here — presumably a negative
		// step marks a vote as "finished" for numOfFinNodesBeforeStep;
		// confirm against addVote's contract.
		_, _, _ = voter.addVote(1, -step, b, v)
	}
	numOfFinNodesBeforeStep0 := voter.numOfFinNodesBeforeStep(1, 10, 0, nil)
	numOfFinNodesBeforeStep1 := voter.numOfFinNodesBeforeStep(1, 10, 1, nil)
	assert.Equal(t, numOfFinNodesBeforeStep0, bCount[0])
	assert.Equal(t, numOfFinNodesBeforeStep1, bCount[1])
	assert.Equal(t, numOfFinNodesBeforeStep0+numOfFinNodesBeforeStep1, 50)
}
// TestBinaryBAVoter_getBBAHistory records 50 BBA messages (distinct public
// keys) under one (round, step, b) and checks getBBAHistory returns exactly
// that set.
func TestBinaryBAVoter_getBBAHistory(t *testing.T) {
	round, step, b := 1, 1, 1
	voter := newBinaryBAVoter(0, 100, nil)
	voter.setCurrentRound(round)
	bbaMap := make(map[*wire.MsgBinaryBA]struct{})
	for i := 0; i < 50; i++ {
		// Pk differs in its last byte, making each message unique.
		bba := &wire.MsgBinaryBA{
			SignedCredentialWithBA: &wire.SignedMsg{Pk: []byte{31: byte(i)}},
		}
		bbaMap[bba] = struct{}{}
		voter.addVoteWithBBAMsg(round, step, byte(b), &wire.ByzAgreementValue{}, bba)
	}
	bbaHistory := voter.getBBAHistory(round, step, byte(b))
	assert.Equal(t, len(bbaMap), len(bbaHistory))
	for _, bba := range bbaHistory {
		assert.Contains(t, bbaMap, bba)
	}
}
// TestBinaryBAVoter_addVoteWithBBAMsg feeds 2*threshold random votes, each
// from a random 32-byte public key, and checks that a notAgree outcome never
// carries a non-nil result.
// NOTE(review): unlike the other tests, setCurrentRound is never called here
// — confirm votes for round 1 are actually accepted by the voter.
func TestBinaryBAVoter_addVoteWithBBAMsg(t *testing.T) {
	var threshold = 100
	round := 1
	step := 5
	voter := newBinaryBAVoter(0, 100, nil)
	for i := 0; i < 2*threshold; i++ {
		b, v := newByzValueForTest()
		msgbba := &wire.MsgBinaryBA{}
		msgbba.SignedCredentialWithBA = &wire.SignedMsg{}
		msgbba.SignedCredentialWithBA.Pk = make([]byte, 32)
		// Random key so each vote appears to come from a distinct node.
		randBytes := make([]byte, 32)
		for i := range randBytes {
			randBytes[i] = byte(rand.Int() % 10)
		}
		copy(msgbba.SignedCredentialWithBA.Pk, randBytes)
		res, agreeType, _ := voter.addVoteWithBBAMsg(round, step, b, v, msgbba)
		if agreeType == notAgree && res != nil {
			t.Error("wrong")
		}
	}
}
// TestBinaryBAVoter_addVote covers three scenarios: a vote for the wrong
// round, repeated votes for a single value, and a randomized mix of votes
// across steps 4-7.
func TestBinaryBAVoter_addVote(t *testing.T) {
	var threshold = 100
	voter := newBinaryBAVoter(0, 100, nil)
	v := &wire.ByzAgreementValue{}
	var b byte
	// if round not equal
	res, agreeType, _ := voter.addVote(1, 5, 0, v)
	if res != nil || agreeType != notAgree {
		t.Errorf("when currentRound != vote.round, it work wrong")
	}
	voter.setCurrentRound(1)
	// make a byzValuePool, so we can put it in addVote
	byzValuePool := make([]nextBAndByzValue, 50)
	for i := 0; i < threshold/2; i++ {
		b, v = newByzValueForTest()
		byzValuePool[i] = nextBAndByzValue{b, v}
	}
	// Vote threshold times for the pool's first entry; agreement on a single
	// point must never be reported as notAgree.
	for i := 0; i < threshold; i++ {
		b, v = byzValuePool[0].b, byzValuePool[0].v
		res, agreeType, _ := voter.addVote(1, 5, b, v)
		if res != nil {
			if agreeType == notAgree {
				t.Error("vote belong to same point, it wrong", agreeType)
			}
		}
	}
	binaryCount := make(map[byte]int)
	// 1/3 same point
	// 2/3 random point
	for i := 1; i < 2*threshold; i++ {
		// Indices 1-49 only: entry 0 already holds threshold votes.
		idx := (rand.Int() % 49) + 1
		b, v = byzValuePool[idx].b, byzValuePool[idx].v
		binaryCount[b]++
		step := (rand.Int() % 4) + 4
		if res, agreeType, _ := voter.addVote(1, step, b, v); res != nil {
			// A non-nil result must come with some form of agreement.
			switch agreeType {
			case notAgree:
				if res != nil {
					t.Error("notAgree, expect nil, recieve value")
				}
			default:
				if res == nil {
					t.Error("Agree, but res is empty")
				}
			}
			//if res.b == 0 {
			//	if !(binaryCount[0] >= threshold && binaryCount[1] < threshold && agreeType == totalAgreeOnZero) {
			//		//t.Errorf("get result b = 0, but count[0] = %v, count[1] = %v, agreeType = %v",
			//		//	binaryCount[0], binaryCount[1], agreeType)
			//	}
			//} else if res.b == 1 {
			//	if !(binaryCount[1] >= threshold && binaryCount[0] < threshold && agreeType == agreeOnOne) {
			//		//t.Errorf("get result b = 1, but count[0] = %v, count[1] = %v, agreeType = %v",
			//		//	binaryCount[0], binaryCount[1], agreeType)
			//	}
			//} else {
			//	t.Errorf("Wrong result b")
			//}
		}
	}
}
// TestBinaryBAVoter_GetVoteKeyWithHalfThreshold keeps casting random binary
// votes until getVoteKeyWithHalfThreshold reports a winner, then checks the
// winner really collected at least (threshold+1)/2 votes.
func TestBinaryBAVoter_GetVoteKeyWithHalfThreshold(t *testing.T) {
	threshold := 100
	voter := newBinaryBAVoter(1, threshold, nil)
	emptyValue := &wire.ByzAgreementValue{}
	validValue := &wire.ByzAgreementValue{BlockHash: *new(chainhash.Hash), Leader: "42"}
	// Before any votes have been cast, no key may be returned.
	if voteKey, byzValue := voter.getVoteKeyWithHalfThreshold(); voteKey != nil || byzValue != nil {
		t.Errorf("voteKey is empty")
	}
	binaryCount := make(map[byte]int)
	for {
		b := (byte)(rand.Int() % 2)
		var v *wire.ByzAgreementValue
		if b == 0 {
			v = validValue
		} else {
			v = emptyValue
		}
		binaryCount[b]++
		voter.addVote(1, 4, b, v)
		if voteKey, _ := voter.getVoteKeyWithHalfThreshold(); voteKey != nil {
			if binaryCount[voteKey.b] < (threshold+1)/2 {
				t.Errorf("Votekey didn't reach more than half of threshold")
			}
			break
		}
	}
}
|
package main
import (
"fmt"
)
// twoSum returns the indices i < j of the first pair of elements in nums
// whose sum equals target, or nil when no such pair exists.
//
// Uses a single pass with a value->index map (O(n)) instead of the previous
// O(n^2) pair scan, whose inner-only `break` could also append more than one
// pair when several pairs matched.
func twoSum(nums []int, target int) []int {
	// seen maps a value to the index where it was first encountered.
	seen := make(map[int]int, len(nums))
	for j, n := range nums {
		if i, ok := seen[target-n]; ok {
			return []int{i, j}
		}
		seen[n] = j
	}
	return nil
}
// main runs twoSum against three sample inputs and prints each result.
func main() {
	cases := []struct {
		nums   []int
		target int
	}{
		{[]int{2, 7, 11, 15}, 9},
		{[]int{3, 2, 4}, 6},
		{[]int{3, 3}, 6},
	}
	for _, c := range cases {
		fmt.Println(twoSum(c.nums, c.target))
	}
}
|
package main
// main demonstrates a switch with an init statement: y is incremented before
// x is evaluated as the switch condition.
//
// BUG FIX: y was previously never declared, so this did not compile.
func main() {
	var x, y int
	switch y++; x {
	case 1:
		// Unreachable for the zero value of x; kept to show case syntax.
	}
	_ = y
}
|
package other
import (
"github.com/coredumptoday/practice/linear"
)
// CopyLinkListWithRandPtr deep-copies a linked list whose nodes carry an
// extra Jmp pointer to an arbitrary node, in O(n) time and O(1) extra space,
// using the classic three-pass interleaving technique.
// NOTE(review): a single-node list (head.Next == nil) is returned uncopied —
// confirm callers expect to share that node.
func CopyLinkListWithRandPtr(head *linear.NodeJmp) *linear.NodeJmp {
	if head == nil || head.Next == nil {
		return head
	}
	// Pass 1: insert each node's copy immediately after it
	// (A -> A' -> B -> B' -> ...).
	cur := head
	for cur != nil {
		nNode := &linear.NodeJmp{
			Value: cur.Value,
			Next:  cur.Next,
		}
		cur.Next = nNode
		cur = nNode.Next
	}
	// Pass 2: a copy's Jmp target is the copy of the original's Jmp target,
	// which in the interleaved list is exactly cur.Jmp.Next.
	cur = head
	for cur != nil {
		if cur.Jmp == nil {
			cur.Next.Jmp = nil
		} else {
			cur.Next.Jmp = cur.Jmp.Next
		}
		cur = cur.Next.Next
	}
	// Pass 3: detach the copies and restore the original Next pointers.
	var n = head.Next // head of the copied list, saved before unweaving
	cur = head
	for cur != nil {
		next := cur.Next.Next
		cp := cur.Next
		cur.Next = next
		if next != nil {
			cp.Next = next.Next
		} else {
			cp.Next = nil
		}
		cur = cur.Next
	}
	return n
}
|
/**
* DEFER
*
* A defer statement pushes a function call onto a list. The list of
* saved calls is executed after the surrounding function returns.
*
* Rules
* 1. A deferred functions arguments are evaluated when the defer statement is evaluated
* 2. Deferred function calls are executed Last In First Out order after the surrounding function returns
* 3. Deferred functions may read and assign the returning functions named return values.
*
* Resources
* 1. http://blog.golang.org/defer-panic-and-recover
*/
package main
import "fmt"
import "os"
// main creates defer.txt, schedules its closure for when main returns, then
// writes sample data to it — demonstrating that deferred calls run after the
// surrounding function finishes.
func main() {
	file := createFile("./defer.txt")
	defer closeFile(file)
	writeFile(file)
}
func createFile(p string) *os.File {
fmt.Println("creating")
f, err := os.Create(p)
if err != nil {
panic(err)
}
return f
}
func writeFile(f *os.File) {
fmt.Println("writing")
fmt.Fprintln(f, "data")
}
func closeFile(f *os.File) {
fmt.Println("closing")
f.Close()
}
|
package enums
// Numeric region/market codes.
// NOTE(review): presumably exchange identifiers (SZ = Shenzhen,
// SH = Shanghai, HK = Hong Kong) — confirm the intended semantics of the
// specific values 100/130/160.
const (
	SZ int = 100
	SH int = 130
	HK int = 160
)
|
package authlete
import (
"fmt"
"testing"
)
// TestParse exercises BasicCredentials.Parse with valid and invalid
// Authorization header values.
// NOTE(review): case 2 repeats case 0's input verbatim but expects failure —
// one of the two expectations must be wrong; confirm the intended invalid
// input for case 2.
func TestParse(t *testing.T) {
	cases := []struct {
		input   string
		success bool
		want    BasicCredentials
	}{
		{
			// "aladdin:opensesame", base64-encoded.
			input:   "Basic YWxhZGRpbjpvcGVuc2VzYW1l",
			success: true,
			want:    BasicCredentials{"aladdin", "opensesame"},
		},
		{
			// Scheme matching should be case-insensitive.
			input:   "BAsiC YWxhZGRpbjpvcGVuc2VzYW1l",
			success: true,
			want:    BasicCredentials{"aladdin", "opensesame"},
		},
		{
			input:   "Basic YWxhZGRpbjpvcGVuc2VzYW1l",
			success: false,
		},
		{
			// Missing the credentials part entirely.
			input:   "Basic",
			success: false,
		},
	}
	for i, c := range cases {
		t.Run(fmt.Sprintf("test%d", i), func(t *testing.T) {
			var bc BasicCredentials
			err := bc.Parse(c.input)
			if c.success {
				if err != nil || bc != c.want {
					t.Errorf("%#v", bc)
					// t.Error(err) avoids the vet warning the previous
					// t.Errorf(fmt.Sprint(err)) triggered (non-constant
					// format string).
					t.Error(err)
				}
			} else if err == nil {
				t.Errorf("%#v", bc)
			}
		})
	}
}
|
package health
import (
"context"
"testing"
"github.com/jrapoport/gothic/test/tsrv"
"github.com/stretchr/testify/assert"
)
// TestHealthServer_HealthCheck verifies the RPC health endpoint mirrors the
// server's own health report (name, status, version).
func TestHealthServer_HealthCheck(t *testing.T) {
	t.Parallel()
	s, _ := tsrv.RPCServer(t, false)
	srv := newHealthServer(s)
	ctx := context.Background()
	res, err := srv.HealthCheck(ctx, nil)
	assert.NoError(t, err)
	// Compare against the server's direct health check result.
	test := s.HealthCheck()
	assert.Equal(t, test.Name, res.Name)
	assert.Equal(t, test.Status, res.Status)
	assert.Equal(t, test.Version, res.Version)
}
|
package utils
import (
"fmt"
"os"
"text/tabwriter"
"time"
)
// ProgressPrinter renders a live, tab-aligned progress table for a fixed set
// of concurrent downloads, erasing its previous output with ANSI escape
// codes on every refresh.
type ProgressPrinter struct {
	numWriters      int
	progressWriters []ProgressWriter
	// prevProgress records the byte count seen at the previous refresh for
	// each writer, used to derive per-interval transfer speed.
	prevProgress []uint64
	tw           *tabwriter.Writer
	// linesWritten is how many terminal lines the last refresh produced and
	// therefore how many must be erased on the next one.
	linesWritten int
}
// PrintProgress erases the previously printed table and renders a fresh one:
// a header row plus one row per started download showing fetched/total
// bytes, a percentage, the per-interval speed for in-flight transfers, and
// elapsed (or final) time.
func (pp *ProgressPrinter) PrintProgress() {
	// Cursor-up + erase-line for every line written by the last refresh.
	if pp.linesWritten > 0 {
		for i := 0; i < pp.linesWritten; i++ {
			fmt.Fprintf(pp.tw, "\033[A%c[2K", 27)
		}
		pp.linesWritten = 0
		pp.tw.Flush()
	}
	fmt.Fprintf(pp.tw, "File\tFetched\tTotal\tSpeed\tTime\n")
	pp.linesWritten++
	for i, pw := range pp.progressWriters {
		if pw.startTime.IsZero() {
			// Download not started yet; nothing to report.
			continue
		}
		// Guard against an unknown/zero total: the previous version divided
		// by zero, and converting the resulting NaN to int is
		// platform-dependent.
		percentage := 0
		if pw.total > 0 {
			percentage = int(float64(pw.fetched) / float64(pw.total) * 100)
		}
		fmt.Fprintf(pp.tw, "%s\t%s (%d%%)\t%s\t", pw.filename,
			humanizeBytes(pw.fetched), percentage, humanizeBytes(pw.total))
		if !pw.endTime.IsZero() {
			// Finished: show total duration only.
			fmt.Fprintf(pp.tw, "\t%v", pw.endTime.Sub(pw.startTime).Round(time.Second))
		} else {
			// In flight: speed is the delta since the previous refresh.
			deltaFetched := pw.fetched - pp.prevProgress[i]
			pp.prevProgress[i] = pw.fetched
			fmt.Fprintf(pp.tw, "%s/s\t%v",
				humanizeBytes(deltaFetched), time.Since(pw.startTime).Round(time.Second))
		}
		fmt.Fprintf(pp.tw, "\n")
		pp.linesWritten++
	}
	pp.tw.Flush()
}
// NewProgressPrinter builds a ProgressPrinter tracking numWriters downloads,
// writing its tab-aligned table to stdout.
func NewProgressPrinter(numWriters int) ProgressPrinter {
	writer := tabwriter.NewWriter(os.Stdout, 0, 8, 4, '\t', 0)
	return ProgressPrinter{
		numWriters:      numWriters,
		progressWriters: make([]ProgressWriter, numWriters),
		prevProgress:    make([]uint64, numWriters),
		tw:              writer,
	}
}
|
package concerts
import (
"fmt"
)
// DoWork prints the package greeting to stdout.
func DoWork() {
	const greeting = "Hello Concerts"
	fmt.Println(greeting)
}
// Copyright (c) 2019-present Mattermost, Inc. All Rights Reserved.
// See License for license information.
package types
import (
"encoding/json"
"sort"
)
// Value is an element that can be stored in a ValueSet, keyed by its ID.
type Value interface {
	GetID() ID
}

// Setter accepts a Value, e.g. to store it.
type Setter interface {
	Set(Value)
}

// Getter receives a Value.
// NOTE(review): Get takes a Value rather than returning one — presumably the
// implementation fills the argument in place; confirm against implementers.
type Getter interface {
	Get(Value)
}

// ValueArray abstracts a concrete, resizable slice of Values so that
// ValueSet can marshal to and from strongly-typed slices.
type ValueArray interface {
	Len() int
	GetAt(int) Value
	SetAt(int, Value)
	InstanceOf() ValueArray
	Ref() interface{}
	Resize(int)
}
// ValueSet is an ordered set of Values: ids preserves insertion order while
// m provides ID-keyed lookup. proto supplies the concrete array type used
// for JSON (un)marshaling.
type ValueSet struct {
	proto ValueArray
	ids   []ID
	m     map[ID]Value
}
// NewValueSet returns a ValueSet backed by proto, pre-populated with vv in
// order.
func NewValueSet(proto ValueArray, vv ...Value) *ValueSet {
	set := &ValueSet{proto: proto}
	set.Set(vv...)
	return set
}
// From makes set a shallow copy of other: same prototype, fresh id slice and
// id->value map.
func (set *ValueSet) From(other *ValueSet) {
	set.proto = other.proto
	set.ids = make([]ID, len(other.ids))
	copy(set.ids, other.ids)
	fresh := make(map[ID]Value, len(other.m))
	for id, v := range other.m {
		fresh[id] = v
	}
	set.m = fresh
}
// Contains reports whether id is a member of the set.
func (set *ValueSet) Contains(id ID) bool {
	if set.IsEmpty() {
		return false
	}
	_, found := set.m[id]
	return found
}
// Delete removes toDelete from the set, preserving the order of the
// remaining ids. No-op when the id is absent.
//
// The previous version kept iterating (over a slice it was mutating via an
// aliasing append) after the splice; ids are unique, so we stop at the first
// match.
func (set *ValueSet) Delete(toDelete ID) {
	if !set.Contains(toDelete) {
		return
	}
	for n, key := range set.ids {
		if key == toDelete {
			set.ids = append(set.ids[:n], set.ids[n+1:]...)
			break
		}
	}
	delete(set.m, toDelete)
}
// Get returns the Value stored under id, or nil when absent or the set is
// empty.
func (set *ValueSet) Get(id ID) Value {
	if !set.IsEmpty() {
		return set.m[id]
	}
	return nil
}
// GetAt returns the Value at position n in insertion order. Returns nil for
// an empty set; note there is no bounds check, so callers must ensure
// 0 <= n < Len() for a non-empty set or this panics.
func (set *ValueSet) GetAt(n int) Value {
	if set.IsEmpty() {
		return nil
	}
	return set.m[set.ids[n]]
}
// Len returns the number of elements in the set; 0 for a nil set.
func (set *ValueSet) Len() int {
	if set == nil {
		return 0
	}
	return len(set.ids)
}
// IDs returns a copy of the ids in insertion order; never nil.
func (set *ValueSet) IDs() []ID {
	if set.IsEmpty() {
		return []ID{}
	}
	return append([]ID{}, set.ids...)
}
// Set inserts or replaces each of vv keyed by its ID, appending ids for new
// elements in argument order. Lazily initializes the backing storage.
func (set *ValueSet) Set(vv ...Value) {
	if set.ids == nil {
		set.ids = []ID{}
	}
	if set.m == nil {
		set.m = map[ID]Value{}
	}
	for _, v := range vv {
		key := v.GetID()
		if _, exists := set.m[key]; !exists {
			set.ids = append(set.ids, key)
		}
		set.m[key] = v
	}
}
// SetAt inserts or replaces v keyed by its ID, lazily initializing storage.
// NOTE(review): the index n is ignored entirely — this behaves exactly like
// Set(v). Presumably kept for signature symmetry with ValueArray.SetAt;
// confirm the intent.
func (set *ValueSet) SetAt(n int, v Value) {
	if set.ids == nil {
		set.ids = []ID{}
	}
	if set.m == nil {
		set.m = map[ID]Value{}
	}
	id := v.GetID()
	if !set.Contains(id) {
		set.ids = append(set.ids, id)
	}
	set.m[id] = v
}
// AsArray copies the set, in insertion order, into out (resized to fit).
func (set *ValueSet) AsArray(out ValueArray) {
	if set.IsEmpty() {
		out.Resize(0)
		return
	}
	out.Resize(set.Len())
	for n, id := range set.ids {
		out.SetAt(n, set.m[id])
	}
}
// IsEmpty reports whether set is nil or holds no elements.
func (set *ValueSet) IsEmpty() bool {
	return set == nil || len(set.ids) == 0
}
// TestAsArray copies the set into out exactly like AsArray; kept as a
// separate entry point (presumably for tests) but delegated to AsArray so
// the two implementations cannot drift — previously this was a verbatim
// copy of AsArray's body.
func (set *ValueSet) TestAsArray(out ValueArray) {
	set.AsArray(out)
}
// TestIDs returns the set's ids as sorted strings, or nil when the set is
// empty.
func (set *ValueSet) TestIDs() []string {
	if set.IsEmpty() {
		return nil
	}
	out := make([]string, 0, set.Len())
	for _, id := range set.IDs() {
		out = append(out, string(id))
	}
	sort.Strings(out)
	return out
}
// MarshalJSON encodes the set as a JSON array in insertion order, using the
// prototype array for the concrete element type. An empty set encodes as [].
func (set *ValueSet) MarshalJSON() ([]byte, error) {
	if set.IsEmpty() {
		return []byte("[]"), nil
	}
	proto := set.proto.InstanceOf()
	proto.Resize(len(set.ids))
	for n, id := range set.ids {
		proto.SetAt(n, set.m[id])
	}
	return json.Marshal(proto)
}
// UnmarshalJSON decodes a JSON array into the set via the prototype array,
// replacing any existing contents; the JSON element order becomes the set's
// insertion order.
func (set *ValueSet) UnmarshalJSON(data []byte) error {
	proto := set.proto.InstanceOf()
	err := json.Unmarshal(data, proto.Ref())
	if err != nil {
		return err
	}
	// Reset and refill from the decoded array.
	set.ids = []ID{}
	set.m = map[ID]Value{}
	for n := 0; n < proto.Len(); n++ {
		set.Set(proto.GetAt(n))
	}
	return nil
}
|
package eth
import (
commitmenttypes "github.com/bianjieai/tibc-sdk-go/commitment"
tibctypes "github.com/bianjieai/tibc-sdk-go/types"
)
// Compile-time check that ClientState satisfies tibctypes.ClientState.
var _ tibctypes.ClientState = (*ClientState)(nil)

// ClientType returns this light client's type identifier.
func (m ClientState) ClientType() string {
	return "009-eth"
}

// GetLatestHeight returns the height of the latest tracked header.
func (m ClientState) GetLatestHeight() tibctypes.Height {
	return m.Header.Height
}

// Validate performs basic validation by delegating to the header.
func (m ClientState) Validate() error {
	return m.Header.ValidateBasic()
}

// GetDelayTime returns the configured TimeDelay value.
func (m ClientState) GetDelayTime() uint64 {
	return m.TimeDelay
}

// GetDelayBlock returns the configured BlockDelay value.
func (m ClientState) GetDelayBlock() uint64 {
	return m.BlockDelay
}

// GetPrefix returns an empty merkle prefix.
func (m ClientState) GetPrefix() tibctypes.Prefix {
	return commitmenttypes.MerklePrefix{}
}
|
/**
* Doubly LinkedList to solve problem. Keeps track of size in a variable and traverses from
* either the head or tail of the list depending position of the given index.
*
*/
package main
import (
"bufio"
"errors"
"fmt"
"os"
"strconv"
"strings"
)
/**
4
10 200 3 40000 5
200
*/
// main reads an index and a line of space-separated integers from stdin,
// then prints the element `index` positions from the end of the list, or
// "NIL" when that position is out of range.
// Read/parse errors are ignored; malformed tokens become 0 via Atoi.
func main() {
	reader := bufio.NewReader(os.Stdin)
	s, _ := reader.ReadString('\n')
	index, _ := strconv.Atoi(strings.TrimSpace(s))
	s, _ = reader.ReadString('\n')
	ll := List(new(LinkedList))
	for _, element := range strings.Split(s, " ") {
		number, _ := strconv.Atoi(strings.TrimSpace(element))
		ll.Add(number)
	}
	// Size()-index converts "index from the end" into a head-based index.
	val, err := ll.Get(ll.Size() - index)
	if err != nil {
		fmt.Println("NIL")
	} else {
		fmt.Println(val)
	}
}
// Node is a doubly linked list element.
type Node struct {
	prev  *Node
	value int
	next  *Node
}

// List is the interface implemented by LinkedList.
type List interface {
	Add(value int)
	Remove(index int) (int, error)
	Get(index int) (int, error)
	Empty() bool
	Size() int
}

// LinkedList is a doubly linked list that tracks head, tail and size so that
// index lookups can traverse from whichever end is closer.
type LinkedList struct {
	head *Node
	tail *Node
	size int
}

// Add appends value at the tail in O(1).
func (l *LinkedList) Add(value int) {
	n := &Node{value: value}
	if l.head == nil {
		// First element: head and tail are the same node.
		l.head = n
		l.tail = n
	} else {
		n.prev = l.tail
		l.tail.next = n
		l.tail = n
	}
	l.size++
}

// Remove deletes the node at index and returns its value, or an error when
// index is out of range.
func (l *LinkedList) Remove(index int) (int, error) {
	n, err := l.getNode(index)
	if err != nil {
		return -1, err
	}
	if n.prev == nil { // removing the head
		l.head = n.next
	} else {
		n.prev.next = n.next
	}
	if n.next == nil { // removing the tail
		l.tail = n.prev
	} else {
		n.next.prev = n.prev
	}
	// Detach the removed node to help the GC.
	n.prev, n.next = nil, nil
	l.size--
	return n.value, nil
}

// getNode returns the node at index, traversing from whichever end is closer
// to halve the average walk. Bounds-checked against the recorded size.
func (l *LinkedList) getNode(index int) (*Node, error) {
	if index < 0 || index >= l.size {
		return nil, errors.New("index must be at least 0 and less than the list size")
	}
	var n *Node
	fromHead := l.size-index > l.size/2
	if fromHead {
		n = l.head
	} else {
		n = l.tail
		// Convert to a distance from the tail.
		index = l.size - index - 1
	}
	for n != nil && index > 0 {
		if fromHead {
			n = n.next
		} else {
			n = n.prev
		}
		index--
	}
	if n == nil {
		// The list is shorter than size claims — internal inconsistency.
		return nil, errors.New("fewer items in list than list size dictates")
	}
	return n, nil
}

// Get returns the value at index, or an error when index is out of range.
func (l *LinkedList) Get(index int) (int, error) {
	n, err := l.getNode(index)
	if err != nil {
		return -1, err
	}
	return n.value, nil
}

// Print writes the list values, comma-separated, followed by a newline.
func (l *LinkedList) Print() {
	for n := l.head; n != nil; n = n.next {
		fmt.Printf("%d, ", n.value)
	}
	fmt.Println()
}

// Empty reports whether the list has no elements.
func (l *LinkedList) Empty() bool {
	return l.size == 0
}

// Size returns the number of elements in the list.
func (l *LinkedList) Size() int {
	return l.size
}
|
package game
import "errors"
// Sentinel errors returned by move validation; compare with errors.Is.
var (
	// ErrNotYourTurn is returned when the wrong player
	// attempts to make a move.
	ErrNotYourTurn = errors.New("not your turn")
	// ErrOutsideBoard is returned when the players
	// move is outside the board they are playing on.
	ErrOutsideBoard = errors.New("outside board")
	// ErrPositionOccupied is returned when there is
	// already a stone in the move position.
	ErrPositionOccupied = errors.New("position occupied")
	// ErrSuicidalMove is returned when the move would
	// result in the piece being immediately captured.
	// https://senseis.xmp.net/?Suicide
	ErrSuicidalMove = errors.New("suicidal move")
	// ErrViolatesKo is returned when the players move
	// violates the ko rule, repeating board state.
	// https://senseis.xmp.net/?Ko
	ErrViolatesKo = errors.New("violates ko")
)
|
package main
import (
"image"
"image/draw"
"sync"
"github.com/driusan/de/demodel"
"github.com/driusan/de/kbmap"
"github.com/driusan/de/renderer"
"github.com/driusan/de/viewer"
"golang.org/x/exp/shiny/screen"
"golang.org/x/mobile/event/size"
)
// dewindow encapsulates the shiny window of de, guarding its pixel buffer
// and cached size event with the embedded mutex.
type dewindow struct {
	sync.Mutex
	screen.Window
	// painting is a best-effort flag used to drop paint requests that
	// arrive while a paint is already in progress.
	painting bool
	buffer   screen.Buffer
	sz       size.Event
}
// paint draws buf into the window unless a paint is already in progress, in
// which case the request is dropped.
// NOTE(review): w.painting is read here without the lock while realpaint
// writes it under the lock — a racy check; confirm it is intentional
// best-effort frame coalescing.
func (w *dewindow) paint(buf *demodel.CharBuffer, viewport *viewer.Viewport) {
	if w.painting {
		return
	}
	w.realpaint(buf, viewport)
}
// realpaint paints buf into the viewport attached to this window: it fills
// the background according to the keyboard mode, renders the tagline
// (capped at half the window height) and the buffer contents below it, then
// uploads and publishes the frame. Holds the window lock for the duration;
// no-op when the pixel buffer has not been allocated yet.
func (w *dewindow) realpaint(buf *demodel.CharBuffer, viewport *viewer.Viewport) {
	w.Lock()
	defer w.Unlock()
	defer func() {
		w.painting = false
	}()
	w.painting = true
	if w.buffer == nil {
		return
	}
	dst := w.buffer.RGBA()
	// Fill the buffer with the window background colour before
	// drawing the web page on top of it.
	// This should logically be in the viewport code itself, but importing
	// kbmap to switch on the mode sentinals would result in a cyclical
	// import.
	if viewport.BackgroundMode != viewer.StableBackground {
		switch viewport.GetKeyboardMode() {
		case kbmap.InsertMode:
			draw.Draw(dst, dst.Bounds(), &image.Uniform{renderer.InsertBackground}, image.ZP, draw.Src)
		case kbmap.DeleteMode:
			draw.Draw(dst, dst.Bounds(), &image.Uniform{renderer.DeleteBackground}, image.ZP, draw.Src)
		default:
			draw.Draw(dst, dst.Bounds(), &image.Uniform{renderer.NormalBackground}, image.ZP, draw.Src)
		}
	} else {
		draw.Draw(dst, dst.Bounds(), &image.Uniform{renderer.NormalBackground}, image.ZP, draw.Src)
	}
	s := w.sz.Size()
	contentBounds := dst.Bounds()
	tagBounds := tagSize
	// ensure that the tag takes no more than half the window, so that the content doesn't get
	// drowned out by commands that output more to stderr than they should.
	if wHeight := s.Y; tagBounds.Max.Y > wHeight/2 {
		tagBounds.Max.Y = wHeight / 2
	}
	contentBounds.Min.Y = tagBounds.Max.Y
	// Tagline on top, buffer contents below it.
	tagline.RenderInto(dst.SubImage(image.Rectangle{image.ZP, image.Point{s.X, tagBounds.Max.Y}}).(*image.RGBA), buf.Tagline, clipRectangle(w.sz, viewport))
	viewport.RenderInto(dst.SubImage(image.Rectangle{image.Point{0, tagBounds.Max.Y}, s}).(*image.RGBA), buf, clipRectangle(w.sz, viewport))
	w.Upload(image.Point{0, 0}, w.buffer, dst.Bounds())
	w.Publish()
	return
}
// setSize records the new window size and replaces the pixel buffer with one
// matching it, releasing the previous buffer. On allocation failure the
// (possibly nil) buffer is still stored — realpaint tolerates a nil buffer —
// and the error is returned.
func (w *dewindow) setSize(s size.Event, sc screen.Screen) error {
	w.Lock()
	defer w.Unlock()
	w.sz = s
	if w.buffer != nil {
		// Release the old buffer.
		w.buffer.Release()
	}
	sbuffer, err := sc.NewBuffer(s.Size())
	w.buffer = sbuffer
	return err
}
|
package cleanup
import (
"context"
"io/ioutil"
"os"
"path"
"time"
"github.com/ssok8s/ssok8s/pkg/log"
"github.com/ssok8s/ssok8s/pkg/registry"
"github.com/ssok8s/ssok8s/pkg/setting"
)
// CleanUpService periodically deletes expired temporary image files.
// Cfg is populated by the registry's dependency injection (`inject` tag).
type CleanUpService struct {
	log log.Logger
	Cfg *setting.Cfg `inject:""`
}
// init registers the service with the registry at package load time.
func init() {
	registry.RegisterService(&CleanUpService{})
}
// Init prepares the service logger; part of the registry service lifecycle.
func (srv *CleanUpService) Init() error {
	srv.log = log.New("cleanup")
	return nil
}
// Run cleans up temp files once immediately, then every 10 minutes, until
// ctx is canceled; returns the context's error.
func (srv *CleanUpService) Run(ctx context.Context) error {
	srv.cleanUpTmpFiles()
	ticker := time.NewTicker(time.Minute * 10)
	// Stop the ticker on exit so its resources are released (previously it
	// was left running after ctx cancellation).
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			srv.cleanUpTmpFiles()
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
// cleanUpTmpFiles removes files in Cfg.ImagesDir that have outlived
// Cfg.TempDataLifetime. A missing directory is a silent no-op; individual
// delete failures are logged and skipped.
func (srv *CleanUpService) cleanUpTmpFiles() {
	if _, err := os.Stat(srv.Cfg.ImagesDir); os.IsNotExist(err) {
		return
	}
	files, err := ioutil.ReadDir(srv.Cfg.ImagesDir)
	if err != nil {
		srv.log.Error("Problem reading image dir", "error", err)
		return
	}
	var toDelete []os.FileInfo
	var now = time.Now()
	for _, file := range files {
		if srv.shouldCleanupTempFile(file.ModTime(), now) {
			toDelete = append(toDelete, file)
		}
	}
	for _, file := range toDelete {
		fullPath := path.Join(srv.Cfg.ImagesDir, file.Name())
		err := os.Remove(fullPath)
		if err != nil {
			srv.log.Error("Failed to delete temp file", "file", file.Name(), "error", err)
		}
	}
	// NOTE(review): "kept" reports the total entry count, including the ones
	// just deleted — presumably it should be len(files)-len(toDelete);
	// confirm.
	srv.log.Debug("Found old rendered image to delete", "deleted", len(toDelete), "kept", len(files))
}
// shouldCleanupTempFile reports whether a file last modified at filemtime
// has outlived Cfg.TempDataLifetime as of now. A zero lifetime disables
// cleanup entirely.
func (srv *CleanUpService) shouldCleanupTempFile(filemtime time.Time, now time.Time) bool {
	lifetime := srv.Cfg.TempDataLifetime
	if lifetime == 0 {
		return false
	}
	return now.After(filemtime.Add(lifetime))
}
|
package main
import (
"bytes"
"fmt"
"net/http"
"os"
"github.com/apex/log"
"github.com/jackmcguire1/UserService/api/healthcheck"
"github.com/jackmcguire1/UserService/api/searchapi"
"github.com/jackmcguire1/UserService/api/userapi"
"github.com/jackmcguire1/UserService/dom/user"
"github.com/jackmcguire1/UserService/pkg/utils"
)
// Package-level wiring, assembled in init() from environment variables.
var (
	userService        user.UserService
	userHandler        *userapi.UserHandler
	searchHandler      *searchapi.SearchHandler
	healthCheckHandler *healthcheck.HealthCheckHandler

	// Elasticsearch connection settings (ELASTIC_* env vars).
	elasticSearchHost       string
	elasticSearchPort       string
	elasticSearchSecondPort string
	elasticSearchUserIndex  string

	// HTTP listen address (LISTEN_HOST / LISTEN_PORT env vars).
	listenPort string
	listenHost string

	// userUpdates carries user change events to the publisher goroutine.
	userUpdates chan *user.UserUpdate
	// eventsURL, when non-empty, receives each update via HTTP POST.
	eventsURL string
)
// init configures logging, reads service configuration from the
// environment, and constructs the user service plus its HTTP handlers.
// Fatal-exits if the user service cannot be created.
func init() {
	logLevel := os.Getenv("LOG_VERBOSITY")
	switch logLevel {
	case "":
		logLevel = "info"
		fallthrough
	default:
		log.SetLevelFromString(logLevel)
	}
	elasticSearchHost = os.Getenv("ELASTIC_HOST")
	elasticSearchPort = os.Getenv("ELASTIC_PORT")
	elasticSearchSecondPort = os.Getenv("ELASTIC_SECOND_PORT")
	elasticSearchUserIndex = os.Getenv("ELASTIC_USER_INDEX")
	listenPort = os.Getenv("LISTEN_PORT")
	listenHost = os.Getenv("LISTEN_HOST")
	userUpdates = make(chan *user.UserUpdate, 1)
	eventsURL = os.Getenv("EVENTS_URL")
	var err error
	userService, err = user.NewService(&user.Resources{
		UserChannel: userUpdates,
		Repo: user.NewElasticRepo(&user.ElasticSearchParams{
			Host: elasticSearchHost,
			Port: elasticSearchPort,
			// BUG FIX: previously passed elasticSearchPort here, leaving
			// ELASTIC_SECOND_PORT read but unused.
			SecondPort:    elasticSearchSecondPort,
			UserIndexName: elasticSearchUserIndex,
		}),
	})
	if err != nil {
		log.WithError(err).Fatal("failed to init user service")
	}
	userHandler = &userapi.UserHandler{UserService: userService}
	searchHandler = &searchapi.SearchHandler{UserService: userService}
	healthCheckHandler = &healthcheck.HealthCheckHandler{LogVerbosity: logLevel}
}
// main registers the HTTP routes, starts the goroutine that forwards user
// updates to the configured events URL, and serves until failure.
func main() {
	s := http.NewServeMux()
	s.Handle("/user", userHandler)
	s.HandleFunc("/search/users/by_country", searchHandler.UsersByCountry)
	s.Handle("/healthcheck", healthCheckHandler)
	addr := fmt.Sprintf("%s:%s", listenHost, listenPort)
	log.
		WithField("events-url", eventsURL).
		Info("starting user updates handler")
	// POST user updates to URL until the channel closes.
	go func(updates chan *user.UserUpdate) {
		// Previously this ranged over the global userUpdates, ignoring the
		// parameter it was handed; use the parameter.
		for update := range updates {
			log.
				WithField("update", utils.ToJSON(update)).
				Info("got user update")
			if eventsURL == "" {
				continue
			}
			r := bytes.NewReader([]byte(utils.ToJSON(update)))
			resp, err := http.Post(eventsURL, "application/json", r)
			if err != nil {
				log.
					WithError(err).
					Error("failed to publish user update")
				continue
			}
			// Close the response body so the transport can reuse the
			// connection (previously the response was discarded unclosed).
			resp.Body.Close()
		}
	}(userUpdates)
	log.
		WithField("addr", addr).
		Info("starting http server")
	if err := http.ListenAndServe(addr, s); err != nil {
		log.
			WithError(err).
			Fatal("failed to listen and serve")
	}
}
|
package main
import (
"../../internal/handlers"
"../../internal/utils"
)
// main prints the hello banner and then shows the menu.
// NOTE(review): this binary is wired up with relative import paths
// ("../../internal/…"), which Go modules do not support — consider
// module-based import paths.
func main() {
	utils.ShowHello()
	handlers.ShowMenu()
}
|
package cmd
import (
"fmt"
"os"
)
// CmdPrintln writes a to stdout, space-separated with a trailing newline,
// returning the byte count written and any write error.
func CmdPrintln(a ...interface{}) (int, error) {
	return fmt.Fprintln(os.Stdout, a...)
}
// CmdPrintErrorln writes a to stderr, space-separated with a trailing
// newline, returning the byte count written and any write error.
func CmdPrintErrorln(a ...interface{}) (int, error) {
	w := os.Stderr
	return fmt.Fprintln(w, a...)
}
func CmdPrettyPrintln(a ...interface{}) (int, error) {
return fmt.Fprintln(os.Stdout, a...)
}
|
package main
import (
"errors"
"flag"
"fmt"
"io"
"log"
"net"
"strings"
"time"
)
// Command-line flags and resolved TCP addresses.
var (
	// masterAddr is refreshed by the master() poller goroutine and read by
	// the accept loop in main.
	// NOTE(review): accessed from two goroutines with no synchronization —
	// a data race; confirm whether atomic/mutex protection is needed.
	masterAddr *net.TCPAddr
	// NOTE(review): raddr appears unused in this file.
	raddr *net.TCPAddr
	saddr *net.TCPAddr

	localAddr    = flag.String("listen", ":9999", "local address")
	sentinelAddr = flag.String("sentinel", ":26379", "remote address")
	masterName   = flag.String("master", "", "name of the master redis node")
)
// main resolves the local and sentinel addresses, starts the background
// master-address poller, then accepts connections forever, proxying each to
// the current Redis master.
func main() {
	flag.Parse()
	laddr, err := net.ResolveTCPAddr("tcp", *localAddr)
	if err != nil {
		// BUG FIX: was log.Fatal with a %s verb, which prints the format
		// string literally; Fatalf performs the formatting.
		log.Fatalf("Failed to resolve local address: %s", err)
	}
	saddr, err = net.ResolveTCPAddr("tcp", *sentinelAddr)
	if err != nil {
		log.Fatalf("Failed to resolve sentinel address: %s", err)
	}
	go master()
	listener, err := net.ListenTCP("tcp", laddr)
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			log.Println(err)
			continue
		}
		go proxy(conn, masterAddr)
	}
}
// master polls the sentinel once per second and refreshes the global
// masterAddr with the current Redis master address; lookup failures are
// logged and retried on the next tick.
// NOTE(review): masterAddr is written here and read by main's accept loop
// without synchronization — confirm whether this race is acceptable.
func master() {
	var err error
	for {
		masterAddr, err = getMasterAddr(saddr, *masterName)
		if err != nil {
			log.Println(err)
		}
		time.Sleep(1 * time.Second)
	}
}
func pipe(r io.Reader, w io.WriteCloser) {
io.Copy(w, r)
w.Close()
}
// proxy dials remoteAddr and splices bytes bidirectionally between the local
// and remote connections; each direction closes its write side on EOF. On
// dial failure the local connection is closed and the error logged.
func proxy(local io.ReadWriteCloser, remoteAddr *net.TCPAddr) {
	remote, err := net.DialTCP("tcp", nil, remoteAddr)
	if err != nil {
		log.Println(err)
		local.Close()
		return
	}
	go pipe(local, remote)
	go pipe(remote, local)
}
// getMasterAddr asks the sentinel for the current master address of
// masterName and verifies something is listening there. Returns a non-nil
// error when the sentinel is unreachable, the reply is malformed, or the
// master does not accept connections.
func getMasterAddr(sentinelAddress *net.TCPAddr, masterName string) (*net.TCPAddr, error) {
	conn, err := net.DialTCP("tcp", nil, sentinelAddress)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	// Previously the Write error was silently ignored.
	if _, err := conn.Write([]byte(fmt.Sprintf("sentinel get-master-addr-by-name %s\n", masterName))); err != nil {
		return nil, err
	}
	b := make([]byte, 256)
	if _, err := conn.Read(b); err != nil {
		// BUG FIX: this used to log.Fatal, killing the whole proxy on a
		// single failed read even though the function returns an error.
		return nil, err
	}
	parts := strings.Split(string(b), "\r\n")
	if len(parts) < 5 {
		return nil, errors.New("couldn't get master address from sentinel")
	}
	// The reply carries host and port on separate lines.
	stringaddr := fmt.Sprintf("%s:%s", parts[2], parts[4])
	addr, err := net.ResolveTCPAddr("tcp", stringaddr)
	if err != nil {
		return nil, err
	}
	// Check that there's actually someone listening on that address.
	conn2, err := net.DialTCP("tcp", nil, addr)
	if err == nil {
		defer conn2.Close()
	}
	return addr, err
}
|
package main
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"runtime"
"sync"
)
// HttpHost wraps a single host/URL string for deduplication.
type HttpHost struct {
	Host string
}
// main builds and prints a PAC script for two sample hosts, demonstrates
// WaitGroup-synchronized nested goroutines, fetches a URL, reports the OS,
// and deduplicates a host list.
func main() {
	hosts := []string{"www.baidu.com", "www.sina.com"}
	url := "192.168.1.20"
	buffer := bytes.NewBufferString("")
	fmt.Fprintf(buffer, `func FindProxyForURL(url, host) {`)
	for _, host := range hosts {
		fmt.Fprintf(buffer, `if (dnsDomainIs(host, "%s"))`, host)
		fmt.Fprintf(buffer, `{ return "PROXY %s"; }`, url)
	}
	fmt.Fprintf(buffer, `return "DIRECT" }`)
	fmt.Println(buffer.String())
	var wait sync.WaitGroup
	wait.Add(1)
	go func() {
		s := "first go routine"
		fmt.Println(s)
		go func() {
			fmt.Println(s + "inner")
			wait.Done()
		}()
	}()
	wait.Wait()
	url = "https://www.jianshu.com/favicon.ico"
	resp, err := http.Get(url)
	if err != nil {
		// BUG FIX: the error was previously ignored and resp.Body
		// dereferenced, panicking (nil resp) on any network failure.
		fmt.Println("fetch failed:", err)
	} else {
		defer resp.Body.Close()
		b, _ := ioutil.ReadAll(resp.Body)
		fmt.Println(string(b))
	}
	sys := runtime.GOOS
	if sys == "linux" {
		fmt.Println("linux")
	} else if sys == "windows" {
		fmt.Println("windows")
	} else {
		fmt.Println(sys)
	}
	httphosts := []*HttpHost{{"http://www.baidu.com"}, {"jira.zshield.int"}, {"http://www.baidu.com"}, {"jira.zshield.int"}}
	re := removeRepeathost(httphosts)
	fmt.Println(len(re))
}
// removeRepeathost returns hosts with duplicates (by Host) removed, keeping
// the LAST occurrence of each Host in its original position — the same
// semantics as the previous version, but in O(n) via an index map instead of
// an O(n^2) pairwise scan.
func removeRepeathost(hosts []*HttpHost) []*HttpHost {
	// last maps each Host to the index of its final occurrence.
	last := make(map[string]int, len(hosts))
	for i, h := range hosts {
		last[h.Host] = i
	}
	result := make([]*HttpHost, 0, len(last))
	for i, h := range hosts {
		if last[h.Host] == i {
			result = append(result, h)
		}
	}
	return result
}
|
package main
import (
"bufio"
"flag"
"os"
"strconv"
)
// loading_has_completed flips to true once Generate has replayed all primes
// persisted in data.txt; largest_previous_prime is the (odd-adjusted) last
// value from that file. Both are written by the Generate goroutine and read
// by main.
// NOTE(review): there is no synchronization on these globals — presumably
// ordering is enforced by the unbuffered channel handshake; confirm.
var loading_has_completed = false
var largest_previous_prime = 0
// Generate replays the primes saved in data.txt into out, records the last
// replayed value (forced odd via |= 1) in the package globals, then emits
// every successive integer forever. Intended to run as a goroutine feeding
// the sieve.
func Generate(out chan<- int) {
	i := LoadDataFile(out)
	i |= 1
	loading_has_completed = true
	largest_previous_prime = i
	for {
		i++
		out <- i
	}
}
// Filter forwards values from in to out, dropping multiples of prime.
// Runs forever; intended to be launched as a goroutine, one per sieve stage.
func Filter(in <-chan int, out chan<- int, prime int) {
	for {
		candidate := <-in
		if candidate%prime == 0 {
			continue
		}
		out <- candidate
	}
}
func LoadDataFile(ch chan<- int) int {
var prime int
file, _ := os.Open("data.txt")
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
prime, _ = strconv.Atoi(scanner.Text())
ch <- prime
}
return prime
}
// GetNth returns the requested prime count from the -nth flag (default 100).
// It calls flag.Parse, so all flags must be registered before this runs.
func GetNth() int {
	nth := flag.Int("nth", 100, "Find the Nth prime. (Default: 100)")
	flag.Parse()
	return *nth
}
// GetAppendableFile opens data.txt for appending, creating it (mode 0600)
// if needed; panics via dealbreaker on failure.
func GetAppendableFile() *os.File {
	mode := os.O_CREATE | os.O_APPEND | os.O_WRONLY
	f, err := os.OpenFile("data.txt", mode, 0600)
	dealbreaker(err)
	return f
}
// SavePrimes appends the given primes to data.txt, one per line.
// It panics (via dealbreaker) if the file cannot be opened or written.
func SavePrimes(primes []int) {
	file := GetAppendableFile()
	defer file.Close()
	// Build the payload once with strconv.AppendInt instead of repeated
	// string concatenation, which was quadratic in the number of primes.
	buf := make([]byte, 0, len(primes)*8)
	for _, prime := range primes {
		buf = strconv.AppendInt(buf, int64(prime), 10)
		buf = append(buf, '\n')
	}
	_, err := file.Write(buf)
	dealbreaker(err)
}
// dealbreaker panics on any non-nil error; used for unrecoverable I/O
// failures in this small program.
func dealbreaker(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// main runs a classic concurrent prime sieve: Generate feeds candidates
// into a chain of Filter goroutines, one per prime found so far.
// Primes discovered beyond those already replayed from data.txt are
// appended back to the file at the end.
func main() {
	channel := make(chan int)
	go Generate(channel)
	length := GetNth()
	var i, prime int
	var primes []int
	for i = 0; i < length; i++ {
		// Get next prime: whatever survives every filter in the chain.
		prime = <-channel
		// Save "new" primes only — loading_has_completed and
		// largest_previous_prime are set by Generate after replaying
		// the data file, so replayed primes are not re-saved.
		if loading_has_completed && prime > largest_previous_prime {
			primes = append(primes, prime)
		}
		// Extend the chain with a filter removing multiples of this prime.
		new_channel := make(chan int)
		go Filter(channel, new_channel, prime)
		channel = new_channel
	}
	SavePrimes(primes)
	print("prime", i, " := ", prime)
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
)
// abs returns the absolute value of a.
func abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
// delta returns the absolute difference between two clock times, given
// as hour/minute/second pairs, formatted as HH:MM:SS.
func delta(h1, m1, s1, h2, m2, s2 int) string {
	// Total signed difference in seconds, then take the magnitude
	// (the small abs helper is inlined here).
	seconds := (h1-h2)*3600 + (m1-m2)*60 + (s1 - s2)
	if seconds < 0 {
		seconds = -seconds
	}
	return fmt.Sprintf("%02d:%02d:%02d", seconds/3600, (seconds/60)%60, seconds%60)
}
// main reads a file (path given as the first CLI argument) where each
// line holds two clock times "H:M:S H:M:S" and prints the absolute
// difference for every line.
func main() {
	var h1, m1, s1, h2, m2, s2 int
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		// NOTE(review): Sscanf parse errors are ignored, so a malformed
		// line silently reuses the previous values — confirm acceptable.
		fmt.Sscanf(scanner.Text(), "%d:%d:%d %d:%d:%d", &h1, &m1, &s1, &h2, &m2, &s2)
		fmt.Println(delta(h1, m1, s1, h2, m2, s2))
	}
}
|
package sprigmath
import (
"github.com/Masterminds/sprig"
"math"
"strconv"
)
// GenericFuncMap returns sprig's generic function map extended — and,
// where names collide, overridden — with this package's math helpers.
func GenericFuncMap() map[string]interface{} {
	merged := sprig.GenericFuncMap()
	for name, fn := range functions {
		merged[name] = fn
	}
	return merged
}
// functions maps template function names to the implementations defined
// elsewhere in this package; GenericFuncMap merges it over sprig's
// defaults, overriding sprig where names collide.
var functions = map[string]interface{}{
	// conversions
	"atoi":    strconv.Atoi,
	"int":     toInt,
	"int64":   toInt64,
	"float64": toFloat64,
	// converts to an integer or float
	"number": toNumber,
	// convenience
	"double": toFloat64,
	// math in sprig that we're overriding
	"add1":    add1,
	"add":     add,
	"sub":     sub,
	"div":     div,
	"mod":     mod,
	"mul":     mul,
	"biggest": max,
	"max":     max,
	"min":     min,
	"ceil":    ceil,
	"floor":   floor,
	"round":   round,
	// math
	"acos":     acos,
	"acosh":    acosh,
	"asin":     asin,
	"asinh":    asinh,
	"atan":     atan,
	"atan2":    atan2,
	"atanh":    atanh,
	"cbrt":     cbrt,
	"copysign": copysign, // args are inverted to accommodate `computation | copysign -1`
	"cos":      cos,
	"cosh":     cosh,
	"erf":      erf,
	"erfc":     erfc,
	"erfinv":   erfinv,
	"exp":      exp,
	"exp2":     exp2,
	"expm1":    expm1,
	"gamma":    gamma,
	"hypot":    hypot,
	"ilogb":    ilogb,
	"inf":      inf,
	"log":      log,
	"log10":    log10,
	"log1p":    log1p,
	"log2":     log2,
	"logb":     logb,
	"pow":      pow,
	"pow10":    pow10,
	"signbit":  signbit,
	"sin":      sin,
	"sinh":     sinh,
	"sqrt":     sqrt,
	"tan":      tan,
	"tanh":     tanh,
	"trunc":    trunc,
	// these are missing from the go math stdlib, but useful anyways?
	"degrees": degrees,
	"radians": radians,
	// constants
	"pi": func() float64 { return math.Pi },
	"e":  func() float64 { return math.E },
}
|
package dictionary
import (
"testing"
)
// TestSearchDictionary exercises the package-level Search function,
// which takes the map and key as parameters.
func TestSearchDictionary(t *testing.T) {
	dictionary := map[string]string{"test": "this is a test"}
	got := Search(dictionary, "test")
	want := "this is a test"
	assertString(t, got, want)
}
// TestSearch exercises Search as a method with a Dictionary receiver.
func TestSearch(t *testing.T) {
	// Happy path.
	dictionary := Dictionary{"test": "this is a test"}
	t.Run("finding key:test", func(t *testing.T) {
		got, _ := dictionary.Search("test")
		want := "this is a test"
		assertString(t, got, want)
	})
	// Error path: the key does not exist.
	t.Run("finding an unknown key:wtf", func(t *testing.T) {
		_, err := dictionary.Search("wtf")
		want := errorNotFound
		assertError(t, err, want)
	})
}
// TestAdd exercises Add with a Dictionary receiver.
func TestAdd(t *testing.T) {
	// Happy path.
	t.Run("add a word", func(t *testing.T) {
		dictionary := Dictionary{}
		key := "test"
		want := "this is just a test"
		dictionary.Add(key, want)
		assertDefinition(t, dictionary, key, want)
	})
	// Error path: adding a key that already exists must fail and must
	// leave the original definition untouched.
	t.Run("add an existing word", func(t *testing.T) {
		key := "test"
		definition := "this is a new test"
		dictionary := Dictionary{key: definition}
		err := dictionary.Add(key, "new test")
		assertError(t, err, errorKeyExisting)
		assertDefinition(t, dictionary, key, definition)
	})
}
// TestUpdate exercises Update with a Dictionary receiver.
func TestUpdate(t *testing.T) {
	// Happy path: updating an existing word replaces its definition.
	t.Run("existing word", func(t *testing.T) {
		word := "test"
		definition := "this is just a test"
		dictionary := Dictionary{word: definition}
		newDefinition := "this is a new test"
		dictionary.Update(word, newDefinition)
		assertDefinition(t, dictionary, word, newDefinition)
	})
	// Error path: updating a word that does not exist must fail.
	t.Run("new word", func(t *testing.T) {
		dictionary := Dictionary{}
		word := "test"
		newDefinition := "this is just a test"
		err := dictionary.Update(word, newDefinition)
		assertError(t, err, errKeyNotExisting)
	})
}
// TestDelete exercises Delete with a Dictionary receiver.
func TestDelete(t *testing.T) {
	// Happy path: a deleted word can no longer be found.
	t.Run("delete a existed word", func(t *testing.T) {
		word := "test"
		definition := "this is just a test"
		dictionary := Dictionary{word: definition}
		dictionary.Delete(word)
		_, err := dictionary.Search(word)
		assertError(t, err, errorNotFound)
	})
	// Error path: deleting a word that does not exist must fail.
	t.Run("delete a not existed word", func(t *testing.T) {
		word := "test"
		dictionary := Dictionary{}
		err := dictionary.Delete(word)
		assertError(t, err, errKeyNotExisting)
	})
}
// assertDefinition fails the test unless dictionary maps key to want.
func assertDefinition(t *testing.T, dictionary Dictionary, key, want string) {
	t.Helper()
	got, err := dictionary.Search(key)
	if err != nil {
		// BUG FIX: the message previously read "should find an error",
		// the opposite of what this branch means.
		t.Fatal("should not find an error:", err)
	}
	if want != got {
		t.Errorf("got '%s' want '%s'", got, want)
	}
}
// assertError fails the test unless got is the expected non-nil error.
func assertError(t *testing.T, got error, want error) {
	t.Helper()
	switch {
	case got == nil:
		t.Fatal("error should not be nil")
	case got != want:
		t.Errorf("got %s, want %s", got.Error(), want.Error())
	}
}
// assertString fails the test when got differs from want.
func assertString(t *testing.T, got string, want string) {
	t.Helper()
	if got == want {
		return
	}
	t.Errorf("got %s, want %s", got, want)
}
|
package gobcnbicing
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// BCNBicing type holds a list of bike stations on the city, plus the
// API's update timestamp. All station fields are kept as strings
// because that is how the upstream JSON delivers them.
type BCNBicing struct {
	Stations []struct {
		Altitude       string `json:"altitude"`
		Bikes          string `json:"bikes"`
		ID             string `json:"id"`
		Latitude       string `json:"latitude"`
		Longitude      string `json:"longitude"`
		NearbyStations string `json:"nearbyStations"`
		Slots          string `json:"slots"`
		Status         string `json:"status"`
		StreetName     string `json:"streetName"`
		StreetNumber   string `json:"streetNumber"`
		Type           string `json:"type"`
	} `json:"stations"`
	// UpdateTime is the API's update timestamp (epoch seconds, presumably
	// — TODO confirm against the service documentation).
	UpdateTime int `json:"updateTime"`
}
// GetStations fetches the current list of Bicing stations from the
// public API and decodes the JSON payload into a BCNBicing value.
// Errors are logged (preserving the original behavior) and returned.
func GetStations() (bcnbicing BCNBicing, err error) {
	client := &http.Client{}
	// The original ignored the NewRequest error; handle it explicitly.
	req, err := http.NewRequest("GET", "http://wservice.viabicing.cat/v2/stations", nil)
	if err != nil {
		fmt.Println(err)
		return bcnbicing, err
	}
	req.Header.Add("Accept", "application/json")
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println(err)
		return bcnbicing, err
	}
	defer resp.Body.Close()
	// The original discarded the ReadAll error; a truncated body would
	// previously surface only as a confusing JSON error.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println(err)
		return bcnbicing, err
	}
	err = json.Unmarshal(body, &bcnbicing)
	if err != nil {
		fmt.Println(err)
	}
	return bcnbicing, err
}
|
package command
// Command is a parsed command line: its name, positional targets, and
// the boolean and valued flags recognized for it (keyed by alias).
type Command struct {
	Name       string
	Targets    []string
	BoolFlags  map[string]bool
	ValueFlags map[string]string
}
// NewCommand returns a Command with the given name and empty, non-nil
// target and flag collections ready for population.
func NewCommand(name string) *Command {
	cmd := &Command{Name: name}
	cmd.Targets = []string{}
	cmd.BoolFlags = map[string]bool{}
	cmd.ValueFlags = map[string]string{}
	return cmd
}
// setBool records, for every alias of flag, the value the flag takes
// when it is present on the command line (the negation of its default).
func (c *Command) setBool(flag BoolFlag) {
	for alias := range flag.Aliases {
		c.BoolFlags[alias] = !flag.Default
	}
}
// setBoolDefault seeds BoolFlags with flag's default for each alias,
// but only when that default is true (false is the map's zero value).
func (c *Command) setBoolDefault(flag BoolFlag) {
	if !flag.Default {
		return
	}
	for alias := range flag.Aliases {
		c.BoolFlags[alias] = true
	}
}
// setValue stores value under every alias of flag.
func (c *Command) setValue(flag ValueFlag, value string) {
	for alias := range flag.Aliases {
		c.ValueFlags[alias] = value
	}
}
// setValueDefault seeds ValueFlags with flag's default for each alias,
// skipping empty defaults (the map's zero value anyway).
func (c *Command) setValueDefault(flag ValueFlag) {
	if flag.Default == "" {
		return
	}
	for alias := range flag.Aliases {
		c.ValueFlags[alias] = flag.Default
	}
}
|
package github
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"regexp"
"github.com/google/go-github/v31/github"
"golang.org/x/oauth2"
"github.com/weaveworks/go-git-provider/pkg/providers"
)
const (
	// EnvVarGitHubToken names the environment variable that must hold a
	// GitHub API token for authentication.
	EnvVarGitHubToken = "GITHUB_TOKEN"
)
var (
	// sshFull matches fully-qualified SSH URLs such as
	// ssh://git@github.com/owner/repo.git.
	// BUG FIX: the ".git" dot is now escaped and anchored, and the repo
	// group accepts dots — previously "owner/my.repo.git" captured only
	// "my" because `[^.]+` stopped at the first dot and the unescaped
	// `.` matched any character.
	sshFull = regexp.MustCompile(`ssh://git@github.com/([^/]+)/(.+)\.git$`)
	// sshShort matches the scp-like syntax git@github.com:owner/repo.git.
	sshShort = regexp.MustCompile(`git@github.com:([^/]+)/(.+)\.git$`)
	// patterns lists the recognized URL shapes in match order.
	patterns = []*regexp.Regexp{
		sshFull,
		sshShort,
	}
)
// GitHubProvider accesses the Github API for one owner/repo pair.
type GitHubProvider struct {
	owner, repo string
	// readOnly controls whether deploy keys created by AuthorizeSSHKey
	// are marked read-only.
	readOnly    bool
	githubToken string
}
// NewGitHubProvider builds a GitHubProvider for the repository named by
// repoURL (an SSH-style GitHub URL). The API token is read from the
// GITHUB_TOKEN environment variable; an unset token is an error.
func NewGitHubProvider(repoURL string, readOnly bool) (*GitHubProvider, error) {
	githubToken := os.Getenv(EnvVarGitHubToken)
	if githubToken == "" {
		// gofmt fix: the original was missing the space after the comma.
		return nil, fmt.Errorf("%s is not set. Cannot authenticate to github.com", EnvVarGitHubToken)
	}
	repo, err := repoName(repoURL)
	if err != nil {
		return nil, err
	}
	owner, err := repoOwner(repoURL)
	if err != nil {
		return nil, err
	}
	return &GitHubProvider{
		githubToken: githubToken,
		readOnly:    readOnly,
		owner:       owner,
		repo:        repo,
	}, nil
}
// AuthorizeSSHKey registers key as a deploy key on the provider's
// repository, marked read-only according to the provider's setting.
// Any HTTP status other than 201 Created is reported as an error.
func (p *GitHubProvider) AuthorizeSSHKey(ctx context.Context, key providers.SSHKey) error {
	gh := p.getGitHubAPIClient(ctx)
	_, resp, err := gh.Repositories.CreateKey(ctx, p.owner, p.repo, &github.Key{
		Key:      &key.Key,
		Title:    &key.Title,
		ReadOnly: &p.readOnly,
	})
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("unable to authorize SSH Key %q. Got StatusCode %s", key.Title, resp.Status)
	}
	return nil
}
// Delete removes the repository deploy key whose title equals title.
// When no key matches, Delete is a silent no-op and returns nil.
func (p *GitHubProvider) Delete(ctx context.Context, title string) error {
	gh := p.getGitHubAPIClient(ctx)
	keys, _, err := gh.Repositories.ListKeys(ctx, p.owner, p.repo, &github.ListOptions{})
	if err != nil {
		return err
	}
	var keyID int64
	for _, key := range keys {
		if key.GetTitle() == title {
			keyID = key.GetID()
			break
		}
	}
	// keyID stays 0 when no key matched: nothing to delete.
	if keyID == 0 {
		return nil
	}
	if _, err := gh.Repositories.DeleteKey(ctx, p.owner, p.repo, keyID); err != nil {
		return err
	}
	return nil
}
// getGitHubAPIClient returns a GitHub API client whose HTTP transport
// injects the provider's OAuth2 token into every request.
func (p *GitHubProvider) getGitHubAPIClient(ctx context.Context) *github.Client {
	source := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: p.githubToken})
	httpClient := oauth2.NewClient(ctx, source)
	return github.NewClient(httpClient)
}
// repoOwner extracts the owner (first capture group) from repoURL.
func repoOwner(repoURL string) (string, error) {
	return findRepoGroup(repoURL, 1)
}

// repoName extracts the repository name (second capture group) from repoURL.
func repoName(repoURL string) (string, error) {
	return findRepoGroup(repoURL, 2)
}
func findRepoGroup(repoURL string, groupNum int) (string, error) {
if repoURL == "" {
return "", errors.New("unable to parse empty repo URL")
}
for _, p := range patterns {
m := p.FindStringSubmatch(repoURL)
if len(m) == 3 {
return m[groupNum], nil
}
}
return "", fmt.Errorf("unable to parse repo URL %q", repoURL)
} |
package problem0237
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}

// deleteNode removes node from its list without access to the head, by
// copying the successor's payload into node and unlinking the
// successor. node must not be the tail.
func deleteNode(node *ListNode) {
	next := node.Next
	node.Val = next.Val
	node.Next = next.Next
}
|
package internal
import (
"log"
"os"
client "github.com/influxdata/influxdb1-client/v2"
)
// Connect builds an InfluxDB HTTP client from the DB_HOST, DB_USER and
// DB_PW environment variables, exiting the process when the client
// cannot be constructed.
func Connect() client.Client {
	conf := client.HTTPConfig{
		Addr:     os.Getenv("DB_HOST"),
		Username: os.Getenv("DB_USER"),
		Password: os.Getenv("DB_PW"),
	}
	con, err := client.NewHTTPClient(conf)
	if err != nil {
		log.Fatal(err)
	}
	return con
}
|
package column_test
import (
"context"
"fmt"
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vahid-sohrabloo/chconn/v2"
"github.com/vahid-sohrabloo/chconn/v2/column"
)
// TestString round-trips String columns (plain, nullable, array, and
// LowCardinality combinations) through a ClickHouse Memory table and
// verifies that both typed reads and dynamically discovered columns
// return exactly what was inserted.
func TestString(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)
	tableName := "string"
	chType := "String"
	err = conn.Exec(context.Background(),
		fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),
	)
	require.NoError(t, err)
	// LowCardinality(String) needs this setting on recent servers.
	set := chconn.Settings{
		{
			Name:  "allow_suspicious_low_cardinality_types",
			Value: "true",
		},
	}
	err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (
				block_id UInt8,
				%[1]s %[2]s,
				%[1]s_nullable Nullable(%[2]s),
				%[1]s_array Array(%[2]s),
				%[1]s_array_nullable Array(Nullable(%[2]s)),
				%[1]s_lc LowCardinality(%[2]s),
				%[1]s_nullable_lc LowCardinality(Nullable(%[2]s)),
				%[1]s_array_lc Array(LowCardinality(%[2]s)),
				%[1]s_array_lc_nullable Array(LowCardinality(Nullable(%[2]s)))
			) Engine=Memory`, tableName, chType), &chconn.QueryOptions{
		Settings: set,
	})
	require.NoError(t, err)
	// One writer column per table column, mirroring the schema above.
	blockID := column.New[uint8]()
	col := column.NewString()
	colNullable := column.NewString().Nullable()
	colArray := column.NewString().Array()
	colNullableArray := column.NewString().Nullable().Array()
	colLC := column.NewString().LC()
	colLCNullable := column.NewString().Nullable().LC()
	colArrayLC := column.NewString().LC().Array()
	colArrayLCNullable := column.NewString().Nullable().LC().Array()
	// Expected values, accumulated alongside the appends below.
	var colInsert []string
	var colInsertByte [][]byte
	var colNullableInsert []*string
	var colArrayInsert [][]string
	var colArrayNullableInsert [][]*string
	var colLCInsert []string
	var colLCNullableInsert []*string
	var colLCArrayInsert [][]string
	var colLCNullableArrayInsert [][]*string
	// Two insert batches of ten rows each to exercise multiple blocks.
	for insertN := 0; insertN < 2; insertN++ {
		rows := 10
		for i := 0; i < rows; i++ {
			blockID.Append(uint8(insertN))
			val := fmt.Sprintf("string %d", i)
			val2 := strings.Repeat(val, 50)
			valArray := []string{val, val2}
			valArrayNil := []*string{&val, nil}
			col.Append(val)
			colInsert = append(colInsert, val)
			colInsertByte = append(colInsertByte, []byte(val))
			// example add nullable: alternate real values and NULLs.
			if i%2 == 0 {
				colNullableInsert = append(colNullableInsert, &val)
				colNullable.Append(val)
				colLCNullableInsert = append(colLCNullableInsert, &val)
				colLCNullable.Append(val)
			} else {
				colNullableInsert = append(colNullableInsert, nil)
				colNullable.AppendNil()
				colLCNullableInsert = append(colLCNullableInsert, nil)
				colLCNullable.AppendNil()
			}
			colArray.Append(valArray)
			colArrayInsert = append(colArrayInsert, valArray)
			colNullableArray.AppendP(valArrayNil)
			colArrayNullableInsert = append(colArrayNullableInsert, valArrayNil)
			colLCInsert = append(colLCInsert, val)
			colLC.Append(val)
			colLCArrayInsert = append(colLCArrayInsert, valArray)
			colArrayLC.Append(valArray)
			colLCNullableArrayInsert = append(colLCNullableArrayInsert, valArrayNil)
			colArrayLCNullable.AppendP(valArrayNil)
		}
		err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO
			test_%[1]s (
				block_id,
				%[1]s,
				%[1]s_nullable,
				%[1]s_array,
				%[1]s_array_nullable,
				%[1]s_lc,
				%[1]s_nullable_lc,
				%[1]s_array_lc,
				%[1]s_array_lc_nullable
			)
		VALUES`, tableName),
			blockID,
			col,
			colNullable,
			colArray,
			colNullableArray,
			colLC,
			colLCNullable,
			colArrayLC,
			colArrayLCNullable,
		)
		require.NoError(t, err)
	}
	// example read all: typed reader columns matching the writers above.
	colRead := column.NewString()
	colNullableRead := column.NewString().Nullable()
	colArrayRead := column.NewString().Array()
	colNullableArrayRead := column.NewString().Nullable().Array()
	colLCRead := column.NewString().LC()
	colLCNullableRead := column.NewString().Nullable().LC()
	colArrayLCRead := column.NewString().LC().Array()
	colArrayLCNullableRead := column.NewString().Nullable().LC().Array()
	selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable,
		%[1]s_lc,
		%[1]s_nullable_lc,
		%[1]s_array_lc,
		%[1]s_array_lc_nullable
	FROM test_%[1]s order by block_id`, tableName),
		colRead,
		colNullableRead,
		colArrayRead,
		colNullableArrayRead,
		colLCRead,
		colLCNullableRead,
		colArrayLCRead,
		colArrayLCNullableRead)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())
	var colData []string
	var colDataByte [][]byte
	var colDataByteByData [][]byte
	var colDataByteByRow [][]byte
	var colNullableData []*string
	var colArrayData [][]string
	var colArrayNullableData [][]*string
	var colLCData []string
	var colLCNullableData []*string
	var colLCArrayData [][]string
	var colLCNullableArrayData [][]*string
	// Drain all result blocks, collecting each column through every
	// available accessor (Read, ReadBytes, DataBytes, RowBytes).
	for selectStmt.Next() {
		require.NoError(t, err)
		colData = colRead.Read(colData)
		colDataByte = colRead.ReadBytes(colDataByte)
		colDataByteByData = append(colDataByteByData, colRead.DataBytes()...)
		for i := 0; i < selectStmt.RowsInBlock(); i++ {
			colDataByteByRow = append(colDataByteByRow, colRead.RowBytes(i))
		}
		colNullableData = colNullableRead.ReadP(colNullableData)
		colArrayData = colArrayRead.Read(colArrayData)
		colArrayNullableData = colNullableArrayRead.ReadP(colArrayNullableData)
		colLCData = colLCRead.Read(colLCData)
		colLCNullableData = colLCNullableRead.ReadP(colLCNullableData)
		colLCArrayData = colArrayLCRead.Read(colLCArrayData)
		colLCNullableArrayData = colArrayLCNullableRead.ReadP(colLCNullableArrayData)
	}
	require.NoError(t, selectStmt.Err())
	assert.Equal(t, colInsert, colData)
	assert.Equal(t, colInsertByte, colDataByte)
	assert.Equal(t, colInsertByte, colDataByteByData)
	assert.Equal(t, colInsertByte, colDataByteByRow)
	assert.Equal(t, colNullableInsert, colNullableData)
	assert.Equal(t, colArrayInsert, colArrayData)
	assert.Equal(t, colArrayNullableInsert, colArrayNullableData)
	assert.Equal(t, colLCInsert, colLCData)
	assert.Equal(t, colLCNullableInsert, colLCNullableData)
	assert.Equal(t, colLCArrayInsert, colLCArrayData)
	assert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)
	// check dynamic column: selecting without reader columns must infer
	// the same column types the typed readers declare.
	selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable,
		%[1]s_lc,
		%[1]s_nullable_lc,
		%[1]s_array_lc,
		%[1]s_array_lc_nullable
	FROM test_%[1]s order by block_id`, tableName),
	)
	require.NoError(t, err)
	autoColumns := selectStmt.Columns()
	assert.Len(t, autoColumns, 8)
	assert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())
	assert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())
	assert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())
	assert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())
	assert.Equal(t, colLCRead.ColumnType(), autoColumns[4].ColumnType())
	assert.Equal(t, colLCNullableRead.ColumnType(), autoColumns[5].ColumnType())
	assert.Equal(t, colArrayLCRead.ColumnType(), autoColumns[6].ColumnType())
	assert.Equal(t, colArrayLCNullableRead.ColumnType(), autoColumns[7].ColumnType())
	for selectStmt.Next() {
	}
	require.NoError(t, selectStmt.Err())
	selectStmt.Close()
}
|
package Data
// Data describes a call record as exchanged in JSON.
type Data struct {
	CallId    string `json:"callId"`
	Location  string `json:"location"`
	Situation string `json:"situation"`
	Name      string `json:"name"`
}

// UpdateTable persists the record. It is currently a stub that always
// succeeds — TODO(review): implement or remove.
func (d Data) UpdateTable() error {
	return nil
}
|
package main
import (
"fmt"
"sort"
"strings"
"testing"
)
// team pairs a team id with the comma-separated list of rounds the
// team appeared in.
type team struct {
	id int
	cs string
}

// teams implements sort.Interface, ordering by ascending id.
type teams []team

func (slice teams) Len() int           { return len(slice) }
func (slice teams) Less(i, j int) bool { return slice[i].id < slice[j].id }
func (slice teams) Swap(i, j int)      { slice[i], slice[j] = slice[j], slice[i] }
// TestFootball checks football against known input/expected-output
// pairs: rounds are pipe-separated lists of team ids, and the result
// lists the rounds each id appeared in, sorted by id.
func TestFootball(t *testing.T) {
	for k, v := range map[string]string{
		"1 2 3 4 | 3 1 | 4 1":             "1:1,2,3; 2:1; 3:1,2; 4:1,3;",
		"19 11 | 19 21 23 | 31 39 29": "11:1; 19:1,2; 21:2; 23:2; 29:3; 31:3; 39:3;"} {
		if r := football(k); r != v {
			t.Errorf("failed: football %s is %s, got %s",
				k, v, r)
		}
	}
}
// football parses a pipe-separated list of rounds, each a
// whitespace-separated list of team ids, and reports for every id the
// 1-based rounds it appeared in, formatted "id:r1,r2;" and sorted by id.
func football(p string) string {
	appearances := make(map[int][]string)
	for roundIdx, round := range strings.Split(p, "|") {
		for _, field := range strings.Fields(round) {
			var id int
			fmt.Sscan(field, &id)
			appearances[id] = append(appearances[id], fmt.Sprint(roundIdx+1))
		}
	}
	var entries []team
	for id, rounds := range appearances {
		entries = append(entries, team{id, strings.Join(rounds, ",")})
	}
	sort.Sort(teams(entries))
	parts := make([]string, 0, len(entries))
	for _, e := range entries {
		parts = append(parts, fmt.Sprintf("%d:%s;", e.id, e.cs))
	}
	return strings.Join(parts, " ")
}
|
package swordoffer
//114. 外星文字典
//现有一种使用英语字母的外星文语言,这门语言的字母顺序与英语顺序不同。
//
//给定一个字符串列表 words ,作为这门语言的词典,words 中的字符串已经 按这门新语言的字母顺序进行了排序 。
//
//请你根据该词典还原出此语言中已知的字母顺序,并 按字母递增顺序 排列。若不存在合法字母顺序,返回 "" 。若存在多种可能的合法字母顺序,返回其中 任意一种 顺序即可。
//
//字符串 s 字典顺序小于 字符串 t 有两种情况:
//
//在第一个不同字母处,如果 s 中的字母在这门外星语言的字母顺序中位于 t 中字母之前,那么s 的字典顺序小于 t 。
//如果前面 min(s.length, t.length) 字母都相同,那么 s.length < t.length 时,s 的字典顺序也小于 t 。
//
//
//示例 1:
//
//输入:words = ["wrt","wrf","er","ett","rftt"]
//输出:"wertf"
//示例 2:
//
//输入:words = ["z","x"]
//输出:"zx"
//示例 3:
//
//输入:words = ["z","x","z"]
//输出:""
//解释:不存在合法字母顺序,因此返回 "" 。
//
//
//提示:
//
//1 <= words.length <= 100
//1 <= words[i].length <= 100
//words[i] 仅由小写英文字母组成
// alienOrder reconstructs a letter ordering consistent with the given
// lexicographically sorted word list. It returns "" when the list is
// contradictory: a cycle among letters, or a word followed by its own
// proper prefix.
func alienOrder(words []string) string {
	// adjacency: letter -> letters known to come after it.
	adjacency := map[byte][]byte{}
	for i, word := range words {
		// Register every letter as a node, even if it gains no edges.
		for j := 0; j < len(word); j++ {
			if _, ok := adjacency[word[j]]; !ok {
				adjacency[word[j]] = nil
			}
		}
		if i == 0 {
			continue
		}
		// The first differing position between adjacent words yields
		// one precedence edge.
		prev := words[i-1]
		limit := len(prev)
		if len(word) < limit {
			limit = len(word)
		}
		mismatch := -1
		for j := 0; j < limit; j++ {
			if prev[j] != word[j] {
				mismatch = j
				break
			}
		}
		if mismatch >= 0 {
			adjacency[prev[mismatch]] = append(adjacency[prev[mismatch]], word[mismatch])
		} else if len(prev) > len(word) {
			// prev is longer but word is its prefix: invalid ordering.
			return ""
		}
	}
	// Depth-first topological sort, filling result from the back.
	const (
		visiting = 1
		done     = 2
	)
	state := map[byte]int{}
	result := make([]byte, len(adjacency))
	pos := len(result) - 1
	// visit returns false when it detects a cycle.
	var visit func(byte) bool
	visit = func(u byte) bool {
		state[u] = visiting
		for _, v := range adjacency[u] {
			switch state[v] {
			case 0:
				if !visit(v) {
					return false
				}
			case visiting:
				return false
			}
		}
		state[u] = done
		result[pos] = u
		pos--
		return true
	}
	for u := range adjacency {
		if state[u] == 0 && !visit(u) {
			return ""
		}
	}
	return string(result)
}
|
package main
import "fmt"
// main sorts a fixed sample slice with bubbleSort and prints the result.
func main() {
	nums := []int{31, 13, 12, 4, 18, 16, 7, 2, 3, 0, 10}
	fmt.Println(bubbleSort(nums))
}
// bubbleSort sorts numbers in place in ascending order and returns the
// same slice. The original recursed after every full pass and printed
// each intermediate state to stdout; this version loops until a pass
// makes no swap and drops the leftover debug output.
func bubbleSort(numbers []int) []int {
	for {
		swapped := false
		for i := 0; i < len(numbers)-1; i++ {
			if numbers[i] > numbers[i+1] {
				numbers[i], numbers[i+1] = numbers[i+1], numbers[i]
				swapped = true
			}
		}
		if !swapped {
			return numbers
		}
	}
}
|
package gormzap
import (
"database/sql/driver"
"io/ioutil"
stdlog "log"
"testing"
"github.com/erikstmartin/go-testdb"
"github.com/jinzhu/gorm"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest"
)
// Benchmark_WithTestDB compares gorm's default logger against this
// package's zap-based logger, using a stubbed testdb driver so no real
// database is involved. Each logger runs the same insert/select
// workloads as sub-benchmarks.
func Benchmark_WithTestDB(b *testing.B) {
	// Discarding zap core, configured like zap's own benchmarks:
	// https://github.com/uber-go/zap/blob/35aad584952c3e7020db7b839f6b102de6271f89/benchmarks/zap_test.go#L106-L116
	ec := zap.NewProductionEncoderConfig()
	ec.EncodeDuration = zapcore.NanosDurationEncoder
	ec.EncodeTime = zapcore.EpochNanosTimeEncoder
	enc := zapcore.NewJSONEncoder(ec)
	zapLogger := zap.New(zapcore.NewCore(
		enc,
		&zaptest.Discarder{},
		zap.DebugLevel,
	))
	defer zapLogger.Sync()
	type Post struct {
		ID    int
		Title string
		Body  string
	}
	// Stub the driver so every exec/query succeeds with canned data.
	testdb.SetExecWithArgsFunc(func(query string, args []driver.Value) (driver.Result, error) {
		return testdb.NewResult(1, nil, 1, nil), nil
	})
	testdb.SetQueryWithArgsFunc(func(query string, args []driver.Value) (driver.Rows, error) {
		return testdb.RowsFromCSVString([]string{"title", "body"}, `"awesome","This is an awesome post"`), nil
	})
	setupDB := func() *gorm.DB {
		db, err := gorm.Open("testdb", "")
		if err != nil {
			b.Fatal(err)
		}
		db.AutoMigrate(&Post{})
		// LogMode(true) forces gorm to log every statement, which is
		// the code path being measured.
		db.LogMode(true)
		return db
	}
	benchInsert := func(b *testing.B, db *gorm.DB) {
		post := &Post{Title: "awesome", Body: "This is an awesome post"}
		b.ResetTimer()
		for i := 1; i <= b.N; i++ {
			db.Create(post)
		}
	}
	benchSelectByID := func(b *testing.B, db *gorm.DB) {
		b.ResetTimer()
		for i := 1; i <= b.N; i++ {
			db.Model(&Post{}).Where(&Post{ID: i}).Find(&[]*Post{})
		}
	}
	benchSelectByIDs := func(b *testing.B, db *gorm.DB, n int) {
		ids := make([]int, n)
		for i := 1; i <= n; i++ {
			ids = append(ids, i)
		}
		b.ResetTimer()
		for i := 1; i <= b.N; i++ {
			db.Model(&Post{}).Where("id in (?)", ids).Find(&[]*Post{})
		}
	}
	b.Run("default", func(b *testing.B) {
		db := setupDB()
		defer db.Close()
		// Default logger writing to a discarded writer, as in gorm:
		// https://github.com/jinzhu/gorm/blob/3a9e91ab372120a0e35b518430255308e3d8d5ea/logger.go#L16
		db.SetLogger(gorm.Logger{LogWriter: stdlog.New(ioutil.Discard, "\r\n", 0)})
		b.ResetTimer()
		b.Run("insert post", func(b *testing.B) { benchInsert(b, db) })
		b.Run("select by ID", func(b *testing.B) { benchSelectByID(b, db) })
		b.Run("select by 10 IDs", func(b *testing.B) { benchSelectByIDs(b, db, 10) })
		b.Run("select by 100 IDs", func(b *testing.B) { benchSelectByIDs(b, db, 100) })
	})
	b.Run("gormzap", func(b *testing.B) {
		db := setupDB()
		defer db.Close()
		db.SetLogger(New(zapLogger))
		b.ResetTimer()
		b.Run("insert post", func(b *testing.B) { benchInsert(b, db) })
		b.Run("select by ID", func(b *testing.B) { benchSelectByID(b, db) })
		b.Run("select by 10 IDs", func(b *testing.B) { benchSelectByIDs(b, db, 10) })
		b.Run("select by 100 IDs", func(b *testing.B) { benchSelectByIDs(b, db, 100) })
	})
}
|
/*@Author : Manasvini Banavara Suryanarayana
*SJSU ID : 010102040
*CMPE 273 Lab#3
*/
package main
import (
"fmt"
"./httprouter"
"net/http"
"strconv"
"encoding/json"
)
// Response1 is a single key/value pair in JSON responses.
type Response1 struct {
	Key   int    `json:"key"`
	Value string `json:"value"`
}

// Response2 wraps the full list of pairs returned by GET /keys.
type Response2 struct {
	Arr []Response1 `json:"Response"`
}

// KeyValueMap is the in-memory store backing all handlers.
// NOTE(review): it is accessed from concurrent HTTP handlers without a
// lock — confirm whether synchronization is needed.
var KeyValueMap = map[int]string{}
// putmethod handles PUT /keys/:key_id/:value by storing the pair in the
// in-memory map. A non-integer key_id now yields 400 Bad Request; the
// original stored the value under key 0 and still answered 200.
func putmethod(rw http.ResponseWriter, req *http.Request, p httprouter.Params) {
	id := p.ByName("key_id")
	val := p.ByName("value")
	idval, err := strconv.Atoi(id)
	if err != nil {
		fmt.Println("error occurred in conversion")
		rw.WriteHeader(http.StatusBadRequest)
		return
	}
	KeyValueMap[idval] = val
	rw.WriteHeader(http.StatusOK)
}
// getAllvalue handles GET /keys by returning every stored key/value
// pair as JSON. A marshalling failure now yields 500 Internal Server
// Error; the original logged it and still answered 200 with an empty
// body.
func getAllvalue(rw http.ResponseWriter, req *http.Request, p httprouter.Params) {
	// Non-nil slice so an empty store marshals as [] rather than null.
	responses := make([]Response1, 0, len(KeyValueMap))
	for key, value := range KeyValueMap {
		responses = append(responses, Response1{Key: key, Value: value})
	}
	// converting response body struct to json format
	respjson, err := json.Marshal(Response2{Arr: responses})
	if err != nil {
		fmt.Println("error occurred while marshalling response")
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}
	rw.Header().Set("Content-Type", "application/json")
	rw.WriteHeader(http.StatusOK)
	// sending back response
	fmt.Fprintf(rw, "%s", respjson)
}
// getValue handles GET /keys/:key_id by returning the stored value for
// that key as JSON. A non-integer key_id now yields 400 Bad Request and
// a marshalling failure 500; the original answered 200 in both cases.
// NOTE(review): a missing key still returns an empty value with 200 —
// confirm whether a 404 would be more appropriate.
func getValue(rw http.ResponseWriter, req *http.Request, p httprouter.Params) {
	id := p.ByName("key_id")
	idval, err := strconv.Atoi(id)
	if err != nil {
		fmt.Println("error occurred in conversion")
		rw.WriteHeader(http.StatusBadRequest)
		return
	}
	value := KeyValueMap[idval]
	// constructing struct for sending back response body
	resp := Response1{
		Key:   idval,
		Value: value,
	}
	// converting response body struct to json format
	respjson, err := json.Marshal(resp)
	if err != nil {
		fmt.Println("error occurred while marshalling response")
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}
	rw.Header().Set("Content-Type", "application/json")
	rw.WriteHeader(http.StatusOK)
	// sending back response
	fmt.Fprintf(rw, "%s", respjson)
}
func main() {
mux := httprouter.New()
mux.GET("/keys/:key_id", getValue)
mux.PUT("/keys/:key_id/:value", putmethod)
mux.GET("/keys", getAllvalue)
server := http.Server{
Addr: "0.0.0.0:3002",
Handler: mux,
}
server.ListenAndServe()
} |
package objs
import (
"sort"
"strconv"
)
// UserState is the merged, serializable view of a user's nameplate and
// score.
type UserState struct {
	UserId   int     `json:"user_id"`
	Username string  `json:"username"`
	Score    float64 `json:"score"`
}
// FromUserData fills us from a nameplate and its matching score entry.
// NOTE(review): np.Username is parsed as the numeric user id; a
// non-numeric username silently yields UserId 0 — confirm intended.
func (us *UserState) FromUserData(np UserNameplate, ns UserScore) {
	us.Username = np.DisplayName
	us.UserId, _ = strconv.Atoi(np.Username)
	us.Score = ns.Score
}
// MergeUserData joins nameplates with their scores (matched by
// Username) into UserState values, one per nameplate in order.
// Nameplates without a matching score get the zero UserScore.
func MergeUserData(nps []UserNameplate, nss []UserScore) []UserState {
	// Index scores by username for O(1) lookup. (The original also
	// built a nameplate index, preNP, that was never read; removed.)
	preNS := make(map[string]UserScore, len(nss))
	for _, ns := range nss {
		preNS[ns.Username] = ns
	}
	ans := make([]UserState, len(nps))
	for i, np := range nps {
		ans[i].FromUserData(np, preNS[np.Username])
	}
	return ans
}
// ByUsers is a comparison function defining an ordering between two
// UserState values.
type ByUsers func(u1, u2 *UserState) bool

// userStateSorter adapts a UserState slice plus a ByUsers comparison
// to sort.Interface.
type userStateSorter struct {
	userstates []UserState
	by         ByUsers
}

// Sort sorts us in place using the receiver as the ordering.
func (byu ByUsers) Sort(us []UserState) {
	sort.Sort(&userStateSorter{
		userstates: us,
		by:         byu,
	})
}

// Len is part of sort.Interface.
func (s *userStateSorter) Len() int {
	return len(s.userstates)
}

// Swap is part of sort.Interface.
func (s *userStateSorter) Swap(i, j int) {
	s.userstates[i], s.userstates[j] = s.userstates[j], s.userstates[i]
}

// Less is part of sort.Interface. It is implemented by calling the "by" closure in the sorter.
func (s *userStateSorter) Less(i, j int) bool {
	return s.by(&s.userstates[i], &s.userstates[j])
}
|
package stack
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"time"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/compose/convert"
"github.com/docker/cli/cli/compose/loader"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
apiclient "github.com/docker/docker/client"
dockerclient "github.com/docker/docker/client"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// deployCompose deploys a compose file to a swarm cluster: it loads and
// validates the file, warns about unsupported/deprecated options,
// optionally prunes services missing from the file, then creates
// networks, secrets and configs before deploying the services.
func deployCompose(ctx context.Context, dockerCli command.Cli, opts DeployOptions) error {
	configDetails, err := getConfigDetails(opts.Composefile)
	if err != nil {
		return err
	}
	config, err := loader.Load(configDetails)
	if err != nil {
		if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok {
			return errors.Errorf("Compose file contains unsupported options:\n\n%s\n",
				propertyWarnings(fpe.Properties))
		}
		return err
	}
	// Unsupported/deprecated options are warnings, not errors.
	unsupportedProperties := loader.GetUnsupportedProperties(configDetails)
	if len(unsupportedProperties) > 0 {
		fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n",
			strings.Join(unsupportedProperties, ", "))
	}
	deprecatedProperties := loader.GetDeprecatedProperties(configDetails)
	if len(deprecatedProperties) > 0 {
		fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n",
			propertyWarnings(deprecatedProperties))
	}
	if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil {
		return err
	}
	namespace := convert.NewNamespace(opts.Namespace)
	// With --prune, remove stack services not present in this file.
	if opts.Prune {
		services := map[string]struct{}{}
		for _, service := range config.Services {
			services[service.Name] = struct{}{}
		}
		pruneServices(ctx, dockerCli, namespace, services)
	}
	// Resources must exist before services that reference them.
	serviceNetworks := getServicesDeclaredNetworks(config.Services)
	networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks)
	if err := validateExternalNetworks(ctx, dockerCli.Client(), externalNetworks); err != nil {
		return err
	}
	if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil {
		return err
	}
	secrets, err := convert.Secrets(namespace, config.Secrets)
	if err != nil {
		return err
	}
	if err := createSecrets(ctx, dockerCli, secrets); err != nil {
		return err
	}
	configs, err := convert.Configs(namespace, config.Configs)
	if err != nil {
		return err
	}
	if err := createConfigs(ctx, dockerCli, configs); err != nil {
		return err
	}
	services, err := convert.Services(namespace, config, dockerCli.Client())
	if err != nil {
		return err
	}
	return deployServices(ctx, dockerCli, services, namespace, opts.SendRegistryAuth, opts.ResolveImage, opts.ExpectedState)
}
// getServicesDeclaredNetworks collects the set of network names
// referenced by the given services; a service declaring no networks is
// counted as using "default".
func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} {
	declared := map[string]struct{}{}
	for _, svc := range serviceConfigs {
		if len(svc.Networks) == 0 {
			declared["default"] = struct{}{}
			continue
		}
		for name := range svc.Networks {
			declared[name] = struct{}{}
		}
	}
	return declared
}
// propertyWarnings renders a name->description map as a
// blank-line-separated list of "name: description" entries, sorted for
// deterministic output.
func propertyWarnings(properties map[string]string) string {
	msgs := make([]string, 0, len(properties))
	for name, description := range properties {
		msgs = append(msgs, name+": "+description)
	}
	sort.Strings(msgs)
	return strings.Join(msgs, "\n\n")
}
// getConfigDetails assembles the ConfigDetails for a single compose
// file: its working directory, the parsed file, and the current
// process environment for variable interpolation.
func getConfigDetails(composefile string) (composetypes.ConfigDetails, error) {
	var details composetypes.ConfigDetails
	absPath, err := filepath.Abs(composefile)
	if err != nil {
		return details, err
	}
	// Relative paths inside the file resolve against its directory.
	details.WorkingDir = filepath.Dir(absPath)
	configFile, err := getConfigFile(composefile)
	if err != nil {
		return details, err
	}
	// TODO: support multiple files
	details.ConfigFiles = []composetypes.ConfigFile{*configFile}
	details.Environment, err = buildEnvironment(os.Environ())
	return details, err
}
// buildEnvironment converts "KEY=VALUE" strings into a map; an entry
// without "=" aborts with an error.
func buildEnvironment(env []string) (map[string]string, error) {
	result := make(map[string]string, len(env))
	for _, entry := range env {
		// An empty value still appears as "K=", so "=" must be present.
		sep := strings.Index(entry, "=")
		if sep < 0 {
			return result, errors.Errorf("unexpected environment %q", entry)
		}
		result[entry[:sep]] = entry[sep+1:]
	}
	return result, nil
}
// getConfigFile reads filename and parses it as compose YAML.
func getConfigFile(filename string) (*composetypes.ConfigFile, error) {
	// Renamed from "bytes" to avoid shadowing the bytes package name.
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	parsed, err := loader.ParseYAML(data)
	if err != nil {
		return nil, err
	}
	cf := composetypes.ConfigFile{
		Filename: filename,
		Config:   parsed,
	}
	return &cf, nil
}
// validateExternalNetworks verifies that every network declared as
// external both exists and is swarm-scoped (predefined networks such
// as "host" are exempt from the scope check).
func validateExternalNetworks(
	ctx context.Context,
	client dockerclient.NetworkAPIClient,
	externalNetworks []string,
) error {
	for _, networkName := range externalNetworks {
		network, err := client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{})
		switch {
		case dockerclient.IsErrNotFound(err):
			return errors.Errorf("network %q is declared as external, but could not be found. You need to create a swarm-scoped network before the stack is deployed", networkName)
		case err != nil:
			return err
		case container.NetworkMode(networkName).IsUserDefined() && network.Scope != "swarm":
			return errors.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of \"swarm\"", networkName, network.Scope)
		}
	}
	return nil
}
// createSecrets creates each secret in the swarm, or updates it in place
// when a secret with the same name already exists.
func createSecrets(
	ctx context.Context,
	dockerCli command.Cli,
	secrets []swarm.SecretSpec,
) error {
	apiClient := dockerCli.Client()
	for _, spec := range secrets {
		existing, _, err := apiClient.SecretInspectWithRaw(ctx, spec.Name)
		if err == nil {
			// secret already exists, then we update that
			if updateErr := apiClient.SecretUpdate(ctx, existing.ID, existing.Meta.Version, spec); updateErr != nil {
				return errors.Wrapf(updateErr, "failed to update secret %s", spec.Name)
			}
		} else if apiclient.IsErrSecretNotFound(err) {
			// secret does not exist, then we create a new one.
			if _, createErr := apiClient.SecretCreate(ctx, spec); createErr != nil {
				return errors.Wrapf(createErr, "failed to create secret %s", spec.Name)
			}
		} else {
			return err
		}
	}
	return nil
}
// createConfigs creates each config in the swarm, or updates it in place
// when a config with the same name already exists. Mirrors createSecrets.
func createConfigs(
	ctx context.Context,
	dockerCli command.Cli,
	configs []swarm.ConfigSpec,
) error {
	client := dockerCli.Client()
	for _, configSpec := range configs {
		config, _, err := client.ConfigInspectWithRaw(ctx, configSpec.Name)
		switch {
		case err == nil:
			// config already exists, then we update that
			if err := client.ConfigUpdate(ctx, config.ID, config.Meta.Version, configSpec); err != nil {
				// BUG FIX: the wrapped error was built but never returned,
				// silently swallowing update failures (cf. createSecrets)
				return errors.Wrapf(err, "failed to update config %s", configSpec.Name)
			}
		case apiclient.IsErrConfigNotFound(err):
			// config does not exist, then we create a new one.
			if _, err := client.ConfigCreate(ctx, configSpec); err != nil {
				// BUG FIX: likewise, creation failures were dropped
				return errors.Wrapf(err, "failed to create config %s", configSpec.Name)
			}
		default:
			return err
		}
	}
	return nil
}
// createNetworks creates every stack-scoped network that does not already
// exist. A network without an explicit driver gets DefaultNetworkDriver.
func createNetworks(
	ctx context.Context,
	dockerCli command.Cli,
	namespace convert.Namespace,
	networks map[string]types.NetworkCreate,
) error {
	apiClient := dockerCli.Client()
	existing, err := getStackNetworks(ctx, apiClient, namespace.Name())
	if err != nil {
		return err
	}
	known := make(map[string]types.NetworkResource, len(existing))
	for _, nw := range existing {
		known[nw.Name] = nw
	}
	for internalName, opts := range networks {
		scoped := namespace.Scope(internalName)
		if _, ok := known[scoped]; ok {
			// already present, nothing to do
			continue
		}
		if opts.Driver == "" {
			opts.Driver = DefaultNetworkDriver
		}
		fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", scoped)
		if _, err := apiClient.NetworkCreate(ctx, scoped, opts); err != nil {
			return errors.Wrapf(err, "failed to create network %s", internalName)
		}
	}
	return nil
}
// deployServices creates or updates every service of the stack and then
// waits for each one to reach expectedState (within its stabilization
// timeout) before moving on to the next. Unlike plain docker stack deploy,
// a service that fails to stabilize fails the whole deployment.
//
// Per-service stabilization defaults to a 5s delay and a 1m timeout and can
// be overridden with the "amp.service.stabilize.delay" and
// "amp.service.stabilize.timeout" container labels (time.ParseDuration
// syntax).
func deployServices(
	ctx context.Context,
	dockerCli command.Cli,
	services map[string]swarm.ServiceSpec,
	namespace convert.Namespace,
	sendAuth bool,
	resolveImage string,
	expectedState swarm.TaskState,
) error {
	apiClient := dockerCli.Client()
	out := dockerCli.Out()
	existingServices, err := getServices(ctx, apiClient, namespace.Name())
	if err != nil {
		return err
	}
	existingServiceMap := make(map[string]swarm.Service)
	for _, service := range existingServices {
		existingServiceMap[service.Spec.Name] = service
	}
	for internalName, serviceSpec := range services {
		name := namespace.Scope(internalName)
		encodedAuth := ""
		image := serviceSpec.TaskTemplate.ContainerSpec.Image
		if sendAuth {
			// Retrieve encoded auth token from the image reference
			encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image)
			if err != nil {
				return err
			}
		}
		// service stabilization defaults
		stabilizeDelay := time.Duration(5) * time.Second
		stabilizeTimeout := time.Duration(1) * time.Minute
		// override service stabilization default settings based on spec labels
		labels := serviceSpec.TaskTemplate.ContainerSpec.Labels
		if labels["amp.service.stabilize.delay"] != "" {
			stabilizeDelay, err = time.ParseDuration(labels["amp.service.stabilize.delay"])
			if err != nil {
				return err
			}
		}
		if labels["amp.service.stabilize.timeout"] != "" {
			stabilizeTimeout, err = time.ParseDuration(labels["amp.service.stabilize.timeout"])
			if err != nil {
				return err
			}
		}
		// apply service stabilization timeout setting - the service must be
		// stable before the timeout.
		// BUG FIX: the CancelFunc was previously discarded (go vet
		// "lostcancel"), leaking one timeout context per service until the
		// parent context ended; it is now released on every exit path of
		// this iteration.
		ctx, cancel := context.WithTimeout(ctx, stabilizeTimeout)
		var imageName string
		var serviceID string
		if service, exists := existingServiceMap[name]; exists {
			fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID)
			fmt.Fprintf(out, "service: %+v\n", service)
			imageName = service.Spec.TaskTemplate.ContainerSpec.Image
			serviceID = service.ID
			updateOpts := types.ServiceUpdateOptions{EncodedRegistryAuth: encodedAuth}
			if resolveImage == ResolveImageAlways || (resolveImage == ResolveImageChanged && image != service.Spec.Labels[convert.LabelImage]) {
				updateOpts.QueryRegistry = true
			}
			response, err := apiClient.ServiceUpdate(
				ctx,
				service.ID,
				service.Version,
				serviceSpec,
				updateOpts,
			)
			if err != nil {
				cancel()
				return errors.Wrapf(err, "failed to update service %s", name)
			}
			for _, warning := range response.Warnings {
				fmt.Fprintln(dockerCli.Err(), warning)
			}
		} else {
			fmt.Fprintf(out, "Creating service %s\n", name)
			createOpts := types.ServiceCreateOptions{EncodedRegistryAuth: encodedAuth}
			// query registry if flag disabling was not set
			if resolveImage == ResolveImageAlways || resolveImage == ResolveImageChanged {
				createOpts.QueryRegistry = true
			}
			var resp types.ServiceCreateResponse
			if resp, err = apiClient.ServiceCreate(ctx, serviceSpec, createOpts); err != nil {
				cancel()
				return errors.Wrapf(err, "failed to create service %s", name)
			}
			fmt.Fprintf(out, "service: %+v\n", resp)
			serviceID = resp.ID
			imageName = serviceSpec.TaskTemplate.ContainerSpec.Image
		}
		fmt.Fprintf(out, "image: %s\n", imageName)
		fmt.Fprintf(out, "Stabilization delay: %s\n", stabilizeDelay)
		fmt.Fprintf(out, "Stabilization timeout: %s\n", stabilizeTimeout)
		done := make(chan error)
		// create a watcher for service/container events based on the service image
		options := NewEventsWatcherOptions(events.ServiceEventType, events.ContainerEventType)
		options.AddImageFilter(imageName)
		w := NewEventsWatcherWithCancel(ctx, apiClient, options)
		w.On("*", func(m events.Message) {
			//fmt.Fprintf(out, "EVENT: %s\n", MessageString(m))
		})
		w.OnError(func(err error) {
			//fmt.Fprintf(out, "OnError: %s\n", err)
			w.Cancel()
			done <- err
		})
		w.Watch()
		NotifyState(ctx, apiClient, serviceID, expectedState, stabilizeDelay, func(err error) {
			done <- err
		})
		err = <-done
		// stabilization finished one way or the other; release the
		// per-service timeout context
		cancel()
		// unlike what docker does with stack deployment,
		// we consider that a failing service should fail the stack deployment
		if err != nil {
			w.Cancel()
			return err
		}
	}
	return nil
}
// MessageString returns a formatted event message
func MessageString(m events.Message) string {
	var attrs strings.Builder
	for k, v := range m.Actor.Attributes {
		fmt.Fprintf(&attrs, " %s: %s\n", k, v)
	}
	return fmt.Sprintf("ID: %s\n Status: %s\n From: %s\n Type: %s\n Action: %s\n Actor ID: %s\n Actor Attributes: \n%s\n Scope: %s\n Time: %d\n TimeNano: %d\n\n",
		m.ID, m.Status, m.From, m.Type, m.Action, m.Actor.ID, attrs.String(), m.Scope, m.Time, m.TimeNano)
}
// NotifyState calls the provided callback when the desired service state is achieved for all tasks or when the deadline is exceeded
func NotifyState(ctx context.Context, apiClient apiclient.APIClient, serviceID string, desiredState swarm.TaskState, stabilizeDelay time.Duration, callback func(error)) {
	// Use the context deadline when one is set; otherwise poll for at most
	// one minute.
	deadline, isSet := ctx.Deadline()
	if !isSet {
		deadline = time.Now().Add(1 * time.Minute)
	}
	go func() {
		// List only the tasks belonging to this service.
		taskOpts := types.TaskListOptions{}
		taskOpts.Filters = filters.NewArgs()
		taskOpts.Filters.Add("service", serviceID)
		// counter records whether the stabilization delay has already been
		// served once after all tasks first matched the desired state.
		counter := 0
		for {
			// all tasks need to match the desired state and be stable within the deadline
			if time.Now().After(deadline) {
				callback(errors.New("failed to achieve desired state before deadline"))
				return
			}
			// get tasks
			tasks, err := ListTasks(ctx, apiClient, taskOpts)
			if err != nil {
				callback(err)
				return
			}
			// if *any* task does not match the desired state then wait for another loop iteration to check again
			failure := false
			for _, t := range tasks {
				if t.Status.State != desiredState {
					failure = true
					break
				}
			}
			// all tasks matched the desired state - now wait for things to stabilize, or if already stabilized,
			// then callback with success
			if !failure {
				if counter < 1 {
					// make sure we have enough time to wait for things to stabilize within the deadline
					if time.Now().Add(stabilizeDelay).After(deadline) {
						callback(errors.New("failed to achieve desired state with stabilization delay before deadline"))
						return
					}
					time.Sleep(stabilizeDelay)
					counter++
				} else {
					// second consecutive pass in the desired state - success!
					callback(nil)
					return
				}
			}
			// task polling interval
			time.Sleep(1 * time.Second)
		}
	}()
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colflow_test
import (
"context"
"testing"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// TestVectorizedInternalPanic verifies that materializers successfully
// handle panics coming from exec package. It sets up the following chain:
// RowSource -> columnarizer -> test panic emitter -> materializer,
// and makes sure that a panic doesn't occur yet the error is propagated.
func TestVectorizedInternalPanic(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	defer evalCtx.Stop(ctx)

	flowCtx := execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg:     &execinfra.ServerConfig{Settings: cluster.MakeTestingClusterSettings()},
	}

	const nRows, nCols = 1, 1
	typs := types.OneIntCol
	source := execinfra.NewRepeatableRowSource(typs, randgen.MakeIntRows(nRows, nCols))

	columnarizer, err := colexec.NewBufferingColumnarizer(ctx, testAllocator, &flowCtx, 0 /* processorID */, source)
	if err != nil {
		t.Fatal(err)
	}
	panicEmitter := newTestVectorizedInternalPanicEmitter(columnarizer)
	mat, err := colexec.NewMaterializer(
		&flowCtx,
		1, /* processorID */
		panicEmitter,
		typs,
		nil, /* output */
		nil, /* getStats */
		nil, /* metadataSourceQueue */
		nil, /* toClose */
		nil, /* cancelFlow */
	)
	if err != nil {
		t.Fatal(err)
	}
	mat.Start(ctx)

	var meta *execinfrapb.ProducerMetadata
	require.NotPanics(t, func() { _, meta = mat.Next() }, "InternalError was not caught")
	require.NotNil(t, meta.Err, "InternalError was not propagated as metadata")
}
// TestNonVectorizedPanicPropagation verifies that materializers do not handle
// panics coming not from exec package. It sets up the following chain:
// RowSource -> columnarizer -> test panic emitter -> materializer,
// and makes sure that a panic is emitted all the way through the chain.
func TestNonVectorizedPanicPropagation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	defer evalCtx.Stop(ctx)

	flowCtx := execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg:     &execinfra.ServerConfig{Settings: cluster.MakeTestingClusterSettings()},
	}

	const nRows, nCols = 1, 1
	typs := types.OneIntCol
	source := execinfra.NewRepeatableRowSource(typs, randgen.MakeIntRows(nRows, nCols))

	columnarizer, err := colexec.NewBufferingColumnarizer(ctx, testAllocator, &flowCtx, 0 /* processorID */, source)
	if err != nil {
		t.Fatal(err)
	}
	panicEmitter := newTestNonVectorizedPanicEmitter(columnarizer)
	mat, err := colexec.NewMaterializer(
		&flowCtx,
		1, /* processorID */
		panicEmitter,
		typs,
		nil, /* output */
		nil, /* getStats */
		nil, /* metadataSourceQueue */
		nil, /* toClose */
		nil, /* cancelFlow */
	)
	if err != nil {
		t.Fatal(err)
	}
	mat.Start(ctx)

	require.Panics(t, func() { mat.Next() }, "NonVectorizedPanic was caught by the operators")
}
// testVectorizedInternalPanicEmitter is an colexec.Operator that panics with
// colexecerror.InternalError on every odd-numbered invocation of Next()
// and returns the next batch from the input on every even-numbered (i.e. it
// becomes a noop for those iterations). Used for tests only.
type testVectorizedInternalPanicEmitter struct {
	colexecop.OneInputNode
	// emitBatch is true when the next call should pass a batch through
	// instead of panicking.
	emitBatch bool
}

var _ colexecop.Operator = &testVectorizedInternalPanicEmitter{}

// newTestVectorizedInternalPanicEmitter wraps input in a
// testVectorizedInternalPanicEmitter.
func newTestVectorizedInternalPanicEmitter(input colexecop.Operator) colexecop.Operator {
	return &testVectorizedInternalPanicEmitter{
		OneInputNode: colexecop.NewOneInputNode(input),
	}
}

// Init is part of exec.Operator interface.
func (e *testVectorizedInternalPanicEmitter) Init() {
	e.Input.Init()
}

// Next is part of exec.Operator interface.
func (e *testVectorizedInternalPanicEmitter) Next(ctx context.Context) coldata.Batch {
	if !e.emitBatch {
		// Flip the flag, then panic: the panic prevents the reset below from
		// running, so the *following* call takes the pass-through path.
		e.emitBatch = true
		colexecerror.InternalError(errors.AssertionFailedf(""))
	}
	// Pass-through call: re-arm the panic for the call after this one.
	e.emitBatch = false
	return e.Input.Next(ctx)
}
// testNonVectorizedPanicEmitter is the same as
// testVectorizedInternalPanicEmitter but it panics with the builtin panic
// function. Used for tests only. It is the only colexec.Operator panics from
// which are not caught.
type testNonVectorizedPanicEmitter struct {
	colexecop.OneInputNode
	// emitBatch is true when the next call should pass a batch through
	// instead of panicking.
	emitBatch bool
}

// BUG FIX: this interface assertion previously (and redundantly) re-checked
// testVectorizedInternalPanicEmitter; it now checks this type.
var _ colexecop.Operator = &testNonVectorizedPanicEmitter{}

// newTestNonVectorizedPanicEmitter wraps input in a
// testNonVectorizedPanicEmitter.
func newTestNonVectorizedPanicEmitter(input colexecop.Operator) colexecop.Operator {
	return &testNonVectorizedPanicEmitter{
		OneInputNode: colexecop.NewOneInputNode(input),
	}
}

// Init is part of exec.Operator interface.
func (e *testNonVectorizedPanicEmitter) Init() {
	e.Input.Init()
}

// Next is part of exec.Operator interface.
func (e *testNonVectorizedPanicEmitter) Next(ctx context.Context) coldata.Batch {
	if !e.emitBatch {
		// Panic before the reset below can run, so the following call takes
		// the pass-through path.
		e.emitBatch = true
		colexecerror.NonCatchablePanic("")
	}
	e.emitBatch = false
	return e.Input.Next(ctx)
}
|
package e2e
const (
	// simpleSuccessfulPipeline is a scripted pipeline whose single step exits 0.
	simpleSuccessfulPipeline = `
node() {
sh 'exit 0'
}
`
	// simpleFailedPipeline is a scripted pipeline whose single step exits 1.
	simpleFailedPipeline = `
node() {
sh 'exit 1'
}
`
	// pipelineWithEnvs echoes the FOO1 and FOO2 environment variables.
	pipelineWithEnvs = `
node() {
echo "FOO1 is ${env.FOO1}"
echo "FOO2 is ${env.FOO2}"
}
`
	// samplepipeline prints the current OpenShift project and cluster URL.
	samplepipeline = `
try {
timeout(time: 20, unit: 'MINUTES') {
stage('build') {
openshift.withCluster() {
openshift.withProject() {
echo "Using project ${openshift.project()} in cluster with url ${openshift.cluster()}"
}
}
}
}
} catch (err) {
echo "in catch block"
echo "Caught: ${err}"
currentBuild.result = 'FAILURE'
throw err
}
`
	// simplemaven2 runs "mvn --version" inside the "java" container of a pod
	// template node (POD_TEMPLATE_NAME is a placeholder in the script).
	simplemaven2 = `
try {
timeout(time: 20, unit: 'MINUTES') {
node("POD_TEMPLATE_NAME") {
container("java") {
sh "mvn --version"
}
}
}
} catch (err) {
echo "in catch block"
echo "Caught: ${err}"
currentBuild.result = 'FAILURE'
throw err
}
`
	// simplemaven1 runs "mvn --version" directly on the pod-template node.
	simplemaven1 = `
try {
timeout(time: 20, unit: 'MINUTES') {
node("POD_TEMPLATE_NAME") {
sh "mvn --version"
}
}
} catch (err) {
echo "in catch block"
echo "Caught: ${err}"
currentBuild.result = 'FAILURE'
throw err
}
`
	// javabuilder runs "mvn --version" in the "java" container of the
	// "java-builder" node.
	javabuilder = `
try {
timeout(time: 20, unit: 'MINUTES') {
node("java-builder") {
container("java") {
sh "mvn --version"
}
}
}
} catch (err) {
echo "in catch block"
echo "Caught: ${err}"
currentBuild.result = 'FAILURE'
throw err
}
`
	// nodejsbuilder runs "npm --version" in the "nodejs" container of the
	// "nodejs-builder" node.
	nodejsbuilder = `
try {
timeout(time: 20, unit: 'MINUTES') {
node("nodejs-builder") {
container("nodejs") {
sh "npm --version"
}
}
}
} catch (err) {
echo "in catch block"
echo "Caught: ${err}"
currentBuild.result = 'FAILURE'
throw err
}
`
	// nodejsDeclarative is a declarative pipeline that builds, deploys, and
	// tags the nodejs-postgresql-example template end to end.
	nodejsDeclarative = `
// path of the template to use
def templatePath = 'nodejs-postgresql-example'
// name of the template that will be created
def templateName = 'nodejs-postgresql-example'
// NOTE, the "pipeline" directive/closure from the declarative pipeline syntax needs to include, or be nested outside,
// and "openshift" directive/closure from the OpenShift Client Plugin for Jenkins. Otherwise, the declarative pipeline engine
// will not be fully engaged.
pipeline {
agent {
node {
// spin up a node.js slave pod to run this build on
label 'nodejs-builder'
}
}
options {
// set a timeout of 20 minutes for this pipeline
timeout(time: 20, unit: 'MINUTES')
}
stages {
stage('preamble') {
steps {
script {
openshift.withCluster() {
openshift.withProject() {
echo "Using project: ${openshift.project()}"
}
}
}
}
}
stage('cleanup') {
steps {
script {
openshift.withCluster() {
openshift.withProject() {
// delete everything with this template label
openshift.selector("all", [ template : templateName ]).delete()
// delete any secrets with this template label
if (openshift.selector("secrets", templateName).exists()) {
openshift.selector("secrets", templateName).delete()
}
}
}
} // script
} // steps
} // stage
stage('create') {
steps {
script {
openshift.withCluster() {
openshift.withProject() {
// create a new application from the templatePath
openshift.newApp(templatePath, "-p", "APPLICATION_DOMAIN=rails-%s.ocp.io")
}
}
} // script
} // steps
} // stage
stage('build') {
steps {
script {
openshift.withCluster() {
openshift.withProject() {
def builds = openshift.selector("bc", templateName).related('builds')
builds.untilEach(1) {
return (it.object().status.phase == "Complete")
}
}
}
} // script
} // steps
} // stage
stage('deploy') {
steps {
script {
openshift.withCluster() {
openshift.withProject() {
def rm = openshift.selector("dc", templateName).rollout()
openshift.selector("dc", templateName).related('pods').untilEach(1) {
return (it.object().status.phase == "Running")
}
}
}
} // script
} // steps
} // stage
stage('tag') {
steps {
script {
openshift.withCluster() {
openshift.withProject() {
// if everything else succeeded, tag the ${templateName}:latest image as ${templateName}-staging:latest
// a pipeline build config for the staging environment can watch for the ${templateName}-staging:latest
// image to change and then deploy it to the staging environment
openshift.tag("${templateName}:latest", "${templateName}-staging:latest")
}
}
} // script
} // steps
} // stage
} // stages
} // pipeline
`
	// bluegreenTemplateYAML is an OpenShift template implementing a
	// blue/green deployment driven by a Jenkins pipeline that pauses for
	// manual approval before going live.
	bluegreenTemplateYAML = `
apiVersion: v1
kind: Template
labels:
template: bluegreen-pipeline
message: A Jenkins server must be instantiated in this project to manage
the Pipeline BuildConfig created by this template. You will be able to log in to
it using your OpenShift user credentials.
metadata:
annotations:
description: This example showcases a blue green deployment using a Jenkins
pipeline that pauses for approval.
iconClass: icon-jenkins
tags: instant-app,jenkins
name: bluegreen-pipeline
objects:
- apiVersion: v1
kind: BuildConfig
metadata:
annotations:
pipeline.alpha.openshift.io/uses: '[{"name": "${NAME}", "namespace": "", "kind": "DeploymentConfig"}]'
creationTimestamp: null
labels:
name: bluegreen-pipeline
name: bluegreen-pipeline
spec:
strategy:
jenkinsPipelineStrategy:
jenkinsfile: |-
try {
timeout(time: 20, unit: 'MINUTES') {
def appName="${NAME}"
def project=""
def tag="blue"
def altTag="green"
def verbose="${VERBOSE}"
node {
project = env.PROJECT_NAME
stage("Initialize") {
sh "oc get route ${appName} -n ${project} -o jsonpath='{ .spec.to.name }' --loglevel=4 > activeservice"
activeService = readFile('activeservice').trim()
if (activeService == "${appName}-blue") {
tag = "green"
altTag = "blue"
}
sh "oc get route ${tag}-${appName} -n ${project} -o jsonpath='{ .spec.host }' --loglevel=4 > routehost"
routeHost = readFile('routehost').trim()
}
openshift.withCluster() {
openshift.withProject() {
stage("Build") {
echo "building tag ${tag}"
def bld = openshift.startBuild("${appName}")
bld.untilEach {
return it.object().status.phase == "Running"
}
bld.logs('-f')
}
stage("Deploy Test") {
openshift.tag("${appName}:latest", "${appName}:${tag}")
def dc = openshift.selector('dc', "${appName}-${tag}")
dc.rollout().status()
}
stage("Test") {
input message: "Test deployment: http://${routeHost}. Approve?", id: "approval"
}
stage("Go Live") {
sh "oc set -n ${project} route-backends ${appName} ${appName}-${tag}=100 ${appName}-${altTag}=0 --loglevel=4"
}
}
}
}
}
} catch (err) {
echo "in catch block"
echo "Caught: ${err}"
currentBuild.result = 'FAILURE'
throw err
}
type: JenkinsPipeline
triggers:
- github:
secret: "${GITHUB_WEBHOOK_SECRET}"
type: GitHub
- generic:
secret: "${GENERIC_WEBHOOK_SECRET}"
type: Generic
- apiVersion: v1
kind: Secret
metadata:
name: ${NAME}
stringData:
database-admin-password: ${DATABASE_ADMIN_PASSWORD}
database-password: ${DATABASE_PASSWORD}
database-user: ${DATABASE_USER}
- apiVersion: v1
kind: Route
metadata:
name: blue-${NAME}
spec:
host: blue-${APPLICATION_DOMAIN}
to:
kind: Service
name: ${NAME}-blue
- apiVersion: v1
kind: Route
metadata:
name: green-${NAME}
spec:
host: green-${APPLICATION_DOMAIN}
to:
kind: Service
name: ${NAME}-green
- apiVersion: v1
kind: Route
metadata:
name: ${NAME}
spec:
alternateBackends:
- name: ${NAME}-green
weight: 0
host: n-p-e-${APPLICATION_DOMAIN}
to:
kind: Service
name: ${NAME}-blue
weight: 100
- apiVersion: v1
kind: ImageStream
metadata:
annotations:
description: Keeps track of changes in the application image
name: ${NAME}
- apiVersion: v1
kind: BuildConfig
metadata:
annotations:
description: Defines how to build the application
name: ${NAME}
spec:
output:
to:
kind: ImageStreamTag
name: ${NAME}:latest
postCommit:
script: npm test
source:
contextDir: ${CONTEXT_DIR}
git:
ref: ${SOURCE_REPOSITORY_REF}
uri: ${SOURCE_REPOSITORY_URL}
type: Git
strategy:
sourceStrategy:
env:
- name: NPM_MIRROR
value: ${NPM_MIRROR}
from:
kind: ImageStreamTag
name: nodejs:latest
namespace: ${NAMESPACE}
type: Source
triggers:
- github:
secret: ${GITHUB_WEBHOOK_SECRET}
type: GitHub
- generic:
secret: ${GENERIC_WEBHOOK_SECRET}
type: Generic
- apiVersion: v1
kind: Service
metadata:
annotations:
service.alpha.openshift.io/dependencies: '[{"name": "${DATABASE_SERVICE_NAME}", "namespace": "", "kind": "Service"}]'
name: ${NAME}-blue
spec:
ports:
- name: web
port: 8080
targetPort: 8080
selector:
name: ${NAME}-blue
- apiVersion: v1
kind: DeploymentConfig
metadata:
annotations:
description: Defines how to deploy the application server
name: ${NAME}-blue
spec:
replicas: 1
selector:
name: ${NAME}-blue
strategy:
type: Rolling
template:
metadata:
labels:
name: ${NAME}-blue
name: ${NAME}-blue
spec:
containers:
- env:
- name: DATABASE_SERVICE_NAME
value: ${DATABASE_SERVICE_NAME}
- name: MYSQL_USER
value: ${DATABASE_USER}
- name: MYSQL_PASSWORD
value: ${DATABASE_PASSWORD}
- name: MYSQL_DATABASE
value: ${DATABASE_NAME}
- name: MYSQL_ROOT_PASSWORD
value: ${DATABASE_ROOT_PASSWORD}
image: ' '
livenessProbe:
httpGet:
path: /pagecount
port: 8080
initialDelaySeconds: 30
timeoutSeconds: 3
name: nodejs-postgresql-example
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: /pagecount
port: 8080
initialDelaySeconds: 3
timeoutSeconds: 3
resources:
limits:
memory: ${MEMORY_LIMIT}
triggers:
- imageChangeParams:
automatic: true
containerNames:
- nodejs-postgresql-example
from:
kind: ImageStreamTag
name: ${NAME}:blue
type: ImageChange
- type: ConfigChange
- apiVersion: v1
kind: Service
metadata:
annotations:
service.alpha.openshift.io/dependencies: '[{"name": "${DATABASE_SERVICE_NAME}", "namespace": "", "kind": "Service"}]'
name: ${NAME}-green
spec:
ports:
- name: web
port: 8080
targetPort: 8080
selector:
name: ${NAME}-green
- apiVersion: v1
kind: DeploymentConfig
metadata:
annotations:
description: Defines how to deploy the application server
name: ${NAME}-green
spec:
replicas: 1
selector:
name: ${NAME}-green
strategy:
type: Rolling
template:
metadata:
labels:
name: ${NAME}-green
name: ${NAME}-green
spec:
containers:
- env:
- name: POSTGRESQL_USER
valueFrom:
secretKeyRef:
key: database-user
name: ${NAME}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
key: database-password
name: ${NAME}
- name: POSTGRESQL_DATABASE
value: ${DATABASE_NAME}
- name: POSTGRESQL_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
key: database-admin-password
name: ${NAME}
image: ' '
livenessProbe:
httpGet:
path: /pagecount
port: 8080
initialDelaySeconds: 30
timeoutSeconds: 3
name: nodejs-postgresql-example
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: /pagecount
port: 8080
initialDelaySeconds: 3
timeoutSeconds: 3
resources:
limits:
memory: ${MEMORY_LIMIT}
triggers:
- imageChangeParams:
automatic: true
containerNames:
- nodejs-postgresql-example
from:
kind: ImageStreamTag
name: ${NAME}:green
type: ImageChange
- type: ConfigChange
- apiVersion: v1
kind: Service
metadata:
annotations:
description: Exposes the database server
name: ${DATABASE_SERVICE_NAME}
spec:
ports:
- name: postgresql
port: 5432
targetPort: 5432
selector:
name: ${DATABASE_SERVICE_NAME}
- apiVersion: v1
kind: DeploymentConfig
metadata:
annotations:
description: Defines how to deploy the database
name: ${DATABASE_SERVICE_NAME}
spec:
replicas: 1
selector:
name: ${DATABASE_SERVICE_NAME}
strategy:
type: Recreate
template:
metadata:
labels:
name: ${DATABASE_SERVICE_NAME}
name: ${DATABASE_SERVICE_NAME}
spec:
containers:
- env:
- name: POSTGRESQL_USER
valueFrom:
secretKeyRef:
key: database-user
name: ${NAME}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
key: database-password
name: ${NAME}
- name: POSTGRESQL_DATABASE
value: ${DATABASE_NAME}
- name: POSTGRESQL_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
key: database-admin-password
name: ${NAME}
image: ' '
livenessProbe:
initialDelaySeconds: 30
tcpSocket:
port: 5432
timeoutSeconds: 1
name: postgresql
ports:
- containerPort: 5432
resources:
limits:
memory: ${MEMORY_MYSQL_LIMIT}
volumeMounts:
- mountPath: /var/lib/mysql/data
name: ${DATABASE_SERVICE_NAME}-data
volumes:
- emptyDir:
medium: ""
name: ${DATABASE_SERVICE_NAME}-data
triggers:
- imageChangeParams:
automatic: true
containerNames:
- postgresql
from:
kind: ImageStreamTag
name: postgresql:${POSTGRESQL_VERSION}
namespace: ${NAMESPACE}
type: ImageChange
- type: ConfigChange
parameters:
- description: The name assigned to all of the frontend objects defined in this template.
displayName: Name
name: NAME
required: true
value: nodejs-postgresql-example
- description: The exposed hostname that will route to the Node.js service, if left
blank a value will be defaulted.
displayName: Application Hostname
name: APPLICATION_DOMAIN
- description: The URL of the repository with your application source code.
displayName: Git Repository URL
name: SOURCE_REPOSITORY_URL
required: true
value: https://github.com/openshift/nodejs-ex.git
- description: The reference of the repository with your application source code.
displayName: Git Repository Ref
name: SOURCE_REPOSITORY_REF
required: true
value: master
- description: Password for the database admin user.
displayName: Database Administrator Password
from: '[a-zA-Z0-9]{16}'
generate: expression
name: DATABASE_ADMIN_PASSWORD
- displayName: Database Name
name: DATABASE_NAME
required: true
value: sampledb
- description: Username for postgresql user that will be used for accessing the database.
displayName: postgresql Username
from: user[A-Z0-9]{3}
generate: expression
name: DATABASE_USER
- description: Password for the postgresql user.
displayName: postgresql Password
from: '[a-zA-Z0-9]{16}'
generate: expression
name: DATABASE_PASSWORD
- description: Maximum amount of memory the Node.js container can use.
displayName: Memory Limit
name: MEMORY_LIMIT
required: true
value: 512Mi
- description: Maximum amount of memory the postgresql container can use.
displayName: Memory Limit (postgresql)
name: MEMORY_MYSQL_LIMIT
required: true
value: 512Mi
- displayName: Database Service Name
name: DATABASE_SERVICE_NAME
required: true
value: postgresql
- description: Password for the database admin user.
displayName: Database Administrator Password
from: '[a-zA-Z0-9]{16}'
generate: expression
name: DATABASE_ROOT_PASSWORD
- description: Set this to the relative path to your project if it is not in the root
of your repository.
displayName: Context Directory
name: CONTEXT_DIR
- description: Github trigger secret. A difficult to guess string encoded as part of the webhook URL. Not encrypted.
displayName: GitHub Webhook Secret
from: '[a-zA-Z0-9]{40}'
generate: expression
name: GITHUB_WEBHOOK_SECRET
- description: A secret string used to configure the Generic webhook.
displayName: Generic Webhook Secret
from: '[a-zA-Z0-9]{40}'
generate: expression
name: GENERIC_WEBHOOK_SECRET
- description: The custom NPM mirror URL
displayName: Custom NPM Mirror URL
name: NPM_MIRROR
- description: The OpenShift Namespace where the NodeJS and postgresql ImageStreams reside.
displayName: Namespace
name: NAMESPACE
required: true
value: openshift
- description: Whether to enable verbose logging of Jenkinsfile steps in pipeline
displayName: Verbose
name: VERBOSE
required: true
value: "false"`
)
|
package model
// Dict is a dictionary entry as serialized to and from JSON.
type Dict struct {
	ID      int      `json:"id"`
	JpName  string   `json:"jp_name"`  // Japanese name
	EngName string   `json:"eng_name"` // English name
	Body    string   `json:"body"`
	Tags    []string `json:"tags"`
}
|
package fmap
import (
"fmt"
)
// Iter struct maintains the current state for walking the *Map data structure.
type Iter struct {
	kvIdx       int             // index of the next entry within curLeaf (used for collision leaves)
	curLeaf     leafI           // leaf whose entries are currently being returned; nil when exhausted
	tblNextNode tableIterFunc   // iterator over the table currently being walked
	stack       *tableIterStack // saved iterators of parent tables, resumed when the current one is done
}
// newIter creates an Iter positioned at the start of a walk rooted at root.
// kvIdx and curLeaf start at their zero values.
func newIter(root tableI) *Iter {
	return &Iter{
		tblNextNode: root.iter(),
		stack:       newTableIterStack(),
	}
}
// Next returns each successive key/value mapping in the *Map. When all entries
// have been returned it will return KeyVal{Key: nil, Val: ...}
//func (it *Iter) Next() (key.Hash, interface{}) {
func (it *Iter) Next() KeyVal {
	//log.Printf("it.Next: called. it=%s", it)
	var kv KeyVal
LOOP:
	for {
		switch x := it.curLeaf.(type) {
		case nil:
			// no current leaf: the walk is exhausted
			kv.Key = nil // the end
			kv.Val = nil
			break LOOP
		case *flatLeaf:
			// a flat leaf holds exactly one mapping: return it and advance
			// to the next node for the following call
			kv = KeyVal(*x)
			it.kvIdx = 0
			it.setNextNode()
			break LOOP
		case *collisionLeaf:
			// a collision leaf holds several mappings: hand them out one
			// per call (tracked by kvIdx), then advance to the next node
			if it.kvIdx >= len(*x) {
				it.setNextNode()
				continue LOOP
			}
			kv = (*x)[it.kvIdx]
			it.kvIdx++
			break LOOP
		default:
			panic("Set (*iter).Next(); it.curLeaf unknown type")
		}
	}
	//log.Printf("it.Next: key=%s; val=%v;", key, val)
	return kv
	//return key, val
}
// setNextNode() sets the iter struct pointing to the next node. If there is no
// next node it returns false, else it returns true.
func (it *Iter) setNextNode() bool {
	//log.Printf("it.setNextNode: called; it=%s", it)
LOOP:
	for {
		//log.Printf("it.setNextNode: it=%s", it)
		var cur = it.tblNextNode()
		//log.Printf("it.setNextNode: it.tblNextNode()=>cur=%s", cur)
		// if cur==nil pop stack and loop
		for cur == nil {
			// current table exhausted: resume the parent table's iterator
			it.tblNextNode = it.stack.pop()
			if it.tblNextNode == nil {
				// stack empty too: the whole walk is complete
				it.curLeaf = nil
				return false
			}
			cur = it.tblNextNode()
		}
		// cur != nil
		switch x := cur.(type) {
		case nil:
			panic("WTF!!! cur == nil")
		case tableI:
			// descend into a sub-table: save the current iterator and start
			// iterating the child, then loop again
			it.stack.push(it.tblNextNode)
			it.tblNextNode = x.iter()
			// falls out of the switch and repeats LOOP on the child
		case leafI:
			it.curLeaf = x
			break LOOP
		}
		//log.Println("it.setNextNode: looping for")
	}
	return true
}
// String returns a Go-syntax representation of the iterator's internal
// state, primarily for debugging.
func (it *Iter) String() string {
	return fmt.Sprintf("%#v", *it)
}
|
package cmd
import (
"context"
"net/http"
"os"
"os/signal"
"time"
"github.com/allegro/bigcache"
"github.com/dotkom/image-server/api"
gorm_adapter "github.com/dotkom/image-server/storage/gorm"
s3_adapter "github.com/dotkom/image-server/storage/s3"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// serverCmd is the cobra "server" sub-command; it delegates to serve.
var serverCmd = &cobra.Command{
	Use:   "server",
	Short: "Start the server",
	Long:  `Start the server`,
	Run: func(cmd *cobra.Command, args []string) {
		serve()
	},
}
// serve wires up the S3 storage adapter, GORM metadata store, in-memory
// cache, and HTTP API, starts the server, and blocks until an interrupt
// signal triggers a graceful shutdown (5s grace period).
func serve() {
	fs, err := s3_adapter.New(viper.GetString(s3Bucket))
	if err != nil {
		log.Fatal("Failed to create storage adapter", err)
	}
	ms := gorm_adapter.New(gorm_adapter.DBDriver(viper.GetString(dbDriver)), viper.GetString(dbDSN))
	cache, err := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
	if err != nil {
		log.Fatal("Failed to create cache", err)
	}
	router := mux.NewRouter()
	// renamed from "api" to avoid shadowing the imported api package
	handler := api.New(fs, ms, router, cache)
	server := &http.Server{
		Addr:         viper.GetString(listenAddr),
		WriteTimeout: time.Second * 30,
		ReadTimeout:  time.Second * 30,
		IdleTimeout:  time.Second * 60,
		Handler:      handler,
	}
	go func() {
		log.Infof("Server listening to %s", viper.GetString(listenAddr))
		if err := server.ListenAndServe(); err != nil {
			log.Error(err)
		}
	}()
	// Block until an interrupt arrives, then shut down gracefully.
	channel := make(chan os.Signal, 1)
	signal.Notify(channel, os.Interrupt)
	sig := <-channel
	// BUG FIX: log message typo "Recieved" -> "Received"
	log.Infof("Received signal: %s", sig)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	log.Info("Shutting down")
	// BUG FIX: the Shutdown error was previously ignored
	if err := server.Shutdown(ctx); err != nil {
		log.Error(err)
	}
	os.Exit(0)
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"sort"
"flag"
//"text/tabwriter"
//"phd/polymorphism"
//"github.com/biogo/boom"
)
//Usage: go run formatEST.go focal.species.vcf ancestral1.species.vcf ancestral2.species.vcf all.species.vcf "synonymous" > out.formatted.est
//focal.species.vcf: This file contains annotated focal snps with no missing data
//ancestral1.species.vcf: This file contains annotated snp data for the parent of interest. It contains all the positions
//ancestral2.species.vcf: This file contains annotated snp data for the other parent (outgroup). It contains all the positions
//Careful, these snps can often not be in the file from the focal species because they are mapping to the other part of the sub-genome...
//all.species.vcf: This file is the same as ancestral.species.vcf. Can contain all species and must contain all positions.
//(used to assess the reference snp, also outgroup)
// Key uniquely identifies a SNP site: the gene/contig name from column 0
// plus the position parsed from column 1.
type Key struct {
	Gene string
	Pos  int
}

// SNP holds per-site allele counts (number of A/C/G/T alleles observed
// across all sampled genotypes) plus the annotated effect.
type SNP struct {
	NumA   int
	NumC   int
	NumG   int
	NumT   int
	Effect string // The effect (synonymous, missense ...) of the snp
}

// Poly pairs an allele-counter name ("NumA"...) with its count; used to sort
// alleles by frequency in PopulateAncestral.
type Poly struct {
	Name   string
	Number int
}

// Data maps each (gene, position) key to its SNP counts.
type Data map[Key]SNP
// PopulateFocal parses a tab-separated VCF-style file of focal-species SNPs
// and records per-site allele counts in d, returning the updated map (d is
// also mutated in place).
//
// Expected columns: entry[0]=gene/contig, entry[1]=position, entry[3]=ref
// allele, entry[4]=comma-separated alt allele(s), entry[9:]=per-sample
// genotype fields ("0/1:..."). Each diploid genotype contributes two allele
// counts. Header lines ('#'), blank lines, and sites whose ALT field is
// longer than 5 characters (more than three single-base alternatives, or
// indels) are skipped.
// NOTE: the original header comment said "comma separated .frq file", but
// the parser clearly reads tab-separated VCF columns.
func PopulateFocal(f *os.File, d Data) Data {
	input := bufio.NewScanner(f)
	for input.Scan() {
		line := input.Text()
		// Skip blank lines (indexing the first character of an empty line
		// previously panicked) and comment/header lines.
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		entry := strings.Split(line, "\t")
		alt := entry[4]
		// At most three single-base alternatives ("A,C,G" is 5 chars);
		// this also filters out indels.
		if len(alt) > 5 {
			continue
		}
		snp := SNP{0, 0, 0, 0, ""} // initialize allele counts
		ref := entry[3]
		// Up to three comma-separated alternative alleles.
		altSplit := strings.Split(alt, ",")
		alt1, alt2, alt3 := "", "", ""
		switch len(altSplit) {
		case 1:
			alt1 = altSplit[0]
		case 2:
			alt1, alt2 = altSplit[0], altSplit[1]
		case 3:
			alt1, alt2, alt3 = altSplit[0], altSplit[1], altSplit[2]
		}
		// add bumps the counter of the given base by n; unknown bases
		// (e.g. "N" or empty) are ignored, matching the original switches.
		add := func(base string, n int) {
			switch base {
			case "A":
				snp.NumA += n
			case "C":
				snp.NumC += n
			case "G":
				snp.NumG += n
			case "T":
				snp.NumT += n
			}
		}
		// Genotype columns start at index 9; each sample is diploid.
		// Note: only homozygous and 0/x heterozygous calls are handled,
		// as in the original ("Have not found any 1/0 nor 1/2 nor 0/1").
		for i := 9; i < len(entry); i++ {
			polymorphism := strings.Split(entry[i], ":")[0]
			switch polymorphism {
			case "0/0":
				add(ref, 2)
			case "1/1":
				add(alt1, 2)
			case "2/2":
				add(alt2, 2)
			case "3/3":
				add(alt3, 2)
			case "0/1":
				add(ref, 1)
				add(alt1, 1)
			case "0/2":
				add(ref, 1)
				add(alt2, 1)
			case "0/3":
				add(ref, 1)
				add(alt3, 1)
			}
		}
		gene := entry[0]
		pos, _ := strconv.Atoi(entry[1]) // malformed positions map to 0, as before
		d[Key{gene, pos}] = snp
	}
	return d
}
// PopulateAncestral parses a tab-separated VCF-style file for an ancestral
// (outgroup) species and records, for each site, only the single most
// frequent allele as a count of 1 (all other counters zeroed). When no
// allele was observed at all, the counters stay at zero. Returns the updated
// map (d is also mutated in place).
//
// Column layout and genotype handling are identical to PopulateFocal.
func PopulateAncestral(f *os.File, d Data) Data {
	input := bufio.NewScanner(f)
	for input.Scan() {
		line := input.Text()
		// Skip blank lines (indexing the first character of an empty line
		// previously panicked) and comment/header lines.
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		entry := strings.Split(line, "\t")
		alt := entry[4]
		// At most three single-base alternatives; also filters out indels.
		if len(alt) > 5 {
			continue
		}
		snp := SNP{0, 0, 0, 0, ""} // initialize allele counts
		ref := entry[3]
		altSplit := strings.Split(alt, ",")
		alt1, alt2, alt3 := "", "", ""
		switch len(altSplit) {
		case 1:
			alt1 = altSplit[0]
		case 2:
			alt1, alt2 = altSplit[0], altSplit[1]
		case 3:
			alt1, alt2, alt3 = altSplit[0], altSplit[1], altSplit[2]
		}
		// add bumps the counter of the given base by n; unknown bases are
		// ignored, matching the original switches.
		add := func(base string, n int) {
			switch base {
			case "A":
				snp.NumA += n
			case "C":
				snp.NumC += n
			case "G":
				snp.NumG += n
			case "T":
				snp.NumT += n
			}
		}
		// Genotype columns start at index 9; each sample is diploid.
		for i := 9; i < len(entry); i++ {
			polymorphism := strings.Split(entry[i], ":")[0]
			switch polymorphism {
			case "0/0":
				add(ref, 2)
			case "1/1":
				add(alt1, 2)
			case "2/2":
				add(alt2, 2)
			case "3/3":
				add(alt3, 2)
			case "0/1":
				add(ref, 1)
				add(alt1, 1)
			case "0/2":
				add(ref, 1)
				add(alt2, 1)
			case "0/3":
				add(ref, 1)
				add(alt3, 1)
			}
		}
		// Collapse the counts to the single most frequent allele.
		snps := []Poly{
			{"NumA", snp.NumA},
			{"NumC", snp.NumC},
			{"NumG", snp.NumG},
			{"NumT", snp.NumT},
		}
		sort.Slice(snps, func(i, j int) bool {
			return snps[i].Number > snps[j].Number // Change > < when major minor is wanted
		})
		// NOTE(review): ties between equally frequent alleles are broken
		// arbitrarily (sort.Slice is not stable), exactly as in the
		// original implementation — confirm this is acceptable upstream.
		if snps[0].Number > 0 {
			// Zero everything, then mark the winner with a count of 1.
			snp = SNP{0, 0, 0, 0, snp.Effect}
			add(strings.TrimPrefix(snps[0].Name, "Num"), 1)
		}
		gene := entry[0]
		pos, _ := strconv.Atoi(entry[1]) // malformed positions map to 0, as before
		d[Key{gene, pos}] = snp
	}
	return d
}
// PopulateReference parses a tab-separated VCF-style file and records, for
// each site, the reference allele (column 3) as a count of 1. Returns the
// updated map (d is also mutated in place).
func PopulateReference(f *os.File, d Data) Data {
	input := bufio.NewScanner(f)
	for input.Scan() {
		line := input.Text()
		// Skip blank lines (indexing the first character of an empty line
		// previously panicked) and comment/header lines.
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		snp := SNP{0, 0, 0, 0, ""} // initialize allele counts
		entry := strings.Split(line, "\t")
		ref := entry[3]
		// Mark the reference base; anything else (e.g. "N") stays zero.
		switch ref {
		case "A":
			snp.NumA++
		case "C":
			snp.NumC++
		case "G":
			snp.NumG++
		case "T":
			snp.NumT++
		}
		gene := entry[0]
		pos, _ := strconv.Atoi(entry[1]) // malformed positions map to 0, as before
		d[Key{gene, pos}] = snp
	}
	return d
}
func main() {
fPtr := flag.String("f", "nothing", "file containing focal SNPs; no missing data")
a1Ptr := flag.String("a1", "nothing", "file containing outgroup 1 SNPs; all data for that outgroup must be present")
a2Ptr := flag.String("a2", "nothing", "file containing outgroup 2 SNPs; all data for that outgroup must be present")
a3Ptr := flag.String("a3", "nothing", "file containing outgroup 3 SNPs; all data must be present")
//typePtr := flag.String("type", "synonymous_variant", "what type of SNP are you interested in? (eg. missense_variant)")
//Parse the flags before doing anything!!!
flag.Parse()
//variant := *typePtr
fileFocal, _ := os.Open(*fPtr) // focal allele
fileAncestral1, _ := os.Open(*a1Ptr) // ancestral 1 allele
fileAncestral2, _ := os.Open(*a2Ptr) // ancestral 2 allele
fileAncestralRef, _ := os.Open(*a3Ptr) // ancestral allele
dataFocal := make(Data)
dataAncestral1 := make(Data)
dataAncestral2 := make(Data)
dataAncestralRef := make(Data)
dataFocal = PopulateFocal(fileFocal, dataFocal)
dataAncestral1 = PopulateAncestral(fileAncestral1, dataAncestral1)
dataAncestral2 = PopulateAncestral(fileAncestral2, dataAncestral2)
dataAncestralRef = PopulateReference(fileAncestralRef, dataAncestralRef)
fileFocal.Close()
fileAncestral1.Close()
fileAncestral2.Close()
fileAncestralRef.Close()
//fileAncestralRef.Close()
//fileChange.Close()
/*for k, v := range dataAncestralRef {
fmt.Printf("key[%s] value[%s]\n", k, v)
}*/
/*for k, v := range dataFocal {
fmt.Printf("%d,%d,%d,%d %d,%d,%d,%d %d,%d,%d,%d %d,%d,%d,%d \n", v.NumA, v.NumC, v.NumG, v.NumT,
dataAncestral1[k].NumA, dataAncestral1[k].NumC, dataAncestral1[k].NumG, dataAncestral1[k].NumT,
dataAncestral2[k].NumA, dataAncestral2[k].NumC, dataAncestral2[k].NumG, dataAncestral2[k].NumT,
dataAncestralRef[k].NumA, dataAncestralRef[k].NumC, dataAncestralRef[k].NumG, dataAncestralRef[k].NumT)
}*/
//Format so that SNPs are in order.
var keys []Key
for k := range dataFocal {
keys = append(keys, k)
}
sort.Slice(keys, func(i, j int) bool {
if keys[i].Gene < keys[j].Gene {
return true
}
if keys[i].Gene > keys[j].Gene {
return false
}
return keys[i].Pos < keys[j].Pos
})
for _, k := range keys {
//if dataFocal[k].Effect == variant {
fmt.Printf("%s %d %d,%d,%d,%d %d,%d,%d,%d %d,%d,%d,%d %d,%d,%d,%d \n",
k.Gene, k.Pos,
dataFocal[k].NumA, dataFocal[k].NumC, dataFocal[k].NumG, dataFocal[k].NumT,
dataAncestral1[k].NumA, dataAncestral1[k].NumC, dataAncestral1[k].NumG, dataAncestral1[k].NumT,
dataAncestral2[k].NumA, dataAncestral2[k].NumC, dataAncestral2[k].NumG, dataAncestral2[k].NumT,
dataAncestralRef[k].NumA, dataAncestralRef[k].NumC, dataAncestralRef[k].NumG, dataAncestralRef[k].NumT)
//k.Gene, k.Pos)
//fmt.Printf("gene[%s] pos[%d]\n", k.Gene, k.Pos)
//}
}
}
|
package hasm
import (
"fmt"
"github.com/leonhfr/nand2tetris/src/hasm/symboltable"
)
// CCommand represents a Hack assembly C-instruction split into its three
// mnemonic parts: destination, computation and jump.
type CCommand struct {
	Dest string `json:"dest"`
	Comp string `json:"comp"`
	Jump string `json:"jump"`
}

// NewC builds a CCommand from its dest, comp and jump mnemonics.
func NewC(dest, comp, jump string) CCommand {
	return CCommand{
		Dest: dest,
		Comp: comp,
		Jump: jump,
	}
}
// Handle translates the C-command into its 16-bit binary string:
// "111" + a-bit + comp(6) + dest(3) + jump(3). The symbol table parameter is
// not consulted for C-commands; it exists to match the command interface.
// On an unknown mnemonic it returns the partial output plus an error.
func (c CCommand) Handle(st *symboltable.SymbolTable) (string, error) {
	output := "111"
	// The a-bit selects between the A-register (0) and M (1) comp tables.
	if compA0, ok := compA0Table[c.Comp]; ok {
		output = fmt.Sprint(output, "0", compA0)
	} else if compA1, ok := compA1Table[c.Comp]; ok {
		output = fmt.Sprint(output, "1", compA1)
	} else {
		return output, fmt.Errorf("comparison symbol %v does not exist in the comp table", c.Comp)
	}
	// An empty dest mnemonic means "store nowhere" (000).
	if c.Dest == "" {
		output = fmt.Sprint(output, "000")
	} else if dest, ok := destTable[c.Dest]; ok {
		output = fmt.Sprint(output, dest)
	} else {
		return output, fmt.Errorf("destination symbol %v does not exist in the dest table", c.Dest)
	}
	// An empty jump mnemonic means "never jump" (000).
	if c.Jump == "" {
		output = fmt.Sprint(output, "000")
	} else if jump, ok := jumpTable[c.Jump]; ok {
		output = fmt.Sprint(output, jump)
	} else {
		// Fix: the message previously reported c.Dest instead of c.Jump.
		return output, fmt.Errorf("jump symbol %v does not exist in the jump table", c.Jump)
	}
	return output, nil
}
// compA0Table maps comp mnemonics that operate on the A register (a-bit = 0)
// to their 6-bit comp fields.
var compA0Table = map[string]string{
	"0":   "101010",
	"1":   "111111",
	"-1":  "111010",
	"D":   "001100",
	"A":   "110000",
	"!D":  "001101",
	"!A":  "110001",
	"-D":  "001111",
	"-A":  "110011",
	"D+1": "011111",
	"A+1": "110111",
	"D-1": "001110",
	"A-1": "110010",
	"D+A": "000010",
	"D-A": "010011",
	"A-D": "000111",
	"D&A": "000000",
	"D|A": "010101",
}

// compA1Table maps comp mnemonics that operate on M (a-bit = 1) to their
// 6-bit comp fields; the bit patterns mirror the A-register variants.
var compA1Table = map[string]string{
	"M":   "110000",
	"!M":  "110001",
	"-M":  "110011",
	"M+1": "110111",
	"M-1": "110010",
	"D+M": "000010",
	"D-M": "010011",
	"M-D": "000111",
	"D&M": "000000",
	"D|M": "010101",
}

// destTable maps dest mnemonics to their 3-bit field (bits: A, D, M).
var destTable = map[string]string{
	"M":   "001",
	"D":   "010",
	"MD":  "011",
	"A":   "100",
	"AM":  "101",
	"AD":  "110",
	"AMD": "111",
}

// jumpTable maps jump mnemonics to their 3-bit field (bits: <0, =0, >0).
var jumpTable = map[string]string{
	"JGT": "001",
	"JEQ": "010",
	"JGE": "011",
	"JLT": "100",
	"JNE": "101",
	"JLE": "110",
	"JMP": "111",
}
|
package config
import "github.com/gobuffalo/envy"
// Configuration holds all runtime settings of the service.
type Configuration struct {
	// DatabaseURL is the Postgres connection string (env DATABASE_URL).
	DatabaseURL string
}

// config caches the lazily-built singleton returned by GetConfig.
var config *Configuration

// GetConfig returns the process-wide configuration, reading it from the
// environment on first use and caching it for subsequent calls.
// NOTE(review): the lazy init is not goroutine-safe; if GetConfig can be
// reached concurrently before the first call completes, guard it with
// sync.Once — confirm call sites.
func GetConfig() *Configuration {
	if config == nil {
		config = &Configuration{
			DatabaseURL: envy.Get("DATABASE_URL", "postgres://postgres:@postgres:5432/postgres?sslmode=disable"),
		}
	}
	return config
}
|
package sstats
import "math"
// StdDev computes the streaming standard deviation, sqrt((Σx^2 + n*x̄^2 - 2*x̄*Σx)/n-1),
// by combining two windowed statistics over the same samples.
type StdDev struct {
	xx *SumSq // running sum of squares over the window
	xm *Mean  // running mean (and sum) over the same window
}
// NewStdDev creates a new standard deviation statistic backed by a circular
// buffer of the given size; it fails when either component statistic rejects
// the size.
func NewStdDev(size int) (*StdDev, error) {
	sumSq, err := NewSumSq(size)
	if err != nil {
		return nil, err
	}
	mean, err := NewMean(size)
	if err != nil {
		return nil, err
	}
	return &StdDev{xx: sumSq, xm: mean}, nil
}
// Update adds a new element to the standard deviation circular buffer; both
// underlying statistics (sum of squares and mean) see the same sample.
func (s *StdDev) Update(x float64) {
	s.xx.Update(x)
	s.xm.Update(x)
}
// UpdateBulk adds multiple elements to the standard deviation circular
// buffer. It always returns nil; the error result is kept for interface
// compatibility with other bulk updaters.
func (s *StdDev) UpdateBulk(xb []float64) error {
	// Delegate to Update so single- and bulk-insertion stay consistent.
	for _, x := range xb {
		s.Update(x)
	}
	return nil
}
// Reset clears out the values in the circular buffer and resets the head and
// tail pointers of both underlying statistics.
func (s *StdDev) Reset() {
	s.xx.Reset()
	s.xm.Reset()
}
// Value computes the current standard deviation of the circular buffer using
// the identity Var = (Σx² + n·x̄² − 2·x̄·Σx)/(n−1). It returns 0 for fewer
// than two samples.
func (s *StdDev) Value() float64 {
	n := float64(s.Len())
	if n <= 1 {
		return 0
	}
	xm := s.xm.Value()
	variance := (s.xx.Value() + n*xm*xm - 2*xm*s.xm.Sum()) / (n - 1)
	// Floating-point cancellation can push the variance marginally below
	// zero, which would make Sqrt return NaN; clamp it to zero instead.
	if variance < 0 {
		return 0
	}
	return math.Sqrt(variance)
}
// Len returns the number of elements currently stored in the circular buffer
// (delegated to the mean statistic; both components hold the same samples).
func (s *StdDev) Len() int {
	return s.xm.Len()
}
// Mean returns the current mean of the samples in the circular buffer.
func (s *StdDev) Mean() float64 {
	return s.xm.Value()
}
|
package chartserver
import (
"errors"
"net/url"
)
//Controller is used to handle flows of related requests based on the corresponding handlers
//A reverse proxy will be created and managed to proxy the related traffics between API and
//backend chart server
type Controller struct {
	//The access endpoint of the backend chart repository server
	backendServerAddr *url.URL

	//To cover the server info and status requests
	baseHandler *BaseHandler

	//To cover the chart repository requests
	repositoryHandler *RepositoryHandler

	//To cover all the manipulation requests
	manipulationHandler *ManipulationHandler
}
//NewController is the constructor of the chartserver.Controller. It fails
//when no backend server address is supplied; all handlers share one reverse
//proxy to the backend chart server.
func NewController(backendServer *url.URL) (*Controller, error) {
	if backendServer == nil {
		// Fix: error message previously read "backend sever".
		return nil, errors.New("failed to create chartserver.Controller: backend server address is required")
	}

	//Use customized reverse proxy
	proxy := NewProxyEngine(backendServer)

	//Initialize chart operator for use by the manipulation handler
	operator := &ChartOperator{}

	return &Controller{
		backendServerAddr: backendServer,
		baseHandler:       &BaseHandler{proxy},
		repositoryHandler: &RepositoryHandler{proxy},
		manipulationHandler: &ManipulationHandler{
			trafficProxy:  proxy,
			chartOperator: operator,
		},
	}, nil
}
//GetBaseHandler returns the reference of BaseHandler
//(serves the server info and status requests).
func (c *Controller) GetBaseHandler() *BaseHandler {
	return c.baseHandler
}

//GetRepositoryHandler returns the reference of RepositoryHandler
//(serves the chart repository requests).
func (c *Controller) GetRepositoryHandler() *RepositoryHandler {
	return c.repositoryHandler
}

//GetManipulationHandler returns the reference of ManipulationHandler
//(serves the chart manipulation requests).
func (c *Controller) GetManipulationHandler() *ManipulationHandler {
	return c.manipulationHandler
}
|
package database
import (
"github.com/lotteryjs/ten-minutes-app/model"
"github.com/stretchr/testify/assert"
)
// TestCreateAttackPattern drops the mitre_attack collection and verifies
// that a populated attack pattern (two external references — note they are
// identical — and one kill-chain phase) inserts without error.
func (s *DatabaseSuite) TestCreateAttackPattern() {
	// NOTE(review): Drop's error is discarded and a nil context is passed —
	// best-effort cleanup before the test; confirm this is intentional.
	s.db.DB.Collection("mitre_attack").Drop(nil)
	killChainPhase := model.KillChainPhase{
		KillChainName: "mitre_attack",
		PhaseName:     "privilege-escalation",
	}
	externalReference := model.ExternalReference{
		SourceName: "mitre-attack",
		ExternalID: "T1546.004",
		URL:        "https://attack.mitre.org/techniques/T1546/004",
	}
	externalReference2 := model.ExternalReference{
		SourceName: "mitre-attack",
		ExternalID: "T1546.004",
		URL:        "https://attack.mitre.org/techniques/T1546/004",
	}
	technique := model.Technique{
		STIX_ID: "id287487",
		Type:    "attack-pattern",
	}
	attackPattern := (&model.AttackPattern{
		Technique:            technique,
		ExternalReferences:   []model.ExternalReference{externalReference, externalReference2},
		Name:                 "我是哈哈",
		Description:          "我是嘻嘻",
		KillChainPhases:      []model.KillChainPhase{killChainPhase},
		XMitreIsSubtechnique: true,
	})
	err := s.db.CreateAttackPattern(attackPattern)
	assert.Nil(s.T(), err)
}
// TestGetAttackPatterns pages through attack patterns (first 10, descending
// by _id, no filter) and expects exactly 6 records in the suite's fixtures.
func (s *DatabaseSuite) TestGetAttackPatterns() {
	start := int64(0)
	limit := int64(10)
	sort := "_id"
	order := -1 // -1 = descending sort
	users := s.db.GetAttackPatterns(&model.Paging{
		Skip:      &start,
		Limit:     &limit,
		SortKey:   sort,
		SortVal:   order,
		Condition: nil,
	})
	assert.Len(s.T(), users, 6)
}
// TestGetAttackByName looks an attack pattern up by its exact name and
// asserts the stored name round-trips unchanged.
func (s *DatabaseSuite) TestGetAttackByName() {
	user := s.db.GetAttackPatternByName("我是ss哈哈")
	assert.Equal(s.T(), "我是ss哈哈", user.Name)
}
// TestGetAttackByIDs fetches an attack pattern by STIX id.
// NOTE(review): this test only prints the name and asserts nothing — it
// would not fail even if the lookup returned the wrong record.
func (s *DatabaseSuite) TestGetAttackByIDs() {
	user := s.db.GetAttackPatternByStixID("id287487")
	println(user.Name)
}
// TestUpdateAttackByIDs renames an attack pattern fetched by STIX id and
// persists the change.
// NOTE(review): the update result is never re-read or asserted — the test
// passes even if UpdateAttackPattern silently fails.
func (s *DatabaseSuite) TestUpdateAttackByIDs() {
	attackPattern := s.db.GetAttackPatternByStixID("id287487")
	attackPattern.Name = "改变厚的哈哈哈哈哈哈哈"
	s.db.UpdateAttackPattern(attackPattern)
	println(attackPattern.Name)
}
|
package machineLearning
import (
"math/rand"
"fmt"
)
// Actions returns the fixed set of integer transformations the machine can
// propose: increment, reset to zero, halve, scale by 100, and parity (mod 2).
// A fresh slice is built on every call.
func Actions() []func(int) int {
	increment := func(x int) int { return x + 1 }
	toZero := func(x int) int { return 0 }
	halve := func(x int) int { return (x / 2) }
	scaleHundred := func(x int) int { return x * 100 }
	parity := func(x int) int { return x % 2 }
	return []func(int) int{increment, toZero, halve, scaleHundred, parity}
}
// ACTIONS_SIZE is the number of available actions and the dimension of the
// machine's square score matrix.
// NOTE(review): Go naming convention would be actionsSize, but renaming
// would touch every caller in the package.
var ACTIONS_SIZE int = len(Actions())
// Machine is a simple reinforcement learner: matrix[command][action] scores
// how well each action has previously been received for each command.
type Machine struct {
	matrix             [][]int // ACTIONS_SIZE x ACTIONS_SIZE score table
	lastCommand        int     // row of the most recent Command call; -1 before first use
	lastActionProposed int     // action index returned by the most recent Command call
}
// NewMachine initialises a Machine with an ACTIONS_SIZE x ACTIONS_SIZE
// zeroed score matrix, no last command (-1), and a random initial action
// proposal.
func NewMachine() Machine {
	matrix := make([][]int, ACTIONS_SIZE)
	for i := range matrix {
		matrix[i] = make([]int, ACTIONS_SIZE)
	}
	return Machine{matrix, -1, rand.Intn(ACTIONS_SIZE)}
}
// PrintState prints the last command/proposal pair and the score row for the
// last command.
// NOTE(review): calling this before Command has ever run panics, because
// lastCommand starts at -1 and is used as a row index here.
func (m *Machine) PrintState() {
	fmt.Printf("LastCommand:%d\tlastProposedAction:%d\n", m.lastCommand, m.lastActionProposed)
	for _, r := range m.matrix[m.lastCommand] {
		fmt.Print(r, " ")
	}
	fmt.Println("")
}
// PrintMatrix dumps the entire score matrix, one command row per line.
func (m *Machine) PrintMatrix() {
	fmt.Println("Matrix:")
	for _, row := range m.matrix {
		for _, cell := range row {
			fmt.Print(cell, " ")
		}
		fmt.Println("")
	}
}
// Command records cmd as the current command, proposes an action for it, and
// returns that action applied to num.
//
// Selection walks the score row matrix[cmd] cyclically (starting at index 1,
// wrapping via i % ACTIONS_SIZE), counting only cells with a non-negative
// score, and picks the (lastActionProposed+1)-th such cell.
// NOTE(review): if every cell in matrix[cmd] is negative this loop never
// terminates — confirm whether Response can drive a whole row below zero.
func (m *Machine) Command(cmd int, num int) int {
	c := m.lastActionProposed //rand.Intn(5)
	i := 0
	for c >= 0 {
		i++
		if m.matrix[cmd][i%ACTIONS_SIZE] >= 0 {
			c--
		}
	}
	proposedAction := i % ACTIONS_SIZE
	m.lastCommand = cmd
	m.lastActionProposed = proposedAction
	return Actions()[proposedAction](num)
}
// Response feeds reinforcement back for the most recent proposal: a positive
// response increments the matching score cell, a negative one decrements it.
func (m *Machine) Response(res bool) {
	delta := -1
	if res {
		delta = 1
	}
	m.matrix[m.lastCommand][m.lastActionProposed] += delta
}
|
package mint
//sudo service mongodb start
import (
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// BudgetDBNoSQL stores per-user, per-category monthly budget/spending totals
// in a MongoDB collection.
type BudgetDBNoSQL struct {
	c *mgo.Collection // testdb.monthly_spending
}
// NewBudgetDBNoSQL dials the local MongoDB instance, ensures the unique
// (username, category) index on testdb.monthly_spending, and returns a
// wrapper around that collection. On failure it returns a nil wrapper and
// releases the session instead of handing back a half-initialized value.
// NOTE(review): the mgo session itself is never exposed, so callers cannot
// close it later — confirm the process owns it for its whole lifetime.
func NewBudgetDBNoSQL() (*BudgetDBNoSQL, error) {
	sess, err := mgo.Dial("localhost")
	if err != nil {
		return nil, err
	}
	c := sess.DB("testdb").C("monthly_spending")
	index := mgo.Index{Key: []string{"username", "category"}, Unique: true}
	if err := c.EnsureIndex(index); err != nil {
		sess.Close()
		return nil, err
	}
	return &BudgetDBNoSQL{c}, nil
}
// GetAllFrom returns every monthly total stored for the given user; on a
// query error the (possibly partially filled) slice is returned with it.
func (b *BudgetDBNoSQL) GetAllFrom(username string) ([]MonthlyTotal, error) {
	totals := []MonthlyTotal{}
	err := b.c.Find(bson.M{"username": username}).All(&totals)
	return totals, err
}
// UpdateBudget sets (upserting if needed) the budget amount for the given
// user/category pair.
func (b *BudgetDBNoSQL) UpdateBudget(username, category string, budget float64) error {
	_, err := b.c.Upsert(bson.M{"username": username, "category": category}, bson.M{"$set": bson.M{"budget": budget}})
	return err
}
// SetMonthlyTotal sets (upserting if needed) the amount spent this month for
// the given user/category pair.
func (b *BudgetDBNoSQL) SetMonthlyTotal(username, category string, spent float64) error {
	_, err := b.c.Upsert(bson.M{"username": username, "category": category}, bson.M{"$set": bson.M{"spent": spent}})
	return err
}
// ResetMonth zeroes the "spent" field on every document, for the start of a
// new month; budgets are left untouched.
func (b *BudgetDBNoSQL) ResetMonth() error {
	_, err := b.c.UpdateAll(bson.M{}, bson.M{"$set": bson.M{"spent": 0}})
	return err
}
// Clear drops the whole collection. A throwaway document is inserted first
// because DropCollection errors out on a non-existent (empty) collection.
func (b *BudgetDBNoSQL) Clear() error {
	if err := b.c.Insert(bson.M{"todelete": true}); err != nil {
		return err
	}
	return b.c.DropCollection()
}
|
package schain
import (
"fmt"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/openrtb_ext"
)
// BidderToPrebidSChains organizes the ORTB 2.5 multiple root schain nodes
// into a map of schain nodes by bidder. It errors when any bidder appears in
// more than one schain, since a bidder may have at most one.
func BidderToPrebidSChains(sChains []*openrtb_ext.ExtRequestPrebidSChain) (map[string]*openrtb2.SupplyChain, error) {
	bidderToSChains := make(map[string]*openrtb2.SupplyChain)

	for _, schainWrapper := range sChains {
		for _, bidder := range schainWrapper.Bidders {
			// Reject duplicates rather than silently overwriting.
			if _, present := bidderToSChains[bidder]; present {
				return nil, fmt.Errorf("request.ext.prebid.schains contains multiple schains for bidder %s; "+
					"it must contain no more than one per bidder.", bidder)
			}
			bidderToSChains[bidder] = &schainWrapper.SChain
		}
	}

	return bidderToSChains, nil
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package login
import (
"context"
"fmt"
"time"
"chromiumos/tast/common/hwsec"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/lockscreen"
"chromiumos/tast/local/chrome/uiauto/ossettings"
hwseclocal "chromiumos/tast/local/hwsec"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// testParam selects the enrollment surface and pin-entry mode for one
// parameterization of the Pin test.
type testParam struct {
	OobeEnroll bool // enroll the PIN during OOBE instead of via Settings
	Autosubmit bool // rely on autosubmit (PINs of length <= 12) instead of an explicit submit
}
// init registers the Pin test with its four parameterizations: enrollment
// surface (Settings vs OOBE) crossed with autosubmit on/off.
func init() {
	testing.AddTest(&testing.Test{
		Func:         Pin,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test pin enrollment, pin unlock and pin login",
		Contacts: []string{
			"anastasiian@google.com",
			"bohdanty@google.com",
			"chromeos-sw-engprod@google.com",
			"cros-oac@google.com",
			"cros-oobe@google.com",
		},
		SoftwareDeps: []string{"chrome"},
		Attr:         []string{"group:mainline", "informational"},
		VarDeps:      []string{"ui.signinProfileTestExtensionManifestKey"},
		// Two full Chrome logins happen (enrollment + pin login), plus UI work.
		Timeout: 2*chrome.LoginTimeout + 25*time.Second,
		Params: []testing.Param{{
			Name: "settings_enroll",
			Val:  testParam{false, false},
		}, {
			Name: "settings_enroll_autosubmit",
			Val:  testParam{false, true},
		}, {
			Name: "oobe_enroll",
			Val:  testParam{true, false},
		}, {
			Name: "oobe_enroll_autosubmit",
			Val:  testParam{true, true},
		}},
		SearchFlags: []*testing.StringPair{{
			Key: "feature_id",
			// Pin login and unlock.
			Value: "screenplay-0c901f0b-a092-4f23-9eaa-36f8acfb0220",
		}},
	})
}
// Pin exercises the full PIN lifecycle: enrollment (via the OOBE pin-setup
// screen or the Settings app, depending on the test param), unlocking the
// lock screen with the PIN, and — when the device supports low-entropy
// credentials — logging in with the PIN after a Chrome restart.
func Pin(ctx context.Context, s *testing.State) {
	oobeEnroll := s.Param().(testParam).OobeEnroll
	autosubmit := s.Param().(testParam).Autosubmit
	pin := "1234566543210"
	if autosubmit {
		// autosubmit works for pins with len<=12 only
		pin = "654321"
	}
	keyboard, err := input.VirtualKeyboard(ctx)
	if err != nil {
		s.Fatal("Failed to get virtual keyboard: ", err)
	}
	defer keyboard.Close()
	// Phase 1 (inside a closure so its deferred cleanups run before the
	// login phase below): enroll the PIN and verify screen unlock with it.
	func() {
		var cr *chrome.Chrome
		var err error
		var tconn *chrome.TestConn
		if oobeEnroll {
			cr, err = chrome.New(ctx,
				chrome.ExtraArgs(
					// Force pin screen during OOBE.
					"--force-tablet-mode=touch_view",
					"--vmodule=wizard_controller=1",
					// Disable VK so it does not get in the way of the pin pad.
					"--disable-virtual-keyboard"),
				chrome.DontSkipOOBEAfterLogin())
			if err != nil {
				s.Fatal("Chrome login failed: ", err)
			}
			defer cr.Close(ctx)
			oobeConn, err := cr.WaitForOOBEConnection(ctx)
			if err != nil {
				s.Fatal("Failed to create OOBE connection: ", err)
			}
			defer oobeConn.Close()
			if err := oobeConn.Eval(ctx, "OobeAPI.advanceToScreen('pin-setup')", nil); err != nil {
				s.Fatal("Failed to advance to the pin screen: ", err)
			}
			if err := oobeConn.WaitForExprFailOnErr(ctx, "!document.querySelector('#pin-setup').hidden"); err != nil {
				s.Fatal("Failed to wait for the pin screen: ", err)
			}
			// The pin-setup flow asks for the PIN twice: once to set it
			// ("start") and once to confirm it ("confirm").
			for _, step := range []string{"start", "confirm"} {
				if err := oobeConn.WaitForExprFailOnErr(ctx, fmt.Sprintf("document.querySelector('#pin-setup').uiStep === '%s'", step)); err != nil {
					s.Fatalf("Failed to wait for %s step: %v", step, err)
				}
				if err := oobeConn.Eval(ctx, fmt.Sprintf("document.querySelector('#pin-setup').$.pinKeyboard.$.pinKeyboard.$.pinInput.value = '%s'", pin), nil); err != nil {
					s.Fatal("Failed to enter pin: ", err)
				}
				if err := oobeConn.Eval(ctx, "document.querySelector('#pin-setup').$.nextButton.click()", nil); err != nil {
					s.Fatal("Failed to click on the next button: ", err)
				}
			}
			if err := oobeConn.WaitForExprFailOnErr(ctx, "document.querySelector('#pin-setup').uiStep === 'done'"); err != nil {
				s.Fatal("Failed to wait for the done step: ", err)
			}
			if err := oobeConn.Eval(ctx, "OobeAPI.skipPostLoginScreens()", nil); err != nil {
				// This is not fatal because sometimes it fails because Oobe shutdowns too fast after the call - which produces error.
				s.Log("Failed to call skip post login screens: ", err)
			}
			if err := cr.WaitForOOBEConnectionToBeDismissed(ctx); err != nil {
				s.Fatal("Failed to wait for OOBE to be dismissed: ", err)
			}
			tconn, err = cr.TestAPIConn(ctx)
			if err != nil {
				s.Fatal("Getting test API connection failed: ", err)
			}
			defer faillog.DumpUITreeOnError(ctx, s.OutDir(), s.HasError, tconn)
		} else {
			// Setup pin from the settings.
			// Disable VK so it does not get in the way of the pin pad.
			cr, err = chrome.New(ctx, chrome.ExtraArgs("--disable-virtual-keyboard"))
			if err != nil {
				s.Fatal("Chrome login failed: ", err)
			}
			defer cr.Close(ctx)
			tconn, err = cr.TestAPIConn(ctx)
			if err != nil {
				s.Fatal("Getting test API connection failed: ", err)
			}
			defer faillog.DumpUITreeOnError(ctx, s.OutDir(), s.HasError, tconn)
			// Set up PIN through a connection to the Settings page.
			settings, err := ossettings.Launch(ctx, tconn)
			if err != nil {
				s.Fatal("Failed to launch Settings app: ", err)
			}
			if err := settings.EnablePINUnlock(cr, cr.Creds().Pass, pin, autosubmit)(ctx); err != nil {
				s.Fatal("Failed to enable PIN unlock: ", err)
			}
		}
		// Lock the screen.
		if err := lockscreen.Lock(ctx, tconn); err != nil {
			s.Fatal("Failed to lock the screen: ", err)
		}
		if st, err := lockscreen.WaitState(ctx, tconn, func(st lockscreen.State) bool { return st.Locked && st.ReadyForPassword }, 30*time.Second); err != nil {
			s.Fatalf("Waiting for screen to be locked failed: %v (last status %+v)", err, st)
		}
		// Enter and submit the PIN to unlock the DUT.
		if err := lockscreen.EnterPIN(ctx, tconn, keyboard, pin); err != nil {
			s.Fatal("Failed to enter in PIN: ", err)
		}
		if !autosubmit {
			if err := lockscreen.SubmitPINOrPassword(ctx, tconn); err != nil {
				s.Fatal("Failed to submit PIN: ", err)
			}
		}
		if st, err := lockscreen.WaitState(ctx, tconn, func(st lockscreen.State) bool { return !st.Locked }, 30*time.Second); err != nil {
			s.Fatalf("Waiting for screen to be unlocked failed: %v (last status %+v)", err, st)
		}
	}()
	// Phase 2: restart Chrome to the sign-in screen (keeping cryptohome
	// state) and log in — with the PIN when the hardware supports
	// low-entropy credentials, otherwise with the password.
	cmdRunner := hwseclocal.NewCmdRunner()
	cryptohome := hwsec.NewCryptohomeClient(cmdRunner)
	supportsLE, err := cryptohome.SupportsLECredentials(ctx)
	if err != nil {
		s.Fatal("Failed to get supported policies: ", err)
	}
	options := []chrome.Option{
		chrome.ExtraArgs("--skip-force-online-signin-for-testing"),
		chrome.NoLogin(),
		chrome.KeepState(),
		chrome.LoadSigninProfileExtension(s.RequiredVar("ui.signinProfileTestExtensionManifestKey")),
	}
	if supportsLE {
		// Disable VK so it does not get in the way of the pin pad.
		options = append(options, chrome.ExtraArgs("--disable-virtual-keyboard"))
	}
	cr, err := chrome.New(ctx, options...)
	if err != nil {
		s.Fatal("Chrome start failed: ", err)
	}
	defer cr.Close(ctx)
	tconn, err := cr.SigninProfileTestAPIConn(ctx)
	if err != nil {
		s.Fatal("Getting test Signin Profile API connection failed: ", err)
	}
	if !supportsLE {
		if err := lockscreen.WaitForPasswordField(ctx, tconn, cr.Creds().User, 20*time.Second); err != nil {
			s.Fatal("Failed to wait for the password field: ", err)
		}
		if err = lockscreen.EnterPassword(ctx, tconn, cr.Creds().User, cr.Creds().Pass, keyboard); err != nil {
			s.Fatal("Failed to enter password: ", err)
		}
	} else {
		// Enter and submit the PIN to unlock the DUT.
		if err := lockscreen.EnterPIN(ctx, tconn, keyboard, pin); err != nil {
			s.Fatal("Failed to enter in PIN: ", err)
		}
		if !autosubmit {
			if err := lockscreen.SubmitPINOrPassword(ctx, tconn); err != nil {
				s.Fatal("Failed to submit PIN: ", err)
			}
		}
	}
	if err := lockscreen.WaitForLoggedIn(ctx, tconn, chrome.LoginTimeout); err != nil {
		s.Fatal("Failed to login: ", err)
	}
}
|
package main
import (
	"log"
	"net/http"

	"github.com/gin-gonic/gin"
)
// main starts a gin HTTP server on :9090 with two demo JSON endpoints.
func main() {
	g := gin.Default()
	// Route paths must begin with '/': gin panics at registration time
	// otherwise ("path must begin with '/'").
	g.GET("/v1/api/images", func(context *gin.Context) {
		context.JSON(http.StatusOK, gin.H{
			"images": "docker",
		})
	})
	// Named parameters must form their own path segment, so the original
	// "dir:path" segment is split into "/dir/:path".
	g.GET("/v2/api/dir/:path", func(context *gin.Context) {
		context.JSON(http.StatusOK, gin.H{
			"message": context.Param("path"),
		})
	})
	// Run blocks; it only returns on a startup/serve failure.
	if err := g.Run(":9090"); err != nil {
		log.Fatal(err)
	}
}
|
//dsp video duration directional
package logic
// Duration is an inclusive-min, exclusive-max video-duration window; a zero
// bound means that side of the window is unbounded.
type Duration struct {
	Min int
	Max int
}

// UnderDuration reports whether duration falls inside the window: at least
// Min (when Min is set) and strictly below Max (when Max is set).
func (d Duration) UnderDuration(duration int) bool {
	if d.Min != 0 && duration < d.Min {
		return false
	}
	return d.Max == 0 || duration < d.Max
}
|
package actions
// Action identifier strings dispatched by the UI / job triggers.
const (
	// PerformAction is Octant's generic "perform action" event key.
	PerformAction = "action.octant.dev/performAction"
	// TriggerJob requests a Jenkins X pipeline job run.
	TriggerJob = "action.jenkins-x.io/job"
	// TriggerBootJob requests a Jenkins X boot job run.
	TriggerBootJob = "action.jenkins-x.io/triggerBootJob"
)
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package splitable
import (
"database/sql"
"time"
"yunion.io/x/pkg/errors"
"yunion.io/x/sqlchemy"
)
// STableMetadata is one bookkeeping row describing a single shard table of a
// split table: its name, the range it covers, and its lifecycle state.
// NOTE(review): Start/End and StartDate/EndDate presumably bound the record
// ids/timestamps stored in the shard — confirm against the writer side.
type STableMetadata struct {
	Id        int64     `primary:"true" auto_increment:"true"`
	Table     string    `width:"64" charset:"ascii"` // name of the shard table
	Start     int64     `nullable:"true"`
	End       int64     `nullable:"true"`
	StartDate time.Time `nullable:"true"`
	EndDate   time.Time `nullable:"true"`
	Deleted   bool      `nullable:"false"` // soft-delete flag; GetTableMetas skips deleted rows
	DeleteAt  time.Time `nullable:"true"`
	CreatedAt time.Time `nullable:"false" created_at:"true"`
}
// GetTableMetas returns the metadata rows of all live (non-deleted) shards,
// ordered by id ascending. An empty result is not an error.
func (spec *SSplitTableSpec) GetTableMetas() ([]STableMetadata, error) {
	metas := make([]STableMetadata, 0)
	query := spec.metaSpec.Query().Asc("id").IsFalse("deleted")
	if err := query.All(&metas); err != nil && errors.Cause(err) != sql.ErrNoRows {
		return nil, errors.Wrap(err, "query metadata")
	}
	return metas, nil
}
// GetTableSpec derives a concrete table spec for one shard by cloning the
// template spec under the shard's table name and start offset.
func (spec *SSplitTableSpec) GetTableSpec(meta STableMetadata) *sqlchemy.STableSpec {
	template := *spec.tableSpec
	return template.Clone(meta.Table, meta.Start)
}
|
package rocserv
import (
"errors"
"fmt"
"github.com/opentracing-contrib/go-grpc"
"github.com/opentracing/opentracing-go"
"github.com/shawnfeng/sutil/slog"
"github.com/shawnfeng/sutil/stime"
"google.golang.org/grpc"
"sync"
"time"
)
// ServProtocol enumerates the wire protocols a service client may use.
type ServProtocol int

const (
	// iota order matters: GRPC (0) is the value passed to breaker/collector calls in this file.
	GRPC ServProtocol = iota
	THRIFT
	HTTP
)
// ClientGrpc is a pooled, breaker-protected gRPC client for one service
// processor. Connections are created lazily via fnFactory and cached in pool.
type ClientGrpc struct {
	clientLookup ClientLookup // service discovery / registry lookup
	processor string // target processor (service) name
	breaker *Breaker // circuit breaker guarding all calls
	router Router // routing strategy used to pick a server instance
	pool *ClientPool // per-address idle connection pool
	fnFactory func(client *grpc.ClientConn) interface{} // wraps a raw conn into a generated stub
}
// Provider identifies one concrete server endpoint by IP and port.
type Provider struct {
	Ip string
	Port uint16
}
// NewClientGrpcWithRouterType builds a ClientGrpc for processor, with a
// connection pool of capacity poollen per address and the routing strategy
// selected by routerType (passed through to NewRouter).
func NewClientGrpcWithRouterType(cb ClientLookup, processor string, poollen int, fn func(client *grpc.ClientConn) interface{}, routerType int) *ClientGrpc {
	c := &ClientGrpc{
		clientLookup: cb,
		processor:    processor,
		breaker:      NewBreaker(cb),
		router:       NewRouter(routerType, cb),
		fnFactory:    fn,
	}
	// The pool dials through c.newClient, so it is wired up after c exists.
	c.pool = NewClientPool(poollen, c.newClient)
	return c
}
// NewClientGrpcByConcurrentRouter builds a ClientGrpc using router type 1
// (the concurrent router); see NewClientGrpcWithRouterType.
func NewClientGrpcByConcurrentRouter(cb ClientLookup, processor string, poollen int, fn func(client *grpc.ClientConn) interface{}) *ClientGrpc {
	return NewClientGrpcWithRouterType(cb, processor, poollen, fn, 1)
}
// NewClientGrpc builds a ClientGrpc using router type 0 (the default router);
// see NewClientGrpcWithRouterType.
func NewClientGrpc(cb ClientLookup, processor string, poollen int, fn func(client *grpc.ClientConn) interface{}) *ClientGrpc {
	return NewClientGrpcWithRouterType(cb, processor, poollen, fn, 0)
}
// CustomizedRouteRpc lets the caller choose the target endpoint via a
// callback, then performs the call like DirectRouteRpc.
func (m *ClientGrpc) CustomizedRouteRpc(getProvider func() *Provider, fnrpc func(interface{}) error) error {
	if getProvider == nil {
		return errors.New("fun getProvider is nil")
	}
	return m.DirectRouteRpc(getProvider(), fnrpc)
}
// DirectRouteRpc invokes fnrpc against the exact endpoint named by provider,
// wrapped with router pre/post hooks, latency collection, and the circuit
// breaker. Errors if provider is nil or not present in the registry.
func (m *ClientGrpc) DirectRouteRpc(provider *Provider, fnrpc func(interface{}) error) error {
	if provider == nil {
		return errors.New("get Provider is nil")
	}
	si, rc, e := m.getClient(provider)
	if e != nil {
		return e
	}
	if rc == nil {
		// NOTE(review): message says "thrift" but this is the gRPC client —
		// looks copy-pasted; confirm before touching the string.
		return fmt.Errorf("not find thrift service:%s processor:%s", m.clientLookup.ServPath(), m.processor)
	}
	m.router.Pre(si)
	defer m.router.Post(si)
	// Bind the call parameters into a zero-arg closure for the breaker.
	call := func(si *ServInfo, rc rpcClient, fnrpc func(interface{}) error) func() error {
		return func() error {
			return m.rpc(si, rc, fnrpc)
		}
	}(si, rc, fnrpc)
	// GetFunName(3) presumably resolves the caller's function name from the
	// stack — TODO confirm the depth matches this call site.
	funcName := GetFunName(3)
	var err error
	st := stime.NewTimeStat()
	defer func() {
		// Emit call metrics (duration/result) after the call completes.
		collector(m.clientLookup.ServKey(), m.processor, st.Duration(), 0, si.Servid, funcName, err)
	}()
	err = m.breaker.Do(0, si.Servid, funcName, call, GRPC, nil)
	return err
}
// getClient resolves provider (ip:port) against the registry and returns the
// matching ServInfo plus a pooled client for its address.
//
// Fix: the original returned the identical "server provider is emtpy" error
// for two distinct failures (empty registry vs. address not registered),
// making the second case undiagnosable; the not-found path now names the
// address it was looking for.
func (m *ClientGrpc) getClient(provider *Provider) (*ServInfo, rpcClient, error) {
	servInfos := m.clientLookup.GetAllServAddr(m.processor)
	if len(servInfos) < 1 {
		return nil, nil, errors.New(m.processor + " server provider is emtpy ")
	}
	addr := fmt.Sprintf("%s:%d", provider.Ip, provider.Port)
	var serv *ServInfo
	for _, item := range servInfos {
		if item.Addr == addr {
			serv = item
			break
		}
	}
	if serv == nil {
		// Registry had providers, just not this address.
		return nil, nil, errors.New(m.processor + " no server provider found for addr " + addr)
	}
	return serv, m.pool.GrtClient(serv.Addr), nil
}
// Rpc routes a call by hash key (the router picks the server instance) and
// invokes fnrpc through the circuit breaker, mirroring DirectRouteRpc.
func (m *ClientGrpc) Rpc(haskkey string, fnrpc func(interface{}) error) error {
	si, rc := m.route(haskkey)
	if rc == nil {
		// NOTE(review): "thrift" in the message looks copy-pasted from the
		// thrift client — confirm before changing the string.
		return fmt.Errorf("not find thrift service:%s processor:%s", m.clientLookup.ServPath(), m.processor)
	}
	m.router.Pre(si)
	defer m.router.Post(si)
	// Bind the call parameters into a zero-arg closure for the breaker.
	call := func(si *ServInfo, rc rpcClient, fnrpc func(interface{}) error) func() error {
		return func() error {
			return m.rpc(si, rc, fnrpc)
		}
	}(si, rc, fnrpc)
	// GetFunName(3) presumably resolves the caller's name — TODO confirm depth.
	funcName := GetFunName(3)
	var err error
	st := stime.NewTimeStat()
	defer func() {
		// Emit call metrics (duration/result) after the call completes.
		collector(m.clientLookup.ServKey(), m.processor, st.Duration(), 0, si.Servid, funcName, err)
	}()
	err = m.breaker.Do(0, si.Servid, funcName, call, GRPC, nil)
	return err
}
// rpc performs one call on rc: on success the connection is recycled into
// the pool; on failure it is closed and dropped.
func (m *ClientGrpc) rpc(si *ServInfo, rc rpcClient, fnrpc func(interface{}) error) error {
	fun := "ClientGrpc.rpc -->"
	c := rc.GetServiceClient()
	err := fnrpc(c)
	if err == nil {
		m.pool.Put(si.Addr, rc)
	} else {
		// NOTE(review): %s with a *ServInfo relies on its default formatting.
		slog.Warnf("%s close rpcclient s:%s", fun, si)
		rc.Close()
	}
	return err
}
// route asks the router for the server instance that owns key and returns it
// together with a pooled client, or (nil, nil) when routing fails.
func (m *ClientGrpc) route(key string) (*ServInfo, rpcClient) {
	serv := m.router.Route(m.processor, key)
	if serv == nil {
		return nil, nil
	}
	return serv, m.pool.GrtClient(serv.Addr)
}
// grpcClient pairs a generated service stub with the underlying connection
// so the connection can be closed/pooled alongside the stub.
type grpcClient struct {
	serviceClient interface{} // stub produced by ClientGrpc.fnFactory
	conn *grpc.ClientConn // raw connection backing the stub
}
// SetTimeout is not implemented for gRPC clients; it always returns an error.
// Fix: the original message read "is not support " (grammar + trailing space).
func (m *grpcClient) SetTimeout(timeout time.Duration) error {
	return fmt.Errorf("SetTimeout is not supported")
}
// Close tears down the underlying gRPC connection.
func (m *grpcClient) Close() error {
	return m.conn.Close()
}
// GetServiceClient returns the generated stub wrapped by this client.
func (m *grpcClient) GetServiceClient() interface{} {
	return m.serviceClient
}
// newClient dials addr with opentracing interceptors installed and wraps the
// connection via fnFactory. Returns nil when the dial fails — callers of the
// pool Factory must tolerate a nil rpcClient.
func (m *ClientGrpc) newClient(addr string) rpcClient {
	fun := "ClientGrpc.newClient -->"
	// More interceptor kinds can be added here.
	tracer := opentracing.GlobalTracer()
	opts := []grpc.DialOption{
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(
			otgrpc.OpenTracingClientInterceptor(tracer)),
		grpc.WithStreamInterceptor(
			otgrpc.OpenTracingStreamClientInterceptor(tracer)),
	}
	conn, err := grpc.Dial(addr, opts...)
	if err != nil {
		slog.Errorf("%s NetTSocket addr:%s err:%s", fun, addr, err)
		return nil
	}
	client := m.fnFactory(conn)
	return &grpcClient{
		serviceClient: client,
		conn: conn,
	}
}
// ClientPool keeps, per address, a bounded channel of idle rpcClients.
type ClientPool struct {
	poolClient sync.Map // addr (string) -> chan rpcClient
	poolLen int // capacity of each per-address channel
	Factory func(addr string) rpcClient // dials a new client when the pool is empty
}
// NewClientPool creates a pool whose per-address idle channel holds at most
// poolLen clients, dialing new ones through factory.
func NewClientPool(poolLen int, factory func(addr string) rpcClient) *ClientPool {
	pool := &ClientPool{poolLen: poolLen, Factory: factory}
	return pool
}
// GrtClient hands out a client for addr: an idle one from the pool when
// available, otherwise a fresh one from Factory. (The "Grt" typo is kept —
// it is the exported API name.)
func (m *ClientPool) GrtClient(addr string) rpcClient {
	fun := "ClientPool.GrtClient -->"
	po := m.getPool(addr)
	var c rpcClient
	select {
	case c = <-po:
		slog.Tracef("%s get:%s len:%d", fun, addr, len(po))
	default:
		// Pool empty: dial a new client. NOTE(review): Factory may return
		// nil on dial failure (see newClient) — callers must check.
		c = m.Factory(addr)
	}
	return c
}
// getPool returns the idle-client channel for addr, creating it on first use.
//
// Fix: the original did Load-then-Store, so two goroutines hitting a new addr
// concurrently could each create a channel and one Store would overwrite the
// other, stranding any clients recycled into the lost channel. LoadOrStore
// makes creation race-free. Also replaced the non-idiomatic "ok == true".
func (m *ClientPool) getPool(addr string) chan rpcClient {
	fun := "ClientPool.getPool -->"
	if value, ok := m.poolClient.Load(addr); ok {
		return value.(chan rpcClient)
	}
	slog.Infof("%s not found addr:%s", fun, addr)
	value, _ := m.poolClient.LoadOrStore(addr, make(chan rpcClient, m.poolLen))
	return value.(chan rpcClient)
}
// Put recycles client into the idle pool for addr; when the pool is already
// full the connection is closed instead.
func (m *ClientPool) Put(addr string, client rpcClient) {
	fun := "ClientPool.Close -->"
	// po is the idle-connection channel for this address.
	po := m.getPool(addr)
	select {
	// Recycle the client into the pool.
	case po <- client:
		slog.Tracef("%s payback:%s len:%d", fun, addr, len(po))
	// Pool is full — cannot recycle, close the connection.
	default:
		slog.Infof("%s full not payback:%s len:%d", fun, addr, len(po))
		client.Close()
	}
}
|
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shim
import (
"testing"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.dev/gvisor/pkg/shim/utils"
)
// TestCgroupPath checks setPodCgroup: when Linux.CgroupsPath names a
// container nested under a pod segment ("…/podXXX/container"), the pod-level
// parent path is expected under cgroupParentAnnotation; paths without a pod
// component should leave the annotation empty and report no update.
func TestCgroupPath(t *testing.T) {
	for _, tc := range []struct {
		name string // subtest name
		path string // input Linux.CgroupsPath
		want string // expected cgroupParentAnnotation ("" = no update)
	}{
		{
			name: "simple",
			path: "foo/pod123/container",
			want: "foo/pod123",
		},
		{
			name: "absolute",
			path: "/foo/pod123/container",
			want: "/foo/pod123",
		},
		{
			name: "no-container",
			path: "foo/pod123",
			want: "",
		},
		{
			name: "no-container-absolute",
			path: "/foo/pod123",
			want: "",
		},
		{
			// "podium" must not be mistaken for the pod segment.
			name: "double-pod",
			path: "/foo/podium/pod123/container",
			want: "/foo/podium/pod123",
		},
		{
			name: "start-pod",
			path: "pod123/container",
			want: "pod123",
		},
		{
			name: "start-pod-absolute",
			path: "/pod123/container",
			want: "/pod123",
		},
		{
			// Repeated slashes are expected to be normalized away.
			name: "slashes",
			path: "///foo/////pod123//////container",
			want: "/foo/pod123",
		},
		{
			name: "no-pod",
			path: "/foo/nopod123/container",
			want: "",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			spec := specs.Spec{
				Linux: &specs.Linux{
					CgroupsPath: tc.path,
				},
			}
			updated := setPodCgroup(&spec)
			if got := spec.Annotations[cgroupParentAnnotation]; got != tc.want {
				t.Errorf("setPodCgroup(%q), want: %q, got: %q", tc.path, tc.want, got)
			}
			// The boolean return must agree with whether an annotation was set.
			if shouldUpdate := len(tc.want) > 0; shouldUpdate != updated {
				t.Errorf("setPodCgroup(%q)=%v, want: %v", tc.path, updated, shouldUpdate)
			}
		})
	}
}
// TestCgroupNoUpdate covers specs whose cgroup path must NOT be updated:
// an empty spec, and a sub-container explicitly annotated as container-type
// "container" (only pod sandboxes should receive the parent annotation).
func TestCgroupNoUpdate(t *testing.T) {
	for _, tc := range []struct {
		name string
		spec *specs.Spec
	}{
		{
			name: "empty",
			spec: &specs.Spec{},
		},
		{
			name: "subcontainer",
			spec: &specs.Spec{
				Linux: &specs.Linux{
					CgroupsPath: "foo/pod123/container",
				},
				Annotations: map[string]string{
					utils.ContainerTypeAnnotation: utils.ContainerTypeContainer,
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			if updated := setPodCgroup(tc.spec); updated {
				t.Errorf("setPodCgroup(%+v), got: %v, want: false", tc.spec.Linux, updated)
			}
		})
	}
}
|
package main
//invalid
//Case to check if float * int is type cast to int or float or does it give error (error is given)
// main deliberately does NOT compile: the untyped constant expression
// 6.5 * 7 evaluates to 45.5, which is not representable as an int, so the
// compiler rejects the declaration — demonstrating that Go never implicitly
// truncates a constant float*int product. (x being unused would be a second
// compile error.)
func main() {
	var x int = 6.5 * 7
}
|
package fiber
import (
"net/http"
"github.com/gojek/fiber/errors"
)
// Response is the common contract for fiber responses: payload bytes, an
// HTTP-style status code, success flag, and the name of the backend that
// produced it.
type Response interface {
	IsSuccess() bool
	Payload() []byte
	StatusCode() int
	BackendName() string
	WithBackendName(string) Response
}
// ErrorResponse is a Response representing a failure; the embedded
// CachedPayload supplies the Payload() implementation.
type ErrorResponse struct {
	*CachedPayload
	code int // HTTP status code of the failure
	backend string // backend that produced the error, set via WithBackendName
}
// IsSuccess always reports false: an ErrorResponse is a failure by definition.
func (resp *ErrorResponse) IsSuccess() bool {
	return false
}
// BackendName returns the backend name previously set via WithBackendName
// (empty until then).
func (resp *ErrorResponse) BackendName() string {
	return resp.backend
}
// WithBackendName records the producing backend and returns the receiver
// (mutates in place; it does not copy).
func (resp *ErrorResponse) WithBackendName(backendName string) Response {
	resp.backend = backendName
	return resp
}
// StatusCode returns the HTTP status code carried by this error response.
func (resp *ErrorResponse) StatusCode() int {
	return resp.code
}
// NewErrorResponse wraps err as an ErrorResponse. An *errors.HTTPError keeps
// its own status code; any other error is reported as 500 Internal Server
// Error with the error text as message.
func NewErrorResponse(err error) Response {
	httpErr, ok := err.(*errors.HTTPError)
	if !ok {
		httpErr = &errors.HTTPError{
			Code:    http.StatusInternalServerError,
			Message: err.Error(),
		}
	}
	// ToJSON error is deliberately ignored; the payload is best-effort.
	payload, _ := httpErr.ToJSON()
	return &ErrorResponse{
		CachedPayload: NewCachedPayload(payload),
		code:          httpErr.Code,
	}
}
|
package main
import "fmt"
// main is a scratch demo of Go slice mechanics: slicing, copy, append,
// aliasing through assignment, and how len/cap evolve.
func main() {
	s0 := []int{1,2,3,4,5,6,78,9}
	fmt.Println("sssss",s0)
	fmt.Println("sssssff",len(s0[:5]))
	fmt.Println("sssssfffffss",s0[:5])
	fmt.Println("sssssff",s0[5])
	//s1 := make([]int)
	// copy fills at most len(s1) elements, so s1 gets the first five of s0.
	s1 := make([]int,5)
	copy(s1,s0[:5])
	fmt.Println("so",s0)
	fmt.Println("s1",s1)
	// append past len(s1) grows s1 (new backing array if cap exceeded).
	s1 = append(s1,s0[5])
	fmt.Println("add",s1)
	var s2 []int
	s2 = make([]int,len(s0)*2)
	//var s3 []int
	//var s4 []int
	// This assignment discards the slice just made: s2 now aliases s0's
	// backing array, so len/cap come from s0.
	s2 = s0
	fmt.Println("s2==",s2,"len",len(s2),cap(s2))
	s2 = s2[:len(s2)-1]
	fmt.Println("s2==",s2,"len",len(s2),cap(s2))
	// cap still has room, so this append writes into the shared array,
	// overwriting s0's last element as well.
	s2 = append(s2,8)
	fmt.Println("s2==",s2,"len",len(s2),cap(s2))
}
|
/*
* Copyright (c) 2019. ENNOO - All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package proc
import (
"github.com/aberic/gnomon"
"strings"
"sync"
)
// Package-level singleton for the MemInfo instance, created once via
// memInfoInstanceOnce in obtainMemInfo.
var (
	memInfoInstance *MemInfo
	memInfoInstanceOnce sync.Once
)
// MemInfo mirrors /proc/meminfo: physical-memory and swap usage figures.
// Every field holds the raw value string parsed from the file (e.g. "123 kB").
type MemInfo struct {
	MemTotal string // total usable RAM (physical memory minus reserved bits and kernel binary)
	MemFree string // LowFree + HighFree: memory the system has left unused
	MemAvailable string // kernel estimate of memory available to new workloads (MemFree plus reclaimable cache/buffer/slab); an estimate, not exact
	Buffers string // memory used for file buffering
	Cached string // page-cache size (diskcache minus SwapCache)
	SwapCached string // memory swapped out but still present in the swap cache, so it can be reclaimed quickly without extra I/O
	Active string // buffer/page-cache memory in active use, repurposed only when strictly necessary
	Inactive string // buffer/page-cache memory not in frequent use, available for other purposes
	ActiveAnon string //
	InactiveAnon string //
	ActiveFile string //
	InactiveFile string //
	Unevictable string //
	MLocked string //
	SwapTotal string // total swap space
	SwapFree string // unused swap space
	Dirty string // memory waiting to be written back to disk
	WriteBack string // memory currently being written back to disk
	AnonPages string // unmapped (anonymous) pages
	Mapped string // memory mapped for devices and files
	Shmem string //
	Slab string // kernel data-structure cache, amortizing allocation/free cost
	SReclaimable string // reclaimable portion of Slab
	SUnreclaim string // unreclaimable portion of Slab (SUnreclaim + SReclaimable = Slab)
	KernelStack string // kernel stacks: one per user thread, used only when entering the kernel (syscall/trap/exception); fixed 8K or 16K on x86 Linux
	PageTables string // memory used by page-table indexes
	NFSUnstable string // size of unstable NFS page tables
	Bounce string // bounce buffers: low-memory staging copies for old devices that can only DMA to low memory (e.g. below 16M); the extra copy hurts I/O performance and the buffers consume memory
	WriteBackTmp string //
	CommitLimit string //
	CommittedAS string //
	VMAllocTotal string // total vmalloc virtual-memory size
	VMAllocUsed string // vmalloc virtual memory already in use
	VMAllocChunk string //
	HardwareCorrupted string // total size of pages retired after detected hardware memory faults
	AnonHugePages string //
	CmaTotal string //
	CmaFree string //
	HugePagesTotal string // matches vm.nr_hugepages (/proc/sys/vm/nr_hugepages); HugePages are managed separately by the kernel and never count as free memory once defined
	HugePagesFree string //
	HugePagesRsvd string //
	HugePagesSurp string //
	HugePageSize string //
	DirectMap4k string //
	DirectMap2M string //
	DirectMap1G string //
}
// obtainMemInfo returns the package singleton MemInfo, creating it exactly
// once. (The nil check inside Do is defensive; Do already guarantees a
// single execution.)
func obtainMemInfo() *MemInfo {
	memInfoInstanceOnce.Do(func() {
		if nil == memInfoInstance {
			memInfoInstance = &MemInfo{}
		}
	})
	return memInfoInstance
}
// Info refreshes the receiver from <FileRootPath()>/meminfo
// (normally /proc/meminfo).
func (m *MemInfo) Info() error {
	return m.doFormatMemInfo(gnomon.StringBuild(FileRootPath(), "/meminfo"))
}
// doFormatMemInfo reads filePath line by line and feeds each line through
// formatMemInfo, populating the receiver. Returns the read error, if any.
//
// Fix: replaced the Yoda condition plus success-branch nesting with an early
// return so the happy path stays left-aligned; behavior is unchanged.
func (m *MemInfo) doFormatMemInfo(filePath string) error {
	data, err := gnomon.FileReadLines(filePath)
	if err != nil {
		return err
	}
	for index := range data {
		m.formatMemInfo(data[index])
	}
	return nil
}
// formatMemInfo parses one /proc/meminfo line ("Key:   value kB") and stores
// the trimmed value into the matching MemInfo field; unknown lines are
// ignored.
//
// Fix: the original matched with strings.HasPrefix in source order, which
// mis-routed several keys — "Active(anon)"/"Active(file)" were captured by
// the earlier "Active" branch, "Inactive(anon)"/"Inactive(file)" by
// "Inactive", and "WritebackTmp" by "Writeback" — so those fields were never
// populated. Matching the exact key (text before the first ':') fixes it and
// replaces the 40-branch if/else chain with a switch.
func (m *MemInfo) formatMemInfo(lineStr string) {
	kv := strings.SplitN(lineStr, ":", 2)
	if len(kv) != 2 {
		return // not a "Key: value" line
	}
	value := gnomon.StringTrim(kv[1])
	switch kv[0] {
	case "MemTotal":
		m.MemTotal = value
	case "MemFree":
		m.MemFree = value
	case "MemAvailable":
		m.MemAvailable = value
	case "Buffers":
		m.Buffers = value
	case "Cached":
		m.Cached = value
	case "SwapCached":
		m.SwapCached = value
	case "Active":
		m.Active = value
	case "Inactive":
		m.Inactive = value
	case "Active(anon)":
		m.ActiveAnon = value
	case "Inactive(anon)":
		m.InactiveAnon = value
	case "Active(file)":
		m.ActiveFile = value
	case "Inactive(file)":
		m.InactiveFile = value
	case "Unevictable":
		m.Unevictable = value
	case "Mlocked":
		m.MLocked = value
	case "SwapTotal":
		m.SwapTotal = value
	case "SwapFree":
		m.SwapFree = value
	case "Dirty":
		m.Dirty = value
	case "Writeback":
		m.WriteBack = value
	case "AnonPages":
		m.AnonPages = value
	case "Mapped":
		m.Mapped = value
	case "Shmem":
		m.Shmem = value
	case "Slab":
		m.Slab = value
	case "SReclaimable":
		m.SReclaimable = value
	case "SUnreclaim":
		m.SUnreclaim = value
	case "KernelStack":
		m.KernelStack = value
	case "PageTables":
		m.PageTables = value
	case "NFS_Unstable":
		m.NFSUnstable = value
	case "Bounce":
		m.Bounce = value
	case "WritebackTmp":
		m.WriteBackTmp = value
	case "CommitLimit":
		m.CommitLimit = value
	case "Committed_AS":
		m.CommittedAS = value
	case "VmallocTotal":
		m.VMAllocTotal = value
	case "VmallocUsed":
		m.VMAllocUsed = value
	case "VmallocChunk":
		m.VMAllocChunk = value
	case "HardwareCorrupted":
		m.HardwareCorrupted = value
	case "AnonHugePages":
		m.AnonHugePages = value
	case "CmaTotal":
		m.CmaTotal = value
	case "CmaFree":
		m.CmaFree = value
	case "HugePages_Total":
		m.HugePagesTotal = value
	case "HugePages_Free":
		m.HugePagesFree = value
	case "HugePages_Rsvd":
		m.HugePagesRsvd = value
	case "HugePages_Surp":
		m.HugePagesSurp = value
	case "Hugepagesize":
		m.HugePageSize = value
	case "DirectMap4k":
		m.DirectMap4k = value
	case "DirectMap2M":
		m.DirectMap2M = value
	case "DirectMap1G":
		m.DirectMap1G = value
	}
}
|
package main
import "fmt"
// Pointers allow you to point to the memory address of a value
// main demonstrates pointer basics: taking an address, dereferencing, and
// mutating a variable through its pointer.
func main() {
	// b is a pointer to a
	a := 5
	b := &a
	fmt.Println(a, b)
	fmt.Printf("%T %T\n", a, b)
	// Use * to read the value at an address; *&a round-trips back to a.
	fmt.Println(*b, *&a)
	// Change the value stored at a through pointer b.
	*b = 10
	fmt.Println(a)
	// Pointers let callees modify shared data and avoid copying large values
	// around, at the cost of aliasing.
}
package client
import (
"encoding/json"
"fmt"
"net/url"
"os"
"strings"
"github.com/hyperhq/hyper/engine"
gflag "github.com/jessevdk/go-flags"
)
/*
 -a, --author=    Author (e.g., "Hello World <hello@a-team.com>")
 -c, --change=[]  Apply Dockerfile instruction to the created image
     --help=false Print usage
 -m, --message=   Commit message
 -p, --pause=true Pause container during Commit
*/
// HyperCmdCommit implements `hyper commit`: it creates a new image from a
// container's changes by POSTing the parameters to /container/commit and
// printing the resulting image ID.
//
// args are the raw CLI tokens; the container id is read from args[1] and the
// optional repo[:tag] from args[2]. Fixes: `opts.Pause == true` condition,
// verbose zero-value var declarations, and else-after-return.
func (cli *HyperClient) HyperCmdCommit(args ...string) error {
	var opts struct {
		Author  string   `short:"a" long:"author" default:"" value-name:"\"\"" description:"Author (e.g., \"Hello World <hello@a-team.com>\")"`
		Change  []string `short:"c" long:"change" default:"" value-name:"[]" description:"Apply Dockerfile instruction to the created image"`
		Message string   `short:"m" long:"message" default:"" value-name:"\"\"" description:"Commit message"`
		Pause   bool     `short:"p" long:"pause" default:"false" value-name:"true" description:"Pause container during Commit"`
	}
	var parser = gflag.NewParser(&opts, gflag.Default)
	parser.Usage = "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]\n\nCreate a new image from a container's changes"
	args, err := parser.Parse()
	if err != nil {
		// go-flags signals --help via an error containing "Usage"; that is
		// not a failure.
		if !strings.Contains(err.Error(), "Usage") {
			return err
		}
		return nil
	}
	if len(args) == 0 {
		return fmt.Errorf("%s: \"commit\" requires a minimum of 1 argument, See 'hyper build --help'.", os.Args[0])
	}
	var containerId, repo string
	if len(args) > 1 {
		containerId = args[1]
	}
	if len(args) > 2 {
		repo = args[2]
	}
	// Encode the commit parameters as URL query values.
	v := url.Values{}
	v.Set("author", opts.Author)
	changeJson, err := json.Marshal(opts.Change)
	if err != nil {
		return err
	}
	v.Set("change", string(changeJson))
	v.Set("message", opts.Message)
	if opts.Pause {
		v.Set("pause", "yes")
	} else {
		v.Set("pause", "no")
	}
	v.Set("container", containerId)
	v.Set("repo", repo)
	body, _, err := readBody(cli.call("POST", "/container/commit?"+v.Encode(), nil, nil))
	if err != nil {
		return err
	}
	// Decode the engine response and print the new image ID.
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return err
	}
	if _, err := out.Write(body); err != nil {
		return err
	}
	out.Close()
	fmt.Fprintf(cli.out, "%s\n", remoteInfo.Get("ID"))
	return nil
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
// maxRangeSum parses q as whitespace-separated integers and returns the
// maximum sum over any window of n consecutive values, computed with a
// sliding window. The result is never negative: an all-negative input
// yields 0 (the accumulator starts at zero).
func maxRangeSum(n int, q string) int {
	fields := strings.Fields(q)
	nums := make([]int, len(fields))
	for i, f := range fields {
		fmt.Sscan(f, &nums[i])
	}
	// Sum of the first window.
	sum := 0
	for i := 0; i < n; i++ {
		sum += nums[i]
	}
	best := 0
	if sum > best {
		best = sum
	}
	// Slide the window one step at a time: drop the leftmost element,
	// add the next one.
	for i := n; i < len(nums); i++ {
		sum += nums[i] - nums[i-n]
		if sum > best {
			best = sum
		}
	}
	return best
}
// main reads the file named by the first CLI argument; each line has the
// form "n;v1 v2 v3 …" and the program prints maxRangeSum(n, values) per line.
//
// Fix: the original never checked scanner.Err(), so read errors were
// silently mistaken for end-of-file.
func main() {
	var n int
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		s := strings.Split(scanner.Text(), ";")
		fmt.Sscan(s[0], &n)
		fmt.Println(maxRangeSum(n, s[1]))
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
|
package common
import (
"bytes"
"io"
"testing"
"encoding/json"
"github.com/nautilus/events"
)
// TestLogging_writerPublishesToLogging verifies that the io.Writer returned
// by LogWriter publishes each Write as a log action on the broker's "log"
// topic, carrying the JSON-encoded LogPayload.
func TestLogging_writerPublishesToLogging(t *testing.T) {
	// a mock event broker we can test with
	broker := events.NewMockEventBroker()
	// the byte string we are going to write
	action := LogPayload{
		Label: "log label",
		Payload: "hello world",
	}
	payload, err := json.Marshal(action)
	// if something went wrong
	if err != nil {
		// the test failed
		t.Error(err)
		return
	}
	// create the writer appropriate for this broker
	writer, err := LogWriter(broker, action.Label)
	// if something went wrong
	if err != nil {
		// the test failed
		t.Error(err)
		return
	}
	if writer == nil {
		t.Error("nil writer was returned")
		return
	}
	// we are expecting messages to be published on the log topic
	broker.ExpectPublish("log", &events.Action{
		Type: ActionLogAction,
		Payload: string(payload),
	})
	// attempt to write the byte string to the writer
	l, err := writer.Write([]byte(action.Payload))
	switch {
	// if there was something wrong
	case err != nil:
		t.Error(err)
	// make sure we wrote everything
	case l != len([]byte(action.Payload)):
		t.Error("did not write the full bytestring")
	// try to close the broker to check for unsatisfied expectations
	case broker.Close() != nil:
		t.Error("Did not publish action on log topic")
	}
}
// TestLogging_canCopyToLogWriter verifies the log writer also works as an
// io.Copy destination: streaming bytes into it still publishes the expected
// log action.
func TestLogging_canCopyToLogWriter(t *testing.T) {
	// a mock event broker we can test with
	broker := events.NewMockEventBroker()
	// the byte string we are going to write
	action := LogPayload{
		Label: "log label",
		Payload: "hello world",
	}
	payload, err := json.Marshal(action)
	// if something went wrong
	if err != nil {
		// the test failed
		t.Error(err)
		return
	}
	// create the writer appropriate for this broker
	writer, err := LogWriter(broker, action.Label)
	// if something went wrong
	if err != nil {
		// the test failed
		t.Error(err)
		return
	}
	if writer == nil {
		t.Error("nil writer was returned")
		return
	}
	source := bytes.NewBuffer([]byte(action.Payload))
	// we are expecting messages to be published on the log topic
	err = broker.ExpectPublish("log", &events.Action{
		Type: ActionLogAction,
		Payload: string(payload),
	})
	if err != nil {
		t.Error(err)
	}
	_, err = io.Copy(writer, source)
	if err != nil {
		t.Error(err)
	}
}
|
package admin
import (
"github.com/gin-gonic/gin"
"net/http"
)
// ListArticle renders the admin article-list page.
func ListArticle(c *gin.Context) {
	c.HTML(http.StatusOK, "admin/articlelist.html", gin.H{})
}
// AddArticle renders the admin article-creation page.
// NOTE(review): passes nil template data while ListArticle passes gin.H{} —
// confirm the inconsistency is harmless for these templates.
func AddArticle(c *gin.Context) {
	c.HTML(http.StatusOK, "admin/articleadd.html", nil)
}
|
package sd
import (
"github.com/gin-gonic/gin"
"net/http"
)
// Byte-size units (powers of 1024).
const (
	B = 1
	KB = 1024 * B
	MB = 1024 * KB
	GB = 1024 * MB
)
// @Summary Shows OK as the ping-pong result
// @Description Shows OK as the ping-pong result
// @Tags sd
// @Accept json
// @Produce json
// @Success 200 {string} plain "OK"
// @Router /sd/health [get]
// HealthCheck is the liveness endpoint: it always answers 200 with "OK\n".
func HealthCheck(c *gin.Context) {
	const message = "OK"
	c.String(http.StatusOK, message+"\n")
}
|
// Copyright 2018-present The Yumcoder Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Author: yumcoder (omid.jn@gmail.com)
//
package datatype
import (
"strconv"
"sync"
"testing"
)
// region different map implementations
// see https://blog.golang.org/go-maps-in-action
// And now we arrive at why sync.Map was created. The Go team identified situations in the standard lib
// where performance wasn't great: items fetched from data structures wrapped in a sync.RWMutex,
// under high-read scenarios while deployed on very high multi-core setups, suffered considerably.
// counterMap is a plain, non-thread-safe map wrapper — the unsynchronized
// baseline for the map-vs-sync.Map comparisons below.
type counterMap struct {
	data map[string]int
}

// read returns the counter stored under k (zero when absent).
func (m *counterMap) read(k string) int {
	return m.data[k]
}

// write stores v under k.
func (m *counterMap) write(k string, v int) {
	m.data[k] = v
}

// inc bumps the counter under k by one.
// info: need lock, check TestSyncMap
func (m *counterMap) inc(k string) {
	m.data[k]++
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// counterSafeMap is the mutex-guarded variant: every operation takes the
// embedded RWMutex, so concurrent readers and writers are safe.
type counterSafeMap struct {
	sync.RWMutex
	data map[string]int
}

// read returns the counter under k while holding the read lock.
func (m *counterSafeMap) read(k string) int {
	m.RLock()
	defer m.RUnlock()
	return m.data[k]
}

// write stores v under k while holding the write lock.
func (m *counterSafeMap) write(k string, v int) {
	m.Lock()
	defer m.Unlock()
	m.data[k] = v
}

// inc bumps the counter under k atomically with respect to other methods.
func (m *counterSafeMap) inc(k string) {
	m.Lock()
	defer m.Unlock()
	m.data[k]++
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// counterSyncMap wraps sync.Map; its zero value is ready to use.
type counterSyncMap struct {
	data sync.Map
}

// read returns (value, true) for a present key and (-1, false) otherwise.
func (m *counterSyncMap) read(k string) (int, bool) {
	v, ok := m.data.Load(k)
	if !ok {
		return -1, false
	}
	return v.(int), true
}

// write stores v under k.
func (m *counterSyncMap) write(k string, v int) {
	m.data.Store(k, v)
}

// inc bumps the counter under k, starting at 1 for a missing key.
// info: need lock, check TestSyncMap
func (m *counterSyncMap) inc(k string) {
	v, ok := m.data.Load(k)
	if !ok {
		m.data.Store(k, 1)
		return
	}
	m.data.Store(k, v.(int)+1)
}
// endregion
// Test_Map deliberately hammers the unsynchronized counterMap from 100
// goroutines. Run it several times and the result may differ from the
// expectation, or the runtime may abort with
// "fatal error: concurrent map read and map write" — that is the point.
func Test_Map(t *testing.T) {
	m := counterMap{data: make(map[string]int)}
	t.Log(m.read("10"))
	m.write("10", 10)
	t.Log(m.read("10"))
	var swg sync.WaitGroup
	for i := 0; i < 100; i++ {
		swg.Add(1)
		go func() {
			m.write("10", 10)
			swg.Done()
		}()
	}
	swg.Wait()
}
// Test_SafeMap runs the same 100-goroutine write storm against the
// mutex-guarded map; unlike Test_Map this is race-free.
func Test_SafeMap(t *testing.T) {
	m := counterSafeMap{data: make(map[string]int)}
	t.Log(m.read("10"))
	m.write("10", 10)
	t.Log(m.read("10"))
	var swg sync.WaitGroup
	for i := 0; i < 100; i++ {
		swg.Add(1)
		go func() {
			m.write("10", 10)
			swg.Done()
		}()
	}
	swg.Wait()
}
// Test_SyncMap runs the same 100-goroutine write storm against sync.Map;
// individual Store calls are safe without external locking.
func Test_SyncMap(t *testing.T) {
	m := counterSyncMap{}
	t.Log(m.read("10"))
	m.write("10", 10)
	t.Log(m.read("10"))
	var swg sync.WaitGroup
	for i := 0; i < 100; i++ {
		swg.Add(1)
		go func() {
			m.write("10", 10)
			swg.Done()
		}()
	}
	swg.Wait()
}
// Benchmark_WriteMap: writes to the plain map.
// Sample result: 100000000	18.7 ns/op	0 B/op	0 allocs/op
func Benchmark_WriteMap(b *testing.B) {
	m := counterMap{data: make(map[string]int)}
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		m.write("10", 10)
	}
}
// Benchmark_WriteSyncMap: writes to sync.Map (each Store boxes the value).
// Sample result: 10000000	174 ns/op	40 B/op	3 allocs/op
func Benchmark_WriteSyncMap(b *testing.B) {
	m := counterSyncMap{}
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		m.write("10", 10)
	}
}
// Benchmark_ReadMap: reads from the plain map pre-seeded with 100 keys.
// Sample result: 50000000	20.5 ns/op	0 B/op	0 allocs/op
func Benchmark_ReadMap(b *testing.B) {
	m := counterMap{data: make(map[string]int)}
	for i := 0; i < 100; i++ {
		m.write(strconv.Itoa(i), i)
	}
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		m.read("10")
	}
}
// Benchmark_ReadSafeMap: reads through the RWMutex-guarded map.
// Sample result: 20000000	82.0 ns/op	0 B/op	0 allocs/op
func Benchmark_ReadSafeMap(b *testing.B) {
	m := counterSafeMap{data: make(map[string]int)}
	for i := 0; i < 100; i++ {
		m.write(strconv.Itoa(i), i)
	}
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		m.read("10")
	}
}
// Benchmark_ReadSyncMap: reads from sync.Map pre-seeded with 100 keys.
// Sample result: 30000000	50.8 ns/op	0 B/op	0 allocs/op
func Benchmark_ReadSyncMap(b *testing.B) {
	m := counterSyncMap{}
	for i := 0; i < 100; i++ {
		m.write(strconv.Itoa(i), i)
	}
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		m.read("10")
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cellular
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/cellular"
"chromiumos/tast/local/modemmanager"
"chromiumos/tast/testing"
)
// Note: This test enables and connects to Cellular if not already enabled or connected.
// init registers the Smoke test with the tast framework: cellular fixture,
// active-SIM attribute, 5-minute budget.
func init() {
	testing.AddTest(&testing.Test{
		Func: Smoke,
		Desc: "Verifies that traffic can be sent over the Cellular network",
		Contacts: []string{
			"stevenjb@google.com",
			"chromeos-cellular-team@google.com",
		},
		Attr: []string{"group:cellular", "cellular_sim_active"},
		Fixture: "cellular",
		Timeout: 5 * time.Minute,
	})
}
// Smoke connects to the Cellular service and verifies real traffic flows over
// the cellular interface: a generate_204 probe plus a sized download whose
// byte count must match exactly, polled with retries for up to 90 seconds.
func Smoke(ctx context.Context, s *testing.State) {
	if _, err := modemmanager.NewModemWithSim(ctx); err != nil {
		s.Fatal("Could not find MM dbus object with a valid sim: ", err)
	}
	helper, err := cellular.NewHelper(ctx)
	if err != nil {
		s.Fatal("Failed to create cellular.Helper: ", err)
	}
	if _, err := helper.Connect(ctx); err != nil {
		s.Fatal("Failed to connect to cellular service: ", err)
	}
	verifyNetworkConnectivity := func(ctx context.Context) error {
		// try to download from google.com and testing-chargen.appspot.com, with retries
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			// Lightweight reachability probe (204, empty body).
			const googURL = "http://www.gstatic.com/generate_204"
			googResp, err := http.Get(googURL)
			if err != nil {
				return errors.Wrapf(err, "error executing HTTP Get on %q", googURL)
			}
			defer googResp.Body.Close()
			// This URL comes from src/third_party/autotest/files/client/cros/network.py.
			// Code for the app is here: https://chromereviews.googleplex.com/2390012/
			const hostName = "testing-chargen.appspot.com"
			// This pattern also comes from src/third_party/autotest/files/client/cros/network.py
			// and is undocumented.
			const downloadBytes = 65536
			fetchURL := fmt.Sprintf("http://%s/download?size=%d", hostName, downloadBytes)
			s.Log("Fetch URL: ", fetchURL)
			// Get data from |fetchURL| and confirm that the correct number of bytes are received.
			resp, err := http.Get(fetchURL)
			if err != nil {
				return errors.Wrapf(err, "error fetching data from URL %q", fetchURL)
			}
			defer resp.Body.Close()
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return errors.Wrapf(err, "error reading data, got HTTP status code %d", resp.StatusCode)
			}
			bytesRead := len(body)
			if bytesRead != downloadBytes {
				return errors.Errorf("read wrong number of bytes: got %d, want %d", bytesRead, downloadBytes)
			}
			return nil
		}, &testing.PollOptions{
			Timeout: 90 * time.Second,
			Interval: 20 * time.Second,
		}); err != nil {
			return errors.Wrap(err, "unable to verify connectivity")
		}
		return nil
	}
	// Run the connectivity check with routing pinned to the cellular interface.
	if err := helper.RunTestOnCellularInterface(ctx, verifyNetworkConnectivity); err != nil {
		s.Fatal("Failed to run test on cellular interface: ", err)
	}
}
|
package itree
import (
"sort"
)
// Tree is an interval tree; the zero value is an empty tree.
type Tree struct {
	// root is nil for an empty tree.
	root *intervalTreeNode
}
// NewTree builds an interval tree from itvl. The slice is sorted in place by
// descending End; the middle element becomes the root and the two halves are
// inserted beneath it. An empty slice yields an empty tree.
func NewTree(itvl []Interval) (Tree, error) {
	var tree Tree
	if len(itvl) == 0 {
		return tree, nil
	}
	sort.Slice(itvl, func(a, b int) bool {
		return itvl[a].End > itvl[b].End
	})
	mid := len(itvl) / 2
	tree.root = newIntervalTreeNode(itvl[mid])
	if err := tree.root.insert(itvl[:mid]); err != nil {
		return tree, err
	}
	if err := tree.root.insert(itvl[mid+1:]); err != nil {
		return tree, err
	}
	return tree, nil
}
// Contains reports whether value falls inside any interval stored in the tree.
// An empty tree contains nothing.
func (t Tree) Contains(value int64) bool {
	return t.root != nil && t.root.contains(value)
}
|
package httpio
import "net/http"
//TransFunc implements Transformer when casted to
type TransFunc func(a interface{}, r *http.Request, w http.ResponseWriter) error
//Transform allows a transfunc to be used as a Transformer
func (f TransFunc) Transform(a interface{}, r *http.Request, w http.ResponseWriter) error {
return f(a, r, w)
}
//Transformer is used to transform value a in the context of responding with 'w' to request 'r'
type Transformer interface {
Transform(a interface{}, r *http.Request, w http.ResponseWriter) error
}
//Transware is used to implement a chain of transformers, works like router middlewares
type Transware func(next Transformer) Transformer
//Chain builds a recursing transformer with 'base' at the end and 'others' in front. If any transformer
//returns an error the recursion is unwound and the error is returned.
func Chain(base Transformer, others ...Transware) Transformer {
if len(others) == 0 {
return base
}
return others[0](Chain(base, others[1:cap(others)]...))
}
|
package main
import (
"github.com/magiconair/properties/assert"
"testing"
)
// Test_getMD5 exercises getMD5 against live URLs.
// NOTE(review): this test performs real network requests to google.com and
// will fail without connectivity — consider stubbing the HTTP layer.
func Test_getMD5(t *testing.T) {
	t.Run("not valid url", func(t *testing.T) {
		// Missing scheme: getMD5 is expected to return the empty string.
		resp := getMD5("google.com")
		assert.Equal(t, resp, "")
	})
	t.Run("valid url", func(t *testing.T) {
		resp := getMD5("http://google.com")
		// an MD5 hex digest is always 32 characters long
		assert.Equal(t, len(resp), 32)
	})
}
// Test_getHash checks getHash against a known MD5 test vector.
func Test_getHash(t *testing.T) {
	cases := []struct {
		name    string
		content []byte
		want    string
	}{
		{"get valid hash", []byte("test"), "098f6bcd4621d373cade4e832627b4f6"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := getHash(tc.content)
			if got != tc.want {
				t.Errorf("getHash() = %v, want %v", got, tc.want)
			}
		})
	}
}
// Test_getValidUrl checks that getValidUrl prepends a scheme when missing.
func Test_getValidUrl(t *testing.T) {
	cases := []struct {
		name string
		raw  string
		want string
	}{
		{"get valid url with scheme", "google.com", "http://google.com"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := getValidUrl(tc.raw)
			if got != tc.want {
				t.Errorf("getValidUrl() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package util
import "crypto/sha256"
// Merkle tree
// A Merkle tree is built for each block, and it starts with leaves where a leaf is a transaction hash
// MerkleTree represent a Merkle tree
type MerkleTree struct {
RootNode *MerkleNode
}
// MerkleNode represent a Merkle tree node
type MerkleNode struct {
Left *MerkleNode
Right *MerkleNode
Data []byte
}
// NewMerkleTree creates a new Merkle tree from a sequence of data
func NewMerkleTree(data [][]byte) *MerkleTree {
var nodes []MerkleNode
// The number of leaves must be even, if there is an odd number of transactions, the last transaction is duplicated
if len(data)%2 != 0 {
data = append(data, data[len(data)-1])
}
// Convert all nodes to leaf nodes
for _, datum := range data {
node := NewMerkleNode(nil, nil, datum)
nodes = append(nodes, *node)
}
// Out loop for each level, len(data)/2 is the number of level
for i := 0; i < len(data)/2; i++ {
var newLevel []MerkleNode
// Every two nodes produce one new node as parent
for j := 0; j < len(nodes); j += 2 {
node := NewMerkleNode(&nodes[j], &nodes[j+1], nil)
newLevel = append(newLevel, *node)
}
nodes = newLevel
}
merkleTree := MerkleTree{&nodes[0]}
return &merkleTree
}
// NewMerkleNode creates a new Merkle tree node
func NewMerkleNode(left, right *MerkleNode, data []byte) *MerkleNode {
merkleNode := MerkleNode{}
if left == nil && right == nil {
// leaf nodes
hash := sha256.Sum256(data)
merkleNode.Data = hash[:]
} else {
prevData := append(left.Data, right.Data...)
hash := sha256.Sum256(prevData)
merkleNode.Data = hash[:]
}
merkleNode.Left = left
merkleNode.Right = right
return &merkleNode
}
|
package main
/*
--- Day 9: Marble Mania ---
You talk to the Elves while you wait for your navigation system to initialize. To pass the time, they introduce you to their favorite marble game.
The Elves play this game by taking turns arranging the marbles in a circle according to very particular rules. The marbles are numbered starting with 0 and increasing by 1 until every marble has a number.
First, the marble numbered 0 is placed in the circle. At this point, while it contains only a single marble, it is still a circle: the marble is both clockwise from itself and counter-clockwise from itself. This marble is designated the current marble.
Then, each Elf takes a turn placing the lowest-numbered remaining marble into the circle between the marbles that are 1 and 2 marbles clockwise of the current marble. (When the circle is large enough, this means that there is one marble between the marble that was just placed and the current marble.) The marble that was just placed then becomes the current marble.
However, if the marble that is about to be placed has a number which is a multiple of 23, something entirely different happens. First, the current player keeps the marble they would have placed, adding it to their score. In addition, the marble 7 marbles counter-clockwise from the current marble is removed from the circle and also added to the current player's score. The marble located immediately clockwise of the marble that was removed becomes the new current marble.
For example, suppose there are 9 players. After the marble with value 0 is placed in the middle, each player (shown in square brackets) takes a turn. The result of each of those turns would produce circles of marbles like this, where clockwise is to the right and the resulting current marble is in parentheses:
[-] (0)
[1] 0 (1)
[2] 0 (2) 1
[3] 0 2 1 (3)
[4] 0 (4) 2 1 3
[5] 0 4 2 (5) 1 3
[6] 0 4 2 5 1 (6) 3
[7] 0 4 2 5 1 6 3 (7)
[8] 0 (8) 4 2 5 1 6 3 7
[9] 0 8 4 (9) 2 5 1 6 3 7
[1] 0 8 4 9 2(10) 5 1 6 3 7
[2] 0 8 4 9 2 10 5(11) 1 6 3 7
[3] 0 8 4 9 2 10 5 11 1(12) 6 3 7
[4] 0 8 4 9 2 10 5 11 1 12 6(13) 3 7
[5] 0 8 4 9 2 10 5 11 1 12 6 13 3(14) 7
[6] 0 8 4 9 2 10 5 11 1 12 6 13 3 14 7(15)
[7] 0(16) 8 4 9 2 10 5 11 1 12 6 13 3 14 7 15
[8] 0 16 8(17) 4 9 2 10 5 11 1 12 6 13 3 14 7 15
[9] 0 16 8 17 4(18) 9 2 10 5 11 1 12 6 13 3 14 7 15
[1] 0 16 8 17 4 18 9(19) 2 10 5 11 1 12 6 13 3 14 7 15
[2] 0 16 8 17 4 18 9 19 2(20)10 5 11 1 12 6 13 3 14 7 15
[3] 0 16 8 17 4 18 9 19 2 20 10(21) 5 11 1 12 6 13 3 14 7 15
[4] 0 16 8 17 4 18 9 19 2 20 10 21 5(22)11 1 12 6 13 3 14 7 15
[5] 0 16 8 17 4 18(19) 2 20 10 21 5 22 11 1 12 6 13 3 14 7 15
[6] 0 16 8 17 4 18 19 2(24)20 10 21 5 22 11 1 12 6 13 3 14 7 15
[7] 0 16 8 17 4 18 19 2 24 20(25)10 21 5 22 11 1 12 6 13 3 14 7 15
The goal is to be the player with the highest score after the last marble is used up. Assuming the example above ends after the marble numbered 25, the winning score is 23+9=32 (because player 5 kept marble 23 and removed marble 9, while no other player got any points in this very short example game).
Here are a few more examples:
10 players; last marble is worth 1618 points: high score is 8317
13 players; last marble is worth 7999 points: high score is 146373
17 players; last marble is worth 1104 points: high score is 2764
21 players; last marble is worth 6111 points: high score is 54718
30 players; last marble is worth 5807 points: high score is 37305
What is the winning Elf's score?
*/
import (
"fmt"
)
// main solves the puzzle for the real input: 459 players, last marble worth
// 72103 points. The commented calls are the worked examples from the puzzle.
func main() {
	analyze(459, 72103)
	// analyze(9, 25)
	// analyze(10, 1618)
}
// analyze plays the marble game with numPlayers elves and marbles numbered
// 1..numMarbles, then prints the winning score. The circle is modelled as a
// slice c1 with currentMarbleIndex tracking the current marble's position;
// insertions and removals splice the slice in place.
func analyze(numPlayers, numMarbles int) {
	elfScores := make([]int, numPlayers)
	c1 := make([]int, 1)
	c1[0] = 0
	currentMarbleIndex := 0
	currentElf := 0
	// fmt.Printf("Current index: %d\n", currentMarbleIndex)
	// fmt.Printf("Circle: %v\n", c1)
	for i := 1; i <= numMarbles; i++ {
		currentElf += 1
		if currentElf == numPlayers {
			currentElf = 0
		}
		if i%23 == 0 {
			// Multiple of 23: the elf keeps this marble plus the marble 7
			// positions counter-clockwise, which is removed from the circle.
			elfScores[currentElf] += i
			currentMarbleIndex -= 7
			if currentMarbleIndex < 0 {
				// wrap around the circle
				currentMarbleIndex = len(c1) + currentMarbleIndex
			}
			elfScores[currentElf] += c1[currentMarbleIndex]
			c1 = append(c1[:currentMarbleIndex], c1[currentMarbleIndex+1:]...)
			continue
		}
		// Otherwise insert the marble two positions clockwise of the current one.
		currentMarbleIndex += 1
		if currentMarbleIndex == len(c1) {
			currentMarbleIndex = 0
		}
		currentMarbleIndex += 1
		if currentMarbleIndex == len(c1) {
			c1 = append(c1, i)
		} else {
			c1 = append(c1[:currentMarbleIndex], append([]int{i}, c1[currentMarbleIndex:]...)...)
		}
		// fmt.Printf("Current index: %d\n", currentMarbleIndex)
		// fmt.Printf("Circle: %v\n", c1)
	}
	fmt.Printf("Max score: %d\n", maxScore(elfScores))
}
// maxScore returns the largest value in scores; scores must be non-empty.
func maxScore(scores []int) int {
	best := scores[0]
	for _, s := range scores[1:] {
		if s > best {
			best = s
		}
	}
	return best
}
|
package web
import (
"movie-app/handler"
)
// handlerModule bundles every HTTP handler of the application so they can be
// wired up in one place.
type handlerModule struct {
	user       handler.UserHandler
	genre      handler.GenreHandler
	movie      handler.MovieHandler
	movieGenre handler.MovieGenreHandler
	review     handler.ReviewHandler
}
// GetModule wires each handler to its backing service(s) and returns the
// assembled handler bundle.
func GetModule(service handlerService) handlerModule {
	return handlerModule{
		user:       handler.NewUserHandler(service.auth, service.user),
		genre:      handler.NewGenreHandler(service.genre),
		movie:      handler.NewMovieHandler(service.movie),
		movieGenre: handler.NewMovieGenreHandler(service.movieGenre),
		review:     handler.NewReviewHandler(service.review),
	}
}
|
package mock
import "github.com/florianehmke/plexname/prompt"
// AskNumberFn mimics prompt.Prompter's AskNumber method.
type AskNumberFn func(question string) (int, error)

// AskStringFn mimics prompt.Prompter's AskString method.
type AskStringFn func(question string) (string, error)

// ConfirmFn mimics prompt.Prompter's Confirm method.
type ConfirmFn func(question string) (bool, error)

// mockPrompter satisfies prompt.Prompter by delegating every call to an
// injected function, letting tests script the answers.
type mockPrompter struct {
	askNumberFn AskNumberFn
	askStringFn AskStringFn
	confirmFn   ConfirmFn
}

// NewMockPrompter builds a prompt.Prompter whose three methods delegate to
// the supplied functions.
func NewMockPrompter(askNumberFn AskNumberFn, askStringFn AskStringFn, confirmFn ConfirmFn) prompt.Prompter {
	m := &mockPrompter{
		askNumberFn: askNumberFn,
		askStringFn: askStringFn,
		confirmFn:   confirmFn,
	}
	return m
}

// AskNumber delegates to the injected AskNumberFn.
func (m mockPrompter) AskNumber(question string) (int, error) {
	return m.askNumberFn(question)
}

// AskString delegates to the injected AskStringFn.
func (m mockPrompter) AskString(question string) (string, error) {
	return m.askStringFn(question)
}

// Confirm delegates to the injected ConfirmFn.
func (m mockPrompter) Confirm(question string) (bool, error) {
	return m.confirmFn(question)
}
|
package cpu
import "testing"
// oraImmediate is a test helper: it loads `first` into the accumulator,
// writes `second` to memory address 0, and executes ORA against address 0.
// NOTE(review): assumes Memory.Write takes (value, address) in that order —
// confirm against the Memory implementation.
func (p *CPU) oraImmediate(first byte, second byte) {
	p.A = first
	p.Memory.Write(second, 0)
	p.Ora(0)
}
func TestOraSetsAccumulator(t *testing.T) {
var p *CPU = NewCPU()
p.oraImmediate(0x01, 0xff)
if p.A != 0xff {
t.Errorf("Binary ora seems not to have worked correctly")
t.Errorf("Expected 0xff, got %#02x", p.A)
t.FailNow()
}
}
func TestOraZeroFlagSet(t *testing.T) {
var p *CPU = NewCPU()
p.oraImmediate(0x00, 0x00)
if !p.Zero() {
t.Errorf("Zero flag should be set when result is 0x00 (flags: %08b)", p.Flags)
t.FailNow()
}
}
func TestOraNegativeFlagSet(t *testing.T) {
var p *CPU = NewCPU()
p.oraImmediate(0x80, 0x80)
if !p.Negative() {
t.Errorf("Negative flag should be set when high bit is 1 (flags: %08b)", p.Flags)
t.FailNow()
}
}
|
// Copyright 2019 go-gtp authors. All rights reserved.
// Use of this source code is governed by a MIT-style license that can be
// found in the LICENSE file.
// Command sgw is a dead simple implementation of S-GW only with GTP-related features.
//
// S-GW follows the steps below if there's no unexpected events in the middle.
//
// 1. Start listening on S11 interface.
//
// 2. If MME connects to S-GW with Create Session Request, S-GW sends Create Session Request
// to P-GW whose IP is specified by MME with F-TEID IE.
//
// 3. Wait for Create Session Response coming from P-GW with Cause="request accepted", and
// other IEs required are properly set.
//
// 4. Respond to MME with Create Session Response. Here the C-Plane Session is considered to
// be created properly.
//
// 5. If MME sends Modify Bearer Request with eNB information inside, set incoming TEID to
// Bearer and start listening on U-Plane.
//
// 6. If some U-Plane message comes from eNB/P-GW, relay it to P-GW/eNB with TEID and IP
// properly set as told while exchanging the C-Plane signals.
package main
import (
"flag"
"log"
"net"
"time"
v1 "github.com/wmnsk/go-gtp/v1"
v2 "github.com/wmnsk/go-gtp/v2"
"github.com/wmnsk/go-gtp/v2/messages"
)
// command-line arguments
var (
	s11 = flag.String("s11", "127.0.0.112:2123", "local IP:Port on S11 interface.")
	// NOTE(review): s5c is declared but not referenced in this file; it is
	// presumably read by handlers in a sibling file — confirm before removing.
	s5c = flag.String("s5c", "127.0.0.51:2123", "local IP:Port on S5-C interface.")
	s1u = flag.String("s1u", "127.0.0.2:2152", "local IP:Port on S1-U interface.")
	s5u = flag.String("s5u", "127.0.0.3:2152", "local IP:Port on S5-U interface.")
)

// channels shared between main's event loop and the message handlers.
var (
	delCh    = make(chan struct{})
	loggerCh = make(chan string)
	errCh    = make(chan error)
)
// main starts the S-GW: it resolves the S11/S1-U/S5-U addresses, serves GTPv2-C
// on S11 with the session handlers registered, relays GTPv1-U traffic between
// S1-U and S5-U, and then loops forever logging events, warnings, and a
// periodic dump of active subscribers.
func main() {
	flag.Parse()
	log.SetPrefix("[S-GW] ")
	// start listening on the specified IP:Port.
	s11laddr, err := net.ResolveUDPAddr("udp", *s11)
	if err != nil {
		log.Fatal(err)
	}
	s1uladdr, err := net.ResolveUDPAddr("udp", *s1u)
	if err != nil {
		log.Fatal(err)
	}
	s5uladdr, err := net.ResolveUDPAddr("udp", *s5u)
	if err != nil {
		log.Fatal(err)
	}
	s11Conn, err := v2.ListenAndServe(s11laddr, 0, errCh)
	if err != nil {
		log.Fatal(err)
	}
	defer s11Conn.Close()
	log.Printf("Started serving on %s", s11Conn.LocalAddr())
	// register handlers for ALL the messages you expect remote endpoint to send.
	s11Conn.AddHandlers(map[uint8]v2.HandlerFunc{
		messages.MsgTypeCreateSessionRequest: handleCreateSessionRequest,
		messages.MsgTypeModifyBearerRequest:  handleModifyBearerRequest,
		messages.MsgTypeDeleteSessionRequest: handleDeleteSessionRequest,
	})
	// let relay start working here.
	// this just drops packets until TEID and peer information is registered.
	s1uConn, err := v1.ListenAndServeUPlane(s1uladdr, 0, errCh)
	if err != nil {
		log.Fatal(err)
	}
	s5uConn, err := v1.ListenAndServeUPlane(s5uladdr, 0, errCh)
	if err != nil {
		log.Fatal(err)
	}
	// NOTE(review): `relay` is not declared in this file; presumably it is a
	// package-level variable shared with the handlers in a sibling file —
	// confirm, otherwise this assignment does not compile.
	relay = v1.NewRelay(s1uConn, s5uConn)
	go relay.Run()
	defer relay.Close()
	// wait for events(logs, errors, timers).
	for {
		select {
		case str := <-loggerCh:
			log.Println(str)
		case err := <-errCh:
			log.Printf("Warning: %s", err)
		case <-time.After(10 * time.Second):
			// Every 10 seconds, dump the IMSIs of all active sessions.
			var activeIMSIs []string
			for _, sess := range s11Conn.Sessions {
				if !sess.IsActive() {
					continue
				}
				activeIMSIs = append(activeIMSIs, sess.IMSI)
			}
			if len(activeIMSIs) == 0 {
				continue
			}
			log.Println("Active Subscribers:")
			for _, imsi := range activeIMSIs {
				log.Printf("\t%s", imsi)
			}
			activeIMSIs = nil
		}
	}
}
|
package set3
import (
"bytes"
"cryptopals/utils"
"testing"
)
// TestMT19937StreamCipher round-trips a message through the MT19937 stream
// cipher, recovers the seed from a known-plaintext suffix, and checks the
// MT19937 password-reset-token detector against both a generated token and a
// random one.
func TestMT19937StreamCipher(t *testing.T) {
	knownSuffix := []byte("AAAAAAAAAAAAAA")
	message := append(utils.GenerateRandomCharacters(8), knownSuffix...)
	t.Log("Plaintext:", string(message))
	seed := uint16(getMT19937Seed())
	t.Log("Key:", seed)
	encrypted := encryptUsingMT19937StreamCipher(message, seed)
	decrypted := decryptUsingMT19937StreamCipher(encrypted, seed)
	t.Log("Decrypted", string(decrypted))
	if !bytes.Equal(message, decrypted) {
		t.Error("Failed to decrypt using MT19937 stream cipher")
	}
	recovered := recoverMT19937StreamCipherKey(encrypted, knownSuffix)
	t.Log("Recovered key:", recovered)
	if seed != recovered {
		t.Error("Failed to recover MT19937 stream cipher key")
	}
	token := generateRandomPasswordResetTokenUsingMT19937(16)
	t.Log("Reset token:", token)
	if !isResetTokenGeneratedUsingMT19937(token) {
		t.Error("Failed to detect the reset token was generated using MT19937")
	}
	if isResetTokenGeneratedUsingMT19937(utils.GenerateRandomCharacters(16)) {
		t.Error("Detected random token was generated using MT19937")
	}
}
|
package apis
import (
"encoding/hex"
"hash/fnv"
"regexp"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/api/validation/path"
"k8s.io/apimachinery/pkg/util/validation"
)
// MaxNameLength is the longest name SanitizeName will emit (the DNS-1123
// subdomain limit).
const MaxNameLength = validation.DNS1123SubdomainMaxLength

// invalidLabelCharacters matches every rune not allowed in a label value.
var invalidLabelCharacters = regexp.MustCompile("[^-A-Za-z0-9_.]")

// invalidPathCharacters matches runes that may not appear in an API path segment.
var invalidPathCharacters = regexp.MustCompile(`[` + strings.Join(path.NameMayNotContain, "") + `]`)

// KeyableObject is anything addressable by a namespace and a name.
type KeyableObject interface {
	GetName() string
	GetNamespace() string
}
// Key returns the NamespacedName identifying o.
func Key(o KeyableObject) types.NamespacedName {
	return types.NamespacedName{Name: o.GetName(), Namespace: o.GetNamespace()}
}
// KeyFromMeta returns the NamespacedName identifying the object with objMeta.
func KeyFromMeta(objMeta metav1.ObjectMeta) types.NamespacedName {
	return types.NamespacedName{Name: objMeta.Name, Namespace: objMeta.Namespace}
}
// SanitizeLabel ensures a value is suitable as both a label key and value.
// Disallowed runes become underscores; over-long results are truncated and
// suffixed with "-" plus an 8-character hash of the original name so distinct
// inputs stay distinct.
func SanitizeLabel(name string) string {
	sanitized := invalidLabelCharacters.ReplaceAllString(name, "_")
	limit := validation.LabelValueMaxLength
	if len(sanitized) <= limit {
		return sanitized
	}
	var b strings.Builder
	b.Grow(limit)
	b.WriteString(sanitized[:limit-9])
	b.WriteRune('-')
	b.WriteString(hashValue(name))
	return b.String()
}
// SanitizeName ensures a value is suitable for usage as an apiserver
// identifier: reserved names ("." / "..") have their dots replaced, invalid
// path runes become underscores, and over-long results are truncated and
// suffixed with "-" plus an 8-character hash of the original name.
func SanitizeName(name string) string {
	sanitized := name
	if len(path.IsValidPathSegmentName(name)) != 0 {
		for _, reserved := range path.NameMayNotBe {
			if name == reserved {
				// the only strictly invalid names are `.` and `..` so this is sufficient
				return strings.ReplaceAll(name, ".", "_")
			}
		}
		sanitized = invalidPathCharacters.ReplaceAllString(sanitized, "_")
	}
	if len(sanitized) <= MaxNameLength {
		return sanitized
	}
	var b strings.Builder
	b.Grow(MaxNameLength)
	b.WriteString(sanitized[:MaxNameLength-9])
	b.WriteRune('-')
	b.WriteString(hashValue(name))
	return b.String()
}
// hashValue returns the 8-character lowercase hex FNV-1a (32-bit) digest of v.
func hashValue(v string) string {
	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(v)) // fnv's Write never returns an error
	sum := hasher.Sum(nil)
	return hex.EncodeToString(sum)
}
|
package main
// adjacentElementsProduct returns the largest product of two adjacent
// elements in inputArray. The input must contain at least two elements
// (same precondition as the original, which also indexed [0] and [1]).
func adjacentElementsProduct(inputArray []int) int {
	// Fixes: the original re-checked the loop-invariant `len(inputArray) <= 2`
	// on every iteration, and its first iteration redundantly compared
	// inputArray[1]*inputArray[0] against itself. Behavior is unchanged.
	result := inputArray[0] * inputArray[1]
	for i := 2; i < len(inputArray); i++ {
		if p := inputArray[i] * inputArray[i-1]; p > result {
			result = p
		}
	}
	return result
}
package gemini
import (
"context"
"fmt"
"net/http"
"testing"
"github.com/stretchr/testify/assert"
)
// TestClient_Balances exercises Client.Balances and BalanceSymbol against the
// Gemini sandbox API.
// NOTE(review): this is an integration test that performs live HTTP requests;
// it requires network access (and presumably sandbox credentials) to pass.
func TestClient_Balances(t *testing.T) {
	// fields matches the table-test template but is unused by the case below.
	type fields struct {
		BaseURL    string
		apiKey     string
		apiSecret  string
		HTTPClient *http.Client
	}
	type args struct {
		ctx context.Context
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    *BalancesResponse
		wantErr bool
	}{
		{
			name:    "get balance",
			wantErr: false,
			args: args{
				ctx: context.Background(),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := NewClient()
			c.BaseURL = sandboxBaseURLV1
			got, err := c.Balances(tt.args.ctx)
			if (err != nil) != tt.wantErr {
				t.Errorf("Client.Balances() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			assert.NotNil(t, got)
			// TODO move this to a separate test
			b, err := c.BalanceSymbol(tt.args.ctx, got, "BTC")
			assert.Nil(t, err)
			assert.NotNil(t, b)
			fmt.Printf("%+v", b)
		})
	}
}
|
package Index
import (
"errors"
"github.com/PuerkitoBio/goquery"
"github.com/sirupsen/logrus"
"os"
"poetryAdmin/worker/app/config"
"poetryAdmin/worker/app/tools"
"poetryAdmin/worker/core/data"
"poetryAdmin/worker/core/define"
"poetryAdmin/worker/core/grasp/poetry/Category"
"poetryAdmin/worker/core/grasp/poetry/Famous"
"poetryAdmin/worker/core/grasp/poetry/base"
"sync"
)
// Index scrapes the site's home page.
type Index struct {
	Content      string            // raw HTML of the home page
	GoQuery      *goquery.Document // parsed document built from Content
	CategoryData define.DataMap    // home-page poetry category data
	AuthorData   define.DataMap    // home-page author data
	FamousData   define.DataMap    // home-page famous-line data
	group        *sync.WaitGroup   // coordinates the three scrape goroutines
}
// NewIndex returns an Index with empty data maps, ready to scrape.
func NewIndex() *Index {
	return &Index{
		CategoryData: make(define.DataMap),
		AuthorData:   make(define.DataMap),
		FamousData:   make(define.DataMap),
		group:        &sync.WaitGroup{},
	}
}
// GetAllData fetches the home page, validates its content, then scrapes the
// poetry categories, famous lines, and authors concurrently, blocking until
// all three goroutines finish.
func (i *Index) GetAllData() {
	logrus.Info("GetAllData start .......")
	if err := i.GetIndexSource(); err != nil {
		logrus.Debug("GetIndexHtml err:", err)
		return
	}
	// Idiom fix: `== false` replaced with `!`.
	if !base.CheckContent(i.Content) {
		logrus.Debug("CheckContent err: content is nil")
		return
	}
	i.group.Add(3)
	go i.GetPoetryCategory()
	go i.GetPoetryFamousCategory()
	go i.GetPoetryAuthor()
	i.group.Wait()
}
// GetPoetryCategory scrapes the poetry-category links (first ".right>.sons"
// section of the home page), publishes the result, and triggers the
// per-category crawl. Must run after GetIndexSource has populated GoQuery.
func (i *Index) GetPoetryCategory() {
	defer i.group.Done()
	if len(i.Content) == 0 || i.GoQuery == nil {
		logrus.Debug("GetPoetryCategory() i.Content is nil or i.query is nil")
		return
	}
	i.GoQuery.Find(".right>.sons").Eq(0).Find(".cont>a").Each(func(j int, selection *goquery.Selection) {
		href, _ := selection.Attr("href")
		result := &define.TextHrefFormat{
			Href:         href,
			Text:         selection.Text(),
			ShowPosition: define.CategoryPosition,
		}
		i.CategoryData[j] = result
	})
	home := &define.HomeFormat{
		Identifier: define.HomePoetryCategoryFormatSign,
		Data:       i.CategoryData,
	}
	data.G_GraspResult.SendData(home)
	Category.NewCategory().GraspByIndexData(home)
	return
}
// GetPoetryFamousCategory scrapes the famous-line category links (second
// ".right>.sons" section of the home page), publishes the result, and
// triggers the famous-line crawl. Must run after GetIndexSource.
func (i *Index) GetPoetryFamousCategory() {
	defer i.group.Done()
	if len(i.Content) == 0 || i.GoQuery == nil {
		logrus.Debug("GetPoetryFamousCategory() i.Content is nil or i.query is nil")
		return
	}
	i.GoQuery.Find(".right>.sons").Eq(1).Find(".cont>a").Each(func(j int, selection *goquery.Selection) {
		href, _ := selection.Attr("href")
		result := &define.TextHrefFormat{
			Href:         href,
			Text:         selection.Text(),
			ShowPosition: define.FamousPosition,
		}
		i.FamousData[j] = result
	})
	home := &define.HomeFormat{
		Identifier: define.HomePoetryFamousFormatSign,
		Data:       i.FamousData,
	}
	data.G_GraspResult.SendData(home)
	Famous.NewFamous().GraspByIndexData(home)
	return
}
// GetPoetryAuthor scrapes the author links (third ".right>.sons" section of
// the home page) and publishes the result. Must run after GetIndexSource.
func (i *Index) GetPoetryAuthor() {
	defer i.group.Done()
	if len(i.Content) == 0 || i.GoQuery == nil {
		logrus.Debug("GetPoetryAuthor() i.Content is nil or i.query is nil")
		return
	}
	i.GoQuery.Find(".right>.sons").Eq(2).Find(".cont>a").Each(func(j int, selection *goquery.Selection) {
		href, _ := selection.Attr("href")
		result := &define.TextHrefFormat{
			Href: href,
			Text: selection.Text(),
		}
		i.AuthorData[j] = result
	})
	home := &define.HomeFormat{
		Identifier: define.HomePoetryAuthorFormatSign,
		Data:       i.AuthorData,
	}
	data.G_GraspResult.SendData(home)
	return
}
// GetIndexSource loads the home-page HTML (from a local cache file in the
// test environment, otherwise over HTTP) into i.Content and parses it into
// i.GoQuery. On an empty payload, Content and GoQuery are left untouched/nil.
func (i *Index) GetIndexSource() (err error) {
	var (
		query *goquery.Document
		// Idiom fix: renamed from `bytes`, which shadowed the stdlib package name.
		body []byte
	)
	if config.G_Conf.Env == define.TestEnv {
		body, err = i.IndexTestFile()
	} else {
		body, err = base.GetHtml(config.G_Conf.GuShiWenIndexUrl)
	}
	if err != nil {
		return err
	}
	if len(body) > 0 {
		i.Content = string(body)
		query, err = tools.NewDocumentFromReader(i.Content)
		if err != nil {
			return err
		}
	}
	i.GoQuery = query
	return nil
}
// IndexTestFile reads the cached home page from ./index.html so tests avoid
// an HTTP round trip on every run. It returns an error if the file is absent.
func (i *Index) IndexTestFile() (byt []byte, err error) {
	dir, _ := os.Getwd()
	file := dir + "/index.html"
	// Idiom fix: `ret == true` simplified to `ret`.
	if ret, _ := tools.PathExists(file); ret {
		return tools.ReadFile(file)
	}
	// Message fix: was "<path>file is not exists" (missing space, broken grammar).
	return nil, errors.New(file + " file does not exist")
}
|
package main
import (
"github.com/zairza-cetb/bench-routes/src/lib/handlers"
"github.com/zairza-cetb/bench-routes/src/lib/logger"
)
// qPingRoute is the request payload for the ping endpoint.
type qPingRoute struct {
	URL string `json:"url"`
}

// qFloodPingRoute is the request payload for the flood-ping endpoint.
type qFloodPingRoute struct {
	URL string `json:"url"`
}

// qJitterRoute is the request payload for the jitter endpoint.
type qJitterRoute struct {
	URL string `json:"url"`
}

// qReqResDelayRoute is the request payload for the request/response delay
// endpoint; Method is the HTTP method to probe with.
type qReqResDelayRoute struct {
	URL    string `json:"url"`
	Method string `json:"method"`
}
// HandlerPingGeneral handles the ping route: "start" flips the ping service
// from passive to active (persisting the config) and launches the ping
// worker; "stop" persists a passive state. It returns true when the signal
// was applied, false otherwise.
// NOTE(review): a failed config write panics — confirm this is the intended
// failure mode for a request-facing handler.
func HandlerPingGeneral(signal string) bool {
	// Get latest service state settings
	conf.Refresh()
	pingServiceState := conf.Config.UtilsConf.ServicesSignal.Ping
	switch signal {
	case "start":
		if pingServiceState == "passive" {
			conf.Config.UtilsConf.ServicesSignal.Ping = "active"
			_, e := conf.Write()
			if e != nil {
				panic(e)
			}
			// The worker is handed the pre-flip state ("passive").
			go func() {
				handlers.HandlePingStart(conf, pingServiceState)
			}()
			return true
		}
		// return handlePingStart(conf, pingServiceState)
	case "stop":
		conf.Config.UtilsConf.ServicesSignal.Ping = "passive"
		_, e := conf.Write()
		if e != nil {
			panic(e)
		}
		return true
	default:
		logger.Terminal("invalid signal", "f")
	}
	return false
}
// HandlerJitterGeneral handles the jitter route: "start" flips the jitter
// service from passive to active (persisting the config) and launches the
// jitter worker; "stop" persists a passive state. It returns true when the
// signal was applied.
// NOTE(review): a failed config write panics — see HandlerPingGeneral.
func HandlerJitterGeneral(signal string) bool {
	// Get latest service state settings
	conf.Refresh()
	jitterServiceState := conf.Config.UtilsConf.ServicesSignal.Jitter
	switch signal {
	case "start":
		if jitterServiceState == "passive" {
			conf.Config.UtilsConf.ServicesSignal.Jitter = "active"
			_, e := conf.Write()
			if e != nil {
				panic(e)
			}
			// The worker is handed the pre-flip state ("passive").
			go func() {
				handlers.HandleJitterStart(conf, jitterServiceState)
			}()
			return true
		}
	case "stop":
		conf.Config.UtilsConf.ServicesSignal.Jitter = "passive"
		_, e := conf.Write()
		if e != nil {
			panic(e)
		}
		return true
	default:
		logger.Terminal("invalid signal", "f")
	}
	return false
}
// HandleReqResGeneral is the handler for requests regarding req-res delay and
// monitoring: "start" flips the service from passive to active (persisting
// the config) and launches the monitoring worker; "stop" persists a passive
// state. It returns true when the signal was applied.
// NOTE(review): a failed config write panics — see HandlerPingGeneral.
func HandleReqResGeneral(signal string) bool {
	// Get latest service state settings
	conf.Refresh()
	reqResMonitoringServiceState := conf.Config.UtilsConf.ServicesSignal.ReqResDelayMonitoring
	switch signal {
	case "start":
		if reqResMonitoringServiceState == "passive" {
			conf.Config.UtilsConf.ServicesSignal.ReqResDelayMonitoring = "active"
			_, e := conf.Write()
			if e != nil {
				panic(e)
			}
			// The worker is handed the pre-flip state ("passive").
			go func() {
				handlers.HandleReqResMonitoringStart(conf, reqResMonitoringServiceState)
			}()
			return true
		}
	case "stop":
		conf.Config.UtilsConf.ServicesSignal.ReqResDelayMonitoring = "passive"
		_, e := conf.Write()
		if e != nil {
			panic(e)
		}
		return true
	default:
		logger.Terminal("invalid signal", "f")
	}
	return false
}
// HandlerFloodPingGeneral handles the flood-ping route: "start" flips the
// flood-ping service from passive to active (persisting the config) and
// launches the flood-ping worker; "stop" persists a passive state. It returns
// true when the signal was applied.
// NOTE(review): a failed config write panics — see HandlerPingGeneral.
func HandlerFloodPingGeneral(signal string) bool {
	// Refresh conf with latest update
	conf.Refresh()
	serviceState := conf.Config.UtilsConf.ServicesSignal.FloodPing
	switch signal {
	case "start":
		if serviceState == "passive" {
			conf.Config.UtilsConf.ServicesSignal.FloodPing = "active"
			_, e := conf.Write()
			if e != nil {
				panic(e)
			}
			// The worker is handed the pre-flip state ("passive").
			go func() {
				handlers.HandleFloodPingStart(conf, serviceState)
			}()
			return true
		}
	case "stop":
		conf.Config.UtilsConf.ServicesSignal.FloodPing = "passive"
		_, e := conf.Write()
		if e != nil {
			panic(e)
		}
		return true
	default:
		logger.Terminal("invalid signal", "f")
	}
	return false
}
|
package glubcms
import (
"html/template"
"log"
"net/url"
"sync"
"time"
"github.com/lemmi/glubcms/backend"
)
// Entries is a sortable collection of Entry values.
// NOTE(review): Len and Swap (to complete sort.Interface) are expected to be
// defined elsewhere in the package — confirm.
type Entries []Entry

// Less orders entries: index pages sort last, then by descending priority,
// then by most recent date.
func (e Entries) Less(i, j int) bool {
	switch {
	case e[i].meta.IsIndex && !e[j].meta.IsIndex:
		return false
	case !e[i].meta.IsIndex && e[j].meta.IsIndex:
		return true
	case e[i].Priority() != e[j].Priority():
		return e[i].Priority() > e[j].Priority()
	}
	return e[i].Date().After(e[j].Date())
}
// Entry is one page of the site: either an article or a menu node. Entries
// form a doubly linked chain via next/prev, and content rendering is lazy
// (see HTML).
type Entry struct {
	meta       Meta            // parsed page metadata
	active     bool            // whether the entry is on the requested path
	html       []byte          // cached rendered content, filled by HTML()
	isarticle  bool            // article vs. menu node
	link       url.URL         // location of the entry
	next       *Entry          // following entry in the chain, nil at the end
	prev       *Entry          // preceding entry in the chain, nil at the start
	fs         backend.Backend // backing storage
	md_path    string          // path to the markdown source
	once       sync.Once       // guards the one-time render in HTML()
	renderHTML ContentRenderer // produces the HTML for this entry
}
// Active reports whether the entry lies on the currently requested path.
func (e *Entry) Active() bool {
	return e.active
}

// Author returns the author from the entry's metadata.
// Consistency fix: receiver renamed from `a` to `e`, matching every other
// method on Entry.
func (e *Entry) Author() string {
	return e.meta.Author
}

// Date returns the entry's date from its metadata.
func (e *Entry) Date() time.Time {
	return time.Time(e.meta.Date)
}

// ExtraStyle lists additional stylesheets requested by the entry.
func (e *Entry) ExtraStyle() []string {
	return e.meta.ExtraStyle
}

// ExtraScript lists additional scripts requested by the entry.
func (e *Entry) ExtraScript() []string {
	return e.meta.ExtraScript
}

// HTML renders the entry's content exactly once and caches the result;
// render errors are logged and yield empty HTML.
func (e *Entry) HTML() template.HTML {
	e.once.Do(func() {
		var err error
		e.html, err = e.renderHTML.Render()
		if err != nil {
			//TODO make errorpage
			log.Println(err)
		}
	})
	return template.HTML(e.html)
}

// IsArticle reports whether the entry is an article (as opposed to a menu node).
func (e *Entry) IsArticle() bool {
	return e.isarticle
}

// Link returns the entry's URL as a string.
func (e *Entry) Link() string {
	return e.link.String()
}

// Priority returns the sort priority from the entry's metadata.
func (e *Entry) Priority() int {
	return e.meta.Priority
}

// Title returns the title from the entry's metadata.
func (e *Entry) Title() string {
	return e.meta.Title
}

// Next returns the following entry, or nil when there is none.
// Simplified: `if e.next != nil { return e.next }; return nil` is exactly
// `return e.next`.
func (e *Entry) Next() *Entry {
	return e.next
}

// Prev returns the preceding entry, or nil when there is none.
func (e *Entry) Prev() *Entry {
	return e.prev
}

// IsIndex reports whether the entry is an index page.
func (e *Entry) IsIndex() bool {
	return e.meta.IsIndex
}
// Context returns up to c entries surrounding e (e itself included),
// expanding alternately along the next and prev links until c entries are
// collected or both ends of the chain are exhausted. The result is ordered
// starting from the furthest entry reached in the `next` direction, walking
// back along prev links.
func (e *Entry) Context(c int) Entries {
	next := e
	prev := e
	n := 1
	for {
		moved := false
		if n < c && next.next != nil {
			moved = true
			next = next.next
			n++
		}
		if n < c && prev.prev != nil {
			moved = true
			prev = prev.prev
			n++
		}
		// Stop when the quota is met or neither direction can advance.
		if n == c || moved == false {
			break
		}
	}
	ret := make(Entries, 0, n)
	t := next
	for n > 0 {
		ret = append(ret, *t)
		t = t.prev
		n--
	}
	return ret
}
// SplitEntries partitions e into menu entries and article entries, preserving
// the original order within each group.
// Idiom fix: the named results were capitalized (`Menu, Articles`), which
// reads like exported identifiers; lowered and returned explicitly.
func SplitEntries(e Entries) (menu, articles Entries) {
	for _, v := range e {
		if v.IsArticle() {
			articles = append(articles, v)
		} else {
			menu = append(menu, v)
		}
	}
	return menu, articles
}
|
package categoryModel
import (
"hd-mall-ed/packages/common/database"
"hd-mall-ed/packages/common/database/tableModel"
)
// Category is a local defined type over the shared table model so the query
// helpers below can hang off it.
type Category tableModel.Category

// Create inserts the category row.
func (category *Category) Create() error {
	return database.DataBase.Create(category).Error
}
// Get returns every category projected onto CategoryBase.
func (category *Category) Get() ([]*tableModel.CategoryBase, error) {
	var categoryList []*tableModel.CategoryBase
	if err := database.DataBase.Model(&Category{}).Find(&categoryList).Error; err != nil {
		return categoryList, err
	}
	return categoryList, nil
}
// GetPrimaryCount counts category rows whose parent_id is non-zero.
// NOTE(review): the original comment says it counts "primary" categories, but
// `parent_id != 0` matches NON-top-level categories — confirm whether the
// condition or the name/comment is wrong.
func (category *Category) GetPrimaryCount() (int, error) {
	var count int64
	err := database.DataBase.Model(&Category{}).Where("parent_id != ?", 0).Count(&count).Error
	if err != nil {
		return int(count), err
	}
	return int(count), nil
}
// FindByName looks up a category with the same name as the receiver; used to
// detect duplicate names before creating one.
func (category *Category) FindByName() (*Category, error) {
	findCategory := &Category{}
	if err := database.DataBase.Model(&Category{}).Where("name = ?", category.Name).First(findCategory).Error; err != nil {
		return findCategory, err
	}
	return findCategory, nil
}
// FindByNameExistCurrentName looks up another category (excluding the current
// one by ID) that already uses this category's name; used to detect duplicate
// names on update.
func (category *Category) FindByNameExistCurrentName() (*Category, error) {
	findCategory := &Category{}
	// BUG FIX: the original chain had no finisher (no .First call), so the
	// query was never executed and findCategory was always empty; .Error on
	// an unexecuted chain also reported nothing useful. Mirror FindByName.
	if err := database.DataBase.Model(&Category{}).Where("name = ?", category.Name).Not("id = ?", category.ID).First(findCategory).Error; err != nil {
		return findCategory, err
	}
	return findCategory, nil
}
// Update writes the receiver's non-zero fields to the row matching its ID.
func (category *Category) Update() error {
	return database.DataBase.Model(&Category{}).Where("id = ?", category.ID).Updates(*category).Error
}
// Delete removes the category row.
func (category *Category) Delete() error {
	return database.DataBase.Delete(category).Error
}
|
package TigoWeb
// MethodMapping maps an HTTP method string — lower-case or upper-case — to
// the capitalized handler method name used for dispatch. Mixed-case inputs
// such as "Get" are intentionally absent.
var MethodMapping = map[string]string{
	"get":     "Get",
	"head":    "Head",
	"post":    "Post",
	"put":     "Put",
	"delete":  "Delete",
	"connect": "Connect",
	"options": "Options",
	"trace":   "Trace",
	"GET":     "Get",
	"HEAD":    "Head",
	"POST":    "Post",
	"PUT":     "Put",
	"DELETE":  "Delete",
	"CONNECT": "Connect",
	"OPTIONS": "Options",
	"TRACE":   "Trace",
}

// Version is the current TigoWeb release.
const Version = "1.6.5"
|
package db
import (
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
)
// DB is the package-level gorm connection handle.
var DB *gorm.DB

// OpenConnectionMysql opens a connection to MySQL, stores it in the
// package-level DB handle, and returns it.
func OpenConnectionMysql() (*gorm.DB, error) {
	// BUG FIX: the original used `DB, err := ...`, declaring a new local DB
	// that shadowed the package-level variable, so package DB was never set.
	db, err := gorm.Open("mysql", MysqlConnURL(BuildDbConfig()))
	if err != nil {
		return nil, err
	}
	DB = db
	return DB, nil
}
// AutoMigrateMysql runs gorm auto-migration for the given models; passing no
// models is a no-op.
func AutoMigrateMysql(db *gorm.DB, models ...interface{}) error {
	if len(models) == 0 {
		return nil
	}
	// BUG FIX: the original called db.AutoMigrate(models), handing gorm a
	// single []interface{} value instead of the individual model types;
	// the slice must be spread.
	db.AutoMigrate(models...)
	return nil
}
|
package ent
import (
"context"
log "github.com/sirupsen/logrus"
pb "way-jasy-cron/cron-logger/api"
"way-jasy-cron/cron-logger/internal/model/ent"
"way-jasy-cron/cron-logger/internal/model/ent/logger"
"way-jasy-cron/cron-logger/internal/model/ent_ex"
)
// ListLog returns a page of log entries for the operator in req, ordered by
// descending ID, together with the total count of matching rows.
func (m *Manager) ListLog(ctx context.Context, req *ent_ex.ListLoggerOptions) (logs []*ent.Logger, total int, err error) {
	total, err = m.Client.Logger.Query().Where(logger.OperatorEQ(req.Operator)).Count(ctx)
	if err != nil {
		// Fix: logrus's Error does not interpret format verbs; Errorf does.
		log.Errorf("count logs total err:(%v)", err)
		return nil, 0, err
	}
	logs, err = m.Client.Logger.Query().Where(logger.OperatorEQ(req.Operator)).
		Offset(req.OffSet()).Limit(req.Limit()).Order(ent.Desc(logger.FieldID)).All(ctx)
	if err != nil {
		log.Errorf("list logs err:(%v)", err)
		return nil, 0, err
	}
	return logs, total, nil
}
// CreateLog persists one log entry (operation text plus operator name) and
// returns an empty reply; any save error is passed straight through.
func (m *Manager) CreateLog(ctx context.Context, req *pb.WriteLogReq) (*pb.NoReply, error) {
	_, err := m.Client.Logger.Create().SetLog(req.Opt).SetOperator(req.Operator).Save(ctx)
	return &pb.NoReply{}, err
}
func (m *Manager) Show(ctx context.Context) error{
l, err := m.Client.Logger.Query().Order(ent.Asc(logger.FieldID)).Limit(1).All(ctx)
if err != nil {
log.Error("(%v)", err)
return err
}
return m.Client.Logger.DeleteOne(l[0]).Exec(ctx)
} |
// Copyright © 2018 John Slee <john@sleefamily.org>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"fmt"
"io"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/cobra"
)
var (
	// him8Histogram records HTTP response times bucketed by status code.
	him8Histogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_seconds",
		Help:    "HTTP response time",
		Buckets: []float64{1, 2, 5, 10},
	}, []string{"code"})
	him8Delay   time.Duration // artificial delay before responding
	him8Message string        // static message body to return
	him8Path    string        // URL path to serve the message on
	him8Listen  string        // [address]:port to bind to
	him8Cmd     = &cobra.Command{
		Use:   "hi-m8",
		Short: "Host a single message on an HTTP endpoint",
		Long: `hi-m8 simply listens on an HTTP endpoint and returns a static message.
Optionally, an artificial delay can be added prior to the message being returned.`,
		Run: func(cmd *cobra.Command, args []string) {
			// Fix: the original discarded Register's error; MustRegister
			// makes a registration failure (a programming error) visible.
			prometheus.MustRegister(him8Histogram)
			http.Handle("/metrics", promhttp.Handler())
			http.HandleFunc("/healthz", healthz)
			http.HandleFunc(him8Path, him8)
			// Fix: surface bind/listen failures instead of exiting silently.
			if err := http.ListenAndServe(him8Listen, nil); err != nil {
				fmt.Println(err)
			}
		},
	}
)
// healthz is a liveness probe handler: it always answers with a plain "OK".
func healthz(w http.ResponseWriter, r *http.Request) {
	const body = "OK"
	io.WriteString(w, body)
}
// him8 serves the configured static message, optionally after an artificial
// delay, and records the handling time in him8Histogram.
func him8(w http.ResponseWriter, r *http.Request) {
	time.Sleep(him8Delay)
	// NOTE(review): startTime is captured after the sleep, so the artificial
	// delay is excluded from the observed duration — confirm this is intended.
	startTime := time.Now()
	defer func() {
		// The http server closes the request body itself; this explicit
		// Close is redundant but harmless.
		r.Body.Close()
		// Always labeled 200: this handler never writes another status.
		him8Histogram.WithLabelValues(fmt.Sprint(http.StatusOK)).Observe(time.Since(startTime).Seconds())
	}()
	w.Write([]byte(him8Message))
}
// init registers the hi-m8 subcommand and binds its command-line flags to
// the package-level configuration variables.
func init() {
	rootCmd.AddCommand(him8Cmd)
	him8Cmd.Flags().DurationVar(&him8Delay, "delay", 0*time.Second, "sleep this duration before responding")
	him8Cmd.Flags().StringVar(&him8Message, "message", "hi m8", "specify a message to be returned to clients")
	him8Cmd.Flags().StringVar(&him8Listen, "listen", ":3000", "[address]:port to bind to")
	him8Cmd.Flags().StringVar(&him8Path, "path", "/", "specify a path to respond to")
}
|
package ravendb
import (
"crypto/rand"
"encoding/hex"
)
// implements generating a random uuid4 that mimics python's uuid.uuid4().
// It doesn't try to be fully UUIDv4 compliant: no version/variant bits are
// set — all 16 bytes are purely random.
// UUID represents a random 16-byte number.
type UUID struct {
	data [16]byte // raw random bytes, filled by NewUUID
}
// NewUUID creates a new UUID filled with cryptographically random bytes.
func NewUUID() *UUID {
	res := &UUID{}
	n, err := rand.Read(res.data[:])
	// Fix: the original discarded rand.Read's error; a failed read would
	// have produced a zeroed (non-random) UUID silently.
	panicIf(err != nil, "rand.Read() failed: %v", err)
	panicIf(n != 16, "rand.Read() returned %d, expected 16", n)
	return res
}
// String returns the canonical xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// representation of the UUID.
func (u *UUID) String() string {
	// Hex-encode all 16 bytes at once, then splice in the dashes at the
	// standard 8-4-4-4-12 group boundaries.
	full := make([]byte, 32)
	hex.Encode(full, u.data[:])
	out := make([]byte, 0, 36)
	out = append(out, full[0:8]...)
	out = append(out, '-')
	out = append(out, full[8:12]...)
	out = append(out, '-')
	out = append(out, full[12:16]...)
	out = append(out, '-')
	out = append(out, full[16:20]...)
	out = append(out, '-')
	out = append(out, full[20:]...)
	return string(out)
}
// Hex returns the hex-encoded form of the UUID without dashes.
// Equivalent of python's uuid.uuid4().hex.
func (u *UUID) Hex() string {
	out := make([]byte, hex.EncodedLen(len(u.data)))
	written := hex.Encode(out, u.data[:])
	panicIf(written != 32, "hex.Encode() returned %d, expected 32", written)
	return string(out)
}
|
package display
import "image"
// clip clips r against each image's bounds (after translating into the
// destination image's coordinate space) and shifts the points sp and mp by
// the same amount as the change in r.Min.
// Borrowed from "image".
func clip(dst image.Rectangle, r *image.Rectangle, src image.Rectangle, sp *image.Point, mask *image.Rectangle, mp *image.Point) {
orig := r.Min
*r = r.Intersect(dst)
*r = r.Intersect(src.Add(orig.Sub(*sp)))
if mask != nil {
*r = r.Intersect(mask.Add(orig.Sub(*mp)))
}
dx := r.Min.X - orig.X
dy := r.Min.Y - orig.Y
if dx == 0 && dy == 0 {
return
}
sp.X += dx
sp.Y += dy
if mp != nil {
mp.X += dx
mp.Y += dy
}
}
|
package model
import (
"github.com/guregu/null"
)
// StateCD values:
// 0 = pending
// 1 = fetching
// 2 = analyzing
// 3 = done
// 4 = error
// Project has uploaded repository information.
type Project struct {
	UUID   string   `json:"uuid" gorm:"primary_key"`
	UserID null.Int `json:"user_id"`
	// Raw dependency-manifest contents captured from the repository.
	CartfileContent string `json:"cartfile_content"`
	// NOTE(review): the JSON tag is "podfile_content", not
	// "podfile_lock_content" — confirm the mismatch with the field name is
	// intentional before changing it (it is part of the wire format).
	PodfileLockContent    string   `json:"podfile_content"`
	PbxprojContent        string   `json:"pbxproj_content"`
	SupportedSwiftVersion string   `json:"supported_swift_version"`
	Repository            string   `json:"repository"`
	LastFetchedAt         Time     `json:"last_fetched_at"` // Time is declared elsewhere in this package
	StateCD               int      `json:"status_cd"`       // see StateCD values above
}
|
package main
import "fmt"
// FibI is the decorator interface for this example: Fib computes the n-th
// Fibonacci number and Wrap installs the next decorator in the chain,
// returning the wrapped instance.
type FibI interface {
	Fib(n int) int
	Wrap(fib FibI) FibI
}
// Fib is the base Fibonacci implementation; recursive calls are routed
// through Wrapper so decorators can intercept them.
type Fib struct {
	Wrapper FibI
}
// Fib computes the n-th Fibonacci number, delegating the recursive calls
// through the configured wrapper so decorators can intercept them.
func (f *Fib) Fib(n int) int {
	// Fall back to self-delegation when no wrapper was installed.
	if f.Wrapper == nil {
		f.Wrapper = f
	}
	fmt.Printf("Fib.Fib..%T...%v\n", f.Wrapper, n)
	switch n {
	case 0:
		return 0
	case 1:
		return 1
	}
	// Recurse via the wrapper chain rather than calling f.Fib directly.
	fmt.Printf("Fib.Fib..%T...\n", f.Wrapper)
	return f.Wrapper.Fib(n-1) + f.Wrapper.Fib(n-2)
}
// Wrap installs fib as this instance's delegate and returns the receiver,
// logging the wrapper type before and after the swap.
func (f *Fib) Wrap(fib FibI) FibI {
	fmt.Printf("Fib.Wrap..%T...\n", f.Wrapper)
	f.Wrapper = fib
	fmt.Printf("Fib.Wrap..%T...\n", f.Wrapper)
	fmt.Println("Fib.Wrap 调用", f)
	return f
}
// CacheFib is a memoizing decorator: it caches results by n and forwards
// cache misses to the wrapped implementation.
type CacheFib struct {
	Wrapper FibI
	cache   map[int]int // lazily initialized in Fib
}
// Wrap installs fib as the delegate for cache misses and returns the receiver.
func (c *CacheFib) Wrap(fib FibI) FibI {
	c.Wrapper = fib
	fmt.Printf("CacheFib.Wrap..%T...\n", c.Wrapper)
	return c
}
// Fib returns the cached result for n when available; otherwise it asks the
// wrapped implementation and stores the answer before returning it.
func (c *CacheFib) Fib(n int) int {
	if c.cache == nil {
		c.cache = make(map[int]int)
	}
	if hit, ok := c.cache[n]; ok {
		return hit
	}
	result := c.Wrapper.Fib(n)
	c.cache[n] = result
	return result
}
// CounterFib is a counting decorator: it increments Counter on every Fib
// call before forwarding to the wrapped implementation.
type CounterFib struct {
	Wrapper FibI
	Counter int // number of Fib invocations observed
}
// Wrap installs fib as the delegate and returns the receiver.
func (cf *CounterFib) Wrap(fib FibI) FibI {
	cf.Wrapper = fib
	fmt.Printf("CounterFib.Wrap..%T...\n", cf.Wrapper)
	return cf
}
// Fib counts the call, then forwards n to the wrapped implementation.
func (cf *CounterFib) Fib(n int) int {
	cf.Counter++
	return cf.Wrapper.Fib(n)
}
// main wires the decorators into a cycle — cacheFib -> counterFib -> fib ->
// cacheFib — so every recursive call is both counted and memoized, then
// computes Fib(10) and prints the call count and cache contents.
func main() {
	fib := new(Fib)
	//fmt.Println("result fib", fib.Fib(10))
	cacheFib := new(CacheFib)
	//f := cacheFib.Wrap(fib.Wrap(cacheFib))
	//fmt.Println(f.Fib(10), "heihei")
	counterFib := new(CounterFib)
	counterCacheFib := cacheFib.Wrap(counterFib.Wrap(fib.Wrap(cacheFib)))
	fmt.Println("result cache:counter:fib", counterCacheFib.Fib(10))
	fmt.Printf("count: %d, cache: %v", counterFib.Counter, cacheFib.cache)
}
|
package wkbcommon
import (
"bytes"
"encoding/binary"
"io"
"github.com/paulmach/orb"
)
// byteOrder represents little or big endian encoding.
// We don't use binary.ByteOrder because that is an interface
// that leaks to the heap all over the place.
type byteOrder int

const bigEndian byteOrder = 0
const littleEndian byteOrder = 1

// Geometry type codes as used by the WKB format; ewkbType is the flag bit
// set on the type word when an SRID follows the header (EWKB).
const (
	pointType              uint32 = 1
	lineStringType         uint32 = 2
	polygonType            uint32 = 3
	multiPointType         uint32 = 4
	multiLineStringType    uint32 = 5
	multiPolygonType       uint32 = 6
	geometryCollectionType uint32 = 7
	ewkbType               uint32 = 0x20000000
)

const (
	// limits so that bad data can't come in and preallocate tons of memory.
	// Well formed data with less elements will allocate the correct amount just fine.
	MaxPointsAlloc = 10000
	MaxMultiAlloc  = 100
)

// DefaultByteOrder is the order used for marshalling or encoding
// if none is specified.
var DefaultByteOrder binary.ByteOrder = binary.LittleEndian
// An Encoder will encode a geometry as (E)WKB to the writer given at
// creation time.
type Encoder struct {
	buf   []byte           // scratch space for headers, allocated lazily in Encode
	w     io.Writer        // destination for the encoded bytes
	order binary.ByteOrder // byte order for all multi-byte values
}
// MustMarshal will encode the geometry and panic on error.
// Currently there is no reason to error during geometry marshalling.
func MustMarshal(geom orb.Geometry, srid int, byteOrder ...binary.ByteOrder) []byte {
	data, err := Marshal(geom, srid, byteOrder...)
	if err != nil {
		panic(err)
	}
	return data
}
// Marshal encodes the geometry with the given byte order (DefaultByteOrder
// when none is supplied). A non-zero srid produces EWKB output.
func Marshal(geom orb.Geometry, srid int, byteOrder ...binary.ByteOrder) ([]byte, error) {
	// Preallocate exactly the encoded size to avoid buffer growth.
	buf := bytes.NewBuffer(make([]byte, 0, GeomLength(geom, srid != 0)))
	enc := NewEncoder(buf)
	if len(byteOrder) != 0 {
		enc.SetByteOrder(byteOrder[0])
	}
	if err := enc.Encode(geom, srid); err != nil {
		return nil, err
	}
	if buf.Len() == 0 {
		// nil geometries write nothing; report that as a nil slice
		return nil, nil
	}
	return buf.Bytes(), nil
}
// NewEncoder creates a new Encoder writing to w, using DefaultByteOrder.
func NewEncoder(w io.Writer) *Encoder {
	enc := &Encoder{}
	enc.w = w
	enc.order = DefaultByteOrder
	return enc
}
// SetByteOrder will override the default byte order set when
// the encoder was created.
func (e *Encoder) SetByteOrder(bo binary.ByteOrder) {
	e.order = bo
}
// Encode will write the geometry encoded as (E)WKB to the given writer.
// A non-zero srid produces EWKB output. Nil geometries (including typed-nil
// slices) write nothing and return nil.
func (e *Encoder) Encode(geom orb.Geometry, srid int) error {
	if geom == nil {
		return nil
	}
	switch g := geom.(type) {
	// nil values should not write any data. Empty sizes will still
	// write an empty version of that type.
	case orb.MultiPoint:
		if g == nil {
			return nil
		}
	case orb.LineString:
		if g == nil {
			return nil
		}
	case orb.MultiLineString:
		if g == nil {
			return nil
		}
	case orb.Polygon:
		if g == nil {
			return nil
		}
	case orb.MultiPolygon:
		if g == nil {
			return nil
		}
	case orb.Collection:
		if g == nil {
			return nil
		}
	// deal with types that are not supported by wkb
	case orb.Ring:
		if g == nil {
			return nil
		}
		// a ring is encoded as a single-ring polygon
		geom = orb.Polygon{g}
	case orb.Bound:
		geom = g.ToPolygon()
	}
	// header byte: 1 = little endian, 0 = big endian (WKB convention)
	var b []byte
	if e.order == binary.LittleEndian {
		b = []byte{1}
	} else {
		b = []byte{0}
	}
	_, err := e.w.Write(b)
	if err != nil {
		return err
	}
	// lazily allocate the scratch buffer shared by the write* helpers
	if e.buf == nil {
		e.buf = make([]byte, 16)
	}
	switch g := geom.(type) {
	case orb.Point:
		return e.writePoint(g, srid)
	case orb.MultiPoint:
		return e.writeMultiPoint(g, srid)
	case orb.LineString:
		return e.writeLineString(g, srid)
	case orb.MultiLineString:
		return e.writeMultiLineString(g, srid)
	case orb.Polygon:
		return e.writePolygon(g, srid)
	case orb.MultiPolygon:
		return e.writeMultiPolygon(g, srid)
	case orb.Collection:
		return e.writeCollection(g, srid)
	}
	// unreachable for the geometry types normalized above
	panic("unsupported type")
}
// writeTypePrefix writes the geometry-type header: type and element count
// for plain WKB, or flagged type, SRID, and element count for EWKB.
func (e *Encoder) writeTypePrefix(t uint32, l int, srid int) error {
	if srid == 0 {
		// plain WKB: 4-byte type then 4-byte element count
		e.order.PutUint32(e.buf[0:4], t)
		e.order.PutUint32(e.buf[4:8], uint32(l))
		_, err := e.w.Write(e.buf[:8])
		return err
	}
	// EWKB: type with the SRID flag set, then SRID, then element count
	e.order.PutUint32(e.buf[0:4], t|ewkbType)
	e.order.PutUint32(e.buf[4:8], uint32(srid))
	e.order.PutUint32(e.buf[8:12], uint32(l))
	_, err := e.w.Write(e.buf[:12])
	return err
}
// Decoder can decode (E)WKB geometry off of the stream.
type Decoder struct {
	r io.Reader // source stream, read incrementally by Decode
}
// Unmarshal will decode the type into a Geometry.
// It returns the geometry and the SRID (0 for plain WKB input).
func Unmarshal(data []byte) (orb.Geometry, int, error) {
	order, typ, srid, geomData, err := unmarshalByteOrderType(data)
	if err != nil {
		return nil, 0, err
	}
	var g orb.Geometry
	switch typ {
	case pointType:
		g, err = unmarshalPoint(order, geomData)
	case multiPointType:
		g, err = unmarshalMultiPoint(order, geomData)
	case lineStringType:
		g, err = unmarshalLineString(order, geomData)
	case multiLineStringType:
		g, err = unmarshalMultiLineString(order, geomData)
	case polygonType:
		g, err = unmarshalPolygon(order, geomData)
	case multiPolygonType:
		g, err = unmarshalMultiPolygon(order, geomData)
	case geometryCollectionType:
		// collections are decoded via the streaming decoder over the full
		// input (header included); truncated data maps to ErrNotWKB
		g, _, err := NewDecoder(bytes.NewReader(data)).Decode()
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return nil, 0, ErrNotWKB
		}
		return g, srid, err
	default:
		return nil, 0, ErrUnsupportedGeometry
	}
	if err != nil {
		return nil, 0, err
	}
	return g, srid, nil
}
// NewDecoder will create a new (E)WKB decoder reading from r.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: r}
}
// Decode will decode the next geometry off of the stream.
// It returns the geometry and, for EWKB input, its SRID (0 otherwise).
func (d *Decoder) Decode() (orb.Geometry, int, error) {
	// 8-byte scratch buffer shared by all the read* helpers
	buf := make([]byte, 8)
	order, typ, srid, err := readByteOrderType(d.r, buf)
	if err != nil {
		return nil, 0, err
	}
	var g orb.Geometry
	switch typ {
	case pointType:
		g, err = readPoint(d.r, order, buf)
	case multiPointType:
		g, err = readMultiPoint(d.r, order, buf)
	case lineStringType:
		g, err = readLineString(d.r, order, buf)
	case multiLineStringType:
		g, err = readMultiLineString(d.r, order, buf)
	case polygonType:
		g, err = readPolygon(d.r, order, buf)
	case multiPolygonType:
		g, err = readMultiPolygon(d.r, order, buf)
	case geometryCollectionType:
		g, err = readCollection(d.r, order, buf)
	default:
		return nil, 0, ErrUnsupportedGeometry
	}
	if err != nil {
		return nil, 0, err
	}
	return g, srid, nil
}
// readByteOrderType reads the (E)WKB header from the stream: the byte-order
// marker, the 4-byte geometry type and, when the EWKB flag is set, the
// 4-byte SRID. buf must provide at least 4 bytes of scratch space.
func readByteOrderType(r io.Reader, buf []byte) (byteOrder, uint32, int, error) {
	// the byte order is the first byte; use ReadFull because io.Reader.Read
	// is allowed to return (0, nil), which the original treated as success
	if _, err := io.ReadFull(r, buf[:1]); err != nil {
		return 0, 0, 0, err
	}
	var order byteOrder
	if buf[0] == 0 {
		order = bigEndian
	} else if buf[0] == 1 {
		order = littleEndian
	} else {
		return 0, 0, 0, ErrNotWKB
	}
	// the type which is 4 bytes
	typ, err := readUint32(r, order, buf[:4])
	if err != nil {
		return 0, 0, 0, err
	}
	if typ&ewkbType == 0 {
		// plain WKB: no SRID present
		return order, typ, 0, nil
	}
	srid, err := readUint32(r, order, buf[:4])
	if err != nil {
		return 0, 0, 0, err
	}
	// NOTE(review): this masks with 0x0ff while unmarshalByteOrderType uses
	// 0x0F; both cover the 7 known type codes — confirm the intended mask.
	return order, typ & 0x0ff, int(srid), nil
}
// readUint32 fills buf from r and decodes it with the given byte order.
func readUint32(r io.Reader, order byteOrder, buf []byte) (uint32, error) {
	_, err := io.ReadFull(r, buf)
	if err != nil {
		return 0, err
	}
	return unmarshalUint32(order, buf), nil
}
// unmarshalByteOrderType splits a (E)WKB buffer into its byte order, the
// geometry type (flag bits masked off), the SRID (0 when absent), and the
// remaining geometry payload.
func unmarshalByteOrderType(buf []byte) (byteOrder, uint32, int, []byte, error) {
	order, typ, err := byteOrderType(buf)
	if err != nil {
		return 0, 0, 0, nil, err
	}
	if typ&ewkbType == 0 {
		// regular wkb, no srid; payload starts after the 5-byte header
		return order, typ & 0x0F, 0, buf[5:], nil
	}
	// EWKB: the 4 SRID bytes follow the header, so 10 bytes minimum
	if len(buf) < 10 {
		return 0, 0, 0, nil, ErrNotWKB
	}
	srid := unmarshalUint32(order, buf[5:])
	return order, typ & 0x0F, int(srid), buf[9:], nil
}
// byteOrderType parses the byte-order marker and the 4-byte geometry type
// from the front of buf, which must hold at least the 5-byte header plus
// one byte of payload.
func byteOrderType(buf []byte) (byteOrder, uint32, error) {
	if len(buf) < 6 {
		return 0, 0, ErrNotWKB
	}
	var order byteOrder
	if buf[0] == 0 {
		order = bigEndian
	} else if buf[0] == 1 {
		order = littleEndian
	} else {
		return 0, 0, ErrNotWKBHeader
	}
	// the type which is 4 bytes
	typ := unmarshalUint32(order, buf[1:])
	return order, typ, nil
}
// unmarshalUint32 decodes the first four bytes of buf using the given order.
func unmarshalUint32(order byteOrder, buf []byte) uint32 {
	switch order {
	case littleEndian:
		return binary.LittleEndian.Uint32(buf)
	default:
		return binary.BigEndian.Uint32(buf)
	}
}
// GeomLength helps to do preallocation during a marshal: it returns the
// number of bytes the (E)WKB encoding of geom will occupy. ewkb accounts
// for the extra 4-byte SRID in the top-level header.
func GeomLength(geom orb.Geometry, ewkb bool) int {
	var sridExtra int
	if ewkb {
		sridExtra = 4
	}
	switch g := geom.(type) {
	case orb.Point:
		// 1 order byte + 4 type bytes + 16 coordinate bytes
		return 21 + sridExtra
	case orb.MultiPoint:
		return 9 + 21*len(g) + sridExtra
	case orb.LineString:
		return 9 + 16*len(g) + sridExtra
	case orb.MultiLineString:
		total := 0
		for _, ls := range g {
			total += 9 + 16*len(ls)
		}
		return 9 + total + sridExtra
	case orb.Polygon:
		total := 0
		for _, ring := range g {
			total += 4 + 16*len(ring)
		}
		return 9 + total + sridExtra
	case orb.MultiPolygon:
		total := 0
		for _, child := range g {
			total += GeomLength(child, false)
		}
		return 9 + total + sridExtra
	case orb.Collection:
		total := 0
		for _, child := range g {
			total += GeomLength(child, false)
		}
		return 9 + total + sridExtra
	}
	return 0
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.