text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
//"database/sql"
//"github.com/golang/crypto/ssh"
"golang.org/x/crypto/ssh"
//utils "github.com/converge/wandycatkeeper/utils"
//"os"
"strings"
"io/ioutil"
//"bufio"
"github.com/google/uuid"
)
// DiskInfo describes one disk found on one physical machine.
type DiskInfo struct {
	Uuid     string // freshly generated (uuid.New) on every collection run, not stable across runs
	Ip       string // IP address of the machine the disk belongs to
	Name     string // disk device name parsed from the collector script output
	Capility string // disk capacity string; "Capility" is a legacy misspelling kept for API compatibility
	Status   string // status field parsed from the collector script output
}

// DiskInfos is a list wrapper around DiskInfo.
type DiskInfos struct {
	DiskInfos []DiskInfo
}

// sshcmd bundles the parameters for one remote command execution over SSH.
type sshcmd struct {
	user     string
	password string
	ip_port  string // "host:port" as accepted by ssh.Dial
	cmd      string // full script text executed on the remote shell
}

// Netinfo holds the measured network speed of one node.
type Netinfo struct {
	Ip       string
	Netspeed string
}

// Netinfos is a list wrapper around Netinfo.
type Netinfos struct {
	Netinfos []*Netinfo
}

// Paths of the shell scripts whose contents are sent over SSH and
// executed on each node.
const SHPATH = "sh/disk_collector.sh"
const NETPATH = "sh/net_info.sh"
// GetNetSpeedInfos collects the network speed of every physical machine
// registered in the database. Nodes that cannot be reached are skipped
// rather than aborting the whole scan.
func GetNetSpeedInfos() (*Netinfos, error) {
	db, err := GetDB()
	if err != nil {
		// BUG FIX: db.Close was previously deferred before this check,
		// which panics on a nil handle when GetDB fails.
		return nil, err
	}
	defer db.Close()
	rows, err := db.Query("select IpAddress from physicalmachine")
	if err != nil {
		return nil, err
	}
	defer rows.Close() // previously leaked when the loop exited early
	infos := &Netinfos{} // renamed: the variable used to shadow the Netinfos type
	var ip string
	for rows.Next() {
		if err := rows.Scan(&ip); err != nil {
			return nil, err
		}
		info, err := GetNetSpeedInfo(ip)
		if err != nil {
			// BUG FIX: the error was ignored and a nil entry appended.
			fmt.Println("GetNetSpeedInfo failed for", ip, ":", err)
			continue
		}
		fmt.Println("end GetNetSpeedInfo", info)
		infos.Netinfos = append(infos.Netinfos, info)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	fmt.Println("end GetNetSpeedInfo", infos)
	return infos, nil
}
// GetNetSpeedInfo runs the net-info script (NETPATH) on one node over SSH
// and returns the node's IP plus the script's newline-stripped output.
func GetNetSpeedInfo(ip string) (*Netinfo, error) {
	fmt.Println("entry GetNetSpeedInfo")
	script, err := getFileInfo(NETPATH)
	if err != nil {
		// BUG FIX: this error was previously discarded, sending an empty
		// command over SSH when the script file is missing.
		return nil, err
	}
	scm := &sshcmd{
		user:     "root",
		password: "1234,qwer", // NOTE(review): hard-coded credentials — load from config instead
		ip_port:  ip + ":22",
		cmd:      script,
	}
	stdout, err := SSH_do(scm.user, scm.password, scm.ip_port, scm.cmd)
	if err != nil {
		fmt.Println("SSH_do", err)
		return nil, err
	}
	fmt.Println("end SSH_do", stdout)
	info := &Netinfo{
		Ip:       ip,
		Netspeed: strings.Replace(stdout, "\n", "", -1),
	}
	fmt.Println("end GetNetSpeedInfo", info)
	return info, nil
}
// GetDiskInfo runs the disk-collector script (SHPATH) on one node over SSH
// and parses each non-empty output line of the form
// "<tag>:<... name>:<... capacity>:<status>" into a DiskInfo.
// Malformed lines are logged and skipped instead of panicking.
func GetDiskInfo(ip string) (*DiskInfos, error) {
	fmt.Println("entry GetDiskInfo")
	script, err := getFileInfo(SHPATH)
	if err != nil {
		// BUG FIX: this error was previously discarded.
		return nil, err
	}
	scm := &sshcmd{
		user:     "root",
		password: "1234,qwer", // NOTE(review): hard-coded credentials — load from config instead
		ip_port:  ip + ":22",
		cmd:      script,
	}
	stdout, err := SSH_do(scm.user, scm.password, scm.ip_port, scm.cmd)
	if err != nil {
		fmt.Println("SSH_do", err)
		return nil, err
	}
	fmt.Println("end SSH_do", stdout)
	// NOTE(review): the previous version opened a database connection here
	// but never used it (the insert was commented out); the dead
	// connection and its mis-ordered deferred Close have been removed.
	infos := &DiskInfos{} // renamed: variables used to shadow the DiskInfo/DiskInfos types
	for _, line := range strings.Split(stdout, "\n") {
		if line == "" {
			continue
		}
		fmt.Println("line", line)
		fields := strings.Split(line, ":")
		// Guard every index/slice access so a malformed line cannot panic.
		if len(fields) < 4 {
			fmt.Println("skipping malformed line:", line)
			continue
		}
		names := strings.Split(fields[1], " ")
		capParts := strings.Split(fields[2], " ")
		if len(names) < 2 || len(capParts) < 3 {
			fmt.Println("skipping malformed line:", line)
			continue
		}
		info := DiskInfo{
			Uuid:     uuid.New().String(),
			Ip:       ip,
			Name:     names[1],
			Capility: strings.Join(capParts[:3], ""),
			Status:   fields[3],
		}
		fmt.Println("get physicalmachinediskinfo", info.Uuid, info.Ip, info.Name, info.Capility, info.Status)
		infos.DiskInfos = append(infos.DiskInfos, info)
		fmt.Println("end physicalmachinediskinfo")
	}
	return infos, nil
}
// GetDiskInfos collects the disk information of every physical machine
// registered in the database, skipping unreachable nodes.
func GetDiskInfos() (*DiskInfos, error) {
	db, err := GetDB()
	if err != nil {
		// BUG FIX: db.Close was previously deferred before this check,
		// which panics on a nil handle when GetDB fails.
		return nil, err
	}
	defer db.Close()
	rows, err := db.Query("select IpAddress from physicalmachine")
	if err != nil {
		return nil, err
	}
	defer rows.Close() // previously leaked
	all := &DiskInfos{}
	var ip string
	for rows.Next() {
		if err := rows.Scan(&ip); err != nil {
			return nil, err
		}
		nodeInfos, err := GetDiskInfo(ip)
		if err != nil {
			// BUG FIX: the error was ignored and the nil result
			// dereferenced, panicking on the first unreachable node.
			fmt.Println("GetDiskInfo failed for", ip, ":", err)
			continue
		}
		all.DiskInfos = append(all.DiskInfos, nodeInfos.DiskInfos...)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return all, nil
}
//读取文件(filePath)里面的内容,返回一个String
func getFileInfo(filePath string) (string, error){
fd, err := ioutil.ReadFile(filePath)
if err != nil {
return "",err
}
return string(fd[:]), nil
}
// SSH_do connects to ip_port with password authentication, runs cmd in a
// single session, and returns the command's standard output.
//
// NOTE(review): ssh.InsecureIgnoreHostKey disables host-key verification,
// leaving the connection open to MITM — acceptable only on a trusted LAN.
func SSH_do(user, password, ip_port string, cmd string) (string, error) {
	conf := ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.Password(password)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", ip_port, &conf)
	if err != nil {
		return "", err
	}
	defer client.Close()
	session, err := client.NewSession()
	if err != nil {
		return "", err
	}
	// BUG FIX: Close was previously deferred before the error check,
	// panicking on a nil session when NewSession fails.
	defer session.Close()
	out, err := session.Output(cmd)
	return string(out), err
}
|
/**
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// [START all]
package main
import (
"fmt"
"log"
"net/http"
"os"
"time"
)
// Version and Buildtime are intended to be injected at build time
// (e.g. via -ldflags) — TODO confirm against the build scripts.
var Version string
var Buildtime string

// Hostname is resolved once at startup; oserr records any failure.
var Hostname, oserr = os.Hostname()

// main starts a once-per-second heartbeat logger and serves the demo
// endpoints on $PORT (default 8080).
func main() {
	go func() {
		for range time.NewTicker(time.Second).C {
			// BUG FIX: the timestamp was concatenated into the Printf
			// format string (non-constant format, flagged by go vet).
			log.Printf("This is a heartbeat...%s", time.Now().String())
		}
	}()
	// use PORT environment variable, or default to 8080
	port := "8080"
	if fromEnv := os.Getenv("PORT"); fromEnv != "" {
		port = fromEnv
	}
	// register functions to handle all requests
	mux := http.NewServeMux()
	mux.HandleFunc("/", index)
	mux.HandleFunc("/d2iq", d2iq)
	mux.HandleFunc("/d2iq/", d2iq)
	mux.HandleFunc("/docker", docker)
	mux.HandleFunc("/docker/", docker)
	// Serve with explicit timeouts so slow clients cannot hold
	// connections open forever.
	srv := &http.Server{
		Addr:         ":" + port,
		Handler:      mux,
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	// start the web server on port and accept requests
	log.Printf("Server listening on port %s", port)
	log.Fatal(srv.ListenAndServe())
}
// index responds to "/" with an ASCII-art D2iQ banner followed by the
// build version/time, container hostname, and current time in plain text.
func index(w http.ResponseWriter, r *http.Request) {
	log.Printf("Serving request: %s", r.URL.Path)
	// ASCII-art banner ("D2iQ").
	fmt.Fprint(w,"\n")
	fmt.Fprint(w," ____ ____ _ ___\n")
	fmt.Fprint(w,"| _ \\___ \\(_)/ _ \\\n")
	fmt.Fprint(w,"| | | |__) | | | | |\n")
	fmt.Fprint(w,"| |_| / __/| | |_| |\n")
	fmt.Fprint(w,"|____/_____|_|\\__\\_\\\n")
	fmt.Fprint(w,"\n")
	fmt.Fprintf(w, "Hello!, from D2iQ Dispatch CI/CD GitOps!\n")
	// Version and Buildtime are package vars populated at build time.
	fmt.Fprintf(w, "Version: %s\n", Version)
	fmt.Fprintf(w, "Build time: %s\n", Buildtime)
	fmt.Fprintf(w, "App Version: 20200318-2\n")
	fmt.Fprint(w,"\n")
	fmt.Fprint(w, "Container hostname: ", Hostname, "\n")
	dt := time.Now()
	fmt.Fprint(w, "Time: ", dt.Format("2006-01-02 15:04:05.00"), "\n")
}
// d2iq responds to "/d2iq" with an ASCII-art "hello D2iQ" banner plus a
// timestamped container-hostname line.
func d2iq(w http.ResponseWriter, r *http.Request) {
	// ASCII-art banner ("hello D2iQ").
	fmt.Fprint(w,"\n")
	fmt.Fprint(w," _ _ _ ____ ____ _ ___\n")
	fmt.Fprint(w,"| | ___ | | | | ___ | _ \\___ \\(_)/ _ \\\n")
	fmt.Fprint(w,"| |___ / _ \\| | | | / _ \\ | | | |__) | | | | |\n")
	fmt.Fprint(w,"| _ | __/| |__| |__| (_) | | |_| / __/| | |_| |\n")
	fmt.Fprint(w,"|_| |_|\\___/ \\___|\\___|\\___/ |____/_____|_|\\__\\_\\\n")
	fmt.Fprint(w,"\n")
	dt := time.Now()
	fmt.Fprint(w,"[", dt.Format("2006-01-02 15:04:05.00"), "] ", "Container hostname: ", Hostname, "\n")
}
// docker responds to "/docker" with an ASCII-art "hello docker" whale
// banner plus a timestamped container-hostname line.
func docker(w http.ResponseWriter, r *http.Request) {
	// ASCII-art banner (Docker whale + "hello docker").
	fmt.Fprint(w,"\n")
	fmt.Fprint(w," ##\n")
	fmt.Fprint(w," ## ## ## ==\n")
	fmt.Fprint(w," ## ## ## ## ## ===\n")
	fmt.Fprint(w," /`````````````````\\___/ ===\n")
	fmt.Fprint(w," ~~~ {~~ ~~~~ ~~~ ~~~~ ~~~ ~~/~ === ~~~\n")
	fmt.Fprint(w," \\______ o __/\n")
	fmt.Fprint(w," \\ \\ __/\n")
	fmt.Fprint(w," \\____\\_______/\n")
	fmt.Fprint(w," _ _ _ _ _\n")
	fmt.Fprint(w,"| | ___ | | | | ___ __| | ___ ___| | _____ _ __\n")
	fmt.Fprint(w,"| |___ / _ \\| | | | / _ \\ / _ |/ _ \\ / __| |/ / _ \\ '__|\n")
	fmt.Fprint(w,"| _ | __/| |__| |__| (_) | | (_| | (_) | (__| < __/ |\n")
	fmt.Fprint(w,"|_| |_|\\___/ \\___|\\___|\\___/ \\____|\\___/ \\___|_|\\_\\___|_|\n")
	fmt.Fprint(w,"\n")
	dt := time.Now()
	fmt.Fprint(w,"[", dt.Format("2006-01-02 15:04:05.00"), "] ", "Container hostname: ", Hostname, "\n")
}
// [END all]
|
package main
/**
对称二叉树
给定一个二叉树,检查它是否是镜像对称的。
例如,二叉树 `[1,2,2,3,4,4,3]` 是对称的。
```
1
/ \
2 2
/ \ / \
3 4 4 3
```
但是下面这个 `[1,2,2,null,3,null,3]` 则不是镜像对称的:
```
1
/ \
2 2
\ \
3 3
```
*/
/**
还可以用递归做,第一时间没想到
*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node (standard LeetCode definition).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// IsSymmetric reports whether the tree is a mirror image of itself around
// its center (LeetCode 101). It walks the tree iteratively, dequeuing
// mirrored node pairs and enqueuing their children in mirrored order.
func IsSymmetric(root *TreeNode) bool {
	if root == nil {
		return true
	}
	queue := []*TreeNode{root.Left, root.Right}
	for len(queue) > 0 {
		left, right := queue[0], queue[1]
		queue = queue[2:]
		switch {
		case left == nil && right == nil:
			continue
		case left == nil || right == nil:
			return false
		case left.Val != right.Val:
			return false
		}
		// Outer children pair with outer, inner with inner.
		queue = append(queue, left.Left, right.Right, left.Right, right.Left)
	}
	return true
}
|
package main
import (
"flag"
"github.com/astaxie/beego"
"github.com/astaxie/beego/logs"
"github.com/uxff/taniago/conf/inits"
"github.com/uxff/taniago/models"
"github.com/uxff/taniago/models/picset"
_ "github.com/uxff/taniago/routers"
)
// main wires up command-line flags, logging, optional static serving,
// link/page data, and the database before starting the beego HTTP server.
//
// NOTE(review): the flow is order-dependent (flags must be parsed before
// their values are used; the DB is prepared right before beego.Run), so
// the statement order is left untouched.
func main() {
	logdeep := 3
	serveDir := "r:/themedia" //"."
	// Default listen address comes from app.conf's httpport; -addr overrides it.
	addr := ":" + beego.AppConfig.String("httpport")
	serveStatic := false
	//flag.IntVar(&logdeep, "logdeep", logdeep, "log deep")
	flag.StringVar(&serveDir, "dir", serveDir, "serve dir, witch will browse")
	flag.StringVar(&addr, "addr", addr, "beego run param addr, format as ip:port")
	flag.BoolVar(&serveStatic, "s", serveStatic, "serve static")
	//flag.StringVar(&appenv, "env", appenv, "app env, in app.conf")//use env BEEGO_MODE=dev
	flag.Parse()
	logs.SetLevel(logs.LevelDebug)
	logs.SetLogFuncCallDepth(logdeep)
	// todo: use nginx instead
	if serveStatic {
		// Expose serveDir under the /fs static route.
		beego.SetStaticPath("fs", serveDir)
	}
	//beego.AppConfig.Set("", "")
	//controllers.SetLocalDirRoot(serveDir)
	picset.SetLocalDirRoot(serveDir)
	//models.LoadIndexLinksFromFile("./conf/friends.json")
	// Index links and friendly links are loaded from JSON config files.
	models.SetLinksPath("./conf/index.json")
	models.LoadIndexLinks()
	models.SetFriendlyLinksPath("./conf/friends.json")
	models.LoadFriendlyLinks()
	logs.Info("beego server will run. dir=%s addr=%s serveStatic=%v", serveDir, addr, serveStatic)
	inits.PrepareDb()
	beego.Run(addr)
}
|
package remote
import (
"fmt"
"github.com/callumj/weave/remote/s3"
"github.com/callumj/weave/remote/uptypes"
"github.com/callumj/weave/tools"
"io"
"io/ioutil"
"log"
"net/http"
"os"
)
// DownloadInfo describes a completed download: where the temp file landed
// and the ETag the server reported for it.
type DownloadInfo struct {
	FilePath string // path of the temp file holding the downloaded body
	ETag     string // value of the response's ETag header (may be empty)
}

// UploadToS3 uploads the described files to S3 using the given config;
// it is a thin pass-through to the s3 subpackage.
func UploadToS3(config uptypes.S3Config, files []uptypes.FileDescriptor) {
	s3.UploadToS3(config, files)
}
// DownloadRemoteFile fetches url into a temp file, sending the cached
// ETag (read from <finalDirectory>/.weave.etag, if present) as
// If-None-Match so an unchanged object is not re-downloaded.
//
// It returns nil when the object is unmodified (304) or on any error;
// the temp file is removed on every failure path.
func DownloadRemoteFile(url, finalDirectory string) *DownloadInfo {
	etagFile := fmt.Sprintf("%v/%v", finalDirectory, ".weave.etag")
	var eTag string
	if tools.PathExists(etagFile) {
		contents, err := ioutil.ReadFile(etagFile)
		if err != nil {
			log.Printf("Unable to read eTag file %v\r\n", etagFile)
		}
		// On read failure contents is nil, so eTag stays "" (best effort).
		eTag = string(contents)
	}
	out, err := ioutil.TempFile("/tmp", "weave")
	if err != nil {
		log.Printf("Unable to create temp file\r\n")
		return nil
	}
	defer out.Close()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		log.Printf("Unable to construct URL for %v\r\n", url)
		os.Remove(out.Name())
		return nil
	}
	if len(eTag) != 0 {
		req.Header.Add("If-None-Match", eTag)
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Printf("Unable to communicate with server %v\r\n", url)
		os.Remove(out.Name())
		return nil
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		log.Printf("Object not modified, finishing up.\r\n")
		os.Remove(out.Name())
		return nil
	}
	// BUG FIX: previously any other status (404, 500, ...) had its error
	// page saved to disk as if it were the requested file.
	if resp.StatusCode != http.StatusOK {
		log.Printf("Unexpected status %v from %v\r\n", resp.StatusCode, url)
		os.Remove(out.Name())
		return nil
	}
	n, err := io.Copy(out, resp.Body)
	if err != nil {
		log.Printf("Unable to download file\r\n")
		os.Remove(out.Name())
		return nil
	}
	if n == 0 {
		log.Printf("Nothing was copied\r\n")
		os.Remove(out.Name())
		return nil
	}
	return &DownloadInfo{
		FilePath: out.Name(),
		ETag:     resp.Header.Get("ETag"),
	}
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
)
// Users is the top-level shape of user.json: a list of users.
type Users struct {
	Users []User `json:"users"`
}

// User is one record from user.json.
type User struct {
	Name   string `json:"name"`
	Type   string `json:"type"`
	Age    int    `json:"age"`
	Social Social `json:"social"`
}

// Social holds a user's social-media handles.
type Social struct {
	Facebook string `json:"facebook"`
	Twitter  string `json:"twitter"`
}
// main reads user.json from the working directory, decodes it into Users,
// and prints selected fields of each user. Any I/O or decode failure is
// fatal.
func main() {
	FilePath := "user.json"
	JsonFileHandler, err := os.Open(FilePath)
	if err != nil {
		log.Fatal("File not found!")
	}
	defer JsonFileHandler.Close()
	fmt.Println("Successfully open file:", FilePath)
	byteVal, err := ioutil.ReadAll(JsonFileHandler)
	if err != nil {
		// BUG FIX: the read error was previously discarded.
		log.Fatal(err)
	}
	var users Users
	if err := json.Unmarshal(byteVal, &users); err != nil {
		// BUG FIX: the decode error was previously discarded, silently
		// printing an empty user list on malformed JSON.
		log.Fatal(err)
	}
	for _, u := range users.Users {
		fmt.Println("User Name:", u.Name)
		fmt.Println("User Age:", u.Age)
		fmt.Println("Facebook:", u.Social.Facebook)
		fmt.Println("Twitter:", u.Social.Twitter)
	}
}
|
package alerts
import (
"github.com/dennor/go-paddle/events/types"
"github.com/dennor/phpserialize"
"github.com/shopspring/decimal"
)
// PaymentRefundedAlertName is the alert_name value Paddle sends for this
// webhook.
const PaymentRefundedAlertName = "payment_refunded"

// PaymentRefunded refer to https://paddle.com/docs/reference-using-webhooks/#payment_refunded
//
// Numeric fields are tagged ",string" because Paddle sends them as quoted
// strings. PSignature is excluded from PHP serialization (php:"-") since
// the signature is computed over the other fields.
type PaymentRefunded struct {
	AlertName               string                  `json:"alert_name"`
	Amount                  *decimal.Decimal        `json:"amount,string"`
	BalanceCurrency         string                  `json:"balance_currency"`
	BalanceEarningsDecrease *decimal.Decimal        `json:"balance_earnings_decrease,string"`
	BalanceFeeRefund        *decimal.Decimal        `json:"balance_fee_refund,string"`
	BalanceGrossRefund      *decimal.Decimal        `json:"balance_gross_refund,string"`
	BalanceTaxRefund        *decimal.Decimal        `json:"balance_tax_refund,string"`
	CheckoutID              string                  `json:"checkout_id"`
	Currency                string                  `json:"currency"`
	EarningsDecrease        *decimal.Decimal        `json:"earnings_decrease,string"`
	Email                   string                  `json:"email"`
	EventTime               *types.Datetime         `json:"event_time,string"`
	FeeRefund               *decimal.Decimal        `json:"fee_refund,string"`
	GrossRefund             *decimal.Decimal        `json:"gross_refund,string"`
	MarketingConsent        *types.MarketingConsent `json:"marketing_consent,string"`
	OrderID                 string                  `json:"order_id"`
	Passthrough             string                  `json:"passthrough"`
	Quantity                int                     `json:"quantity,string"`
	RefundType              string                  `json:"refund_type"`
	TaxRefund               *decimal.Decimal        `json:"tax_refund,string"`
	PSignature              string                  `json:"p_signature" php:"-"`
}
// Serialize returns the PHP-serialized form of the alert; the PSignature
// field is omitted via its php:"-" tag.
func (s *PaymentRefunded) Serialize() ([]byte, error) {
	return phpserialize.Marshal(s)
}

// Signature returns the alert's p_signature value as raw bytes.
func (s *PaymentRefunded) Signature() ([]byte, error) {
	return []byte(s.PSignature), nil
}
|
package day4
import (
"regexp"
"strconv"
"strings"
"github.com/littleajax/adventofcode/helpers"
)
// Validation patterns, compiled once at package scope so checkField does
// not recompile them on every call.
var (
	hclRegex = regexp.MustCompile(`^#[0-9a-f]{6}$`)
	pidRegex = regexp.MustCompile(`^[0-9]{9}$`)
)

// checkField reports whether a single passport field is valid per the
// Advent of Code 2020 day 4 (part 2) rules. "cid" is optional and always
// reported invalid so it is never counted.
func checkField(name string, value string) bool {
	switch name {
	case "cid":
		return false
	case "byr":
		return stringToIntRange(value, 1920, 2002)
	case "iyr":
		return stringToIntRange(value, 2010, 2020)
	case "eyr":
		return stringToIntRange(value, 2020, 2030)
	case "hgt":
		// Need at least one digit plus a two-letter unit.
		if len(value) < 3 {
			return false
		}
		switch value[len(value)-2:] {
		case "cm":
			return stringToIntRange(value[:len(value)-2], 150, 193)
		case "in":
			return stringToIntRange(value[:len(value)-2], 59, 76)
		}
	case "hcl":
		// BUG FIX: a hair color is '#' followed by six HEX digits
		// (0-9 / a-f); the previous pattern [0-9a-z] accepted any
		// lowercase letter. The regex also subsumes the old length check.
		return hclRegex.MatchString(value)
	case "ecl":
		switch value {
		case "amb", "blu", "brn", "gry", "grn", "hzl", "oth":
			return true
		}
	case "pid": // A nine-digit number, including leading zeros.
		return pidRegex.MatchString(value)
	}
	return false
}

// stringToIntRange reports whether s parses as an integer in [min, max].
func stringToIntRange(s string, min int, max int) bool {
	number, err := strconv.Atoi(s)
	if err != nil {
		return false
	}
	return number >= min && number <= max
}
// ValidPassportCount returns how many passport records have at least the
// seven required valid fields ("cid" is optional and never counted).
func ValidPassportCount(passportLines [][]string) (valids int) {
	for _, passport := range passportLines {
		validFields := 0
		for _, field := range passport {
			// Each field is "key:value" with a fixed 3-character key.
			if checkField(field[:3], field[4:]) {
				validFields++
			}
		}
		if validFields >= 7 {
			valids++
		}
	}
	return
}
// ProcessInputs reads day4's input file and groups the space-separated
// fields of each blank-line-delimited passport record.
func ProcessInputs() (passportLines [][]string) {
	inputs := helpers.FetchInputs("./inputs/day4.txt")
	var fields []string
	for _, line := range inputs {
		if line == "" {
			// A blank line terminates the current passport record.
			passportLines = append(passportLines, fields)
			fields = []string{}
			continue
		}
		fields = append(fields, strings.Split(line, " ")...)
	}
	// The final record is not followed by a blank line, so flush it here.
	if len(fields) > 1 {
		passportLines = append(passportLines, fields)
	}
	return
}
|
package strmap
import (
"sync"
)
type (
	// ConcurrentMap is a synchronous map: a map[Key]Value safe for
	// concurrent use, guarded by an RWMutex (readers take RLock,
	// writers take Lock).
	ConcurrentMap struct {
		data  map[Key]Value
		mutex sync.RWMutex // guards data for both readers and writers
	}
)
// NewConcurrentMap returns a ready-to-use empty map.
// Use of nil to empty the ConcurrentMap is okay.
func NewConcurrentMap() *ConcurrentMap {
	return &ConcurrentMap{data: empty()}
}
// Get retrieves the value associated with the key, returning the zero
// Value when the key is absent.
func (m *ConcurrentMap) Get(key Key) Value {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	return m.data[key]
}
// GetOK retrieves the value associated with the key; ok reports whether
// the key was present.
func (m *ConcurrentMap) GetOK(key Key) (value Value, ok bool) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	value, ok = m.data[key]
	return
}
// Set inserts or overwrites the key-value pair.
func (m *ConcurrentMap) Set(key Key, value Value) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.data[key] = value
}
// Copy inserts all key-value pairs from src under a single lock.
func (m *ConcurrentMap) Copy(src map[Key]Value) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	cp(m.data, src)
}
// Remove deletes key from the ConcurrentMap; absent keys are a no-op.
func (m *ConcurrentMap) Remove(key Key) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	delete(m.data, key)
}
// Clear drops all entries by swapping in a fresh empty map.
func (m *ConcurrentMap) Clear() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.data = empty()
}
// Keys returns a snapshot of all keys currently in the map, in no
// particular order.
func (m *ConcurrentMap) Keys() []Key {
	m.mutex.RLock()
	// Pre-size to avoid repeated growth; `for k := range` replaces the
	// non-idiomatic `for k, _ := range`.
	keys := make([]Key, 0, len(m.data))
	for k := range m.data {
		keys = append(keys, k)
	}
	m.mutex.RUnlock()
	return keys
}
// Values returns a snapshot of all values currently in the map, in no
// particular order.
func (m *ConcurrentMap) Values() []Value {
	m.mutex.RLock()
	// Pre-size to avoid repeated growth while the read lock is held.
	values := make([]Value, 0, len(m.data))
	for _, v := range m.data {
		values = append(values, v)
	}
	m.mutex.RUnlock()
	return values
}
// RawMap returns a copy of the underlying map, safe for the caller to
// mutate without further locking.
func (m *ConcurrentMap) RawMap() map[Key]Value {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	return dup(m.data)
}
|
package cos_test
import (
"fmt"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
sut "github.com/rancher-sandbox/ele-testhelpers/vm"
)
// Installer test suite for cOS on a BIOS VM. Each spec boots the system
// under test (SUT) from the LiveCD, runs `elemental install` with a
// particular set of flags/config files, reboots, and asserts which
// system the VM booted into. The specs are order-dependent on the
// BeforeEach setup, so the structure is left untouched.
var _ = Describe("cOS Installer tests", func() {
	var s *sut.SUT
	BeforeEach(func() {
		s = sut.NewSUT()
		s.EventuallyConnects()
	})
	Context("Using bios", func() {
		BeforeEach(func() {
			// Wipe the target disk so every spec installs onto clean media.
			s.EmptyDisk("/dev/sda")
			// Only reboot if we boot from other than the CD to speed up test preparation
			if s.BootFrom() != sut.LiveCD {
				By("Reboot to make sure we boot from CD")
				s.Reboot()
			}
			// Assert we are booting from CD before running the tests
			By("Making sure we booted from CD")
			ExpectWithOffset(1, s.BootFrom()).To(Equal(sut.LiveCD))
			out, err := s.Command("grep /dev/sr /etc/mtab")
			Expect(err).ToNot(HaveOccurred())
			Expect(out).To(ContainSubstring("iso9660"))
			out, err = s.Command("df -h /")
			Expect(err).ToNot(HaveOccurred())
			Expect(out).To(ContainSubstring("LiveOS_rootfs"))
			// squashfs comes from a command line flag at suite level
			if squashfs {
				By("Setting the squashfs recovery install")
				err = s.SendFile("../assets/squashed_recovery.yaml", "/etc/elemental/config.d/install_recovery.yaml", "0770")
				Expect(err).ToNot(HaveOccurred())
			}
		})
		AfterEach(func() {
			// Collect SUT logs only when the spec failed.
			if CurrentSpecReport().Failed() {
				s.GatherAllLogs()
			}
		})
		Context("install source tests", func() {
			It("from iso", func() {
				By("Running the elemental install")
				out, err := s.Command(s.ElementalCmd("install", "/dev/sda"))
				Expect(err).To(BeNil())
				Expect(out).To(ContainSubstring("Mounting disk partitions"))
				Expect(out).To(ContainSubstring("Partitioning device..."))
				Expect(out).To(ContainSubstring("Unmounting disk partitions"))
				Expect(out).To(ContainSubstring("Running after-install hook"))
				if squashfs {
					// Check the squashfs image is used as recovery
					Expect(out).To(ContainSubstring("/run/initramfs/live/rootfs.squashfs into /run/cos/recovery/cOS/recovery.squashfs"))
				}
				// Reboot so we boot into the just installed cos
				s.Reboot()
				By("Checking we booted from the installed cOS")
				ExpectWithOffset(1, s.BootFrom()).To(Equal(sut.Active))
			})
			// Pending spec: install from URL is not implemented yet.
			PIt("from url", func() {})
			It("from docker image", func() {
				By("Running the elemental install")
				out, err := s.Command(s.ElementalCmd("install", "--system.uri", fmt.Sprintf("docker:%s:cos-system-%s", s.GetArtifactsRepo(), s.TestVersion), "/dev/sda"))
				Expect(err).To(BeNil())
				Expect(out).To(ContainSubstring("Mounting disk partitions"))
				Expect(out).To(ContainSubstring("Partitioning device..."))
				Expect(out).To(ContainSubstring("Unmounting disk partitions"))
				Expect(out).To(ContainSubstring("Running after-install hook"))
				if squashfs {
					// Check the squashfs image is used as recovery
					Expect(out).To(ContainSubstring("/run/initramfs/live/rootfs.squashfs into /run/cos/recovery/cOS/recovery.squashfs"))
				}
				// Reboot so we boot into the just installed cos
				s.Reboot()
				By("Checking we booted from the installed cOS")
				ExpectWithOffset(1, s.BootFrom()).To(Equal(sut.Active))
				Expect(s.GetOSRelease("VERSION")).To(Equal(s.TestVersion))
			})
		})
		Context("partition layout tests", func() {
			Context("with partition layout", func() {
				It("Forcing GPT", func() {
					err := s.SendFile("../assets/custom_partitions.yaml", "/etc/elemental/config.d/custom_partitions.yaml", "0770")
					By("Running the elemental installer with a layout file")
					Expect(err).To(BeNil())
					out, err := s.Command(s.ElementalCmd("install", "--force-gpt", "/dev/sda"))
					Expect(err).To(BeNil())
					Expect(out).To(ContainSubstring("Mounting disk partitions"))
					Expect(out).To(ContainSubstring("Partitioning device..."))
					Expect(out).To(ContainSubstring("Unmounting disk partitions"))
					Expect(out).To(ContainSubstring("Running after-install hook"))
					if squashfs {
						// Check the squashfs image is used as recovery
						Expect(out).To(ContainSubstring("/run/initramfs/live/rootfs.squashfs into /run/cos/recovery/cOS/recovery.squashfs"))
					}
					s.Reboot()
					By("Checking we booted from the installed cOS")
					ExpectWithOffset(1, s.BootFrom()).To(Equal(sut.Active))
					// check partition values
					// Values have to match the yaml under ../assets/layout.yaml
					// That is the file that the installer uses so partitions should match those values
					disk := s.GetDiskLayout("/dev/sda")
					for _, part := range []sut.PartitionEntry{
						{
							Label:  "COS_STATE",
							Size:   8192,
							FsType: sut.Ext4,
						},
						{
							Label:  "COS_OEM",
							Size:   10,
							FsType: sut.Ext4,
						},
						{
							Label:  "COS_RECOVERY",
							Size:   4000,
							FsType: sut.Ext2,
						},
						{
							Label:  "COS_PERSISTENT",
							Size:   100,
							FsType: sut.Ext2,
						},
					} {
						CheckPartitionValues(disk, part)
					}
				})
				It("No GPT", func() {
					err := s.SendFile("../assets/custom_partitions.yaml", "/etc/elemental/config.d/custom_partitions.yaml", "0770")
					By("Running the elemental install with a layout file")
					Expect(err).To(BeNil())
					out, err := s.Command(s.ElementalCmd("install", "/dev/sda"))
					Expect(err).To(BeNil())
					Expect(out).To(ContainSubstring("Mounting disk partitions"))
					Expect(out).To(ContainSubstring("Partitioning device..."))
					Expect(out).To(ContainSubstring("Unmounting disk partitions"))
					Expect(out).To(ContainSubstring("Running after-install hook"))
					if squashfs {
						// Check the squashfs image is used as recovery
						Expect(out).To(ContainSubstring("/run/initramfs/live/rootfs.squashfs into /run/cos/recovery/cOS/recovery.squashfs"))
					}
					s.Reboot()
					By("Checking we booted from the installed cOS")
					ExpectWithOffset(1, s.BootFrom()).To(Equal(sut.Active))
					// check partition values
					// Values have to match the yaml under ../assets/layout.yaml
					// That is the file that the installer uses so partitions should match those values
					disk := s.GetDiskLayout("/dev/sda")
					for _, part := range []sut.PartitionEntry{
						{
							Label:  "COS_STATE",
							Size:   8192,
							FsType: sut.Ext4,
						},
						{
							Label:  "COS_OEM",
							Size:   10,
							FsType: sut.Ext4,
						},
						{
							Label:  "COS_RECOVERY",
							Size:   4000,
							FsType: sut.Ext2,
						},
						{
							Label:  "COS_PERSISTENT",
							Size:   100,
							FsType: sut.Ext2,
						},
					} {
						CheckPartitionValues(disk, part)
					}
				})
			})
		})
		Context("efi/gpt tests", func() {
			It("forces gpt", func() {
				By("Running the installer")
				out, err := s.Command(s.ElementalCmd("install", "--part-table gpt", "/dev/sda"))
				Expect(err).To(BeNil())
				Expect(out).To(ContainSubstring("Mounting disk partitions"))
				Expect(out).To(ContainSubstring("Partitioning device..."))
				Expect(out).To(ContainSubstring("Unmounting disk partitions"))
				Expect(out).To(ContainSubstring("Running after-install hook"))
				if squashfs {
					// Check the squashfs image is used as recovery
					Expect(out).To(ContainSubstring("/run/initramfs/live/rootfs.squashfs into /run/cos/recovery/cOS/recovery.squashfs"))
				}
				s.Reboot()
				By("Checking we booted from the installed cOS")
				ExpectWithOffset(1, s.BootFrom()).To(Equal(sut.Active))
			})
			It("forces efi", func() {
				By("Running the installer")
				out, err := s.Command(s.ElementalCmd("install", "--firmware efi", "/dev/sda"))
				Expect(err).To(BeNil())
				Expect(out).To(ContainSubstring("Mounting disk partitions"))
				Expect(out).To(ContainSubstring("Partitioning device..."))
				Expect(out).To(ContainSubstring("Unmounting disk partitions"))
				Expect(out).To(ContainSubstring("Running after-install hook"))
				if squashfs {
					// Check the squashfs image is used as recovery
					Expect(out).To(ContainSubstring("/run/initramfs/live/rootfs.squashfs into /run/cos/recovery/cOS/recovery.squashfs"))
				}
				s.Reboot()
				// We are on a bios system, we should not be able to boot from an EFI installed system!
				By("Checking we booted from the CD")
				ExpectWithOffset(1, s.BootFrom()).To(Equal(sut.LiveCD))
			})
		})
		Context("config file tests", func() {
			It("uses a proper config file", func() {
				err := s.SendFile("../assets/hostname.yaml", "/tmp/config.yaml", "0770")
				By("Running the elemental install with a config file")
				Expect(err).To(BeNil())
				By("Running the installer")
				out, err := s.Command(s.ElementalCmd("install", "--cloud-init", "/tmp/config.yaml", "/dev/sda"))
				Expect(err).To(BeNil())
				Expect(out).To(ContainSubstring("Mounting disk partitions"))
				Expect(out).To(ContainSubstring("Partitioning device..."))
				Expect(out).To(ContainSubstring("Unmounting disk partitions"))
				Expect(out).To(ContainSubstring("Running after-install hook"))
				if squashfs {
					// Check the squashfs image is used as recovery
					Expect(out).To(ContainSubstring("/run/initramfs/live/rootfs.squashfs into /run/cos/recovery/cOS/recovery.squashfs"))
				}
				s.Reboot()
				By("Checking we booted from the installed cOS")
				ExpectWithOffset(1, s.BootFrom()).To(Equal(sut.Active))
				By("Checking config file was run")
				// The cloud-init file should have been persisted to /oem
				// and applied (hostname set by ../assets/hostname.yaml).
				out, err = s.Command("stat /oem/90_custom.yaml")
				Expect(err).To(BeNil())
				out, err = s.Command("hostname")
				Expect(err).To(BeNil())
				Expect(out).To(ContainSubstring("testhostname"))
			})
		})
	})
})
|
package main
import (
"encoding/json"
"fmt"
"io"
"log"
"strconv"
ini "gopkg.in/ini.v1"
)
// AccessPoint is the access point configuration, mapped both to the
// create_ap-style KEY=VALUE ini format (ini tags) and to JSON (json tags).
type AccessPoint struct {
	Channel         string `ini:"CHANNEL" json:"channel"`
	Gateway         string `ini:"GATEWAY" json:"gateway"`
	WPAVersion      int    `ini:"WPA_VERSION" json:"wpa_version"`
	ETCHosts        int    `ini:"ETC_HOSTS" json:"etc_hosts"`
	DHCPDNS         string `ini:"DHCP_DNS" json:"dhcp_dns"`
	NoDNS           int    `ini:"NO_DNS" json:"no_dns"`
	Hidden          int    `ini:"HIDDEN" json:"hidden"`
	MACFilter       int    `ini:"MAC_FILTER" json:"mac_filter"`
	MACFilterAccept string `ini:"MAC_FILTER_ACCEPT" json:"mac-filter_accept"`
	IsolateClients  int    `ini:"ISOLATE_CLIENTS" json:"isolate_clients"`
	ShareMethod     string `ini:"SHARE_METHOD" json:"share_method"`
	// IEEE80211N / IEEE80211AC carry no tags; both ini and json fall back
	// to matching the field name itself (see DefaultAccesPoint's JSON).
	IEEE80211N    int
	IEEE80211AC   int
	HTCapAb       string  `ini:"HT_CAPAB" json:"ht_capab"`
	VHTCapAb      string  `ini:"VHT_CAPAB" json:"vht_capab"`
	Driver        string  `ini:"DRIVER" json:"driver"`
	NoVirt        int     `ini:"NO_VIRT" json:"no_virt,omitempty"`
	Country       string  `ini:"COUNTRY" json:"country"`
	FreqBand      float64 `ini:"FREQ_BAND" json:"freq_band"`
	NewMACAddr    string  `ini:"NEW_MACADDR" json:"new_macaddr"`
	Daemonize     int     `ini:"DAEMONIZE" json:"daemonize"`
	NoHaveGED     int     `ini:"NO_HAVEGED" json:"no_haveged"`
	WifiIface     string  `ini:"WIFI_IFACE" json:"wifi_interface"`
	InternetIface string  `ini:"INTERNET_IFACE" json:"internet_interface"`
	SSID          string  `ini:"SSID" json:"ssid"`
	Passphrase    string  `ini:"PASSPHRASE" json:"passphrase"`
	UsePsk        int     `ini:"USE_PSK" json:"use_psk"`
}
// LoadAPFromConf loads an access point configuration from ini-formatted
// bytes (KEY=VALUE lines), mapping keys onto AccessPoint via its ini tags.
func LoadAPFromConf(src []byte) (*AccessPoint, error) {
	cfg, err := ini.Load(src)
	if err != nil {
		return nil, err
	}
	a := &AccessPoint{}
	err = cfg.MapTo(a)
	if err != nil {
		return nil, err
	}
	return a, nil
}
// LoadFromJSON decodes a JSON AccessPointConfig and applies it on top of
// the default access point configuration.
func LoadFromJSON(src []byte) (*AccessPoint, error) {
	var cfg AccessPointConfig
	if err := json.Unmarshal(src, &cfg); err != nil {
		return nil, err
	}
	a := DefaultAccesPoint()
	a.Update(&cfg)
	return a, nil
}
// WriteTo writes the ini (KEY=VALUE line) representation of *AccessPoint
// to dst and returns the number of bytes written, per the io.WriterTo
// convention.
func (a *AccessPoint) WriteTo(dst io.Writer) (int64, error) {
	f := ini.Empty()
	if err := f.ReflectFrom(a); err != nil {
		return 0, err
	}
	// BUG FIX: the byte count was previously hard-coded to 0 and write
	// errors from Fprintf were silently discarded.
	var written int64
	for _, sec := range f.Sections() {
		for _, name := range sec.KeyStrings() {
			n, err := fmt.Fprintf(dst, "%s=%s\n", name, sec.Key(name).String())
			written += int64(n)
			if err != nil {
				return written, err
			}
		}
	}
	return written, nil
}
// AccessPointConfig is the JSON-facing subset of access point settings
// accepted from clients and applied onto an AccessPoint via Update.
type AccessPointConfig struct {
	Interface  string `json:"interface"`
	Hidden     bool   `json:"hidden"`
	Channel    int    `json:"channel"`
	SSID       string `json:"ssid"`
	Passphrase string `json:"passphrase"`
	Gateway    string `json:"gateway"`
	// ShareInterfaec is a misspelling of "ShareInterface" kept because it
	// is part of the exported API; the JSON name is spelled correctly.
	ShareInterfaec string `json:"shared_interface"`
}
// Update applies the JSON-facing config ap onto a. Empty strings mean
// "leave unchanged", except the share interface, which always resets the
// share method ("nat" when set, "none" when empty).
func (a *AccessPoint) Update(ap *AccessPointConfig) {
	if ap.ShareInterfaec == "" {
		a.ShareMethod, a.InternetIface = "none", ""
	} else {
		a.ShareMethod, a.InternetIface = "nat", ap.ShareInterfaec
	}
	if ap.SSID != "" {
		a.SSID, a.Passphrase = ap.SSID, ap.Passphrase
	}
	if ap.Gateway != "" {
		a.Gateway = ap.Gateway
	}
	if ap.Channel > 0 {
		a.Channel = fmt.Sprint(ap.Channel)
	}
	// Bool → 0/1 flag.
	a.Hidden = 0
	if ap.Hidden {
		a.Hidden = 1
	}
	if ap.Interface != "" {
		a.WifiIface = ap.Interface
	}
}
// DefaultAccesPoint returns the built-in default configuration, decoded
// from an embedded JSON literal. The Unmarshal error is deliberately
// ignored: the literal is constant and known to parse.
func DefaultAccesPoint() *AccessPoint {
	var txt = `
{
"channel": "default",
"gateway": "192.168.12.1",
"wpa_version": 2,
"etc_hosts": 0,
"dhcp_dns": "gateway",
"no_dns": 0,
"hidden": 0,
"mac_filter": 0,
"mac-filter_accept": "/etc/hostapd/hostapd.accept",
"isolate_clients": 1,
"share_method": "nat",
"IEEE80211N": 0,
"IEEE80211AC": 0,
"ht_capab": "[HT40+]",
"vht_capab": "",
"driver": "nl80211",
"no_virt": 1,
"country": "",
"freq_band": 2.4,
"new_macaddr": "",
"daemonize": 0,
"no_haveged": 0,
"wifi_interface": "wlan0",
"internet_interface": "eth0",
"ssid": "voxbox",
"passphrase": "voxbox99",
"use_psk": 0
}
`
	a := &AccessPoint{}
	_ = json.Unmarshal([]byte(txt), a)
	return a
}
// State converts the ini-facing AccessPoint back to its JSON-facing
// AccessPointConfig view.
//
// NOTE(review): a Channel that is neither "" nor "default" and fails to
// parse aborts the whole process via log.Fatal — consider returning an
// error instead.
func (a *AccessPoint) State() *AccessPointConfig {
	ap := &AccessPointConfig{
		SSID:           a.SSID,
		Passphrase:     a.Passphrase,
		Gateway:        a.Gateway,
		Interface:      a.WifiIface,
		ShareInterfaec: a.InternetIface,
	}
	// 0/1 flag → bool.
	if a.Hidden == 1 {
		ap.Hidden = true
	}
	// "default" (or empty) maps to Channel 0 in the JSON view.
	if a.Channel != "" && a.Channel != "default" {
		i, err := strconv.Atoi(a.Channel)
		if err != nil {
			log.Fatal(err)
		}
		ap.Channel = i
	}
	return ap
}
|
package helper
// ConvInterfaceSliceToStringSlice converts a []interface{} to a []string
// of the same length. Elements that are not strings become "".
func ConvInterfaceSliceToStringSlice(slice []interface{}) []string {
	result := make([]string, len(slice))
	for i := range slice {
		if s, ok := slice[i].(string); ok {
			result[i] = s
		}
	}
	return result
}
|
package vsphere
// Metadata contains vSphere metadata (e.g. for uninstalling the cluster).
type Metadata struct {
	// VCenter is the domain name or IP address of the vCenter.
	VCenter string `json:"vCenter"`
	// Username is the name of the user to use to connect to the vCenter.
	Username string `json:"username"`
	// Password is the password for the user to use to connect to the vCenter.
	Password string `json:"password"`
	// TerraformPlatform identifies the Terraform platform variant used to
	// provision the cluster — presumably one of a fixed set of platform
	// names; TODO confirm (the original comment trailed off at "the type...").
	TerraformPlatform string `json:"terraform_platform"`
}
|
package main
import (
"context"
"flag"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/danikarik/handler/pkg/service"
)
// addr is the listen address; when empty, main falls back to ":"+$PORT.
var addr = flag.String("http.addr", "", "Address for listening")

// main starts the HTTP service and shuts it down gracefully when either
// SIGINT/SIGTERM arrives or the server itself fails, giving in-flight
// requests up to 10 seconds to complete.
func main() {
	flag.Parse()
	if *addr == "" {
		*addr = ":" + os.Getenv("PORT")
	}
	var (
		srv = &http.Server{
			Addr:         *addr,
			Handler:      service.New(),
			ReadTimeout:  10 * time.Second,
			WriteTimeout: 10 * time.Second,
		}
		errC = make(chan error, 1)
	)
	go func() {
		// The signal channel must be buffered: signal.Notify does not
		// block when delivering, so an unbuffered channel can miss the
		// signal entirely (this is what `go vet` warns about).
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		errC <- fmt.Errorf("%s", <-c)
	}()
	go func() {
		log.Println("start listening on: " + *addr)
		errC <- srv.ListenAndServe()
	}()
	// Block until a signal arrives or the server returns an error.
	<-errC
	fmt.Println("")
	log.Println("shutting down server ...")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Fatalf("shutdown failed: %v", err)
	}
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"context"
"fmt"
"testing"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/external"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/testutils"
)
// createMockTiKVStoreOptions builds the mockstore options used by the
// tests in this file: bootstrap a single-store cluster, then register
// tiflashNum extra TiFlash-labelled stores, each holding one peer of
// the bootstrap region, all backed by the embedded unistore engine.
func createMockTiKVStoreOptions(tiflashNum int) []mockstore.MockTiKVStoreOption {
	inspector := func(c testutils.Cluster) {
		cluster := c.(*unistore.Cluster)
		_, _, regionID := mockstore.BootstrapWithSingleStore(c)
		for i := 0; i < tiflashNum; i++ {
			storeID := c.AllocID()
			peerID := c.AllocID()
			cluster.AddStore(storeID, fmt.Sprintf("tiflash%d", i), &metapb.StoreLabel{Key: "engine", Value: "tiflash"})
			cluster.AddPeer(regionID, storeID, peerID)
		}
	}
	return []mockstore.MockTiKVStoreOption{
		mockstore.WithClusterInspector(inspector),
		mockstore.WithStoreType(mockstore.EmbedUnistore),
	}
}
// TestStoreErr checks batch-cop error handling against one TiFlash
// store: a cancelled RPC surfaces context.Canceled, a one-shot RPC
// error is retried successfully, and a persistent RPC error fails the
// query.
func TestStoreErr(t *testing.T) {
	store := testkit.CreateMockStore(t, createMockTiKVStoreOptions(1)...)
	// Pretend TiFlash stores exist so `set tiflash replica` is accepted.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount"))
	}()
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t(a int not null, b int not null)")
	tk.MustExec("alter table t set tiflash replica 1")
	tb := external.GetTableByName(t, tk, "test", "t")
	tk.MustExec("set @@session.tidb_allow_tiflash_cop=ON")
	// Mark the replica available so the planner may route to TiFlash.
	err := domain.GetDomain(tk.Session()).DDL().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true)
	require.NoError(t, err)
	tk.MustExec("insert into t values(1,0)")
	tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"")
	tk.MustExec("set @@session.tidb_allow_mpp=OFF")
	// A cancelled batch-cop request must surface as context.Canceled.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/BatchCopCancelled", "1*return(true)"))
	err = tk.QueryToErr("select count(*) from t")
	require.Equal(t, context.Canceled, errors.Cause(err))
	// "1*return": the RPC fails exactly once; the retry should succeed.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/BatchCopRpcErrtiflash0", "1*return(\"tiflash0\")"))
	tk.MustQuery("select count(*) from t").Check(testkit.Rows("1"))
	// "return": the RPC fails every time, so the query must error out.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/BatchCopRpcErrtiflash0", "return(\"tiflash0\")"))
	err = tk.QueryToErr("select count(*) from t")
	require.Error(t, err)
}
// TestStoreSwitchPeer checks batch-cop failover with two TiFlash
// stores: when tiflash0 keeps failing the query succeeds via the other
// peer, and when both stores fail the query errors out.
func TestStoreSwitchPeer(t *testing.T) {
	store := testkit.CreateMockStore(t, createMockTiKVStoreOptions(2)...)
	// Pretend TiFlash stores exist so `set tiflash replica` is accepted.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount"))
	}()
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t(a int not null, b int not null)")
	tk.MustExec("alter table t set tiflash replica 1")
	tb := external.GetTableByName(t, tk, "test", "t")
	tk.MustExec("set @@session.tidb_allow_tiflash_cop=ON")
	// Mark the replica available so the planner may route to TiFlash.
	err := domain.GetDomain(tk.Session()).DDL().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true)
	require.NoError(t, err)
	tk.MustExec("insert into t values(1,0)")
	tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"")
	tk.MustExec("set @@session.tidb_allow_mpp=OFF")
	// tiflash0 fails persistently; the request should switch to tiflash1.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/BatchCopRpcErrtiflash0", "return(\"tiflash0\")"))
	tk.MustQuery("select count(*) from t").Check(testkit.Rows("1"))
	// With tiflash1 failing too, no usable peer remains.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/BatchCopRpcErrtiflash1", "return(\"tiflash1\")"))
	err = tk.QueryToErr("select count(*) from t")
	require.Error(t, err)
}
|
package utils
import "os"
// GetEnvOrDefault returns the value of the environment variable k, or
// defaultValue when the variable is unset or set to the empty string.
func GetEnvOrDefault(k, defaultValue string) string {
	if v := os.Getenv(k); v != "" {
		return v
	}
	return defaultValue
}
|
package order
import (
"github.com/jinzhu/gorm"
"github.com/qor/transition"
"github.com/satori/go.uuid"
"github.com/tppgit/we_service/entity/service"
"github.com/tppgit/we_service/entity/user"
"time"
)
// Comment is a user comment attached to an Order. UserName is denormalized
// onto the row so comments can be listed without joining users.
type Comment struct {
	ID        uuid.UUID `gorm:"type:char(36); primary_key"`
	Content   string    `gorm:"type:text; not null"`
	OrderID   uuid.UUID `gorm:"type:char(36); not null; column:fk_order"`
	UserID    uuid.UUID `gorm:"type:char(36); not null; column:fk_user"`
	User      user.User `gorm:"foreignkey:UserID"`
	UserName  string    `gorm:"type:varchar(64); not null"`
	CreatedAt time.Time
	UpdatedAt time.Time
}

// BeforeCreate is a gorm hook that assigns a fresh UUIDv4 primary key
// before the row is inserted.
func (d *Comment) BeforeCreate(scope *gorm.Scope) error {
	return scope.SetColumn("ID", uuid.NewV4())
}
// History records that a user acted on an order; the timestamps carry
// when. Note UserID/OrderID are plain strings here, unlike the
// uuid.UUID keys used on Comment/Order.
type History struct {
	ID        uuid.UUID `gorm:"type:char(36); primary_key"`
	UserID    string    `gorm:"type:char(36); column:fk_user"`
	User      user.User `gorm:"foreignkey:UserID"`
	OrderID   string    `gorm:"type:char(36); column:fk_order"`
	CreatedAt time.Time
	UpdatedAt time.Time
}

// BeforeCreate is a gorm hook that assigns a fresh UUIDv4 primary key
// before the row is inserted.
func (d *History) BeforeCreate(scope *gorm.Scope) error {
	return scope.SetColumn("ID", uuid.NewV4())
}
// Order is the central booking record: what service, for whom, when,
// at what cost, plus its comments, history and workflow state (the
// embedded transition.Transition is persisted in the `state` column).
type Order struct {
	ID              uuid.UUID       `gorm:"type:char(36); primary_key"`
	ApartmentNo     string          `gorm:"type:varchar(64)"`
	BuildingName    string          `gorm:"type:varchar(64)"`
	Code            string          `gorm:"type:varchar(100); not null"`
	DateStart       time.Time       `gorm:"type:date; not null"`
	TimeStart       string          `gorm:"not null"`
	Duration        string          `gorm:"type:text; not null"`
	Destination     string          `gorm:"type:text"`
	AdditionDetails string          `gorm:"type:text"`
	TotalCost       float32         `gorm:"float; not null"`
	ExtraCost       float32         `gorm:"float"`
	Service         service.Service `gorm:"foreignkey:ServiceID" json:"-"`
	ServiceID       uuid.UUID       `gorm:"type:char(36); column:fk_service"`
	ResidentID      uuid.UUID       `gorm:"type:char(36); column:fk_resident"`
	Resident        user.User       `gorm:"foreignkey:ResidentID" json:"-"`
	Comments        []Comment       `gorm:"foreignkey:fk_order" json:"-"`
	Histories       []History       `gorm:"foreignkey:fk_order" json:"-"`
	StaffID         uuid.UUID       `gorm:"type:char(36); column:fk_staff"`
	Staff           user.User       `gorm:"foreignkey:StaffID" json:"-"`
	CompanyID       uuid.UUID       `gorm:"type:char(36); column:fk_company"`
	Company         user.Company    `gorm:"foreignkey:CompanyID" json:"-"`
	transition.Transition `gorm:"column:state"`
	// FlagNotification marks whether a notification is pending/sent for
	// this order — TODO confirm exact meaning against the notifier code.
	FlagNotification bool
	CreatedAt        time.Time
	UpdatedAt        time.Time
}

// NotificationForOrderMobile is a count payload for the mobile app's
// order-notification badge.
type NotificationForOrderMobile struct {
	Total int32
}

// BeforeCreate is a gorm hook that assigns a fresh UUIDv4 primary key
// before the row is inserted.
func (d *Order) BeforeCreate(scope *gorm.Scope) error {
	return scope.SetColumn("ID", uuid.NewV4())
}
// Payment links an order to the resident who pays and the provider who
// is paid. Foreign keys are stored as plain strings here.
type Payment struct {
	ID         uuid.UUID `gorm:"type:char(36); primary_key"`
	OrderID    string    `gorm:"type:char(36); column:fk_order"`
	ResidentID string    `gorm:"type:char(36); column:fk_resident"`
	ProviderID string    `gorm:"type:char(36); column:fk_provider"`
	CreatedAt  time.Time
	UpdatedAt  time.Time
}

// BeforeCreate is a gorm hook that assigns a fresh UUIDv4 primary key
// before the row is inserted.
func (d *Payment) BeforeCreate(scope *gorm.Scope) error {
	return scope.SetColumn("ID", uuid.NewV4())
}
// Status is a lookup table of order statuses with a display Name and a
// machine-friendly Alias.
type Status struct {
	ID        int32  `gorm:"AUTO_INCREMENT; primary_key"`
	Name      string `gorm:"type:varchar(64); not null"`
	Alias     string `gorm:"type:varchar(64); not null"`
	CreatedAt time.Time
	UpdatedAt time.Time
}
// OrderState represents the workflow state of an Order (persisted via
// the transition.Transition embed's `state` column).
type OrderState string

// Order workflow states.
const (
	// NewRequest was previously an untyped string constant while every
	// sibling was typed OrderState; it is now typed for consistency.
	NewRequest       = OrderState("Pending")
	SPPending        = OrderState("SP_Pending")
	ResidentPending  = OrderState("Resident_Pending")
	RequestConfirmed = OrderState("Request_Confirmed")
	InProgress       = OrderState("In_Progress")
	PaymentPending   = OrderState("Payment_Pending")
	Completed        = OrderState("Completed")
	Cancel           = OrderState("Cancelled")
)
// Display labels used when grouping orders for the UI.
const (
	LabelNew      = "NEW"
	LabelPending  = "PENDING"
	LabelAccepted = "ACCEPTED"
	LabelPayment  = "PAYMENT"
)

// CountOrderByState pairs one of the labels above with the number of
// orders in that bucket.
type CountOrderByState struct {
	Label string
	Total int32
}

// StatusId is a typed alias for Status.ID values.
type StatusId int32
|
package schema
// AccessControl represents the configuration related to ACLs.
type AccessControl struct {
	// The default policy if no other policy matches the request.
	DefaultPolicy string `koanf:"default_policy" json:"default_policy" jsonschema:"default=deny,enum=deny,enum=one_factor,enum=two_factor,title=Default Authorization Policy" jsonschema_description:"The default policy applied to all authorization requests. Not relevant to OpenID Connect."`
	// Represents a list of named network groups.
	Networks []AccessControlNetwork `koanf:"networks" json:"networks" jsonschema:"title=Named Networks" jsonschema_description:"The list of named networks which can be reused in any ACL rule"`
	// The ACL rules list.
	Rules []AccessControlRule `koanf:"rules" json:"rules" jsonschema:"title=Rules List" jsonschema_description:"The list of ACL rules to enumerate for requests"`
}

// AccessControlNetwork represents one ACL network group entry.
type AccessControlNetwork struct {
	// Name by which rules reference this network group.
	Name string `koanf:"name" json:"name" jsonschema:"required,title=Network Name" jsonschema_description:"The name of this network to be used in the networks section of the rules section"`
	// The IPs / CIDR ranges this group covers.
	Networks AccessControlNetworkNetworks `koanf:"networks" json:"networks" jsonschema:"required,title=Networks" jsonschema_description:"The remote IP's or network ranges in CIDR notation that this rule applies to"`
}

// AccessControlRule represents one ACL rule entry. Domains/DomainsRegex
// select which hosts the rule covers; the remaining fields narrow the
// match (subjects, networks, resources, methods, query), and Policy is
// applied when everything matches.
type AccessControlRule struct {
	Domains AccessControlRuleDomains `koanf:"domain" json:"domain" jsonschema:"oneof_required=Domain,uniqueItems,title=Domain Literals" jsonschema_description:"The literal domains to match the domain against that this rule applies to"`
	DomainsRegex AccessControlRuleRegex `koanf:"domain_regex" json:"domain_regex" jsonschema:"oneof_required=Domain Regex,title=Domain Regex Patterns" jsonschema_description:"The regex patterns to match the domain against that this rule applies to"`
	Policy string `koanf:"policy" json:"policy" jsonschema:"required,enum=bypass,enum=deny,enum=one_factor,enum=two_factor,title=Rule Policy" jsonschema_description:"The policy this rule applies when all criteria match"`
	Subjects AccessControlRuleSubjects `koanf:"subject" json:"subject" jsonschema:"title=AccessControlRuleSubjects" jsonschema_description:"The users or groups that this rule applies to"`
	Networks AccessControlRuleNetworks `koanf:"networks" json:"networks" jsonschema:"title=Networks" jsonschema_description:"The remote IP's, network ranges in CIDR notation, or network names that this rule applies to"`
	Resources AccessControlRuleRegex `koanf:"resources" json:"resources" jsonschema:"title=Resources or Paths" jsonschema_description:"The regex patterns to match the resource paths that this rule applies to"`
	Methods AccessControlRuleMethods `koanf:"methods" json:"methods" jsonschema:"enum=GET,enum=HEAD,enum=POST,enum=PUT,enum=DELETE,enum=CONNECT,enum=OPTIONS,enum=TRACE,enum=PATCH,enum=PROPFIND,enum=PROPPATCH,enum=MKCOL,enum=COPY,enum=MOVE,enum=LOCK,enum=UNLOCK" jsonschema_description:"The list of request methods this rule applies to"`
	Query [][]AccessControlRuleQuery `koanf:"query" json:"query" jsonschema:"title=Query Rules" jsonschema_description:"The list of query parameter rules this rule applies to"`
}

// AccessControlRuleQuery represents the ACL query criteria: Operator is
// applied to the query parameter named Key, compared against Value.
type AccessControlRuleQuery struct {
	Operator string `koanf:"operator" json:"operator" jsonschema:"enum=equal,enum=not equal,enum=present,enum=absent,enum=pattern,enum=not pattern,title=Operator" jsonschema_description:"The list of query parameter rules this rule applies to"`
	Key string `koanf:"key" json:"key" jsonschema:"required,title=Key" jsonschema_description:"The Query Parameter key this rule applies to"`
	Value any `koanf:"value" json:"value" jsonschema:"title=Value" jsonschema_description:"The Query Parameter value for this rule"`
}
// DefaultACLNetwork represents the default configuration related to access control network group configuration.
var DefaultACLNetwork = []AccessControlNetwork{
	// Loopback only.
	{
		Name:     "localhost",
		Networks: []string{"127.0.0.1"},
	},
	// RFC1918 10/8 private range.
	{
		Name:     "internal",
		Networks: []string{"10.0.0.0/8"},
	},
}

// DefaultACLRule represents the default configuration related to access control rule configuration.
var DefaultACLRule = []AccessControlRule{
	{
		Domains: []string{"public.example.com"},
		Policy:  "bypass",
	},
	{
		Domains: []string{"singlefactor.example.com"},
		Policy:  "one_factor",
	},
	{
		Domains: []string{"secure.example.com"},
		Policy:  policyTwoFactor,
	},
}
|
package main
import (
"./dock"
"github.com/ssgo/s"
)
// main wires up the dock service in a fixed order: register handlers,
// start the dock workers in the background, run the s server
// (presumably blocking until shutdown — confirm in ssgo/s docs), then
// stop the workers on the way out.
func main() {
	dock.Registers()
	dock.AsyncStart()
	s.Start()
	dock.AsyncStop()
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/url"
"path/filepath"
"time"
"github.com/jonmorehouse/gatekeeper/gatekeeper"
"github.com/jonmorehouse/gatekeeper/gatekeeper/utils"
"github.com/mitchellh/go-homedir"
"gopkg.in/yaml.v2"
)
// serviceDef represents an individual upstream configuration in a yaml
// file. Specifically, this exposes the upstream's identity, routing
// attributes (protocols, prefixes, hostnames), free-form extras, and
// its backend addresses.
type serviceDef struct {
	ID           string                 `yaml:"id"`
	Name         string                 `yaml:"name"`
	Timeout      time.Duration          `yaml:"timeout"`
	Protocols    []string               `yaml:"protocols"`
	Prefixes     []string               `yaml:"prefixes"`
	Hostnames    []string               `yaml:"hostnames"`
	Extra        map[string]interface{} `yaml:"extra"`
	Backends     []string               `yaml:"backends"`
	BackendExtra map[string]interface{} `yaml:"backend_extra"`
}

// serviceDefs maps an upstream name to its definition.
type serviceDefs map[string]serviceDef
// parseConfig accepts a configuration filepath and is responsible for parsing
// that into a set of upstream definitions. This method is responsible for
// verifying the filepath, loading the file and parsing it as YAML.
// (The previous comment said JSON; the code uses yaml.Unmarshal.)
// Returns InvalidConfigErr for path/read problems and
// UnparseableConfigErr for YAML problems; details go to the log.
func parseConfig(rawPath string) (serviceDefs, error) {
	// expand the homedirectory, if one is present in the given path
	expandedPath, err := homedir.Expand(rawPath)
	if err != nil {
		log.Println(err)
		return nil, InvalidConfigErr
	}
	// attempt to load, read and parse the configuration file
	absPath, err := filepath.Abs(expandedPath)
	if err != nil {
		log.Println(err)
		return nil, InvalidConfigErr
	}
	rawYaml, err := ioutil.ReadFile(absPath)
	if err != nil {
		log.Println(err)
		return nil, InvalidConfigErr
	}
	var defs serviceDefs
	err = yaml.Unmarshal(rawYaml, &defs)
	if err != nil {
		log.Println(err)
		// The file exists and was readable but is not valid YAML.
		return nil, UnparseableConfigErr
	}
	return defs, nil
}
// syncServices accepts a config map with upstream definitions as well as
// ServiceContainer with which to sync the upstreams too. For each upstream and
// its backends, it is responsible for writing the correct types into
// serviceContainer; bubbling up and casting any errors where needed.
func syncServices(upstreams serviceDefs, container utils.ServiceContainer) error {
	for name, serviceDef := range upstreams {
		rawID := serviceDef.ID
		// Fall back to a deterministic ID derived from the map key when
		// the definition does not carry an explicit one.
		if rawID == "" {
			rawID = fmt.Sprintf("static-upstreams:%s", name)
		}
		id := gatekeeper.UpstreamID(rawID)
		protocols, err := gatekeeper.ParseProtocols(serviceDef.Protocols)
		if err != nil {
			return err
		}
		upstream := &gatekeeper.Upstream{
			ID:        id,
			Name:      name,
			Timeout:   serviceDef.Timeout,
			Protocols: protocols,
			Hostnames: serviceDef.Hostnames,
			Prefixes:  serviceDef.Prefixes,
			Extra:     serviceDef.Extra,
		}
		if err := container.AddUpstream(upstream); err != nil {
			return err
		}
		for idx, address := range serviceDef.Backends {
			// url.Parse is used purely for validation; the parsed value
			// is discarded and the raw address string is stored.
			if _, err := url.Parse(address); err != nil {
				return err
			}
			backend := &gatekeeper.Backend{
				// Backend IDs are derived from the upstream ID plus the
				// backend's position in the list.
				ID:      gatekeeper.BackendID(fmt.Sprintf("%s:backend:%d", id, idx)),
				Address: address,
				Extra:   serviceDef.BackendExtra,
			}
			if err := container.AddBackend(id, backend); err != nil {
				return err
			}
		}
	}
	return nil
}
|
package etcd
import (
"time"
"github.com/yuexclusive/utils/config"
etcd "go.etcd.io/etcd/client/v3"
)
// Client constructs an etcd v3 client pointed at the endpoints from the
// application configuration, with a 10 second dial timeout.
func Client() (*etcd.Client, error) {
	// Named cfg (not config) so the imported config package is not
	// shadowed inside this function.
	cfg := etcd.Config{
		Endpoints:   config.MustGet().ETCDAddress,
		DialTimeout: 10 * time.Second,
	}
	return etcd.New(cfg)
}
|
package kcpNetwork
import (
"bytes"
"encoding/binary"
"github.com/yaice-rx/yaice/network"
"github.com/yaice-rx/yaice/utils"
)
const (
	ConstMsgLength = 4 // byte width of the length field in the packet header
	ConstMsgIdLen  = 4 // byte width of the message-id field
)

// packet is a stateless codec implementing network.IPacket for the KCP
// transport.
type packet struct {
}

// NewPacket returns a new packet codec.
func NewPacket() network.IPacket {
	return &packet{}
}

// GetHeadLen returns the size in bytes of the header's length field.
func (dp *packet) GetHeadLen() uint32 {
	return ConstMsgLength
}
// Pack serializes a TransitData into wire format:
// [4-byte length (id + payload)][4-byte message id][payload].
// The ispos parameter is unused here — presumably required by the
// network.IPacket interface; confirm against its declaration.
func (dp *packet) Pack(msg network.TransitData, ispos int64) []byte {
	msgLength := int32(len(msg.Data) + ConstMsgIdLen)
	dataLen := utils.IntToBytes(msgLength)
	dataId := utils.IntToBytes(msg.MsgId)
	return append(append(dataLen, dataId...), msg.Data...)
}
// Unpack decodes a message from binaryData: the first ConstMsgIdLen
// bytes are read big-endian into msg.ID, and everything after them
// becomes msg.Data.
//
// NOTE(review): Pack writes [length][id][payload], but Unpack treats
// the leading bytes as the ID — this only lines up if the caller strips
// the length prefix before calling Unpack; confirm against the read
// loop. Also note binaryData shorter than ConstMsgIdLen would make the
// slice expression below panic after binary.Read fails short.
func (dp *packet) Unpack(binaryData []byte) (network.IMessage, error, func(conn network.IConn)) {
	// Reader over the raw input bytes.
	dataBuff := bytes.NewReader(binaryData)
	msg := &Message{}
	// Read the message ID from the leading bytes (big-endian).
	if err := binary.Read(dataBuff, binary.BigEndian, &msg.ID); err != nil {
		return nil, err, nil
	}
	// The remainder of the buffer is the payload.
	msg.Data = binaryData[ConstMsgIdLen:]
	return msg, nil, nil
}
|
package p05
// checkRecord reports whether an attendance record is awardable:
// strictly fewer than 2 'A' (absent) entries overall and never 3 or
// more consecutive 'L' (late) entries. Any other character counts as
// present and breaks a late streak.
func checkRecord(s string) bool {
	absences, lateStreak := 0, 0
	for _, c := range s {
		switch c {
		case 'A':
			absences++
			if absences > 1 {
				return false
			}
			lateStreak = 0
		case 'L':
			lateStreak++
			if lateStreak > 2 {
				return false
			}
		default:
			lateStreak = 0
		}
	}
	return true
}
|
package Problem0522
import (
"sort"
)
// findLUSlength returns the length of the longest uncommon subsequence
// among strs — the longest string that is a subsequence of exactly one
// input string — or -1 when none exists.
func findLUSlength(strs []string) int {
	// Count occurrences: a duplicated string is automatically a common
	// subsequence of its copies, so it can never be the answer.
	occurrences := make(map[string]int, len(strs))
	for _, s := range strs {
		occurrences[s]++
	}
	// Keep one copy of each distinct string, reusing the input slice's
	// storage to avoid re-checking duplicates below.
	strs = strs[:0]
	for s := range occurrences {
		strs = append(strs, s)
	}
	// Longest first, so the first surviving candidate is the answer and
	// only strictly-earlier (longer or equal) strings need checking.
	sort.Sort(stringSlice(strs))
	for i, s := range strs {
		if occurrences[s] > 1 {
			continue // appears more than once: cannot be uncommon
		}
		// s qualifies only if it is not a subsequence of any earlier
		// (longer) string in the sorted order.
		if !isSubOf(s, strs[:i]) {
			return len(s)
		}
	}
	return -1
}

// isSubOf reports whether s is a subsequence of any string in ss.
// ss holds only strings at least as long as s (sorted by length).
func isSubOf(s string, ss []string) bool {
	for _, longer := range ss {
		if isSub(s, longer) {
			return true
		}
	}
	return false
}

// isSub reports whether a is a subsequence of b (two-pointer scan).
func isSub(a, b string) bool {
	matched := 0
	for j := 0; j < len(b) && matched < len(a); j++ {
		if a[matched] == b[j] {
			matched++
		}
	}
	return matched == len(a)
}

// stringSlice sorts strings by length, longest first (sort.Interface).
type stringSlice []string

func (ss stringSlice) Len() int           { return len(ss) }
func (ss stringSlice) Less(i, j int) bool { return len(ss[i]) > len(ss[j]) }
func (ss stringSlice) Swap(i, j int)      { ss[i], ss[j] = ss[j], ss[i] }
|
package main
import (
"fmt"
"log"
"os"
"os/exec"
"strings"
cli "gopkg.in/urfave/cli.v1"
"github.com/transactional-cloud-serving-benchmark/tcsb/kv_query_util"
)
// main parses CLI flags, spawns the configured benchmark client as an
// IPC child process wired up via stdin/stdout/stderr pipes, runs the
// query driver against it, and waits for the child to exit.
func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "cmd", Value: "", Usage: `Command, with arguments, to invoke as IPC child process. Example: "./artifacts/simple_ram_client --validation=100"`},
		cli.Uint64Flag{Name: "validation", Value: 0, Usage: "Number of read responses to print for validation purposes."},
		cli.StringFlag{Name: "validation-filename", Value: "/dev/stderr", Usage: "Destination file to write validation results, if applicable."},
		cli.Uint64Flag{Name: "burn-in", Value: 0, Usage: "Number of read/write requests to use for burn-in before collecting statistics."},
	}
	app.Action = func(c *cli.Context) error {
		nValidation := c.Uint64("validation")
		fmt.Printf("Validation: %d\n", nValidation)
		validationFilename := c.String("validation-filename")
		fmt.Printf("Validation filename: %s\n", validationFilename)
		nBurnIn := c.Uint64("burn-in")
		fmt.Printf("Burn-in: %d\n", nBurnIn)
		cmdString := c.String("cmd")
		fmt.Printf("cmd: %s\n", cmdString)
		if cmdString == "" {
			log.Fatal("missing cmd")
		}
		// Split the command string on whitespace: first field is the
		// binary, the rest are its arguments.
		cmdFields := strings.Fields(cmdString)
		cmd := exec.Command(cmdFields[0], cmdFields[1:]...)
		ipcStdin, err := cmd.StdinPipe()
		if err != nil {
			log.Fatal(err)
		}
		ipcStdout, err := cmd.StdoutPipe()
		if err != nil {
			log.Fatal(err)
		}
		ipcStderr, err := cmd.StderrPipe()
		if err != nil {
			log.Fatal(err)
		}
		if err := cmd.Start(); err != nil {
			log.Fatal(err)
		}
		// The driver owns the three pipes and speaks the IPC protocol
		// with the child for the duration of the run.
		driver := kv_query_util.NewIPCDriver(ipcStdin, ipcStdout, ipcStderr)
		kv_query_util.RunIPCDriver(driver, nValidation, validationFilename, nBurnIn)
		// Reap the child after the driver finishes.
		if err := cmd.Wait(); err != nil {
			log.Fatal(err)
		}
		return nil
	}
	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"time"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/terminal"
)
// serverInfo holds the SSH connection parameters for one server.
type serverInfo struct {
	Host   string `json:"host"`
	Port   int    `json:"port"`
	User   string `json:"user"`
	Passwd string `json:"passwd"`
}

// areaSer pairs a human-readable area label with its server info;
// ./config.json is expected to be a JSON array of these.
type areaSer struct {
	Area string     `json:"area"`
	Info serverInfo `json:"info"`
}
// connect opens an SSH session to host:port using password
// authentication. The caller owns the returned session and must Close
// it (the underlying client connection is closed with it).
//
// NOTE(security): host keys are not verified (InsecureIgnoreHostKey),
// so this is vulnerable to man-in-the-middle attacks. The callback must
// be set, however: current x/crypto/ssh returns an error from Dial when
// ClientConfig.HostKeyCallback is nil, so the original config (which
// omitted it) could never connect.
func connect(user, password, host string, port int) (*ssh.Session, error) {
	clientConfig := &ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.Password(password)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		Timeout:         30 * time.Second,
	}
	// connect to ssh
	addr := fmt.Sprintf("%s:%d", host, port)
	client, err := ssh.Dial("tcp", addr, clientConfig)
	if err != nil {
		return nil, err
	}
	// create session
	session, err := client.NewSession()
	if err != nil {
		return nil, err
	}
	return session, nil
}
// main reads ./config.json, presents the configured servers, lets the
// user pick one by number, and opens an interactive SSH shell on it
// with the local terminal switched to raw mode and a pty requested.
func main() {
	var (
		allInfo []areaSer
		num     int
	)
	b, err := ioutil.ReadFile("./config.json")
	if err != nil {
		fmt.Println("ReadFile: ", err.Error())
	}
	// Surface a malformed configuration instead of silently continuing
	// with an empty server list.
	if err := json.Unmarshal(b, &allInfo); err != nil {
		fmt.Println("Unmarshal: ", err.Error())
	}
	fmt.Println("================================================")
	fmt.Println("===================AUTO SSH=====================")
	fmt.Println("=================SERVER LIST====================")
	for i, elem := range allInfo {
		fmt.Println(i, ":", elem.Area)
	}
	fmt.Println("================================================")
	fmt.Println("=========please input the number of area==========")
	fmt.Print("=========NUMBER=======> ")
	fmt.Scanf("%d", &num)
	// Valid indexes are 0..len-1. The previous check used
	// `num > len(allInfo)`, which let num == len(allInfo) through and
	// panicked on the index below; this also rejects an empty list.
	if num < 0 || num >= len(allInfo) {
		fmt.Println("Please select the right number.")
		os.Exit(1)
	}
	session, err := connect(allInfo[num].Info.User, allInfo[num].Info.Passwd, allInfo[num].Info.Host, allInfo[num].Info.Port)
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
	// Put the local terminal into raw mode for interactive use and make
	// sure it is restored on exit.
	fd := int(os.Stdin.Fd())
	oldState, err := terminal.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	defer terminal.Restore(fd, oldState)
	// Wire the remote session to the local stdio streams.
	session.Stdout = os.Stdout
	session.Stderr = os.Stderr
	session.Stdin = os.Stdin
	termWidth, termHeight, err := terminal.GetSize(fd)
	if err != nil {
		panic(err)
	}
	// Set up terminal modes
	modes := ssh.TerminalModes{
		ssh.ECHO:          1,     // enable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
		ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
	}
	// Request pseudo terminal
	if err := session.RequestPty("xterm-256color", termHeight, termWidth, modes); err != nil {
		log.Fatal(err)
	}
	// Start a remote shell and block until it exits.
	err = session.Shell()
	if err != nil {
		fmt.Println("Shell error: ", err)
		return
	}
	err = session.Wait()
	if err != nil {
		fmt.Println("Wait error: ", err)
		return
	}
}
|
package parser
import (
"bytes"
"golang.org/x/net/html"
"strings"
)
// Parser extracts links and titles from HTML documents. It is
// configured with a domain to prepend to extracted links, prefix
// patterns links must match, substrings that exclude a link, and
// markers at which the document is cropped before parsing.
type Parser struct {
	domain      string
	pattern     []string
	exclude     []string
	trimMarkers []string
}

// NewParser returns a Parser configured with the given domain, include
// patterns, exclusion substrings and trim markers.
func NewParser(domain string, pattern []string, exclude []string, trimMarkers []string) *Parser {
	return &Parser{
		domain:      domain,
		pattern:     pattern,
		exclude:     exclude,
		trimMarkers: trimMarkers,
	}
}
// GetLinks : Parse all links from the HTML document.
// The document is first cropped at the configured trim markers, then
// every href attribute of an <a> element is collected (deduplicated via
// a set), and finally the pattern filter, exclusion filter and domain
// prefix are applied in that order. Order of the returned links is not
// deterministic (map iteration).
func (p *Parser) GetLinks(htm string) ([]string, error) {
	cleanedHtm := strings.NewReader(p.trimDocument(htm))
	htmlTree, err := html.Parse(cleanedHtm)
	// Set of hrefs; the bool value is unused.
	links := make(map[string]bool)
	if err != nil {
		return nil, err
	}
	// Depth-first walk of the parse tree collecting <a href="...">.
	var crawl func(node *html.Node)
	crawl = func(node *html.Node) {
		if node.Type == html.ElementNode && node.Data == "a" {
			attributes := node.Attr
			for _, attr := range attributes {
				if attr.Key == "href" {
					links[attr.Val] = true
				}
			}
		}
		for child := node.FirstChild; child != nil; child = child.NextSibling {
			crawl(child)
		}
	}
	crawl(htmlTree)
	keys := make([]string, 0, len(links))
	for key := range links {
		keys = append(keys, key)
	}
	// Filter by pattern, drop excluded links, then prepend the domain.
	keys = p.prependDomainToLinks(p.removeExcludedLinks(p.filterPatternLinks(keys)))
	return keys, nil
}
// ExtractDocumentTitle : Extracts document title from HTML string.
// The document is cropped at the configured trim markers first; if
// multiple <title> elements exist, the last one wins; an absent or
// empty title yields "".
func (p *Parser) ExtractDocumentTitle(htm string) (string, error) {
	htmStream := strings.NewReader(p.trimDocument(htm))
	startNode, err := html.Parse(htmStream)
	title := ""
	if err != nil {
		return "", err
	}
	var crawl func(node *html.Node)
	crawl = func(node *html.Node) {
		if node.Type == html.ElementNode && node.Data == "title" {
			// An empty <title></title> has no text child; dereferencing
			// FirstChild unconditionally panicked on such documents.
			if node.FirstChild != nil {
				title = node.FirstChild.Data
			}
		}
		for child := node.FirstChild; child != nil; child = child.NextSibling {
			crawl(child)
		}
	}
	crawl(startNode)
	return title, nil
}
// filterPatternLinks keeps only links that start with at least one of
// the configured patterns; with no patterns configured, all links pass.
func (p *Parser) filterPatternLinks(links []string) []string {
	if len(p.pattern) == 0 {
		return links
	}
	kept := make([]string, 0)
	for _, link := range links {
		for _, prefix := range p.pattern {
			if strings.HasPrefix(link, prefix) {
				kept = append(kept, link)
				break
			}
		}
	}
	return kept
}
// removeExcludedLinks drops any link containing one of the configured
// exclusion substrings; with no exclusions configured, all links pass.
func (p *Parser) removeExcludedLinks(links []string) []string {
	if len(p.exclude) == 0 {
		return links
	}
	kept := make([]string, 0)
	for _, link := range links {
		excluded := false
		for _, substr := range p.exclude {
			if strings.Contains(link, substr) {
				excluded = true
				break
			}
		}
		if !excluded {
			kept = append(kept, link)
		}
	}
	return kept
}
// prependDomainToLinks prefixes every link with the configured domain;
// with an empty domain, links are returned untouched.
func (p *Parser) prependDomainToLinks(links []string) []string {
	if p.domain == "" {
		return links
	}
	prefixed := make([]string, 0)
	for _, link := range links {
		// Plain concatenation produces the same string as the previous
		// bytes.Buffer-based build.
		prefixed = append(prefixed, p.domain+link)
	}
	return prefixed
}
// trimDocument crops the document at each configured trim marker in
// turn, keeping only the text before the marker. With no markers the
// document is returned unchanged.
func (p *Parser) trimDocument(htm string) string {
	if len(p.trimMarkers) == 0 {
		return htm
	}
	parts := htm
	for _, marker := range p.trimMarkers {
		// Split the progressively trimmed text, not the original htm:
		// the previous code split htm every iteration, so only the last
		// marker ever had any effect.
		parts = strings.Split(parts, marker)[0]
	}
	return parts
}
// sanitizeLink strips all double quotes and spaces from a link string.
func sanitizeLink(link string) string {
	replacer := strings.NewReplacer("\"", "", " ", "")
	return replacer.Replace(link)
}
|
package semt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document02000101 is the XML document wrapper for semt.020.001.01
// (SecuritiesMessageCancellationAdvice).
type Document02000101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:semt.020.001.01 Document"`
	Message *SecuritiesMessageCancellationAdviceV01 `xml:"SctiesMsgCxlAdvc"`
}

// AddMessage allocates the message payload and returns it so the caller
// can populate it (builder-style accessor).
func (d *Document02000101) AddMessage() *SecuritiesMessageCancellationAdviceV01 {
	d.Message = new(SecuritiesMessageCancellationAdviceV01)
	return d.Message
}
// Scope
// An account servicer sends a SecuritiesMessageCancellationAdvice to an account owner to inform of the cancellation of a securities message previously sent by an account servicer.
// The account servicer/owner relationship may be:
// - a central securities depository or another settlement market infrastructure acting on behalf of their participants
// - an agent (sub-custodian) acting on behalf of their global custodian customer, or
// - a custodian acting on behalf of an investment management institution or a broker/dealer.
// Usage
// The previously sent message may be:
// - a securities settlement transaction confirmation
// - a report (transactions, pending transactions, allegements, accounting and custody securities balance)
// - an allegement notification (when sent by mistake or because the counterparty cancelled its instruction)
// - a portfolio transfer notification
// - an intra-position movement confirmation
// - a transaction generation notification
// The previously sent message cannot be a status advice message (any). If a status advice should not have been sent, a new status advice with the correct status should be sent, not a cancellation advice.
// The message may also be used to:
// - re-send a message previously sent (the sub-function of the message is Duplicate),
// - provide a third party with a copy of a message for information (the sub-function of the message is Copy),
// - re-send to a third party a copy of a message for information (the sub-function of the message is Copy Duplicate).
// ISO 15022 - 20022 Coexistence
// This ISO 20022 message is reversed engineered from ISO 15022. Both standards will coexist for a certain number of years. Until this coexistence period ends, the usage of certain data types is restricted to ensure interoperability between ISO 15022 and 20022 users. Compliance to these rules is mandatory in a coexistence environment. The coexistence restrictions are described in a Textual Rule linked to the Message Items they concern. These coexistence textual rules are clearly identified as follows: “CoexistenceXxxxRule”.
type SecuritiesMessageCancellationAdviceV01 struct {
	// Information that unambiguously identifies a SecuritiesMessageCancellationAdvice message as known by the account servicer.
	Identification *iso20022.DocumentIdentification11 `xml:"Id"`
	// Details of the transaction.
	Details *iso20022.TransactionDetails12 `xml:"Dtls"`
	// Party that originated the message, if other than the sender. Optional (omitempty).
	MessageOriginator *iso20022.PartyIdentification10Choice `xml:"MsgOrgtr,omitempty"`
	// Party that is the final destination of the message, if other than the receiver. Optional (omitempty).
	MessageRecipient *iso20022.PartyIdentification10Choice `xml:"MsgRcpt,omitempty"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block. Repeats.
	Extension []*iso20022.Extension2 `xml:"Xtnsn,omitempty"`
}
// AddIdentification allocates and returns the Identification block
// (builder-style accessor).
func (s *SecuritiesMessageCancellationAdviceV01) AddIdentification() *iso20022.DocumentIdentification11 {
	s.Identification = new(iso20022.DocumentIdentification11)
	return s.Identification
}

// AddDetails allocates and returns the Details block.
func (s *SecuritiesMessageCancellationAdviceV01) AddDetails() *iso20022.TransactionDetails12 {
	s.Details = new(iso20022.TransactionDetails12)
	return s.Details
}

// AddMessageOriginator allocates and returns the MessageOriginator block.
func (s *SecuritiesMessageCancellationAdviceV01) AddMessageOriginator() *iso20022.PartyIdentification10Choice {
	s.MessageOriginator = new(iso20022.PartyIdentification10Choice)
	return s.MessageOriginator
}

// AddMessageRecipient allocates and returns the MessageRecipient block.
func (s *SecuritiesMessageCancellationAdviceV01) AddMessageRecipient() *iso20022.PartyIdentification10Choice {
	s.MessageRecipient = new(iso20022.PartyIdentification10Choice)
	return s.MessageRecipient
}

// AddExtension appends a new Extension entry and returns it for population.
func (s *SecuritiesMessageCancellationAdviceV01) AddExtension() *iso20022.Extension2 {
	newValue := new(iso20022.Extension2)
	s.Extension = append(s.Extension, newValue)
	return newValue
}
|
package travis
import (
"os"
"testing"
)
// TestIsRunning cross-checks IsRunning() against the raw TRAVIS/CI
// environment variables that Travis CI sets on its build workers, so
// the test passes both on and off CI.
func TestIsRunning(t *testing.T) {
	tr := os.Getenv("TRAVIS") == "true" && os.Getenv("CI") == "true"
	if tr != IsRunning() {
		t.Error("IsRunning() does not match TRAVIS && CI env var check")
	}
}
|
package c31_hmac_sha1_timing_leak
import (
"errors"
"fmt"
"time"
"github.com/vodafon/cryptopals/set1/c1_hex_to_base64"
)
// ServerExp carries the fixed parts of the timing attack: the request
// path up to the signature parameter, and the per-byte comparison
// delay used as the timing threshold.
type ServerExp struct {
	BasePath     string
	BaseDuration time.Duration
}
// Exploit recovers the 20-byte HMAC-SHA1 for fn by measuring the server's
// signature-comparison time one byte at a time; dur is the per-byte delay the
// server is known to add. On a timing misread it retries up to 3 times,
// backing up one position first, before returning the underlying error.
func Exploit(server Server, fn string, dur time.Duration) ([]byte, error) {
	se := ServerExp{
		BasePath: "file=" + fn + "&signature=",
		BaseDuration: dur,
	}
	mac := make([]byte, 20) // HMAC-SHA1 digest length
	retry := 3
	pos := 0
	for pos < len(mac) {
		bt, err := se.findByte(server, mac, pos)
		if err != nil {
			if retry == 0 {
				return nil, err
			}
			retry -= 1
			// A failed position usually means an earlier byte was guessed
			// wrong from timing noise; step back one and retry.
			if pos > 0 {
				pos -= 1
			}
			continue
		}
		mac[pos] = bt
		fmt.Printf("%d:\t%x\n", pos, mac)
		pos += 1
		retry = 3 // fresh retry budget after each confirmed byte
	}
	return mac, nil
}
// findByte tries all 256 candidates for mac[pos] and returns the first one
// whose server check takes at least (pos+1)*BaseDuration — i.e. the server
// got past pos+1 byte comparisons before rejecting.
func (obj ServerExp) findByte(server Server, mac []byte, pos int) (byte, error) {
	targetDuration := time.Duration(pos+1) * obj.BaseDuration
	for candidate := 0; candidate < 256; candidate++ {
		mac[pos] = byte(candidate)
		sign := c1_hex_to_base64.EncodeHex(mac)
		start := time.Now()
		server.CheckFile(obj.BasePath + string(sign))
		if time.Since(start) >= targetDuration {
			return byte(candidate), nil
		}
	}
	return byte(0), errors.New("Byte not found")
}
|
package datastruct
import (
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
// Yml2Map parses a YAML document into a generic map. A parse error is logged
// and whatever was decoded so far (possibly an empty map) is returned.
func Yml2Map(text string) map[interface{}]interface{} {
	mapYml := make(map[interface{}]interface{})
	if err := yaml.Unmarshal([]byte(text), &mapYml); err != nil {
		logrus.Error(err)
	}
	return mapYml
}
// Map2Yml serializes a generic map to YAML text. On marshal failure the error
// is logged and the empty string is returned.
//
// Rewritten to the idiomatic error-first early return; the original used a
// parenthesized condition and an else branch after a return.
func Map2Yml(mpaz map[interface{}]interface{}) string {
	txtYml, err := yaml.Marshal(&mpaz)
	if err != nil {
		logrus.Error(err)
		return ""
	}
	return string(txtYml)
}
// MapMerge copies every entry of add into source (overwriting duplicate keys)
// and returns source for convenient chaining. source is mutated in place.
func MapMerge(source, add map[interface{}]interface{}) map[interface{}]interface{} {
	for key, value := range add {
		source[key] = value
	}
	return source
}
|
// isSubsequence reports whether s appears in t as a (not necessarily
// contiguous) subsequence. s is compared byte-wise against t's runes, matching
// the original implementation; an empty s is always a subsequence.
func isSubsequence(s string, t string) bool {
	if len(s) == 0 {
		return true
	}
	next := 0
	for _, r := range t {
		if r != rune(s[next]) {
			continue
		}
		next++
		if next == len(s) {
			return true
		}
	}
	return false
}
|
package main
import "fmt"
// age demonstrates a package-level variable with an explicit sized type.
var age int32 = 25

// main demonstrates Go's basic types and the different declaration forms.
func main() {
	// Core built-in types: string, bool, the int/uint family (int8..int64,
	// byte = uint8, rune = int32), float32/float64, complex64/complex128.
	const isCool = true // constants cannot use :=
	// Shorthand multi-assignment declaration.
	name, email := "Varien", "varien@gmail.com"
	fmt.Println(name, age, isCool, email)
	fmt.Printf("type of name is %T\n", name)
	fmt.Printf("type of age is %T\n", age)
	fmt.Printf("type of isCool is %T\n", isCool)
}
package builder
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"bldy.build/build"
"bldy.build/build/graph"
)
// Marker-file names written into a node's cache directory to record the
// outcome of its last build.
const (
	SCSSLOG = "success" // presence means the node built successfully
	FAILLOG = "fail"    // presence means the build failed; file holds the error text
)
// buildpath returns the cache directory assigned to node n, rooted at the
// configured cache location.
func (b *Builder) buildpath(n *graph.Node) string {
	return filepath.Join(*b.config.Cache, nodeid(n))
}
// cached reports whether a cache directory already exists for n, recording
// the answer on n.Cached as a side effect.
func (b *Builder) cached(n *graph.Node) bool {
	_, err := os.Lstat(b.buildpath(n))
	n.Cached = !os.IsNotExist(err)
	return n.Cached
}
// builderror inspects n's cache directory for outcome marker files and
// updates n.Status accordingly. For a failed build it returns the recorded
// error text as an error; otherwise it returns nil.
func (b *Builder) builderror(n *graph.Node) error {
	nspath := b.buildpath(n)
	if file, err := os.Open(filepath.Join(nspath, FAILLOG)); err == nil {
		// BUG FIX: the fail-log file handle was previously leaked.
		defer file.Close()
		n.Status = build.Fail
		errString, _ := ioutil.ReadAll(file)
		return fmt.Errorf("%s", errString)
	} else if _, err := os.Lstat(filepath.Join(nspath, SCSSLOG)); err == nil {
		n.Status = build.Success
	}
	return nil
}
// saveLog writes n.Output to the success or fail marker file in n's cache
// directory (or /dev/null for any other status). Fatal on any I/O error.
func (b *Builder) saveLog(n *graph.Node) {
	logName := "/dev/null"
	switch n.Status {
	case build.Success:
		logName = SCSSLOG
	case build.Fail:
		logName = FAILLOG
	}
	logfile, err := os.Create(filepath.Join(b.buildpath(n), logName))
	if err != nil {
		l.Fatalf("error creating log for %s: %s", n.Target.Name(), err.Error())
	}
	// BUG FIX: the created log file was previously never closed.
	defer logfile.Close()
	if _, err := io.WriteString(logfile, n.Output); err != nil {
		l.Fatalf("error writing log for %s: %s", n.Target.Name(), err.Error())
	}
}
|
//FOR
package main
import "fmt"
// main demonstrates Go's common for-loop forms; output is 1..3 then 7..9,
// one number per line.
func main() {
	// Three-clause form replacing the original condition-only ("while") loop;
	// prints the same 1, 2, 3 sequence.
	for alumnos := 1; alumnos <= 3; alumnos++ {
		fmt.Println(alumnos)
	}
	// Classic counted loop: prints 7, 8, 9.
	for calificaciones := 7; calificaciones <= 9; calificaciones++ {
		fmt.Println(calificaciones)
	}
}
|
package hrp
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// Shared fixture steps reused by the tests in this file.
var (
	// stepGET exercises a GET request with query params, headers, cookies and
	// a full set of response validators.
	stepGET = NewStep("get with params").
		GET("/get").
		WithParams(map[string]interface{}{"foo1": "bar1", "foo2": "bar2"}).
		WithHeaders(map[string]string{"User-Agent": "HttpRunnerPlus"}).
		WithCookies(map[string]string{"user": "debugtalk"}).
		Validate().
		AssertEqual("status_code", 200, "check status code").
		AssertEqual("headers.\"Content-Type\"", "application/json; charset=utf-8", "check header Content-Type").
		AssertEqual("body.args.foo1", "bar1", "check param foo1").
		AssertEqual("body.args.foo2", "bar2", "check param foo2")
	// stepPOSTData exercises a form-encoded POST with a raw request body.
	stepPOSTData = NewStep("post form data").
		POST("/post").
		WithParams(map[string]interface{}{"foo1": "bar1", "foo2": "bar2"}).
		WithHeaders(map[string]string{"User-Agent": "HttpRunnerPlus", "Content-Type": "application/x-www-form-urlencoded"}).
		WithBody("a=1&b=2").
		WithCookies(map[string]string{"user": "debugtalk"}).
		Validate().
		AssertEqual("status_code", 200, "check status code")
)
// TestRunRequestGetToStruct verifies that the stepGET builder chain populated
// the underlying request struct (method, URL, params, headers, cookies and
// validators) exactly as written.
func TestRunRequestGetToStruct(t *testing.T) {
	tStep := stepGET.step
	if tStep.Request.Method != httpGET {
		t.Fatalf("tStep.Request.Method != GET")
	}
	if tStep.Request.URL != "/get" {
		t.Fatalf("tStep.Request.URL != '/get'")
	}
	if tStep.Request.Params["foo1"] != "bar1" || tStep.Request.Params["foo2"] != "bar2" {
		t.Fatalf("tStep.Request.Params mismatch")
	}
	if tStep.Request.Headers["User-Agent"] != "HttpRunnerPlus" {
		t.Fatalf("tStep.Request.Headers mismatch")
	}
	if tStep.Request.Cookies["user"] != "debugtalk" {
		t.Fatalf("tStep.Request.Cookies mismatch")
	}
	// Only the first validator (status_code) is spot-checked here.
	validator, ok := tStep.Validators[0].(Validator)
	if !ok || validator.Check != "status_code" || validator.Expect != 200 {
		t.Fatalf("tStep.Validators mismatch")
	}
}
// TestRunRequestPostDataToStruct verifies that the stepPOSTData builder chain
// populated the request struct, including the raw form body.
func TestRunRequestPostDataToStruct(t *testing.T) {
	tStep := stepPOSTData.step
	if tStep.Request.Method != httpPOST {
		t.Fatalf("tStep.Request.Method != POST")
	}
	if tStep.Request.URL != "/post" {
		t.Fatalf("tStep.Request.URL != '/post'")
	}
	if tStep.Request.Params["foo1"] != "bar1" || tStep.Request.Params["foo2"] != "bar2" {
		t.Fatalf("tStep.Request.Params mismatch")
	}
	if tStep.Request.Headers["User-Agent"] != "HttpRunnerPlus" {
		t.Fatalf("tStep.Request.Headers mismatch")
	}
	if tStep.Request.Cookies["user"] != "debugtalk" {
		t.Fatalf("tStep.Request.Cookies mismatch")
	}
	if tStep.Request.Body != "a=1&b=2" {
		t.Fatalf("tStep.Request.Data mismatch")
	}
	validator, ok := tStep.Validators[0].(Validator)
	if !ok || validator.Check != "status_code" || validator.Expect != 200 {
		t.Fatalf("tStep.Validators mismatch")
	}
}
// TestRunRequestStatOn runs both fixture steps against postman-echo with HTTP
// stat collection enabled and sanity-checks the recorded timing phases.
// Network-dependent: requires outbound HTTPS access.
func TestRunRequestStatOn(t *testing.T) {
	testcase := &TestCase{
		Config: NewConfig("test").SetBaseURL("https://postman-echo.com"),
		TestSteps: []IStep{stepGET, stepPOSTData},
	}
	caseRunner, _ := NewRunner(t).SetHTTPStatOn().NewCaseRunner(testcase)
	sessionRunner := caseRunner.NewSession()
	if err := sessionRunner.Start(nil); err != nil {
		t.Fatal()
	}
	summary, _ := sessionRunner.GetSummary()
	// First request opens a fresh connection, so the DNS/TCP/TLS phases must
	// be populated (strictly positive where a handshake necessarily happened).
	stat := summary.Records[0].HttpStat
	if !assert.GreaterOrEqual(t, stat["DNSLookup"], int64(0)) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["TCPConnection"], int64(0)) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["TLSHandshake"], int64(0)) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["ServerProcessing"], int64(1)) {
		t.Fatal()
	}
	if !assert.GreaterOrEqual(t, stat["ContentTransfer"], int64(0)) {
		t.Fatal()
	}
	if !assert.GreaterOrEqual(t, stat["NameLookup"], int64(0)) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["Connect"], int64(0)) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["Pretransfer"], int64(0)) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["StartTransfer"], int64(0)) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["Total"], int64(5)) {
		t.Fatal()
	}
	if !assert.Less(t, stat["Total"]-summary.Records[0].Elapsed, int64(3)) {
		t.Fatal()
	}
	// reuse connection: the second request must show zeroed connection-setup
	// phases because the transport keeps the TCP/TLS session alive.
	stat = summary.Records[1].HttpStat
	if !assert.Equal(t, int64(0), stat["DNSLookup"]) {
		t.Fatal()
	}
	if !assert.Equal(t, int64(0), stat["TCPConnection"]) {
		t.Fatal()
	}
	if !assert.Equal(t, int64(0), stat["TLSHandshake"]) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["ServerProcessing"], int64(1)) {
		t.Fatal()
	}
	if !assert.Equal(t, int64(0), stat["NameLookup"]) {
		t.Fatal()
	}
	if !assert.Equal(t, int64(0), stat["Connect"]) {
		t.Fatal()
	}
	if !assert.Equal(t, int64(0), stat["Pretransfer"]) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["StartTransfer"], int64(0)) {
		t.Fatal()
	}
	if !assert.Greater(t, stat["Total"], int64(1)) {
		t.Fatal()
	}
	// NOTE(review): this checks the SECOND record's Total against the FIRST
	// record's Elapsed — confirm whether summary.Records[1].Elapsed was intended.
	if !assert.Less(t, stat["Total"]-summary.Records[0].Elapsed, int64(100)) {
		t.Fatal()
	}
}
// TestRunCaseWithTimeout covers request-timeout behavior: a global timeout
// that passes, a global timeout that trips, and a per-step timeout override.
// Network-dependent: requires outbound access to httpbin.org.
func TestRunCaseWithTimeout(t *testing.T) {
	r := NewRunner(t)
	// global timeout
	testcase1 := &TestCase{
		Config: NewConfig("TestCase1").
			SetRequestTimeout(10). // set global timeout to 10s
			SetBaseURL("https://httpbin.org"),
		TestSteps: []IStep{
			NewStep("step1").
				GET("/delay/1"). // 1s delay, well under the 10s limit
				Validate().
				AssertEqual("status_code", 200, "check status code"),
		},
	}
	err := r.Run(testcase1)
	if !assert.NoError(t, err) { // assert no error
		t.FailNow()
	}
	testcase2 := &TestCase{
		Config: NewConfig("TestCase2").
			SetRequestTimeout(10). // set global timeout to 10s
			SetBaseURL("https://httpbin.org"),
		TestSteps: []IStep{
			NewStep("step1").
				GET("/delay/11"). // 11s delay exceeds the 10s limit
				Validate().
				AssertEqual("status_code", 200, "check status code"),
		},
	}
	err = r.Run(testcase2)
	if !assert.Error(t, err) { // assert error
		t.FailNow()
	}
	// step timeout
	testcase3 := &TestCase{
		Config: NewConfig("TestCase3").
			SetRequestTimeout(10).
			SetBaseURL("https://httpbin.org"),
		TestSteps: []IStep{
			NewStep("step2").
				GET("/delay/11").
				// set step timeout to 15s — exceeds the 11s delay, and is
				// expected to override the 10s global timeout (hence NoError).
				SetTimeout(15*time.Second).
				Validate().
				AssertEqual("status_code", 200, "check status code"),
		},
	}
	err = r.Run(testcase3)
	if !assert.NoError(t, err) {
		t.FailNow()
	}
}
|
package problem0429
// Node is an n-ary tree node.
type Node struct {
	Val      int
	Children []*Node
}

// levelOrder returns the node values grouped by depth (breadth-first).
// A nil root yields an empty (non-nil) result.
func levelOrder(root *Node) [][]int {
	result := [][]int{}
	if root == nil {
		return result
	}
	queue := []*Node{root}
	for len(queue) > 0 {
		level := make([]int, 0, len(queue))
		// BUG FIX: the next-level queue must be reset on every iteration.
		// It was previously declared once outside the loop, so already
		// processed nodes were re-queued and the loop never terminated for
		// any tree deeper than one level.
		nextQueue := []*Node{}
		for _, node := range queue {
			level = append(level, node.Val)
			nextQueue = append(nextQueue, node.Children...)
		}
		result = append(result, level)
		queue = nextQueue
	}
	return result
}
|
package pgsql
import (
"database/sql"
"database/sql/driver"
"time"
)
// TimetzArrayFromTimeSlice returns a driver.Valuer that produces a PostgreSQL timetz[] from the given Go []time.Time.
func TimetzArrayFromTimeSlice(val []time.Time) driver.Valuer {
	return timetzArrayFromTimeSlice{val: val}
}

// TimetzArrayToTimeSlice returns an sql.Scanner that converts a PostgreSQL timetz[] into a Go []time.Time and sets it to val.
func TimetzArrayToTimeSlice(val *[]time.Time) sql.Scanner {
	return timetzArrayToTimeSlice{val: val}
}

// timetzArrayFromTimeSlice adapts a []time.Time to driver.Valuer; see Value.
type timetzArrayFromTimeSlice struct {
	val []time.Time
}
// Value encodes v.val as a PostgreSQL timetz[] array literal. A nil slice maps
// to SQL NULL and an empty slice to "{}".
func (v timetzArrayFromTimeSlice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{'{', '}'}, nil
	}
	// Capacity estimate: 2 braces + (n-1) separators + n formatted elements.
	size := 2 + (len(v.val) - 1) + (len(v.val) * 14) // len("hh:mm:ss-hh:mm") == 14
	out := make([]byte, 1, size)
	out[0] = '{'
	for _, t := range v.val {
		out = append(out, t.Format(timetzLayout)...)
		out = append(out, ',')
	}
	out[len(out)-1] = '}' // replace last "," with "}"
	return out, nil
}
// timetzArrayToTimeSlice adapts a *[]time.Time to sql.Scanner; see Scan.
type timetzArrayToTimeSlice struct {
	val *[]time.Time
}

// Scan parses a PostgreSQL timetz[] value into *v.val. SQL NULL yields a nil
// slice; a malformed element aborts with the time parse error.
func (v timetzArrayToTimeSlice) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseCommaArray(data)
	times := make([]time.Time, len(elems))
	for i := 0; i < len(elems); i++ {
		t, err := time.Parse(timetzLayout, string(elems[i]))
		if err != nil {
			return err
		}
		times[i] = t
	}
	*v.val = times
	return nil
}
|
package stream
import (
"io"
"time"
)
// Sample is one throughput measurement plus lifetime and moving-window stats.
type Sample struct {
	SampleID int64 `json:"sampleId"`
	Current int64 `json:"bytesPerSec"`
	Peak int64 `json:"peak"`
	Low int64 `json:"low"`
	Average int64 `json:"average"`
	MovingPeak int64 `json:"movingPeak"`
	MovingLow int64 `json:"movingLow"`
	MovingAverage int64 `json:"movingAverage"`
}

// SpeedSampler wraps an io.Reader and periodically samples its read
// throughput on a ticker goroutine. Lifetime stats (peak/low/average) cover
// every sample; the moving stats cover the ring-buffered recent window.
type SpeedSampler struct {
	r io.Reader
	callback SampleCallback
	samplesPerSecond int64 // ticker frequency; fixed at construction
	samplesTaken int64
	ticker *time.Ticker
	samples *ring // recent samples for the moving-window statistics
	total int64 // total bytes read since construction
	current int64 // bytes read since the last tick (rate after scaling)
	peak int64
	low int64 // -1 acts as the "unset" sentinel
	average int64
	movingPeak int64
	movingLow int64
	movingAverage int64
}

// SampleCallback is invoked once per tick with the freshly taken sample.
type SampleCallback func(sampler *SpeedSampler, sample *Sample)
// NewSpeedSampler wraps r in a throughput sampler that invokes callback on
// every sampling tick. Sampling starts immediately (2 samples per second).
func NewSpeedSampler(r io.Reader, callback SampleCallback) (*SpeedSampler, error) {
	s := &SpeedSampler{
		r:                r,
		callback:         callback,
		samplesPerSecond: 2,
	}
	if err := s.Start(); err != nil {
		return nil, err
	}
	return s, nil
}
// Reset stops any running ticker and clears all counters and samples.
// NOTE(review): the goroutine started by Start reads s.ticker and the
// counters without synchronization, so calling Reset while sampling is live
// is a data race — confirm intended ownership.
func (s *SpeedSampler) Reset() {
	if s.ticker != nil {
		s.ticker.Stop()
		s.ticker = nil
	}
	if s.samples != nil {
		s.samples = nil
	}
	s.samplesTaken = 0
	s.current, s.peak, s.average, s.movingPeak, s.movingAverage = 0, 0, 0, 0, 0
	// -1 marks the low-water marks as "not yet observed".
	s.low, s.movingLow = -1, -1
}

// Start (re)initializes the sampler state, sizes the moving window to five
// seconds' worth of samples, and launches the ticker goroutine. Always
// returns nil.
// NOTE(review): the goroutine checks s.ticker for nil but then blocks on the
// channel receive; Ticker.Stop does not close the channel, so after Reset the
// receive never fires and the goroutine may leak — consider a done channel.
func (s *SpeedSampler) Start() error {
	s.Reset()
	s.samples = newRing(s.samplesPerSecond * 5)
	s.ticker = time.NewTicker(time.Duration(1000/s.samplesPerSecond) * time.Millisecond)
	go func() {
		for {
			if s.ticker == nil {
				break
			}
			_, open := <-s.ticker.C
			if !open {
				break
			}
			s.takeSample()
		}
	}()
	return nil
}
// takeSample converts the bytes counted since the last tick into a rate,
// updates lifetime and moving-window statistics, stores the sample in the
// ring buffer, invokes the callback (if any), and returns the sample.
func (s *SpeedSampler) takeSample() *Sample {
	s.samplesTaken++
	// Scale the per-tick byte count up to bytes/second.
	s.current *= s.samplesPerSecond
	// Incremental running mean over all samples taken so far.
	s.average += (s.current - s.average) / s.samplesTaken
	s.peak = max(s.peak, s.current)
	if s.low == -1 {
		// First sample: -1 is the "unset" sentinel for the low-water mark.
		s.low = s.current
	} else {
		s.low = min(s.low, s.current)
	}
	sample := &Sample{
		SampleID: s.samplesTaken,
		Current: s.current,
		Peak: s.peak,
		Low: s.low,
		Average: s.average,
	}
	s.samples.add(sample)
	// Recompute the windowed stats over whatever the ring currently holds.
	s.movingPeak, s.movingLow, s.movingAverage = 0, -1, 0
	for i := int64(0); i < s.samples.cl; i++ {
		tempSample := s.samples.get(i).(*Sample)
		s.movingPeak = max(s.movingPeak, tempSample.Current)
		if s.movingLow == -1 {
			s.movingLow = tempSample.Current
		} else {
			s.movingLow = min(s.movingLow, tempSample.Current)
		}
		s.movingAverage += tempSample.Current
	}
	s.movingAverage /= min(s.samples.cl, s.samples.l)
	sample.MovingPeak = s.movingPeak
	sample.MovingLow = s.movingLow
	sample.MovingAverage = s.movingAverage
	s.current = 0 // restart the per-tick byte counter
	if s.callback != nil {
		s.callback(s, sample)
	}
	return sample
}

// Sample returns the ring element at offset 0 (the oldest buffered sample),
// or nil before the first tick.
// NOTE(review): callers may expect the most recent sample here — verify.
func (s *SpeedSampler) Sample() *Sample {
	if s.samples == nil || s.samples.cl == 0 {
		return nil
	}
	return s.samples.get(0).(*Sample)
}

// Total returns the total number of bytes read through the sampler.
func (s *SpeedSampler) Total() int64 {
	return s.total
}

// Read proxies to the wrapped reader, counting bytes into the per-tick and
// lifetime totals. NOTE(review): these counters are also read by the ticker
// goroutine without synchronization — data race under the race detector.
func (s *SpeedSampler) Read(p []byte) (n int, err error) {
	n, err = s.r.Read(p)
	s.current += int64(n)
	s.total += int64(n)
	return n, err
}
// max returns the larger of a and b.
func max(a, b int64) int64 {
	if b > a {
		return b
	}
	return a
}
// min returns the smaller of a and b.
func min(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
// ring is a fixed-capacity circular buffer; see newRing/add/get below.
type ring struct {
	e []interface{} // backing storage
	l int64         // capacity
	cl int64        // number of elements currently stored
	c int64         // index of the oldest element (head)
	n int64         // next write position
}

// Close stops sampling and clears state; the wrapped reader is not closed.
// Always returns nil.
func (s *SpeedSampler) Close() error {
	s.Reset()
	return nil
}
// Simple ring buffer to keep the samples in
// newRing returns an empty ring with a fixed capacity of length slots.
func newRing(length int64) *ring {
	return &ring{
		e: make([]interface{}, length, length),
		l: length,
		cl: 0,
		c: 0,
		n: 0,
	}
}

// add appends values, overwriting the oldest entries once the ring is full.
func (r *ring) add(values ...interface{}) {
	for _, value := range values {
		r.e[r.n] = value
		if r.cl == r.l {
			// Full: advance the head so get(0) remains the oldest element.
			r.c = (r.c + 1) % r.l
		}
		r.cl = min(r.cl+1, r.l)
		r.n = (r.n + 1) % r.l
	}
}

// get returns the i-th element counting from the head (0 = oldest stored).
func (r *ring) get(i int64) interface{} {
	return r.e[(r.c+i)%r.l]
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ast_test
import (
"testing"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/stretchr/testify/require"
)
// TestHasAggFlag checks that HasAggFlag keys purely on the aggregate-function
// bit, regardless of other flag bits set on the expression.
func TestHasAggFlag(t *testing.T) {
	expr := &ast.BetweenExpr{}
	flagTests := []struct {
		flag uint64
		hasAgg bool
	}{
		{ast.FlagHasAggregateFunc, true},
		{ast.FlagHasAggregateFunc | ast.FlagHasVariable, true},
		{ast.FlagHasVariable, false},
	}
	for _, tt := range flagTests {
		expr.SetFlag(tt.flag)
		require.Equal(t, tt.hasAgg, ast.HasAggFlag(expr))
	}
}

// TestFlag parses each SQL expression as "select <expr>", runs ast.SetFlag
// over the statement, and asserts the exact flag bits computed for the
// expression (constants, references, subqueries, functions, variables, ...).
func TestFlag(t *testing.T) {
	flagTests := []struct {
		expr string
		flag uint64
	}{
		{
			"1 between 0 and 2",
			ast.FlagConstant,
		},
		{
			"case 1 when 1 then 1 else 0 end",
			ast.FlagConstant,
		},
		{
			"case 1 when 1 then 1 else 0 end",
			ast.FlagConstant,
		},
		{
			"case 1 when a > 1 then 1 else 0 end",
			ast.FlagConstant | ast.FlagHasReference,
		},
		{
			"1 = ANY (select 1) OR exists (select 1)",
			ast.FlagHasSubquery,
		},
		{
			"1 in (1) or 1 is true or null is null or 'abc' like 'abc' or 'abc' rlike 'abc'",
			ast.FlagConstant,
		},
		{
			"row (1, 1) = row (1, 1)",
			ast.FlagConstant,
		},
		{
			"(1 + a) > ?",
			ast.FlagHasReference | ast.FlagHasParamMarker,
		},
		{
			"trim('abc ')",
			ast.FlagHasFunc,
		},
		{
			"now() + EXTRACT(YEAR FROM '2009-07-02') + CAST(1 AS UNSIGNED)",
			ast.FlagHasFunc,
		},
		{
			"substring('abc', 1)",
			ast.FlagHasFunc,
		},
		{
			"sum(a)",
			ast.FlagHasAggregateFunc | ast.FlagHasReference,
		},
		{
			"(select 1) as a",
			ast.FlagHasSubquery,
		},
		{
			"@auto_commit",
			ast.FlagHasVariable,
		},
		{
			"default(a)",
			ast.FlagHasDefault,
		},
		{
			"a is null",
			ast.FlagHasReference,
		},
		{
			"1 is true",
			ast.FlagConstant,
		},
		{
			"a in (1, count(*), 3)",
			ast.FlagConstant | ast.FlagHasReference | ast.FlagHasAggregateFunc,
		},
		{
			"'Michael!' REGEXP '.*'",
			ast.FlagConstant,
		},
		{
			"a REGEXP '.*'",
			ast.FlagHasReference,
		},
		{
			"-a",
			ast.FlagHasReference,
		},
	}
	p := parser.New()
	for _, tt := range flagTests {
		stmt, err := p.ParseOneStmt("select "+tt.expr, "", "")
		require.NoError(t, err)
		selectStmt := stmt.(*ast.SelectStmt)
		ast.SetFlag(selectStmt)
		// The expression under test is the first (only) select field.
		expr := selectStmt.Fields.Fields[0].Expr
		require.Equalf(t, tt.flag, expr.GetFlag(), "For %s", tt.expr)
	}
}
|
package detail
import (
"fpdapp/models/entity"
"github.com/gin-gonic/gin"
)
// Response is the JSON payload for the post-detail endpoint.
type Response struct {
	ID uint `json:"id"`
	CreatedAt string `json:"created_at"` // formatted "2006-01-02 15:04"
	UpdatedAt string `json:"updated_at"` // formatted "2006-01-02 15:04"
	Publishing int `json:"publishing"`
	DogName string `json:"dog_name"`
	Breed string `json:"breed"`
	Gender int `json:"gender"`
	Spay int `json:"spay"`
	Old string `json:"old"`
	SinglePerson int `json:"single_person"`
	SeniorPerson int `json:"senior_person"`
	TransferStatus int `json:"transfer_status"`
	Introduction string `json:"introduction"`
	AppealPoint string `json:"appeal_point"`
	OtherMessage string `json:"other_message"`
	PostImages []PostImageResponse `json:"post_images"`
	PostPrefectures []PostPrefectureResponse `json:"post_prefectures"`
	User UserResponse `json:"user"`
}

// PostImageResponse is one image attached to the post.
type PostImageResponse struct {
	PostId uint `json:"post_id"`
	PostImageId uint `json:"post_image_id"`
	Position int `json:"position"`
	ObjectKey string `json:"object_key"`
}

// PostPrefectureResponse links the post to one prefecture.
type PostPrefectureResponse struct {
	PostId uint `json:"post_id"`
	PostPrefectureId int `json:"post_prefecture_id"`
}

// UserResponse is the public subset of the posting user's profile.
type UserResponse struct {
	TwitterAccount string `json:"twitter_account"`
	Nickname string `json:"nickname"`
	Profile string `json:"profile"`
	WebUrl string `json:"web_url"`
}

// Serializer converts an entity.Post into a Response.
type Serializer struct {
	C *gin.Context
	Post entity.Post
}
// Response assembles the post-detail JSON payload from r.Post, flattening the
// attached images, prefectures and user profile.
func (r *Serializer) Response() Response {
	var postImageResponseList []PostImageResponse
	for _, p := range r.Post.PostImages {
		postImageResponseList = append(postImageResponseList, PostImageResponse{
			PostId:      p.PostId,
			PostImageId: p.ID,
			Position:    p.Position,
			// TODO: add image information (ObjectKey)
		})
	}
	var postPrefectureResponseList []PostPrefectureResponse
	for _, p := range r.Post.PostPrefectures {
		postPrefectureResponseList = append(postPrefectureResponseList, PostPrefectureResponse{
			PostId:           p.PostId,
			PostPrefectureId: p.PostPrefectureId,
		})
	}
	response := Response{
		ID:         r.Post.ID,
		CreatedAt:  r.Post.CreatedAt.Format("2006-01-02 15:04"),
		UpdatedAt:  r.Post.UpdatedAt.Format("2006-01-02 15:04"),
		Publishing: r.Post.Publishing,
		DogName:    r.Post.DogName,
		Breed:      r.Post.Breed,
		Gender:     r.Post.Gender,
		// BUG FIX: Spay was previously populated from SeniorPerson
		// (copy-paste error), silently reporting the wrong field.
		Spay:           r.Post.Spay,
		Old:            r.Post.Old,
		SinglePerson:   r.Post.SinglePerson,
		SeniorPerson:   r.Post.SeniorPerson,
		TransferStatus: r.Post.TransferStatus,
		Introduction:   r.Post.Introduction,
		AppealPoint:    r.Post.AppealPoint,
		OtherMessage:   r.Post.OtherMessage,
		PostImages:     postImageResponseList,
		PostPrefectures: postPrefectureResponseList,
		User: UserResponse{
			TwitterAccount: r.Post.User.TwitterAccount,
			Nickname:       r.Post.User.Nickname,
			Profile:        r.Post.User.Profile,
			WebUrl:         r.Post.User.WebUrl,
		},
	}
	return response
}
|
package entity
// KubePod is a trimmed view of a Kubernetes pod, mirroring the columns of
// `kubectl get pods`; values are kept as their raw display strings.
type KubePod struct {
	Name string `json:"name"`
	Status string `json:"status"`
	Restarts string `json:"restarts"`
	Age string `json:"age"`
}
|
package friend
import (
"GP/db"
"database/sql"
"log"
)
// CheckFriendInfo describes one pending (unconfirmed) friend request row.
type CheckFriendInfo struct {
	Id string `json:"id"`
	FriendId string `json:"friendid"`
	UserName string `json:"username"`
	FriendName string `json:"friendname"`
	Label string `json:"label"`
}
// GetCheckFriend returns the pending friend requests (ischeck = 0) addressed
// to username. Returns an empty, non-nil slice when there are none.
func GetCheckFriend(username string) (friendInfo []*CheckFriendInfo, err error) {
	friendInfo = []*CheckFriendInfo{}
	querySql := "select id, id2, username2, nickname2, label2 from gp.friend where username1 = ? and ischeck = 0;"
	stmt, err := db.DB.Prepare(querySql)
	if err != nil {
		log.Println("GetCheckFriend Querysql prepare fail")
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(username)
	if err != nil {
		log.Println("GetCheckFriend Querysql query fail")
		return nil, err
	}
	// BUG FIX: rows must be closed or the underlying connection leaks.
	defer rows.Close()
	for rows.Next() {
		var friend CheckFriendInfo
		err := rows.Scan(&friend.Id, &friend.FriendId, &friend.UserName, &friend.FriendName, &friend.Label)
		if err != nil {
			return nil, err
		}
		friendInfo = append(friendInfo, &friend)
	}
	// Surface iteration errors instead of returning a silently truncated list.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return friendInfo, nil
}
// FriendInfo describes one confirmed friendship row.
type FriendInfo struct {
	Id string `json:"id"`
	FriendId string `json:"friendid"`
	UserName string `json:"username"`
	FriendUserName string `json:"friendusername"`
	FriendName string `json:"friendname"`
	Label string `json:"label"`
}
// GetFriendList returns username's confirmed friends (ischeck = 1).
// Returns an empty, non-nil slice when there are none.
func GetFriendList(username string) (friendInfo []*FriendInfo, err error) {
	friendInfo = []*FriendInfo{}
	querySql := "select id, username1, id2, username2, nickname2, label2 from gp.friend where username1 = ? and ischeck = 1;"
	stmt, err := db.DB.Prepare(querySql)
	if err != nil {
		log.Println("GetFriendList Querysql prepare fail")
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(username)
	if err != nil {
		log.Println("GetFriendList Querysql query fail")
		return nil, err
	}
	// BUG FIX: rows must be closed or the underlying connection leaks.
	defer rows.Close()
	for rows.Next() {
		var friend FriendInfo
		err := rows.Scan(&friend.Id, &friend.UserName, &friend.FriendId, &friend.FriendUserName, &friend.FriendName, &friend.Label)
		if err != nil {
			return nil, err
		}
		friendInfo = append(friendInfo, &friend)
	}
	// Surface iteration errors instead of returning a silently truncated list.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return friendInfo, nil
}
// NewFriendCheck reports whether a pending (ischeck = 0) request from
// username1 to username2 already exists. On query failure it returns
// (true, err) so callers treat the pair as unavailable.
func NewFriendCheck(username1, username2 string) (ok bool, err error) {
	querySql := "select id from gp.friend where username1 = ? and username2 = ? and ischeck = 0"
	var have string
	err = db.DB.QueryRow(querySql, username1, username2).Scan(&have)
	if err == sql.ErrNoRows {
		return false, nil
	}
	if err != nil {
		log.Println("NewFriendCheck Querysql query fail" + err.Error())
		return true, err
	}
	log.Println("NewFriendCheck found")
	return true, nil
}
// NewFriend inserts a pending (ischeck = 0) friend request from username1 to
// username2 and returns the id of the inserted row, looked up afterwards.
// NOTE(review): the insert and the id lookup are two separate statements, not
// a transaction — a concurrent insert for the same pair could return the
// wrong id; consider LastInsertId or a transaction.
func NewFriend(username1, nickname1, id2, username2, nickname2, label2 string) (id string, err error) {
	insertSql := "insert into gp.friend(username1, nickname1, id2, username2, nickname2, label2, ischeck) values (?, ?, ?, ?, ?, ?, 0)"
	stmt, err := db.DB.Prepare(insertSql)
	if err != nil {
		log.Println("NewFriend insertSql fail")
		return "", err
	}
	defer stmt.Close()
	_, err = stmt.Exec(username1, nickname1, id2, username2, nickname2, label2)
	if err != nil {
		log.Println("NewFriend exec fail")
		return "", err
	}
	querySql := "select id from gp.friend where username1 = ? and username2 = ?"
	err = db.DB.QueryRow(querySql, username1, username2).Scan(&id)
	if err != nil {
		if err == sql.ErrNoRows {
			// Row vanished between insert and lookup; report empty id, no error.
			log.Println("FriendId not found")
			return "", nil
		} else {
			log.Println("FriendId query fail" + err.Error())
			return "", err
		}
	}
	return id, nil
}
// PassFriendIdCheck reports whether a friend row with the given id exists.
// On query failure it returns (true, err).
func PassFriendIdCheck(id string) (ok bool, err error) {
	querySql := "select id2 from gp.friend where id = ?"
	var haveid string
	err = db.DB.QueryRow(querySql, id).Scan(&haveid)
	if err == sql.ErrNoRows {
		log.Println("PassFriendIdCheck not found")
		return false, nil
	}
	if err != nil {
		log.Println("PassFriendIdCheckQuerysql query fail" + err.Error())
		return true, err
	}
	return true, nil
}
// PassFriend accepts a friend request by setting its ischeck flag to 1.
func PassFriend(id string) (err error) {
	stmt, err := db.DB.Prepare("update gp.friend set ischeck = 1 where id = ?")
	if err != nil {
		log.Println("PassFriend updateSql fail")
		return err
	}
	defer stmt.Close()
	if _, err = stmt.Exec(id); err != nil {
		log.Println("PassFriend exec fail")
		return err
	}
	return nil
}
// UnPassFriend rejects a friend request by setting its ischeck flag to -1.
func UnPassFriend(id string) (err error) {
	stmt, err := db.DB.Prepare("update gp.friend set ischeck = -1 where id = ?")
	if err != nil {
		log.Println("UnPassFriend updateSql fail")
		return err
	}
	defer stmt.Close()
	if _, err = stmt.Exec(id); err != nil {
		log.Println("UnPassFriend exec fail")
		return err
	}
	return nil
}
|
package main
import (
"fmt"
)
// Person models a person with unexported fields.
type Person struct {
	name string
	sex byte // set from a rune literal below, so Println shows its code point
	age int
}

// printInfo prints the receiver; the value receiver means the struct is
// copied on each call.
func (p Person) printInfo() {
	fmt.Println("p = ", p)
}

// Student embeds Person anonymously and therefore inherits both its fields
// and its methods.
type Student struct {
	Person
	id int
	addr string
}

func main() {
	// Initialize the embedded Person explicitly alongside Student's own fields.
	s := Student{Person: Person{name: "jane", sex: 'f', age: 18}, id: 123, addr: "wh"}
	s.printInfo()
	// Output:
	// p =  {jane 102 18}   ('f' prints as its byte value 102)
	// Takeaway: an anonymous (embedded) field contributes its fields and its
	// methods to the outer struct.
}
|
package types
import (
sdk "github.com/irisnet/irishub/types"
)
// the address for where distributions rewards are withdrawn to by default
// this struct is only used at genesis to feed in default withdraw addresses
type DelegatorWithdrawInfo struct {
	DelegatorAddr sdk.AccAddress `json:"delegator_addr"`
	WithdrawAddr sdk.AccAddress `json:"withdraw_addr"`
}

// GenesisState - all distribution state that must be provided at genesis
type GenesisState struct {
	Params Params `json:"params"`
	FeePool FeePool `json:"fee_pool"`
	ValidatorDistInfos []ValidatorDistInfo `json:"validator_dist_infos"`
	DelegationDistInfos []DelegationDistInfo `json:"delegator_dist_infos"`
	DelegatorWithdrawInfos []DelegatorWithdrawInfo `json:"delegator_withdraw_infos"`
	PreviousProposer sdk.ConsAddress `json:"previous_proposer"`
}
// NewGenesisState bundles the provided distribution state into a GenesisState.
func NewGenesisState(params Params, feePool FeePool, vdis []ValidatorDistInfo,
	ddis []DelegationDistInfo, dwis []DelegatorWithdrawInfo, pp sdk.ConsAddress) GenesisState {
	state := GenesisState{
		Params:                 params,
		FeePool:                feePool,
		ValidatorDistInfos:     vdis,
		DelegationDistInfos:    ddis,
		DelegatorWithdrawInfos: dwis,
		PreviousProposer:       pp,
	}
	return state
}
// DefaultGenesisState returns a minimal genesis state (default params and an
// initial fee pool, no validator or delegation info) for testing.
func DefaultGenesisState() GenesisState {
	state := GenesisState{}
	state.Params = DefaultParams()
	state.FeePool = InitialFeePool()
	return state
}
// DefaultGenesisWithValidators returns a default genesis state pre-populated
// with distribution info for each given validator plus a matching
// self-delegation entry derived from the validator address.
func DefaultGenesisWithValidators(valAddrs []sdk.ValAddress) GenesisState {
	validatorInfos := make([]ValidatorDistInfo, len(valAddrs))
	delegationInfos := make([]DelegationDistInfo, len(valAddrs))
	for i, valAddr := range valAddrs {
		validatorInfos[i] = NewValidatorDistInfo(valAddr, 0)
		delegationInfos[i] = NewDelegationDistInfo(sdk.AccAddress(valAddr), valAddr, 0)
	}
	return GenesisState{
		Params:              DefaultParams(),
		FeePool:             InitialFeePool(),
		ValidatorDistInfos:  validatorInfos,
		DelegationDistInfos: delegationInfos,
	}
}
|
package handlers
import "encoding/json"
// InvalidParam identifies one request parameter that failed validation.
type InvalidParam struct {
	Name string `json:"name"`
	Reason string `json:"reason,omitempty"`
	Code string `json:"code,omitempty"`
}

// Error is an RFC 7807-style problem payload returned by the handlers.
type Error struct {
	Status int `json:"status"`
	Title string `json:"title"`
	Type string `json:"type,omitempty"`
	InvalidParams []InvalidParam `json:"invalid_params,omitempty"`
	Code string `json:"code,omitempty"`
}

// NewErrorWithStatus builds an Error carrying only a title and HTTP status.
func NewErrorWithStatus(title string, status int) *Error {
	e := Error{Title: title, Status: status}
	return &e
}

// NewErrorWithStatusAndCode builds an Error with a title, machine-readable
// code and HTTP status.
func NewErrorWithStatusAndCode(title string, code string, status int) *Error {
	e := Error{Title: title, Status: status, Code: code}
	return &e
}

// String renders the error as its JSON payload; a nil receiver yields "".
func (e *Error) String() string {
	if e == nil {
		return ""
	}
	encoded, _ := json.Marshal(e)
	return string(encoded)
}
|
package honeycombio
import (
"context"
"fmt"
)
// Queries describe all the query-related methods that the Honeycomb API
// supports.
//
// API docs: https://docs.honeycomb.io/api/queries/
type Queries interface {
	// Get a query by its ID.
	Get(ctx context.Context, dataset string, id string) (*QuerySpec, error)
	// Create a new query in this dataset. When creating a new query ID may
	// not be set.
	Create(ctx context.Context, dataset string, c *QuerySpec) (*QuerySpec, error)
}

// queries implements Queries against the shared API client.
type queries struct {
	client *Client
}

// Compile-time proof of interface implementation by type queries.
var _ Queries = (*queries)(nil)
// Get retrieves the query with the given id from dataset.
func (s *queries) Get(ctx context.Context, dataset string, id string) (*QuerySpec, error) {
	var q QuerySpec
	path := fmt.Sprintf("/1/queries/%s/%s", urlEncodeDataset(dataset), id)
	err := s.client.performRequest(ctx, "GET", path, nil, &q)
	return &q, err
}
// Create stores data as a new query in dataset and returns the server's copy.
func (s *queries) Create(ctx context.Context, dataset string, data *QuerySpec) (*QuerySpec, error) {
	var q QuerySpec
	path := "/1/queries/" + urlEncodeDataset(dataset)
	err := s.client.performRequest(ctx, "POST", path, data, &q)
	return &q, err
}
|
package task
import (
"errors"
"fmt"
"github.com/tornadoyi/viking/goplus/runtime"
"sync"
"time"
)
// Task lifecycle states.
const (
	Init int = iota // created but not yet started
	Running
	Finished
	Canceled
)

// Task runs a captured function on its own goroutine, recording state, result
// and error behind a read/write mutex.
type Task struct {
	function *runtime.JITFunc
	state int
	result interface{}
	error error
	stack runtime.StackInfo // creation-site stack trace
	wg *sync.WaitGroup      // released exactly once when the task terminates
	mutex sync.RWMutex
}

// NewTask wraps f(args...) in an unstarted Task with its own WaitGroup.
func NewTask(f interface{}, args... interface{}) *Task {
	return newTask(&sync.WaitGroup{}, f, args...)
}
// State returns the current lifecycle state under a read lock.
func (h *Task) State() int {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	return h.state
}

// Error returns the error produced by the task function, if any.
func (h *Task) Error() error {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	return h.error
}

// Result returns the value produced by the task function, if any.
func (h *Task) Result() interface{} {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	return h.result
}

// Finished reports whether the task ran to completion.
func (h *Task) Finished() bool {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	return h.state == Finished
}

// Canceled reports whether the task was canceled.
func (h *Task) Canceled() bool {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	return h.state == Canceled
}

// Terminated reports whether the task reached a terminal state.
func (h *Task) Terminated() bool {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	return h.state == Finished || h.state == Canceled
}

// StateDesc returns a human-readable name for the current state.
func (h *Task) StateDesc() string {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	switch h.state {
	case Init: return "Init"
	case Running: return "Running"
	case Finished: return "Finished"
	case Canceled: return "Canceled"
	default: return "Invalid"
	}
}
// Start launches the task function on a new goroutine, moving the task from
// Init to Running. It panics if the task is not in the Init state.
// NOTE(review): StateDesc in the panic message re-acquires the read lock
// after the write lock was released — safe, but the reported state may have
// changed in between.
func (h *Task) Start(){
	var stateErr error = nil
	h.mutex.Lock()
	if h.state != Init {
		stateErr = fmt.Errorf("Task can not start, current state is %v", h.StateDesc())
	} else {
		h.state = Running
		h.wg.Add(1)
	}
	h.mutex.Unlock()
	if stateErr != nil { panic(stateErr) }
	go func() {
		defer func(){
			h.mutex.Lock()
			// Only transition to Finished if nobody canceled us meanwhile;
			// Cancel has already released the WaitGroup in that case.
			if h.state == Running {
				h.wg.Done()
				h.state = Finished
			}
			h.mutex.Unlock()
		}()
		// collect results
		result, err := h.function.Call()
		// save result (dropped if the task was canceled while running)
		h.mutex.Lock()
		if h.state != Canceled {
			h.result = result
			h.error = err
		}
		h.mutex.Unlock()
	}()
}
// Cancel moves the task to Canceled. A task canceled before Start never runs
// its transition to Running; canceling a Running task releases its WaitGroup
// so waiters unblock immediately. Canceling a terminated task is a no-op.
//
// BUG FIX: the original read State() and then re-acquired the lock, leaving a
// window in which the task could finish (or another Cancel could run) between
// the check and the wg.Done() — double-releasing the WaitGroup and panicking.
// The state check and the transition now happen under one critical section.
func (h *Task) Cancel() {
	h.mutex.Lock()
	defer h.mutex.Unlock()
	switch h.state {
	case Init:
		h.state = Canceled
		return
	case Canceled, Finished:
		return
	}
	// state == Running: release waiters on behalf of the worker goroutine,
	// whose deferred cleanup skips wg.Done once it sees Canceled.
	h.wg.Done()
	h.state = Canceled
}
// Wait blocks until the task terminates. It panics if the task has not been
// started; it returns immediately if the task already finished.
func (h *Task) Wait(){
	switch h.State() {
	case Init: panic(errors.New("Can't wait for a task that hasn't started"))
	case Finished: return
	}
	h.wg.Wait()
}

// WaitTimeout waits up to timeout for the task to terminate, canceling it if
// the deadline expires first.
func (h *Task) WaitTimeout(timeout time.Duration) {
	c := make(chan struct{})
	go func() {
		defer close(c)
		h.Wait()
	}()
	select {
	case <-c: return
	case <-time.After(timeout):
		h.Cancel()
		return
	}
}

// newTask builds a Task around f(args...), capturing the creation-site stack.
func newTask(wg *sync.WaitGroup, f interface{}, args... interface{}) *Task {
	return &Task{
		function: runtime.NewJITFunc(f, args...),
		state: Init,
		result: nil,
		error: nil,
		stack: runtime.Trace(2), // skip newTask and its public wrapper
		wg: wg,
		mutex: sync.RWMutex{},
	}
}
package main
// main prints a greeting using the built-in print function (no imports needed).
func main(){
	print("hello world\n")
}
/*
//launch.json
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Launch file",
"type": "go",
"request": "launch",
"mode": "debug",
"program": "${workspaceRoot}/GoVSCode"
}
]
}
*/ |
package rolluptracer
import (
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
)
// RollupTracer wraps another tracer, and produces spans which roll up
// tags and durations to the nearest ancestor span from the parent tracer.
type RollupTracer struct {
	parent opentracing.Tracer // the "real" tracer that receives rolled-up spans
}
// NewRollupTracer returns a RollupTracer that rolls spans up into the
// provided parent tracer.
func NewRollupTracer(parent opentracing.Tracer) opentracing.Tracer {
	return &RollupTracer{parent: parent}
}
// findParentContext returns the context referenced by the first ChildOf
// reference in sso, or nil when the span has no parent.
func findParentContext(sso opentracing.StartSpanOptions) opentracing.SpanContext {
	for i := range sso.References {
		if sso.References[i].Type == opentracing.ChildOfRef {
			return sso.References[i].ReferencedContext
		}
	}
	return nil
}
// StartSpan creates, starts, and returns a new Span with the given `operationName` and
// incorporate the given StartSpanOption `opts`.
//
// RPC/producer/consumer spans always become "real" spans on the parent
// tracer; spans with no ChildOf parent become wrapped real spans; any other
// child span becomes a lightweight rollup span that accumulates tags and
// time into its nearest real ancestor.
func (t *RollupTracer) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
	sso := opentracing.StartSpanOptions{}
	for _, o := range opts {
		o.Apply(&sso)
	}
	if sso.Tags != nil {
		// Always start a new "real" span for the two ends of an RPC call or producer/consumer request
		kind := sso.Tags[string(ext.SpanKind)]
		if kind == ext.SpanKindRPCClientEnum ||
			kind == ext.SpanKindRPCServerEnum ||
			kind == ext.SpanKindConsumerEnum ||
			kind == ext.SpanKindProducerEnum {
			return t.StartRealSpan(operationName, opts...)
		}
	}
	if sso.StartTime.IsZero() {
		sso.StartTime = time.Now()
	}
	parentCtx := findParentContext(sso)
	if parentCtx == nil {
		// No parent: this is a trace root, so delegate to the real tracer.
		return &wrapperSpan{
			tracer: t,
			context: &wrapperSpanContext{
				root: t.parent.StartSpan(operationName, opts...),
			},
		}
	}
	var context *rollupSpanContext
	if rc, ok := parentCtx.(*rollupSpanContext); ok {
		// Child of a rollup span: share its root and chain the contexts.
		context = &rollupSpanContext{
			root: rc.root,
			parent: rc,
		}
	} else if wc, ok := parentCtx.(*wrapperSpanContext); ok {
		// Child of a wrapped real span: that span becomes our rollup root.
		context = &rollupSpanContext{
			root: wc.root,
		}
	} else {
		// This should never happen, but we'll return a wrapped "real" span just in case
		return &wrapperSpan{
			tracer: t,
			context: &wrapperSpanContext{
				root: t.parent.StartSpan(operationName, opts...),
			},
		}
	}
	span := &rollupSpan{
		tracer: t,
		context: context,
		startTime: sso.StartTime,
	}
	span.SetOperationName(operationName)
	if sso.Tags != nil {
		for k, v := range sso.Tags {
			span.SetTag(k, v)
		}
	}
	return span
}
// StartRealSpan delegates to the parent tracer to start a "real" span. This creates a new
// rollup context.
//
// ChildOf references pointing at rollup/wrapper contexts are rewritten to
// the underlying real context so the parent tracer sees a parent it owns.
// NOTE(review): opts is mutated in place, so the caller's slice is changed —
// confirm callers never reuse the same options slice.
func (t *RollupTracer) StartRealSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
	for i, o := range opts {
		if ref, ok := o.(opentracing.SpanReference); ok && ref.Type == opentracing.ChildOfRef {
			ctx := ref.ReferencedContext
			if rc, ok := ctx.(*rollupSpanContext); ok {
				ref.ReferencedContext = rc.root.Context()
			} else if rc, ok := ctx.(*wrapperSpanContext); ok {
				ref.ReferencedContext = rc.root.Context()
			}
			opts[i] = ref
		}
	}
	return &wrapperSpan{
		tracer: t,
		context: &wrapperSpanContext{
			root: t.parent.StartSpan(operationName, opts...),
		},
	}
}
// Inject takes the `sm` SpanContext instance and injects it for
// propagation within `carrier`. The actual type of `carrier` depends on
// the value of `format`. Rollup and wrapper contexts are unwrapped to the
// underlying real context before delegating to the parent tracer.
func (t *RollupTracer) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error {
	if rollup, ok := sm.(*rollupSpanContext); ok {
		sm = rollup.root.Context()
	}
	if wrapper, ok := sm.(*wrapperSpanContext); ok {
		sm = wrapper.root.Context()
	}
	return t.parent.Inject(sm, format, carrier)
}
// Extract returns a SpanContext instance given `format` and `carrier`.
// Extraction is a pure pass-through: contexts arriving off the wire always
// belong to the parent tracer.
func (t *RollupTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
	return t.parent.Extract(format, carrier)
}
|
// Copyright 2016 Matthew Endsley
// All rights reserved
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted providing that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
package main
import (
"fmt"
"io"
"net"
"os"
"os/user"
"strconv"
"strings"
"sync"
"time"
"github.com/mendsley/parchment/binfmt"
pnet "github.com/mendsley/parchment/net"
)
// InputManager owns the set of listening Inputs and the output chain that
// incoming log chains are written to.
type InputManager struct {
	wg sync.WaitGroup // counts live listeners and per-connection handlers
	currentChain *RefOutputChain // active output chain; guarded by currentChainLock
	currentChainLock sync.RWMutex
	inputs []*Input
}
// Input is a single configured listener plus its live connections.
type Input struct {
	address string // configured address string, e.g. "tcp://host:port"
	l net.Listener
	lwait sync.WaitGroup // signals when the accept loop has exited
	timeout time.Duration // per-read/ack deadline; 0 means no timeout
	closing bool // set before l.Close() so run() can tell shutdown from failure
	connectionLock sync.Mutex
	connections map[net.Conn]*sync.Mutex // per-connection serve locks; nil once closed
}
// RefOutputChain is a reference-counted wrapper around an OutputChain so a
// superseded chain is only closed after all in-flight writers release it.
type RefOutputChain struct {
	Chain OutputChain
	wg sync.WaitGroup
}
// Release drops a reference taken by InputManager.AcquireOutputs.
func (roc *RefOutputChain) Release() {
	roc.wg.Done()
}
// Run starts the inputs described by config and blocks until every input
// and connection handler has exited, then drains and closes the final
// output chain.
func (im *InputManager) Run(config *Config) {
	im.currentChain = new(RefOutputChain)
	im.Reconfigure(config)
	// wait for inputs to die off
	im.wg.Wait()
	// grab the current output chain
	im.currentChainLock.Lock()
	chain := im.currentChain
	im.currentChainLock.Unlock()
	// wait for in-flight holders to release the chain before closing it
	chain.wg.Wait()
	chain.Chain.Close()
}
// Reconfigure the input manager for a new configuration.
//
// Swaps in the new output chain, stops inputs whose address is no longer
// configured, starts listeners for newly configured addresses, and finally
// drains and closes the previous output chain.
func (im *InputManager) Reconfigure(config *Config) {
	// replace the output chain
	refchain := &RefOutputChain{
		Chain: config.Outputs,
	}
	im.currentChainLock.Lock()
	oldchain := im.currentChain
	im.currentChain = refchain
	im.currentChainLock.Unlock()
	// kill off inputs that are no longer in the list
	for _, input := range im.inputs {
		index := -1
		for ii := range config.Inputs {
			if input.address == config.Inputs[ii].Address {
				index = ii
				break
			}
		}
		if index == -1 {
			input.closing = true
			input.close()
		}
	}
	// remove inputs that are now closed (in-place filter)
	write := 0
	for _, input := range im.inputs {
		if !input.closing {
			im.inputs[write] = input
			write++
		}
	}
	im.inputs = im.inputs[:write]
	// start inputs that are new to the list
	for _, input := range config.Inputs {
		index := -1
		for ii := range im.inputs {
			if input.Address == im.inputs[ii].address {
				index = ii
				break
			}
		}
		if index == -1 {
			in := &Input{
				address: input.Address,
				timeout: time.Duration(input.TimeoutMS) * time.Millisecond,
				connections: make(map[net.Conn]*sync.Mutex),
			}
			// Addresses look like "network://path-or-host"; split once at the colon.
			addrParts := strings.SplitN(input.Address, ":", 2)
			if len(addrParts) != 2 || !strings.HasPrefix(addrParts[1], "//") {
				panic("Configuration compiled, but is invalid: " + input.Address)
			}
			// try to remove the existing socket; abstract unix sockets
			// (leading '@') have no filesystem presence to remove
			isNonAbstractUnix := addrParts[0] == "unix" && !strings.HasPrefix(addrParts[1][2:], "@")
			if isNonAbstractUnix {
				os.Remove(addrParts[1][2:])
			}
			l, err := net.Listen(addrParts[0], addrParts[1][2:])
			if err != nil {
				fmt.Fprintf(os.Stderr, "ERROR: Failed to create listener for %s: %v\n", input.Address, err)
				continue
			}
			// adjust permissions
			if isNonAbstractUnix {
				// set permissions: parse as octal first, then decimal
				if input.FileMode != "" {
					mode, err := strconv.ParseUint(input.FileMode, 8, 32)
					if err != nil {
						mode, err = strconv.ParseUint(input.FileMode, 10, 32)
						if err != nil {
							fmt.Fprintf(os.Stderr, "ERROR: Failed to parse file permissions for %s: %v\n", input.Address, err)
							continue
						}
					}
					err = os.Chmod(addrParts[1][2:], os.ModeSocket|os.FileMode(mode))
					if err != nil {
						fmt.Fprintf(os.Stderr, "ERROR: Failed to change permissions on %s: %v\n", input.Address, err)
						continue
					}
				}
				if input.User != "" {
					var groupid uint64
					// Accept a numeric uid directly; otherwise look the name up.
					userid, err := strconv.ParseUint(input.User, 10, 32)
					if err != nil {
						// NOTE(review): this local shadows the os/user package name.
						user, err := user.Lookup(input.User)
						if err != nil {
							fmt.Fprintf(os.Stderr, "ERROR: Failed to lookup user %s: %v", input.User, err)
							continue
						}
						userid, err = strconv.ParseUint(user.Uid, 10, 32)
						if err != nil {
							fmt.Fprintf(os.Stderr, "ERROR: Malformed user %s: %v", user.Uid, err)
							continue
						}
						// ignore error, and default to 'root' group
						groupid, _ = strconv.ParseUint(user.Gid, 10, 32)
					}
					if input.Group != "" {
						gid, err := strconv.ParseUint(input.Group, 10, 32)
						if err != nil {
							// NOTE(review): the parse failure is only reported; gid
							// (zero) is still assigned below — confirm intended.
							fmt.Fprintf(os.Stderr, "ERROR: Failed to parse group id %s (must be numeric right now): %v", input.Group, err)
						}
						groupid = gid
					}
					err = os.Chown(addrParts[1][2:], int(userid), int(groupid))
					if err != nil {
						// NOTE(review): this message prints input.Group; input.Address
						// looks like the intended value.
						fmt.Fprintf(os.Stderr, "ERROR: Failed to change owner on %s: %v\n", input.Group, err)
						continue
					}
				}
			}
			in.l = l
			im.inputs = append(im.inputs, in)
			im.wg.Add(1)
			in.lwait.Add(1)
			go func(input *Input) {
				defer im.wg.Done()
				defer input.lwait.Done()
				err := input.run(im)
				if err != nil {
					fmt.Fprintf(os.Stderr, "ERROR: Input %s terminated unexpectedly: %v\n", input.address, err)
				}
			}(in)
		}
	}
	// wait for the previous chain to be released
	oldchain.wg.Wait()
	oldchain.Chain.Close()
}
// AcquireOutputs returns the current output chain with a reference taken;
// the caller must call Release when finished writing.
func (im *InputManager) AcquireOutputs() *RefOutputChain {
	im.currentChainLock.RLock()
	defer im.currentChainLock.RUnlock()
	chain := im.currentChain
	chain.wg.Add(1)
	return chain
}
// run accepts connections on the input's listener until the listener is
// closed, spawning one handler goroutine per connection. Each connection is
// registered with its own lock in input.connections so close() can tear it
// down safely. Returns non-nil only on unexpected accept failures.
func (input *Input) run(im *InputManager) error {
	defer fmt.Fprintf(os.Stderr, "INFO: No longer listening at %s\n", input.address)
	fmt.Fprintf(os.Stderr, "INFO: Listening for connections at %s\n", input.address)
	for {
		conn, err := input.l.Accept()
		if err != nil {
			// Accept fails once the listener is closed; that is only an
			// error if we were not asked to shut down.
			if !input.closing {
				return fmt.Errorf("Failed to accept - %v", err)
			}
			fmt.Fprintf(os.Stderr, "INFO: Closing input %s\n", input.address)
			return nil
		}
		connLock := new(sync.Mutex)
		input.connectionLock.Lock()
		input.connections[conn] = connLock
		input.connectionLock.Unlock()
		im.wg.Add(1)
		go func(conn net.Conn, l *sync.Mutex) {
			defer func() {
				// Deregister (unless close() already detached the table),
				// close the socket, and release the manager's wait slot.
				input.connectionLock.Lock()
				if input.connections != nil {
					delete(input.connections, conn)
				}
				input.connectionLock.Unlock()
				conn.Close()
				im.wg.Done()
			}()
			err := input.serve(conn, im, l)
			if err != nil && !input.closing {
				fmt.Fprintf(os.Stderr, "ERROR: Failed to serve %v for %s: %v\n", conn.RemoteAddr(), input.address, err)
			}
		}(conn, connLock)
	}
}
// close stops the listener, waits for the accept loop to return, then
// detaches the connection table and closes every live connection. Each
// connection's serve lock is acquired first so the socket is not closed
// while its handler holds the lock between reads (see serve).
func (input *Input) close() {
	input.l.Close()
	input.lwait.Wait()
	input.connectionLock.Lock()
	m := input.connections
	// A nil table tells handler goroutines deregistration is no longer needed.
	input.connections = nil
	input.connectionLock.Unlock()
	for conn, lock := range m {
		lock.Lock()
		conn.Close()
		lock.Unlock()
	}
}
func calcTimeout(now time.Time, d time.Duration) time.Time {
if d == 0 {
return time.Time{}
}
return now.Add(d)
}
// serve negotiates a parchment connection and pumps incoming log chains
// into the current output chain until EOF or error.
//
// connLock is held whenever this goroutine is processing data and released
// only around the blocking Read, which is what lets close() tear down the
// connection mid-read.
func (input *Input) serve(conn net.Conn, im *InputManager, connLock *sync.Mutex) error {
	connLock.Lock()
	defer connLock.Unlock()
	nr, err := pnet.NewConnReader(conn, calcTimeout(time.Now(), input.timeout))
	if err != nil {
		return fmt.Errorf("Failed to negotiate connection: %v", err)
	}
	defer nr.Close()
	for {
		now := time.Now()
		// Drop the lock only for the blocking read; see Input.close.
		connLock.Unlock()
		chain, err := nr.Read(calcTimeout(now, input.timeout))
		connLock.Lock()
		if chain != nil {
			if err := im.processChain(chain); err != nil {
				return err
			}
			// NOTE(review): a Read error that accompanied a non-nil chain is
			// overwritten by the Acknowledge result here — confirm intended.
			err = nr.AcknowledgeLast(calcTimeout(time.Now(), input.timeout))
		}
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("Failed to read incoming data: %v", err)
		}
	}
	return nil
}
// processChain routes a chain of log records through the current output
// chain, splitting it per processor until nothing remains.
func (im *InputManager) processChain(chain *binfmt.Log) error {
	outputs := im.AcquireOutputs()
	defer outputs.Release()
	for current := chain; current != nil; {
		processor, rest := outputs.Chain.SplitForProcessor(current)
		if processor != nil {
			if err := processor.WriteChain(current); err != nil {
				return fmt.Errorf("Failed to process chain for category %v: %v", current.Category, err)
			}
		}
		current = rest
	}
	return nil
}
|
package catfile
import (
"errors"
"fmt"
"got/internal/got/filesystem"
"github.com/spf13/cobra"
"got/internal/objects"
)
// Cmd is the cobra command for `cat-file`; its Run handler is wired up in
// init so it can capture the flag pointers.
var Cmd = &cobra.Command{
	Use: "cat-file { -t | -p } object",
	DisableFlagsInUseLine: true,
	Short: "Provide content or type and size information for repository objects",
	Args: cobra.ExactArgs(1),
}
// flag identifies which of the mutually exclusive cat-file modes was chosen.
type flag string
const (
	// flagType selects printing the object's type (-t).
	flagType = "type"
	// flagPrettyPrint selects printing the object's content (-p).
	flagPrettyPrint = "pretty-print"
)
// init registers the -t/-p flags and binds the Run handler, which reads the
// flag values at execution time through the captured pointers.
func init() {
	showType := Cmd.Flags().BoolP(flagType, "t", false, "Show type of object")
	prettyPrintContent := Cmd.Flags().BoolP(flagPrettyPrint, "p", false, "Pretty-print content of object")
	Cmd.Run = func(cmd *cobra.Command, args []string) {
		run(cmd, args, *showType, *prettyPrintContent)
	}
}
// run implements cat-file: it resolves the object named by args[0] and
// prints either its type (-t) or its content (-p). Errors are reported to
// stdout and abort the command.
func run(cmd *cobra.Command, args []string, showType bool, prettyPrint bool) {
	flagUsed, err := flagsAreCompatible(showType, prettyPrint)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	g, err := filesystem.NewGot()
	if err != nil {
		fmt.Println(err)
		return
	}
	id, err := objects.IdFromString(args[0])
	if err != nil {
		fmt.Println(err)
		return
	}
	t, err := g.Objects.TypeOf(id)
	if err != nil {
		fmt.Println(err)
		return
	}
	var o objects.Object
	switch t {
	case objects.TypeBlob:
		o, err = g.Objects.GetBlob(id)
	case objects.TypeTree:
		o, err = g.Objects.GetTree(id)
	default:
		fmt.Println("no object found")
		return
	}
	// Surface retrieval failures: the original discarded these errors, which
	// could leave o as a zero Object and print misleading output below.
	if err != nil {
		fmt.Println(err)
		return
	}
	switch flagUsed {
	case flagType:
		fmt.Println(o.Type())
	case flagPrettyPrint:
		fmt.Println(o.Content())
	}
}
// flagsAreCompatible validates the -t/-p combination: exactly one of the
// two must be set, and the chosen mode is returned.
func flagsAreCompatible(showType, prettyPrint bool) (flag, error) {
	switch {
	case showType && prettyPrint:
		return "", errors.New("-t and -p are not compatible")
	case showType:
		return flagType, nil
	case prettyPrint:
		return flagPrettyPrint, nil
	default:
		return "", errors.New("one of -t or -p flag must be used")
	}
}
|
package main
import (
"fmt"
"testing"
)
// TestFilterForGoRepos checks filterForGoRepos against language byte-count
// maps sampled from real repositories: only repos whose dominant language
// is Go should be accepted.
func TestFilterForGoRepos(t *testing.T) {
	testCases := []struct {
		languages map[string]int // language name -> bytes of code
		expected bool // whether the repo should classify as a Go repo
	}{
		{
			languages: map[string]int{"Go": 24031},
			expected: true,
		},
		{
			languages: map[string]int{"Perl": 19111, "Shell": 8593, "Perl 6": 2945, "Makefile": 1355, "Ruby": 935, "Go": 401},
			expected: false,
		},
		{
			languages: map[string]int{"Shell": 12464},
			expected: false,
		},
		{
			languages: map[string]int{"Shell": 6455},
			expected: false,
		},
		{
			languages: map[string]int{"Go": 24369, "Perl": 2497, "Makefile": 1582},
			expected: true,
		},
		{
			languages: map[string]int{"Haskell": 193968, "Shell": 3663},
			expected: false,
		},
		{
			languages: map[string]int{"Shell": 23490, "Ruby": 1444},
			expected: false,
		},
		{
			languages: map[string]int{"Vim script": 31878, "Shell": 6547},
			expected: false,
		},
		{
			languages: map[string]int{"Shell": 8131, "Batchfile": 5516},
			expected: false,
		},
		{
			languages: map[string]int{"CSS": 7643, "Ruby": 6012, "HTML": 2970, "JavaScript": 537, "Shell": 218},
			expected: false,
		},
	}
	for _, tC := range testCases {
		desc := fmt.Sprint(tC.languages)
		t.Run(desc, func(t *testing.T) {
			if filterForGoRepos(tC.languages) != tC.expected {
				t.Fatalf("'%s' failed", desc)
			}
		})
	}
}
|
package main
import (
"fmt"
"resk/infra/algo"
)
// main prints the result of algo.AfterShuffle(10, 10000).
// NOTE(review): the semantics of AfterShuffle live in resk/infra/algo and
// are not visible here — presumably a shuffle-distribution helper; confirm.
func main() {
	fmt.Printf("%v\n",
		algo.AfterShuffle(int64(10), int64(100)*100))
}
|
// Copyright 2017 Vector Creations Ltd
// Copyright 2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"github.com/matrix-org/gomatrixserverlib"
)
// QueryLatestEventsAndStateRequest is a request to QueryLatestEventsAndState
type QueryLatestEventsAndStateRequest struct {
// The room ID to query the latest events for.
RoomID string `json:"room_id"`
// The state key tuples to fetch from the room current state.
// If this list is empty or nil then *ALL* current state events are returned.
StateToFetch []gomatrixserverlib.StateKeyTuple `json:"state_to_fetch"`
}
// QueryLatestEventsAndStateResponse is a response to QueryLatestEventsAndState
// This is used when sending events to set the prev_events, auth_events and depth.
// It is also used to tell whether the event is allowed by the event auth rules.
type QueryLatestEventsAndStateResponse struct {
// Does the room exist?
// If the room doesn't exist this will be false and LatestEvents will be empty.
RoomExists bool `json:"room_exists"`
// The room version of the room.
RoomVersion gomatrixserverlib.RoomVersion `json:"room_version"`
// The latest events in the room.
// These are used to set the prev_events when sending an event.
LatestEvents []gomatrixserverlib.EventReference `json:"latest_events"`
// The state events requested.
// This list will be in an arbitrary order.
// These are used to set the auth_events when sending an event.
// These are used to check whether the event is allowed.
StateEvents []gomatrixserverlib.HeaderedEvent `json:"state_events"`
// The depth of the latest events.
// This is one greater than the maximum depth of the latest events.
// This is used to set the depth when sending an event.
Depth int64 `json:"depth"`
}
// QueryStateAfterEventsRequest is a request to QueryStateAfterEvents
type QueryStateAfterEventsRequest struct {
// The room ID to query the state in.
RoomID string `json:"room_id"`
// The list of previous events to return the events after.
PrevEventIDs []string `json:"prev_event_ids"`
// The state key tuples to fetch from the state
StateToFetch []gomatrixserverlib.StateKeyTuple `json:"state_to_fetch"`
}
// QueryStateAfterEventsResponse is a response to QueryStateAfterEvents
type QueryStateAfterEventsResponse struct {
// Does the room exist on this roomserver?
// If the room doesn't exist this will be false and StateEvents will be empty.
RoomExists bool `json:"room_exists"`
// The room version of the room.
RoomVersion gomatrixserverlib.RoomVersion `json:"room_version"`
// Do all the previous events exist on this roomserver?
// If some of previous events do not exist this will be false and StateEvents will be empty.
PrevEventsExist bool `json:"prev_events_exist"`
// The state events requested.
// This list will be in an arbitrary order.
StateEvents []gomatrixserverlib.HeaderedEvent `json:"state_events"`
}
// QueryEventsByIDRequest is a request to QueryEventsByID
type QueryEventsByIDRequest struct {
// The event IDs to look up.
EventIDs []string `json:"event_ids"`
}
// QueryEventsByIDResponse is a response to QueryEventsByID
type QueryEventsByIDResponse struct {
// A list of events with the requested IDs.
// If the roomserver does not have a copy of a requested event
// then it will omit that event from the list.
// If the roomserver thinks it has a copy of the event, but
// fails to read it from the database then it will fail
// the entire request.
// This list will be in an arbitrary order.
Events []gomatrixserverlib.HeaderedEvent `json:"events"`
}
// QueryMembershipForUserRequest is a request to QueryMembership
type QueryMembershipForUserRequest struct {
// ID of the room to fetch membership from
RoomID string `json:"room_id"`
// ID of the user for whom membership is requested
UserID string `json:"user_id"`
}
// QueryMembershipForUserResponse is a response to QueryMembership
type QueryMembershipForUserResponse struct {
// The EventID of the latest "m.room.member" event for the sender,
// if HasBeenInRoom is true.
EventID string `json:"event_id"`
// True if the user has been in room before and has either stayed in it or left it.
HasBeenInRoom bool `json:"has_been_in_room"`
// True if the user is in room.
IsInRoom bool `json:"is_in_room"`
// The current membership
Membership string
}
// QueryMembershipsForRoomRequest is a request to QueryMembershipsForRoom
type QueryMembershipsForRoomRequest struct {
// If true, only returns the membership events of "join" membership
JoinedOnly bool `json:"joined_only"`
// ID of the room to fetch memberships from
RoomID string `json:"room_id"`
// ID of the user sending the request
Sender string `json:"sender"`
}
// QueryMembershipsForRoomResponse is a response to QueryMembershipsForRoom
type QueryMembershipsForRoomResponse struct {
// The "m.room.member" events (of "join" membership) in the client format
JoinEvents []gomatrixserverlib.ClientEvent `json:"join_events"`
// True if the user has been in room before and has either stayed in it or
// left it.
HasBeenInRoom bool `json:"has_been_in_room"`
}
// QueryServerAllowedToSeeEventRequest is a request to QueryServerAllowedToSeeEvent
type QueryServerAllowedToSeeEventRequest struct {
// The event ID to look up invites in.
EventID string `json:"event_id"`
// The server interested in the event
ServerName gomatrixserverlib.ServerName `json:"server_name"`
}
// QueryServerAllowedToSeeEventResponse is a response to QueryServerAllowedToSeeEvent
type QueryServerAllowedToSeeEventResponse struct {
	// Whether the server in question is allowed to see the event
	AllowedToSeeEvent bool `json:"can_see_event"`
}
// QueryMissingEventsRequest is a request to QueryMissingEvents
type QueryMissingEventsRequest struct {
// Events which are known previous to the gap in the timeline.
EarliestEvents []string `json:"earliest_events"`
// Latest known events.
LatestEvents []string `json:"latest_events"`
// Limit the number of events this query returns.
Limit int `json:"limit"`
// The server interested in the event
ServerName gomatrixserverlib.ServerName `json:"server_name"`
}
// QueryMissingEventsResponse is a response to QueryMissingEvents
type QueryMissingEventsResponse struct {
	// Missing events, in arbitrary order.
	Events []gomatrixserverlib.HeaderedEvent `json:"events"`
}
// QueryStateAndAuthChainRequest is a request to QueryStateAndAuthChain
type QueryStateAndAuthChainRequest struct {
// The room ID to query the state in.
RoomID string `json:"room_id"`
// The list of prev events for the event. Used to calculate the state at
// the event
PrevEventIDs []string `json:"prev_event_ids"`
// The list of auth events for the event. Used to calculate the auth chain
AuthEventIDs []string `json:"auth_event_ids"`
// Should state resolution be ran on the result events?
// TODO: check call sites and remove if we always want to do state res
ResolveState bool `json:"resolve_state"`
}
// QueryStateAndAuthChainResponse is a response to QueryStateAndAuthChain
type QueryStateAndAuthChainResponse struct {
// Does the room exist on this roomserver?
// If the room doesn't exist this will be false and StateEvents will be empty.
RoomExists bool `json:"room_exists"`
// The room version of the room.
RoomVersion gomatrixserverlib.RoomVersion `json:"room_version"`
// Do all the previous events exist on this roomserver?
// If some of previous events do not exist this will be false and StateEvents will be empty.
PrevEventsExist bool `json:"prev_events_exist"`
// The state and auth chain events that were requested.
// The lists will be in an arbitrary order.
StateEvents []gomatrixserverlib.HeaderedEvent `json:"state_events"`
AuthChainEvents []gomatrixserverlib.HeaderedEvent `json:"auth_chain_events"`
}
// QueryRoomVersionCapabilitiesRequest asks for the default room version
type QueryRoomVersionCapabilitiesRequest struct{}
// QueryRoomVersionCapabilitiesResponse is a response to QueryRoomVersionCapabilitiesRequest
type QueryRoomVersionCapabilitiesResponse struct {
DefaultRoomVersion gomatrixserverlib.RoomVersion `json:"default"`
AvailableRoomVersions map[gomatrixserverlib.RoomVersion]string `json:"available"`
}
// QueryRoomVersionForRoomRequest asks for the room version for a given room.
type QueryRoomVersionForRoomRequest struct {
RoomID string `json:"room_id"`
}
// QueryRoomVersionForRoomResponse is a response to QueryRoomVersionForRoomRequest
type QueryRoomVersionForRoomResponse struct {
RoomVersion gomatrixserverlib.RoomVersion `json:"room_version"`
}
type QueryPublishedRoomsRequest struct {
// Optional. If specified, returns whether this room is published or not.
RoomID string
}
type QueryPublishedRoomsResponse struct {
// The list of published rooms.
RoomIDs []string
}
|
package main
import "fmt"
// array demonstrates fixed-size arrays versus slices and append semantics.
func array() {
	var a [2]int
	a[0] = 100
	a[1] = 200
	fmt.Println(a)
	b := [2]int{100, 200}
	fmt.Println(b)
	c := []int{100, 200}
	d := append(c, 300)
	fmt.Println(c, d)
}
func slice() {
n := []int{1, 2, 3, 4, 5, 6}
fmt.Println(n)
fmt.Println(n[2])
fmt.Println(n[2:4])
fmt.Println(n[2:])
fmt.Println(n[:2])
fmt.Println(n[:])
n[2] = 100
fmt.Println(n)
n = append(n, 100, 200, 300)
fmt.Println(n)
var board = [][]int{
[]int{1, 2, 3},
[]int{4, 5, 6},
[]int{7, 8, 9},
}
fmt.Println(board)
board2 := [][]int{
[]int{1, 2, 3},
[]int{4, 5, 6},
[]int{7, 8, 9},
}
fmt.Println(board2)
}
// main runs the array demo followed by the slice demo.
func main() {
	array()
	slice()
}
|
package urldispatch
import (
"errors"
"fmt"
"strings"
)
const (
	// nullptr is the sentinel index (max uint8) marking an unresolved slot.
	nullptr = index(^uint8(0))
)
// segment is one node of the routing trie: a literal path element plus the
// argument layout for routes through it and its child segments.
type segment struct {
	value string // literal path element to match
	amap argsMap // parameter/array layout for routes through this node
	next []segment // child segments
}
// args2 accumulates argument values while a path is being dispatched.
type args2 struct {
	psection indexes // per-parameter index into params (nullptr = unset)
	params []string // captured parameter values
	asection indexes // array-section boundaries (indexes into array)
	array []string // captured array values
}
// appendParamValue records a positional parameter value and points the
// next psection slot at it.
func (a *args2) appendParamValue(value string) {
	pIdx := index(len(a.params))
	a.params = append(a.params, value)
	a.psection = append(a.psection, pIdx)
}
// appendArrayValue appends a value to the current array section.
func (a *args2) appendArrayValue(value string) {
	a.array = append(a.array, value)
}
// nextArray opens a new array section by recording the array length at
// which it starts.
func (a *args2) nextArray() {
	aIdx := index(len(a.array))
	a.asection = append(a.asection, aIdx)
}
// addNullPtrParams reserves count unresolved parameter slots, each pointing
// at the nullptr sentinel until a real value arrives.
func (a *args2) addNullPtrParams(count index) {
	for i := index(0); i < count; i++ {
		// point to the largest index.
		a.psection = append(a.psection, nullptr)
	}
}
// addRoute validates that segs share one argsMap and do not conflict with
// existing routes, then inserts them into the trie rooted at d.root.
func (d *Dispatcher) addRoute(segs []segment) error {
	if len(segs) > 0 {
		cseg := segs[0]
		refmap := cseg.amap
		// check if all the amaps are equal
		for _, s := range segs {
			if !s.amap.eq(refmap) {
				return errors.New("amaps on the segments are not equal.")
			}
		}
		// check if the segments are addable
		err := d.root.addable2(segs, refmap, 0)
		if err != nil {
			return err
		}
		// insert the segments.
		d.root.insertSegments(segs)
	}
	return nil
}
// dispatchPath resolves pathSegs against the routing trie, returning the
// matched route's argsMap together with the collected argument values.
func (d Dispatcher) dispatchPath(pathSegs []string) (Outargs, error) {
	ar := args2{}
	if len(pathSegs) > 0 {
		// idx starts at -1: the root segment itself consumes no path element.
		return d.root.dispatchPath(pathSegs, ar, -1)
	}
	return Outargs{}, errors.New("nothing to dispatch.")
}
// dispatchPath walks the trie consuming pathSegs one element at a time.
//
// An element matching a child segment's literal value descends into that
// child; otherwise the element is captured as a parameter value (while the
// current depth still has room) or as an array item. idx is the depth of
// the current segment (-1 for the root); pIdx counts parameters captured at
// this depth.
func (s segment) dispatchPath(pathSegs []string, ar args2, idx int) (Outargs, error) {
	var pIdx index
	for len(pathSegs) > 0 {
		ps := pathSegs[0]
		for _, cs := range s.next {
			if cs.value == ps {
				if idx > -1 {
					pCount := cs.amap.psections[idx]
					// fix the array args: pad parameters not supplied at this depth
					ar.addNullPtrParams(pCount - pIdx)
				}
				if cs.amap.asections[idx+1] > 0 {
					// the child accepts array items: open a new section.
					ar.nextArray()
				}
				return cs.dispatchPath(pathSegs[1:], ar, idx+1)
			}
		}
		if idx == -1 {
			return Outargs{}, errors.New("nothing to dispatch for " + ps)
		}
		// if there is room for another param.
		hasRoomForParam, err := s.amap.psections.isItemAtIndexBigger(idx, pIdx)
		if err != nil {
			return Outargs{}, err
		}
		if hasRoomForParam {
			ar.appendParamValue(ps)
			pIdx += 1
			pathSegs = pathSegs[1:]
		} else if s.amap.asections[idx] > 0 {
			// has room for another array item.
			ar.appendArrayValue(ps)
			pathSegs = pathSegs[1:]
		} else {
			return Outargs{}, errors.New("param overflow with segment:" + ps + fmt.Sprintf(" idx:%v", idx))
		}
	}
	if len(s.next) > 0 {
		// the path ended before reaching a leaf route.
		return Outargs{}, errors.New("only partial dispatch.")
	} else {
		return Outargs{amap: s.amap, ar: ar}, nil
	}
}
// dispatchQuery fills query-string parameter values into ar.
//
// am.params[idx:] are the query-parameter names for the matched route; the
// corresponding slots are pre-filled with nullptr and then populated from
// the "k=v" pairs in query. Unknown keys are ignored; repeated keys keep
// the first value seen.
func dispatchQuery(query string, am argsMap, ar args2, idx index) (args2, error) {
	pc := index(len(am.params)) - idx
	ar.addNullPtrParams(pc)
	kvs := strings.Split(query, "&")
	for _, rkv := range kvs {
		kv := strings.Split(rkv, "=")
		if len(kv) != 2 {
			return ar, errors.New("invalid query")
		}
		for i := idx; i < pc+idx; i++ {
			if kv[0] == am.params[i] {
				if ar.psection[i] == nullptr {
					ar.psection[i] = index(len(ar.params))
					ar.params = append(ar.params, kv[1])
					// NOTE(review): continue advances the inner name loop,
					// not the outer pair loop — confirm a break to the next
					// pair was not intended.
					continue
				}
			}
		}
	}
	return ar, nil
}
// addable2 checks whether segs can be merged into the trie without
// conflicting with an existing route: matching segments must carry equal
// params at each depth, and a new route may not stop in the middle of an
// existing one (or vice versa).
func (s segment) addable2(segs []segment, amap argsMap, index int) error {
	if len(segs) > 0 {
		for _, cs := range s.next {
			cseg := segs[0]
			if cs.value == cseg.value {
				err := cs.compareParams(amap, index)
				if err != nil {
					return err
				}
				// only check if the segment is not the root segment.
				if index != 0 {
					if len(segs) < 2 {
						return errors.New(`partial route`)
					}
					if len(cs.next) == 0 {
						return errors.New(`partial route detected`)
					}
				}
				return cs.addable2(segs[1:], amap, index+1)
			}
		}
	}
	return nil
}
// compareParams verifies that this segment's argument layout matches amap
// at the given depth, returning an error on mismatch.
func (s segment) compareParams(amap argsMap, index int) error {
	eq, err := s.amap.compareAtIndex(amap, index)
	switch {
	case err != nil:
		return err
	case !eq:
		return errors.New("segment is equal but params differ.")
	default:
		return nil
	}
}
// insertSegments merges the segment list into the trie below s, reusing
// existing children that match and appending new ones.
func (s *segment) insertSegments(segments []segment) {
	if len(segments) == 0 {
		return
	}
	head, rest := segments[0], segments[1:]
	for i := range s.next {
		if s.next[i].value == head.value {
			s.next[i].insertSegments(rest)
			return
		}
	}
	s.next = append(s.next, head)
	// Recurse on the appended element itself, not a copy.
	s.next[len(s.next)-1].insertSegments(rest)
}
|
/*
给定一个链表,删除链表的倒数第 n 个节点,并且返回链表的头结点。
示例:
给定一个链表: 1->2->3->4->5, 和 n = 2.
当删除了倒数第二个节点后,链表变为 1->2->3->5.
*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// removeNthFromEnd deletes the n-th node from the end of the list and
// returns the head, using two pointers kept a fixed distance apart.
func removeNthFromEnd(head *ListNode, n int) *ListNode {
	p1,p2:=head,head
	for n>0{
		if p1.Next==nil{ // the head itself is the target: return early instead of splicing
			return head.Next
		}
		p1=p1.Next // advance p1 by n nodes first
		n--
	}
	for p1.Next!=nil{
		p1=p1.Next // move both together; when p1 hits the end, p2 is just before the target
		p2=p2.Next
	}
	p2.Next=p2.Next.Next // splice out the node after p2
	return head
}
package game
// Puff is a short-lived visual particle that expands and fades before
// destroying itself.
type Puff struct {
	*Entity
	lived float32 // time alive so far
	lifeTime float32 // total lifespan
}
// newPuff spawns a puff entity at (x, y) moving with velocity (vx, vy),
// with width and height each randomized in [minSize, maxSize) and a
// lifetime of 0.1 + randMax(1).
func newPuff(gameMap *Map, x, y, vx, vy, minSize, maxSize float32) *Puff {
	puff := &Puff{}
	puff.Entity = newEntity(gameMap,
		puff, "puff",
		x, y,
		randRange(minSize, maxSize),
		randRange(minSize, maxSize),
	)
	puff.lifeTime = 0.1 + randMax(1)
	puff.vx, puff.vy = vx, vy
	return puff
}
// expand grows the puff around its center: quickly in both dimensions
// during the first 20% of its life, then slowly.
// NOTE(review): only the width grows in the slow phase — confirm the
// height was meant to stay fixed there.
func (puff *Puff) expand(dt float32) {
	cx, cy := puff.GetCenter()
	percent := puff.lived / puff.lifeTime
	if percent < 0.2 {
		puff.w = puff.w + (200+percent)*dt
		puff.h = puff.h + (200+percent)*dt
	} else {
		puff.w = puff.w + (20+percent)*dt
	}
	// Re-anchor the top-left so growth is symmetric about the center.
	puff.l = cx - puff.w/2
	puff.t = cy - puff.h/2
}
// update advances the puff's age; at end of life it destroys itself,
// otherwise it expands and drifts by its velocity.
func (puff *Puff) update(dt float32) {
	puff.lived = puff.lived + dt
	if puff.lived >= puff.lifeTime {
		puff.destroy()
	} else {
		puff.expand(dt)
		next_l, next_t := puff.l+puff.vx*dt, puff.t+puff.vy*dt
		puff.body.Update(next_l, next_t)
		puff.l, puff.t = next_l, next_t
	}
}
// draw renders the puff as a filled rectangle whose red/green channels fade
// from 255 toward 100 over roughly the first 55% of its life (the 1.8
// factor saturates percent at 1 early); blue is a constant 100.
func (puff *Puff) draw(debug bool) {
	percent := min(1, (puff.lived/puff.lifeTime)*1.8)
	r, g, b := 255-floor(155*percent), 255-floor(155*percent), float32(100)
	l, t, w, h := puff.Extents()
	drawFilledRectangle(l, t, w, h, r, g, b)
}
|
package main
import (
"fmt"
"movies_api/handlers"
"log"
)
// main starts the movies HTTP API server on port 8080 and exits fatally if
// the server stops with an error.
func main() {
	server := handlers.NewServer()
	fmt.Println("Server is running on port 8080")
	log.Fatal(server.ListenAndServe())
}
package fashionjson
import (
"encoding/json"
"log"
"strings"
"testing"
)
// TestJsonDecode walks the training-set metadata header token by token with
// a streaming json.Decoder, then decodes the "info" object into TrainInfo.
func TestJsonDecode(t *testing.T) {
	const jsonStream = `{
		"info": {
		"url": "https://www.wish.com",
		"dateCreated": "2-27-2018",
		"version": "2",
		"description": "Train Set for FGVC5 CVPR 2018 by https://www.wish.com",
		"year": "2018"
		},
	} `
	dec := json.NewDecoder(strings.NewReader(jsonStream))
	// read open bracket
	token, err := dec.Token()
	if err != nil {
		// Use t.Fatal so a failure is attributed to this test instead of
		// killing the whole test binary via log.Fatal.
		t.Fatal(err)
	}
	log.Printf("%T: %v\n", token, token)
	// read the "info" key
	token, err = dec.Token()
	if err != nil {
		t.Fatal(err)
	}
	log.Printf("%T: %v\n", token, token)
	// decode the info object itself
	var info TrainInfo
	err = dec.Decode(&info)
	if err != nil {
		t.Fatal(err)
	}
	log.Printf("%T: %+v\n", info, info)
	// The original ended with log.Fatal("Debug"), a leftover debugging
	// statement that made the test unconditionally fail; removed.
}
|
package main
import (
"net/http"
"github.com/atselitsky/wb-mqtt-web-client-go/pkg/devices"
"github.com/atselitsky/wb-mqtt-web-client-go/pkg/mqttConn"
"github.com/atselitsky/wb-mqtt-web-client-go/pkg/rules"
"github.com/atselitsky/wb-mqtt-web-client-go/pkg/websocketConn"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
)
// CORS returns a gin middleware that sets permissive CORS headers and
// short-circuits OPTIONS preflight requests with a 200 response.
func CORS() gin.HandlerFunc {
	// To allow CORS
	return func(c *gin.Context) {
		c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
		c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
		c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT, DELETE")
		if c.Request.Method == "OPTIONS" {
			// Preflight: reply immediately without running later handlers.
			c.AbortWithStatus(200)
			return
		}
		c.Next()
	}
}
// main wires up the web client: it loads the rule files, starts the
// websocket hub and the MQTT bridge, then serves the SPA, the websocket
// endpoint, and the RS485 config routes on gin's default port.
func main() {
	rules.GetRulesFiles()
	websocketconn := websocketConn.NewConn()
	go websocketconn.Run()
	client := mqttConn.NewMQTTConn(websocketconn)
	go client.StartMQTTConnection()
	r := gin.Default()
	// r.Use(CORS())
	r.Use(cors.New(cors.Config{
		AllowOrigins: []string{"*"},
		AllowMethods: []string{"GET", "POST", "OPTIONS"},
		AllowHeaders: []string{"Access-Control-Allow-Headers",
			"Content-Type", "Content-Length", "Accept-Encoding",
			"X-CSRF-Token", "Authorization", "accept", "origin", "Cache-Control", "X-Requested-With"},
		ExposeHeaders: []string{"Content-Length"},
		AllowCredentials: true,
	}))
	// Static SPA assets and entry point.
	r.StaticFS("/static", http.Dir("../../web/build/static"))
	r.LoadHTMLFiles("../../web/build/index.html")
	r.GET("/", func(c *gin.Context) {
		c.HTML(200, "index.html", nil)
	})
	// Websocket upgrade endpoint handled by the hub.
	r.GET("/ws", func(c *gin.Context) {
		websocketconn.WsConn(c.Writer, c.Request)
	})
	r.GET("/config/rs485", func(c *gin.Context) {
		devices.GetRS485Config(c)
	})
	r.POST("/config/rs485", devices.PostRS485Config)
	r.Run()
}
// err := os.Remove("users.json")
// if err != nil {
// fmt.Println(err)
// }
// jsonFile, err := os.Create("users.json")
// if err != nil {
// fmt.Println(err)
// }
// defer jsonFile.Close()
// // decoder := json.NewDecoder(c.Request.Body)
// // decoder.Decode(&jsonFile)
// scanner := bufio.NewScanner(c.Request.Body)
// for scanner.Scan() {
// //fmt.Println(scanner.Text())
// //jsonFile.WriteString(scanner.Text())
// fmt.Fprintln(jsonFile, scanner.Text())
// }
// //json.NewDecoder(c.Request.Body).Decode(encoder)
// c.JSON(200, gin.H{
// "status": "posted",
// })
// // devices.PostRS485Config(c.Writer, c.Request)
|
// Copyright 2021 BoCloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"sync"
"github.com/fabedge/fabedge/pkg/operator/types"
"github.com/jjeffery/stringset"
)
// Interface is an in-memory store of endpoints and the communities that
// group them, safe for concurrent use.
type Interface interface {
	// SaveEndpoint inserts or overwrites an endpoint keyed by its name.
	SaveEndpoint(ep types.Endpoint)
	// GetEndpoint returns the endpoint under name and whether it exists.
	GetEndpoint(name string) (types.Endpoint, bool)
	// GetEndpoints returns the endpoints for the given names, silently
	// skipping names that are not stored.
	GetEndpoints(names ...string) []types.Endpoint
	// GetAllEndpointNames returns the set of every stored endpoint name.
	GetAllEndpointNames() stringset.Set
	// DeleteEndpoint removes the endpoint under name, if any.
	DeleteEndpoint(name string)
	// SaveCommunity inserts or updates a community and maintains the
	// endpoint-to-community reverse index.
	SaveCommunity(ep types.Community)
	// GetCommunity returns the community under name and whether it exists.
	GetCommunity(name string) (types.Community, bool)
	// GetCommunitiesByEndpoint returns every community the named endpoint
	// belongs to.
	GetCommunitiesByEndpoint(name string) []types.Community
	// DeleteCommunity removes a community and cleans the reverse index.
	DeleteCommunity(name string)
}
// Compile-time assertion that *store satisfies Interface.
var _ Interface = &store{}
// store is the map-backed implementation of Interface; all maps are
// guarded by mux.
type store struct {
	endpoints map[string]types.Endpoint
	communities map[string]types.Community
	// endpointToCommunities is the reverse index: endpoint name -> set of
	// community names it belongs to.
	endpointToCommunities map[string]stringset.Set
	mux sync.RWMutex
}
// NewStore returns an empty, ready-to-use endpoint/community store.
func NewStore() Interface {
	s := &store{}
	s.endpoints = make(map[string]types.Endpoint)
	s.communities = make(map[string]types.Community)
	s.endpointToCommunities = make(map[string]stringset.Set)
	return s
}
// SaveEndpoint inserts or overwrites the endpoint keyed by ep.Name.
func (s *store) SaveEndpoint(ep types.Endpoint) {
	s.mux.Lock()
	defer s.mux.Unlock()
	s.endpoints[ep.Name] = ep
}
// GetEndpoint returns the endpoint stored under name and whether it exists.
func (s *store) GetEndpoint(name string) (types.Endpoint, bool) {
	// Read-only access: take the read lock so concurrent readers do not
	// serialize (mux is an RWMutex; the write lock was used before).
	s.mux.RLock()
	defer s.mux.RUnlock()
	ep, ok := s.endpoints[name]
	return ep, ok
}
// GetEndpoints returns the endpoints stored under the given names,
// silently skipping any name that is not present.
func (s *store) GetEndpoints(names ...string) []types.Endpoint {
	// Read-only access: a read lock is sufficient.
	s.mux.RLock()
	defer s.mux.RUnlock()
	endpoints := make([]types.Endpoint, 0, len(names))
	for _, name := range names {
		ep, ok := s.endpoints[name]
		if !ok {
			continue
		}
		endpoints = append(endpoints, ep)
	}
	return endpoints
}
// GetAllEndpointNames returns a new set containing every stored endpoint
// name.
func (s *store) GetAllEndpointNames() stringset.Set {
	// Read-only access: a read lock is sufficient.
	s.mux.RLock()
	defer s.mux.RUnlock()
	names := make(stringset.Set, len(s.endpoints))
	for name := range s.endpoints {
		names.Add(name)
	}
	return names
}
// DeleteEndpoint removes the endpoint stored under name, if any. It does
// not touch the community reverse index.
func (s *store) DeleteEndpoint(name string) {
	s.mux.Lock()
	defer s.mux.Unlock()
	delete(s.endpoints, name)
}
// SaveCommunity stores community c and keeps the endpoint-to-community
// reverse index consistent: new members gain an entry pointing at c,
// members removed from the community lose theirs.
//
// NOTE(review): when the member set is unchanged the whole save is
// skipped — if types.Community ever carries other mutable fields besides
// Name and Members, those updates would be dropped here; confirm.
func (s *store) SaveCommunity(c types.Community) {
	s.mux.Lock()
	defer s.mux.Unlock()
	// Zero value if the community is new; its empty Members set makes the
	// removal loop below a no-op in that case.
	oldCommunity := s.communities[c.Name]
	if oldCommunity.Members.Equal(c.Members) {
		return
	}
	s.communities[c.Name] = c
	// add new member to communities index
	for member := range c.Members {
		cs := s.endpointToCommunities[member]
		cs.Add(c.Name)
		s.endpointToCommunities[member] = cs
	}
	// remove old member to communities index
	for member := range oldCommunity.Members {
		if c.Members.Contains(member) {
			continue
		}
		cs := s.endpointToCommunities[member]
		cs.Remove(c.Name)
		// Drop empty sets so the index does not accumulate dead keys.
		if len(cs) == 0 {
			delete(s.endpointToCommunities, member)
		}
	}
}
// GetCommunity returns the community stored under name and whether it
// exists.
func (s *store) GetCommunity(name string) (types.Community, bool) {
	// Read-only access: a read lock is sufficient.
	s.mux.RLock()
	defer s.mux.RUnlock()
	c, ok := s.communities[name]
	return c, ok
}
// GetCommunitiesByEndpoint returns every community the named endpoint
// belongs to, or nil if it belongs to none.
func (s *store) GetCommunitiesByEndpoint(name string) []types.Community {
	// Read-only access: a read lock is sufficient.
	s.mux.RLock()
	defer s.mux.RUnlock()
	var communities []types.Community
	cs, ok := s.endpointToCommunities[name]
	if !ok {
		return communities
	}
	for communityName := range cs {
		cmm, ok := s.communities[communityName]
		if ok {
			communities = append(communities, cmm)
		}
	}
	return communities
}
// DeleteCommunity removes the community stored under name and scrubs it
// from every member's entry in the reverse index.
func (s *store) DeleteCommunity(name string) {
	s.mux.Lock()
	defer s.mux.Unlock()
	// remove this community from endpointToCommunity
	cmm := s.communities[name]
	for member := range cmm.Members {
		cs := s.endpointToCommunities[member]
		cs.Remove(name)
		// Drop empty sets so the index does not accumulate dead keys.
		if len(cs) == 0 {
			delete(s.endpointToCommunities, member)
		}
	}
	delete(s.communities, name)
}
|
package main
import (
"fmt"
"math/rand"
"reflect"
"sync"
"testing"
"time"
)
// TestData is the randomly generated benchmark fixture: a slice of
// numbers plus a target sum to search for.
type TestData struct {
	data []int
	target int
}
var (
	// data holds the shared benchmark fixture, populated by getTestData.
	data TestData
	// cases is the table shared by the two correctness tests below; each
	// entry pairs an input slice and target with the expected index pair
	// (nil when no pair sums to the target).
	cases = []struct {
		data []int
		target int
		expected []int
	}{
		{
			[]int{1, 2, 3},
			3,
			[]int{0, 1},
		},
		{
			[]int{2, 3, 4, 5},
			8,
			[]int{1, 3},
		},
		{
			[]int{3, 3},
			6,
			[]int{0, 1},
		},
		{
			[]int{3, 3},
			8,
			nil,
		},
	}
)
// Test_doubleSumCycle runs the shared case table against the nested-loop
// implementation.
func Test_doubleSumCycle(t *testing.T) {
	for _, tc := range cases {
		name := fmt.Sprintf("Case %v, target %v", tc.data, tc.target)
		t.Run(name, func(t *testing.T) {
			got := doubleSumCycle(tc.data, tc.target)
			if !reflect.DeepEqual(got, tc.expected) {
				t.Error("unexpected result")
			}
		})
	}
}
// Test_doubleSumMap runs the shared case table against the map-based
// implementation.
func Test_doubleSumMap(t *testing.T) {
	for _, tc := range cases {
		name := fmt.Sprintf("Case %v, target %v", tc.data, tc.target)
		t.Run(name, func(t *testing.T) {
			got := doubleSumMap(tc.data, tc.target)
			if !reflect.DeepEqual(got, tc.expected) {
				t.Error("unexpected result")
			}
		})
	}
}
// testDataOnce guards one-time construction of the shared fixture.
// Bug fix: the original allocated a fresh sync.Once on every call, which
// defeated the Once entirely — the fixture was regenerated (and `data`
// overwritten) on each invocation.
var testDataOnce sync.Once

// getTestData lazily generates (exactly once) and returns the shared
// benchmark fixture: a random permutation of [0, 2000) together with a
// target value that is the sum of exactly one pair in it.
func getTestData() *TestData {
	testDataOnce.Do(func() {
		// sums[v] stays true only while exactly one pair produces sum v.
		sums := make(map[int]bool)
		var target int
		r := rand.New(rand.NewSource(time.Now().Unix()))
		nums := r.Perm(2000)
		for i := 0; i < len(nums)-1; i++ {
			for j := i + 1; j < len(nums); j++ {
				if _, exist := sums[nums[i]+nums[j]]; exist {
					sums[nums[i]+nums[j]] = false
				} else {
					sums[nums[i]+nums[j]] = true
				}
			}
		}
		// Pick any sum produced by exactly one pair.
		for key, val := range sums {
			if val {
				target = key
				break
			}
		}
		data.data = nums
		data.target = target
	})
	return &data
}
// Benchmark_doubleSumCycle measures the nested-loop implementation
// against the shared 2000-element random fixture.
func Benchmark_doubleSumCycle(b *testing.B) {
	data := getTestData()
	for i := 0; i < b.N; i++ {
		doubleSumCycle(data.data, data.target)
	}
}
// Benchmark_doubleSumMap measures the map-based implementation against
// the shared 2000-element random fixture.
func Benchmark_doubleSumMap(b *testing.B) {
	data := getTestData()
	for i := 0; i < b.N; i++ {
		doubleSumMap(data.data, data.target)
	}
}
|
// Copyright © 2018 Inanc Gumus
// Learn Go Programming Course
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
//
// For more tutorials : https://learngoprogramming.com
// In-person training : https://www.linkedin.com/in/inancgumus/
// Follow me on twitter: https://twitter.com/inancgumus
package main
import "fmt"
// main demonstrates assigning several variables at once and printing
// them.
func main() {
	planet := "Mars"
	isTrue := true
	temp := 19.5
	fmt.Println("Air is good on", planet)
	fmt.Println("It's", isTrue)
	fmt.Println("It is", temp, "degrees")
}
|
package slacktest
import (
"github.com/slack-go/slack/internal/errorsx"
)
// Sentinel errors shared by the slacktest hub and server helpers.
const (
	// ErrEmptyServerToHub is the error when attempting an empty server address to the hub
	ErrEmptyServerToHub = errorsx.String("Unable to add an empty server address to hub")
	// ErrPassedEmptyServerAddr is the error when being passed an empty server address
	ErrPassedEmptyServerAddr = errorsx.String("Passed an empty server address")
	// ErrNoQueuesRegisteredForServer is the error when there are no queues for a server in the hub
	ErrNoQueuesRegisteredForServer = errorsx.String("No queues registered for server")
)
|
package rpubsub
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/util"
"github.com/batchcorp/plumber/validate"
)
// Read subscribes to the configured Redis pubsub channels and pushes each
// received message onto resultsChan as a ReadRecord; receive/serialize
// failures go to errorChan. It returns when ctx is cancelled or, in
// non-continuous mode, after the first delivered message (or first
// receive error).
func (r *RedisPubsub) Read(ctx context.Context, readOpts *opts.ReadOptions, resultsChan chan *records.ReadRecord, errorChan chan *records.ErrorRecord) error {
	if err := validateReadOptions(readOpts); err != nil {
		return errors.Wrap(err, "invalid read options")
	}
	var count int64
	ps := r.client.Subscribe(ctx, readOpts.RedisPubsub.Args.Channels...)
	defer ps.Unsubscribe(ctx)
	r.log.Info("Listening for message(s) ...")
	doneCh := make(chan struct{})
	go func() {
		for {
			msg, err := ps.ReceiveMessage(ctx)
			if err != nil {
				util.WriteError(r.log, errorChan, fmt.Errorf("unable to receive redis pubsub messsage: %s", err))
				if !readOpts.Continuous {
					doneCh <- struct{}{}
					break
				}
				continue
			}
			serializedMsg, err := json.Marshal(msg)
			if err != nil {
				errorChan <- &records.ErrorRecord{
					OccurredAtUnixTsUtc: time.Now().UTC().Unix(),
					Error:               errors.Wrap(err, "unable to serialize message into JSON").Error(),
				}
				continue
			}
			// Bug fix: count was previously incremented twice per message
			// (once before and once after serialization), so Num skipped
			// every other value. Count once, only for delivered records.
			count++
			resultsChan <- &records.ReadRecord{
				MessageId:           uuid.NewV4().String(),
				Num:                 count,
				Metadata:            nil,
				ReceivedAtUnixTsUtc: time.Now().UTC().Unix(),
				Payload:             []byte(msg.Payload),
				XRaw:                serializedMsg,
				Record: &records.ReadRecord_RedisPubsub{
					RedisPubsub: &records.RedisPubsub{
						Value:     []byte(msg.Payload),
						Timestamp: time.Now().UTC().Unix(),
					},
				},
			}
			if !readOpts.Continuous {
				doneCh <- struct{}{}
				return
			}
		}
	}()
	select {
	case <-ctx.Done():
		return nil
	case <-doneCh:
		return nil
	}
}
func validateReadOptions(readOpts *opts.ReadOptions) error {
if readOpts == nil {
return validate.ErrMissingReadOptions
}
if readOpts.RedisPubsub == nil {
return validate.ErrEmptyBackendGroup
}
if readOpts.RedisPubsub.Args == nil {
return validate.ErrEmptyBackendArgs
}
if len(readOpts.RedisPubsub.Args.Channels) == 0 {
return ErrMissingChannel
}
return nil
}
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package router
import (
"gollum/core"
)
// Broadcast router
//
// This router implements the default behavior of routing all messages to all
// producers registered to the configured stream.
//
// Examples
//
// rateLimiter:
// Type: router.Broadcast
// Stream: errorlogs
// Filters:
// - filter.Rate:
// MessagesPerSec: 200
// Broadcast routes every message to all producers on the stream; see the
// package-level example above.
type Broadcast struct {
	core.SimpleRouter `gollumdoc:"embed_type"`
}
// init registers the Broadcast router with the plugin type registry so it
// can be instantiated from configuration.
func init() {
	core.TypeRegistry.Register(Broadcast{})
}
// Configure initializes this distributor with values from a plugin config.
// Broadcast has no settings of its own beyond the embedded SimpleRouter.
func (router *Broadcast) Configure(conf core.PluginConfigReader) {
}
// Start the router. Broadcast needs no startup work.
func (router *Broadcast) Start() error {
	return nil
}
// Enqueue enques a message to the router
func (router *Broadcast) Enqueue(msg *core.Message) error {
producers := router.GetProducers()
if len(producers) == 0 {
return core.NewModulateResultError(
"Router %s: no producers configured", router.GetID())
}
timeout := router.GetTimeout()
lastProdIdx := len(producers) - 1
for _, prod := range producers[:lastProdIdx] {
prod.Enqueue(msg.Clone(), timeout)
}
// Cloning is a rather expensive operation, so skip cloning for the last
// message (not required)
producers[lastProdIdx].Enqueue(msg, timeout)
return nil
}
|
package routes
import (
"fmt"
"joebot/rds"
t "joebot/tools"
"strings"
)
// dwuLookup fetches the redis value stored under "<prefix>_<player>"
// (player name lowercased). On a miss or lookup failure the error is
// logged and notFound is returned instead. Shared by the two route
// handlers below, which previously duplicated this logic verbatim.
func dwuLookup(prefix, playerName, notFound string) string {
	lookupKey := fmt.Sprintf("%s_%s", prefix, strings.ToLower(playerName))
	res, err := rds.RedisGet(rds.RC, lookupKey)
	if err != nil {
		t.WriteErr(err)
		res = notFound
	}
	return res
}

// passiveRouteDWU returns the stored passives text for playerName.
func passiveRouteDWU(playerName string) (res string) {
	return dwuLookup("passive", playerName, "Player's passives not found! Make sure it's spelled correctly!")
}

// officerRouteDWU returns the stored officer text for playerName.
func officerRouteDWU(playerName string) (res string) {
	return dwuLookup("officer", playerName, "Officer not found! Make sure it's spelled correctly!")
}
|
package main
import (
"github.com/go-chi/chi"
)
// getRoutes builds the chi router and registers every HTML, API, and
// static-file handler for the application.
func getRoutes() chi.Router {
	// We're using chi as the router. You'll want to read
	// the documentation https://github.com/go-chi/chi
	// so that you can capture parameters like /events/5
	// or /api/events/4 -- where you want to get the
	// event id (5 and 4, respectively).
	r := chi.NewRouter()
	r.Get("/", indexController)
	r.Get("/about", aboutHandler)
	// Event detail pages; {eventID} is numeric, {action} an optional verb.
	r.Get("/events/{eventID:[0-9]+}", eventDetailController)
	r.Get("/events/{eventID:[0-9]+}/{action:[a-z-]+}", eventDetailController)
	r.Post("/events/{eventID:[0-9]+}", eventDetailController)
	r.Get("/events/new", eventCreateController)
	// JSON API endpoints.
	r.Get("/api/events", apiEventListController)
	r.Get("/api/events/{eventID:[0-9]+}", apiEventController)
	r.Post("/events/new", eventCreateController)
	// Search (GET renders the form, POST performs the search).
	r.Get("/search", searchController)
	r.Post("/search", searchController)
	r.Get("/search/category", searchCategoryController)
	r.Post("/search/category", searchCategoryController)
	// Serve files under ./staticfiles at /static/.
	addStaticFileServer(r, "/static/", "staticfiles")
	return r
}
|
package game_map
import (
"fmt"
"github.com/steelx/go-rpg-cgm/combat"
)
// AddTurns queues a turn event for every actor in actorList that is
// still alive and does not already have an event pending.
func (c *CombatState) AddTurns(actorList []*combat.Actor) {
	for _, actor := range actorList {
		if actor.Stats.Get("HpNow") <= 0 {
			continue
		}
		if c.EventQueue.ActorHasEvent(actor) {
			continue
		}
		event := CETurnCreate(c, actor)
		c.EventQueue.Add(event, event.TimePoints(c.EventQueue))
	}
}
// GetTarget returns a default target for owner: players target the last
// enemy, enemies target the last party member.
func (c *CombatState) GetTarget(owner *combat.Actor) *combat.Actor {
	if owner.IsPlayer() {
		return c.Actors[enemies][len(c.Actors[enemies])-1]
	}
	// Bug fix: the party slice was previously indexed with the *enemies*
	// slice's length, which is wrong whenever the two differ and can
	// index out of range.
	return c.Actors[party][len(c.Actors[party])-1]
}
// GetAlivePartyActors returns every party member that is not knocked out.
func (c CombatState) GetAlivePartyActors() []*combat.Actor {
	var alive []*combat.Actor
	for _, member := range c.Actors[party] {
		if member.IsKOed() {
			continue
		}
		alive = append(alive, member)
	}
	return alive
}
//OnDead makes Actor KnockOut
func (c *CombatState) OnDead(actor *combat.Actor) {
if actor.IsPlayer() {
actor.KO()
} else {
for i := len(c.Actors[enemies]) - 1; i >= 0; i-- {
if actor == c.Actors[enemies][i] {
c.Actors[enemies] = removeActorAtIndex(c.Actors[enemies], i)
}
}
}
//Remove owned events
c.EventQueue.RemoveEventsOwnedBy(actor)
if c.IsPartyDefeated() {
fmt.Println("CombatState OnDead: Party loses")
} else if c.IsEnemyDefeated() {
fmt.Println("CombatState OnDead: Enemy loses")
}
}
// removeActorAtIndex deletes element i and returns the shortened slice.
// NOTE: append(arr[:i], arr[i+1:]...) mutates the shared backing array,
// so callers must replace their reference with the returned slice.
func removeActorAtIndex(arr []*combat.Actor, i int) []*combat.Actor {
	return append(arr[:i], arr[i+1:]...)
}
// removeCharAtIndex deletes element i and returns the shortened slice
// (mutates the backing array, like removeActorAtIndex).
func (c CombatState) removeCharAtIndex(arr []*Character, i int) []*Character {
	return append(arr[:i], arr[i+1:]...)
}
// removeFxAtIndex deletes effect i and returns the shortened slice
// (mutates the backing array, like removeActorAtIndex).
func (c CombatState) removeFxAtIndex(arr []EffectState, i int) []EffectState {
	return append(arr[:i], arr[i+1:]...)
}
// insertFxAtIndex inserts fxI into the effect list at position index,
// shifting the existing tail one slot to the right.
func (c *CombatState) insertFxAtIndex(index int, fxI EffectState) {
	// Copy the tail first: the append below may overwrite it in place.
	tail := make([]EffectState, len(c.EffectList)-index)
	copy(tail, c.EffectList[index:])
	c.EffectList = append(c.EffectList[:index], fxI)
	c.EffectList = append(c.EffectList, tail...)
}
// IsPartyDefeated reports whether every party member is knocked out;
// a single standing actor makes it false.
func (c CombatState) IsPartyDefeated() bool {
	defeated := true
	for _, member := range c.Actors[party] {
		if !member.IsKOed() {
			defeated = false
			break
		}
	}
	return defeated
}
// PartyWins reports whether no enemy has HP remaining.
func (c CombatState) PartyWins() bool {
	return !c.HasLiveActors(c.Actors[enemies])
}
// IsEnemyDefeated reports whether the enemy roster is empty (enemies are
// removed on death, see OnDead).
func (c CombatState) IsEnemyDefeated() bool {
	return len(c.Actors[enemies]) == 0
}
// EnemyWins reports whether no party member has HP remaining.
func (c CombatState) EnemyWins() bool {
	return !c.HasLiveActors(c.Actors[party])
}
// HasLiveActors reports whether any actor in actorList has HP above zero.
func (c CombatState) HasLiveActors(actorList []*combat.Actor) bool {
	for _, actor := range actorList {
		if actor.Stats.Get("HpNow") > 0 {
			return true
		}
	}
	return false
}
// IsPartyMember reports whether owner is one of the party's actors
// (pointer identity comparison).
func (c *CombatState) IsPartyMember(owner *combat.Actor) bool {
	for _, member := range c.Actors[party] {
		if member == owner {
			return true
		}
	}
	return false
}
|
package internal
import (
"log"
"math"
"testing"
"time"
)
// TestWhoisWorkerCheckDomains queries every configured domain through the
// whois worker and logs each response; any ResponseUnknown status fails
// the test. Requires at least four domains in the configuration.
func TestWhoisWorkerCheckDomains(t *testing.T) {
	appConfig := InitConfiguration()
	if len(appConfig.Domains) < 4 {
		t.Errorf("expected at least four domains in configuration, found %d", len(appConfig.Domains))
	}
	whoisWorker := NewWhoisWorker(ApplicationNamespace, appConfig.Domains)
	// Bug fix: this check previously reported len(appConfig.Domains) in
	// its message even though it inspects whoisWorker.domains.
	if len(whoisWorker.domains) < 4 {
		t.Errorf("expected at least four domains in configuration, found %d", len(whoisWorker.domains))
	}
	queryChannel := make(chan WhoisResponse, len(whoisWorker.domains))
	whoisWorker.queryDomains(queryChannel)
	for i := 0; i < len(whoisWorker.domains); i++ {
		resp := <-queryChannel
		// A switch reads better than the previous if/else-if cascade over
		// the same value.
		switch resp.status {
		case ResponseAvailable:
			log.Printf("queried %v, status is available", resp.target)
		case ResponseError:
			log.Printf("queried %v, status is error, %v", resp.target, resp.status.String())
		case ResponseExceededRate:
			log.Printf("queried %v, exceeded rate with %v", resp.target, resp.refer)
		case ResponseOk:
			if resp.hasExpiration {
				delta := time.Until(resp.expiration)
				daysRemaining := math.Round((delta.Hours()/24)*100) / 100
				log.Printf("queried %v, expires in %v days", resp.target, daysRemaining)
			} else {
				log.Printf("queried %v, status is ok", resp.target)
			}
		case ResponseUnauthorized:
			log.Printf("queried %v, unauthorized with %v", resp.target, resp.hostPort)
		case ResponseUnknown:
			log.Printf("queried %v, unknown with %v", resp.target, resp.refer)
		default:
			log.Printf("queried %v, unexpected status is %v", resp.target, resp.status.String())
		}
		if resp.status == ResponseUnknown {
			t.Errorf("queried %v, not expecting status %v", resp.target, ResponseUnknown)
		}
	}
}
|
package main
import "fmt"
// main demos binarySearch with one hit (23) and one miss (50).
func main() {
	sorted := []int{10, 11, 12, 16, 18, 23, 29, 33, 48, 54, 57, 68, 77, 84, 98}
	fmt.Println("binarySearch")
	fmt.Println(binarySearch(23, sorted))
	fmt.Println(binarySearch(50, sorted))
}
// binarySearch returns the index of key in the ascending slice sorted,
// or -1 when key is absent. Runs in O(log n).
func binarySearch(key int, sorted []int) int {
	lo, hi := 0, len(sorted)-1
	for lo <= hi {
		// Midpoint written to avoid overflow on huge slices.
		mid := lo + (hi-lo)/2
		switch {
		case sorted[mid] > key:
			hi = mid - 1
		case sorted[mid] < key:
			lo = mid + 1
		default:
			return mid
		}
	}
	return -1
}
|
package frontend
import (
"crypto/rand"
"crypto/rsa"
"encoding/json"
"io/ioutil"
"math/big"
"net/http"
"github.com/gorilla/mux"
uuid "github.com/satori/go.uuid"
"github.com/sirupsen/logrus"
"github.com/jim-minter/rp/pkg/api"
"github.com/jim-minter/rp/pkg/database/cosmosdb"
)
// putOrPatchOpenShiftCluster is the HTTP entry point for creating or
// updating a cluster. It validates the api-version and content type,
// reads the (size-capped) body, and delegates to
// _putOrPatchOpenShiftCluster, retrying on CosmosDB precondition
// failures (ETag conflicts). Responds 201 on create, 200 otherwise.
func (f *frontend) putOrPatchOpenShiftCluster(w http.ResponseWriter, r *http.Request) {
	log := r.Context().Value(contextKeyLog).(*logrus.Entry)
	vars := mux.Vars(r)
	// Resolve the external API converter for the requested api-version.
	toExternal, found := api.APIs[api.APIVersionType{APIVersion: r.URL.Query().Get("api-version"), Type: "OpenShiftCluster"}]
	if !found {
		api.WriteError(w, http.StatusNotFound, api.CloudErrorCodeInvalidResourceType, "", "The resource type '%s' could not be found in the namespace '%s' for api version '%s'.", vars["resourceType"], vars["resourceProviderNamespace"], r.URL.Query().Get("api-version"))
		return
	}
	if r.Header.Get("Content-Type") != "application/json" {
		api.WriteError(w, http.StatusUnsupportedMediaType, api.CloudErrorCodeUnsupportedMediaType, "", "The content media type '%s' is not supported. Only 'application/json' is supported.", r.Header.Get("Content-Type"))
		return
	}
	// Cap the request body at 1 MiB.
	body, err := ioutil.ReadAll(http.MaxBytesReader(w, r.Body, 1048576))
	if err != nil {
		api.WriteError(w, http.StatusUnsupportedMediaType, api.CloudErrorCodeInvalidResource, "", "The resource definition is invalid.")
		return
	}
	var b []byte
	var created bool
	// Retry the whole read-modify-write on optimistic-concurrency failure.
	err = cosmosdb.RetryOnPreconditionFailed(func() error {
		b, created, err = f._putOrPatchOpenShiftCluster(&request{
			context:      r.Context(),
			method:       r.Method,
			resourceID:   r.URL.Path,
			resourceName: vars["resourceName"],
			resourceType: vars["resourceProviderNamespace"] + "/" + vars["resourceType"],
			body:         body,
			toExternal:   toExternal,
		})
		return err
	})
	if err != nil {
		switch err := err.(type) {
		case *api.CloudError:
			api.WriteCloudError(w, err)
		default:
			// Unexpected error: log details, return an opaque 500.
			log.Error(err)
			api.WriteError(w, http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", "Internal server error.")
		}
		return
	}
	if created {
		w.WriteHeader(http.StatusCreated)
	}
	w.Write(b)
	w.Write([]byte{'\n'})
}
// _putOrPatchOpenShiftCluster performs one create-or-update attempt: it
// loads the existing document (if any), builds the external
// representation to merge the request body into, validates, converts
// back to the internal model, persists, and returns the marshalled
// external view plus whether the document was created.
func (f *frontend) _putOrPatchOpenShiftCluster(r *request) ([]byte, bool, error) {
	doc, err := f.db.Get(r.resourceID)
	// 404 means "create"; any other error is fatal.
	if err != nil && !cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) {
		return nil, false, err
	}
	isCreate := doc == nil
	var external api.External
	if isCreate {
		doc = &api.OpenShiftClusterDocument{
			ID: uuid.NewV4().String(),
		}
		// New resource: start from an empty external object in Updating state.
		external = r.toExternal(&api.OpenShiftCluster{
			ID:   r.resourceID,
			Name: r.resourceName,
			Type: r.resourceType,
			Properties: api.Properties{
				ProvisioningState: api.ProvisioningStateUpdating,
			},
		})
	} else {
		// Updates are only allowed once the cluster reached Succeeded.
		err = validateProvisioningState(doc.OpenShiftCluster.Properties.ProvisioningState, api.ProvisioningStateSucceeded)
		if err != nil {
			return nil, false, err
		}
		switch r.method {
		case http.MethodPut:
			// PUT replaces: seed only identity + current provisioning state.
			external = r.toExternal(&api.OpenShiftCluster{
				ID:   r.resourceID,
				Name: r.resourceName,
				Type: r.resourceType,
				Properties: api.Properties{
					ProvisioningState: doc.OpenShiftCluster.Properties.ProvisioningState,
				},
			})
		case http.MethodPatch:
			// PATCH merges: seed with the full existing cluster.
			external = r.toExternal(doc.OpenShiftCluster)
		}
	}
	// Overlay the request body onto the seeded external representation.
	err = json.Unmarshal(r.body, &external)
	if err != nil {
		return nil, false, api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidRequestContent, "", "The request content was invalid and could not be deserialized: %q.", err)
	}
	err = external.Validate(r.context, r.resourceID, doc.OpenShiftCluster)
	if err != nil {
		return nil, false, err
	}
	if doc.OpenShiftCluster == nil {
		doc.OpenShiftCluster = &api.OpenShiftCluster{
			Properties: api.Properties{
				Installation: &api.Installation{},
			},
		}
	}
	external.ToInternal(doc.OpenShiftCluster)
	doc.OpenShiftCluster.Properties.ProvisioningState = api.ProvisioningStateUpdating
	if isCreate {
		// One-time provisioning inputs: resource group, SSH key, storage suffix.
		doc.OpenShiftCluster.Properties.ResourceGroup = doc.OpenShiftCluster.Name
		doc.OpenShiftCluster.Properties.SSHKey, err = rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			return nil, false, err
		}
		doc.OpenShiftCluster.Properties.StorageSuffix, err = randomLowerCaseAlphanumericString(5)
		if err != nil {
			return nil, false, err
		}
		doc, err = f.db.Create(doc)
	} else {
		doc, err = f.db.Update(doc)
	}
	if err != nil {
		return nil, false, err
	}
	doc.OpenShiftCluster.ID = r.resourceID
	doc.OpenShiftCluster.Name = r.resourceName
	doc.OpenShiftCluster.Type = r.resourceType
	// Never echo the client secret back to the caller.
	doc.OpenShiftCluster.Properties.ServicePrincipalProfile.ClientSecret = ""
	b, err := json.MarshalIndent(r.toExternal(doc.OpenShiftCluster), "", " ")
	if err != nil {
		return nil, false, err
	}
	return b, isCreate, nil
}
// randomLowerCaseAlphanumericString returns n cryptographically random
// characters drawn from [a-z0-9].
func randomLowerCaseAlphanumericString(n int) (string, error) {
	return randomString("abcdefghijklmnopqrstuvwxyz0123456789", n)
}
// randomString returns n characters drawn uniformly at random (via
// crypto/rand) from letterBytes.
func randomString(letterBytes string, n int) (string, error) {
	out := make([]byte, n)
	alphabetSize := big.NewInt(int64(len(letterBytes)))
	for i := 0; i < n; i++ {
		idx, err := rand.Int(rand.Reader, alphabetSize)
		if err != nil {
			return "", err
		}
		out[i] = letterBytes[idx.Int64()]
	}
	return string(out), nil
}
|
package main
import (
"bufio"
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
)
// generateIV returns the requested number of cryptographically random
// bytes for use as an IV.
//
// Bug fix: the error from rand.Read was previously ignored, which could
// silently yield a predictable (partially zero) IV. A failing CSPRNG is
// unrecoverable here, so panic instead.
func generateIV(bytes int) []byte {
	b := make([]byte, bytes)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return b
}
func encrypt(block cipher.Block, value []byte, iv []byte) []byte {
stream := cipher.NewCTR(block, iv)
ciphertext := make([]byte, len(value))
stream.XORKeyStream(ciphertext, value)
return ciphertext
}
func decrypt(block cipher.Block, ciphertext []byte, iv []byte) []byte {
stream := cipher.NewCTR(block, iv)
plain := make([]byte, len(ciphertext))
// XORKeyStream is used to decrypt too!
stream.XORKeyStream(plain, ciphertext)
return plain
}
// main1 demonstrates block-wise AES-CTR file encryption: it reads
// ./qiyu.jpeg, encrypts it block by block into ./qiyu1.jpeg, then
// decrypts the in-memory ciphertext into ./qiyu2.jpeg via aescrt.
//
// Fixes: the errors returned by both os.Create calls were ignored (a
// failed create would surface only as a nil-file write error later) and
// neither output file was ever closed.
func main1() {
	block, err := aes.NewCipher([]byte("1234567890123456"))
	if err != nil {
		panic(err)
	}
	iv := generateIV(block.BlockSize())
	if bs, err := ioutil.ReadFile("./qiyu.jpeg"); err != nil {
		panic(err)
	} else {
		fmt.Println("====== file len====: ", len(bs))
	}
	// read a file
	fi, err := os.Open("./qiyu.jpeg")
	if err != nil {
		panic(err)
	}
	defer fi.Close()
	bufioReader := bufio.NewReader(fi)
	var encryptedBytes []byte
	fmt.Println("============================ Begin to encrypt")
	for {
		tmpBytes := make([]byte, block.BlockSize())
		if _, err := bufioReader.Read(tmpBytes); err != nil {
			if err == io.EOF {
				break
			} else {
				panic(err)
			}
		} else {
			ciphertext := encrypt(block, tmpBytes, iv)
			encryptedBytes = append(encryptedBytes, ciphertext...)
		}
	}
	fmt.Println("======", len(encryptedBytes))
	encFile, err := os.Create("./qiyu1.jpeg")
	if err != nil {
		panic(err)
	}
	defer encFile.Close()
	fmt.Println("========== test encrypted file")
	if _, err := encFile.Write(encryptedBytes); err != nil {
		panic(err)
	}
	fmt.Println("============================== Begin to decrypt")
	bufReader := bytes.NewReader(encryptedBytes)
	decFile, err := os.Create("./qiyu2.jpeg")
	if err != nil {
		panic(err)
	}
	defer decFile.Close()
	aescrt(bufReader, decFile, block, iv)
}
// key is a hard-coded 16-byte AES-128 key and iv a 16-byte CTR IV.
// NOTE(review): hard-coded key material is fine for this demo, never for
// production use.
var key = []uint8{0x30, 0x82, 0x04, 0xa2, 0x02, 0x01, 0x00, 0x02, 0x82, 0x01, 0x01, 0x00, 0xbe, 0xea, 0xce, 0x8b}
var iv = []uint8{0xc1, 0x2a, 0x9a, 0xcc, 0x16, 0x78, 0xa5, 0x7f, 0x1c, 0x39, 0x22, 0x8c, 0x17, 0x0b, 0x4f, 0xc8}
// main round-trips ./qiyu.jpeg through AES-CTR with the package-level
// key and iv: test encrypts it into ./qiyu1.jpeg, test1 decrypts that
// back into ./qiyu2.jpeg.
func main() {
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	test(block, iv)
	test1(block, iv)
	// createIV()
}
// createIV generates a fresh 16-byte IV, runs it through AES-CTR using
// itself as the counter IV, and prints the base64url-encoded result.
func createIV() {
	const key = "48656c6c6f20476f"
	iv := generateIV(16)
	block, err := aes.NewCipher([]byte(key))
	if err != nil {
		panic(err)
	}
	encryptedIV := aesctrHelp(iv, block, iv)
	fmt.Println("=======", base64.URLEncoding.EncodeToString(encryptedIV))
}
func aesctrHelp(value []byte, block cipher.Block, iv []byte) []byte {
stream := cipher.NewCTR(block, iv)
ciphertext := make([]byte, len(value))
stream.XORKeyStream(ciphertext, value)
return ciphertext
}
// test encrypts ./qiyu.jpeg into ./qiyu1.jpeg with AES-CTR via aescrt.
//
// Bug fix: the error from os.Create was ignored; a failed create would
// previously surface only as a confusing write failure inside aescrt.
func test(block cipher.Block, iv []byte) {
	fmt.Println("======== test")
	fi, err := os.Open("./qiyu.jpeg")
	if err != nil {
		panic(err)
	}
	defer fi.Close()
	newFile, err := os.Create("./qiyu1.jpeg")
	if err != nil {
		panic(err)
	}
	defer newFile.Close()
	aescrt(fi, newFile, block, iv)
	// newFile1, err := os.Create("./qiyu2.jpeg")
	// aescrt(newFile, newFile1, block, iv)
}
// test1 decrypts ./qiyu1.jpeg back into ./qiyu2.jpeg with AES-CTR via
// aescrt (CTR decryption is the same XOR transform as encryption).
//
// Bug fix: the error from os.Create was ignored, as in test.
func test1(block cipher.Block, iv []byte) {
	fmt.Println("======== test1")
	fi, err := os.Open("./qiyu1.jpeg")
	if err != nil {
		panic(err)
	}
	defer fi.Close()
	newFile, err := os.Create("./qiyu2.jpeg")
	if err != nil {
		panic(err)
	}
	defer newFile.Close()
	aescrt(fi, newFile, block, iv)
}
// aescrt streams rc through an AES-CTR transform one block at a time and
// writes the result to wc. CTR encryption and decryption are the same
// XOR operation, so this works in both directions.
//
// NOTE(review): rc.Read may legally return fewer bytes than requested;
// the full BlockSize buffer is transformed regardless, so a short read
// mid-stream would inject zero bytes into the output — confirm callers
// only pass readers that serve full blocks. The final partial block is
// likewise zero-padded, rounding the output length up to a block
// multiple.
func aescrt(rc io.Reader, wc io.Writer, block cipher.Block, iv []byte) {
	for {
		tmpBytes := make([]byte, block.BlockSize())
		// Per-block debug trace.
		fmt.Println("===============")
		if _, err := rc.Read(tmpBytes); err != nil {
			if err == io.EOF {
				break
			} else {
				panic(err)
			}
		} else {
			plaintext := decrypt(block, tmpBytes, iv)
			if _, err := wc.Write(plaintext); err != nil {
				panic(err)
			}
		}
	}
}
|
/*
* @lc app=leetcode id=74 lang=golang
*
* [74] Search a 2D Matrix
*
* https://leetcode.com/problems/search-a-2d-matrix/description/
*
* algorithms
* Medium (35.04%)
* Likes: 907
* Dislikes: 108
* Total Accepted: 234.7K
* Total Submissions: 669.9K
* Testcase Example: '[[1,3,5,7],[10,11,16,20],[23,30,34,50]]\n3'
*
* Write an efficient algorithm that searches for a value in an m x n matrix.
* This matrix has the following properties:
*
*
* Integers in each row are sorted from left to right.
* The first integer of each row is greater than the last integer of the
* previous row.
*
*
* Example 1:
*
*
* Input:
* matrix = [
* [1, 3, 5, 7],
* [10, 11, 16, 20],
* [23, 30, 34, 50]
* ]
* target = 3
* Output: true
*
*
* Example 2:
*
*
* Input:
* matrix = [
* [1, 3, 5, 7],
* [10, 11, 16, 20],
* [23, 30, 34, 50]
* ]
* target = 13
* Output: false
*
*/
// searchMatrix reports whether target occurs in matrix, where each row is
// sorted ascending and each row's first element is greater than the
// previous row's last. Empty rows are tolerated. Runs two binary
// searches: one to pick the candidate row, one within it.
//
// Bug fix: the original could select an empty row as the candidate (e.g.
// matrix [[],[5]], target 3 picked row 0) and then evaluated
// matrix[row][-1], panicking with an index out of range. It also had an
// unreachable trailing return after an infinite loop. This version
// binary-searches over the non-empty rows only.
func searchMatrix(matrix [][]int, target int) bool {
	// Indices of non-empty rows, in order; empty rows can never hold target.
	var rows []int
	for i := range matrix {
		if len(matrix[i]) > 0 {
			rows = append(rows, i)
		}
	}
	if len(rows) == 0 {
		return false
	}
	// Find the last non-empty row whose first element is <= target.
	lo, hi, row := 0, len(rows)-1, -1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		if matrix[rows[mid]][0] <= target {
			row = rows[mid]
			lo = mid + 1
		} else {
			hi = mid - 1
		}
	}
	if row == -1 {
		// target is smaller than every stored value.
		return false
	}
	// Standard binary search within the chosen row.
	r := matrix[row]
	lo, hi = 0, len(r)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch {
		case r[mid] == target:
			return true
		case r[mid] < target:
			lo = mid + 1
		default:
			hi = mid - 1
		}
	}
	return false
}
// searchMatrix2 is the simple linear variant: scan rows to find the last
// non-empty row whose first element is <= target, then scan that row.
//
// Bug fix: when the final row was empty, the original's `continue`
// skipped the i == len(matrix)-1 check, so the loop ended with row == -1
// and the function returned false even when target existed in an earlier
// row (e.g. [[1],[]], target 1).
func searchMatrix2(matrix [][]int, target int) bool {
	row := -1
	for i := 0; i < len(matrix); i++ {
		if len(matrix[i]) == 0 {
			continue
		}
		if target < matrix[i][0] {
			// All later rows start even higher; stop at the current candidate.
			break
		}
		row = i
	}
	if row == -1 {
		return false
	}
	for _, a := range matrix[row] {
		if a == target {
			return true
		}
	}
	return false
}
|
package server
import (
"time"
"github.com/sirupsen/logrus"
)
// baseFields returns the log fields common to every server log entry.
// Extracted because the four log helpers below previously duplicated
// this map literal verbatim.
func (s *server) baseFields(pkg, function string) logrus.Fields {
	return logrus.Fields{
		"db":       s.dbPlatform,
		"cache":    s.cachePlatform,
		"grpcAddr": s.listener.Addr().String(),
		"httpPort": s.httpPort,
		"ssl":      s.ssl,
		"pkg":      pkg,
		"func":     function,
	}
}

// logInfo emits an info-level entry with the elapsed duration under "in".
func (s *server) logInfo(pkg, function, msg string, t time.Duration) {
	fields := s.baseFields(pkg, function)
	fields["in"] = t
	s.log.WithFields(fields).Info(msg)
}

// logError emits an error-level entry with the error text under "error".
func (s *server) logError(pkg, function, err, msg string) {
	fields := s.baseFields(pkg, function)
	fields["error"] = err
	s.log.WithFields(fields).Error(msg)
}

// logWarning emits a warning-level entry with the error text under "error".
func (s *server) logWarning(pkg, function, err, msg string) {
	fields := s.baseFields(pkg, function)
	fields["error"] = err
	s.log.WithFields(fields).Warning(msg)
}

// logFatal emits a fatal-level entry (terminating the process) with the
// error text under "error".
func (s *server) logFatal(pkg, function, err, msg string) {
	fields := s.baseFields(pkg, function)
	fields["error"] = err
	s.log.WithFields(fields).Fatal(msg)
}
|
package storage
// Storage is the persistence abstraction for the application; Users
// returns the repository used to manage User records.
type Storage interface {
	Users() UserRepo
}
|
package service
import (
"github.com/irisnet/irishub/app/v1/auth"
"github.com/irisnet/irishub/app/v1/service/tags"
"github.com/irisnet/irishub/types"
)
// EndBlocker runs at the end of every block: it walks the active service
// requests whose expiration height has been reached, slashes the
// provider's deposit (unless the request is in profiling mode), refunds
// the consumer's fee, removes the request, and emits timeout tags.
func EndBlocker(ctx types.Context, keeper Keeper) (resTags types.Tags) {
	ctx = ctx.WithLogger(ctx.Logger().With("handler", "endBlock").With("module", "iris/service"))
	logger := ctx.Logger()
	// Reset the intra-transaction counter.
	keeper.SetIntraTxCounter(ctx, 0)
	resTags = types.NewTags()
	params := keeper.GetParamSet(ctx)
	slashFraction := params.SlashFraction
	// Iterate requests that have expired as of the current block height.
	activeIterator := keeper.ActiveRequestQueueIterator(ctx, ctx.BlockHeight())
	defer activeIterator.Close()
	for ; activeIterator.Valid(); activeIterator.Next() {
		var req SvcRequest
		keeper.cdc.MustUnmarshalBinaryLengthPrefixed(activeIterator.Value(), &req)
		// if not Profiling mode,should slash provider
		slashCoins := types.Coins{}
		if !req.Profiling {
			binding, found := keeper.GetServiceBinding(ctx, req.DefChainID, req.DefName, req.BindChainID, req.Provider)
			if found {
				// Slash slashFraction of each deposited coin (truncated).
				for _, coin := range binding.Deposit {
					taxAmount := types.NewDecFromInt(coin.Amount).Mul(slashFraction).TruncateInt()
					slashCoins = append(slashCoins, types.NewCoin(coin.Denom, taxAmount))
				}
			}
			slashCoins = slashCoins.Sort()
			_, err := keeper.ck.BurnCoins(ctx, auth.ServiceDepositCoinsAccAddr, slashCoins)
			if err != nil {
				panic(err)
			}
			// NOTE(review): when the binding was not found, `binding` is the
			// zero value but is still passed to Slash — confirm Slash
			// tolerates that (slashCoins is empty in that case).
			err = keeper.Slash(ctx, binding, slashCoins)
			if err != nil {
				panic(err)
			}
		}
		// Refund the consumer's fee and drop the timed-out request.
		keeper.AddReturnFee(ctx, req.Consumer, req.ServiceFee)
		keeper.DeleteActiveRequest(ctx, req)
		keeper.metrics.ActiveRequests.Add(-1)
		keeper.DeleteRequestExpiration(ctx, req)
		resTags = resTags.AppendTag(tags.Action, tags.ActionSvcCallTimeOut)
		resTags = resTags.AppendTag(tags.RequestID, []byte(req.RequestID()))
		resTags = resTags.AppendTag(tags.Provider, []byte(req.Provider))
		resTags = resTags.AppendTag(tags.SlashCoins, []byte(slashCoins.String()))
		logger.Info("Remove timeout request", "request_id", req.RequestID(), "consumer", req.Consumer.String())
	}
	return resTags
}
|
package inttest
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"

	"tagallery.com/api/config"
)
// ErrorResponse mirrors the JSON error payload returned by the API.
type ErrorResponse struct {
	Error string `json:"error"`
}
// apiURL takes a route and returns the full API url for the locally running
// server configured via config.Get().Port.
func apiURL(route string) string {
	base := fmt.Sprintf("http://localhost:%v", config.Get().Port)
	return base + route
}
// GetRequest sends an HTTP GET request (no body) and parses the returned
// JSON data into the type of {response}.
func GetRequest(url string, response interface{}) error {
	return Request("GET", url, nil, response)
}
// PostRequest sends an HTTP POST request with {body} encoded as JSON and
// parses the returned data into the type of {response}.
func PostRequest(url string, body interface{}, response interface{}) error {
	return Request("POST", url, body, response)
}
// DeleteRequest sends an HTTP DELETE request (no body) and parses the
// returned data into the type of {response}.
func DeleteRequest(url string, response interface{}) error {
	return Request("DELETE", url, nil, response)
}
// Request sends a HTTP request to {url} and parses the returned data into the type of {response}.
func Request(method string, url string, body interface{}, response interface{}) error {
var resp *http.Response
content, err := json.Marshal(body)
if err != nil {
return err
}
req, err := http.NewRequest(method, url, bytes.NewBuffer(content))
if err != nil {
return err
}
client := &http.Client{}
resp, err = client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return json.Unmarshal(respBody, response)
}
|
package ds
/**
*
In a deck of cards, each card has an integer written on it.
Return true if and only if you can choose X >= 2 such that
it is possible to split the entire deck into 1 or more groups of cards, where:
Each group has exactly X cards.
All the cards in each group have the same integer.
Example 1:
Input: deck = [1,2,3,4,4,3,2,1]
Output: true
Explanation: Possible partition [1,1],[2,2],[3,3],[4,4].
Example 2:
Input: deck = [1,1,1,2,2,2,3,3]
Output: false
Explanation: No possible partition.
Example 3:
Input: deck = [1]
Output: false
Explanation: No possible partition.
Example 4:
Input: deck = [1,1]
Output: true
Explanation: Possible partition [1,1].
Example 5:
Input: deck = [1,1,2,2,2,2]
Output: true
Explanation: Possible partition [1,1],[2,2],[2,2].
Constraints:
1 <= deck.length <= 10^4
0 <= deck[i] < 10^4
Accepted
56,718
Submissions
164,886
*
*
*
*/
// hasGroupsSizeX reports whether the deck can be partitioned into one or
// more groups of a common size X >= 2 such that each group contains a single
// repeated value. This holds exactly when the gcd of all value counts is at
// least 2.
func hasGroupsSizeX(deck []int) bool {
	if len(deck) < 2 {
		return false
	}
	// Count occurrences of each card value.
	counts := make(map[int]int)
	for _, card := range deck {
		counts[card]++
	}
	// A valid X exists iff the gcd of all counts is >= 2.
	g := counts[deck[0]]
	for _, c := range counts {
		g = gcd(g, c)
	}
	return g >= 2
}

// gcd returns the greatest common divisor of a and b using the Euclidean
// algorithm. Unlike the previous version it does not panic when b == 0.
func gcd(a, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}
|
package caam
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01200101 is the XML document wrapper for the
// caam.012.001.01 (ATMExceptionAcknowledgement) ISO 20022 message.
type Document01200101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:caam.012.001.01 Document"`
	Message *ATMExceptionAcknowledgementV01 `xml:"ATMXcptnAck"`
}

// AddMessage allocates the embedded message and returns it so the caller
// can populate its fields.
func (d *Document01200101) AddMessage() *ATMExceptionAcknowledgementV01 {
	d.Message = new(ATMExceptionAcknowledgementV01)
	return d.Message
}
// The ATMExceptionAcknowledgement message is sent by an acquirer or its agent to an ATM to acknowledge the receipt of an ATMExceptionAdvice message.
type ATMExceptionAcknowledgementV01 struct {
	// Information related to the protocol management on a segment of the path from the ATM to the acquirer.
	Header *iso20022.Header32 `xml:"Hdr"`
	// Encrypted body of the message.
	ProtectedATMExceptionAcknowledgement *iso20022.ContentInformationType10 `xml:"PrtctdATMXcptnAck,omitempty"`
	// Information related to the acknowledgement of an ATM exception.
	ATMExceptionAcknowledgement *iso20022.ATMExceptionAcknowledgement1 `xml:"ATMXcptnAck,omitempty"`
	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType15 `xml:"SctyTrlr,omitempty"`
}
// AddHeader allocates and returns the protocol header of the message.
func (a *ATMExceptionAcknowledgementV01) AddHeader() *iso20022.Header32 {
	a.Header = &iso20022.Header32{}
	return a.Header
}

// AddProtectedATMExceptionAcknowledgement allocates and returns the
// encrypted message body.
func (a *ATMExceptionAcknowledgementV01) AddProtectedATMExceptionAcknowledgement() *iso20022.ContentInformationType10 {
	a.ProtectedATMExceptionAcknowledgement = &iso20022.ContentInformationType10{}
	return a.ProtectedATMExceptionAcknowledgement
}

// AddATMExceptionAcknowledgement allocates and returns the acknowledgement
// details of the message.
func (a *ATMExceptionAcknowledgementV01) AddATMExceptionAcknowledgement() *iso20022.ATMExceptionAcknowledgement1 {
	a.ATMExceptionAcknowledgement = &iso20022.ATMExceptionAcknowledgement1{}
	return a.ATMExceptionAcknowledgement
}

// AddSecurityTrailer allocates and returns the MAC trailer of the message.
func (a *ATMExceptionAcknowledgementV01) AddSecurityTrailer() *iso20022.ContentInformationType15 {
	a.SecurityTrailer = &iso20022.ContentInformationType15{}
	return a.SecurityTrailer
}
|
/**
* @program: Go
*
* @description:
*
* @author: Mr.chen
*
* @create: 2020-03-06 10:54
**/
package admin
import (
"github.com/kataras/iris"
"github.com/kataras/iris/mvc"
"iris_demo/services"
)
// OrderController serves the admin order pages; its dependencies are
// injected by the iris MVC framework.
type OrderController struct {
	Ctx iris.Context
	OrderService services.IOrderService
}
// Get handles GET requests for the admin order page: it loads all orders
// and renders them with the order list view. When the lookup fails the
// page is still rendered with an empty order list.
func (o *OrderController) Get() mvc.View {
	orderArray, err := o.OrderService.GetAllOrderInfo()
	if err != nil {
		// FIX: a query failure is a real error; Debug level would be
		// filtered out in production configurations.
		o.Ctx.Application().Logger().Error("查询订单信息失败")
	}
	return mvc.View{
		Name: "/admin/order/view.html",
		Data: iris.Map{
			"order": orderArray,
		},
	}
}
|
package main
import "fmt"
// person holds basic identity data.
type person struct {
	firstName string
	lastName string
}

// secretAgent embeds person and adds a licence-to-kill flag.
type secretAgent struct {
	person
	// ltk: licence to kill
	ltk bool
}
// speak prints the agent's first and last name on separate lines.
func (s secretAgent) speak() {
	for _, part := range []string{s.firstName, s.lastName} {
		fmt.Println(part)
	}
}
// main builds a secret agent and demonstrates promoted embedded fields.
func main() {
	agent := secretAgent{
		person: person{firstName: "Selva", lastName: "Mohandoss"},
		ltk:    true,
	}
	agent.speak()
	fmt.Println(agent.firstName)
}
|
package activitylog
import (
md "github.com/ebikode/eLearning-core/model"
ut "github.com/ebikode/eLearning-core/utils"
)
// ActivityLogService provides activity-log operations.
type ActivityLogService interface {
	// GetActivityLogs returns one page of logs (page number, page size).
	GetActivityLogs(int, int) []*md.ActivityLog
	// CreateActivityLog persists a new activity log entry.
	CreateActivityLog(md.ActivityLog) error
}

// service is the default ActivityLogService backed by a repository.
type service struct {
	alRepo ActivityLogRepository
}
// NewService creates an activityLog service with the necessary dependencies.
func NewService(
	alRepo ActivityLogRepository,
) ActivityLogService {
	svc := service{alRepo: alRepo}
	return &svc
}
// GetActivityLogs returns one page of activity logs from the repository.
func (s *service) GetActivityLogs(page, limit int) []*md.ActivityLog {
	return s.alRepo.GetAll(page, limit)
}
// CreateActivityLog assigns a fresh random ID to the activity log and
// persists it through the repository, returning any storage error.
func (s *service) CreateActivityLog(c md.ActivityLog) error {
	c.ID = ut.RandomBase64String(8, "MDlg")
	// Return the repository result directly instead of the redundant
	// err-check-then-return-nil dance.
	return s.alRepo.Store(c)
}
|
package 单调栈
// --------------- left-to-right pass with a monotonically increasing stack ---------------

// finalPrices computes, for every item, the price to pay after applying the
// discount of the first following item that is not more expensive.
func finalPrices(prices []int) []int {
	result := make([]int, len(prices))
	var pending []int // indices still waiting for their discount
	for i, price := range prices {
		for len(pending) > 0 && prices[pending[len(pending)-1]] >= price {
			top := pending[len(pending)-1]
			result[top] = prices[top] - price
			pending = pending[:len(pending)-1]
		}
		pending = append(pending, i)
	}
	// Items that never found a discount pay full price.
	for _, idx := range pending {
		result[idx] = prices[idx]
	}
	return result
}
// ------------------------- MyStack -------------------------

// MyStack is a simple LIFO stack of ints backed by a slice.
type MyStack struct {
	data []int
}

// NewMyStack returns an empty stack.
func NewMyStack() *MyStack {
	return new(MyStack)
}

// Push appends val to the top of the stack.
func (s *MyStack) Push(val int) {
	s.data = append(s.data, val)
}

// Pop removes and returns the top element (panics when empty).
func (s *MyStack) Pop() int {
	last := len(s.data) - 1
	top := s.data[last]
	s.data = s.data[:last]
	return top
}

// GetTop returns the top element without removing it (panics when empty).
func (s *MyStack) GetTop() int {
	return s.data[len(s.data)-1]
}

// IsEmpty reports whether the stack holds no elements.
func (s *MyStack) IsEmpty() bool {
	return len(s.data) == 0
}

// GetSize returns the number of stored elements.
func (s *MyStack) GetSize() int {
	return len(s.data)
}
// --------------- right-to-left pass with a monotonically non-decreasing stack ---------------

// finalPrices computes the discounted prices by scanning from the right and
// keeping a stack of candidate discount indices (cheapest candidate on top).
func finalPrices(prices []int) []int {
	pay := make([]int, len(prices))
	var candidates []int
	for i := len(prices) - 1; i >= 0; i-- {
		// Discard candidates strictly more expensive than the current item.
		for len(candidates) > 0 && prices[candidates[len(candidates)-1]] > prices[i] {
			candidates = candidates[:len(candidates)-1]
		}
		if len(candidates) > 0 {
			pay[i] = prices[i] - prices[candidates[len(candidates)-1]]
		} else {
			pay[i] = prices[i]
		}
		candidates = append(candidates, i)
	}
	return pay
}
// ------------------------- MyStack -------------------------

// MyStack is a minimal slice-backed int stack.
type MyStack struct {
	data []int
}

// NewMyStack constructs an empty stack.
func NewMyStack() *MyStack {
	return &MyStack{}
}

// Push places val on top of the stack.
func (st *MyStack) Push(val int) {
	st.data = append(st.data, val)
}

// Pop removes the top element and returns it (panics when empty).
func (st *MyStack) Pop() int {
	n := st.GetSize() - 1
	v := st.data[n]
	st.data = st.data[:n]
	return v
}

// GetTop peeks at the top element (panics when empty).
func (st *MyStack) GetTop() int {
	return st.data[st.GetSize()-1]
}

// IsEmpty reports whether the stack is empty.
func (st *MyStack) IsEmpty() bool {
	return st.GetSize() == 0
}

// GetSize returns the element count.
func (st *MyStack) GetSize() int {
	return len(st.data)
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
/*
Problem link: (LeetCode "Final Prices With a Special Discount in a Shop")
Summary:
1. For each i the task is to find the first element of A[i+1:] that is
   less than or equal to A[i] and subtract it as a discount.
*/
|
package testing
import (
"time"
"github.com/selectel/go-selvpcclient/selvpcclient/resell/v2/crossregionsubnets"
"github.com/selectel/go-selvpcclient/selvpcclient/resell/v2/servers"
"github.com/selectel/go-selvpcclient/selvpcclient/resell/v2/subnets"
)
// TestGetCrossRegionSubnetResponseRaw represents a raw response from the Get request.
const TestGetCrossRegionSubnetResponseRaw = `
{
"cross_region_subnet": {
"id": 12,
"cidr": "192.168.200.0/24",
"vlan_id": 1003,
"status": "ACTIVE",
"project_id": "b63ab68796e34858befb8fa2a8b1e12a",
"servers": [
{
"status": "ACTIVE",
"updated": "2019-01-04T08:09:43Z",
"id": "22170dcf-2e58-49b7-9115-951b84d366f6",
"name": "Node01"
},
{
"status": "ACTIVE",
"updated": "2019-01-04T08:09:43Z",
"id": "df842202-fdcc-490e-b92a-6e252e5577c7",
"name": "Node02"
}
],
"subnets": [
{
"id": 10,
"vlan_id": 1003,
"cidr": "192.168.200.0/24",
"network_id": "78c1cbe1-c34d-4685-be2d-a877a1b1dec4",
"subnet_id": "7db1255f-2545-4b8a-9446-22608c0f6cb8",
"region": "ru-1",
"vtep_ip_address": "10.10.0.101"
},
{
"id": 20,
"vlan_id": 1003,
"cidr": "192.168.200.0/24",
"network_id": "67f7ab15-9424-4b50-999a-1c4de12372ec",
"subnet_id": "66ee047b-c699-4d62-9b64-363d2d77f021",
"region": "ru-3",
"vtep_ip_address": "10.10.0.201"
}
]
}
}
`
// crossregionSubnetServerTimeStamp is the parsed "updated" timestamp shared
// by every server entry in the fixtures below.
var crossregionSubnetServerTimeStamp, _ = time.Parse(time.RFC3339, "2019-01-04T08:09:43Z")
// TestGetCrossRegionSubnetResponse represents an unmarshalled TestGetCrossRegionSubnetResponseRaw.
var TestGetCrossRegionSubnetResponse = &crossregionsubnets.CrossRegionSubnet{
	ID: 12,
	CIDR: "192.168.200.0/24",
	VLANID: 1003,
	Status: "ACTIVE",
	ProjectID: "b63ab68796e34858befb8fa2a8b1e12a",
	Servers: []servers.Server{
		{
			ID: "22170dcf-2e58-49b7-9115-951b84d366f6",
			Name: "Node01",
			Status: "ACTIVE",
			Updated: crossregionSubnetServerTimeStamp,
		},
		{
			ID: "df842202-fdcc-490e-b92a-6e252e5577c7",
			Name: "Node02",
			Status: "ACTIVE",
			Updated: crossregionSubnetServerTimeStamp,
		},
	},
	Subnets: []subnets.Subnet{
		{
			ID: 10,
			Region: "ru-1",
			CIDR: "192.168.200.0/24",
			NetworkID: "78c1cbe1-c34d-4685-be2d-a877a1b1dec4",
			SubnetID: "7db1255f-2545-4b8a-9446-22608c0f6cb8",
			VLANID: 1003,
			VTEPIPAddress: "10.10.0.101",
		},
		{
			ID: 20,
			Region: "ru-3",
			CIDR: "192.168.200.0/24",
			NetworkID: "67f7ab15-9424-4b50-999a-1c4de12372ec",
			SubnetID: "66ee047b-c699-4d62-9b64-363d2d77f021",
			VLANID: 1003,
			VTEPIPAddress: "10.10.0.201",
		},
	},
}
// TestListCrossRegionSubnetsResponseRaw represents a raw response from the List request.
const TestListCrossRegionSubnetsResponseRaw = `
{
"cross_region_subnets": [
{
"id": 12,
"cidr": "192.168.200.0/24",
"vlan_id": 1003,
"status": "ACTIVE",
"project_id": "b63ab68796e34858befb8fa2a8b1e12a",
"servers": [
{
"status": "ACTIVE",
"updated": "2019-01-04T08:09:43Z",
"id": "22170dcf-2e58-49b7-9115-951b84d366f6",
"name": "Node01"
},
{
"status": "ACTIVE",
"updated": "2019-01-04T08:09:43Z",
"id": "df842202-fdcc-490e-b92a-6e252e5577c7",
"name": "Node02"
}
],
"subnets": [
{
"id": 10,
"vlan_id": 1003,
"cidr": "192.168.200.0/24",
"network_id": "78c1cbe1-c34d-4685-be2d-a877a1b1dec4",
"subnet_id": "7db1255f-2545-4b8a-9446-22608c0f6cb8",
"region": "ru-1",
"vtep_ip_address": "10.10.0.101"
},
{
"id": 20,
"vlan_id": 1003,
"cidr": "192.168.200.0/24",
"network_id": "67f7ab15-9424-4b50-999a-1c4de12372ec",
"subnet_id": "66ee047b-c699-4d62-9b64-363d2d77f021",
"region": "ru-3",
"vtep_ip_address": "10.10.0.201"
}
]
}
]
}
`
// TestListCrossRegionSubnetsResponse represents an unmarshalled TestListCrossRegionSubnetsResponseRaw.
var TestListCrossRegionSubnetsResponse = []*crossregionsubnets.CrossRegionSubnet{
	{
		ID: 12,
		CIDR: "192.168.200.0/24",
		VLANID: 1003,
		Status: "ACTIVE",
		ProjectID: "b63ab68796e34858befb8fa2a8b1e12a",
		Servers: []servers.Server{
			{
				ID: "22170dcf-2e58-49b7-9115-951b84d366f6",
				Name: "Node01",
				Status: "ACTIVE",
				Updated: crossregionSubnetServerTimeStamp,
			},
			{
				ID: "df842202-fdcc-490e-b92a-6e252e5577c7",
				Name: "Node02",
				Status: "ACTIVE",
				Updated: crossregionSubnetServerTimeStamp,
			},
		},
		Subnets: []subnets.Subnet{
			{
				ID: 10,
				Region: "ru-1",
				CIDR: "192.168.200.0/24",
				NetworkID: "78c1cbe1-c34d-4685-be2d-a877a1b1dec4",
				SubnetID: "7db1255f-2545-4b8a-9446-22608c0f6cb8",
				VLANID: 1003,
				VTEPIPAddress: "10.10.0.101",
			},
			{
				ID: 20,
				Region: "ru-3",
				CIDR: "192.168.200.0/24",
				NetworkID: "67f7ab15-9424-4b50-999a-1c4de12372ec",
				SubnetID: "66ee047b-c699-4d62-9b64-363d2d77f021",
				VLANID: 1003,
				VTEPIPAddress: "10.10.0.201",
			},
		},
	},
}
// TestCreateCrossRegionSubnetsOptsRaw represents marshalled options for the Create request.
const TestCreateCrossRegionSubnetsOptsRaw = `
{
"cross_region_subnets": [
{
"quantity": 1,
"regions": [
{
"region": "ru-1"
},
{
"region": "ru-3"
}
],
"cidr": "192.168.200.0/24"
}
]
}
`
// TestCreateCrossRegionSubnetsOpts represents options for the Create request.
var TestCreateCrossRegionSubnetsOpts = crossregionsubnets.CrossRegionSubnetOpts{
	CrossRegionSubnets: []crossregionsubnets.CrossRegionSubnetOpt{
		{
			Quantity: 1,
			Regions: []crossregionsubnets.CrossRegionOpt{
				{
					Region: "ru-1",
				},
				{
					Region: "ru-3",
				},
			},
			CIDR: "192.168.200.0/24",
		},
	},
}
// TestCreateCrossRegionSubnetsResponseRaw represents a raw response from the Create request.
const TestCreateCrossRegionSubnetsResponseRaw = `
{
"cross_region_subnets": [
{
"id": 12,
"cidr": "192.168.200.0/24",
"vlan_id": 1003,
"status": "DOWN",
"project_id": "b63ab68796e34858befb8fa2a8b1e12a",
"subnets": [
{
"id": 10,
"vlan_id": 1003,
"cidr": "192.168.200.0/24",
"network_id": "78c1cbe1-c34d-4685-be2d-a877a1b1dec4",
"subnet_id": "7db1255f-2545-4b8a-9446-22608c0f6cb8",
"region": "ru-1",
"vtep_ip_address": "10.10.0.101"
},
{
"id": 20,
"vlan_id": 1003,
"cidr": "192.168.200.0/24",
"network_id": "67f7ab15-9424-4b50-999a-1c4de12372ec",
"subnet_id": "66ee047b-c699-4d62-9b64-363d2d77f021",
"region": "ru-3",
"vtep_ip_address": "10.10.0.201"
}
]
}
]
}
`
// TestCreateCrossRegionSubnetsResponse represents an unmarshalled TestCreateCrossRegionSubnetsResponseRaw.
var TestCreateCrossRegionSubnetsResponse = []*crossregionsubnets.CrossRegionSubnet{
	{
		ID: 12,
		CIDR: "192.168.200.0/24",
		VLANID: 1003,
		Status: "DOWN",
		ProjectID: "b63ab68796e34858befb8fa2a8b1e12a",
		Subnets: []subnets.Subnet{
			{
				ID: 10,
				Region: "ru-1",
				CIDR: "192.168.200.0/24",
				NetworkID: "78c1cbe1-c34d-4685-be2d-a877a1b1dec4",
				SubnetID: "7db1255f-2545-4b8a-9446-22608c0f6cb8",
				VLANID: 1003,
				VTEPIPAddress: "10.10.0.101",
			},
			{
				ID: 20,
				Region: "ru-3",
				CIDR: "192.168.200.0/24",
				NetworkID: "67f7ab15-9424-4b50-999a-1c4de12372ec",
				SubnetID: "66ee047b-c699-4d62-9b64-363d2d77f021",
				VLANID: 1003,
				VTEPIPAddress: "10.10.0.201",
			},
		},
	},
}
// TestManyCrossRegionSubnetsInvalidResponseRaw represents a raw invalid response with
// several cross-region subnets (the "id" field has the wrong JSON type).
const TestManyCrossRegionSubnetsInvalidResponseRaw = `
{
"cross_region_subnets": [
{
"id": "b63ab68796e34858befb8fa2a8b1e12a"
}
]
}
`
// TestSingleCrossRegionSubnetInvalidResponseRaw represents a raw invalid response with
// a single cross-region subnet (the "id" field has the wrong JSON type).
const TestSingleCrossRegionSubnetInvalidResponseRaw = `
{
"cross_region_subnet": {
"id": "b63ab68796e34858befb8fa2a8b1e12a"
}
}
`
|
package pojo
import (
"tesou.io/platform/brush-parent/brush-api/common/base/pojo"
)
/**
BFJin describes a recent match record ("近期战绩") for a fixture, including
full-time and half-time scores plus handicap and over/under indices.
*/
type BFJin struct {
	// Match ID
	ScheduleID int64 `json:"ScheduleID" xorm:"comment('比赛ID') index"`
	// League ID
	SclassID int64 `json:"SclassID" xorm:"comment('联赛ID') index"`
	// League name
	SclassName string `json:"SclassName" xorm:"comment('联赛名称') index"`
	// Home team ID
	HomeTeamID int64 `json:"HomeTeamID" xorm:"comment('主队ID') index"`
	// Guest team ID
	GuestTeamID int64 `json:"GuestTeamID" xorm:"comment('客队ID') index"`
	HomeTeam string `json:"HomeTeam" xorm:"comment('主队') unique(MatchTimeStr_HomeTeam_GuestTeam) index"`
	GuestTeam string `json:"GuestTeam" xorm:"comment('客队') unique(MatchTimeStr_HomeTeam_GuestTeam) index"`
	MatchState int `json:"MatchState" xorm:"comment('') index"`
	// Home team goals
	HomeScore int `json:"HomeScore" xorm:"comment('主队进球') index"`
	// Guest team goals
	GuestScore int `json:"GuestScore" xorm:"comment('客队进球') index"`
	// Home team half-time goals
	HomeHalfScore int `json:"HomeHalfScore" xorm:"comment('主队半场进球') index"`
	// Guest team half-time goals
	GuestHalfScore int `json:"GuestHalfScore" xorm:"comment('客队半场进球') index"`
	// Match time (string form; part of the unique key)
	MatchTimeStr string `json:"MatchTimeStr" xorm:"comment('比赛时间') unique(MatchTimeStr_HomeTeam_GuestTeam) index"`
	// Handicap (goal line) index
	Letgoal float64 `json:"Letgoal" xorm:"comment('让球指数') index"`
	// Opening half-time handicap index
	FirstLetgoalHalf float64 `json:"FirstLetgoalHalf" xorm:"comment('让球半场指数') index"`
	// Over/under index
	FirstOU float64 `json:"FirstOU" xorm:"comment('大小指数') index"`
	// Over/under half-time index
	FirstOUHalf float64 `json:"FirstOUHalf" xorm:"comment('大小半场指数') index"`
	IsN int `json:"IsN" xorm:"comment('') index"`
	// Full-time result
	Result string `json:"Result" xorm:"comment('结果') index"`
	// Half-time result
	ResultHalf string `json:"ResultHalf" xorm:"comment('半场结果') index"`
	// Over/under result
	ResultOU string `json:"ResultOU" xorm:"comment('大小结果') index"`
	// Over/under half-time result
	ResultOUHalf string `json:"ResultOUHalf" xorm:"comment('大小半场结果') index"`
	pojo.BasePojo `xorm:"extends"`
}
|
package models
import (
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
// AdminAccount is the gorm model for the backend administrator table.
// NOTE(review): Id duplicates the ID column already provided by the
// embedded gorm.Model — confirm which one the schema actually uses.
type AdminAccount struct {
	gorm.Model
	Id int `json:"id"`
	UserName string `json:"user_name"`
	Phone string `json:"phone"`
	// NOTE(review): passwords appear to be stored/compared as-is; verify
	// they are hashed before reaching this layer.
	Pwd string `json:"pwd"`
	Level int `json:"level"`
	State int `json:"state"`
}
// GetUserBydPwd looks up the account matching the given phone and password.
// When no row matches, the passed-in (usually zero-value) account is
// returned unchanged.
func (m *AdminAccount) GetUserBydPwd(phone string, password string, admin AdminAccount) AdminAccount {
	db = GetInstance().GetMysqlDB()
	query := db.Where("phone = ? and pwd = ?", phone, password)
	query.First(&admin)
	return admin
}
// GetUserList returns one page of admin accounts with level < 3 and
// state == 1.
// BUG FIX: page and pageSize were previously ignored and every matching row
// was returned. Non-positive pageSize keeps the old unpaginated behaviour.
func (m *AdminAccount) GetUserList(page int, pageSize int, admins []AdminAccount) []AdminAccount {
	db = GetInstance().GetMysqlDB()
	query := db.Where("level < ? and state = ?", 3, 1)
	if pageSize > 0 {
		if page < 1 {
			page = 1
		}
		query = query.Offset((page - 1) * pageSize).Limit(pageSize)
	}
	query.Find(&admins)
	return admins
}
// SaveUser inserts the given account and reports whether the insert
// succeeded.
// BUG FIX: the previous version always returned true, even when the
// INSERT failed.
func (m *AdminAccount) SaveUser(account AdminAccount) bool {
	db = GetInstance().GetMysqlDB()
	return db.Create(&account).Error == nil
}
// DelUser soft-deletes the account by setting state to 0 for the matching
// phone number, and reports whether the update succeeded.
// BUG FIX: the previous version always returned true, even when the
// UPDATE failed.
func (m *AdminAccount) DelUser(account AdminAccount) bool {
	db = GetInstance().GetMysqlDB()
	return db.Model(&account).Where("phone = ?", account.Phone).Update("state", 0).Error == nil
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package audit
import (
"encoding/json"
"fmt"
"sync"
"time"
"k8s.io/klog"
)
// ActivityStatus is the activity status.
type ActivityStatus string
// ActivityType is the activity type.
type ActivityType string
// ResourceType is the resource type.
type ResourceType string
const (
	// ActivityStatusUnknow means the activity status is unknown
	// (the string value intentionally keeps the historical spelling).
	ActivityStatusUnknow ActivityStatus = "unknow"
	// ActivityStatusSuccess means the activity is success
	ActivityStatusSuccess ActivityStatus = "success"
	// ActivityStatusFailed means the activity is failed
	ActivityStatusFailed ActivityStatus = "failed"
	// ActivityStatusPending means the activity is pending
	ActivityStatusPending ActivityStatus = "pending"
	// ActivityTypeCreate means the activity type is create
	ActivityTypeCreate ActivityType = "create"
	// ActivityTypeUpdate means the activity type is update
	ActivityTypeUpdate ActivityType = "update"
	// ActivityTypeDelete means the activity type is delete
	ActivityTypeDelete ActivityType = "delete"
	// ActivityTypeStart means the activity type is start
	ActivityTypeStart ActivityType = "start"
	// ActivityTypeStop means the activity type is stop
	ActivityTypeStop ActivityType = "stop"
	// ResourceTypeProject means the resource type is project
	ResourceTypeProject ResourceType = "project"
	// ResourceTypeCluster means the resource type is cluster
	ResourceTypeCluster ResourceType = "cluster"
	// ResourceTypeNode means the resource type is node
	ResourceTypeNode ResourceType = "node"
	// ResourceTypeNodeGroup means the resource type is node group
	ResourceTypeNodeGroup ResourceType = "node_group"
	// ResourceTypeCloudAccount means the resource type is cloud account
	ResourceTypeCloudAccount ResourceType = "cloud_account"
	// ResourceTypeNamespace means the resource type is namespace
	ResourceTypeNamespace ResourceType = "namespace"
	// ResourceTypeTemplateSet means the resource type is template set
	ResourceTypeTemplateSet ResourceType = "template_set"
	// ResourceTypeVariable means the resource type is variable
	ResourceTypeVariable ResourceType = "variable"
	// ResourceTypeK8SResource means the resource type is k8s resource
	ResourceTypeK8SResource ResourceType = "k8s_resource"
	// ResourceTypeHelm means the resource type is helm
	ResourceTypeHelm ResourceType = "helm"
	// ResourceTypeAddons means the resource type is addons
	ResourceTypeAddons ResourceType = "addons"
	// ResourceTypeChart means the resource type is chart
	ResourceTypeChart ResourceType = "chart"
	// ResourceTypeWebConsole means the resource type is web console
	ResourceTypeWebConsole ResourceType = "web_console"
	// ResourceTypeLogRule means the resource type is log rule
	ResourceTypeLogRule ResourceType = "log_rule"
)
// Activity is one audit record describing an operation performed on a
// resource by a user.
type Activity struct {
	ProjectCode string `json:"project_code"`
	ResourceType ResourceType `json:"resource_type"`
	ResourceName string `json:"resource_name"`
	ResourceID string `json:"resource_id"`
	ActivityType ActivityType `json:"activity_type"`
	Status ActivityStatus `json:"status"`
	Username string `json:"username"`
	Description string `json:"description"`
	Extra string `json:"extra"`
}
// ActivityReq is the request body used when pushing a batch of activities.
type ActivityReq struct {
	Activities []Activity `json:"activities"`
}
// ErrorResponse is the error envelope of a failed restful response.
type ErrorResponse struct {
	Error Error `json:"error"`
}
// Error carries the code, message and data of a failed restful response.
type Error struct {
	Code string `json:"code"`
	Message string `json:"message"`
	Data interface{} `json:"data"`
}
var (
	// activityChan buffers activities until the background pusher drains it.
	activityChan = make(chan Activity, 10000)
	// activityOnce guards one-time startup of the background pusher.
	activityOnce sync.Once
	// bcsHost and token configure the audit API endpoint; they are assigned
	// elsewhere in this package.
	bcsHost string
	token string
)
// init starts (once) a background goroutine that drains the activity queue
// every 10 seconds and pushes the collected batch to the audit service.
func init() {
	activityOnce.Do(func() {
		go func() {
			// Flush queued activities every 10 seconds.
			for range time.Tick(10 * time.Second) {
				batch := make([]Activity, 0)
				// Drain everything currently queued without blocking.
			drain:
				for {
					select {
					case a := <-activityChan:
						batch = append(batch, a)
					default:
						break drain
					}
				}
				// BUG FIX: the original used `goto END` with an `END:` label
				// placed directly before a closing brace (a compile error:
				// "missing statement after label") and called
				// batchPushActivity even when the batch was empty.
				if len(batch) > 0 {
					batchPushActivity(batch)
					klog.Infof("push activity success, total %d", len(batch))
				}
			}
		}()
	})
}
// PushActivity enqueues the activity for the background pusher without
// blocking the caller.
func PushActivity(activity Activity) {
	go func(a Activity) {
		activityChan <- a
	}(activity)
}
// batchPushActivity splits the activities into chunks of at most 100 items
// and pushes each chunk concurrently.
func batchPushActivity(activity []Activity) {
	for _, chunk := range SplitSlice(activity, 100) {
		go func(data []Activity) {
			if err := pushActivity(data); err != nil {
				klog.Errorf("push activity failed, %s", err.Error())
			}
		}(chunk)
	}
}
// pushActivity posts one batch of activities to the usermanager audit API.
// It returns an error when the request fails or the API answers non-200.
func pushActivity(activity []Activity) error {
	body := ActivityReq{
		Activities: activity,
	}
	url := fmt.Sprintf("%s/bcsapi/v4/usermanager/v3/activity_logs", bcsHost)
	resp, err := GetClient().R().SetAuthToken(token).SetBody(body).Post(url)
	if err != nil {
		return err
	}
	// The request id lets failures be correlated with server-side logs.
	requestID := resp.Header().Get("x-request-id")
	if resp.StatusCode() != 200 {
		var errorResponse ErrorResponse
		if err = json.Unmarshal(resp.Body(), &errorResponse); err != nil {
			return fmt.Errorf("unmarshal error response failed, %s", err.Error())
		}
		return fmt.Errorf("push activity failed, requestID: %s, code: %s, message: %s", requestID,
			errorResponse.Error.Code, errorResponse.Error.Message)
	}
	return nil
}
|
package models
// Root models one endpoint entry with its named arguments.
// NOTE(review): Argument is declared elsewhere in this package.
type Root struct {
	EndPoint string `json:"end_point"`
	Arguments map[string]Argument `json:"arguments"`
}
|
package main
import (
"bufio"
"os"
"strings"
"github.com/gookit/color"
)
// logInfo prints m with a cyan [info] prefix.
func logInfo(m string) {
	color.Printf("<fg=white>[</><fg=cyan;op=bold>info</><fg=white>]</> » %s\n", m)
}
// logSuccess prints m with a green [success] prefix.
func logSuccess(m string) {
	color.Printf("<fg=white>[</><fg=green;op=bold>success</><fg=white>]</> » %s\n", m)
}
// logErr prints m with a red [err] prefix.
func logErr(m string) {
	color.Printf("<fg=white>[</><fg=red;op=bold>err</><fg=white>]</> » %s\n", m)
}
// logWarn prints m with a yellow [warn] prefix.
func logWarn(m string) {
	color.Printf("<fg=white>[</><fg=yellow;op=bold>warn</><fg=white>]</> » %s\n", m)
}
// logFatal prints m with a red [fatal err] prefix; it does NOT exit.
func logFatal(m string) {
	color.Printf("<fg=white>[</><fg=red;op=bold>fatal err</><fg=white>]</> » %s\n", m)
}
// userInput prompts with m and returns one line read from stdin, with the
// trailing newline (and optional carriage return) stripped.
func userInput(m string) string {
	color.Printf("<fg=white>[</><fg=cyan;op=bold>input</><fg=white>]</> %s » ", m)
	line, _ := bufio.NewReader(os.Stdin).ReadString('\n')
	line = strings.TrimSuffix(line, "\r\n")
	line = strings.TrimSuffix(line, "\n")
	return line
}
|
package main
// PaymentMethod is implemented by every supported way of paying.
type PaymentMethod interface {
	Pay(amount float32) string
}

// PaymentType enumerates the supported payment kinds.
type PaymentType int

const (
	Cash PaymentType = iota
	DebitCard
)

// CashPM pays with cash.
type CashPM struct{}

// DebitCardPM pays with a debit card.
type DebitCardPM struct{}

// Pay performs a cash payment; the receipt text is currently empty.
func (c *CashPM) Pay(amount float32) string {
	return ""
}

// Pay performs a debit-card payment; the receipt text is currently empty.
func (d *DebitCardPM) Pay(amount float32) string {
	return ""
}

// GetPaymentMethod is a simple factory returning the implementation for t;
// anything other than Cash falls back to the debit card.
func GetPaymentMethod(t PaymentType) PaymentMethod {
	if t == Cash {
		return &CashPM{}
	}
	return &DebitCardPM{}
}
func main() {
payment := GetPaymentMethod(DebitCard)
payment.Pay(20)
payment = GetPaymentMethod(Cash)
payment.Pay(20)
}
|
package searching
// BinarySearch will search the key from item array and return boolean value...
func BinarySearch(key int, item []int) bool {
low := 0
high := len(item) - 1
for low <= high {
median := (high + low) / 2
if item[median] == key {
return true
} else if item[median] < key {
low = median + 1
} else {
high = median - 1
}
}
return false
}
|
package message
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
)
// JSONMessage implements message marshalling using the JSON format.
type JSONMessage struct{}

// Marshal encodes v as JSON and returns a reader over the encoded bytes.
func (j *JSONMessage) Marshal(v interface{}) (io.Reader, error) {
	encoded, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	return bytes.NewBuffer(encoded), nil
}

// Unmarshal decodes the JSON read from reader into v.
func (j *JSONMessage) Unmarshal(reader io.Reader, v interface{}) error {
	data, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, v)
}
|
package address
import (
"encoding/json"
"strings"
)
type Address struct {
local string
domain string
}
func (a *Address) Local() string {
return a.local
}
func (a *Address) Domain() string {
return a.domain
}
func (a *Address) String() string {
if a.domain == "" {
return a.local
}
return a.local + "@" + a.domain
}
func (a *Address) MarshalJSON() ([]byte, error) {
type Alias Address
return json.Marshal(&struct {
Local string
Domain string
Address string
}{
a.Local(),
a.Domain(),
a.String(),
})
}
func FromString(address string) *Address {
a := &Address{}
a.local, a.domain = messageParseAddress(address, true)
return a
}
func FromAddressOrDomain(address string) *Address {
a := &Address{}
a.local, a.domain = messageParseAddress(address, false)
return a
}
func messageParseAddress(address string, singleIsUser bool) (local, domain string) {
if strings.Index(address, "@") != -1 {
local = strings.SplitN(address, "@", 2)[0]
domain = strings.SplitN(address, "@", 2)[1]
} else if singleIsUser {
local = address
domain = ""
} else {
local = ""
domain = address
}
return
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// AlterFdwStmt is the parsed form of an ALTER FOREIGN DATA WRAPPER statement.
type AlterFdwStmt struct {
	Fdwname *string
	FuncOptions *ast.List
	Options *ast.List
}

// Pos returns the statement's position in the source; always 0 for this node.
func (n *AlterFdwStmt) Pos() int {
	return 0
}
|
package codebase
import "github.com/almighty/almighty-core/errors"
// CodebaseContent defines all parameters those are useful to associate Che Editor's window to a WI
type CodebaseContent struct {
	Repository string `json:"repository"`
	Branch     string `json:"branch"`
	FileName   string `json:"filename"`
	LineNumber int    `json:"linenumber"`
}

// Following keys define attribute names in the map of Codebase
const (
	RepositoryKey = "repository"
	BranchKey     = "branch"
	FileNameKey   = "filename"
	LineNumberKey = "linenumber"
)

// ToMap converts CodebaseContent to a map of string->interface{}.
func (c *CodebaseContent) ToMap() map[string]interface{} {
	return map[string]interface{}{
		RepositoryKey: c.Repository,
		BranchKey:     c.Branch,
		FileNameKey:   c.FileName,
		LineNumberKey: c.LineNumber,
	}
}
// IsValid performs the following checks:
// - Repository value is mandatory
func (c *CodebaseContent) IsValid() error {
	if c.Repository == "" {
		return errors.NewBadParameterError("system.codebase", RepositoryKey+" is mandatory")
	}
	return nil
}
// NewCodebaseContent builds a CodebaseContent instance from the input map.
// Unknown keys are ignored and the result is validated before it is
// returned.
func NewCodebaseContent(value map[string]interface{}) (CodebaseContent, error) {
	cb := CodebaseContent{}
	validKeys := []string{RepositoryKey, BranchKey, FileNameKey, LineNumberKey}
	for _, key := range validKeys {
		v, ok := value[key]
		if !ok {
			continue
		}
		switch key {
		case RepositoryKey:
			cb.Repository = v.(string)
		case BranchKey:
			cb.Branch = v.(string)
		case FileNameKey:
			cb.FileName = v.(string)
		case LineNumberKey:
			// JSON decoding yields float64 for numbers, so accept both the
			// int and float64 representations (bound type switch instead of
			// the previous switch-then-reassert).
			switch n := v.(type) {
			case int:
				cb.LineNumber = n
			case float64:
				cb.LineNumber = int(n)
			}
		}
	}
	if err := cb.IsValid(); err != nil {
		return cb, err
	}
	return cb, nil
}
// NewCodebaseContentFromValue builds a CodebaseContent from an arbitrary
// value. It returns (nil, nil) for nil input and for unsupported types.
func NewCodebaseContentFromValue(value interface{}) (*CodebaseContent, error) {
	if value == nil {
		return nil, nil
	}
	// Bound type switch instead of the previous switch on value.(type)
	// followed by a second type assertion.
	switch v := value.(type) {
	case CodebaseContent:
		return &v, nil
	case map[string]interface{}:
		result, err := NewCodebaseContent(v)
		if err != nil {
			return nil, err
		}
		return &result, nil
	default:
		return nil, nil
	}
}
|
package torbula
import (
"bytes"
"fmt"
"os"
"path/filepath"
"time"
"github.com/kjk/dailyrotate"
)
// logger bundles the log directory path and the daily-rotating log file
// shared by the logging helpers below.
var logger struct {
	path string
	file *dailyrotate.File
}
// logInit creates the log directory and opens a daily-rotating log file
// whose name is the current date (YYYY-MM-DD.log) inside path.
func logInit(path string) error {
	err := os.MkdirAll(path, 0755)
	if err != nil {
		return fmt.Errorf("failed create log dir: %s %v", path, err)
	}
	logger.path = path
	logger.file, err = dailyrotate.NewFileWithPathGenerator(
		func(t time.Time) string {
			// FIX: the generated file name previously shadowed the `time`
			// package with a local variable named `time`.
			name := t.Format("2006-01-02") + ".log"
			return filepath.Join(path, name)
		},
		nil,
	)
	if err != nil {
		return fmt.Errorf("init logger: %v", err)
	}
	logger.file.Location = time.Local
	// Separate runs with a blank line; a failure here is non-fatal.
	logger.file.Write([]byte{'\n'})
	return nil
}
// logTagged writes one formatted log line prefixed with tag and the current
// wall-clock time (HH:MM:SS).
func logTagged(tag string, format string, v ...interface{}) {
	var builder bytes.Buffer
	fmt.Fprintf(&builder, "%s[%s] ", tag, time.Now().Format("15:04:05"))
	fmt.Fprintf(&builder, format, v...)
	builder.WriteRune('\n')
	logger.file.Write(builder.Bytes())
}
// logAlways writes a log line tagged [ALWAYS].
func logAlways(format string, v ...interface{}) {
	logTagged("[ALWAYS]", format, v...)
}
// logWarning writes a log line tagged [WARNING].
func logWarning(format string, v ...interface{}) {
	logTagged("[WARNING]", format, v...)
}
|
/*
Written by mint.zhao.chiu@gmail.com. github.com: https://www.github.com/mintzhao
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package taskqueue
import (
"log"
)
// TaskFunc is the signature of a task executor.
type TaskFunc func(args ...interface{}) error
// Tasker model, every kind of task has a name and a executer.
type Tasker struct {
	name string
	executer TaskFunc
	tasks TaskQueuer
}
// NewTasker constructs a new task model; it returns nil when name is empty.
func NewTasker(name string, executer TaskFunc, queuer TaskQueuer) *Tasker {
	if name == "" {
		return nil
	}
	t := Tasker{
		name:     name,
		executer: executer,
		tasks:    queuer,
	}
	return &t
}
// Put pushes the task onto the queue. Nil tasks and tasks whose name does
// not match this tasker are silently dropped.
func (t *Tasker) Put(task *Task) {
	if task == nil || task.Name != t.name {
		return
	}
	t.tasks.Push(task)
	log.Printf("tasker put task %v\n", task)
}
// Get pops the next task from the queue, returning nil when it is empty.
func (t *Tasker) Get() *Task {
	popped := t.tasks.Pop()
	if popped == nil {
		return nil
	}
	result := popped.(*Task)
	log.Printf("tasker get task: %v\n", result)
	return result
}
// Task is one queued unit of work.
type Task struct {
	Name     string
	Args     []interface{}
	Priority int
}

// NewTask returns a new task instance with the given name, arguments and
// priority.
func NewTask(name string, args []interface{}, priority int) *Task {
	t := Task{
		Name:     name,
		Args:     args,
		Priority: priority,
	}
	return &t
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.