text stringlengths 11 4.05M |
|---|
package cmd
import (
"log"
config "mysql-metadata/configuration"
"mysql-metadata/internal/cmd"
"mysql-metadata/internal/repositories"
"mysql-metadata/internal/storage"
"os"
)
// MysqlMetaData bootstraps the application: it loads the "dev"
// configuration from <cwd>/configuration/properties, opens the database
// connection, initializes storage, and starts the command loop.
func MysqlMetaData() {
	path, err := os.Getwd()
	if err != nil {
		// The original ignored this error and built a config path from an
		// empty string; log it so a missing working directory is visible.
		log.Println(err)
	}
	path = path + "/configuration/properties"
	config.Configuration, err = config.InitConfig("dev", path)
	if err != nil {
		// Config errors are logged but deliberately not fatal; the
		// downstream initializers may still work with defaults.
		log.Println(err)
	}
	repositories.InitDbConnection()
	storage.InitStorage()
	cmd.StartApplication()
}
|
package main
import (
"fmt"
"net/http"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"github.com/rigglo/gql"
"github.com/rigglo/gql/pkg/handler"
"github.com/rigglo/gqlws"
)
// main wires up the GraphQL HTTP handler (with the playground enabled)
// and a websocket wrapper for subscriptions, then serves on :9999.
func main() {
	// The executor runs queries/mutations/subscriptions against Schema.
	exec := gql.DefaultExecutor(Schema)
	h := handler.New(handler.Config{
		Executor:   exec,
		Playground: true,
	})
	// gqlws upgrades websocket requests (subscriptions) and falls back to
	// the plain HTTP handler h for everything else.
	wsh := gqlws.New(
		gqlws.Config{
			Subscriber: exec.Subscribe,
		},
		h,
	)
	http.Handle("/graphql", wsh)
	if err := http.ListenAndServe(":9999", nil); err != nil {
		panic(err)
	}
}
var (
	// RootSubscription exposes "new_things": a subscription that streams
	// every change event on the MongoDB "foobar.products" collection,
	// rendered as a string.
	RootSubscription = &gql.Object{
		Name: "Subscription",
		Fields: gql.Fields{
			"new_things": &gql.Field{
				Type: gql.String,
				Resolver: func(c gql.Context) (interface{}, error) {
					out := make(chan interface{})
					client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://foo:bar@localhost:27017"))
					if err != nil {
						return nil, err
					}
					err = client.Connect(c.Context())
					if err != nil {
						return nil, err
					}
					database := client.Database("foobar")
					productsCollection := database.Collection("products")
					/* matchPipeline := bson.D{
					{
						"$match", bson.D{
							{"operationType", "insert"},
						},
					},
					} */
					changeStream, err := productsCollection.Watch(c.Context(), mongo.Pipeline{})
					if err != nil {
						// Resolver errors are reported to the client; the
						// original panicked here and killed the server.
						return nil, err
					}
					go func() {
						// Close the stream and the output channel when the
						// subscription context ends or the stream errors.
						// The original leaked both and panicked on decode
						// errors, crashing the whole process.
						defer close(out)
						defer changeStream.Close(c.Context())
						for changeStream.Next(c.Context()) {
							var data bson.M
							if err := changeStream.Decode(&data); err != nil {
								fmt.Printf("decode error: %v\n", err)
								return
							}
							fmt.Printf("%v\n", data)
							out <- changeStream.Current.String()
						}
					}()
					return out, nil
				},
			},
		},
	}
	// RootQuery is intentionally empty: this schema only serves subscriptions.
	RootQuery = &gql.Object{
		Name:   "Query",
		Fields: gql.Fields{},
	}
	// Schema is the executable GraphQL schema served by main.
	Schema = &gql.Schema{
		Query:        RootQuery,
		Subscription: RootSubscription,
	}
)
/*
use admin
db.createUser(
{
user: "foo",
pwd: "bar",
roles: [ { role: "userAdminAnyDatabase", db: "admin" } ]
}
)
*/
|
package routing
import "github.com/gorilla/mux"
// createSubRouter returns a subrouter mounted at "/<urlText>" on the
// package-level router r (declared elsewhere in this package).
func createSubRouter(urlText string) *mux.Router {
	return r.PathPrefix("/" + urlText).Subrouter()
}
|
package goodreadQuoteScraper
import (
"net/http"
"errors"
"fmt"
"github.com/PuerkitoBio/goquery"
"log"
"strconv"
"github.com/gin-gonic/gin/json"
"io/ioutil"
"regexp"
)
// Quote is one scraped Goodreads quote, serialized to quote.json.
type Quote struct {
	Author       string   `json:"author"`
	AuthorAvatar string   `json:"author_avatar"` // may be empty when no avatar image is present
	Content      string   `json:"content"`
	Tags         []string `json:"tags"`
}

// Crawler scrapes paginated quote listings rooted at BaseUrl and
// accumulates the parsed quotes.
type Crawler struct {
	BaseUrl string
	Quotes  []*Quote `json:"quotes"`
}
// fetchDocument GETs url and parses the response body into a goquery
// document; non-200 responses become an error.
func (c *Crawler) fetchDocument(url string) (*goquery.Document, error) {
	res, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		msg := fmt.Sprintf("status code error: %d %s", res.StatusCode, res.Status)
		return nil, errors.New(msg)
	}
	return goquery.NewDocumentFromReader(res.Body)
}
// worker consumes page numbers from jobs and pushes each page's parsed
// quotes onto results. Runs until jobs is closed.
func (c *Crawler) worker(id int, jobs <-chan int, results chan<- []*Quote) {
	for page := range jobs {
		log.Printf("Worker %d is take page %d\n", id, page)
		results <- c.getQuoteInPage(page)
	}
}
// Crawl scrapes every listing page with a fixed pool of 3 workers and
// writes all quotes to ./quote.json. Fatal on marshal or write failure.
func (c *Crawler) Crawl() {
	totalPage := c.getTotalPage()
	// Buffer both channels to totalPage so neither producers nor workers
	// ever block on a full channel.
	jobs := make(chan int, totalPage)
	results := make(chan []*Quote, totalPage)
	for workerIndex := 1; workerIndex <= 3; workerIndex++ {
		go c.worker(workerIndex, jobs, results)
	}
	for j := 1; j <= totalPage; j++ {
		jobs <- j
	}
	close(jobs)
	// Exactly one result slice arrives per page.
	var quotes []*Quote
	for a := 1; a <= totalPage; a++ {
		quotes = append(quotes, <-results...)
	}
	// The original discarded this error with `_`; a marshal failure would
	// have silently written an empty file.
	bQuotes, err := json.Marshal(quotes)
	if err != nil {
		log.Fatalln(err)
	}
	if err := ioutil.WriteFile("./quote.json", bQuotes, 0644); err != nil {
		log.Fatalln(err)
	}
}
// getTotalPage scrapes the pagination links on the base listing page and
// returns the last page number. The count is read from the second-to-last
// "a" element next to ".next_page" — assumes the last link is the "next"
// arrow and that at least two links exist; TODO confirm against the live
// markup (a shorter list would panic in Eq). Fetch/parse failures abort
// the process via log.Fatalln.
func (c *Crawler) getTotalPage() int {
	doc, err := c.fetchDocument(c.BaseUrl)
	if err != nil {
		log.Fatalln(err)
	}
	docPageList := doc.Find(".next_page").Parent().Find("a")
	docLastPage := docPageList.Eq(docPageList.Length() - 2)
	totalPage, err := strconv.Atoi(docLastPage.Text())
	if err != nil {
		log.Fatalln(err)
	}
	return totalPage
}
// getQuoteInPage fetches one listing page ("<BaseUrl>?page=N") and parses
// every div.quote on it; quotes that fail to parse are logged and skipped.
func (c *Crawler) getQuoteInPage(pageIndex int) []*Quote {
	doc, err := c.fetchDocument(fmt.Sprintf("%s?page=%d", c.BaseUrl, pageIndex))
	if err != nil {
		log.Fatalln(err)
	}
	var quotes []*Quote
	doc.Find("div.quote").Each(func(_ int, sel *goquery.Selection) {
		q, perr := c.parseQuote(sel)
		if perr != nil {
			log.Println(perr)
			return
		}
		quotes = append(quotes, q)
	})
	return quotes
}
func (c *Crawler)parseQuote(selection *goquery.Selection)(*Quote, error){
quote := &Quote{
Author: selection.Find(".authorOrTitle").Text(),
Tags: []string{},
}
avatarUrl, ok := selection.Find(".leftAlignedImage img").Attr("src")
if ok {
quote.AuthorAvatar = avatarUrl
}
var re = regexp.MustCompile(`(?m)“([^”]+)”`)
quote.Content = re.FindStringSubmatch(selection.Find(".quoteText").Text())[1]
selection.Find(".quoteFooter .greyText a").Each(func(i int, selection *goquery.Selection) {
quote.Tags = append(quote.Tags, selection.Text())
})
return quote, nil
} |
/*Copyright [2019] housepower
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package task
import (
"fmt"
"runtime"
"strconv"
"strings"
"time"
"github.com/housepower/clickhouse_sinker/input"
"github.com/housepower/clickhouse_sinker/model"
"github.com/housepower/clickhouse_sinker/output"
"github.com/housepower/clickhouse_sinker/parser"
"github.com/sundy-li/go_commons/log"
)
// Service holds the moving parts of one sink task: a Kafka consumer, a
// ClickHouse writer, and the parser that turns raw messages into metrics.
type Service struct {
	stopped       chan struct{}      // signalled by Run after the final flush
	kafka         *input.Kafka       // message source
	clickhouse    *output.ClickHouse // batch destination
	p             parser.Parser      // raw bytes -> model.Metric
	FlushInterval int                // seconds between time-based flushes
	BufferSize    int                // flush as soon as this many messages buffer
	MinBufferSize int                // skip a time-based flush below this count
}
// NewTaskService creates a Service around the given Kafka input,
// ClickHouse output, and parser. FlushInterval/BufferSize/MinBufferSize
// are left at zero for the caller to configure.
func NewTaskService(kafka *input.Kafka, clickhouse *output.ClickHouse, p parser.Parser) *Service {
	return &Service{
		stopped:    make(chan struct{}),
		kafka:      kafka,
		clickhouse: clickhouse,
		p:          p,
	}
}
// Init initializes the Kafka and ClickHouse clients associated with this
// service, in that order; the first failure is returned.
func (service *Service) Init() error {
	if err := service.kafka.Init(); err != nil {
		return err
	}
	return service.clickhouse.Init()
}
// Run starts the task: it consumes Kafka messages and writes them to
// ClickHouse in batches. A batch is flushed when BufferSize messages
// have accumulated, or on the FlushInterval tick when at least
// MinBufferSize messages are pending. Run returns when the Kafka
// message channel closes, after a final flush and a signal to Stop().
func (service *Service) Run() {
	if err := service.kafka.Start(); err != nil {
		panic(err)
	}
	log.Infof("TaskService %s TaskService has started", service.clickhouse.GetName())
	tick := time.NewTicker(time.Duration(service.FlushInterval) * time.Second)
	// Evaluate tick at call time so the *current* ticker is stopped.
	defer func() { tick.Stop() }()
	msgs := make([]model.Metric, 0, service.BufferSize)
FOR:
	for {
		select {
		case msg, more := <-service.kafka.Msgs():
			if !more {
				break FOR
			}
			msgs = append(msgs, service.parse(msg))
			if len(msgs) >= service.BufferSize {
				service.flush(msgs)
				msgs = msgs[:0]
				// Restart the interval after a size-based flush. The
				// original replaced the ticker without stopping the old
				// one, leaking a ticker on every size-based flush.
				tick.Stop()
				tick = time.NewTicker(time.Duration(service.FlushInterval) * time.Second)
			}
		case <-tick.C:
			log.Info(service.clickhouse.GetName() + " tick")
			if len(msgs) == 0 || len(msgs) < service.MinBufferSize {
				continue
			}
			service.flush(msgs)
			msgs = msgs[:0]
		}
	}
	// Drain whatever is buffered, then unblock Stop().
	service.flush(msgs)
	service.stopped <- struct{}{}
}
// parse converts one raw Kafka payload into a metric via the configured parser.
func (service *Service) parse(data []byte) model.Metric {
	return service.p.Parse(data)
}

// flush writes the buffered metrics to ClickHouse (LoopWrite retries
// internally; see output.ClickHouse).
func (service *Service) flush(metrics []model.Metric) {
	log.Info("buf size:", len(metrics))
	service.clickhouse.LoopWrite(metrics)
}
// Stop stops the Kafka client, waits for Run to drain and exit, then
// closes the ClickHouse client.
func (service *Service) Stop() {
	log.Info("close TaskService size:")
	if err := service.kafka.Stop(); err != nil {
		panic(err)
	}
	// Blocks until Run has flushed its remaining buffer.
	<-service.stopped
	// Close error deliberately ignored during shutdown.
	_ = service.clickhouse.Close()
	log.Info("closed TaskService size:")
}
// GoID returns the id of the calling goroutine, parsed from the first
// line of its stack trace ("goroutine <id> [...]"). Panics if the trace
// cannot be parsed. Intended for debugging only.
func GoID() int {
	stack := make([]byte, 64)
	stack = stack[:runtime.Stack(stack, false)]
	fields := strings.Fields(strings.TrimPrefix(string(stack), "goroutine "))
	id, err := strconv.Atoi(fields[0])
	if err != nil {
		panic(fmt.Sprintf("cannot get goroutine id: %v", err))
	}
	return id
}
|
package testdata
import (
"github.com/frk/gosql/internal/testdata/common"
)
// UpdatePKeyCompositeSingleQuery is a testdata fixture; the `rel` tag
// binds the single record to the test_composite_pkey relation under the
// alias "p".
type UpdatePKeyCompositeSingleQuery struct {
	Data *common.ConflictData `rel:"test_composite_pkey:p"`
}
|
package dialect
// Mssql is the Microsoft SQL Server dialect, inheriting default behavior
// from CommonDialect.
type Mssql struct {
	CommonDialect
}

// GetName returns the dialect identifier "mssql".
func (Mssql) GetName() string {
	return "mssql"
}
|
package iot
// NOTE(review): both tokens below are committed placeholders — real
// credentials must come from configuration, not source.
const (
	// WsnToken is the token of wsn iot cloud
	WsnToken = "your_wsn_token"
	// WsnNumericalAPI is the api of wsn iot cloud for pushing numerical datapoints
	WsnNumericalAPI = "http://www.wsncloud.com/api/data/v1/numerical/insert"
	// WsnGenericAPI is the api of wsn iot cloud for pushing generic datapoints
	WsnGenericAPI = "http://www.wsncloud.com/api/data/v1/generic/insert"
)

const (
	// OneNetToken is the token of OneNet iot cloud
	OneNetToken = "your_onenet_token"
	// OneNetAPI is the api of OneNet iot cloud for pushing datapoints
	OneNetAPI = "http://api.heclouds.com/devices/540381180/datapoints"
)

// WsnConfig holds the credentials and endpoint for the WSN cloud.
type WsnConfig struct {
	Token string `json:"token"`
	API   string `json:"api"`
}

// OneNetConfig holds the credentials and endpoint for the OneNet cloud.
type OneNetConfig struct {
	Token string `json:"token"`
	API   string `json:"api"`
}
|
package main
import (
"bingjian-go/com/ning/iris-demo/01-hello/util"
"fmt"
)
// main prints one value produced by util.Rand.
func main() {
	fmt.Println(util.Rand())
}
package pgsql
import (
"database/sql"
"database/sql/driver"
)
// UUIDArrayFromByteArray16Slice returns a driver.Valuer that produces a PostgreSQL uuid[] from the given Go [][16]byte.
// A nil slice is encoded as SQL NULL; an empty slice as "{}".
func UUIDArrayFromByteArray16Slice(val [][16]byte) driver.Valuer {
	return uuidArrayFromByteArray16Slice{val: val}
}

// UUIDArrayToByteArray16Slice returns an sql.Scanner that converts a PostgreSQL uuid[] into a Go [][16]byte and sets it to val.
// SQL NULL sets *val to nil.
func UUIDArrayToByteArray16Slice(val *[][16]byte) sql.Scanner {
	return uuidArrayToByteArray16Slice{val: val}
}

// UUIDArrayFromStringSlice returns a driver.Valuer that produces a PostgreSQL uuid[] from the given Go []string.
// A nil slice is encoded as SQL NULL; an empty slice as "{}".
func UUIDArrayFromStringSlice(val []string) driver.Valuer {
	return uuidArrayFromStringSlice{val: val}
}

// UUIDArrayToStringSlice returns an sql.Scanner that converts a PostgreSQL uuid[] into a Go []string and sets it to val.
// SQL NULL sets *val to nil.
func UUIDArrayToStringSlice(val *[]string) sql.Scanner {
	return uuidArrayToStringSlice{val: val}
}

// UUIDArrayFromByteSliceSlice returns a driver.Valuer that produces a PostgreSQL uuid[] from the given Go [][]byte.
// A nil outer slice is encoded as SQL NULL; nil elements as NULL array entries.
func UUIDArrayFromByteSliceSlice(val [][]byte) driver.Valuer {
	return uuidArrayFromByteSliceSlice{val: val}
}

// UUIDArrayToByteSliceSlice returns an sql.Scanner that converts a PostgreSQL uuid[] into a Go [][]byte and sets it to val.
// NULL array entries become nil elements.
func UUIDArrayToByteSliceSlice(val *[][]byte) sql.Scanner {
	return uuidArrayToByteSliceSlice{val: val}
}
// uuidArrayFromByteArray16Slice wraps a [][16]byte for encoding as a
// PostgreSQL uuid[] literal.
type uuidArrayFromByteArray16Slice struct {
	val [][16]byte
}

// Value implements driver.Valuer: nil maps to SQL NULL, an empty slice
// to "{}", otherwise "{uuid,uuid,...}" where each element is the
// 36-character text form produced by pgFormatUUID.
func (v uuidArrayFromByteArray16Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{'{', '}'}, nil
	}
	// 1 byte for '{' plus, per element, 1 delimiter byte and 36 UUID
	// bytes; the final delimiter slot is overwritten by '}' below.
	size := 1 + len(v.val) + (len(v.val) * 36)
	out := make([]byte, size)
	out[0] = '{'
	var pos int
	for i := 0; i < len(v.val); i++ {
		pos += 1
		uuid := pgFormatUUID(v.val[i])
		copy(out[pos:pos+36], uuid)
		pos += 36
		out[pos] = ','
	}
	// Replace the trailing comma with the closing brace.
	out[pos] = '}'
	return out, nil
}
// uuidArrayToByteArray16Slice wraps a *[][16]byte destination for
// decoding a PostgreSQL uuid[].
type uuidArrayToByteArray16Slice struct {
	val *[][16]byte
}

// Scan implements sql.Scanner: SQL NULL sets the destination to nil;
// otherwise each comma-separated element is parsed with pgParseUUID.
// The first parse failure aborts the scan, leaving *val untouched.
func (v uuidArrayToByteArray16Slice) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseCommaArray(data)
	uuids := make([][16]byte, len(elems))
	for i := 0; i < len(elems); i++ {
		uuid, err := pgParseUUID(elems[i])
		if err != nil {
			return err
		}
		uuids[i] = uuid
	}
	*v.val = uuids
	return nil
}
// uuidArrayFromStringSlice wraps a []string for encoding as a PostgreSQL
// uuid[] literal. The strings are written verbatim (no quoting or
// validation) — assumes each is a valid UUID text form.
type uuidArrayFromStringSlice struct {
	val []string
}

// Value implements driver.Valuer: nil maps to SQL NULL, an empty slice
// to "{}", otherwise "{s1,s2,...}".
func (v uuidArrayFromStringSlice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{'{', '}'}, nil
	}
	// Braces plus len-1 commas plus the element bytes; the loop writes a
	// comma after every element and the last one is overwritten by '}'.
	size := 2 + (len(v.val) - 1)
	for i := 0; i < len(v.val); i++ {
		size += len(v.val[i])
	}
	out := make([]byte, size)
	out[0] = '{'
	var pos int
	for i := 0; i < len(v.val); i++ {
		pos += 1
		length := len(v.val[i])
		copy(out[pos:pos+length], v.val[i])
		pos += length
		out[pos] = ','
	}
	out[pos] = '}'
	return out, nil
}
// uuidArrayToStringSlice wraps a *[]string destination for decoding a
// PostgreSQL uuid[].
type uuidArrayToStringSlice struct {
	val *[]string
}

// Scan implements sql.Scanner: SQL NULL yields a nil slice, otherwise
// each array element becomes one string.
func (v uuidArrayToStringSlice) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	}
	if data == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseCommaArray(data)
	out := make([]string, len(elems))
	for i, e := range elems {
		out[i] = string(e)
	}
	*v.val = out
	return nil
}
// uuidArrayFromByteSliceSlice wraps a [][]byte for encoding as a
// PostgreSQL uuid[] literal; nil inner slices are written as NULL
// array entries. Elements are written verbatim (assumed valid UUID text).
type uuidArrayFromByteSliceSlice struct {
	val [][]byte
}

// Value implements driver.Valuer: a nil outer slice maps to SQL NULL, an
// empty one to "{}", otherwise "{e1,e2,...}" with "NULL" for nil elements.
func (v uuidArrayFromByteSliceSlice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{'{', '}'}, nil
	}
	// Braces plus len-1 commas plus element bytes (4 for each "NULL").
	size := 2 + (len(v.val) - 1)
	for i := 0; i < len(v.val); i++ {
		if v.val[i] == nil {
			size += 4 // len("NULL")
		} else {
			size += len(v.val[i])
		}
	}
	out := make([]byte, size)
	out[0] = '{'
	var pos int
	for i := 0; i < len(v.val); i++ {
		pos += 1
		if v.val[i] == nil {
			out[pos+0] = 'N'
			out[pos+1] = 'U'
			out[pos+2] = 'L'
			out[pos+3] = 'L'
			pos += 4
		} else {
			length := len(v.val[i])
			copy(out[pos:pos+length], v.val[i])
			pos += length
		}
		out[pos] = ','
	}
	// The final comma slot becomes the closing brace.
	out[pos] = '}'
	return out, nil
}
// uuidArrayToByteSliceSlice wraps a *[][]byte destination for decoding a
// PostgreSQL uuid[].
type uuidArrayToByteSliceSlice struct {
	val *[][]byte
}

// Scan implements sql.Scanner: SQL NULL sets the destination to nil;
// NULL array entries become nil elements; everything else is copied out
// of the parse buffer into fresh slices.
func (v uuidArrayToByteSliceSlice) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseCommaArray(data)
	bytess := make([][]byte, len(elems))
	for i := 0; i < len(elems); i++ {
		// NOTE(review): NULL detection is loose — any 4-byte element
		// starting with 'N' is treated as NULL. Safe only because valid
		// UUID texts are 36 bytes; confirm pgParseCommaArray guarantees that.
		if len(elems[i]) == 4 && elems[i][0] == 'N' { // NULL?
			continue
		}
		bytess[i] = make([]byte, len(elems[i]))
		copy(bytess[i], elems[i])
	}
	*v.val = bytess
	return nil
}
|
package config
import (
"bytes"
"encoding/json"
"errors"
"gopkg.in/yaml.v2"
"os"
"path"
"reflect"
"strconv"
"strings"
"text/template"
)
//- ConfigBytes Functions ------------------------------------------------------

// ConfigBytes holds the raw, template-expanded contents of a config file.
type ConfigBytes []byte
// ReadConfigFile reads the specified config file. Note that bedrock.Application
// will process the config file, using text/template, with the following extra
// functions (provided by TemplateContext):
//
//	{{.Env "ENVIRONMENT_VARIABLE"}}
//	{{.Cat "File name"}}
//	{{.Base64 "a string"}}
func ReadConfigFile(file string) (ConfigBytes, error) {
	// Fail fast when the file cannot be stat'ed. NOTE(review): the
	// underlying error detail is discarded here.
	if _, err := os.Stat(file); err != nil {
		return nil, errors.New("config path not valid")
	}
	tmpl, err := template.New(path.Base(file)).ParseFiles(file)
	if err != nil {
		return nil, err
	}
	// Execute the template with the helper context, capturing the expanded
	// config into memory.
	var configBytes bytes.Buffer
	tc := TemplateContext{}
	err = tmpl.Execute(&configBytes, &tc)
	if err != nil {
		return nil, err
	}
	return ConfigBytes(configBytes.Bytes()), nil
}
// Bytes returns the raw config bytes.
func (c ConfigBytes) Bytes() []byte {
	return []byte(c)
}

// Unmarshal decodes the config as YAML into dst.
func (c ConfigBytes) Unmarshal(dst interface{}) error {
	return yaml.Unmarshal(c, dst)
}

// UnmarshalJson decodes the config as JSON into dst.
func (c ConfigBytes) UnmarshalJson(dst interface{}) error {
	return json.Unmarshal(c, dst)
}
// UnmarshalAt unmarshals a specific top-level key of the YAML config into
// dst, by decoding the whole document and re-marshalling just that key.
// A missing key round-trips as YAML null, leaving dst untouched.
func (c ConfigBytes) UnmarshalAt(dst interface{}, key string) error {
	// interface{} keys are fine here: yaml.v2 produces them natively.
	var full = make(map[interface{}]interface{})
	if err := c.Unmarshal(&full); err != nil {
		return err
	}
	d, err := yaml.Marshal(full[key])
	if err != nil {
		return err
	}
	return yaml.Unmarshal([]byte(d), dst)
}
// UnmarshalJsonAt unmarshals a specific top-level key of the JSON config
// into dst, by decoding the whole document and re-marshalling just that
// key. A missing key round-trips as JSON null, leaving dst untouched.
//
// Fix: the previous implementation decoded into
// map[interface{}]interface{}, which encoding/json rejects (JSON object
// keys must decode into string-keyed maps), so the method always errored.
func (c ConfigBytes) UnmarshalJsonAt(dst interface{}, key string) error {
	var full = make(map[string]interface{})
	if err := c.UnmarshalJson(&full); err != nil {
		return err
	}
	d, err := json.Marshal(full[key])
	if err != nil {
		return err
	}
	return json.Unmarshal([]byte(d), dst)
}
// PopulateEnvConfig loads environment variables into the fields of the
// struct pointed to by c, using each field's `env` tag as the variable
// name. Unset or empty variables leave the field untouched. Supported
// field types: bool, int/int64, float32, float64; any other type is set
// via SetString. A value that fails to parse sets the field to the zero
// value of the parsed type (parse errors are deliberately ignored).
func PopulateEnvConfig(c interface{}) {
	structType := reflect.TypeOf(c).Elem()
	structValue := reflect.ValueOf(c).Elem()
	for i := 0; i < structType.NumField(); i++ {
		raw := os.Getenv(structType.Field(i).Tag.Get("env"))
		if raw == "" {
			continue
		}
		field := structValue.Field(i)
		switch field.Type().String() {
		case "bool":
			b, _ := strconv.ParseBool(raw)
			field.SetBool(b)
		case "int", "int64":
			n, _ := strconv.ParseInt(raw, 10, 64)
			field.SetInt(n)
		case "float32":
			f, _ := strconv.ParseFloat(raw, 32)
			field.SetFloat(f)
		case "float64":
			f, _ := strconv.ParseFloat(raw, 64)
			field.SetFloat(f)
		default:
			field.SetString(raw)
		}
	}
}
// SplitByCommaSpace splits s on every comma or space, discarding empty
// fields. It is handy for turning comma-separated env values into a
// string slice.
func SplitByCommaSpace(s string) []string {
	return strings.FieldsFunc(s, func(r rune) bool {
		return r == ',' || r == ' '
	})
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//808. Soup Servings
//There are two types of soup: type A and type B. Initially we have N ml of each type of soup. There are four kinds of operations:
//Serve 100 ml of soup A and 0 ml of soup B
//Serve 75 ml of soup A and 25 ml of soup B
//Serve 50 ml of soup A and 50 ml of soup B
//Serve 25 ml of soup A and 75 ml of soup B
//When we serve some soup, we give it to someone and we no longer have it. Each turn, we will choose from the four operations with equal probability 0.25. If the remaining volume of soup is not enough to complete the operation, we will serve as much as we can. We stop once we no longer have some quantity of both types of soup.
//Note that we do not have the operation where all 100 ml's of soup B are used first.
//Return the probability that soup A will be empty first, plus half the probability that A and B become empty at the same time.
// Example:
//Input: N = 50
//Output: 0.625
//Explanation:
//If we choose the first two operations, A will become empty first. For the third operation, A and B will become empty at the same time. For the fourth operation, B will become empty first. So the total probability of A becoming empty first plus half the probability that A and B become empty at the same time, is 0.25 * (1 + 1 + 0.5 + 0) = 0.625.
//Notes:
//0 <= N <= 10^9.
//Answers within 10^-6 of the true value will be accepted as correct.
//func soupServings(N int) float64 {
//}
// Time Is Money |
package main
import (
"context"
"fmt"
"time"
)
func Yield(ctx context.Context) <-chan string {
ch := make(chan string)
go func() {
for {
tCh := time.After(3 * time.Second)
select {
case <-ctx.Done():
return
case <-tCh:
ch <- fmt.Sprintf("%+v", time.Now())
}
}
}()
return ch
}
// main prints every timestamp emitted by Yield, forever (the channel is
// never closed, so this loop does not terminate).
func main() {
	for msg := range Yield(context.Background()) {
		fmt.Println(msg)
	}
}
|
// Copyright © 2020 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package standard
import (
"context"
"encoding/binary"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/pkg/errors"
e2wtypes "github.com/wealdtech/go-eth2-wallet-types/v2"
)
// SignRANDAOReveal returns a RANDAO reveal signature.
// This signs an epoch with the "RANDAO reveal" domain.
func (s *Service) SignRANDAOReveal(ctx context.Context,
	account e2wtypes.Account,
	slot phase0.Slot,
) (
	phase0.BLSSignature,
	error,
) {
	epoch := phase0.Epoch(slot / s.slotsPerEpoch)
	// Obtain the RANDAO reveal signature domain.
	domain, err := s.domainProvider.Domain(ctx,
		s.randaoDomainType,
		epoch)
	if err != nil {
		return phase0.BLSSignature{}, errors.Wrap(err, "failed to obtain signature domain for RANDAO reveal")
	}
	// The signed message is the epoch, serialized little-endian into the
	// first 8 bytes of a 32-byte root. (The original computed this twice,
	// into an unused messageRoot as well; the dead copy is removed.)
	var epochBytes phase0.Root
	binary.LittleEndian.PutUint64(epochBytes[:], uint64(epoch))
	sig, err := s.sign(ctx, account, epochBytes, domain)
	if err != nil {
		return phase0.BLSSignature{}, errors.Wrap(err, "failed to sign RANDAO reveal")
	}
	return sig, nil
}
|
package proto
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"errors"
"fmt"
"log"
"github.com/DragonF0rm/decent_5_remote_key/netemu"
)
// ErrChallengeFailed is returned when a challenge-response round trip
// does not echo the expected challenge value.
var ErrChallengeFailed = errors.New("challenge failed")

// Peer is one participant (car or key) in the remote-key protocol.
type Peer struct {
	name   string
	pubKey *ecdsa.PublicKey
	prvKey *ecdsa.PrivateKey
	// Assume that both car and key know each other's public keys;
	// targetPubKey verifies requests from the counterpart.
	targetPubKey *ecdsa.PublicKey
	nh           *netemu.NetworkHandle
	logger       *log.Logger
}
// NewPeer generates a fresh P-256 ECDSA keypair, joins the emulated
// network, and returns a Peer ready to exchange requests. The target
// public key must be set separately via SetTargetPubKey.
func NewPeer(name string, net *netemu.Network) (*Peer, error) {
	prvKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, fmt.Errorf("ecdsa.GenerateKey: %w", err)
	}
	return &Peer{
		name:   name,
		pubKey: &prvKey.PublicKey,
		prvKey: prvKey,
		nh:     net.Join(),
		logger: log.New(log.Writer(), "proto.Peer: ["+name+"] ", log.Flags()),
	}, nil
}
// PubKey returns this peer's public key.
func (peer *Peer) PubKey() *ecdsa.PublicKey {
	return peer.pubKey
}

// Addr returns this peer's address on the emulated network.
func (peer *Peer) Addr() netemu.Addr {
	return peer.nh.Addr()
}

// SetTargetPubKey records the counterpart's public key, used to verify
// incoming requests in RecieveVerified.
func (peer *Peer) SetTargetPubKey(targetPubKey *ecdsa.PublicKey) {
	peer.targetPubKey = targetPubKey
}
// Send broadcasts req to every peer on the emulated network (there is no
// unicast here — the recipient is always ADDR_BROADCAST).
func (peer *Peer) Send(ctx context.Context, req *Request) error {
	if err := peer.nh.Send(ctx, &netemu.Packet{
		Sender:    peer.nh.Addr(),
		Recipient: netemu.ADDR_BROADCAST,
		Payload:   req,
	}); err != nil {
		return fmt.Errorf("peer.nh.Send: %w", err)
	}
	return nil
}
// RecieveVerified blocks until a packet arrives that (a) carries a
// *Request payload, (b) has the expected request type, and (c) passes
// signature verification against the target public key; anything else is
// logged and dropped. Network errors and verification failures abort.
// NOTE(review): "Recieve" is misspelled but kept — callers depend on it.
func (peer *Peer) RecieveVerified(ctx context.Context, reqType uint32) (*Request, error) {
	for {
		p, err := peer.nh.Recieve(ctx)
		if err != nil {
			return nil, fmt.Errorf("peer.nh.Recieve: %w", err)
		}
		r, ok := p.Payload.(*Request)
		if !ok {
			peer.logger.Printf("got packet with invalid payload, dropping: %v\n", p)
			continue
		}
		if r.Type != reqType {
			peer.logger.Printf("got packet with unexpected request type, dropping: %v\n", p)
			continue
		}
		ok, err = r.Verify(peer.targetPubKey)
		if err != nil {
			return nil, fmt.Errorf("r.Verify: %w", err)
		}
		if !ok {
			peer.logger.Printf("request verification failed, dropping: %v\n", r)
			continue
		}
		return r, nil
	}
}
// ExecAction runs the initiator side of the challenge-response exchange:
// send ACTION (carrying our challenge), require the CHALLENGE reply to
// echo that challenge, then answer the counterpart's challenge with a
// signed PROOF. Returns ErrChallengeFailed on a mismatched echo.
func (peer *Peer) ExecAction(ctx context.Context) error {
	reqAction, err := NewSignedRequest(PROTO_REQ_ACTION, 0, peer.prvKey)
	if err != nil {
		return fmt.Errorf("NewSignedRequest: %w", err)
	}
	if err = peer.Send(ctx, reqAction); err != nil {
		return fmt.Errorf("peer.Send: %w", err)
	}
	reqChallenge, err := peer.RecieveVerified(ctx, PROTO_REQ_CHALLENGE)
	if err != nil {
		return fmt.Errorf("peer.RecieveVerified: %w", err)
	}
	// The reply must echo our challenge, proving it answers this session.
	if reqAction.Challenge != reqChallenge.ChallengeResp {
		return fmt.Errorf("%w: expected %d, got %d", ErrChallengeFailed,
			reqAction.Challenge, reqChallenge.ChallengeResp)
	}
	reqProof, err := NewSignedRequest(PROTO_REQ_PROOF, reqChallenge.Challenge, peer.prvKey)
	if err != nil {
		return fmt.Errorf("NewSignedRequest: %w", err)
	}
	if err = peer.Send(ctx, reqProof); err != nil {
		return fmt.Errorf("peer.Send: %w", err)
	}
	return nil
}
// HandleAction runs the responder side: wait for a verified ACTION,
// reply with a CHALLENGE echoing the initiator's challenge, then require
// the PROOF to echo our own challenge before declaring the action
// executed. Returns ErrChallengeFailed on a mismatched proof.
func (peer *Peer) HandleAction(ctx context.Context) error {
	reqAction, err := peer.RecieveVerified(ctx, PROTO_REQ_ACTION)
	if err != nil {
		return fmt.Errorf("peer.RecieveVerified: %w", err)
	}
	reqChallenge, err := NewSignedRequest(PROTO_REQ_CHALLENGE, reqAction.Challenge, peer.prvKey)
	if err != nil {
		return fmt.Errorf("NewSignedRequest: %w", err)
	}
	if err = peer.Send(ctx, reqChallenge); err != nil {
		return fmt.Errorf("peer.Send: %w", err)
	}
	reqProof, err := peer.RecieveVerified(ctx, PROTO_REQ_PROOF)
	if err != nil {
		return fmt.Errorf("peer.RecieveVerified: %w", err)
	}
	if reqChallenge.Challenge != reqProof.ChallengeResp {
		return fmt.Errorf("%w: expected %d, got %d", ErrChallengeFailed,
			reqChallenge.Challenge, reqProof.ChallengeResp)
	}
	peer.logger.Println("ACTION EXECUTED")
	return nil
}
|
package main
import (
"fmt"
"log"
"strconv"
"strings"
"text/scanner"
)
var input string = "input.txt"
// add returns the sum of its arguments (0 when called with none).
func add(nums ...int) (sum int) {
	for _, v := range nums {
		sum += v
	}
	return
}
// mul returns the product of its arguments (1 when called with none).
func mul(nums ...int) (product int) {
	product = 1
	for _, v := range nums {
		product *= v
	}
	return
}
// id is the identity "operator" used as the initial ops in evaluate: it
// discards the running result (nums[0]) and returns the new operand
// (nums[1]), so the seed value is not double-counted. Panics if called
// with fewer than two arguments.
func id(nums ...int) int {
	//fmt.Println("  id", nums)
	return nums[1]
}
// findcorrespondingclosing returns the index within set of the ")" that
// matches the "(" at set[0], tracking nesting depth. Returns -1 (after
// logging) when the parentheses never balance.
func findcorrespondingclosing(set []string) int {
	depth := 0
	for i, tok := range set {
		switch tok {
		case "(":
			depth++
		case ")":
			depth--
		}
		if depth == 0 {
			return i
		}
	}
	log.Printf("malformat parenthesis")
	return -1
}
// evaluate computes a tokenized expression with NO operator precedence:
// tokens are folded left to right, applying the pending operator (ops)
// between the running result and each new operand. ops starts as id,
// which discards the stale seed and returns the first real operand, so
// the value taken from expr[0] is never double-counted. Parenthesized
// groups are evaluated recursively.
func evaluate(expr []string) int {
	ops := id
	if len(expr) == 0 {
		return 0
	}
	// Seed from the first token. If expr[0] is "(" the Atoi error is
	// ignored and result stays 0, to be replaced via id below.
	result, _ := strconv.Atoi(expr[0])
	for i := 0; i < len(expr); i++ {
		x := expr[i]
		switch x {
		case "+":
			ops = add
			break
		case "*":
			ops = mul
			break
		case "(":
			// Fold in the value of the sub-expression, then jump past
			// its closing parenthesis.
			cpi := findcorrespondingclosing(expr[i:])
			result = ops(result, evaluate(expr[i+1:i+cpi]))
			i += cpi
			break
		default:
			// Numeric token: fold into the result. Non-numeric tokens
			// (e.g. a stray ")") are silently skipped.
			if n, err := strconv.Atoi(x); err == nil {
				result = ops(result, n)
			}
		}
	}
	return result
}
// evaluateExpression tokenizes one input line with text/scanner and
// evaluates the resulting token stream.
func evaluateExpression(line string) int {
	var sc scanner.Scanner
	sc.Init(strings.NewReader(line))
	var tokens []string
	for tok := sc.Scan(); tok != scanner.EOF; tok = sc.Scan() {
		tokens = append(tokens, sc.TokenText())
	}
	return evaluate(tokens)
}
// t1 evaluates every input line and returns the sum of the results.
func t1(lines []string) int {
	results := make([]int, 0, len(lines))
	for _, line := range lines {
		results = append(results, evaluateExpression(line))
	}
	return add(results...)
}
// main reads the puzzle input and prints both task results. readdata and
// t2 are defined elsewhere in this package.
func main() {
	nums := readdata(input)
	fmt.Printf("Task1: %d\n", t1(nums))
	fmt.Printf("Task2: %d\n", t2(nums))
}
|
package main
import (
"errors"
"fmt"
"strconv"
"strings"
)
// interpreter evaluates a tiny arithmetic sentence of space-separated
// tokens: chains of "+" are summed, and a single "a * b" returns the
// product of the "*" token's two neighbours.
type interpreter struct {
	sentence string
}

// tokens splits the sentence on single spaces.
func (i *interpreter) tokens() []string {
	return strings.Split(i.sentence, " ")
}

// exec evaluates the sentence. The first "*" encountered short-circuits
// to the product of its neighbours (panics if it has none); otherwise
// every non-"+" token is parsed as a number (0 on failure) and summed.
func (i *interpreter) exec() int {
	toks := i.tokens()
	total := 0
	for idx, tok := range toks {
		if tok == "*" {
			fmt.Println(i.tokens())
			left, _ := strconv.Atoi(toks[idx-1])
			right, _ := strconv.Atoi(toks[idx+1])
			return left * right
		}
		if tok != "+" {
			n, _ := strconv.Atoi(tok)
			total += n
		}
	}
	return total
}

// dict maps recognized operator tokens to their names.
func dict() map[string]string {
	return map[string]string{"+": "plus"}
}

// contains reports whether s is a recognized operator token.
func (i *interpreter) contains(s string) bool {
	_, ok := dict()[s]
	return ok
}

// of sets the sentence to interpret; the literal input "normal" is rejected.
func (i *interpreter) of(s string) error {
	if s == "normal" {
		return errors.New("non va")
	}
	i.sentence = s
	return nil
}

// numberOfWords returns how many space-separated tokens the sentence has.
func (i *interpreter) numberOfWords() int {
	return len(i.tokens())
}
|
package redisDB
// Task result codes persisted in Redis.
const (
	TaskResult_Success   = 0 // task completed successfully
	TaskResult_EmptyAuth = 1 // no auth credentials supplied (per name; confirm against writers)
	TaskResult_FailAuth  = 2 // auth credentials rejected (per name; confirm against writers)
)
|
package types
// Schema is a named string type used wherever a schema name is passed around.
type Schema string
|
package constant
const (
	// group admin roles
	OrdinaryMember = 0
	GroupOwner     = 1
	Administrator  = 2
	// group application states
	Application      = 0
	AgreeApplication = 1
	// friend-related flags
	BlackListFlag         = 1
	ApplicationFriendFlag = 0
	FriendFlag            = 1
	RefuseFriendFlag      = -1
	// Websocket protocol message codes
	WSGetNewestSeq     = 1001
	WSPullMsg          = 1002
	WSSendMsg          = 1003
	WSPullMsgBySeqList = 1004
	WSPushMsg          = 2001
	WSDataError        = 3001
	/// ContentType
	// user-generated message content types
	Text           = 101
	Picture        = 102
	Voice          = 103
	Video          = 104
	File           = 105
	AtText         = 106
	Custom         = 110
	HasReadReceipt = 112
	Typing         = 113
	Common         = 200
	GroupMsg       = 201
	// system-generated notification content types
	// NOTE(review): AcceptFriendApplicationTip (201) collides with
	// GroupMsg (201) above — confirm whether this overlap is intentional.
	AcceptFriendApplicationTip  = 201
	AddFriendTip                = 202
	RefuseFriendApplicationTip  = 203
	SetSelfInfoTip              = 204
	Revoke                      = 205
	C2CMessageAsRead            = 206
	KickOnlineTip               = 303
	TransferGroupOwnerTip       = 501
	CreateGroupTip              = 502
	GroupApplicationResponseTip = 503
	JoinGroupTip                = 504
	QuitGroupTip                = 505
	SetGroupInfoTip             = 506
	AcceptGroupApplicationTip   = 507
	RefuseGroupApplicationTip   = 508
	KickGroupMemberTip          = 509
	InviteUserToGroupTip        = 510
	// MsgFrom: who produced the message
	UserMsgType = 100
	SysMsgType  = 200
	// SessionType
	SingleChatType = 1
	GroupChatType  = 2
)

// ContentType2PushContent maps a content type to the placeholder text
// shown in push notifications.
var ContentType2PushContent = map[int64]string{
	Picture:  "[图片]",
	Voice:    "[语音]",
	Video:    "[视频]",
	File:     "[文件]",
	Text:     "你收到了一条文本消息",
	AtText:   "[有人@你]",
	GroupMsg: "你收到一条群聊消息",
	Common:   "你收到一条新消息",
}

// FriendAcceptTip is the message sent when a friend request is accepted.
const FriendAcceptTip = "You have successfully become friends, so start chatting"
|
package main
import (
"bytes"
"fmt"
"io"
"strings"
"testing"
"github.com/blang/semver"
"github.com/orion-labs/go-scmversion/cmd"
)
// OpenerFunc is a function that operates as an `Opener`.
type OpenerFunc func(path string) (io.WriteCloser, error)

// Open satisfies the Opener interface by calling the function itself.
func (f OpenerFunc) Open(path string) (io.WriteCloser, error) {
	return f(path)
}
type closer struct {
w io.Writer
}
func (c *closer) Write(b []byte) (n int, err error) {
return c.w.Write(b)
}
func (c *closer) Close() error {
return nil
}
func NopCloser(w io.Writer) io.WriteCloser {
return &closer{w}
}
// TestCurrent verifies that Process with only the Current option prints
// the repo's current version.
func TestCurrent(t *testing.T) {
	vStr := "9.8.7"
	v, _ := semver.Make(vStr)
	r := &MockRepo{Ver: &v}
	var b bytes.Buffer
	o := &cmd.Options{Current: true}
	err := Process(o, &b, r, nil)
	if err != nil {
		t.Errorf("Unexpected error: %v\n", err)
	}
	out := b.String()
	// t.Logf("Output: %s\n", out)
	if !strings.Contains(out, vStr) {
		t.Errorf("Missing output: %s\n", out)
	}
}
// TestPatch runs Process with the Patch option against a repo at 9.8.7.
// NOTE(review): the `&&` means this only fails when the output contains
// NEITHER 9.8.7 nor 9.8.8 — confirm it isn't meant to be `||`.
func TestPatch(t *testing.T) {
	vStr := "9.8.7"
	v, _ := semver.Make(vStr)
	r := &MockRepo{Ver: &v}
	var b bytes.Buffer
	o := &cmd.Options{Patch: true, Debug: true}
	err := Process(o, &b, r, nil)
	if err != nil {
		t.Errorf("Unexpected error: %v\n", err)
	}
	out := b.String()
	t.Logf("Output: %s\n", out)
	if !strings.Contains(out, vStr) && !strings.Contains(out, "9.8.8") {
		t.Errorf("Missing output: %s\n", out)
	}
}
// TestMinor runs Process with Minor+Patch options against a repo at
// 9.8.7 and expects the old or bumped version (9.9.0) in the output.
// NOTE(review): same `&&` laxity as TestPatch — confirm intent.
func TestMinor(t *testing.T) {
	vStr := "9.8.7"
	v, _ := semver.Make(vStr)
	r := &MockRepo{Ver: &v}
	var b bytes.Buffer
	o := &cmd.Options{Minor: true, Patch: true, Debug: true}
	err := Process(o, &b, r, nil)
	if err != nil {
		t.Errorf("Unexpected error: %v\n", err)
	}
	out := b.String()
	t.Logf("Output: %s\n", out)
	if !strings.Contains(out, vStr) && !strings.Contains(out, "9.9.0") {
		t.Errorf("Missing output: %s\n", out)
	}
}
// TestMajor runs Process with all bump options plus Write, capturing the
// "written file" through a stubbed Opener, and checks both the console
// output and the written content mention the bumped version 10.0.0.
func TestMajor(t *testing.T) {
	vStr := "9.8.7"
	end := "10.0.0"
	v, _ := semver.Make(vStr)
	r := &MockRepo{Ver: &v}
	var b bytes.Buffer
	file := "./bubbagump.txt"
	o := &cmd.Options{Major: true, Minor: true, Patch: true, Debug: true, Write: true, File: file}
	// dest captures whatever Process writes to the version file.
	var d bytes.Buffer
	dest := NopCloser(&d)
	op := func(path string) (io.WriteCloser, error) { return dest, nil }
	err := Process(o, &b, r, OpenerFunc(op))
	if err != nil {
		t.Errorf("Unexpected error: %v\n", err)
	}
	out := b.String()
	t.Logf("Output: %s\n", out)
	if !strings.Contains(out, vStr) && !strings.Contains(out, end) {
		t.Errorf("Missing output: %s\n", out)
	}
	written := d.String()
	t.Logf("Written: %s\n", written)
	if !strings.Contains(written, end) {
		t.Errorf("Missing write: %s\n", written)
	}
}
// MockRepo is a test double for the version repository: Ver is the
// "current" version (nil forces Current to error), HasMajor/HasMinor
// are echoed by Since, and UpdateFail forces Update to error.
type MockRepo struct {
	Ver        *semver.Version
	HasMajor   bool
	HasMinor   bool
	UpdateFail bool
}

// Current returns the configured version, or a forced error when Ver is nil.
func (m *MockRepo) Current() (*semver.Version, error) {
	if m.Ver == nil {
		return nil, fmt.Errorf("forced error")
	}
	return m.Ver, nil
}

// Since reports the configured HasMajor/HasMinor flags; v is ignored.
func (m *MockRepo) Since(v *semver.Version) (bool, bool, error) {
	return m.HasMajor, m.HasMinor, nil
}

// Update succeeds unless UpdateFail is set; the new version is discarded.
func (m *MockRepo) Update(v *semver.Version) error {
	if m.UpdateFail {
		return fmt.Errorf("forced error")
	}
	return nil
}
|
package realm_test
import (
"testing"
"github.com/10gen/realm-cli/internal/cloud/realm"
"github.com/10gen/realm-cli/internal/local"
u "github.com/10gen/realm-cli/internal/utils/test"
"github.com/10gen/realm-cli/internal/utils/test/assert"
)
// TestFunctions covers realm.Client.Functions: calls without an
// authenticated session must fail, and an app's imported functions
// must be listed.
func TestFunctions(t *testing.T) {
	u.SkipUnlessRealmServerRunning(t)
	t.Run("should fail without an auth client", func(t *testing.T) {
		client := realm.NewClient(u.RealmServerURL())
		_, err := client.Functions(u.CloudGroupID(), "test-app-1234")
		assert.Equal(t, realm.ErrInvalidSession{}, err)
	})
	t.Run("should return list of functions associated with app", func(t *testing.T) {
		client := newAuthClient(t)
		groupID := u.CloudGroupID()
		app, teardown := setupTestApp(t, client, groupID, "functions-test")
		defer teardown()
		t.Run("should find 0 functions", func(t *testing.T) {
			functions, err := client.Functions(u.CloudGroupID(), app.ID)
			assert.Nil(t, err)
			assert.Equal(t, 0, len(functions))
		})
		t.Run("should find 1 function", func(t *testing.T) {
			// Import an app bundle containing a single private function.
			appData := local.AppDataV2{local.AppStructureV2{
				ConfigVersion: realm.AppConfigVersion20210101,
				ID: app.ClientAppID,
				Name: app.Name,
				Location: app.Location,
				DeploymentModel: app.DeploymentModel,
				Functions: local.FunctionsStructure{
					Configs: []map[string]interface{}{
						{"name": "test", "private": true},
					},
					Sources: map[string]string{
						"test.js": "exports = function(){\n return \"successful test\";\n};",
					},
				},
			}}
			err := client.Import(groupID, app.ID, appData)
			assert.Nil(t, err)
			functions, err := client.Functions(u.CloudGroupID(), app.ID)
			assert.Nil(t, err)
			assert.Equal(t, 1, len(functions))
			assert.Equal(t, "test", functions[0].Name)
		})
	})
}
// TestAppDebugExecuteFunction covers realm.Client.AppDebugExecuteFunction:
// unauthenticated calls must fail, and imported functions must execute
// and return their results.
func TestAppDebugExecuteFunction(t *testing.T) {
	u.SkipUnlessRealmServerRunning(t)
	t.Run("should fail without an auth client", func(t *testing.T) {
		client := realm.NewClient(u.RealmServerURL())
		_, err := client.AppDebugExecuteFunction(u.CloudGroupID(), "test-app-1234", "", "test-function", nil)
		assert.Equal(t, realm.ErrInvalidSession{}, err)
	})
	t.Run("should execute function", func(t *testing.T) {
		client := newAuthClient(t)
		groupID := u.CloudGroupID()
		app, teardown := setupTestApp(t, client, groupID, "app-debug-execute-function-test")
		defer teardown()
		// Import two private functions: one returning a constant string,
		// one echoing back its arguments.
		appData := local.AppDataV2{local.AppStructureV2{
			ConfigVersion: realm.AppConfigVersion20210101,
			ID: app.ClientAppID,
			Name: app.Name,
			Location: app.Location,
			DeploymentModel: app.DeploymentModel,
			Functions: local.FunctionsStructure{
				Configs: []map[string]interface{}{
					{"name": "simple_test", "private": true},
					{"name": "passed_args_test", "private": true},
				},
				Sources: map[string]string{
					"simple_test.js": "exports = function(){\n return \"successful test\";\n};",
					"passed_args_test.js": "exports = function(arg1, arg2){\n return {arg1: arg1, arg2: arg2};\n};",
				},
			},
		}}
		err := client.Import(groupID, app.ID, appData)
		assert.Nil(t, err)
		t.Run("should return string", func(t *testing.T) {
			response, err := client.AppDebugExecuteFunction(u.CloudGroupID(), app.ID, "", "simple_test", nil)
			assert.Nil(t, err)
			assert.Equal(t, "successful test", response.Result)
		})
		t.Run("should return passed args", func(t *testing.T) {
			args := []interface{}{
				map[string]interface{}{
					"value1": 1,
					"abcs": []string{"x", "y", "z"},
				},
				[]int{1, 2},
			}
			response, err := client.AppDebugExecuteFunction(u.CloudGroupID(), app.ID, "", "passed_args_test", args)
			assert.Nil(t, err)
			// Integers round-trip in MongoDB extended-JSON form
			// ({"$numberInt": ...}), as asserted below.
			assert.Equal(t, map[string]interface{}{
				"arg1": map[string]interface{}{
					"value1": map[string]interface{}{"$numberInt": "1"},
					"abcs": []interface{}{"x", "y", "z"},
				},
				"arg2": []interface{}{
					map[string]interface{}{
						"$numberInt": "1",
					},
					map[string]interface{}{
						"$numberInt": "2",
					},
				},
			}, response.Result)
		})
	})
}
|
package utils
import (
"errors"
"fmt"
"time"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
)
// CustomClaims is the JWT claims payload: the user id plus the
// standard registered claims (expiry, etc.).
type CustomClaims struct {
	ID uint `json:"userId"`
	jwt.StandardClaims
}
// TokenExpireDuration is how long issued tokens remain valid.
const TokenExpireDuration = time.Hour * 2
// CustomSecret is the HMAC key used to sign and verify tokens.
// NOTE(review): a hard-coded secret should be loaded from config/env.
var CustomSecret = []byte("dapan")
// GenToken issues an HS256-signed JWT for userId that expires after
// TokenExpireDuration.
func GenToken(userId uint) (string, error) {
	// NOTE(review): this prints the signing secret to stdout — remove
	// before production use (kept here to preserve behavior).
	fmt.Println(CustomSecret)
	c := CustomClaims {
		userId,
		jwt.StandardClaims{
			ExpiresAt: time.Now().Add(TokenExpireDuration).Unix(),
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, c)
	return token.SignedString(CustomSecret)
}
// parseToken validates tokenString and returns its claims, or an error
// when the token is malformed, expired, or signed with the wrong key
// or algorithm.
func parseToken(tokenString string) (*CustomClaims, error) {
	token, err := jwt.ParseWithClaims(tokenString, &CustomClaims{}, func(t *jwt.Token) (interface{}, error) {
		// Only accept HMAC-signed tokens. Without this check a token
		// signed with a different algorithm (alg confusion) could be
		// verified against the wrong key material.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return CustomSecret, nil
	})
	if err != nil {
		return nil, err
	}
	// Claims type and validity must both check out.
	if claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {
		return claims, nil
	}
	return nil, errors.New("invalid token")
}
func Auth(c *gin.Context) {
token := c.Request.Header.Get("Authorization")
claims, err := parseToken(token)
if err != nil {
c.JSON(422, gin.H{
"msg": err.Error(),
})
c.Abort()
return
}
c.Set("user_id", claims.ID)
c.Next()
} |
package bean
import (
"errors"
"log"
"github.com/astaxie/beego/orm"
)
var (
	// ErrPaymentAlreadyAchieved is returned when a payment's reward
	// has already been claimed.
	ErrPaymentAlreadyAchieved = errors.New("payment already achieved")
)
// AndroidPayment is a persisted Android (FYSDK) payment order record.
type AndroidPayment struct {
	OrderID string `orm:"column(order_id);pk"` // SDK order number
	UUID string `orm:"column(uuid)"` // unique SDK user id
	ZoneID int32 `orm:"column(zone_id)"` // game zone/region id
	UID uint32 `orm:"column(uid)"` // game character id
	SKU string `orm:"column(sku);size(128)"` // product SKU
	Amount int32 `orm:"column(amount)"` // product price
	PayTime int64 `orm:"column(pay_time)"` // when the order was paid
	Sandbox bool `orm:"column(sandbox)"` // whether this is a sandbox/test order
	Happen int64 `orm:"column(happen)"` // when this record was received
	Achieved bool `orm:"column(achieved)"` // whether the reward has been claimed
}
// TableName tells the beego ORM which table this struct maps to.
// (Receiver renamed from the non-idiomatic `self`.)
func (p *AndroidPayment) TableName() string {
	return "android_payment"
}
// UpdateAsAchieved marks the payment's reward as claimed and persists
// the change. It returns ErrPaymentAlreadyAchieved when the reward was
// already claimed, or the database error if the update fails.
func (bean *AndroidPayment) UpdateAsAchieved() error {
	// Refuse to double-claim.
	if bean.Achieved {
		return ErrPaymentAlreadyAchieved
	}
	bean.Achieved = true
	o := orm.NewOrm()
	if _, e := o.Update(bean); e != nil {
		// Roll back the in-memory flag so the struct stays consistent
		// with the database row when the update fails (previously the
		// flag stayed true even though nothing was persisted).
		bean.Achieved = false
		return e
	}
	return nil
}
// InsertAndroidPayment stores a new payment record, logging and
// returning any database error.
func InsertAndroidPayment(bean *AndroidPayment) error {
	if _, e := orm.NewOrm().Insert(bean); e != nil {
		log.Printf("新增FYSDK支付订单记录%v时出错: %v", bean, e)
		return e
	}
	return nil
}
// LoadAndroidPayments returns all unclaimed (achieved == false) payment
// records for the given game character id.
// NOTE(review): the (error, result) return order is reversed from Go
// convention; kept as-is because callers depend on it.
func LoadAndroidPayments(uid uint32) (error, []*AndroidPayment) {
	o := orm.NewOrm()
	var beans []*AndroidPayment
	qs := o.QueryTable("android_payment")
	_, e := qs.Filter("uid", uid).Filter("achieved", false).All(&beans)
	if e != nil {
		return e, nil
	}
	return nil, beans
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
// X goes first on move 0 (even moves are X, odd moves are O).

// winner reports "X" or "O" if that player has three in a row on
// board (a 9-character string, row-major), or "" if nobody has won.
func winner(board string) string {
	// All eight winning lines, as cell indices into the board string.
	lines := [8][3]int{
		{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, // rows
		{0, 3, 6}, {1, 4, 7}, {2, 5, 8}, // columns
		{0, 4, 8}, {2, 4, 6}, // diagonals
	}
	for _, l := range lines {
		c := board[l[0]]
		if c != '.' && board[l[1]] == c && board[l[2]] == c {
			return string(c)
		}
	}
	return ""
}

func main() {
	// Each row string encodes the move order: the digit at cell
	// position p is the move number on which cell p was filled.
	game := []string{"102",
		"457",
		"386"}
	moves := strings.Join(game, "")
	board := []byte(".........")
	// Replay the moves in order, checking for a winner after each one.
	// (The previous version repeated the 8-line win check 16 times
	// inline; it is now factored into winner().)
	for i := 0; i < len(moves); i++ {
		idx := strings.Index(moves, strconv.Itoa(i))
		if i%2 == 0 {
			board[idx] = 'X'
		} else {
			board[idx] = 'O'
		}
		if w := winner(string(board)); w != "" {
			fmt.Println(w)
			return
		}
	}
	// Board full with no winner.
	fmt.Println("draw")
}
|
package repo
import (
"strings"
"github.com/bitnami-labs/charts-syncer/api"
"github.com/bitnami-labs/charts-syncer/pkg/utils"
"github.com/juju/errors"
helmRepo "helm.sh/helm/v3/pkg/repo"
"k8s.io/klog"
)
// HarborClient implements ChartRepoAPI for a Harbor implementation.
// It wraps the target/source repo definition and reuses the
// ChartMuseum-style upload/download helpers.
type HarborClient struct {
	repo *api.Repo
}
// NewHarborClient creates a new `HarborClient` for the given repo.
func NewHarborClient(repo *api.Repo) *HarborClient {
	c := &HarborClient{repo: repo}
	return c
}
// PublishChart publishes a packaged chart to a Harbor repository by
// rewriting the repo URL into its API form and pushing through the
// ChartMuseum-compatible endpoint.
func (c *HarborClient) PublishChart(filepath string, targetRepo *api.Repo) error {
	klog.V(3).Infof("Publishing %s to Harbor repo", filepath)
	endpoint := strings.Replace(targetRepo.Url, "/chartrepo/", "/api/chartrepo/", 1) + "/charts"
	err := pushToChartMuseumLike(endpoint, filepath, targetRepo)
	// errors.Trace returns nil for a nil error, so success passes through.
	return errors.Trace(err)
}
// DownloadChart downloads a packaged chart from a Harbor repository,
// resolving the chart's URL from the repo index first.
func (c *HarborClient) DownloadChart(filepath string, name string, version string, sourceRepo *api.Repo, index *helmRepo.IndexFile) error {
	klog.V(3).Infof("Downloading %s-%s from Harbor repo", name, version)
	endpoint, err := utils.FindChartURL(name, version, index, sourceRepo.Url)
	if err != nil {
		return errors.Trace(err)
	}
	err = downloadFromChartMuseumLike(endpoint, filepath, sourceRepo)
	// errors.Trace returns nil for a nil error, so success passes through.
	return errors.Trace(err)
}
// ChartExists checks if a chart at the given name/version exists in the
// repo index.
func (c *HarborClient) ChartExists(name string, version string, index *helmRepo.IndexFile) (bool, error) {
	klog.V(3).Infof("Checking if %s-%s chart exists", name, version)
	exists, err := utils.ChartExistInIndex(name, version, index)
	if err != nil {
		return false, errors.Trace(err)
	}
	return exists, nil
}
|
package problem0598
import "testing"
// TestSolve checks maxCount (LeetCode 598, "Range Addition II").
// Previously the test only logged the result and asserted nothing.
func TestSolve(t *testing.T) {
	// Ops [2,2] and [3,3] both cover the top-left 2x2 region of a 3x3
	// matrix, so 4 cells hold the maximum value.
	if got := maxCount(3, 3, [][]int{{2, 2}, {3, 3}}); got != 4 {
		t.Errorf("maxCount(3, 3, [[2,2],[3,3]]) = %d, want 4", got)
	}
}
|
package api
import (
"fmt"
"octlink/mirage/src/modules/user"
"octlink/mirage/src/modules/usergroup"
"octlink/mirage/src/utils"
"octlink/mirage/src/utils/merrors"
"octlink/mirage/src/utils/octlog"
"octlink/mirage/src/utils/uuid"
)
// APIAddUser creates a new user in the group given by paras
// "groupId" (falling back to the session's uuid) and grants the group
// access to the new user on success.
// NOTE(review): the .(string) assertions on the input paras panic if a
// field is missing or not a string — confirm upstream validation.
func APIAddUser(paras *ApiParas) *ApiResponse {
	resp := new(ApiResponse)
	newUser := user.FindUserByName(paras.Db, paras.InParas.Paras["account"].(string))
	if newUser != nil {
		logger.Errorf("user %s already exist\n", newUser.Name)
		resp.Error = merrors.ERR_SEGMENT_ALREADY_EXIST
		return resp
	}
	gid := paras.InParas.Paras["groupId"].(string)
	if gid == "" {
		// Fall back to the caller's session uuid as the group id.
		gid = paras.InParas.Session["uuid"].(string)
	}
	if gid == "" {
		resp.Error = merrors.ERR_USERGROUP_NOT_EXIST
		return resp
	}
	group := usergroup.FindGroup(paras.Db, gid)
	if group == nil {
		resp.Error = merrors.ERR_USERGROUP_NOT_EXIST
		return resp
	}
	newUser = new(user.User)
	newUser.Id = uuid.Generate().Simple()
	newUser.Name = paras.InParas.Paras["account"].(string)
	newUser.Type = 0
	newUser.Email = paras.InParas.Paras["email"].(string)
	newUser.PhoneNumber = paras.InParas.Paras["phoneNumber"].(string)
	resp.Error = newUser.Add(paras.Db)
	if resp.Error == 0 {
		// Only grant group access when the insert succeeded.
		group.AllowUser(paras.Db, newUser.Id)
	}
	return resp
}
// APILoginByUser authenticates a user by account name and password
// and, on success, returns the created session in resp.Data.
func APILoginByUser(paras *ApiParas) *ApiResponse {
	resp := new(ApiResponse)
	uid := paras.InParas.Paras["account"].(string)
	password := paras.InParas.Paras["password"].(string)
	// NOTE(review): this logs the plaintext password at debug level —
	// consider redacting before enabling debug logs in production.
	logger.Debugf("Login %s:%s", uid, password)
	account := user.FindUserByName(paras.Db, uid)
	if account == nil {
		// Fixed log text: this branch means the account does NOT exist
		// (it previously said "already exist").
		logger.Errorf("account %s not exist\n", uid)
		resp.Error = merrors.ERR_USER_NOT_EXIST
		return resp
	}
	session := account.Login(paras.Db, password)
	if session == nil {
		resp.Error = merrors.ERR_PASSWORD_DONT_MATCH
		return resp
	}
	resp.Data = session
	return resp
}
// APIShowUser looks up a single user by id and returns it in resp.Data.
func APIShowUser(paras *ApiParas) *ApiResponse {
	octlog.Debug("running in APIShowUser\n")
	resp := new(ApiResponse)
	userId := paras.InParas.Paras["id"].(string)
	found := user.FindUser(paras.Db, userId)
	if found == nil {
		resp.Error = merrors.ERR_SEGMENT_NOT_EXIST
		resp.ErrorLog = fmt.Sprintf("user %s not found", userId)
		return resp
	}
	octlog.Debug("found User %s", found.Name)
	resp.Data = found
	return resp
}
// APIUpdateUser is a stub: it logs the call and returns success
// without modifying anything.
func APIUpdateUser(paras *ApiParas) *ApiResponse {
	octlog.Debug("running in APIUpdateUser\n")
	return &ApiResponse{Error: 0}
}
// APIShowAllUser returns a page of users plus the total user count.
// Input paras: "start" (offset) and "limit" (page size), parsed with
// utils.ParasInt. The result map has keys "total", "count" and "data".
func APIShowAllUser(paras *ApiParas) *ApiResponse {
	resp := new(ApiResponse)
	offset := utils.ParasInt(paras.InParas.Paras["start"])
	limit := utils.ParasInt(paras.InParas.Paras["limit"])
	rows, err := paras.Db.Query("SELECT ID,U_Name,U_State,U_Type,U_Email,U_PhoneNumber,"+
		"U_Description,U_CreateTime,U_LastLogin,U_LastSync "+
		"FROM tb_user LIMIT ?,?", offset, limit)
	if err != nil {
		logger.Errorf("query user list error %s\n", err.Error())
		resp.Error = merrors.ERR_DB_ERR
		return resp
	}
	defer rows.Close()
	userList := make([]user.User, 0)
	for rows.Next() {
		// Renamed from `user` to avoid shadowing the user package.
		var u user.User
		err = rows.Scan(&u.Id, &u.Name, &u.State,
			&u.Type, &u.Email, &u.PhoneNumber, &u.Desc,
			&u.CreateTime, &u.LastLogin, &u.LastSync)
		if err == nil {
			// Fixed: the old format string had two verbs ("%s:%d") for
			// four arguments, with %d applied to the name string.
			logger.Debugf("query result: %s:%s state=%v type=%v\n",
				u.Id, u.Name, u.State, u.Type)
		} else {
			logger.Errorf("query user list error %s\n", err.Error())
		}
		userList = append(userList, u)
	}
	userCount := user.GetUserCount(paras.Db)
	result := make(map[string]interface{}, 3)
	result["total"] = userCount
	result["count"] = len(userList)
	result["data"] = userList
	resp.Data = result
	return resp
}
// APIDeleteUser removes the user identified by paras "id", if present.
func APIDeleteUser(paras *ApiParas) *ApiResponse {
	octlog.Debug("running in APIDeleteUser\n")
	resp := new(ApiResponse)
	target := user.FindUser(paras.Db, paras.InParas.Paras["id"].(string))
	if target == nil {
		resp.Error = merrors.ERR_SEGMENT_NOT_EXIST
		return resp
	}
	target.Delete(paras.Db)
	return resp
}
// APIShowUserList returns a brief (id + name) listing of every user.
func APIShowUserList(paras *ApiParas) *ApiResponse {
	resp := new(ApiResponse)
	rows, err := paras.Db.Query("SELECT ID,U_Name FROM tb_user")
	if err != nil {
		logger.Errorf("query user list error %s\n", err.Error())
		resp.Error = merrors.ERR_DB_ERR
		return resp
	}
	defer rows.Close()
	userList := make([]map[string]string, 0)
	for rows.Next() {
		// Note: this local shadows the user package inside the loop body.
		var user user.User
		err = rows.Scan(&user.Id, &user.Name)
		if err == nil {
			logger.Debugf("query result: %s:%s\n", user.Id, user.Name)
		} else {
			logger.Errorf("query user list error %s\n", err.Error())
		}
		userList = append(userList, user.Brief())
	}
	resp.Data = userList
	return resp
}
// APIResetUserPassword is a stub: it logs the call and returns success.
func APIResetUserPassword(paras *ApiParas) *ApiResponse {
	octlog.Debug("running in APIResetUserPassword\n")
	return &ApiResponse{Error: 0}
}
// APIUpdateUserPassword is a stub: it logs the call and returns success.
func APIUpdateUserPassword(paras *ApiParas) *ApiResponse {
	octlog.Debug("running in APIUpdateUserPassword\n")
	return &ApiResponse{Error: 0}
}
// APIUserLogOut is a stub: it logs the call and returns success.
func APIUserLogOut(paras *ApiParas) *ApiResponse {
	octlog.Debug("running in APILogOut\n")
	return &ApiResponse{Error: 0}
}
|
package structs
// LNode is a singly linked list node holding an int payload.
type LNode struct {
	data int
	next *LNode
}
// SliceStack is a stack backed by an int slice; sliceSize tracks the
// number of elements currently in use.
type SliceStack struct {
	slice []int
	sliceSize int
}
// LinklistStack is a stack backed by a linked list, tracking both ends.
type LinklistStack struct {
	head *LNode
	end *LNode
}
package pain
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01200104 is the pain.012.001.04 XML document envelope
// wrapping a MandateAcceptanceReport message.
type Document01200104 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:pain.012.001.04 Document"`
	Message *MandateAcceptanceReportV04 `xml:"MndtAccptncRpt"`
}
// AddMessage allocates the document's message payload and returns it
// so the caller can populate it.
func (d *Document01200104) AddMessage() *MandateAcceptanceReportV04 {
	msg := new(MandateAcceptanceReportV04)
	d.Message = msg
	return msg
}
// Scope
// The MandateAcceptanceReport message is sent from the agent of the receiver (debtor or creditor) of the MandateRequest message (initiation, amendment or cancellation) to the agent of the initiator of the MandateRequest message (debtor or creditor).
// A MandateAcceptanceReport message is used to confirm the acceptance or rejection of a MandateRequest message. Where acceptance is part of the full process flow, a MandateRequest message only becomes valid after a confirmation of acceptance is received through a MandateAcceptanceReport message from the agent of the receiver.
// Usage
// The MandateAcceptanceReport message can contain one or more confirmation(s) of acceptance or rejection of a specific Mandate Request.
// The messages can be exchanged between debtor agent and creditor agent and between debtor agent and debtor and creditor agent and creditor.
// The MandateAcceptanceReport message can be used in domestic and cross-border scenarios.
type MandateAcceptanceReportV04 struct {
	// Set of characteristics to identify the message and parties playing a role in the mandate acceptance, but which are not part of the mandate.
	GroupHeader *iso20022.GroupHeader47 `xml:"GrpHdr"`
	// Set of elements used to provide information on the acceptance or rejection of the mandate request.
	UnderlyingAcceptanceDetails []*iso20022.MandateAcceptance4 `xml:"UndrlygAccptncDtls"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// AddGroupHeader allocates the report's group header and returns it so
// the caller can populate it.
func (m *MandateAcceptanceReportV04) AddGroupHeader() *iso20022.GroupHeader47 {
	hdr := &iso20022.GroupHeader47{}
	m.GroupHeader = hdr
	return hdr
}
// AddUnderlyingAcceptanceDetails appends a fresh acceptance-details
// entry and returns it for the caller to fill in.
func (m *MandateAcceptanceReportV04) AddUnderlyingAcceptanceDetails() *iso20022.MandateAcceptance4 {
	item := &iso20022.MandateAcceptance4{}
	m.UnderlyingAcceptanceDetails = append(m.UnderlyingAcceptanceDetails, item)
	return item
}
// AddSupplementaryData appends a fresh supplementary-data entry and
// returns it for the caller to fill in.
func (m *MandateAcceptanceReportV04) AddSupplementaryData() *iso20022.SupplementaryData1 {
	item := &iso20022.SupplementaryData1{}
	m.SupplementaryData = append(m.SupplementaryData, item)
	return item
}
|
package conn
import(
"net"
"fmt"
"bytes"
"encoding/binary"
"handler"
)
const(
	MAX_RECV_QUEUE_SIZE = 1024 // capacity of the inbound message channel
	MAX_SEND_QUEUE_SIZE = 1024 // capacity of the outbound message channel
	MAX_RECV_BUFF_SIZE = 65536 // initial capacity of the framing buffer
	MSG_HEADER_LEN = 4 // bytes in the big-endian frame-length prefix
	OP_CODE_LEN = 4 // bytes in the opcode leading each frame payload
	CACHE_SIZE = 1024 // size of the per-read scratch buffer
)
// RecvQueue carries inbound frames; SendQueue carries outbound frames.
type RecvQueue chan []byte
type SendQueue chan []byte
// Connection couples one TCP connection with its owning server and its
// inbound/outbound queues.
type Connection struct{
	tcp_server ITcpServer
	conn net.Conn
	recv RecvQueue
	send SendQueue
}
// NewConnection wraps conn for tcp_server, allocates the receive/send
// queues, and starts the per-connection read and write goroutines.
func NewConnection(tcp_server ITcpServer, conn net.Conn) *Connection{
	c := &Connection{
		tcp_server: tcp_server,
		conn:       conn,
		recv:       make(RecvQueue, MAX_RECV_QUEUE_SIZE),
		send:       make(SendQueue, MAX_SEND_QUEUE_SIZE),
	}
	go c.onRecv()
	go c.onSend()
	return c
}
// onRecv is the per-connection read loop. It accumulates raw bytes in
// a buffer and extracts length-prefixed frames: a 4-byte big-endian
// content length, then a 4-byte opcode followed by the protobuf
// payload, which is decoded via handler.DecodePbMsg.
func (this *Connection) onRecv(){
	cache := make([]byte, CACHE_SIZE)
	buf := bytes.NewBuffer(make([]byte, 0, MAX_RECV_BUFF_SIZE))
	var contentLen uint32
	for{
		size, err := this.conn.Read(cache)
		if err != nil {
			fmt.Printf("Read error, %v\n", err.Error())
			break
		}
		// Append what was just read into the framing buffer.
		buf.Write(cache[:size])
		for{
			// Buffer consumed exactly on a frame boundary: reset the
			// pending length and wait for more data.
			if buf.Len() == 0 {
				contentLen = 0
				break
			}
			// Starting a new frame: we need at least the length header
			// before anything can be parsed.
			if contentLen == 0 {
				// Not enough buffered bytes for the length prefix yet.
				if buf.Len() < MSG_HEADER_LEN {
					break
				}
				packByteSize := make([]byte, MSG_HEADER_LEN)
				_, err = buf.Read(packByteSize)
				contentLen = binary.BigEndian.Uint32(packByteSize)
			}
			// Not enough buffered bytes for the whole frame yet; wait
			// for the next read to deliver the remainder.
			if int(contentLen) > buf.Len() || contentLen == 0 {
				break
			}
			data := make([]byte, contentLen)
			// data now holds one complete frame: opcode + payload.
			_, err = buf.Read(data)
			opCode := binary.BigEndian.Uint32(data[:OP_CODE_LEN])
			pbData := data[OP_CODE_LEN:]
			contentLen = 0
			// Decode the frame into a message.
			decodeMsg, _ := handler.DecodePbMsg(opCode, pbData)
			fmt.Println(decodeMsg)
			//this.recv <- decodeMsg
		}
	}
}
// onSend is the per-connection write loop.
// NOTE(review): not implemented — nothing drains the send queue yet.
func (this *Connection) onSend(){
}
|
package frame
// VisibleHeight returns the frame height clamped to the number of rows
// that fit between the frame's start row and the bottom of the terminal.
func (config *Config) VisibleHeight() int {
	available := terminalHeight - (config.startRow - 1)
	if h := config.Height(); h <= available {
		return h
	}
	return available
}
// Height returns the total frame height: the body lines plus one row
// each for an optional header and footer.
func (config *Config) Height() int {
	extra := 0
	if config.HasHeader {
		extra++
	}
	if config.HasFooter {
		extra++
	}
	return config.Lines + extra
}
|
package resp
import (
"fmt"
"io"
"net"
"reflect"
"runtime/debug"
"github.com/dvirsky/go-pylog/logging"
"gitlab.doit9.com/backend/instrument"
"github.com/EverythingMe/meduza/driver"
"github.com/EverythingMe/meduza/errors"
"github.com/EverythingMe/meduza/protocol"
"github.com/EverythingMe/meduza/query"
"github.com/EverythingMe/meduza/transport"
)
// Server is a RESP (Redis-protocol) adapter: it accepts TCP
// connections, decodes queries with proto, and dispatches them to the
// underlying driver.
type Server struct {
	listener net.Listener
	numClients uint
	isRunning bool
	driver driver.Driver
	proto protocol.Protocol
}
// NewServer builds a Server that answers queries via driver d using
// wire protocol p.
func NewServer(d driver.Driver, p protocol.Protocol) *Server {
	s := new(Server)
	s.driver = d
	s.proto = p
	return s
}
// Listen binds addr and serves connections until Accept fails, handling
// each accepted connection on its own goroutine. It only returns on
// error — the accept loop is otherwise endless.
func (r *Server) Listen(addr string) error {
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	logging.Info("Redis adapter listening on %s", addr)
	r.listener = listener
	r.isRunning = true
	for {
		conn, err := r.listener.Accept()
		if err != nil {
			logging.Error("Error accepting: %s", err)
			return err
		}
		// Fixed: the format string was missing a verb for the address.
		logging.Info("Handling connection from %s", conn.RemoteAddr())
		go r.handleConnection(conn)
	}
}
// handleConnection serves one client connection: it reads messages off
// the transport, decodes them, dispatches queries to the driver, and
// writes responses back until the peer disconnects or an error occurs.
// A deferred recover guards against panics in handler code so one bad
// request cannot take down the whole server.
func (r *Server) handleConnection(c net.Conn) {
	defer func() {
		err := recover()
		if err != nil {
			// Fixed typo in the log message ("hanlding" -> "handling").
			logging.Error("PANIC handling request: %s. Stack: %s", err, string(debug.Stack()))
			instrument.Increment("handler_panic", 1)
			// Close may itself panic on an already-broken connection;
			// swallow that so the deferred handler exits cleanly.
			func() {
				defer recover()
				c.Close()
			}()
		}
	}()
	instrument.Hit("connection_rcv")
	trans := NewTransport(c)
	var err error = nil
	var msg transport.Message
	for err == nil {
		if msg, err = trans.ReadMessage(); err == nil {
			// query handling logic
			var q interface{}
			var res query.QueryResult
			if q, err = r.proto.ReadMessage(msg); err == nil {
				// answering ping/pong messages is out of band and does not get transfered to the drivers
				if msg.Type == transport.PingMessage {
					logging.Debug("Got ping message, writing PONG")
					res, _ := r.proto.WriteMessage(query.NewPingResponse())
					trans.WriteMessage(res)
					continue
				}
				// Time the driver call per query type.
				instrument.Profile(fmt.Sprintf("query.%s", msg.Type), func() error {
					res = r.handleQuery(q)
					return res.Err()
				})
				logging.Info("Query result: %s", res)
				if msg, err = r.proto.WriteMessage(res); err == nil {
					err = trans.WriteMessage(msg)
				} else {
					instrument.Increment("send_error", 1)
					logging.Error("Error serializing response: %s", err)
				}
			} else {
				instrument.Increment("deserialize_error", 1)
				logging.Error("Error deserializing query: %s", err)
			}
		} else {
			// A closed peer is a normal exit, not an error worth logging.
			if err == io.EOF || err == io.ErrClosedPipe {
				break
			}
			instrument.Increment("receive_error", 1)
			logging.Error("Error reading from redis transport: %s", err)
		}
	}
	logging.Debug("Exiting handler loop")
	c.Close()
}
// handleQuery validates the decoded query and dispatches it to the
// matching driver call, returning the driver's (or validation) result.
func (r *Server) handleQuery(qu interface{}) query.QueryResult {
	switch v := qu.(type) {
	case query.GetQuery:
		if err := v.Validate(); err != nil {
			logging.Error("Error validating GET query: %s", err)
			return query.NewGetResponse(err)
		}
		return r.driver.Get(v)
	case query.PutQuery:
		if err := v.Validate(); err != nil {
			logging.Error("Error validating put query: %s", err)
			return query.NewPutResponse(err)
		}
		return r.driver.Put(v)
	case query.UpdateQuery:
		if err := v.Validate(); err != nil {
			logging.Error("Error validating UPDATE query: %s", err)
			return query.NewUpdateResponse(err, 0)
		}
		return r.driver.Update(v)
	case query.DelQuery:
		if err := v.Validate(); err != nil {
			logging.Error("Error validating DEL query: %s", err)
			return query.NewDelResponse(err, 0)
		}
		return r.driver.Delete(v)
	default:
		return query.NewResponse(errors.NewError("Invalid query type object %s", reflect.TypeOf(v)))
	}
}
|
package services
import (
"fmt"
r "github.com/davelaursen/idealogue-go/Godeps/_workspace/src/github.com/dancannon/gorethink"
)
// UserSvc represents a service that provides read/write access to user data.
type UserSvc interface {
	GetAll() (Users, *Error)
	GetByID(id string) (*User, *Error)
	GetByEmail(email string) (*User, *Error)
	Insert(user *User) *Error
	Update(user *User) *Error
	Delete(id string) *Error
}
// userSvcImpl is the RethinkDB-backed implementation of UserSvc.
type userSvcImpl struct {
	session *r.Session
}
// GetAll returns all the users in the system, or nil.
// Potential error types:
//   ErrDB: error reading/writing to the database
func (svc *userSvcImpl) GetAll() (Users, *Error) {
	cursor, dbErr := r.Table("Users").Run(svc.session)
	if dbErr != nil {
		return nil, NewError(ErrDB, dbErr)
	}
	users := []*User{}
	if err := cursor.All(&users); err != nil {
		return nil, NewError(ErrDB, err)
	}
	return users, nil
}
// GetByID returns the user that has the specified id, or nil when no
// such user exists.
// Potential error types:
//   ErrDB: error reading/writing to the database
func (svc *userSvcImpl) GetByID(id string) (*User, *Error) {
	cursor, dbErr := r.Table("Users").Get(id).Run(svc.session)
	if dbErr != nil {
		return nil, NewError(ErrDB, dbErr)
	}
	if cursor.IsNil() {
		return nil, nil
	}
	user := &User{}
	if err := cursor.One(user); err != nil {
		return nil, NewError(ErrDB, err)
	}
	return user, nil
}
// GetByEmail returns the user that has the specified email, or nil.
// Potential error types:
//   ErrDB: error reading/writing to the database
// NOTE(review): the fmt.Println calls below look like leftover debug
// output — consider removing them or routing through a logger.
func (svc *userSvcImpl) GetByEmail(email string) (*User, *Error) {
	res, err := r.Table("Users").GetAllByIndex("email", email).Run(svc.session)
	if err != nil {
		fmt.Println("ERROR 1: ", err)
		return nil, NewError(ErrDB, err)
	}
	if res.IsNil() {
		return nil, nil
	}
	user := &User{}
	err = res.One(user)
	if err != nil {
		fmt.Println("ERROR 2: ", err)
		return nil, NewError(ErrDB, err)
	}
	return user, nil
}
// Insert persists a user and returns an error if the operation failed.
// On success the database-generated primary key, if any, is copied back
// into user.ID.
// Potential error types:
//   ErrBadData: the user is invalid
//   ErrDB: error reading/writing to the database
func (svc *userSvcImpl) Insert(user *User) *Error {
	//TODO: lookup by email - check for conflict
	//TODO: validate user to insert
	res, err := r.Table("Users").Insert(user).RunWrite(svc.session)
	if err != nil {
		return NewError(ErrDB, err)
	}
	// Keys are only generated when the document had no id of its own;
	// indexing an empty GeneratedKeys slice would panic.
	if len(res.GeneratedKeys) > 0 {
		user.ID = res.GeneratedKeys[0]
	}
	return nil
}
// Update persists a user and returns an error if the operation failed.
// Potential error types:
//   ErrBadData: the user is invalid
//   ErrNotFound: the user to update doesn't exist
//   ErrDB: error reading/writing to the database
func (svc *userSvcImpl) Update(user *User) *Error {
	//TODO: validate user to update
	existing, getErr := svc.GetByID(user.ID)
	if getErr != nil {
		return getErr
	}
	if existing == nil {
		return NewError(ErrNotFound, nil)
	}
	if _, dbErr := r.Table("Users").Get(user.ID).Update(user).RunWrite(svc.session); dbErr != nil {
		return NewError(ErrDB, dbErr)
	}
	return nil
}
// Delete removes the user with the specified id.
// Potential error types:
//   ErrNotFound: the user to delete doesn't exist
//   ErrDB: error reading/writing to the database
func (svc *userSvcImpl) Delete(id string) *Error {
	existing, getErr := svc.GetByID(id)
	if getErr != nil {
		return getErr
	}
	if existing == nil {
		return NewError(ErrNotFound, nil)
	}
	if _, dbErr := r.Table("Users").Get(id).Delete().RunWrite(svc.session); dbErr != nil {
		return NewError(ErrDB, dbErr)
	}
	return nil
}
|
package manager_test
import (
"testing"
"github.com/wst-libs/wst-sdk/sdk/manager"
)
const (
	// url is the storage-file endpoint of the live test server
	// exercised by the tests below.
	url = "http://39.105.49.69:48083/api/v1/storage/file"
)
// func TestAdd(t *testing.T) {
// manager.Add(url)
// }
// TestPut exercises manager.Put against the live test endpoint.
// NOTE(review): this performs a real network call and asserts nothing;
// consider checking Put's result/error.
func TestPut(t *testing.T) {
	manager.Put(url)
}
|
package secretconf
import (
"io"
"os"
"path/filepath"
"github.com/cosmos/go-bip39"
conf "github.com/tendermint/starport/starport/chainconf"
"gopkg.in/yaml.v2"
)
const (
	// SecretFile is the file name the secret configuration is stored under.
	SecretFile = "secret.yml"
	// SelfRelayerAccountName is the default account name for the
	// chain's own relayer.
	SelfRelayerAccountName = "relayer"
)
var (
	// selfRelayerAccountDefaultCoins is the balance granted to a newly
	// created relayer account.
	selfRelayerAccountDefaultCoins = []string{"800token"}
)
// Config is the secret (not-to-be-committed) chain configuration:
// local accounts plus relayer settings.
type Config struct {
	// Accounts of the local chain.
	Accounts []conf.Account `yaml:"accounts"`
	// Relayer configuration.
	Relayer Relayer `yaml:"relayer"`
}
// SelfRelayerAccount returns the local account with the given name and
// whether it was found.
func (c *Config) SelfRelayerAccount(name string) (account conf.Account, found bool) {
	for _, acc := range c.Accounts {
		if acc.Name != name {
			continue
		}
		return acc, true
	}
	return conf.Account{}, false
}
// SetSelfRelayerAccount generates a fresh 256-bit mnemonic and appends
// a local account with that name and the default coin balance.
func (c *Config) SetSelfRelayerAccount(accName string) error {
	entropy, err := bip39.NewEntropy(256)
	if err != nil {
		return err
	}
	mnemonic, err := bip39.NewMnemonic(entropy)
	if err != nil {
		return err
	}
	acc := conf.Account{
		Name:     accName,
		Coins:    selfRelayerAccountDefaultCoins,
		Mnemonic: mnemonic,
	}
	c.Accounts = append(c.Accounts, acc)
	return nil
}
// UpsertRelayerAccount replaces the relayer account with the same name,
// or appends acc when no such account exists yet.
func (c *Config) UpsertRelayerAccount(acc conf.Account) {
	for i := range c.Relayer.Accounts {
		if c.Relayer.Accounts[i].Name == acc.Name {
			c.Relayer.Accounts[i] = acc
			return
		}
	}
	c.Relayer.Accounts = append(c.Relayer.Accounts, acc)
}
// Relayer holds the relayer configuration: the accounts of remote
// chains. (The previous comment described an unrelated type.)
type Relayer struct {
	// Accounts of remote chains.
	Accounts []conf.Account `yaml:"accounts"`
}
// Parse decodes a secret-config YAML document from r into a Config.
// The local variable is named cfg to stop shadowing the imported
// chainconf package alias `conf`.
func Parse(r io.Reader) (*Config, error) {
	var cfg Config
	return &cfg, yaml.NewDecoder(r).Decode(&cfg)
}
// Open reads the secret config stored under path. If the file cannot
// be opened (e.g. it does not exist yet), an empty Config is returned
// with a nil error — a missing secret file is treated as "no secrets
// yet" rather than a failure.
func Open(path string) (*Config, error) {
	file, err := os.Open(filepath.Join(path, SecretFile))
	if err != nil {
		return &Config{}, nil
	}
	defer file.Close()
	return Parse(file)
}
// Save writes conf to SecretFile under path, creating or truncating
// the file as needed.
func Save(path string, conf *Config) error {
	// 0600: the secret file holds account mnemonics, so it must not be
	// readable by other users (previously created with mode 0755).
	file, err := os.OpenFile(filepath.Join(path, SecretFile), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer file.Close()
	return yaml.NewEncoder(file).Encode(conf)
}
|
package keys
import (
"github.com/cosmos/cosmos-sdk/crypto/keyring"
"github.com/gookit/gcli/v3"
"github.com/gookit/gcli/v3/interact"
"github.com/ovrclk/akcmd/client"
"github.com/ovrclk/akcmd/flags"
)
// Command-line option names recognized by the delete command.
const (
	flagYes = "yes"
	flagForce = "force"
)
// deleteOpts holds the parsed flag values for the delete command.
var deleteOpts = struct {
	Yes bool
	Force bool
}{}
// DeleteKeyCommand deletes a key from the key store.
// NOTE(review): the --force flag is registered below but never read in
// Func — confirm whether the deprecated behavior should still apply.
func DeleteKeyCommand() *gcli.Command {
	cmd := &gcli.Command{
		Name: "delete",
		Desc: "Delete the given keys",
		Help: `Delete keys from the Keybase backend.
Note that removing offline or ledger keys will remove
only the public key references stored locally, i.e.
private keys stored in a ledger device cannot be deleted with the CLI.
`,
		Config: func(cmd *gcli.Command) {
			flags.AddKeysFlags(cmd)
			cmd.BoolOpt(&deleteOpts.Yes, flagYes, "y", false,
				"Skip confirmation prompt when deleting offline or ledger key references")
			cmd.BoolOpt(&deleteOpts.Force, flagForce, "f", false,
				"Remove the key unconditionally without asking for the passphrase. Deprecated.")
			cmd.AddArg("name", "name of the key", true, true)
		},
		Func: func(cmd *gcli.Command, args []string) error {
			clientCtx, err := client.GetClientQueryContext()
			if err != nil {
				return err
			}
			// Each positional argument names one key to delete.
			for _, name := range args {
				info, err := clientCtx.Keyring.Key(name)
				if err != nil {
					return err
				}
				// confirm deletion, unless -y is passed
				if !deleteOpts.Yes {
					if yes := interact.Confirm("Key reference will be deleted. Continue?"); !yes {
						continue
					}
				}
				if err := clientCtx.Keyring.Delete(name); err != nil {
					return err
				}
				// Ledger/offline entries only store a public-key
				// reference locally; deleting them is non-destructive.
				if info.GetType() == keyring.TypeLedger || info.GetType() == keyring.TypeOffline {
					cmd.Errorln("Public key reference deleted")
					continue
				}
				cmd.Errorln("Key deleted forever (uh oh!)")
			}
			return nil
		},
	}
	return cmd
}
|
package models
import (
"time"
)
// ItemTag is the join-table row linking an item to a tag (composite
// primary key: item_id + tag_id).
type ItemTag struct {
	ItemID uint64 `json:"item_id" gorm:"column:item_id;primary_key" sql:"not null;type:bigint(20);index:idx_tag_item_id_tag_id"`
	TagID uint64 `json:"tag_id" gorm:"column:tag_id;primary_key" sql:"not null;type:bigint(20);index:idx_tag_item_id_tag_id"`
	CreatedAt time.Time `json:"created_at" gorm:"column:created_at" sql:"not null;type:datetime"`
	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at" sql:"not null;type:datetime"`
}

// NewItemTag builds a link between itemID and tagID; the timestamps
// are left at their zero values for the ORM to populate.
func NewItemTag(itemID, tagID uint64) *ItemTag {
	link := new(ItemTag)
	link.ItemID = itemID
	link.TagID = tagID
	return link
}

// TableName maps the struct to the item_tag table.
func (e ItemTag) TableName() string {
	return "item_tag"
}
|
package main
import (
"log"
"math/rand"
"time"
)
// main seeds math/rand from the wall clock and logs one random uint64.
func main() {
	seed := time.Now().UnixNano()
	rand.Seed(seed)
	n := rand.Uint64()
	log.Printf("random num generated: %v", n)
}
|
package ingest
import (
"strings"
sq "github.com/Masterminds/squirrel"
"github.com/stellar/go/services/horizon/internal/db2/core"
"github.com/stellar/go/services/horizon/internal/db2/history"
"github.com/stellar/go/support/db"
"github.com/stellar/go/xdr"
)
// handlePaymentOp records the payment's asset when either the source
// account or the destination is the asset's issuer.
func (assetsModified AssetsModified) handlePaymentOp(paymentOp *xdr.PaymentOp, sourceAccount *xdr.AccountId) error {
	if err := assetsModified.updateIfAssetIssuerInvolved(paymentOp.Asset, *sourceAccount); err != nil {
		return err
	}
	return assetsModified.updateIfAssetIssuerInvolved(paymentOp.Asset, paymentOp.Destination)
}
// defaultSourceAccount picks the operation-level source account when one is
// set and falls back to the transaction-level default otherwise.
func defaultSourceAccount(sourceAccount *xdr.AccountId, defaultAccount *xdr.AccountId) *xdr.AccountId {
	if sourceAccount == nil {
		return defaultAccount
	}
	return sourceAccount
}
// add registers asset in the modified-assets set, keyed by its canonical
// string representation (deduplicates repeat sightings of the same asset).
func (assetsModified AssetsModified) add(asset xdr.Asset) {
	assetsModified[asset.String()] = asset
}
// IngestOperation updates the assetsModified using the passed in operation
func (assetsModified AssetsModified) IngestOperation(err error, op *xdr.Operation, source *xdr.AccountId, coreQ *core.Q) error {
	// Error pass-through lets callers chain ingestion steps without a check
	// after every call.
	if err != nil {
		return err
	}
	body := op.Body
	// An operation may override the transaction-level source account.
	sourceAccount := defaultSourceAccount(op.SourceAccount, source)
	switch body.Type {
	// TODO NNS 2 need to fix GetCreateAssetID call when adding assets from account
	// case xdr.OperationTypeSetOptions:
	// assetsModified.addAssetsFromAccount(coreQ, sourceAccount)
	case xdr.OperationTypePayment:
		// payments is the only operation where we currently perform the optimization of checking against the issuer
		return assetsModified.handlePaymentOp(body.PaymentOp, sourceAccount)
	case xdr.OperationTypePathPayment:
		// if this gets expensive then we can limit it to only include those assets that includes the issuer
		assetsModified.add(body.PathPaymentOp.DestAsset)
		assetsModified.add(body.PathPaymentOp.SendAsset)
		for _, asset := range body.PathPaymentOp.Path {
			assetsModified.add(asset)
		}
	case xdr.OperationTypeManageOffer:
		// if this gets expensive then we can limit it to only include those assets that includes the issuer
		assetsModified.add(body.ManageOfferOp.Buying)
		assetsModified.add(body.ManageOfferOp.Selling)
	case xdr.OperationTypeCreatePassiveOffer:
		// if this gets expensive then we can limit it to only include those assets that includes the issuer
		assetsModified.add(body.CreatePassiveOfferOp.Buying)
		assetsModified.add(body.CreatePassiveOfferOp.Selling)
	case xdr.OperationTypeChangeTrust:
		assetsModified.add(body.ChangeTrustOp.Line)
	case xdr.OperationTypeAllowTrust:
		// AllowTrust only carries the asset code; rebuild the full asset
		// using the (issuer) source account.
		asset := body.AllowTrustOp.Asset.ToAsset(*sourceAccount)
		assetsModified.add(asset)
	}
	return nil
}
// UpdateAssetStats updates the db with the latest asset stats for the assets that were modified
func (assetsModified AssetsModified) UpdateAssetStats(is *Session) {
	// Bail out early when the ingestion session has already failed.
	if is.Err != nil {
		return
	}
	hasValue := false
	for _, asset := range assetsModified {
		// computeAssetStat reports errors via is.Err and returns nil for
		// native assets, which have no stats row.
		assetStat := computeAssetStat(is, &asset)
		if is.Err != nil {
			return
		}
		if assetStat != nil {
			hasValue = true
			is.Ingestion.assetStats = is.Ingestion.assetStats.Values(
				assetStat.ID,
				assetStat.Amount,
				assetStat.NumAccounts,
				assetStat.Flags,
				assetStat.Toml,
			)
		}
	}
	if hasValue {
		// perform a delete first since upsert is not supported if postgres < 9.5
		is.Err = assetsModified.deleteRows(is.Ingestion.DB)
		if is.Err != nil {
			return
		}
		// can perform a direct upsert if postgres > 9.4
		// is.Ingestion.assetStats = is.Ingestion.assetStats.
		// Suffix("ON CONFLICT (id) DO UPDATE SET (amount, num_accounts, flags, toml) = (excluded.amount, excluded.num_accounts, excluded.flags, excluded.toml)")
		_, is.Err = is.Ingestion.DB.Exec(is.Ingestion.assetStats)
	}
}
// func (assetsModified AssetsModified) addAssetsFromAccount(coreQ *core.Q, account *xdr.AccountId) {
// if account == nil {
// return
// }
// var assets []xdr.Asset
// coreQ.AssetsForAddress(&assets, account.Address())
// for _, asset := range assets {
// if asset.Type != xdr.AssetTypeAssetTypeNative {
// assetsModified.add(asset)
// }
// }
// }
// deleteRows removes the asset_stats rows for every modified asset so that
// freshly computed values can be re-inserted (delete+insert stands in for an
// upsert; see UpdateAssetStats).
func (assetsModified AssetsModified) deleteRows(session *db.Session) error {
	if len(assetsModified) == 0 {
		return nil
	}
	assets := make([]xdr.Asset, 0, len(assetsModified))
	for _, asset := range assetsModified {
		assets = append(assets, asset)
	}
	historyQ := history.Q{Session: session}
	ids, err := historyQ.GetAssetIDs(assets)
	if err != nil {
		return err
	}
	// None of the modified assets exist in the history DB yet.
	if len(ids) == 0 {
		return nil
	}
	deleteStmt := sq.Delete("asset_stats").Where(sq.Eq{"id": ids})
	_, err = session.Exec(deleteStmt)
	return err
}
// updateIfAssetIssuerInvolved marks asset as modified, but only when account
// is the asset's issuer (only issuer-side activity can change asset stats
// for payments — see IngestOperation).
func (assetsModified AssetsModified) updateIfAssetIssuerInvolved(asset xdr.Asset, account xdr.AccountId) error {
	var assetType, assetCode, assetIssuer string
	err := asset.Extract(&assetType, &assetCode, &assetIssuer)
	if err != nil {
		return err
	}
	if assetIssuer == account.Address() {
		assetsModified.add(asset)
	}
	return nil
}
// computeAssetStat assembles the current stats row for a single non-native
// asset. Errors are reported through is.Err; a nil return means either an
// error occurred or the asset is native (native assets have no stats row).
func computeAssetStat(is *Session, asset *xdr.Asset) *history.AssetStat {
	if asset.Type == xdr.AssetTypeAssetTypeNative {
		return nil
	}
	historyQ := history.Q{Session: is.Ingestion.DB}
	// Look up (or create) the asset's id in the history DB.
	assetID, err := historyQ.GetCreateAssetID(*asset)
	if err != nil {
		is.Err = err
		return nil
	}
	var assetType xdr.AssetType
	var assetCode, assetIssuer string
	err = asset.Extract(&assetType, &assetCode, &assetIssuer)
	if err != nil {
		is.Err = err
		return nil
	}
	// Trustline and issuer-account stats come from the core DB.
	coreQ := &core.Q{Session: is.Cursor.DB}
	numAccounts, amount, err := statTrustlinesInfo(coreQ, assetType, assetCode, assetIssuer)
	if err != nil {
		is.Err = err
		return nil
	}
	flags, toml, err := statAccountInfo(coreQ, assetIssuer)
	if err != nil {
		is.Err = err
		return nil
	}
	return &history.AssetStat{
		ID:          assetID,
		Amount:      amount,
		NumAccounts: numAccounts,
		Flags:       flags,
		Toml:        toml,
	}
}
// statTrustlinesInfo fetches all the stats from the trustlines table:
// the number of accounts holding the asset and the total balance held.
func statTrustlinesInfo(coreQ *core.Q, assetType xdr.AssetType, assetCode string, assetIssuer string) (int32, int64, error) {
	return coreQ.BalancesForAsset(int32(assetType), assetCode, assetIssuer)
}
// statAccountInfo fetches the issuer account's flags and, when the account
// has a non-blank home domain configured, the URL where its stellar.toml
// file is expected to live. Returns flags of -1 on lookup failure.
func statAccountInfo(coreQ *core.Q, accountID string) (int8, string, error) {
	var account core.Account
	if err := coreQ.AccountByAddress(&account, accountID); err != nil {
		return -1, "", err
	}
	var toml string
	if account.HomeDomain.Valid {
		// Build the URL from the whitespace-trimmed domain. The previous
		// version validated the trimmed value but then embedded the raw
		// string, producing an invalid URL for padded home domains.
		if trimmed := strings.TrimSpace(account.HomeDomain.String); trimmed != "" {
			toml = "https://" + trimmed + "/.well-known/stellar.toml"
		}
	}
	return int8(account.Flags), toml, nil
}
|
package queryrange
import (
"context"
"github.com/prometheus/client_golang/prometheus"
"github.com/weaveworks/common/instrument"
)
// InstrumentMiddleware can be inserted into the middleware chain to expose timing information.
func InstrumentMiddleware(name string, queryRangeDuration *prometheus.HistogramVec) Middleware {
	return MiddlewareFunc(func(next Handler) Handler {
		return HandlerFunc(func(ctx context.Context, req *Request) (*APIResponse, error) {
			var resp *APIResponse
			// Record the wall time of the downstream handler in the supplied
			// histogram under the given operation name.
			err := instrument.TimeRequestHistogram(ctx, name, queryRangeDuration, func(ctx context.Context) error {
				var err error
				resp, err = next.Do(ctx, req)
				return err
			})
			return resp, err
		})
	})
}
|
package management
import "encoding/json"
// DBConnectionsChangePassword is the request payload for the Auth0
// dbconnections/change_password endpoint.
type DBConnectionsChangePassword struct {
	// The client_id of your client. We strongly recommend including a Client ID so that the email template knows from which client the request was triggered.
	ClientID *string `json:"client_id,omitempty"`
	// The user's email address.
	Email *string `json:"email"`
	// The new password. See the next paragraph for the case when a password can be set.
	Password *string `json:"password,omitempty"`
	// The name of the database connection configured to your client.
	Connection *string `json:"connection"`
}

// String renders the payload as indented JSON, mainly for logging/debugging.
func (c *DBConnectionsChangePassword) String() string {
	encoded, _ := json.MarshalIndent(c, "", " ")
	return string(encoded)
}
// DBConnectionsManager groups the database-connection related API calls.
type DBConnectionsManager struct {
	// m is the shared Management client used to perform HTTP requests.
	m *Management
}

// NewDBConnectionsManager returns a manager bound to the given Management client.
func NewDBConnectionsManager(m *Management) *DBConnectionsManager {
	return &DBConnectionsManager{m}
}
// ChangePassword triggers the change-password flow by POSTing the payload to
// the dbconnections/change_password endpoint; the raw response body is
// returned as a string.
func (dbcm *DBConnectionsManager) ChangePassword(dbChangePw *DBConnectionsChangePassword) (string, error) {
	resp, err := dbcm.m.plainHTTPRequest("POST", dbcm.m.plainURI("dbconnections", "change_password"), dbChangePw)
	return string(resp), err
}
|
package client
import (
"context"
"sync"
"github.com/drand/drand/chain"
"github.com/drand/drand/client"
dclient "github.com/drand/drand/client"
"github.com/drand/drand/cmd/relay-gossip/lp2p"
"github.com/drand/drand/protobuf/drand"
"github.com/gogo/protobuf/proto"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"golang.org/x/xerrors"
)
var (
	// log is the package-level logger for the drand gossip client.
	log = logging.Logger("drand-client")
)

// Client is a concrete pubsub client implementation
type Client struct {
	// cancel stops the background goroutine started in NewWithPubsub.
	cancel func()
	// latest is the highest round number seen so far; only touched by the
	// pubsub-consuming goroutine.
	latest uint64
	// subs is the registry of subscriber channels, keyed by unique pointer
	// ids (see Sub); the embedded mutex guards M.
	subs struct {
		sync.Mutex
		M map[*int]chan drand.PublicRandResponse
	}
}
// WithPubsub provides an option for integrating pubsub notification
// into a drand client.
func WithPubsub(ps *pubsub.PubSub, chainHash string) dclient.Option {
	// The chain info argument is unused: the gossip topic is derived from
	// chainHash alone.
	return dclient.WithWatcher(func(_ *chain.Info) (dclient.Watcher, error) {
		c, err := NewWithPubsub(ps, chainHash)
		if err != nil {
			return nil, err
		}
		return c, nil
	})
}
// NewWithPubsub creates a gossip randomness client.
func NewWithPubsub(ps *pubsub.PubSub, chainHash string) (*Client, error) {
	t, err := ps.Join(lp2p.PubSubTopic(chainHash))
	if err != nil {
		return nil, xerrors.Errorf("joining pubsub: %w", err)
	}
	s, err := t.Subscribe()
	if err != nil {
		return nil, xerrors.Errorf("subscribe: %w", err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	c := &Client{
		cancel: cancel,
	}
	c.subs.M = make(map[*int]chan drand.PublicRandResponse)
	// Single consumer goroutine: decodes each gossip message and fans it out
	// to every subscriber channel. It is the only writer of c.latest and is
	// shut down via Close(), which cancels ctx.
	go func() {
		for {
			msg, err := s.Next(ctx)
			if ctx.Err() != nil {
				// Client closed: close and drop all subscriber channels,
				// then release the pubsub topic and subscription.
				c.subs.Lock()
				for _, ch := range c.subs.M {
					close(ch)
				}
				c.subs.M = make(map[*int]chan drand.PublicRandResponse)
				c.subs.Unlock()
				t.Close()
				s.Cancel()
				return
			}
			if err != nil {
				log.Warnf("topic.Next error: %+v", err)
				continue
			}
			var rand drand.PublicRandResponse
			err = proto.Unmarshal(msg.Data, &rand)
			if err != nil {
				log.Warnf("unmarshaling randomness: %+v", err)
				continue
			}
			// TODO: verification, need to pass drand network public key in
			// Skip stale or duplicate rounds.
			if c.latest >= rand.Round {
				continue
			}
			c.latest = rand.Round
			c.subs.Lock()
			for _, ch := range c.subs.M {
				// Non-blocking send: a slow subscriber loses notifications
				// instead of stalling the whole fan-out.
				select {
				case ch <- rand:
				default:
					log.Warn("randomness notification dropped due to a full channel")
				}
			}
			c.subs.Unlock()
		}
	}()
	return c, nil
}
// UnsubFunc is a cancel function for pubsub subscription
type UnsubFunc func()

// Sub subscribes to notifications about new randomness.
// The Client instance owns the channel after it is passed to Sub,
// thus the channel should not be closed by the library user.
//
// It is recommended to use a buffered channel. If the channel is full,
// notification about randomness will be dropped.
//
// Notification channels will be closed when the client is Closed
func (c *Client) Sub(ch chan drand.PublicRandResponse) UnsubFunc {
	// A fresh pointer is used as the map key so the same channel can be
	// registered multiple times and removed independently.
	id := new(int)
	c.subs.Lock()
	c.subs.M[id] = ch
	c.subs.Unlock()
	return func() {
		c.subs.Lock()
		delete(c.subs.M, id)
		close(ch)
		c.subs.Unlock()
	}
}
// Watch implements the dclient.Watcher interface
func (c *Client) Watch(ctx context.Context) <-chan client.Result {
	innerCh := make(chan drand.PublicRandResponse)
	outerCh := make(chan dclient.Result)
	end := c.Sub(innerCh)
	// Bridge goroutine: converts pubsub responses into client.Result values
	// until the subscription channel closes or ctx is cancelled.
	go func() {
		for {
			select {
			case resp, ok := <-innerCh:
				if !ok {
					// Subscription channel was closed by client shutdown.
					close(outerCh)
					return
				}
				// Non-blocking forward mirrors the fan-out behavior.
				select {
				case outerCh <- &result{resp.Round, resp.Randomness, resp.Signature}:
				default:
					log.Warn("randomness notification dropped due to a full channel")
				}
			case <-ctx.Done():
				close(outerCh)
				end()
				// drain leftover on innerCh
				// (end() closes innerCh, so this loop terminates)
				for range innerCh {
				}
				return
			}
		}
	}()
	return outerCh
}
// result is a plain-data implementation of the drand client result
// interface, carrying one round's randomness and signature.
type result struct {
	round      uint64
	randomness []byte
	signature  []byte
}

// Round reports which beacon round produced this result.
func (res *result) Round() uint64 {
	return res.round
}

// Randomness returns the random bytes for the round.
func (res *result) Randomness() []byte {
	return res.randomness
}

// Signature returns the signature bytes for the round.
func (res *result) Signature() []byte {
	return res.signature
}
// Close stops Client, cancels PubSub subscription and closes the topic.
// The actual teardown (closing subscriber channels, t.Close, s.Cancel) is
// performed by the goroutine started in NewWithPubsub once it observes the
// cancelled context.
func (c *Client) Close() error {
	c.cancel()
	return nil
}
// TODO: New for users without libp2p already running
|
package controllers
import (
"time"
"github.com/chrilnth/apikeymanager/models"
"github.com/gofiber/fiber"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
// LogController serves CRUD endpoints for API-key request logs.
type LogController struct {
	// NOTE(review): the collection is dereferenced and stored by value —
	// confirm copying a mongo.Collection is intended here; keeping the
	// pointer is the more common pattern.
	Collection mongo.Collection
}

// NewLogController wires a controller to the given MongoDB collection.
func NewLogController(collection *mongo.Collection) *LogController {
	return &LogController{
		Collection: *collection,
	}
}
// GetAll returns every log document whose "apikey" field matches the :id
// route parameter, wrapped as {"success":true,"data":{"request":[...]}}.
func (ctr *LogController) GetAll(c *fiber.Ctx) error {
	var id = c.Params("id")
	query := bson.M{"apikey": id}
	cursor, err := ctr.Collection.Find(c.Context(), query)
	if err != nil {
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"success": false,
			"message": "Something went wrong",
			"error":   err.Error(),
		})
	}
	// Start from a non-nil empty slice so an empty result serializes as []
	// rather than null.
	var logs []models.Log = make([]models.Log, 0)
	// cursor.All drains (and closes) the cursor into logs.
	err = cursor.All(c.Context(), &logs)
	if err != nil {
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"success": false,
			"message": "Something went wrong",
			"error":   err.Error(),
		})
	}
	return c.Status(fiber.StatusOK).JSON(fiber.Map{
		"success": true,
		"data": fiber.Map{
			"request": logs,
		},
	})
}
// Create stores a new log entry parsed from the request body and echoes the
// stored document (including server-assigned fields) back to the caller.
func (ctr *LogController) Create(c *fiber.Ctx) error {
	data := new(models.Log)
	// Default the timestamp before parsing so a client-supplied date, if the
	// body carries one, can still override it.
	data.Date = time.Now()
	// BodyParser wants a pointer to the target struct; data is already
	// *models.Log (the previous &data passed a needless double pointer).
	if err := c.BodyParser(data); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"success": false,
			"message": "Cannot parse JSON",
			"error":   err,
		})
	}
	// Never trust a client-provided ID; let MongoDB assign one.
	data.ID = nil
	result, err := ctr.Collection.InsertOne(c.Context(), data)
	if err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"success": false,
			"message": "Cannot insert request",
			"error":   err,
		})
	}
	// Read the document back so the response reflects what was stored.
	// The previous version silently ignored Decode errors and could return
	// an empty document as if it had been created.
	log := &models.Log{}
	query := bson.D{{Key: "_id", Value: result.InsertedID}}
	if err := ctr.Collection.FindOne(c.Context(), query).Decode(log); err != nil {
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"success": false,
			"message": "Cannot read inserted request",
			"error":   err,
		})
	}
	return c.Status(fiber.StatusCreated).JSON(fiber.Map{
		"success": true,
		"data": fiber.Map{
			"request": log,
		},
	})
}
// GetById is a placeholder endpoint; it always answers 200 OK with a
// "Not implemented" message.
func (ctr *LogController) GetById(c *fiber.Ctx) error {
	return c.Status(fiber.StatusOK).JSON(fiber.Map{
		"success": true,
		"data": fiber.Map{
			"message": "Not implemented",
		},
	})
}
// Update is a placeholder endpoint; it always answers 400 with a
// "Not implemented" message.
func (ctr *LogController) Update(c *fiber.Ctx) error {
	return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
		"success": false,
		"message": "Not implemented",
	})
}
// Delete is a placeholder endpoint; it always answers 400 with a
// "Not implemented" message.
func (ctr *LogController) Delete(c *fiber.Ctx) error {
	return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
		"success": false,
		"message": "Not implemented",
	})
}
|
package raftor

// Stopper stops whatever you need to be stopped. It is meant to be used
// inside a select statement: Stop returns a channel that presumably is
// closed (or receives) once the component should halt — confirm the exact
// contract with implementations.
type Stopper interface {
	Stop() <-chan struct{}
}
|
package dushengchen
/**
Submission:
https://leetcode.com/submissions/detail/372752824/
Runtime: 4 ms, faster than 77.21% of Go online submissions for Permutations II.
Memory Usage: 3.5 MB, less than 77.78% of Go online submissions for Permutations II.
*/

// permuteUnique returns all distinct permutations of nums (LeetCode 47) by
// sorting the input and then repeatedly advancing to the next lexicographic
// permutation until none remains. SortInt and CopyInt are package helpers
// defined elsewhere; CopyInt snapshots each permutation so later in-place
// steps do not mutate already-stored results.
func permuteUnique(nums []int) [][]int {
	nums = SortInt(nums)
	var ret [][]int
	ret = append(ret, CopyInt(nums))
	for {
		if x := nextPermutationX(nums); !x {
			break
		}
		ret = append(ret, CopyInt(nums))
	}
	return ret
}
// nextPermutationX advances nums in place to its next lexicographic
// permutation and reports whether one existed.
// (Translated from the original Chinese note: this code comes from problem
// q31; unlike that version, a fully descending input is left untouched and
// false is returned instead of wrapping back to sorted order.)
func nextPermutationX(nums []int) bool {
	if len(nums) <= 1 {
		return false
	}
	// Locate the rightmost ascent: nums[pivot-1] < nums[pivot].
	pivot := -1
	for idx := len(nums) - 1; idx > 0; idx-- {
		if nums[idx-1] < nums[idx] {
			pivot = idx
			break
		}
	}
	if pivot == -1 {
		// Entirely non-increasing: this was the last permutation.
		return false
	}
	// The suffix nums[pivot:] is non-increasing; swap nums[pivot-1] with the
	// last suffix element still greater than it.
	for swap := pivot; ; swap++ {
		if swap+1 == len(nums) || (nums[pivot-1] < nums[swap] && nums[pivot-1] >= nums[swap+1]) {
			nums[pivot-1], nums[swap] = nums[swap], nums[pivot-1]
			break
		}
	}
	// Reverse the suffix so it becomes the smallest ordering (this inlines
	// the package's RevertInt helper).
	for lo, hi := pivot, len(nums)-1; lo < hi; lo, hi = lo+1, hi-1 {
		nums[lo], nums[hi] = nums[hi], nums[lo]
	}
	return true
}
|
package main
// go run .\main.go .\model.go .\mwscalls.go .\xmlhelpers.go .\parsers.go
import (
"MyGoPackages/Amazon/Mws"
"encoding/json"
"fmt"
)
// MWS credentials: SellerID/AuthToken identify the seller account being
// queried, AccessKey/SecretKey belong to the developer (main) account.
// They are intentionally blank here and must be filled in before running.
var (
	// SellerID or merchant id from user
	SellerID = ""
	// AuthToken from user
	AuthToken = ""
	// Region from user
	Region = "US"
	// AccessKey is from main account
	AccessKey = ""
	// SecretKey is from main account
	SecretKey = ""
)
// main performs a single MWS keyword product lookup and dumps the result as
// JSON to stdout. The commented calls are alternative lookups kept for
// manual experimentation.
func main() {
	fmt.Println("start")
	//listProducts, listProductsError := Mws.GetProductsByASIN("B00IGR5EQE", SellerID, AuthToken, Region, AccessKey, SecretKey)
	//getLowestOfferListingsForASIN("B00078ZLLI", "New")
	// listProducts, _ := parseLowestOfferListingsForASIN("B00IGR5EQE", "New", ProductTracking{})
	listProducts, listProductsError := Mws.GetProductsByKeyword("Spin Master Games - Moustache Smash", "New", SellerID, AuthToken, Region, AccessKey, SecretKey)
	if listProductsError != nil {
		fmt.Println("error")
	} else {
		// for _, listProduct := range listProducts {
		// //items := listpro
		// }
		res2B1mws, _ := json.Marshal(listProducts)
		fmt.Println("item : ", string(res2B1mws))
	}
	fmt.Println("stop")
}
|
package main
import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
)
// fListCompany responds with a JSON array of company names: the ".csv" files
// found under ./csv, minus their extension.
func fListCompany(w http.ResponseWriter, r *http.Request) {
	slFile, err := ioutil.ReadDir("./csv")
	w.Header().Set("Content-Type", "application/json")
	if err != nil {
		fmt.Fprintf(w, `{"error": "%s"}`, err)
		log.Println(err)
		return
	}
	var slFileName []string
	for _, oFile := range slFile {
		name := oFile.Name()
		// Only list real CSV files. The previous blind name[:len(name)-4]
		// slice panicked on entries shorter than four characters and mangled
		// non-CSV names and directories.
		if oFile.IsDir() || !strings.HasSuffix(name, ".csv") {
			continue
		}
		slFileName = append(slFileName, strings.TrimSuffix(name, ".csv"))
	}
	var jsonb []byte
	jsonb, err = json.Marshal(slFileName)
	if err != nil {
		fmt.Fprintf(w, `{"error": "%s"}`, err)
		log.Println(err)
		return
	}
	fmt.Fprint(w, string(jsonb))
}
|
package main
import "fmt"
// main demonstrates re-slicing: all four printed views share the same
// backing array as s.
func main() {
	s := []int{2, 3, 5, 7, 11, 13}
	fmt.Println("s ==", s)
	fmt.Println("s[1:4] ==", s[1:4])
	fmt.Println("s[:3] ==", s[:3])
	fmt.Println("s[4:] ==", s[4:])
	// s == [2 3 5 7 11 13]
	// s[1:4] == [3 5 7]
	// s[:3] == [2 3 5]
	// s[4:] == [11 13]
}

/*
Re-slicing slices:
A slice can be re-sliced, creating a new slice value that points at the same
underlying array.
The expression
	s[lo:hi]
denotes the slice elements from lo through hi-1: inclusive of the first
index, exclusive of the last.
Therefore s[lo:lo] is empty and s[lo:lo+1] has one element.
*/
|
package main
import (
"log"
"sort"
"strconv"
"strings"
)
// Manager is the sample record used by the EncodeManager exercise.
type Manager struct {
	FullName       string
	Position       string
	Age            int32
	YearsInCompany int32
}
// main runs the currently-active practice exercises; earlier experiments are
// kept commented out for reference.
func main() {
	// log.Println(DuplicateEncode("Supralapsarian"))
	// log.Println(findSubstring("aqkgwyolaososilnvnquimtcawuwtqrigjnxfjtcwvuoxgtzfehciaocgsossobzyrmxjutsudkrxhzwtmsythfecwdmdznwuopelbqjyuqtkymglfdlnlwhkpkoovqkwjwfcjnaannjuynslpkcrzeqgxnanunxpbqbzxvuozahizdsxflvysenhkisrnwtggsoirefeshuhjjooxpuoisicpncrnqvirkfjfeczyynvpdmweexheegpfitcnromuibrmfvvlkqeglsuxcyobplyelhwjfcbnnkapkyommzhqwsrjylepywakzfzhvxghfmnckkxcdagkmffhrnfklinamnrgicdzzwtvbordqvkomjespinaqrqhnpngymosaucbotyimoyofzkvrudcipafbykwshygeeexiefqczuhldwikewhunxemxininwftuvpnoyjirgqmjvvwlydyrdxqxglsbyxgeafvwrznbljpjtbrjubyyrjjkjqjcfziaekqsevbzsiebjhdemivfmwuyfrayvqfvmvlvdjurwrrtubhtiqwzyugpgcfreybdyirtitvkoymaotyrzpftvyyacbwohgrhcpwuqxjjjjzipsmeqtumkvummqfsdysilplbjzggkbwohoayzzapfjwnlzchwyymptxhrvalnoklboawyoecosrkbwjngbfdzfamfiqfwyancpatfccxpflvlirzgwygedqxcgnxcusunjtlyscbwpbqvvuexialqevkcwzduvkuuyofjmjalctxfgemxbfyvfudqketktzyrxcbroefhjukjmynohrvvsajempoiunxbztjualnpcdznfxsrizgtfbdkcrmtvxbgetcgucsrjpzjbtufbfnaxkipqkisbchzymbsqymjxjuojbxoljpbcgwkycvxymoawzopjuaypkgwwpxggjakhpfqbsjaznatalnznezsncyqgflwtdphpnfjjxgykupkbqdwpcnuncvyjsep{-truncated-}{-truncated-}", 10))
	RemainderSorting([]string{"Colorado", "Utah", "Montana", "Wisconsin", "Oregon", "Maine"})
	EncodeManager(&Manager{"Bekzhan", "SEO", 27, 1})
}
// T is an unused leftover from an earlier version of RemainderSorting (which
// now declares its own local struct); kept to avoid breaking anything.
type T struct {
	Key   string
	Value int
	Len   int
}
// RemainderSorting orders words primarily by len(word)%3 ascending and,
// within equal remainders, by word length descending. The sorted order is
// logged and returned.
func RemainderSorting(strArr []string) []string {
	type entry struct {
		word   string
		modulo int
		length int
	}
	entries := make([]entry, 0, len(strArr))
	for _, w := range strArr {
		entries = append(entries, entry{word: w, modulo: len(w) % 3, length: len(w)})
	}
	// Two-level comparison: remainder first, then longer words first.
	less := func(i, j int) bool {
		if entries[i].modulo != entries[j].modulo {
			return entries[i].modulo < entries[j].modulo
		}
		return entries[i].length > entries[j].length
	}
	sort.Slice(entries, less)
	result := make([]string, 0, len(entries))
	for _, e := range entries {
		result = append(result, e.word)
	}
	log.Println(result)
	return result
}
// (io.Reader, error)
// EncodeManager hand-builds a JSON representation of manager, skipping
// zero-valued fields, and logs the result. (A manual encoder kept as an
// exercise; encoding/json would normally be preferred.)
func EncodeManager(manager *Manager) {
	// Collect "key":value pairs and join them with commas so the object is
	// valid JSON. The previous version emitted malformed keys such as
	// "full_name:"Bekzhan and left a trailing comma when the last field was
	// zero-valued.
	parts := []string{}
	if manager.FullName != "" {
		parts = append(parts, `"full_name":"`+manager.FullName+`"`)
	}
	if manager.Position != "" {
		parts = append(parts, `"position":"`+manager.Position+`"`)
	}
	if manager.Age != 0 {
		parts = append(parts, `"age":`+strconv.Itoa(int(manager.Age)))
	}
	if manager.YearsInCompany != 0 {
		parts = append(parts, `"full_year_company":`+strconv.Itoa(int(manager.YearsInCompany)))
	}
	result := "{" + strings.Join(parts, ",") + "}"
	log.Println(result)
	// return io.Reader, nil
}
// parallelProccessing is an empty stub for a planned exercise. (The
// misspelled name is kept as-is to avoid breaking any references.)
func parallelProccessing() {
}
// findSubstring scans every k-length window of s and returns the one with
// the most vowels (later windows win ties). Scanning stops early at the
// first window consisting entirely of vowels. Returns "Not found!" when no
// window contains a vowel. The best count is logged as a side effect.
func findSubstring(s string, k int32) string {
	window := int(k)
	best := 0
	bestStart := 0
	for start := range s {
		if len(s)-start < window {
			continue
		}
		// Count vowels in the current window (inlined countVowel).
		vowels := 0
		for _, r := range s[start : start+window] {
			if strings.ContainsRune("aeiou", r) {
				vowels++
			}
		}
		if best <= vowels {
			bestStart = start
			best = vowels
		}
		if best == window {
			bestStart = start
			break
		}
	}
	log.Println(best)
	if best == 0 {
		return "Not found!"
	}
	return s[bestStart : bestStart+window]
}
// countVowel returns how many characters of slice are lowercase ASCII
// vowels (a, e, i, o, u).
func countVowel(slice string) int {
	total := 0
	for _, r := range slice {
		if strings.ContainsRune("aeiou", r) {
			total++
		}
	}
	return total
}
// DuplicateEncode lowercases word and replaces each character with "(" when
// it occurs exactly once and ")" when it occurs more than once (Codewars
// "Duplicate Encoder").
//
// This is a rewrite of the original index-tracking version, which enumerated
// occurrence lists of length 1-4 by hand and therefore silently emitted
// nothing for characters appearing five or more times (unless the whole word
// was one repeated letter). Counting occurrences directly handles any
// multiplicity and is a single O(n) pass each way.
func DuplicateEncode(word string) string {
	word = strings.ToLower(word)
	counts := make(map[rune]int, 10)
	for _, ch := range word {
		counts[ch]++
	}
	var encoded strings.Builder
	encoded.Grow(len(word))
	for _, ch := range word {
		if counts[ch] == 1 {
			encoded.WriteByte('(')
		} else {
			encoded.WriteByte(')')
		}
	}
	return encoded.String()
}
// helper reports whether currentCharIdx occurs among the first n entries of
// seq (n must not exceed len(seq), as in the original).
func helper(n int, seq []int, currentCharIdx int) bool {
	for _, idx := range seq[:n] {
		if idx == currentCharIdx {
			return true
		}
	}
	return false
}
|
package main
import (
"encoding/hex"
"flag"
"fmt"
"log"
"reflect"
"dfnpf/examples/iniths"
)
// main drives a single Noise handshake message exchange between an initiator
// and a responder built from command-line arguments, then prints both
// snapshots of the responder's chaining key ("ck", read via reflection from
// the unexported symmetric state) as hex for external comparison.
func main() {
	flag.Parse()
	// iniths.InitHandshake consumes between 4 and 11 positional arguments.
	if len(flag.Args()) < 4 || len(flag.Args()) > 11 {
		log.Fatalln("Please provide Noise Protocol name, initiator's static and ephemeral keys,",
			"\n responder's static and ephemeral keys, remote key, prologue, preshared key, message")
	}
	handshInit, handshResp, payload := iniths.InitHandshake(flag.Args())
	var err error
	var msg []byte
	//var csWrite0, csWrite1, csRead0, csRead1 *noise.CipherState
	msg, _, _, _ = handshInit.WriteMessage(nil, payload)
	// Capture the responder's chaining key before it processes the message…
	ptrHandsh := reflect.ValueOf(handshResp)
	ptrSymSt := ptrHandsh.Elem().FieldByName("ss")
	ckI := reflect.Indirect(ptrSymSt).FieldByName("ck")
	_, _, _, err = handshResp.ReadMessage(nil, msg)
	// …and again afterwards, so the two hex dumps show the key evolution.
	ptrHandsh = reflect.ValueOf(handshResp)
	ptrSymSt = ptrHandsh.Elem().FieldByName("ss")
	ckR := reflect.Indirect(ptrSymSt).FieldByName("ck")
	fmt.Println(hex.EncodeToString(ckI.Bytes()))
	fmt.Println(hex.EncodeToString(ckR.Bytes()))
	if err != nil {
		panic(err)
	}
}
|
// Package outputstream represents the messages which the ircserver package
// generates in response to what is being sent to RobustIRC.
//
// Data is stored in a temporary LevelDB database so that not all data is kept
// in main memory at all times. The working set we are talking about is ≈100M,
// but using LevelDB (with its default Snappy compression), that gets
// compressed down to ≈35M.
package outputstream
import (
"context"
"encoding/binary"
"io/ioutil"
"log"
"math"
"os"
"path/filepath"
"strings"
"sync"
"github.com/robustirc/robustirc/internal/robust"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
// Message is similar to robust.Message, but more compact. This speeds up
// (de)serialization, which is useful for storing messages outside of main
// memory.
type Message struct {
	// Id identifies the message within the stream.
	Id   robust.Id
	Data string
	// InterestingFor maps ids (presumably session ids — confirm against
	// robust.Message) to whether this message is relevant for them.
	InterestingFor map[uint64]bool
}
// messageBatch is the LevelDB value type: all output messages generated in
// reply to one input message, plus a link to the following batch.
type messageBatch struct {
	Messages []Message
	// NextID is the id of the next message, or math.MaxUint64 if there is no
	// next message yet.
	NextID uint64
}
// OutputStream stores the ordered stream of output message batches in a
// temporary LevelDB, with an in-memory cache and a condition variable so
// readers can block until new messages arrive.
type OutputStream struct {
	// tmpdir is the directory which we pass to ioutil.TempDir.
	tmpdir string
	// dirname is the directory returned by ioutil.TempDir which
	// contains our database.
	dirname string
	// messagesMu guards |db|, |batch| and |lastseen|.
	messagesMu sync.RWMutex
	// newMessage is broadcast (under messagesMu) whenever Add appends a
	// batch; GetNext waits on it.
	newMessage *sync.Cond
	db         *leveldb.DB
	// batch is reused across Add calls to avoid reallocating.
	batch leveldb.Batch
	// lastseen is the most recently appended batch (the stream tail).
	lastseen messageBatch
	// cacheMu guards messagesCache, a bounded read cache of decoded batches.
	cacheMu       sync.RWMutex
	messagesCache map[uint64]*messageBatch
}
// DeleteOldDatabases removes every leftover "tmp-outputstream-" database
// directory under tmpdir, typically remnants of previous process runs.
func DeleteOldDatabases(tmpdir string) error {
	dir, err := os.Open(tmpdir)
	if err != nil {
		return err
	}
	defer dir.Close()
	entries, err := dir.Readdirnames(-1)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		if !strings.HasPrefix(entry, "tmp-outputstream-") {
			continue
		}
		if err := os.RemoveAll(filepath.Join(tmpdir, entry)); err != nil {
			return err
		}
	}
	return nil
}
// NewOutputStream creates an OutputStream backed by a fresh temporary
// LevelDB under tmpdir; reset() seeds it with the sentinel message id 0.
// (Note: the local variable os shadows the standard os package here.)
func NewOutputStream(tmpdir string) (*OutputStream, error) {
	os := &OutputStream{
		tmpdir:        tmpdir,
		messagesCache: make(map[uint64]*messageBatch),
	}
	// The condition shares messagesMu so GetNext waiters are woken by Add.
	os.newMessage = sync.NewCond(&os.messagesMu)
	return os, os.reset()
}
// Close shuts down the LevelDB (if open) and removes its on-disk directory.
// Safe to call on a stream whose database was never opened.
func (o *OutputStream) Close() error {
	if o.db == nil {
		return nil
	}
	if err := o.db.Close(); err != nil {
		return err
	}
	return os.RemoveAll(o.dirname)
}
// Reset deletes all messages.
// It closes and removes any existing database, opens a fresh one in a new
// temp dir, and seeds it with the sentinel batch for id 0.
// (Note: the receiver is named os, shadowing the standard os package inside
// this method.)
func (os *OutputStream) reset() error {
	var key [8]byte
	os.messagesMu.Lock()
	defer os.messagesMu.Unlock()
	if err := os.Close(); err != nil {
		return err
	}
	dirname, err := ioutil.TempDir(os.tmpdir, "tmp-outputstream-")
	if err != nil {
		return err
	}
	os.dirname = dirname
	// Open a temporary database, i.e. one whose values we only use as long as
	// this RobustIRC process is running, and which will be deleted at the next
	// startup. This implies we don’t need to fsync(), and leveldb should error
	// out when there already is a database in our newly created tempdir.
	db, err := leveldb.OpenFile(dirname, &opt.Options{
		ErrorIfExist:       true,
		NoSync:             true,
		BlockCacheCapacity: 2 * 1024 * 1024,
	})
	if err != nil {
		return err
	}
	os.db = db
	// Sentinel batch: id 0 with no next message yet (NextID = MaxUint64).
	os.lastseen = messageBatch{
		Messages: []Message{
			{
				Id:             robust.Id{Id: 0},
				InterestingFor: make(map[uint64]bool),
			},
		},
		NextID: math.MaxUint64,
	}
	binary.BigEndian.PutUint64(key[:], uint64(0))
	return os.db.Put(key[:], os.lastseen.marshal(), nil)
}
// Add adds messages to the output stream. The Id.Id field of all messages must
// be identical, i.e. they must all be replies to the same input IRC message.
func (os *OutputStream) Add(msgs []Message) error {
	var key [8]byte
	os.messagesMu.Lock()
	defer os.messagesMu.Unlock()
	os.batch.Reset()
	// Link the previous tail batch to the new one and rewrite it.
	os.lastseen.NextID = uint64(msgs[0].Id.Id)
	binary.BigEndian.PutUint64(key[:], uint64(os.lastseen.Messages[0].Id.Id))
	os.batch.Put(key[:], os.lastseen.marshal())
	// Invalidate any cached copy of the rewritten predecessor.
	os.cacheMu.Lock()
	delete(os.messagesCache, uint64(os.lastseen.Messages[0].Id.Id))
	os.cacheMu.Unlock()
	// The new batch becomes the tail ("no next message yet").
	os.lastseen = messageBatch{
		Messages: msgs,
		NextID:   math.MaxUint64,
	}
	binary.BigEndian.PutUint64(key[:], uint64(msgs[0].Id.Id))
	os.batch.Put(key[:], os.lastseen.marshal())
	// Both puts are committed atomically in one LevelDB write batch.
	if err := os.db.Write(&os.batch, nil); err != nil {
		return err
	}
	// Wake any GetNext callers blocked waiting for new messages.
	os.newMessage.Broadcast()
	return nil
}
// LastSeen returns the id of the most recently added batch, or the sentinel
// id 0 when the tail is (unexpectedly) empty.
func (os *OutputStream) LastSeen() robust.Id {
	os.messagesMu.RLock()
	defer os.messagesMu.RUnlock()
	if len(os.lastseen.Messages) > 0 {
		return os.lastseen.Messages[0].Id
	}
	return robust.Id{Id: 0}
}
// Delete deletes all IRC output messages that were generated in reply to the
// input message with inputID.
func (os *OutputStream) Delete(inputID robust.Id) error {
	var key [8]byte
	os.messagesMu.Lock()
	defer os.messagesMu.Unlock()
	if inputID.Id == os.lastseen.Messages[0].Id.Id {
		// When deleting the last message, lastseen needs to be set to the
		// previous message to avoid blocking in GetNext() forever.
		i := os.db.NewIterator(nil, nil)
		defer i.Release()
		if !i.Last() {
			log.Panicf("outputstream LevelDB is empty, which is a BUG\n")
		}
		if !i.Prev() {
			// We should always keep the first message (RobustId{Id: 0}).
			log.Panicf("Delete() called on _all_ messages\n")
		}
		// The predecessor becomes the new tail: mark it as having no next
		// message and persist that change.
		mb := unmarshalMessageBatch(i.Value())
		os.lastseen = messageBatch{
			Messages: mb.Messages,
			NextID:   math.MaxUint64,
		}
		binary.BigEndian.PutUint64(key[:], uint64(os.lastseen.Messages[0].Id.Id))
		if err := os.db.Put(key[:], os.lastseen.marshal(), nil); err != nil {
			return err
		}
	}
	// Drop the cached copy before removing the row itself.
	os.cacheMu.Lock()
	delete(os.messagesCache, uint64(inputID.Id))
	os.cacheMu.Unlock()
	binary.BigEndian.PutUint64(key[:], uint64(inputID.Id))
	return os.db.Delete(key[:], nil)
}
// GetNext returns the next IRC output message after lastseen, even if lastseen
// was deleted in the meanwhile. In case there is no next message yet,
// GetNext blocks until it appears (or ctx is cancelled, in which case an
// empty slice is returned).
// GetNext(types.RobustId{Id: 0}) returns the first message.
func (os *OutputStream) GetNext(ctx context.Context, lastseen robust.Id) []Message {
	// GetNext handles 4 different cases:
	//
	// ┌──────────────────┬───────────┬───────────────────────────────────────┐
	// │ lastseen message │ nextid    │ outcome                               │
	// ├──────────────────┼───────────┼───────────────────────────────────────┤
	// │ exists           │ valid     │ return                                │
	// │ exists           │ not found │ binary search for more recent message │
	// │ exists           │ MaxInt64  │ block until next message              │
	// │ not found        │ /         │ binary search for more recent message │
	// └──────────────────┴───────────┴───────────────────────────────────────┘
	//
	// Note that binary search may fall-through to blocking in case it cannot
	// find a more recent message.
	os.messagesMu.RLock()
	current, ok := os.getUnlocked(uint64(lastseen.Id))
	if ok && current.NextID < math.MaxUint64 {
		next, okNext := os.getUnlocked(current.NextID)
		if okNext {
			os.messagesMu.RUnlock()
			return next.Messages
		}
		// NextID points to a deleted message, fall back to binary search.
		ok = false
	}
	if !ok {
		// Anything _newer_ than lastseen, i.e. the interval [lastseen.Id+1, ∞)
		var key [8]byte
		binary.BigEndian.PutUint64(key[:], uint64(lastseen.Id)+1)
		i := os.db.NewIterator(&util.Range{
			Start: key[:],
			Limit: nil,
		}, nil)
		defer i.Release()
		if i.First() {
			mb := unmarshalMessageBatch(i.Value())
			os.messagesMu.RUnlock()
			return mb.Messages
		}
		// There is no message which is more recent than lastseen, so just take
		// the last message and fallthrough into the code path that waits for
		// newer messages.
		i = os.db.NewIterator(nil, nil)
		defer i.Release()
		if !i.Last() {
			log.Panicf("outputstream LevelDB is empty, which is a BUG\n")
		}
		current = unmarshalMessageBatch(i.Value())
	}
	os.messagesMu.RUnlock()
	// Wait until a new message appears.
	os.messagesMu.Lock()
	for {
		// Re-read the current batch each pass: Add rewrites it with a valid
		// NextID once a successor exists.
		current, _ = os.getUnlocked(uint64(current.Messages[0].Id.Id))
		next, ok := os.getUnlocked(current.NextID)
		if ok {
			os.messagesMu.Unlock()
			return next.Messages
		}
		// Non-blocking context check so cancellation (see InterruptGetNext)
		// can end the wait.
		select {
		case <-ctx.Done():
			os.messagesMu.Unlock()
			return []Message{}
		default:
		}
		os.newMessage.Wait()
	}
}
// InterruptGetNext interrupts any running GetNext() calls so that they
// re-check their context and return if it has been cancelled. (Taking the
// lock before broadcasting guarantees waiters are actually in Wait().)
func (os *OutputStream) InterruptGetNext() {
	os.messagesMu.Lock()
	defer os.messagesMu.Unlock()
	os.newMessage.Broadcast()
}
// getUnlocked fetches the batch with the given id, first from the in-memory
// cache, then from LevelDB. Callers must hold messagesMu (read or write);
// the cache has its own lock. Returns (nil, false) when the id is absent.
func (os *OutputStream) getUnlocked(id uint64) (*messageBatch, bool) {
	var key [8]byte
	os.cacheMu.RLock()
	mb, ok := os.messagesCache[id]
	os.cacheMu.RUnlock()
	if ok {
		return mb, ok
	}
	binary.BigEndian.PutUint64(key[:], id)
	value, err := os.db.Get(key[:], nil)
	if err != nil {
		if err == leveldb.ErrNotFound {
			return nil, false
		}
		log.Panicf("Unexpected outputstream LevelDB error: %v\n", err)
	}
	mb = unmarshalMessageBatch(value)
	os.cacheMu.Lock()
	// A cache size of 1000 has empirically worked best so far.
	if len(os.messagesCache) > 1000 {
		// Just randomly delete entries to free up memory.
		// (Map iteration order is random, so this evicts arbitrary entries
		// until roughly half the cache remains.)
		for id := range os.messagesCache {
			delete(os.messagesCache, id)
			if len(os.messagesCache) < 500 {
				break
			}
		}
	}
	os.messagesCache[id] = mb
	os.cacheMu.Unlock()
	return mb, true
}
// Get returns the next IRC output message for 'input', if present.
func (os *OutputStream) Get(input robust.Id) ([]Message, bool) {
	os.messagesMu.RLock()
	defer os.messagesMu.RUnlock()
	mb, ok := os.getUnlocked(uint64(input.Id))
	if !ok {
		return nil, ok
	}
	return mb.Messages, true
}
|
package main
import "fmt"
// add returns the sum of all values in list; a nil or empty slice sums to 0.
func add(list []int) int {
	total := 0
	for i := range list {
		total += list[i]
	}
	return total
}
// nextnums demonstrates multiple return values: it yields the two integers
// immediately following num.
func nextnums(num int) (int, int) {
	first, second := num+1, num+2
	return first, second
}
// addm sums a variable number of int arguments (variadic form); calling it
// with no arguments yields 0.
func addm(vals ...int) int {
	total := 0
	for i := 0; i < len(vals); i++ {
		total += vals[i]
	}
	return total
}
// fact computes n! recursively; fact(0) is defined as 1. Negative inputs
// recurse without bound, matching the original behavior.
func fact(n int) int {
	if n == 0 {
		return 1
	}
	return n * fact(n-1)
}
// safeDiv divides i1 by i2, recovering from a division-by-zero panic.
// The deferred recover prints the panic value (or "<nil>" on a normal
// run) and lets the program continue; on a panic the zero value 0 is
// returned.
func safeDiv(i1, i2 int) int {
	defer func() {
		fmt.Println(recover())
	}()
	return i1 / i2
}
// main exercises the demo functions above: sums, multiple returns,
// variadics, defer, panic recovery, closures and recursion.
func main() {
	nums := []int{1, 2, 3, 4, 5}
	fmt.Println("Sum =", add(nums))
	a, b := nextnums(3)
	fmt.Println(a, b)
	fmt.Println("Sum =", addm(1, 2, 3, 4, 5))
	// Deferred call runs after the rest of main has finished.
	fmt.Println("Sum =", addm(1, 2, 3, 4, 5))
	fmt.Println(safeDiv(2, 0))
	fmt.Println(safeDiv(2, 1))
	// A closure capturing and mutating an enclosing variable.
	counter := 3
	double := func() int {
		counter *= 2
		return counter
	}
	fmt.Println(double())
	fmt.Println(fact(5))
}
|
package main
import (
"flag"
"github.com/sergei-svistunov/hlcup-checker/phase"
"log"
)
var (
argServer = flag.String("server", "http://localhost:8080", "Server address")
argDataDir = flag.String("data-dir", "./", "Directory with ammo and answers")
argPhase = flag.Int("phase", 1, "Number of phase")
argMaxErrors = flag.Int("max-errors", 0, "Number of phase")
)
// main validates the requested phase number and runs the corresponding
// check phase against the configured server.
func main() {
	flag.Parse()
	phaseID := *argPhase
	if phaseID < 1 || phaseID > 3 {
		log.Fatalf("Invalid phase ID: %d", phaseID)
	}
	checker := phase.New(*argDataDir, uint8(phaseID))
	checker.Check(*argServer, phase.CheckOpts{
		MaxErrors: *argMaxErrors,
	})
}
|
package task
import (
log "code.google.com/p/log4go"
"github.com/d-d-j/ddj_master/common"
"github.com/d-d-j/ddj_master/dto"
"github.com/d-d-j/ddj_master/node"
"github.com/d-d-j/ddj_master/reduce"
"time"
)
// job is a handler for a single REST request; it reports success.
type job func(dto.RestRequest) bool

// getJob maps a task type constant to the worker method handling it.
// Unknown task types are logged and answered with a no-op that always
// reports failure.
func (w *TaskWorker) getJob(taskType int32) job {
	switch taskType {
	case common.TASK_INSERT:
		return w.insert
	case common.TASK_SELECT:
		return w.selectTask
	case common.TASK_INFO:
		return w.info
	case common.TASK_FLUSH:
		return w.flush
	default:
		log.Error("Worker can't handle task type ", taskType)
		return func(dto.RestRequest) bool { return false }
	}
}
// insert handles an [insert] task: it picks a node, registers the task,
// encodes the insert request for the node's preferred device and sends it,
// then immediately acknowledges the client without waiting for the node
// (fire-and-forget). Returns true on success.
func (w *TaskWorker) insert(req dto.RestRequest) bool {
	log.Finest(w, " is processing [insert] task")
	// GET NODE FOR INSERT
	insertNode, err := w.getNodeForInsert()
	if err != nil {
		log.Warn(w, " has problem with getting node to insert, ", err)
		req.Response <- dto.NewRestResponse("No node connected", common.TASK_UNINITIALIZED, nil)
		return false
	}
	// CREATE TASK
	id := w.GetId()
	t := dto.NewTask(id, req, nil)
	log.Fine(w, " created new %s", t)
	TaskManager.AddChan <- t // add task to dictionary
	// CREATE MESSAGE
	message, err := t.MakeRequest(insertNode.PreferredDeviceId).Encode()
	if err != nil {
		// Fix: message previously read "encourage error".
		log.Error(w, " encountered error while encoding request - ", err)
		req.Response <- dto.NewRestResponse("Internal server error", t.Id, nil)
		return false
	}
	// SEND MESSAGE
	// Fix: the old format string had one %d verb but two arguments
	// (the task id and the node id); log both explicitly.
	log.Finest(w, " is sending message #%d to node #%d", id, insertNode.Id)
	insertNode.Incoming <- message
	TaskManager.DelChan <- t.Id
	// PASS RESPONSE TO CLIENT
	req.Response <- dto.NewRestResponse("", id, nil)
	return true
}
// selectTask handles a [select] task: it broadcasts the query to every
// connected node, gathers one response per node, reduces them with the
// task's aggregator and sends the aggregated result to the client.
// Returns true on success, false when no nodes are connected or the
// request could not be encoded/broadcast.
func (w *TaskWorker) selectTask(req dto.RestRequest) bool {
	log.Debug(w, " is processing [select] task")
	availableNodes := node.NodeManager.GetNodesLen()
	t, responseChan := CreateTaskForRequest(req, availableNodes, w.GetId())
	if availableNodes == 0 {
		log.Error("No nodes connected")
		req.Response <- dto.NewRestResponse("No nodes connected", t.Id, nil)
		return false
	}
	if !BroadcastTaskToAllNodes(t) {
		req.Response <- dto.NewRestResponse("Internal server error", t.Id, nil)
		return false
	}
	// One response per node is expected; req.Data must be a *dto.Query here.
	responses := parseResults(GatherAllResponses(availableNodes, responseChan), req.Data.(*dto.Query))
	TaskManager.DelChan <- t.Id
	log.Fine("Got %d responses", len(responses))
	aggregate := reduce.GetAggregator(t.AggregationType)
	responseToClient := aggregate(responses)
	req.Response <- dto.NewRestResponse("", t.Id, responseToClient)
	return true
}
// info handles an [info] task: it broadcasts a memory-info request to all
// nodes, parses the responses into dto.Info records and forwards them to
// the NodeManager. Returns true on success. Note that unlike selectTask
// and flush, failures here do not send an error response to the client.
func (w *TaskWorker) info(req dto.RestRequest) bool {
	log.Debug(w, " is processing [info] task")
	// TODO: Handle errors better
	availableNodes := node.NodeManager.GetNodesLen()
	t, responseChan := CreateTaskForRequest(req, availableNodes, w.GetId())
	if availableNodes == 0 {
		log.Error("No nodes connected")
		return false
	}
	if !BroadcastTaskToAllNodes(t) {
		return false
	}
	responses := parseResultsToInfos(GatherAllResponses(availableNodes, responseChan))
	if responses == nil {
		return false
	}
	TaskManager.DelChan <- t.Id
	node.NodeManager.InfoChan <- responses
	// TODO: SET NODE INFO IN NODES
	for i := 0; i < len(responses); i++ {
		log.Finest(w, "Get info %v", responses[i])
	}
	return true
}
// parseResultsToInfos decodes each node result's raw Data buffer into as
// many dto.Info records as it contains (fixed-size MemoryInfo slots),
// tagging every record with the originating node id. Records that fail to
// decode are logged and skipped.
func parseResultsToInfos(results []*dto.Result) []*dto.Info {
	infoSize := (&dto.MemoryInfo{}).Size()
	infos := make([]*dto.Info, 0, len(results))
	for _, result := range results {
		count := len(result.Data) / infoSize
		for slot := 0; slot < count; slot++ {
			var info dto.Info
			if err := info.MemoryInfo.Decode(result.Data[slot*infoSize:]); err != nil {
				log.Error("Problem with parsing data", err)
				continue
			}
			info.NodeId = result.NodeId
			infos = append(infos, &info)
		}
	}
	return infos
}
// flush handles a [flush] task: it tells every node to flush its buffered
// data, waits for all acknowledgements and then responds to the client.
// Returns true on success.
func (w *TaskWorker) flush(req dto.RestRequest) bool {
	log.Debug(w, " is processing flush task")
	availableNodes := node.NodeManager.GetNodesLen()
	t, responseChan := CreateTaskForRequest(req, availableNodes, w.GetId())
	if availableNodes == 0 {
		log.Error("No nodes connected")
		req.Response <- dto.NewRestResponse("No nodes connected", t.Id, nil)
		return false
	}
	if !BroadcastTaskToAllNodes(t) {
		req.Response <- dto.NewRestResponse("Internal server error", t.Id, nil)
		return false
	}
	// The responses carry no payload; we only wait for acknowledgements.
	GatherAllResponses(availableNodes, responseChan)
	TaskManager.DelChan <- t.Id
	log.Finest("Flush is done, sending response to client")
	req.Response <- dto.NewRestResponse("", t.Id, nil)
	return true
}
// CreateTaskForRequest builds a task for req with a response channel big
// enough to buffer one result per expected responder, registers it with
// the TaskManager and returns both the task and the channel.
func CreateTaskForRequest(req dto.RestRequest, numResponses int, taskId int64) (*dto.Task, chan *dto.Result) {
	results := make(chan *dto.Result, numResponses)
	task := dto.NewTask(taskId, req, results)
	log.Fine("Created new %s", task)
	// Register the task so incoming results can be routed back to it.
	TaskManager.AddChan <- task
	return task, results
}
// BroadcastTaskToAllNodes encodes t as a request addressed to every GPU
// and ships it to all connected nodes. It reports false when encoding
// fails (nothing is sent in that case).
func BroadcastTaskToAllNodes(t *dto.Task) bool {
	payload, err := t.MakeRequestForAllGpus().Encode()
	if err != nil {
		log.Error("Error while encoding request - ", err)
		return false
	}
	node.NodeManager.SendToAllNodes(payload)
	return true
}
// GatherAllResponses collects up to numResponses results from responseChan,
// giving up after a shared timeout and returning whatever arrived so far.
func GatherAllResponses(numResponses int, responseChan chan *dto.Result) []*dto.Result {
	responses := make([]*dto.Result, 0, numResponses)
	timeoutDuration := time.Duration(5000) * time.Second
	timeout := time.After(timeoutDuration)
	// WAIT FOR ALL RESPONSES
	// Fix: a bare `break` inside `select` only exits the select, not the
	// for loop, so on timeout the old code kept looping and then blocked
	// forever on responseChan. A labeled break exits the gather loop.
gather:
	for i := 0; i < numResponses; i++ {
		select {
		case response := <-responseChan:
			responses = append(responses, response)
		case <-timeout:
			log.Error("Timeout! Got %d/%d responses", i, numResponses)
			break gather
		}
	}
	return responses
}
|
package main
import (
"fmt"
)
// main wires the bot together: parse CLI params, initialize logging,
// start the bot listener in the background and run the HTTP server.
func main() {
	params := ParseFlags()
	if err := InitLogger(params.Debug, params.Verbose); err != nil {
		panic(fmt.Errorf("cannot init logger, exited with error: %v", err))
	}
	Logger.Debug("server params: %v", params)
	bot, err := NewBot()
	if err != nil {
		panic(fmt.Errorf("cannot create bot instance, exited with error: %v", err))
	}
	// The bot listens concurrently while the server blocks below.
	go bot.Listen()
	if err := InitServer(params); err != nil {
		panic(fmt.Errorf("cannot init server, exited with error: %v", err))
	}
}
|
package main
import "fmt"
// main demonstrates draining a closed buffered channel: after close, the
// remaining values are received until the channel reports closed.
func main() {
	ch := make(chan int, 10)
	for i := 0; i < 10; i++ {
		ch <- i
	}
	close(ch)
	for {
		v, ok := <-ch
		// A closed, drained channel reports ok == false.
		if !ok {
			fmt.Println("chan is close")
			break
		}
		fmt.Println(v)
	}
}
|
package main
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"regexp"
"runtime/debug"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/bndr/gopencils"
"github.com/docopt/docopt-go"
"github.com/op/go-logging"
)
var (
	// reStashURL matches a full Stash pull request URL and captures:
	// 1=base URL, 2=users/<u> or projects/<p>, 3=users|projects, 4=owner,
	// 5=repo slug, 6=pull request id.
	reStashURL = regexp.MustCompile(
		`(https?://.*/)` +
			`((users|projects)/([^/]+))` +
			`/repos/([^/]+)` +
			`/pull-requests/(\d+)`)
)
// configPath is the per-user rc file whose lines get prepended to argv.
var configPath = os.Getenv("HOME") + "/.config/ash/ashrc"

var logger = logging.MustGetLogger("main")

// tmpWorkDir holds the review file and debug log for this run; it is
// removed on clean exit but kept after a recovered panic.
var tmpWorkDir = ""

// panicState is set by the review() recover handler so main keeps tmpWorkDir.
var panicState = false

const logFormat = "%{time:15:04:05.00} [%{level:.4s}] %{message}"
const logFormatColor = "%{color}" + logFormat + "%{color:reset}"

const startUrlExample = "http[s]://<host>/(users|projects)/<project>/repos/<repo>/pull-requests/<id>"

// CmdLineArgs is a printable command line that knows how to redact passwords.
type CmdLineArgs string
// parseCmdLine parses the merged (config file + argv) command line with
// docopt. On a docopt usage error it prints the effective, password-redacted
// command line and exits 1; when docopt handled --help/--version itself
// (nil args, nil error) it exits 0.
func parseCmdLine(cmd []string) (map[string]interface{}, error) {
	help := `Atlassian Stash Reviewer.
Most convenient usage is specify pull request url and file you want to review:
ash ` + startUrlExample + ` review <file-to-review>
However, you can set up --url and --project flags in ~/.config/ash/ashrc file
and access pull requests by shorthand commands:
ash proj/mycoolrepo/1 review  # if --url is given
ash mycoolrepo/1 review  # if --url and --project is given
ash mycoolrepo ls-reviews  # --//--
Ash then open $EDITOR for commenting on pull request.
You can add comments by just specifying them after line you want to comment,
beginning with '# '.
You can delete comment by deleting it from file, and, of course, modify comment
you own by modifying it in the file.
After you finish your edits, just save file and exit from editor. Ash will
apply all changes made to the review.
If <file-name> is omitted, ash welcomes you to review the overview.
'ls' command can be used to list various things, including:
* files in pull request;
* opened/merged/declined pull requests for repo;
* repositories in specified project [NOT IMPLEMENTED];
* projects [NOT IMPLEMENTED];
Usage:
ash [options] inbox [-d] [(reviewer|author|all)]
ash [options] <project>/<repo> ls-reviews [-d] [(open|merged|declined)]
ash [options] <project>/<repo>/<pr> ls
ash [options] <project>/<repo>/<pr> (approve|decline|merge)
ash [options] <project>/<repo>/<pr> [review] [<file-name>] [-w]
ash -h | --help
ash -v | --version
Options:
-h --help  Show this help.
-v --version  Show version
-u --user=<user>  Stash username.
-p --pass=<pass>  Stash password. You want to set this flag in .ashrc file.
-d  Show descriptions for the listed PRs.
-l=<count>  Number of activities to retrieve. [default: 1000]
-w  Ignore whitespaces
-e=<editor>  Editor to use. This has priority over $EDITOR env var.
-i  Interactive mode. Ask before commiting changes.
--debug=<level>  Verbosity [default: 0].
--url=<url>  Stash server URL. http:// will be used if no protocol is
specified.
--input=<input>  File for loading diff in review file
--output=<output>  Output review to specified file. Editor is ignored.
--origin=<origin>  Do not download review from stash and use specified file
instead.
--project=<proj>  Use to specify default project that can be used when
serching pull requests. Can be set in either <project> or
<project>/<repo> format.
--no-color  Do not use color in output.
--reset-colors  Start with terminal style-reset sequence. Most useful with
vim.
`
	args, err := docopt.Parse(help, cmd, true, "1.3", false, false)
	// docopt returns *docopt.UserError when the command line does not match
	// any usage pattern; show the merged command line to help debugging.
	if _, ok := err.(*docopt.UserError); ok {
		fmt.Println()
		fmt.Println("Command line entered is invalid.")
		fmt.Println()
		fmt.Println(
			"Arguments were merged with config values and " +
				"the resulting command line is:")
		fmt.Printf("\t%s\n\n", CmdLineArgs(fmt.Sprintf("%s", cmd)).Redacted())
		os.Exit(1)
	}
	// nil args with nil error means docopt already handled --help/--version.
	if err == nil && args == nil {
		os.Exit(0)
	}
	return args, err
}
// main is the ash entry point: it merges argv with ~/.config/ash/ashrc,
// parses the command line, prepares a temp work dir and logging, builds
// the API client from --url/--user/--pass and dispatches to the inbox,
// repo or pull-request mode.
func main() {
	rawArgs := mergeArgsWithConfig(configPath)
	args, err := parseCmdLine(rawArgs)
	if err != nil {
		logger.Critical(err.Error())
	}
	if args["--reset-colors"].(bool) {
		// Terminal style-reset sequence; useful when launched from vim.
		fmt.Print("\x1b[0m")
	}
	tmpWorkDir, err = ioutil.TempDir(os.TempDir(), "ash.")
	if err != nil {
		logger.Critical(err.Error())
	}
	setupLogger(args)
	logger.Info("cmd line args are read from %s", configPath)
	logger.Debug("cmd line args: %s", CmdLineArgs(fmt.Sprintf("%s", rawArgs)))
	if args["--user"] == nil || args["--pass"] == nil {
		fmt.Println("--user and --pass should be specified.")
		os.Exit(1)
	}
	uri := parseUri(args)
	if !strings.HasPrefix(uri.base, "http") {
		uri.base = "http://" + uri.base
	}
	uri.base = strings.TrimSuffix(uri.base, "/")
	user := args["--user"].(string)
	pass := args["--pass"].(string)
	auth := gopencils.BasicAuth{user, pass}
	api := Api{uri.base, auth, nil}
	project := Project{&api, uri.project}
	repo := project.GetRepo(uri.repo)
	// Dispatch on which positional form docopt matched.
	switch {
	case args["<project>/<repo>/<pr>"] != nil:
		reviewMode(args, repo, uri.pr)
	case args["<project>/<repo>"] != nil:
		repoMode(args, repo)
	case args["inbox"].(bool):
		inboxMode(args, api)
	}
	if !panicState {
		// in case of everything is fine
		logger.Debug("removing %s", tmpWorkDir)
		os.RemoveAll(tmpWorkDir)
	}
}
// setupLogger wires up go-logging with two backends: an always-DEBUG file
// backend in the temp work dir plus a stderr backend whose level follows
// --debug (0=WARNING, 1=INFO, 2=DEBUG). --no-color strips the color
// escape sequences from the format.
func setupLogger(args map[string]interface{}) {
	debugLogFile, err := os.Create(tmpWorkDir + "/debug.log")
	if err != nil {
		logger.Critical(err.Error())
	}
	debugLog := logging.AddModuleLevel(
		logging.NewLogBackend(debugLogFile, "", 0))
	debugLog.SetLevel(logging.DEBUG, "main")
	logging.SetBackend(
		logging.MultiLogger(
			debugLog,
			logging.AddModuleLevel(logging.NewLogBackend(os.Stderr, "", 0))),
	)
	targetLogFormat := logFormatColor
	if args["--no-color"].(bool) {
		targetLogFormat = logFormat
	}
	logging.SetFormatter(logging.MustStringFormatter(targetLogFormat))
	logLevels := []logging.Level{
		logging.WARNING,
		logging.INFO,
		logging.DEBUG,
	}
	requestedLogLevel := int64(0)
	if args["--debug"] != nil {
		requestedLogLevel, _ = strconv.ParseInt(args["--debug"].(string), 10, 16)
	}
	// Only the last SetLevel call is effective; the loop in practice
	// selects logLevels[requestedLogLevel] for the global level.
	for _, lvl := range logLevels[:requestedLogLevel+1] {
		logging.SetLevel(lvl, "main")
	}
	// Restore DEBUG on the file backend after the global SetLevel calls.
	debugLog.SetLevel(logging.DEBUG, "main")
}
// inboxMode lists pull requests where the user participates. A specific
// role argument ("author"/"reviewer") narrows the listing to that role;
// otherwise both inboxes are fetched concurrently and printed in order.
func inboxMode(args map[string]interface{}, api Api) {
	roles := []string{"author", "reviewer"}
	// Narrow to a single role if one was explicitly requested.
	for _, role := range roles {
		if args[role].(bool) {
			roles = []string{role}
			break
		}
	}
	// Fire off all inbox requests before reading any results.
	channels := make(map[string]chan []PullRequest)
	for _, role := range roles {
		channels[role] = requestInboxFor(role, api)
	}
	writer := tabwriter.NewWriter(os.Stdout, 0, 8, 1, ' ', 0)
	for _, role := range roles {
		for _, pullRequest := range <-channels[role] {
			printPullRequest(writer, pullRequest, args["-d"].(bool), false)
		}
	}
	writer.Flush()
}
// requestInboxFor asynchronously fetches the inbox for the given role and
// returns an unbuffered channel on which the result is delivered exactly
// once. Errors are logged; the (possibly nil) result is sent regardless.
func requestInboxFor(role string, api Api) chan []PullRequest {
	out := make(chan []PullRequest)
	go func() {
		reviews, err := api.GetInbox(role)
		if err != nil {
			logger.Criticalf(
				"error retrieving inbox for '%s': %s",
				role,
				err.Error(),
			)
		}
		out <- reviews
	}()
	return out
}
// reviewMode handles all per-pull-request commands (ls, approve, decline,
// merge and the default review flow), extracting the relevant options
// from the docopt args map first.
func reviewMode(args map[string]interface{}, repo Repo, pr int64) {
	editor := os.Getenv("EDITOR")
	if args["-e"] != nil {
		// -e takes priority over $EDITOR.
		editor = args["-e"].(string)
	}
	path := ""
	if args["<file-name>"] != nil {
		path = args["<file-name>"].(string)
	}
	input := ""
	if args["--input"] != nil {
		input = args["--input"].(string)
	}
	output := ""
	if args["--output"] != nil {
		output = args["--output"].(string)
	}
	ignoreWhitespaces := false
	if args["-w"].(bool) {
		ignoreWhitespaces = true
	}
	activitiesLimit := args["-l"].(string)
	pullRequest := repo.GetPullRequest(pr)
	origin := ""
	if args["--origin"] != nil {
		origin = args["--origin"].(string)
	}
	interactiveMode := args["-i"].(bool)
	// NOTE(review): args["ls"] is compared to true via the implicit
	// `switch true`; docopt stores command flags as bools.
	switch {
	case args["ls"]:
		showFilesList(pullRequest)
	case args["approve"].(bool):
		approve(pullRequest)
	case args["decline"].(bool):
		decline(pullRequest)
	case args["merge"].(bool):
		merge(pullRequest)
	default:
		review(
			pullRequest, editor, path,
			origin, input, output,
			activitiesLimit, ignoreWhitespaces,
			interactiveMode,
		)
	}
}
// approve approves the pull request and reports the outcome, exiting
// with status 1 on failure.
func approve(pr PullRequest) {
	logger.Debug("Approving pr")
	if err := pr.Approve(); err != nil {
		logger.Criticalf("error approving: %s", err.Error())
		os.Exit(1)
	}
	fmt.Println("Pull request successfully approved")
}
// decline declines the pull request and reports the outcome, exiting
// with status 1 on failure.
func decline(pr PullRequest) {
	logger.Debug("Declining pr")
	if err := pr.Decline(); err != nil {
		logger.Criticalf("error declining: %s", err.Error())
		os.Exit(1)
	}
	fmt.Println("Pull request successfully declined")
}
// merge merges the pull request and reports the outcome, exiting with
// status 1 on failure.
func merge(pr PullRequest) {
	logger.Debug("Merging pr")
	if err := pr.Merge(); err != nil {
		logger.Criticalf("error merging: %s", err.Error())
		os.Exit(1)
	}
	fmt.Println("Pull request successfully merged")
}
// repoMode handles repository-level commands. Currently only ls-reviews
// is implemented: it lists pull requests in the requested state
// (open by default, or declined/merged).
func repoMode(args map[string]interface{}, repo Repo) {
	if args["ls-reviews"] != true {
		return
	}
	state := "open"
	if args["declined"] == true {
		state = "declined"
	} else if args["merged"] == true {
		state = "merged"
	}
	showReviewsInRepo(repo, state, args["-d"].(bool))
}
// showReviewsInRepo prints a tab-aligned summary of every pull request in
// the repo with the given state, optionally including descriptions.
func showReviewsInRepo(repo Repo, state string, withDesc bool) {
	reviews, err := repo.ListPullRequest(state)
	if err != nil {
		logger.Criticalf("can not list reviews: %s", err.Error())
	}
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, ' ', 0)
	for _, review := range reviews {
		printPullRequest(w, review, withDesc, true)
	}
	w.Flush()
}
// printPullRequest writes one tab-separated summary line for a pull
// request: project/repo/id slug, source branch, compact age, author,
// comment and approval counts, optional state and pending reviewers.
// With withDesc the description is appended on separate lines.
func printPullRequest(writer io.Writer, pr PullRequest, withDesc bool, printStatus bool) {
	slug := fmt.Sprintf("%s/%s/%d",
		strings.ToLower(pr.ToRef.Repository.Project.Key),
		pr.ToRef.Repository.Slug,
		pr.Id,
	)
	fmt.Fprintf(writer, "%-30s", slug)
	// Branch name is the last segment of the ref id (e.g. refs/heads/<name>).
	refSegments := strings.Split(pr.FromRef.Id, "/")
	branchName := refSegments[len(refSegments)-1]
	fmt.Fprintf(writer, "\t%s", branchName)
	// Render time since last update as a compact age: m/h/d/w/mon.
	relativeUpdateDate := time.Since(pr.UpdatedDate.AsTime())
	updatedAt := "now"
	switch {
	case relativeUpdateDate.Minutes() < 1:
		updatedAt = "now"
	case relativeUpdateDate.Hours() < 1:
		updatedAt = fmt.Sprintf("%dm", int(relativeUpdateDate.Minutes()))
	case relativeUpdateDate.Hours() < 24:
		updatedAt = fmt.Sprintf("%dh", int(relativeUpdateDate.Hours()))
	case relativeUpdateDate.Hours() < 24*7:
		updatedAt = fmt.Sprintf("%dd", int(relativeUpdateDate.Hours()/24))
	case relativeUpdateDate.Hours() < 24*7*4:
		updatedAt = fmt.Sprintf("%dw", int(relativeUpdateDate.Hours()/24/7))
	default:
		updatedAt = fmt.Sprintf(
			"%dmon", int(relativeUpdateDate.Hours()/24/7/4),
		)
	}
	fmt.Fprintf(writer,
		"\t%5s %s",
		updatedAt,
		pr.Author.User.Name,
	)
	// Count approvals and collect reviewers who have not approved yet.
	var approvedCount int
	var pendingReviewers []string
	for _, reviewer := range pr.Reviewers {
		if reviewer.Approved {
			approvedCount += 1
		} else {
			pendingReviewers = append(pendingReviewers, reviewer.User.Name)
		}
	}
	fmt.Fprintf(
		writer,
		"\t%3d +%d/%d",
		pr.Properties.CommentCount, approvedCount, len(pr.Reviewers),
	)
	if printStatus {
		fmt.Fprintf(writer, " %s", pr.State)
	}
	sort.Strings(pendingReviewers)
	fmt.Fprintf(writer, "\t%s\n", strings.Join(pendingReviewers, " "))
	if withDesc && pr.Description != "" {
		fmt.Fprintln(writer, fmt.Sprintf("\n---\n%s\n---", pr.Description))
	}
}
// parseUri extracts the Stash base URL, project, repo and pull request id
// from the parsed arguments. It accepts either a full pull request URL
// (matched against reStashURL) or the shorthand forms
// <project>/<repo>[/<pr>], filling in --url and --project defaults.
// On invalid input it prints a usage hint and exits 1.
//
// Fix: all strconv.ParseInt calls used bitSize 16, silently breaking for
// pull request ids above 32767 even though result.pr is an int64; they
// now use bitSize 64.
func parseUri(args map[string]interface{}) (
	result struct {
		base    string
		project string
		repo    string
		pr      int64
	},
) {
	uri := ""
	keyName := ""
	// should counts how many path segments the matched form requires.
	should := 0
	if args["<project>/<repo>/<pr>"] != nil {
		keyName = "<project>/<repo>/<pr>"
		uri = args[keyName].(string)
		should = 3
	}
	if args["<project>/<repo>"] != nil {
		keyName = "<project>/<repo>"
		uri = args[keyName].(string)
		should = 2
	}
	// First try the full URL form.
	matches := reStashURL.FindStringSubmatch(uri)
	if len(matches) != 0 {
		result.base = matches[1]
		result.project = matches[2]
		result.repo = matches[5]
		result.pr, _ = strconv.ParseInt(matches[6], 10, 64)
		return result
	}
	// Shorthand forms require --url for the base address.
	if args["--url"] == nil {
		fmt.Println(
			"In case of shorthand syntax --url should be specified")
		os.Exit(1)
	}
	if should == 0 {
		result.base = args["--url"].(string)
		return
	}
	matches = strings.Split(uri, "/")
	result.base = args["--url"].(string)
	if len(matches) == 2 && should == 3 && args["--project"] != nil {
		result.repo = matches[0]
		result.pr, _ = strconv.ParseInt(matches[1], 10, 64)
	}
	if args["--project"] != nil {
		result.project = args["--project"].(string)
	}
	if len(matches) == 1 && should == 2 {
		result.repo = matches[0]
	}
	if len(matches) == 2 && should == 2 {
		result.project = matches[0]
		result.repo = matches[1]
	}
	if len(matches) >= 3 && should == 3 {
		result.project = matches[0]
		result.repo = matches[1]
		result.pr, _ = strconv.ParseInt(matches[2], 10, 64)
	}
	enough := result.project != "" && result.repo != "" &&
		(result.pr != 0 || should == 2)
	if !enough {
		fmt.Println(
			"<pull-request> should be in either:\n" +
				"  - URL Format: " + startUrlExample + "\n" +
				"  - Shorthand format: " + keyName,
		)
		os.Exit(1)
	}
	// '~'/'%' prefixes denote personal (user) projects in Stash.
	if result.project[0] == '~' || result.project[0] == '%' {
		result.project = "users/" + result.project[1:]
	} else {
		result.project = "projects/" + result.project
	}
	return result
}
// editReviewInEditor opens the review file in the given editor, waits for
// it to exit, re-reads the possibly modified review and returns the change
// set relative to reviewToEdit. With an empty editor it prints the file
// name and exits 0 so the user can edit the file externally.
func editReviewInEditor(
	editor string, reviewToEdit *Review, fileToUse *os.File,
) ([]ReviewChange, error) {
	if editor == "" {
		fileToUse.Close()
		fmt.Printf("%s", fileToUse.Name())
		os.Exit(0)
	}
	logger.Debug("opening editor: %s %s", editor, fileToUse.Name())
	editorCmd := exec.Command(editor, fileToUse.Name())
	// Hand the user's terminal over to the editor process.
	editorCmd.Stdin = os.Stdin
	editorCmd.Stdout = os.Stdout
	editorCmd.Stderr = os.Stderr
	err := editorCmd.Run()
	if err != nil {
		logger.Fatal(err)
	}
	// Rewind so the modified contents can be parsed from the start.
	fileToUse.Sync()
	fileToUse.Seek(0, os.SEEK_SET)
	logger.Debug("reading modified review back")
	editedReview, err := ReadReview(fileToUse)
	if err != nil {
		return nil, err
	}
	logger.Debug("comparing old and new reviews")
	return reviewToEdit.Compare(editedReview), nil
}
// mergeArgsWithConfig builds the effective argument list: every non-empty,
// trimmed line of the config file first, followed by the real command-line
// arguments. A missing/unreadable config is logged and skipped.
func mergeArgsWithConfig(path string) []string {
	args := make([]string, 0)
	conf, err := ioutil.ReadFile(path)
	if err != nil {
		logger.Warning("can not access config: %s", err.Error())
	} else {
		for _, raw := range strings.Split(string(conf), "\n") {
			if trimmed := strings.TrimSpace(raw); trimmed != "" {
				args = append(args, trimmed)
			}
		}
	}
	return append(args, os.Args[1:]...)
}
// showFilesList prints every file touched by the pull request, with its
// change type and a +x/-x marker when the executable bit changed.
func showFilesList(pr PullRequest) {
	logger.Debug("showing list of files in PR")
	files, err := pr.GetFiles()
	if err != nil {
		logger.Error("error accessing Stash: %s", err.Error())
	}
	for _, file := range files {
		marker := ""
		switch {
		case file.DstExec == file.SrcExec:
			// executable bit unchanged: no marker
		case file.DstExec:
			marker = " +x"
		default:
			marker = " -x"
		}
		fmt.Printf("%7s %s%s\n", file.ChangeType, file.DstPath, marker)
	}
}
// review drives a full review session for the pull request: it obtains the
// review (from Stash, or from --origin/--input files), presents it via the
// editor or --output, diffs the edited version against the original and
// applies the resulting changes back to Stash.
func review(
	pr PullRequest, editor string,
	path string,
	origin string, input string, output string,
	activitiesLimit string,
	ignoreWhitespaces bool,
	interactiveMode bool,
) {
	var review *Review
	var err error
	if origin == "" {
		if path == "" {
			logger.Debug("downloading overview from Stash")
			review, err = pr.GetActivities(activitiesLimit)
		} else {
			logger.Debug("downloading review from Stash")
			review, err = pr.GetReview(path, ignoreWhitespaces)
		}
		if review == nil {
			fmt.Fprintln(os.Stderr, "Pull request not found.")
			os.Exit(1)
		}
		if len(review.changeset.Diffs) == 0 {
			fmt.Println("Specified file is not found in pull request.")
			os.Exit(1)
		}
	} else {
		logger.Debug("using origin review from file %s", origin)
		originFile, err := os.Open(origin)
		if err != nil {
			logger.Fatal(err)
		}
		defer originFile.Close()
		review, err = ReadReview(originFile)
		if err != nil {
			logger.Fatal(err)
		}
		if path == "" {
			review.isOverview = true
		}
	}
	if err != nil {
		logger.Fatal(err)
	}
	var changes []ReviewChange
	var fileToUse *os.File
	defer func() {
		if r := recover(); r != nil {
			panicState = true
			// NOTE(review): fileToUse may still be nil if the panic fires
			// before it is assigned — confirm printPanicMsg tolerates that.
			printPanicMsg(r, fileToUse.Name())
		}
	}()
	if input != "" {
		logger.Debug("reading review from file %s", input)
		fileToUse, err = os.Open(input)
		if err != nil {
			logger.Fatal(err)
		}
		editedReview, err := ReadReview(fileToUse)
		if err != nil {
			panic(err)
		}
		logger.Debug("comparing old and new reviews")
		changes = review.Compare(editedReview)
	} else {
		pullRequestInfo, err := pr.GetInfo()
		if err != nil {
			// Fix: this used fmt.Println with a %s verb, which printed the
			// literal format string and the error as two values; Printf
			// formats the message as intended.
			fmt.Printf("Error while obtaining pull request info: %s\n", err)
			os.Exit(1)
		}
		printFileName := false
		writeAndExit := false
		if output == "" {
			output = tmpWorkDir + "/review.diff"
		} else if output == "-" {
			// "-" means: write to the temp file and print its path.
			writeAndExit = true
			printFileName = true
			output = tmpWorkDir + "/review.diff"
		} else {
			writeAndExit = true
		}
		files, err := pr.GetFiles()
		if err != nil {
			logger.Fatal(err)
		}
		review.AddComment(files.String())
		fileToUse, err = WriteReviewToFile(
			pullRequestInfo.Links.Self[0].Href, review, output,
		)
		if err != nil {
			logger.Fatal(err)
		}
		if writeAndExit {
			if printFileName {
				fmt.Println(output)
			}
			os.Exit(0)
		}
		changes, err = editReviewInEditor(editor, review, fileToUse)
		if err != nil {
			panic(err)
		}
	}
	if len(changes) == 0 {
		logger.Info("no changes detected in review file (maybe a bug)")
		os.Exit(2)
	}
	if interactiveMode {
		// Show the pending changes and ask for confirmation.
		for i, change := range changes {
			fmt.Printf("%d. %s\n\n", i+1, change.String())
		}
		pendingAnswer := true
		for pendingAnswer {
			fmt.Print("\n---\nIs that what you want to do? [Yn] ")
			answer, _ := bufio.NewReader(os.Stdin).ReadString('\n')
			switch answer {
			case "n\n", "N\n":
				os.Exit(2)
			case "\n", "Y\n":
				pendingAnswer = false
			}
		}
	}
	logger.Debug("applying changes (%d)", len(changes))
	for i, change := range changes {
		fmt.Printf("(%d/%d) applying changes\n", i+1, len(changes))
		logger.Debug("change payload: %#v", change.GetPayload())
		err := pr.ApplyChange(change)
		if err != nil {
			logger.Criticalf("can not apply change: %s", err.Error())
		}
	}
}
// WriteReviewToFile creates (or truncates) the output file, writes the
// review into it with an ash modeline pointing at the given URL, syncs it
// to disk and returns the still-open file handle.
func WriteReviewToFile(
	url string, review *Review, output string,
) (
	*os.File, error,
) {
	file, err := os.Create(output)
	if err != nil {
		return nil, err
	}
	logger.Info("writing review to file: %s", file.Name())
	AddAshModeline(url, review)
	WriteReview(review, file)
	file.Sync()
	return file, nil
}
// Redacted returns the command line with any -p/--pass value replaced by
// a redacted placeholder, suitable for logging.
func (p CmdLineArgs) Redacted() interface{} {
	rePass := regexp.MustCompile(`(\s(-p|--pass)[\s=])([^ ]+)`)
	groups := rePass.FindStringSubmatch(string(p))
	if len(groups) == 0 {
		return string(p)
	}
	return rePass.ReplaceAllString(
		string(p),
		`$1`+logging.Redact(string(groups[3])))
}
// printPanicMsg prints a crash report for a recovered panic: the panic
// value, a stack trace, and the on-disk locations of the user's review
// file and this run's debug log, plus a pointer to the issue tracker.
func printPanicMsg(r interface{}, reviewFileName string) {
	fmt.Println()
	fmt.Println("PANIC:", r)
	fmt.Println()
	fmt.Println(string(debug.Stack()))
	fmt.Println("Well, program has crashed. This is probably a bug.")
	fmt.Println()
	fmt.Printf("All data you've entered are kept in the file:\n\t%s",
		reviewFileName)
	fmt.Println()
	fmt.Printf("Debug log of program execution can be found at:\n\t%s",
		tmpWorkDir+"/debug.log")
	fmt.Println()
	fmt.Printf("Feel free to open issue or PR on the:\n\t%s",
		"https://github.com/seletskiy/ash")
	fmt.Println()
}
|
package demo
import (
"github.com/go-xe2/xthrift/lib/go/xthrift"
)
// HelloServiceProcessor dispatches incoming calls to a HelloService
// implementation on top of xthrift's base processor.
type HelloServiceProcessor struct {
	*xthrift.TBaseProcessor
	handler HelloService // user-supplied service implementation
}
// NewHelloServiceProcessor builds a processor backed by the given handler
// and registers all HelloService method handlers on it.
func NewHelloServiceProcessor(handler HelloService) *HelloServiceProcessor {
	p := &HelloServiceProcessor{handler: handler}
	// The base processor needs a back-reference to the concrete processor.
	p.TBaseProcessor = xthrift.NewBaseProcessor(p)
	return p.registerFunctions()
}
// registerFunctions registers every HelloService method handler on the
// processor and returns it for chaining from the constructor.
func (p *HelloServiceProcessor) registerFunctions() *HelloServiceProcessor {
	p.RegisterFunction("SayHello", newHelloServiceSayHello(p.handler))
	return p
}
|
package main
import "fmt"
// main demonstrates basic composite types: arrays, slices, maps and a
// buffered channel.
func main() {
	fmt.Println("22")
	arr := [...]int{1, 2, 4, 12, 44}
	fmt.Println(arr[3])
	arr2 := [9]int{3, 3, 3, 4, 5, 6}
	fmt.Println(arr2[3])
	// A slice view over one element of the array.
	slice := arr2[1:2]
	fmt.Println(slice)
	mm := map[int]string{1: "a", 2: "b"}
	fmt.Println(mm[1])
	// The comma-ok form distinguishes a missing key from a zero value.
	e, ok := mm[4]
	fmt.Println(e)
	fmt.Println(ok)
	ch := make(chan int, 5)
	ch <- 3
	value := <-ch
	fmt.Println(value)
}
|
package db
import (
"github.com/jinzhu/gorm"
)
// Open opens database and returns DB object.
func Open() (*gorm.DB, error) {
	// gorm.Open already returns the (db, err) pair in the right shape;
	// no intermediate error branch is needed.
	return gorm.Open("sqlite3", "database/test.sqlite3")
}
|
package main
import (
"bufio"
"flag"
"os"
"path/filepath"
"regexp"
"sync"
"time"
log "github.com/sirupsen/logrus"
)
const (
	// WhoisRequestPerSecond caps the rate of outbound whois lookups.
	WhoisRequestPerSecond = 128
	// WhoisRequestPeriod is the minimum delay between two whois lookups,
	// derived from the per-second cap.
	WhoisRequestPeriod = time.Second / time.Duration(WhoisRequestPerSecond)
)
// main parses nginx access logs matched by -filepath, counts catalog
// git-upload-pack requests per client IP, enriches unseen IPs via
// rate-limited whois lookups, and logs aggregate results.
func main() {
	var file_path = flag.String("filepath", "/var/log/nginx/access-*.log.1", "Log files to analyze, wildcard allowed between quotes.")
	var _ = flag.String("period", "72h", "period of time (past to now) to analyze") // declared but not used yet
	flag.Parse()
	files, err := filepath.Glob(*file_path)
	if err != nil {
		log.Fatal(err)
	}
	// log_format main '[$time_local] $http_host $remote_addr $http_x_forwarded_for '
	//                 '"$request" $status $body_bytes_sent "$http_referer" '
	//                 '"$http_user_agent" $request_time $upstream_response_time';
	logline, err := regexp.Compile("^\\[([^\\]]+)\\] ([^ ]+) ([^ ]+) ([^ ]+) \"([^\"]*)\" ([^ ]+) ([^ ]+) \"([^\"]*)\" \"([^\"]*)\" ([^ ]+) ([^ ]+)")
	if err != nil {
		log.Fatal(err)
	}
	// GET /rancher-catalog.git/info/refs?service=git-upload-pack HTTP/1.1
	uploadpack, err := regexp.Compile("^GET /(rancher)-catalog(.git)?/info/refs\\?service=git-upload-pack")
	if err != nil {
		log.Fatal(err)
	}
	stats := NewStats()
	stats.StartLogging()
	// Getting stats for every file
	for _, f := range files {
		log.Info("Analyzing ", f)
		file, err := os.Open(f)
		if err != nil {
			log.Fatal(err)
		}
		// 64Kb buffer should be big enough
		scanner := bufio.NewScanner(file)
		var wg sync.WaitGroup
		for scanner.Scan() {
			line := scanner.Text()
			submatches := logline.FindStringSubmatch(line)
			stats.linesParsed++
			if len(submatches) != 12 {
				log.Warn(line)
				continue
			}
			logTime, err := time.Parse("2/Jan/2006:15:04:05 -0700", submatches[1])
			if err != nil {
				log.Warn(err)
				stats.linesSkipped++
				continue
			}
			// ensure the request is a catalog git-upload-pack
			if !uploadpack.MatchString(submatches[5]) {
				stats.linesSkipped++
				continue
			}
			// do a rate-limited whois lookup for IPs we haven't seen yet
			ip := submatches[4]
			if _, ok := stats.ipRequests[ip]; !ok {
				if w := stats.FindWhoisRecord(ip); w == nil {
					wg.Add(1)
					go func() {
						// NOTE(review): whois presumably calls wg.Done, and
						// stats is mutated both here and in the main loop —
						// verify Stats is safe for concurrent use.
						w, err := whois(ip, &wg)
						stats.whoisRequests++
						if err == nil {
							stats.AddWhoisRecord(w)
						}
					}()
					time.Sleep(WhoisRequestPeriod)
				}
			}
			stats.ipRequests[ip] = append(stats.ipRequests[ip], logTime)
		}
		// Fix: the old code re-checked the stale (always nil) error from
		// os.Open after the loop; what needs checking is scanner.Err(),
		// which reports read failures swallowed by Scan().
		if err := scanner.Err(); err != nil {
			log.Fatal(err)
		}
		file.Close()
		wg.Wait()
	}
	stats.SaveWhoisRecords()
	stats.StopLogging()
	stats.LogResult()
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller_test
import (
"fmt"
"log"
"github.com/kubernetes-sigs/kubebuilder/pkg/controller"
"github.com/kubernetes-sigs/kubebuilder/pkg/controller/types"
"github.com/kubernetes-sigs/kubebuilder/pkg/inject/run"
)
// ExampleGenericController_WatchChannel demonstrates driving a
// GenericController from a channel of "namespace/name" keys instead of an
// informer: every key sent on podkeys triggers a Reconcile call.
func ExampleGenericController_WatchChannel() {
	podkeys := make(chan string)
	c := &controller.GenericController{
		Reconcile: func(key types.ReconcileKey) error {
			fmt.Printf("Reconciling Pod %s\n", key)
			return nil
		},
	}
	if err := c.WatchChannel(podkeys); err != nil {
		log.Fatalf("%v", err)
	}
	// Start informers/controllers, then feed a key to reconcile.
	controller.AddController(c)
	controller.RunInformersAndControllers(run.CreateRunArguments())
	podkeys <- "namespace/pod-name"
}
|
package config
import (
"log"
"net"
"github.com/spf13/viper"
)
// Config holds the global server-wide config.
type Config struct {
	HostIP  string `mapstructure:"host_ip"`  // Auto-detected via getOutboundIP4 when empty (see LoadConfig).
	BinPath string `mapstructure:"bin_path"` // Path to server binaries (not used in this file; confirm in callers).
	DevMode bool
	DevModeOptions DevModeOptions
	Discord        Discord
	Database       Database
	Launcher       Launcher
	Sign           Sign
	Channel        Channel
	Entrance       Entrance
}
// DevModeOptions holds various debug/temporary options for use while developing Erupe.
type DevModeOptions struct {
	CleanDB             bool // Automatically wipes the DB on server reset.
	MaxLauncherHR       bool // Sets the HR returned in the launcher to HR9 so that you can join non-beginner worlds.
	FixedStageID        bool // Causes all move_stage to use the ID sl1Ns200p0a0u0 to get you into all stages
	LogOutboundMessages bool // Log all messages sent to the clients
	SaveDumps           SaveDumpOptions
}

// SaveDumpOptions controls dumping of save data to disk
// (defaults set in LoadConfig: enabled, "savedata" directory).
type SaveDumpOptions struct {
	Enabled   bool   // Whether dumps are written at all.
	OutputDir string // Directory dumps are written to.
}
// Discord holds the discord integration config.
type Discord struct {
	Enabled   bool   // Master switch for the integration.
	BotToken  string // Bot authentication token.
	ChannelID string // Channel the bot uses (presumably; confirm in bot code).
}

// Database holds the postgres database config.
type Database struct {
	Host     string
	Port     int
	User     string
	Password string
	Database string // Database name to connect to.
}

// Launcher holds the launcher server config.
type Launcher struct {
	Port                     int
	UseOriginalLauncherFiles bool
}

// Sign holds the sign server config.
type Sign struct {
	Port int
}
// Channel holds the channel server config.
type Channel struct {
	Port int
}

// Entrance holds the entrance server config.
type Entrance struct {
	Port    uint16
	Entries []EntranceServerInfo // One entry per advertised server.
}

// EntranceServerInfo represents an entry in the serverlist.
type EntranceServerInfo struct {
	IP     string
	Unk2   uint16
	Type   uint8  // Server type. 0=?, 1=open, 2=cities, 3=newbie, 4=bar
	Season uint8  // Server activity. 0 = green, 1 = orange, 2 = blue
	Unk6   uint8  // Something to do with server recommendation on 0, 3, and 5.
	Name   string // Server name, 66 byte null terminated Shift-JIS(JP) or Big5(TW).
	// 4096(PC, PS3/PS4)?, 8258(PC, PS3/PS4)?, 8192 == nothing?
	// THIS ONLY EXISTS IF Binary8Header.type == "SV2", NOT "SVR"!
	AllowedClientFlags uint32
	Channels           []EntranceChannelInfo
}

// EntranceChannelInfo represents an entry in a server's channel list.
// Most fields are not yet reverse-engineered (Unk*).
type EntranceChannelInfo struct {
	Port           uint16
	MaxPlayers     uint16
	CurrentPlayers uint16
	Unk4           uint16
	Unk5           uint16
	Unk6           uint16
	Unk7           uint16
	Unk8           uint16
	Unk9           uint16
	Unk10          uint16
	Unk11          uint16
	Unk12          uint16
	Unk13          uint16
}
// getOutboundIP4 reports this machine's preferred outbound IPv4 address.
// Dialing UDP performs no handshake: it only asks the OS which local
// address would be used to reach the target, so no packet is sent.
// From https://stackoverflow.com/a/37382208
func getOutboundIP4() net.IP {
	conn, err := net.Dial("udp4", "8.8.8.8:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	return conn.LocalAddr().(*net.UDPAddr).IP.To4()
}
// LoadConfig loads the given config toml file.
// It reads "config.*" from the working directory via viper, seeds the
// SaveDumps default, and unmarshals into a Config. If host_ip is unset
// it falls back to this machine's preferred outbound IPv4 address.
func LoadConfig() (*Config, error) {
	viper.SetConfigName("config")
	viper.AddConfigPath(".")
	// Default: save-dump debugging enabled, writing under ./savedata.
	viper.SetDefault("DevModeOptions.SaveDumps", SaveDumpOptions{
		Enabled:   true,
		OutputDir: "savedata",
	})
	err := viper.ReadInConfig()
	if err != nil {
		return nil, err
	}
	c := &Config{}
	err = viper.Unmarshal(c)
	if err != nil {
		return nil, err
	}
	if c.HostIP == "" {
		// getOutboundIP4 already returns a To4() address; the extra To4 is a no-op.
		c.HostIP = getOutboundIP4().To4().String()
	}
	return c, nil
}
|
// This file was generated for SObject EntityDefinition, API Version v43.0 at 2018-07-30 03:47:50.05474474 -0400 EDT m=+36.398716954
package sobjects
import (
"fmt"
"strings"
)
// EntityDefinition mirrors the Salesforce EntityDefinition SObject
// (API v43.0). Field names map one-to-one to Salesforce fields; every
// `force` tag marks the field as omitted when empty on serialization.
// NOTE: this file is code-generated — avoid hand-editing the field list.
type EntityDefinition struct {
	BaseSObject
	DataStewardId                string `force:",omitempty"`
	DefaultCompactLayoutId       string `force:",omitempty"`
	DetailUrl                    string `force:",omitempty"`
	DeveloperName                string `force:",omitempty"`
	DurableId                    string `force:",omitempty"`
	EditDefinitionUrl            string `force:",omitempty"`
	EditUrl                      string `force:",omitempty"`
	ExternalSharingModel         string `force:",omitempty"`
	HasSubtypes                  bool   `force:",omitempty"`
	HelpSettingPageName          string `force:",omitempty"`
	HelpSettingPageUrl           string `force:",omitempty"`
	Id                           string `force:",omitempty"`
	InternalSharingModel         string `force:",omitempty"`
	IsApexTriggerable            bool   `force:",omitempty"`
	IsAutoActivityCaptureEnabled bool   `force:",omitempty"`
	IsCompactLayoutable          bool   `force:",omitempty"`
	IsCustomSetting              bool   `force:",omitempty"`
	IsCustomizable               bool   `force:",omitempty"`
	IsDeprecatedAndHidden        bool   `force:",omitempty"`
	IsEverCreatable              bool   `force:",omitempty"`
	IsEverDeletable              bool   `force:",omitempty"`
	IsEverUpdatable              bool   `force:",omitempty"`
	IsFeedEnabled                bool   `force:",omitempty"`
	IsIdEnabled                  bool   `force:",omitempty"`
	IsLayoutable                 bool   `force:",omitempty"`
	IsMruEnabled                 bool   `force:",omitempty"`
	IsProcessEnabled             bool   `force:",omitempty"`
	IsQueryable                  bool   `force:",omitempty"`
	IsReplicateable              bool   `force:",omitempty"`
	IsRetrieveable               bool   `force:",omitempty"`
	IsSearchLayoutable           bool   `force:",omitempty"`
	IsSearchable                 bool   `force:",omitempty"`
	IsSubtype                    bool   `force:",omitempty"`
	IsTriggerable                bool   `force:",omitempty"`
	IsWorkflowEnabled            bool   `force:",omitempty"`
	KeyPrefix                    string `force:",omitempty"`
	Label                        string `force:",omitempty"`
	LastModifiedById             string `force:",omitempty"`
	LastModifiedDate             string `force:",omitempty"`
	MasterLabel                  string `force:",omitempty"`
	NamespacePrefix              string `force:",omitempty"`
	NewUrl                       string `force:",omitempty"`
	PluralLabel                  string `force:",omitempty"`
	PublisherId                  string `force:",omitempty"`
	QualifiedApiName             string `force:",omitempty"`
	RecordTypesSupported         string `force:",omitempty"`
	RunningUserEntityAccessId    string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type, used
// when building REST resource paths and SOQL queries.
func (t *EntityDefinition) ApiName() string {
	return "EntityDefinition"
}
// String renders the EntityDefinition and every field as a multi-line,
// tab-indented, human-readable dump. The header uses Name from the
// embedded BaseSObject.
func (t *EntityDefinition) String() string {
	var b strings.Builder
	fmt.Fprintf(&b, "EntityDefinition #%s - %s\n", t.Id, t.Name)
	fmt.Fprintf(&b, "\tDataStewardId: %v\n", t.DataStewardId)
	fmt.Fprintf(&b, "\tDefaultCompactLayoutId: %v\n", t.DefaultCompactLayoutId)
	fmt.Fprintf(&b, "\tDetailUrl: %v\n", t.DetailUrl)
	fmt.Fprintf(&b, "\tDeveloperName: %v\n", t.DeveloperName)
	fmt.Fprintf(&b, "\tDurableId: %v\n", t.DurableId)
	fmt.Fprintf(&b, "\tEditDefinitionUrl: %v\n", t.EditDefinitionUrl)
	fmt.Fprintf(&b, "\tEditUrl: %v\n", t.EditUrl)
	fmt.Fprintf(&b, "\tExternalSharingModel: %v\n", t.ExternalSharingModel)
	fmt.Fprintf(&b, "\tHasSubtypes: %v\n", t.HasSubtypes)
	fmt.Fprintf(&b, "\tHelpSettingPageName: %v\n", t.HelpSettingPageName)
	fmt.Fprintf(&b, "\tHelpSettingPageUrl: %v\n", t.HelpSettingPageUrl)
	fmt.Fprintf(&b, "\tId: %v\n", t.Id)
	fmt.Fprintf(&b, "\tInternalSharingModel: %v\n", t.InternalSharingModel)
	fmt.Fprintf(&b, "\tIsApexTriggerable: %v\n", t.IsApexTriggerable)
	fmt.Fprintf(&b, "\tIsAutoActivityCaptureEnabled: %v\n", t.IsAutoActivityCaptureEnabled)
	fmt.Fprintf(&b, "\tIsCompactLayoutable: %v\n", t.IsCompactLayoutable)
	fmt.Fprintf(&b, "\tIsCustomSetting: %v\n", t.IsCustomSetting)
	fmt.Fprintf(&b, "\tIsCustomizable: %v\n", t.IsCustomizable)
	fmt.Fprintf(&b, "\tIsDeprecatedAndHidden: %v\n", t.IsDeprecatedAndHidden)
	fmt.Fprintf(&b, "\tIsEverCreatable: %v\n", t.IsEverCreatable)
	fmt.Fprintf(&b, "\tIsEverDeletable: %v\n", t.IsEverDeletable)
	fmt.Fprintf(&b, "\tIsEverUpdatable: %v\n", t.IsEverUpdatable)
	fmt.Fprintf(&b, "\tIsFeedEnabled: %v\n", t.IsFeedEnabled)
	fmt.Fprintf(&b, "\tIsIdEnabled: %v\n", t.IsIdEnabled)
	fmt.Fprintf(&b, "\tIsLayoutable: %v\n", t.IsLayoutable)
	fmt.Fprintf(&b, "\tIsMruEnabled: %v\n", t.IsMruEnabled)
	fmt.Fprintf(&b, "\tIsProcessEnabled: %v\n", t.IsProcessEnabled)
	fmt.Fprintf(&b, "\tIsQueryable: %v\n", t.IsQueryable)
	fmt.Fprintf(&b, "\tIsReplicateable: %v\n", t.IsReplicateable)
	fmt.Fprintf(&b, "\tIsRetrieveable: %v\n", t.IsRetrieveable)
	fmt.Fprintf(&b, "\tIsSearchLayoutable: %v\n", t.IsSearchLayoutable)
	fmt.Fprintf(&b, "\tIsSearchable: %v\n", t.IsSearchable)
	fmt.Fprintf(&b, "\tIsSubtype: %v\n", t.IsSubtype)
	fmt.Fprintf(&b, "\tIsTriggerable: %v\n", t.IsTriggerable)
	fmt.Fprintf(&b, "\tIsWorkflowEnabled: %v\n", t.IsWorkflowEnabled)
	fmt.Fprintf(&b, "\tKeyPrefix: %v\n", t.KeyPrefix)
	fmt.Fprintf(&b, "\tLabel: %v\n", t.Label)
	fmt.Fprintf(&b, "\tLastModifiedById: %v\n", t.LastModifiedById)
	fmt.Fprintf(&b, "\tLastModifiedDate: %v\n", t.LastModifiedDate)
	fmt.Fprintf(&b, "\tMasterLabel: %v\n", t.MasterLabel)
	fmt.Fprintf(&b, "\tNamespacePrefix: %v\n", t.NamespacePrefix)
	fmt.Fprintf(&b, "\tNewUrl: %v\n", t.NewUrl)
	fmt.Fprintf(&b, "\tPluralLabel: %v\n", t.PluralLabel)
	fmt.Fprintf(&b, "\tPublisherId: %v\n", t.PublisherId)
	fmt.Fprintf(&b, "\tQualifiedApiName: %v\n", t.QualifiedApiName)
	fmt.Fprintf(&b, "\tRecordTypesSupported: %v\n", t.RecordTypesSupported)
	fmt.Fprintf(&b, "\tRunningUserEntityAccessId: %v\n", t.RunningUserEntityAccessId)
	return b.String()
}
// EntityDefinitionQueryResponse is the SOQL query envelope whose
// "records" payload deserializes into EntityDefinition values.
type EntityDefinitionQueryResponse struct {
	BaseQuery
	Records []EntityDefinition `json:"Records" force:"records"`
}
|
package internal
import (
"encoding/json"
"io"
"io/ioutil"
"log"
"marauders-map-client-desktop/tools"
"net/http"
"os"
"path"
)
// ==========================================================
// Observer for sending to server files
// ==========================================================
// FileCmdObserver reacts to "file" commands from the server: it uploads
// requested local files via sendFileCmd, downloads URLs into the
// watchtower's downloads folder, and reports outcomes through
// respondServerCmd.
type FileCmdObserver struct {
	sendFileCmd      *SendFileCommand
	watchtower       *Watchtower
	respondServerCmd *RespondServerCommand
}
// execute parses a JSON command and dispatches it.
// Only commands with Cmd == "file" are handled; "send" uploads the listed
// local files, "download" fetches each listed URL.
// Fixes: removed the redundant `break` statements (Go switch cases do not
// fall through) and corrected the "Unmarshing" typo in the error log.
func (o *FileCmdObserver) execute(string_json string) {
	var req FilesRequest
	if err := json.Unmarshal([]byte(string_json), &req); err != nil {
		log.Println("ERROR Unmarshaling: ", err)
		return
	}
	if req.Cmd != "file" {
		return
	}
	log.Println("FileCmdObserver: command received:", string_json)
	switch req.Action {
	case "send":
		o.sendFiles(req, req.Files)
	case "download":
		for _, url := range req.Files {
			o.downloadFile(url)
		}
	}
}
// sendFiles POSTs each existing file to the server and notifies the
// server of the outcome. On the first upload or notification error the
// loop stops (remaining files are skipped), matching the original
// behavior.
// Fixes: the original ran `defer res.Body.Close()` BEFORE checking err —
// a nil response on error would panic, and the defers all piled up until
// function return. The body is now closed explicitly per iteration.
func (o *FileCmdObserver) sendFiles(req FilesRequest, files []string) {
	log.Printf("Sending %d files", len(files))
	for _, f := range files {
		if !tools.FileExists(f) {
			log.Printf("File requested '%s' doesn't exist\n", f)
			continue
		}
		// POST file
		res, err := o.sendFileCmd.Send(f)
		if err != nil {
			if res != nil {
				res.Body.Close()
			}
			// Prepare ERROR response
			filenotification := FileNotification{}
			filenotification.Reqid = req.Reqid
			filenotification.Err = true
			filenotification.Errmsg = err.Error()
			filenotification.Typ = "file"
			o.respondServerCmd.SendFileNotification(filenotification)
			break
		}
		// Prepare OK response: the response body carries the stored file id.
		data, _ := ioutil.ReadAll(res.Body)
		res.Body.Close()
		shotId := string(data)
		filenotification := FileNotification{}
		filenotification.Reqid = req.Reqid
		filenotification.Err = false
		filenotification.Id = shotId
		filenotification.Typ = "file"
		filenotification.Filename = f
		errr := o.respondServerCmd.SendFileNotification(filenotification)
		// TODO: delete this
		if errr != nil {
			strres, _ := json.Marshal(filenotification)
			log.Println("ScreenshotCmdObserver: responded: ", string(strres))
			break
		}
		log.Println("Service notified about file")
	}
}
// downloadFile fetches url and stores it in the watchtower's downloads
// folder, named after the last URL path segment.
// Fixes: uses the injected o.watchtower (the constructor wires this field
// precisely for that purpose) instead of a package-level `watchtower`,
// and the io.Copy error is now checked before logging success.
func (o *FileCmdObserver) downloadFile(url string) error {
	log.Println("Downloading: ", url)
	downloadsfolder := o.watchtower.GetAbsoluteDownloadsFolderPath()
	filename := tools.ExtractFileNameFromURL(url)
	// Absolute filePath
	finalFilePath := path.Join(downloadsfolder, filename)
	// Get the data
	resp, err := http.Get(url)
	if err != nil {
		log.Println("ERROR downloading:", err)
		return err
	}
	defer resp.Body.Close()
	// Create the file
	out, err := os.Create(finalFilePath)
	if err != nil {
		log.Println("ERROR saving downloaded file:", err)
		return err
	}
	defer out.Close()
	// Write the body to file
	if _, err = io.Copy(out, resp.Body); err != nil {
		log.Println("ERROR saving downloaded file:", err)
		return err
	}
	log.Printf("File %s downloaded\n", url)
	return nil
}
// NewFileCmdObserver wires up a FileCmdObserver with its upload command,
// the watchtower (provides the downloads folder), and the command used
// to send result notifications back to the server.
func NewFileCmdObserver(sendFileCmd *SendFileCommand, watchtower *Watchtower, respondServerCmd *RespondServerCommand) *FileCmdObserver {
	return &FileCmdObserver{
		sendFileCmd:      sendFileCmd,
		watchtower:       watchtower,
		respondServerCmd: respondServerCmd,
	}
}
|
package config
import (
"github.com/corpix/relay/transport"
)
// Channel names a logical relay channel and the transport it rides on.
// Transport refers to a key of Config.Transport.
type Channel struct {
	Name      string `json:"name" yaml:"name"`
	Transport string `json:"transport" yaml:"transport"`
}

// Relaying describes one relay rule: the set of channel names involved
// and the relaying mode applied to them.
type Relaying struct {
	Channels []string `json:"channels" yaml:"channels"`
	Mode     string   `json:"mode" yaml:"mode"`
}

// Config is the top-level relay configuration: named transports, named
// channels, and the list of relaying rules connecting them.
type Config struct {
	Transport map[string]transport.Config `json:"transport" yaml:"transport"`
	Channel   map[string]Channel          `json:"channel" yaml:"channel"`
	Relaying  []Relaying                  `json:"relaying" yaml:"relaying"`
}
// New returns a Config whose collection fields are all initialized to
// empty (non-nil) values, ready to be populated.
func New() *Config {
	cfg := new(Config)
	cfg.Transport = make(map[string]transport.Config)
	cfg.Channel = make(map[string]Channel)
	cfg.Relaying = make([]Relaying, 0)
	return cfg
}
|
package domain
import (
"errors"
"time"
)
// Note is a user-owned note as stored in the database and exposed over
// the JSON API. Title and Body are required on binding; DateCreated is
// set by the persistence layer.
type Note struct {
	Id          int       `json:"id" db:"id"`
	Title       string    `json:"title" db:"title" binding:"required"`
	Body        string    `json:"body" db:"body" binding:"required"`
	DateCreated time.Time `json:"date_created" db:"date_created"`
	UserId      int       `json:"user_id" db:"user_id"`
}
// UpdateNoteInput carries optional replacement values for a note's title
// and body; a nil pointer means "leave that field unchanged".
type UpdateNoteInput struct {
	Title *string `json:"title"`
	Body  *string `json:"body"`
}

// Validate rejects an update request that would change nothing.
func (i UpdateNoteInput) Validate() error {
	if i.Title != nil || i.Body != nil {
		return nil
	}
	return errors.New("update structure has no values")
}
|
package core
import "context"
// KV is a single key/value pair, used for batch writes via BatchSet.
type KV struct {
	K string
	V interface{}
}

// stateAdapter abstracts a connectable key/value state store.
// Implementations are expected to be connected (Connect) before the
// Has/Get/Set/BatchSet/Delete operations are used; Get writes the stored
// value into dest.
type stateAdapter interface {
	IsConnected() bool
	Connect(ctx context.Context) error
	Disconnect()
	Has(ctx context.Context, key string) bool
	Get(ctx context.Context, key string, dest interface{}) error
	Set(ctx context.Context, key string, value interface{}) error
	BatchSet(ctx context.Context, kvs []KV) error
	Delete(ctx context.Context, key string) error
}
|
package controllers
import (
"github.com/gin-gonic/gin"
"github.com/hernancabral/Library/api/models"
"net/http"
)
// GetAuthors handles GET requests for the full author list.
// On success it responds 200 with {"status": 200, "response": [...]};
// on lookup failure it responds 500 with the error recorded under the
// "no_authors" key.
// NOTE(review): errList appears to be package-level shared state, so
// concurrent requests would race on it — confirm handler serialization.
func (server *Server) GetAuthors(c *gin.Context) {
	// clear previous error if any
	errList = map[string]string{}
	authors, err := models.FindAllAuthors(server.DB)
	if err != nil {
		errList["no_authors"] = "no authors found"
		c.JSON(http.StatusInternalServerError, gin.H{
			"status": http.StatusInternalServerError,
			"error":  errList,
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"status":   http.StatusOK,
		"response": authors,
	})
}
|
package utils
import (
"errors"
"io"
"io/ioutil"
"log"
"net/http"
"github.com/go-rod/rod"
"github.com/go-rod/rod/lib/launcher"
)
// MakeRequestHeadless loads url in a managed headless Chromium instance
// (go-rod), waits until more than 10 elements match
// `div#jobs-list [style=""]` (i.e. the jobs list has rendered), and
// returns the fully rendered page HTML.
// NOTE: the Must* helpers panic on failure rather than returning an
// error; only the final HTML() call reports errors to the caller.
func MakeRequestHeadless(url string) (string, error) {
	l := launcher.MustNewManaged("")
	l.NoSandbox(true)
	browser := rod.New().Client(l.Client()).MustConnect()
	defer browser.MustClose()
	page := browser.MustPage(url)
	page.MustWaitElementsMoreThan("div#jobs-list [style=\"\"]", 10)
	str, err := page.HTML()
	if err != nil {
		return "", err
	}
	return str, nil
}
// MakeRequest performs a GET request against url and returns the raw
// response body stream; the caller must close the returned ReadCloser.
// Fix: the original replaced the underlying error with a fixed string,
// destroying all diagnostic detail (and used a capitalized error
// message). The cause is now appended to the message.
func MakeRequest(url string) (io.ReadCloser, error) {
	res, err := http.Get(url)
	if err != nil {
		return nil, errors.New("could not complete the request: " + err.Error())
	}
	return res.Body, nil
}
// MakeRequestAsync fetches url and delivers the raw response body on ch;
// it sends exactly one value and is intended to run as a goroutine.
// Fix: removed the non-idiomatic parentheses around the if conditions.
// NOTE(review): log.Fatal aborts the entire process on any HTTP or read
// error — confirm that is intended before softening it, since callers
// currently rely on always receiving a value on success.
func MakeRequestAsync(url string, ch chan<- []byte) {
	res, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	ch <- body
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"bytes"
"crypto/rand"
"math"
"sort"
"testing"
"unsafe"
"github.com/stretchr/testify/require"
)
// randBytes returns n cryptographically random bytes.
// Fix: the error from crypto/rand.Read was silently ignored; a failed
// read would have returned predictable zero bytes. Since this is a test
// helper, a failure is fatal.
func randBytes(n int) []byte {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return b
}
// TestNoopKeyAdapter checks that the no-op adapter is an identity
// transform: encoded length equals the input, and Encode/Decode
// round-trip the key unchanged.
func TestNoopKeyAdapter(t *testing.T) {
	keyAdapter := NoopKeyAdapter{}
	key := randBytes(32)
	require.Len(t, key, keyAdapter.EncodedLen(key, ZeroRowID))
	encodedKey := keyAdapter.Encode(nil, key, ZeroRowID)
	require.Equal(t, key, encodedKey)
	decodedKey, err := keyAdapter.Decode(nil, encodedKey)
	require.NoError(t, err)
	require.Equal(t, key, decodedKey)
}

// TestDupDetectKeyAdapter verifies Encode/Decode round-trips for the
// duplicate-detection adapter across edge-case row IDs (0, MaxInt32,
// MinInt32) and checks EncodedLen matches the actual encoded size.
func TestDupDetectKeyAdapter(t *testing.T) {
	inputs := []struct {
		key   []byte
		rowID int64
	}{
		{
			[]byte{0x0},
			0,
		},
		{
			randBytes(32),
			1,
		},
		{
			randBytes(32),
			math.MaxInt32,
		},
		{
			randBytes(32),
			math.MinInt32,
		},
	}
	keyAdapter := DupDetectKeyAdapter{}
	for _, input := range inputs {
		encodedRowID := EncodeIntRowID(input.rowID)
		result := keyAdapter.Encode(nil, input.key, encodedRowID)
		require.Equal(t, keyAdapter.EncodedLen(input.key, encodedRowID), len(result))
		// Decode the result.
		key, err := keyAdapter.Decode(nil, result)
		require.NoError(t, err)
		require.Equal(t, input.key, key)
	}
}

// TestDupDetectKeyOrder checks that encoding preserves the byte-wise
// ordering of the original keys (required so encoded keys sort the same
// as raw keys, including prefix/extension cases).
func TestDupDetectKeyOrder(t *testing.T) {
	keys := [][]byte{
		{0x0, 0x1, 0x2},
		{0x0, 0x1, 0x3},
		{0x0, 0x1, 0x3, 0x4},
		{0x0, 0x1, 0x3, 0x4, 0x0},
		{0x0, 0x1, 0x3, 0x4, 0x0, 0x0, 0x0},
	}
	keyAdapter := DupDetectKeyAdapter{}
	encodedKeys := make([][]byte, 0, len(keys))
	for _, key := range keys {
		encodedKeys = append(encodedKeys, keyAdapter.Encode(nil, key, EncodeIntRowID(1)))
	}
	sorted := sort.SliceIsSorted(encodedKeys, func(i int, j int) bool {
		return bytes.Compare(encodedKeys[i], encodedKeys[j]) < 0
	})
	require.True(t, sorted)
}

// TestDupDetectEncodeDupKey checks that the same key with different row
// IDs yields distinct encodings — the property duplicate detection
// relies on.
func TestDupDetectEncodeDupKey(t *testing.T) {
	keyAdapter := DupDetectKeyAdapter{}
	key := randBytes(32)
	result1 := keyAdapter.Encode(nil, key, EncodeIntRowID(10))
	result2 := keyAdapter.Encode(nil, key, EncodeIntRowID(20))
	require.NotEqual(t, result1, result2)
}
// startWithSameMemory reports whether x and y share the same backing
// array, i.e. the first element of each full-capacity slice sits at the
// same address. Slices with zero capacity never match.
func startWithSameMemory(x []byte, y []byte) bool {
	if cap(x) == 0 || cap(y) == 0 {
		return false
	}
	xs, ys := x[:cap(x)], y[:cap(y)]
	return uintptr(unsafe.Pointer(&xs[0])) == uintptr(unsafe.Pointer(&ys[0]))
}
// TestEncodeKeyToPreAllocatedBuf checks that Encode appends into the
// caller-supplied buffer (same backing array, no reallocation) and that
// the appended bytes still decode back to the original key.
func TestEncodeKeyToPreAllocatedBuf(t *testing.T) {
	keyAdapters := []KeyAdapter{NoopKeyAdapter{}, DupDetectKeyAdapter{}}
	for _, keyAdapter := range keyAdapters {
		key := randBytes(32)
		buf := make([]byte, 256)
		buf2 := keyAdapter.Encode(buf[:4], key, EncodeIntRowID(1))
		require.True(t, startWithSameMemory(buf, buf2))
		// Verify the encoded result first.
		key2, err := keyAdapter.Decode(nil, buf2[4:])
		require.NoError(t, err)
		require.Equal(t, key, key2)
	}
}

// TestDecodeKeyToPreAllocatedBuf checks that Decode appends into a
// caller-supplied buffer with enough spare capacity, reusing its backing
// array. The data literal is a valid DupDetect encoding (also accepted
// verbatim by the noop adapter).
func TestDecodeKeyToPreAllocatedBuf(t *testing.T) {
	data := []byte{
		0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7,
		0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x0, 0x8,
	}
	keyAdapters := []KeyAdapter{NoopKeyAdapter{}, DupDetectKeyAdapter{}}
	for _, keyAdapter := range keyAdapters {
		key, err := keyAdapter.Decode(nil, data)
		require.NoError(t, err)
		buf := make([]byte, 4+len(data))
		buf2, err := keyAdapter.Decode(buf[:4], data)
		require.NoError(t, err)
		require.True(t, startWithSameMemory(buf, buf2))
		require.Equal(t, key, buf2[4:])
	}
}

// TestDecodeKeyDstIsInsufficient checks the reallocation path: when the
// destination buffer's capacity cannot hold the decoded key, Decode must
// allocate a new buffer while preserving the destination's prefix bytes.
func TestDecodeKeyDstIsInsufficient(t *testing.T) {
	data := []byte{
		0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7,
		0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x0, 0x8,
	}
	keyAdapters := []KeyAdapter{NoopKeyAdapter{}, DupDetectKeyAdapter{}}
	for _, keyAdapter := range keyAdapters {
		key, err := keyAdapter.Decode(nil, data)
		require.NoError(t, err)
		buf := make([]byte, 4, 6)
		copy(buf, []byte{'a', 'b', 'c', 'd'})
		buf2, err := keyAdapter.Decode(buf[:4], data)
		require.NoError(t, err)
		require.False(t, startWithSameMemory(buf, buf2))
		require.Equal(t, buf[:4], buf2[:4])
		require.Equal(t, key, buf2[4:])
	}
}
|
package database
import (
"fmt"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/spf13/viper"
)
// ConnectMySQL reads the "DB" section of the viper config into a DSN,
// opens a MySQL connection via sqlx, stores the handle in the
// package-level DB, caps the pool at 5 idle / 30 open connections, and
// verifies connectivity with a Ping.
// NOTE(review): assumes the go-sql-driver/mysql driver is registered by
// a blank import elsewhere in the package — confirm.
func ConnectMySQL() (err error) {
	var dsnObj DSN
	// This err deliberately shadows the named return; it is returned
	// (wrapped) immediately.
	if err := viper.UnmarshalKey("DB", &dsnObj); err != nil {
		return errors.WithStack(err)
	}
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8&parseTime=true", dsnObj.User, dsnObj.Password, dsnObj.Host, dsnObj.Port, dsnObj.DBName)
	DB, err = sqlx.Connect("mysql", dsn)
	if err != nil {
		return errors.WithStack(err)
	}
	DB.SetMaxIdleConns(5)
	DB.SetMaxOpenConns(30)
	return errors.WithStack(DB.Ping())
}
|
package twosum
// twoSum returns the indices of the two elements of nums that add up to
// target, earlier index first, or nil when no such pair exists.
// One pass with a value→index map: O(n) time, O(n) space.
func twoSum(nums []int, target int) []int {
	seen := map[int]int{}
	for i, n := range nums {
		if j, ok := seen[target-n]; ok {
			return []int{j, i}
		}
		seen[n] = i
	}
	return nil
}
// twoSumWithLoop is the brute-force variant of twoSum: it checks every
// pair (i, j) with i < j and returns the first pair summing to target,
// or nil when none exists. O(n²) time, O(1) space.
func twoSumWithLoop(nums []int, target int) []int {
	for i, a := range nums {
		for j := i + 1; j < len(nums); j++ {
			if a+nums[j] == target {
				return []int{i, j}
			}
		}
	}
	return nil
}
|
package main
import "fmt"
// countingSort sorts slice in place (ascending) using counting sort and
// returns it. Negative values are handled by offsetting count indices by
// the minimum element. O(n + k) time, O(k) space, k = max - min + 1.
// Fix: removed the stray debug `fmt.Print(countingSlice)` that dumped
// the internal histogram to stdout on every call.
func countingSort(slice []int) []int {
	if len(slice) == 0 {
		return slice
	}
	// Find the value range [min, max] in one pass.
	min, max := slice[0], slice[0]
	for i := 0; i < len(slice); i++ {
		if slice[i] > max {
			max = slice[i]
		} else if slice[i] < min {
			min = slice[i]
		}
	}
	// countingSlice[v-min] holds the number of occurrences of v.
	countingSlice := make([]int, max-min+1)
	for i := 0; i < len(slice); i++ {
		countingSlice[slice[i]-min]++
	}
	// Rewrite slice in ascending order from the histogram.
	k := 0
	for i := 0; i < len(countingSlice); i++ {
		current := i + min
		for j := 0; j < countingSlice[i]; j++ {
			slice[k] = current
			k++
		}
	}
	return slice
}
func main() {
//var test = []int {-5,-1,5,6,2,4,3,1,2,3,2,2,5}
var test = []int{2, 253, 252, 7, 7, 155, 101, 82, 253, 0, 252, -7, 252, 7, 63, -95, 15, 154, 201, -29}
fmt.Print(countingSort(test))
//fmt.Print(countingSort([]int{}))
} |
package reap
import (
"context"
"fmt"
"log"
"sort"
"strings"
"time"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"github.com/dollarshaveclub/acyl/pkg/ghclient"
"github.com/dollarshaveclub/acyl/pkg/locker"
"github.com/dollarshaveclub/acyl/pkg/models"
"github.com/dollarshaveclub/acyl/pkg/persistence"
"github.com/dollarshaveclub/acyl/pkg/spawner"
)
// Tuning knobs for the periodic reaper.
const (
	lockWait                  = 5 * time.Second // timeout for releasing the reap lock
	deleteMaxCount            = 100             // max destroyed records pruned per run
	destroyedMaxDurationHours = 730             // one month
	failedMaxDurationSecs     = 3600
	spawnedMaxDurationSecs    = 3600
	registeredMaxDurationSecs = 7200
)

// ReaperMetricsCollector receives metrics emitted while reaping:
// prune counts, per-environment reap outcomes, and per-repo counts of
// environments by status.
type ReaperMetricsCollector interface {
	Pruned(int)
	Reaped(string, string, models.QADestroyReason, error)
	EnvironmentCount(string, models.EnvironmentStatus, uint)
}

// Reaper is an object that does periodic cleanup
type Reaper struct {
	lp          locker.LockProvider       // distributed lock so only one reaper runs at a time
	dl          persistence.DataLayer     // environment records
	es          spawner.EnvironmentSpawner // performs the actual destroys
	rc          ghclient.RepoClient        // used to check PR status on GitHub
	mc          ReaperMetricsCollector
	globalLimit uint // max running environments; 0 disables enforcement
	logger      *log.Logger
	lockKey     int64
}
// NewReaper returns a Reaper object using the supplied dependencies
func NewReaper(lp locker.LockProvider, dl persistence.DataLayer, es spawner.EnvironmentSpawner, rc ghclient.RepoClient, mc ReaperMetricsCollector, globalLimit uint, logger *log.Logger, lockKey int64) *Reaper {
	return &Reaper{
		lp:          lp,
		dl:          dl,
		es:          es,
		rc:          rc,
		mc:          mc,
		globalLimit: globalLimit,
		lockKey:     lockKey,
		logger:      logger,
	}
}
// Reap is called periodically to do various cleanup tasks
// It acquires a distributed lock (returning immediately if that fails,
// so concurrent reapers are mutually exclusive), then runs each cleanup
// step in order, logging but not aborting on individual step failures.
// The named err is captured by the deferred span.Finish, so the trace
// records the last error assigned before return.
func (r *Reaper) Reap() {
	var err error
	reapSpan, ctx := tracer.StartSpanFromContext(context.Background(), "reap")
	defer func() {
		reapSpan.Finish(tracer.WithError(err))
	}()
	lock, err := r.lp.New(ctx, r.lockKey, "reap")
	if err != nil || lock == nil {
		r.logger.Printf("error trying to acquire lock: %v", err)
		return
	}
	_, err = lock.Lock(ctx)
	if err != nil {
		r.logger.Printf("error locking: %v", err)
		return
	}
	defer func() {
		// Unlock with a fresh bounded context: the reap ctx may already
		// be finished by the time this runs.
		unlockCtx, unlockCancel := context.WithTimeout(context.Background(), lockWait)
		lock.Unlock(unlockCtx)
		unlockCancel()
	}()
	err = r.pruneDestroyedRecords(ctx)
	if err != nil {
		r.logger.Printf("error pruning destroyed records: %v", err)
	}
	err = r.destroyFailedOrStuckEnvironments(ctx)
	if err != nil {
		r.logger.Printf("error destroying failed/stuck environments: %v", err)
	}
	err = r.destroyClosedPRs(ctx)
	if err != nil {
		r.logger.Printf("error destroying environments associated with closed PRs: %v", err)
	}
	err = r.enforceGlobalLimit(ctx)
	if err != nil {
		r.logger.Printf("error enforcing global limit (%v): %v", r.globalLimit, err)
	}
	if err = r.auditQaEnvs(ctx); err != nil {
		r.logger.Printf("reaper: audit qa envs: %v", err)
	}
}
// auditQaEnvs tallies environments per (repo, status) and publishes each
// count to the metrics collector.
func (r *Reaper) auditQaEnvs(ctx context.Context) error {
	envs, err := r.dl.GetQAEnvironments(ctx)
	if err != nil {
		return fmt.Errorf("error getting QA environments: %v", err)
	}
	counts := make(map[string]map[models.EnvironmentStatus]uint)
	for _, env := range envs {
		statuses, ok := counts[env.Repo]
		if !ok {
			statuses = make(map[models.EnvironmentStatus]uint)
			counts[env.Repo] = statuses
		}
		statuses[env.Status]++
	}
	for repo, statuses := range counts {
		for status, n := range statuses {
			r.mc.EnvironmentCount(repo, status, n)
		}
	}
	return nil
}
// destroyClosedPRs destroys every non-destroyed environment whose
// associated pull request is closed. Destroy failures are logged and
// iteration continues; a PR status lookup failure aborts the run.
// Fix: the received ctx is now threaded to GetPRStatus and
// DestroyExplicitly instead of context.Background(), preserving
// cancellation and the trace started in Reap.
func (r *Reaper) destroyClosedPRs(ctx context.Context) error {
	qas, err := r.dl.GetQAEnvironments(ctx)
	if err != nil {
		return fmt.Errorf("error getting QA environments: %v", err)
	}
	for _, qa := range qas {
		if qa.Status == models.Destroyed {
			continue
		}
		prs, err := r.rc.GetPRStatus(ctx, qa.Repo, qa.PullRequest)
		if err != nil {
			return err
		}
		if prs != "closed" {
			continue
		}
		r.logger.Printf("destroying environment because PR is now closed: %v", qa.Name)
		if err := r.es.DestroyExplicitly(ctx, &qa, models.ReapPrClosed); err != nil {
			r.logger.Printf("error destroying %v: %v", qa.Name, err)
		}
	}
	return nil
}
// pruneDestroyedRecords deletes records of destroyed environments.
// A record is deleted when its "marked as destroyed" event is older than
// destroyedMaxDurationHours; at most deleteMaxCount such deletions occur
// per run. Records whose destroyed event cannot be found are deleted
// immediately — note these are NOT counted toward deleteMaxCount.
// The total deletion count is reported to metrics on return.
func (r *Reaper) pruneDestroyedRecords(ctx context.Context) error {
	qas, err := r.dl.GetQAEnvironments(ctx)
	if err != nil {
		return fmt.Errorf("error getting QA environments: %v", err)
	}
	var found bool
	var ts time.Time
	var i int
	defer func() { r.mc.Pruned(i) }()
	for _, qa := range qas {
		if qa.Status == models.Destroyed && i < deleteMaxCount {
			found = false
			for _, e := range qa.Events { // get timestamp of destroyed event
				// The last matching event wins if there are several.
				if strings.Contains(strings.ToLower(e.Message), strings.ToLower("marked as "+models.Destroyed.String())) {
					found = true
					ts = e.Timestamp
				}
			}
			if !found {
				// No destroyed event: record is malformed, delete it outright.
				r.logger.Printf("could not find destroyed event for %v", qa.Name)
				err = r.dl.DeleteQAEnvironment(ctx, qa.Name)
				if err != nil {
					r.logger.Printf("error deleting destroyed environment: %v", err)
				}
				continue
			}
			if time.Since(ts) > (destroyedMaxDurationHours * time.Hour) {
				r.logger.Printf("deleting destroyed environment: %v", qa.Name)
				err = r.dl.DeleteQAEnvironment(ctx, qa.Name)
				if err != nil {
					r.logger.Printf("error deleting destroyed environment: %v", err)
				}
				i++
			}
		}
	}
	return nil
}
// destroyIfOlderThan destroys qa if it has been in its current state for
// longer than d (measured from qa.Created), recording the outcome with
// the metrics collector. The deferred Reaped call captures the named
// err, so the metric reflects the destroy's success or failure.
// Environments younger than d are left alone and no metric is emitted.
func (r *Reaper) destroyIfOlderThan(ctx context.Context, qa *models.QAEnvironment, d time.Duration, reason models.QADestroyReason) (err error) {
	if time.Since(qa.Created) > d {
		r.logger.Printf("destroying environment %v: in state %v greater than %v secs", qa.Name, qa.Status.String(), d.Seconds())
		defer func() { r.mc.Reaped(qa.Name, qa.Repo, reason, err) }()
		return r.es.DestroyExplicitly(ctx, qa, reason)
	}
	return nil
}
// destroyFailedOrStuckEnvironments destroys environments stuck in the
// Spawned state (likely hung spawns) or left in the Failure state longer
// than their respective max durations. Destroy errors are logged and the
// loop continues; all other statuses are skipped.
func (r *Reaper) destroyFailedOrStuckEnvironments(ctx context.Context) error {
	qas, err := r.dl.GetQAEnvironments(ctx)
	if err != nil {
		return fmt.Errorf("error getting QA environments: %v", err)
	}
	for _, qa := range qas {
		switch qa.Status {
		case models.Spawned:
			err = r.destroyIfOlderThan(ctx, &qa, spawnedMaxDurationSecs*time.Second, models.ReapAgeSpawned)
		case models.Failure:
			err = r.destroyIfOlderThan(ctx, &qa, failedMaxDurationSecs*time.Second, models.ReapAgeFailure)
		default:
			continue
		}
		if err != nil {
			r.logger.Printf("error destroying if older than: %v", err)
		}
	}
	return nil
}
// enforceGlobalLimit destroys the oldest running environments whenever
// the number of running environments exceeds the configured global
// limit. A limit of zero disables enforcement. Destroy failures are
// logged and the remaining candidates are still processed.
// Fix: DestroyExplicitly now receives the caller's ctx instead of
// context.Background(), preserving cancellation and tracing.
func (r *Reaper) enforceGlobalLimit(ctx context.Context) error {
	if r.globalLimit == 0 {
		return nil
	}
	qae, err := r.dl.GetRunningQAEnvironments(ctx)
	if err != nil {
		return fmt.Errorf("error getting running environments: %v", err)
	}
	if len(qae) <= int(r.globalLimit) {
		r.logger.Printf("global limit not exceeded: running: %v, limit: %v", len(qae), r.globalLimit)
		return nil
	}
	kc := len(qae) - int(r.globalLimit)
	// Oldest first, so the longest-lived environments are reaped.
	sort.Slice(qae, func(i int, j int) bool { return qae[i].Created.Before(qae[j].Created) })
	r.logger.Printf("reaper: enforcing global limit: extant: %v, limit: %v, destroying: %v", len(qae), r.globalLimit, kc)
	for _, e := range qae[:kc] {
		env := e // copy so &env does not alias the loop variable
		r.logger.Printf("reaper: destroying: %v (created %v)", env.Name, env.Created)
		if err := r.es.DestroyExplicitly(ctx, &env, models.ReapEnvironmentLimitExceeded); err != nil {
			r.logger.Printf("error destroying environment for exceeding limit: %v", err)
		}
	}
	return nil
}
|
// +build windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winkernel
import (
"k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1beta1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
"net"
"strings"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Shared fixture values used by the fake HNS and the fake proxier below.
const testHostName = "test-hostname"
const macAddress = "00-11-22-33-44-55"
const clusterCIDR = "192.168.1.0/24"
const destinationPrefix = "192.168.2.0/24"
const providerAddress = "10.0.0.3"
const guid = "123ABC"
// fakeHNS is a stub HNS implementation for tests: it fabricates network,
// endpoint, and load-balancer records from the fixture constants above
// without touching the real Host Networking Service.
type fakeHNS struct{}

// newFakeHNS returns a fresh stub HNS client.
func newFakeHNS() *fakeHNS {
	return &fakeHNS{}
}

// getNetworkByName returns an Overlay network named `name` carrying one
// remote subnet built from the fixture constants.
func (hns fakeHNS) getNetworkByName(name string) (*hnsNetworkInfo, error) {
	var remoteSubnets []*remoteSubnetInfo
	rs := &remoteSubnetInfo{
		destinationPrefix: destinationPrefix,
		isolationID:       4096,
		providerAddress:   providerAddress,
		drMacAddress:      macAddress,
	}
	remoteSubnets = append(remoteSubnets, rs)
	return &hnsNetworkInfo{
		id:            strings.ToUpper(guid),
		name:          name,
		networkType:   "Overlay",
		remoteSubnets: remoteSubnets,
	}, nil
}

// getEndpointByID always reports "not found" (nil, nil).
func (hns fakeHNS) getEndpointByID(id string) (*endpointsInfo, error) {
	return nil, nil
}

// getEndpointByIpAddress returns a remote (non-local) endpoint for any
// ip inside destinationPrefix, and nil for everything else.
func (hns fakeHNS) getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) {
	_, ipNet, _ := net.ParseCIDR(destinationPrefix)
	if ipNet.Contains(net.ParseIP(ip)) {
		return &endpointsInfo{
			ip:         ip,
			isLocal:    false,
			macAddress: macAddress,
			hnsID:      guid,
			hns:        hns,
		}, nil
	}
	return nil, nil
}

// createEndpoint echoes the requested endpoint back with the fixture
// hnsID assigned, as a real HNS would assign an ID on creation.
func (hns fakeHNS) createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error) {
	return &endpointsInfo{
		ip:         ep.ip,
		isLocal:    ep.isLocal,
		macAddress: ep.macAddress,
		hnsID:      guid,
		hns:        hns,
	}, nil
}

// deleteEndpoint is a no-op that always succeeds.
func (hns fakeHNS) deleteEndpoint(hnsID string) error {
	return nil
}

// getLoadBalancer returns a load balancer carrying the fixture hnsID,
// ignoring all inputs.
func (hns fakeHNS) getLoadBalancer(endpoints []endpointsInfo, flags loadBalancerFlags, sourceVip string, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*loadBalancerInfo, error) {
	return &loadBalancerInfo{
		hnsID: guid,
	}, nil
}

// deleteLoadBalancer is a no-op that always succeeds.
func (hns fakeHNS) deleteLoadBalancer(hnsID string) error {
	return nil
}
// NewFakeProxier builds a Proxier wired to the fake HNS for tests.
// Note: syncPeriod and minSyncPeriod are currently unused; the hostname
// parameter is only passed to the endpoints change tracker, while the
// proxier's own hostname field is fixed to testHostName.
func NewFakeProxier(syncPeriod time.Duration, minSyncPeriod time.Duration, clusterCIDR string, hostname string, nodeIP net.IP, networkType string) *Proxier {
	sourceVip := "192.168.1.2"
	hnsNetworkInfo := &hnsNetworkInfo{
		id:          strings.ToUpper(guid),
		name:        "TestNetwork",
		networkType: networkType,
	}
	proxier := &Proxier{
		portsMap:            make(map[localPort]closeable),
		serviceMap:          make(proxyServiceMap),
		serviceChanges:      newServiceChangeMap(),
		endpointsMap:        make(proxyEndpointsMap),
		endpointsChanges:    newEndpointsChangeMap(hostname),
		clusterCIDR:         clusterCIDR,
		hostname:            testHostName,
		nodeIP:              nodeIP,
		serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
		network:             *hnsNetworkInfo,
		sourceVip:           sourceVip,
		hostMac:             macAddress,
		isDSR:               false,
		hns:                 newFakeHNS(),
		endPointsRefCount:   make(endPointsReferenceCountMap),
	}
	return proxier
}
// TestCreateServiceVip verifies that syncing a NodePort service with
// ClientIP session affinity on an Overlay network records a remote
// endpoint for the service's cluster IP.
func TestCreateServiceVip(t *testing.T) {
	syncPeriod := 30 * time.Second
	proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "Overlay")
	if proxier == nil {
		t.Error()
	}
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcExternalIPs := "50.60.70.81"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	timeoutSeconds := v1.DefaultClientIPServiceAffinitySeconds
	// One NodePort service with an external IP and ClientIP affinity.
	makeServiceMap(proxier,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{
					TimeoutSeconds: &timeoutSeconds,
				},
			}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)
	// No endpoints: the VIP remote endpoint must still be created.
	makeEndpointsMap(proxier)
	proxier.syncProxyRules()
	if proxier.serviceMap[svcPortName].remoteEndpoint == nil {
		t.Error()
	}
	if proxier.serviceMap[svcPortName].remoteEndpoint.ip != svcIP {
		t.Error()
	}
}
func TestCreateRemoteEndpointOverlay(t *testing.T) {
syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "Overlay")
if proxier == nil {
t.Error()
}
svcIP := "10.20.30.41"
svcPort := 80
svcNodePort := 3001
svcPortName := proxy.ServicePortName{
NamespacedName: makeNSN("ns1", "svc1"),
Port: "p80",
}
makeServiceMap(proxier,
makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
svc.Spec.Type = "NodePort"
svc.Spec.ClusterIP = svcIP
svc.Spec.Ports = []v1.ServicePort{{
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
NodePort: int32(svcNodePort),
}}
}),
)
makeEndpointsMap(proxier,
makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: epIpAddressRemote,
}},
Ports: []v1.EndpointPort{{
Name: svcPortName.Port,
Port: int32(svcPort),
}},
}}
}),
)
proxier.syncProxyRules()
if proxier.endpointsMap[svcPortName][0].hnsID != guid {
t.Errorf("%v does not match %v", proxier.endpointsMap[svcPortName][0].hnsID, guid)
}
if *proxier.endPointsRefCount[guid] <= 0 {
t.Errorf("RefCount not incremented. Current value: %v", *proxier.endPointsRefCount[guid])
}
if *proxier.endPointsRefCount[guid] != *proxier.endpointsMap[svcPortName][0].refCount {
t.Errorf("Global refCount: %v does not match endpoint refCount: %v", *proxier.endPointsRefCount[guid], *proxier.endpointsMap[svcPortName][0].refCount)
}
}
// TestCreateRemoteEndpointL2Bridge verifies that syncing proxy rules on an
// L2Bridge-network proxier creates the remote HNS endpoint and maintains a
// consistent reference count for it.
func TestCreateRemoteEndpointL2Bridge(t *testing.T) {
	const (
		serviceIP   = "10.20.30.41"
		servicePort = 80
		nodePort    = 3001
	)
	syncPeriod := 30 * time.Second
	proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "L2Bridge")
	if proxier == nil {
		t.Error()
	}
	portName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(proxier,
		makeTestService(portName.Namespace, portName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = serviceIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     portName.Port,
				Port:     int32(servicePort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(nodePort),
			}}
		}),
	)
	makeEndpointsMap(proxier,
		makeTestEndpoints(portName.Namespace, portName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: epIpAddressRemote,
				}},
				Ports: []v1.EndpointPort{{
					Name: portName.Port,
					Port: int32(servicePort),
				}},
			}}
		}),
	)
	proxier.syncProxyRules()
	if proxier.endpointsMap[portName][0].hnsID != guid {
		t.Errorf("%v does not match %v", proxier.endpointsMap[portName][0].hnsID, guid)
	}
	if *proxier.endPointsRefCount[guid] <= 0 {
		t.Errorf("RefCount not incremented. Current value: %v", *proxier.endPointsRefCount[guid])
	}
	if *proxier.endPointsRefCount[guid] != *proxier.endpointsMap[portName][0].refCount {
		t.Errorf("Global refCount: %v does not match endpoint refCount: %v", *proxier.endPointsRefCount[guid], *proxier.endpointsMap[portName][0].refCount)
	}
}
// TestCreateLoadBalancer verifies that syncing proxy rules creates the HNS
// load balancer for a NodePort service on an overlay network.
func TestCreateLoadBalancer(t *testing.T) {
	const (
		serviceIP   = "10.20.30.41"
		servicePort = 80
		nodePort    = 3001
	)
	syncPeriod := 30 * time.Second
	proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "Overlay")
	if proxier == nil {
		t.Error()
	}
	portName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}
	makeServiceMap(proxier,
		makeTestService(portName.Namespace, portName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = serviceIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     portName.Port,
				Port:     int32(servicePort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(nodePort),
			}}
		}),
	)
	makeEndpointsMap(proxier,
		makeTestEndpoints(portName.Namespace, portName.Name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{
					IP: epIpAddressRemote,
				}},
				Ports: []v1.EndpointPort{{
					Name: portName.Port,
					Port: int32(servicePort),
				}},
			}}
		}),
	)
	proxier.syncProxyRules()
	if proxier.serviceMap[portName].hnsID != guid {
		t.Errorf("%v does not match %v", proxier.serviceMap[portName].hnsID, guid)
	}
}
// TestNoopEndpointSlice checks that the EndpointSlice event handlers can be
// called on a zero-value Proxier without panicking; this proxier implements
// them as no-ops.
func TestNoopEndpointSlice(t *testing.T) {
	p := Proxier{}
	p.OnEndpointSliceAdd(&discovery.EndpointSlice{})
	p.OnEndpointSliceUpdate(&discovery.EndpointSlice{}, &discovery.EndpointSlice{})
	p.OnEndpointSliceDelete(&discovery.EndpointSlice{})
	p.OnEndpointSlicesSynced()
}
// TestFindRemoteSubnetProviderAddress checks remote-subnet lookup on the fake
// HNS network: both the provider address itself and a remote endpoint IP must
// resolve to the provider address, while the service VIP must not resolve.
func TestFindRemoteSubnetProviderAddress(t *testing.T) {
	networkInfo, _ := newFakeHNS().getNetworkByName("TestNetwork")
	for _, lookup := range []string{providerAddress, epIpAddressRemote} {
		got := networkInfo.findRemoteSubnetProviderAddress(lookup)
		if got != providerAddress {
			t.Errorf("%v does not match %v", got, providerAddress)
		}
	}
	if got := networkInfo.findRemoteSubnetProviderAddress(serviceVip); len(got) != 0 {
		t.Errorf("Provider address is not empty as expected")
	}
}
// makeNSN builds a NamespacedName from its namespace and name parts.
func makeNSN(namespace, name string) types.NamespacedName {
	nsn := types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	}
	return nsn
}
// makeServiceMap feeds the given services to the proxier and then marks the
// service state as synced so syncProxyRules will act on it.
func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
	for _, svc := range allServices {
		proxier.OnServiceAdd(svc)
	}
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	proxier.servicesSynced = true
}
// makeTestService constructs a Service fixture with the given identity and
// lets svcFunc customize the remaining fields.
func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
	svc := new(v1.Service)
	svc.ObjectMeta = metav1.ObjectMeta{
		Name:        name,
		Namespace:   namespace,
		Annotations: map[string]string{},
	}
	svc.Spec = v1.ServiceSpec{}
	svc.Status = v1.ServiceStatus{}
	svcFunc(svc)
	return svc
}
// makeEndpointsMap feeds the given endpoints to the proxier and then marks
// the endpoints state as synced so syncProxyRules will act on it.
func makeEndpointsMap(proxier *Proxier, allEndpoints ...*v1.Endpoints) {
	for _, ept := range allEndpoints {
		proxier.OnEndpointsAdd(ept)
	}
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	proxier.endpointsSynced = true
}
// makeTestEndpoints constructs an Endpoints fixture with the given identity
// and lets eptFunc customize the remaining fields.
func makeTestEndpoints(namespace, name string, eptFunc func(*v1.Endpoints)) *v1.Endpoints {
	ept := new(v1.Endpoints)
	ept.ObjectMeta = metav1.ObjectMeta{
		Name:      name,
		Namespace: namespace,
	}
	eptFunc(ept)
	return ept
}
|
package ers_test
import (
"regexp"
"testing"
ers "github.com/mix3/email-regex-safe-go"
"github.com/stretchr/testify/assert"
)
// New wraps ers.New and panics on error; in these tests the pattern is
// always expected to compile.
func New(opts ...ers.Option) *regexp.Regexp {
	re, err := ers.New(opts...)
	if err != nil {
		panic(err)
	}
	return re
}
// TestEmailRegexSafe runs the generated regexp over a fixture that mixes
// valid addresses, malformed addresses, URLs, MIME cid references and other
// noise, and compares the full extraction result against the expected list.
func TestEmailRegexSafe(t *testing.T) {
	fixture := `_boop@beep.com
foo@foo.com
foo@f.com
foo@.com
some@sub.domain.jpg.co.uk.com.jpeg
_._boop@beep.com
bepp.test@boop.com
beep....foo@foo.com
beep..@foo.com
beep@bar.com.
beep.boop.boop.@foo.com
beep@boop.com .@foo.com
foo@_ $foobar@gmail.com
+foo@gmail.com
+$foo@gmail.com
+@test.com
++@test.com+@testtest.com
url:www.example.com reserved.[subscribe.example.com/subscribe.aspx?foo=zaaaa@example.io&beep=foo124123@example.nl
##rfc822;beep@test.co.uk
/images/some_logo@2x.jp
/images/foobar@2x.jpeg ----------------------------------------[beep.boop.net/foo.cfm?email=beep@example.ai\nwww.foo-beep.es was invalid
cid:image001.png@01bazz23.mx1e6980]www.facebook.com/example[cid:image002.png@03j570cf.ee1e6980]twitter.com/foobar[cid:image000.png@03j570cfzaaaazz.ee1e6980]http://www.linkedin.com/company/beep?trk=company_logo[cid:image005.png@03j570cf.es
foo@bar example.@gmail.com
foo+test@gmail.com
f=nr@context",c=e("gos") 'text@example.com, some text'
fazboop <foo@bar.com> beep baz@boop.com
foo@fe.com admin@2606:4700:4700::1111
fe@fe az@as test@1.2.3.4 foo@com.jpeg
foo@com.jpeg`
	// Expected matches, in document order. Note that some matches are
	// truncated relative to the fixture (e.g. ".jpeg" -> ".jp") — that is
	// the library's TLD handling, not a typo in this list.
	want := []string{
		"boop@beep.com",
		"foo@foo.com",
		"foo@f.com",
		"some@sub.domain.jpg.co.uk.com.jp",
		"boop@beep.com",
		"bepp.test@boop.com",
		"beep....foo@foo.com",
		"beep..@foo.com",
		"beep@bar.com",
		"beep.boop.boop.@foo.com",
		"beep@boop.com",
		"foobar@gmail.com",
		"foo@gmail.com",
		"foo@gmail.com",
		"test.com+@testtest.com",
		"zaaaa@example.io",
		"foo124123@example.nl",
		"beep@test.co.uk",
		"some_logo@2x.jp",
		"foobar@2x.jp",
		"beep@example.ai",
		"image001.png@01bazz23.mx",
		"image002.png@03j570cf.ee",
		"image000.png@03j570cfzaaaazz.ee",
		"image005.png@03j570cf.es",
		"example.@gmail.com",
		"foo+test@gmail.com",
		"text@example.com",
		"foo@bar.com",
		"baz@boop.com",
		"foo@fe.com",
		"test@1.2.3.4",
		"foo@com.jp",
		"foo@com.jp",
	}
	// FindAllString with -1 returns every non-overlapping match.
	assert.Equal(t, want, New().FindAllString(fixture, -1))
}
|
package parser_test
import (
"io/ioutil"
"testing"
"github.com/bddbnet/gospy/parser/h.bilibili.com"
)
// step 1: fetch the list.
// TestIndexList parses a saved cos index page and verifies the number of
// parsed items and follow-up requests, each item payload, and each
// generated request URL.
func TestIndexList(t *testing.T) {
	const resultSize = 24
	expectedItem := []string{
		"Page: 1", "Page: 2", "Page: 3",
	}
	expectedUrl := []string{
		"https://api.vc.bilibili.com/link_draw/v2/Photo/list?category=cos&type=hot&page_num=1&page_size=20",
		"https://api.vc.bilibili.com/link_draw/v2/Photo/list?category=cos&type=hot&page_num=2&page_size=20",
		"https://api.vc.bilibili.com/link_draw/v2/Photo/list?category=cos&type=hot&page_num=3&page_size=20",
	}
	file, err := ioutil.ReadFile("cos_index.html")
	if err != nil {
		t.Error(err)
	}
	parseResult := h_bilibili_com.CosIndexList(file)
	if len(parseResult.Items) != resultSize {
		t.Errorf("Item must have %d, %d found", resultSize, len(parseResult.Items))
	}
	if len(parseResult.Requests) != resultSize {
		t.Errorf("Re must have %d, %d found", resultSize, len(parseResult.Requests))
	}
	for i, item := range expectedItem {
		// Compare and report the Payload field; printing the whole Item
		// struct with %s produced unreadable failure messages.
		if parseResult.Items[i].Payload != item {
			t.Errorf("item need %s,%s found", item, parseResult.Items[i].Payload)
		}
	}
	for i, url := range expectedUrl {
		if parseResult.Requests[i].Url != url {
			t.Errorf("url need %s,%s found", url, parseResult.Requests[i].Url)
		}
	}
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os/exec"
"github.com/osbuild/osbuild-composer/internal/distro"
osbuild "github.com/osbuild/osbuild-composer/internal/osbuild1"
)
// Run an instance of osbuild, returning a parsed osbuild.Result.
//
// Note that osbuild returns non-zero when the pipeline fails. This function
// does not return an error in this case. Instead, the failure is communicated
// with its corresponding logs through osbuild.Result.
func RunOSBuild(manifest distro.Manifest, store, outputDirectory string, exports []string, errorWriter io.Writer) (*osbuild.Result, error) {
	// "--json -" makes osbuild read the manifest from stdin and emit a JSON
	// result on stdout.
	cmd := exec.Command(
		"osbuild",
		"--store", store,
		"--output-directory", outputDirectory,
		"--json", "-",
	)
	// Each requested export becomes its own --export flag.
	for _, export := range exports {
		cmd.Args = append(cmd.Args, "--export", export)
	}
	// osbuild's log output (stderr) is streamed to the caller's writer.
	cmd.Stderr = errorWriter
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, fmt.Errorf("error setting up stdin for osbuild: %v", err)
	}
	// Buffer stdout in memory; it carries only the JSON result document.
	var stdoutBuffer bytes.Buffer
	cmd.Stdout = &stdoutBuffer
	err = cmd.Start()
	if err != nil {
		return nil, fmt.Errorf("error starting osbuild: %v", err)
	}
	// Feed the manifest and close stdin so osbuild sees EOF and proceeds.
	err = json.NewEncoder(stdin).Encode(manifest)
	if err != nil {
		return nil, fmt.Errorf("error encoding osbuild pipeline: %v", err)
	}
	err = stdin.Close()
	if err != nil {
		return nil, fmt.Errorf("error closing osbuild's stdin: %v", err)
	}
	err = cmd.Wait()
	// try to decode the output even though the job could have failed
	var result osbuild.Result
	decodeErr := json.Unmarshal(stdoutBuffer.Bytes(), &result)
	if decodeErr != nil {
		// NOTE(review): a decode failure here masks any cmd.Wait() error —
		// acceptable because the raw output is included in the message.
		return nil, fmt.Errorf("error decoding osbuild output: %v\nthe raw output:\n%s", decodeErr, stdoutBuffer.String())
	}
	if err != nil {
		// ignore ExitError if output could be decoded correctly
		if _, isExitError := err.(*exec.ExitError); !isExitError {
			return nil, fmt.Errorf("running osbuild failed: %v", err)
		}
	}
	return &result, nil
}
|
package main
import (
"database/sql"
"flag"
"fmt"
"log"
"net/http"
"net/url"
"strconv"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/go-chi/cors"
"github.com/jinzhu/configor"
"github.com/unrolled/render"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
)
// format renders HTTP responses (JSON and plain text) for all handlers.
var format = render.New()
// Response describes a general response returned by write endpoints.
type Response struct {
	Invalid bool   `json:"invalid"` // NOTE(review): not set by any visible handler — confirm whether clients rely on it
	Error   string `json:"error"`   // NOTE(review): not set by any visible handler
	ID      string `json:"id"`      // id of the created/updated/deleted record
}
// EventInfo describes data fields of a row in the "event" table; db tags
// map snake_case columns, json tags shape the API payload.
type EventInfo struct {
	ID            int    `json:"id"`
	Text          string `json:"text"`
	StartDate     string `db:"start_date" json:"start_date"`
	EndDate       string `db:"end_date" json:"end_date"`
	Color         string `json:"color"`
	Calendar      int    `json:"calendar"` // id of the owning calendar
	Details       string `json:"details"`
	AllDay        int    `db:"all_day" json:"all_day"` // stored as int, presumably 0/1 — TODO confirm schema
	Recurring     string `json:"recurring"`            // empty when the event is not recurring (see /events query)
	OriginID      int    `db:"origin_id" json:"origin_id"` // id of the master event for recurring sub-events
	SeriesEndDate string `db:"series_end_date" json:"series_end_date"`
	Units         string `json:"units"`
	Section       int    `json:"section"`
}
// CalendarInfo describes calendar data fields of a row in the "calendar" table.
type CalendarInfo struct {
	ID     int    `json:"id"`
	Text   string `json:"text"`
	Color  string `json:"color"`
	Active int    `json:"active"` // stored as int, presumably 0/1 — TODO confirm schema
}
// UnitInfo describes unit data fields of a row in the "unit" table.
type UnitInfo struct {
	ID    int    `json:"id"`
	Value string `json:"value"`
}
// SectionInfo describes data fields for sections (used by timeline view).
type SectionInfo struct {
	ID   int    `json:"id"`
	Text string `json:"text"`
}
// conn is the shared database handle used by all request handlers and the
// query helpers below.
var conn *sqlx.DB
// AppConfig describes application configuration (flags, env vars with the
// APP prefix, and config.yml are merged into it in main).
type AppConfig struct {
	Port         string // listen address for the web server, e.g. ":3000"
	ResetOnStart bool   // NOTE(review): read by migration(), which is outside this file — confirm semantics there
	DB           DBConfig
}
// DBConfig describes database configuration; defaults target a local MySQL.
type DBConfig struct {
	Host     string `default:"localhost"`
	Port     string `default:"3306"`
	User     string `default:"root"`
	Password string `default:"1"`
	Database string `default:"calendar"`
}
// Config is a structure with settings of this app instance.
var Config AppConfig
// main wires up configuration, the database connection and the REST API for
// the calendar backend, then starts the HTTP server.
func main() {
	flag.StringVar(&Config.Port, "port", ":3000", "port for web server")
	flag.Parse()
	// Config file problems are non-fatal (defaults and flags still apply),
	// but they should not be silently ignored.
	if err := configor.New(&configor.Config{ENVPrefix: "APP", Silent: true}).Load(&Config, "config.yml"); err != nil {
		log.Printf("config load: %v", err)
	}
	// common drive access
	var err error
	connStr := fmt.Sprintf("%s:%s@(%s:%s)/%s?multiStatements=true&parseTime=true",
		Config.DB.User, Config.DB.Password, Config.DB.Host, Config.DB.Port, Config.DB.Database)
	conn, err = sqlx.Connect("mysql", connStr)
	if err != nil {
		log.Fatal(err)
	}
	migration(conn)
	r := chi.NewRouter()
	r.Use(middleware.Logger)
	r.Use(middleware.Recoverer)
	cors := cors.New(cors.Options{
		AllowedOrigins:   []string{"*"},
		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
		AllowedHeaders:   []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"},
		AllowCredentials: true,
		MaxAge:           300,
	})
	r.Use(cors.Handler)
	// List events, optionally limited to the [from, to) window; recurring
	// series with an open end ("" series_end_date) are always included.
	r.Get("/events", func(w http.ResponseWriter, r *http.Request) {
		from := r.URL.Query().Get("from")
		to := r.URL.Query().Get("to")
		data := make([]EventInfo, 0)
		var qs string
		var err error
		if from != "" && to != "" {
			qs = "SELECT event.* FROM event WHERE start_date < ? AND (series_end_date >= ? OR recurring != '' AND series_end_date = '' OR end_date >= ?) ORDER BY start_date;"
			err = conn.Select(&data, qs, to, from, from)
			if err != nil {
				format.Text(w, 500, err.Error())
				return
			}
		} else {
			qs = "SELECT event.* FROM event ORDER BY start_date;"
			err = conn.Select(&data, qs)
			if err != nil {
				format.Text(w, 500, err.Error())
				return
			}
		}
		format.JSON(w, 200, data)
	})
	// Update an event; for recurring events also prune sub-events according
	// to the requested update mode ("all" or "next").
	r.Put("/events/{id}", func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		r.ParseForm()
		// Use a handler-local err: assigning to main's err from concurrent
		// requests was a data race.
		err := sendUpdateQuery("event", r.Form, id)
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		mode := r.Form.Get("recurring_update_mode")
		if mode == "all" {
			// remove all sub-events
			_, err := conn.Exec("DELETE FROM event WHERE origin_id = ?", id)
			if err != nil {
				format.Text(w, 500, err.Error())
				return
			}
		} else if mode == "next" {
			// remove all sub-events after new 'this and next' group
			date := r.Form.Get("recurring_update_date")
			if date == "" {
				panic("date must be provided")
			}
			// in case update came for a subevent, search the master event
			var oid int
			err = conn.Get(&oid, "SELECT origin_id FROM event WHERE id = ?", id)
			if err != nil {
				format.Text(w, 500, err.Error())
				return
			}
			if oid != 0 {
				id = strconv.Itoa(oid)
			}
			_, err = conn.Exec("DELETE FROM event WHERE origin_id = ? AND start_date >= ?", id, date)
			if err != nil {
				format.Text(w, 500, err.Error())
				return
			}
		}
		format.JSON(w, 200, Response{ID: id})
	})
	// Delete an event together with all of its recurring sub-events.
	r.Delete("/events/{id}", func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		_, err := conn.Exec("DELETE FROM event WHERE id = ? OR origin_id = ?", id, id)
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		format.JSON(w, 200, Response{ID: id})
	})
	r.Post("/events", func(w http.ResponseWriter, r *http.Request) {
		r.ParseForm()
		res, err := sendInsertQuery("event", r.Form)
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		id, _ := res.LastInsertId()
		format.JSON(w, 200, Response{ID: strconv.FormatInt(id, 10)})
	})
	r.Get("/calendars", func(w http.ResponseWriter, r *http.Request) {
		data := make([]CalendarInfo, 0)
		err := conn.Select(&data, "SELECT calendar.* FROM calendar")
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		format.JSON(w, 200, data)
	})
	r.Put("/calendars/{id}", func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		r.ParseForm()
		err := sendUpdateQuery("calendar", r.Form, id)
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		format.JSON(w, 200, Response{ID: id})
	})
	// Deleting a calendar also deletes all of its events.
	r.Delete("/calendars/{id}", func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		_, err := conn.Exec("DELETE FROM calendar WHERE id = ?", id)
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		_, err = conn.Exec("DELETE FROM event WHERE calendar = ?", id)
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		format.JSON(w, 200, Response{ID: id})
	})
	r.Post("/calendars", func(w http.ResponseWriter, r *http.Request) {
		r.ParseForm()
		res, err := sendInsertQuery("calendar", r.Form)
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		id, _ := res.LastInsertId()
		format.JSON(w, 200, Response{ID: strconv.FormatInt(id, 10)})
	})
	r.Get("/units", func(w http.ResponseWriter, r *http.Request) {
		data := make([]UnitInfo, 0)
		err := conn.Select(&data, "SELECT unit.* FROM unit")
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		format.JSON(w, 200, data)
	})
	r.Get("/sections", func(w http.ResponseWriter, r *http.Request) {
		data := make([]SectionInfo, 0)
		err := conn.Select(&data, "SELECT section.* FROM section")
		if err != nil {
			format.Text(w, 500, err.Error())
			return
		}
		format.JSON(w, 200, data)
	})
	log.Printf("Starting webserver at port " + Config.Port)
	// Previously the ListenAndServe error was discarded; a failed bind made
	// the process exit silently with status 0.
	log.Fatal(http.ListenAndServe(Config.Port, r))
}
// Column whitelists for the event and calendar tables. Only these form
// fields are accepted by sendUpdateQuery / sendInsertQuery; everything else
// in the request form is ignored, which keeps the string-built SQL safe
// from column-name injection.
var whitelistEvent = []string{
	"start_date",
	"end_date",
	"all_day",
	"text",
	"details",
	"color",
	"recurring",
	"calendar",
	"origin_id",
	"series_end_date",
	"units",
	"section",
}
// whitelistCalendar lists the calendar-table columns writable via the API.
var whitelistCalendar = []string{
	"text",
	"color",
	"active",
}
// getWhiteList returns a fresh copy of the writable-column whitelist for
// the given table: "event" gets the event columns, anything else the
// calendar columns.
func getWhiteList(table string) []string {
	var source []string
	if table == "event" {
		source = whitelistEvent
	} else {
		source = whitelistCalendar
	}
	// Copy so callers can never mutate the package-level whitelists.
	out := make([]string, 0, len(source))
	return append(out, source...)
}
// sendUpdateQuery updates the record with the given id in table, using only
// whitelisted form fields as SET clauses. Returns an error when the form
// contains no updatable fields or when the query fails.
func sendUpdateQuery(table string, form url.Values, id string) error {
	qs := "UPDATE " + table + " SET "
	params := make([]interface{}, 0)
	for _, key := range getWhiteList(table) {
		if value, ok := form[key]; ok {
			qs += key + " = ?, "
			params = append(params, value[0])
		}
	}
	if len(params) == 0 {
		// Guard: without any assignments the slicing below would chop two
		// characters off "SET " and produce malformed SQL.
		return fmt.Errorf("no valid fields to update in table %q", table)
	}
	params = append(params, id)
	// Trim the trailing ", " before appending the WHERE clause.
	_, err := conn.Exec(qs[:len(qs)-2]+" WHERE id = ?", params...)
	return err
}
// sendInsertQuery inserts a new record into table using only whitelisted
// form fields, returning the driver result (for LastInsertId). Returns an
// error when the form contains no insertable fields or the query fails.
func sendInsertQuery(table string, form map[string][]string) (sql.Result, error) {
	qsk := "INSERT INTO " + table + " ("
	qsv := "VALUES ("
	params := make([]interface{}, 0)
	for _, key := range getWhiteList(table) {
		if value, ok := form[key]; ok {
			qsk += key + ", "
			qsv += "?, "
			params = append(params, value[0])
		}
	}
	if len(params) == 0 {
		// Guard: with no columns the slicing below would corrupt the SQL
		// (it would eat the opening parenthesis).
		return nil, fmt.Errorf("no valid fields to insert into table %q", table)
	}
	// Trim the trailing ", " from both the column and the value list.
	qsk = qsk[:len(qsk)-2] + ") "
	qsv = qsv[:len(qsv)-2] + ")"
	res, err := conn.Exec(qsk+qsv, params...)
	return res, err
}
|
package cmd
import (
"errors"
"fmt"
"git-stack/branchstack"
"os"
"github.com/spf13/cobra"
)
var (
	// rootCmd is the top-level git-stack command; subcommands are attached
	// to it in init.
	rootCmd = &cobra.Command{
		Use:   "git-stack",
		Short: "Keep track of WIP git branches in a stack.",
	}
	// pushCmd pushes its single branch-name argument onto the stack; the
	// stack file is loaded before and written back after the run.
	pushCmd = &cobra.Command{
		Use:    "push",
		Short:  "Push a branch name onto the stack.",
		PreRun: loadFile,
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) < 1 {
				return errors.New("missing branch")
			}
			branchstack.PushBranch(args[0])
			return nil
		},
		PostRun:      writeFile,
		SilenceUsage: true,
	}
	// popCmd pops the most recent branch name from the stack and prints it
	// to stdout (no trailing newline, so it can be consumed by scripts).
	popCmd = &cobra.Command{
		Use:    "pop",
		Short:  "Pop a branch name from the stack.",
		PreRun: loadFile,
		Run: func(cmd *cobra.Command, args []string) {
			item := branchstack.PopBranch()
			fmt.Fprint(os.Stdout, item)
		},
		PostRun:      writeFile,
		SilenceUsage: true,
	}
)
// init attaches the push and pop subcommands to the root command.
func init() {
	rootCmd.AddCommand(pushCmd, popCmd)
}
// Execute runs the git-stack CLI and returns any command error.
func Execute() error {
	err := rootCmd.Execute()
	return err
}
// loadFile is a cobra PreRun hook that loads the persisted branch stack
// from disk before a command runs.
func loadFile(_ *cobra.Command, _ []string) {
	branchstack.MustRead()
}
func writeFile(cmd *cobra.Command, args []string) {
branchstack.MustWrite()
} |
package main
// Config holds the runtime settings for this process.
type Config struct {
	bind       string // presumably the network listen address — verify against the code that reads it
	data       string // data location; exact semantics not visible in this file
	db         string // database name or path; exact semantics not visible in this file
	gcInterval int    // Seconds, default 300s
	gcLimit    int    // Number of entries to scan each gc, default 100
}
|
package routers
import (
"github.com/barrydev/api-3h-shop/src/common/response"
"github.com/barrydev/api-3h-shop/src/controllers"
"github.com/gin-gonic/gin"
)
// BindCurrent registers the unauthenticated current-order endpoint on the
// given router group.
func BindCurrent(router *gin.RouterGroup) {
	router.GET("/order", func(ctx *gin.Context) {
		h := response.Handle{Context: ctx}
		h.Try(controllers.GetCurrentOrderV2).Then(response.SendSuccess).Catch(response.SendError)
	})
}
// BindCurrentAuth registers the authenticated current-order endpoint on the
// given router group.
func BindCurrentAuth(router *gin.RouterGroup) {
	router.GET("/order", func(ctx *gin.Context) {
		h := response.Handle{Context: ctx}
		h.Try(controllers.GetCurrentOrderAuth).Then(response.SendSuccess).Catch(response.SendError)
	})
}
|
package main
import "fmt"
// main demonstrates 2D-array iteration: it prints a 4x6 grid (with a single
// cell set to 1) row by row, then prints every column index / value pair.
func main() {
	var grid [4][6]int
	grid[2][2] = 1
	for _, row := range grid {
		for _, cell := range row {
			fmt.Print(cell, " ")
		}
		fmt.Println()
	}
	for _, row := range grid {
		for col, cell := range row {
			fmt.Println(col, cell)
		}
	}
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/csi-lib-utils/connection"
"github.com/kubernetes-csi/csi-lib-utils/leaderelection"
"github.com/kubernetes-csi/csi-lib-utils/metrics"
"github.com/kubernetes-csi/csi-lib-utils/rpc"
"google.golang.org/grpc"
monitorcontroller "github.com/kubernetes-csi/external-health-monitor/pkg/controller"
)
const (
	// csiTimeout bounds short CSI calls like GetPluginInfo.
	csiTimeout = time.Second
)
// Command line flags
var (
monitorInterval = flag.Duration("monitor-interval", 1*time.Minute, "Interval for controller to check volumes health condition.")
kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig file. Required only when running out of cluster.")
resync = flag.Duration("resync", 10*time.Minute, "Resync interval of the controller.")
csiAddress = flag.String("csi-address", "/run/csi/socket", "Address of the CSI driver socket.")
showVersion = flag.Bool("version", false, "Show version.")
timeout = flag.Duration("timeout", 15*time.Second, "Timeout for waiting for attaching or detaching the volume.")
listVolumesInterval = flag.Duration("list-volumes-interval", 5*time.Minute, "Time interval for calling ListVolumes RPC to check volumes' health condition")
volumeListAndAddInterval = flag.Duration("volume-list-add-interval", 5*time.Minute, "Time interval for listing volumes and add them to queue")
nodeListAndAddInterval = flag.Duration("node-list-add-interval", 5*time.Minute, "Time interval for listing nodess and add them to queue")
workerThreads = flag.Uint("worker-threads", 10, "Number of pv monitor worker threads")
enableNodeWatcher = flag.Bool("enable-node-watcher", false, "Indicates whether the node watcher is enabled or not.")
enableLeaderElection = flag.Bool("leader-election", false, "Enable leader election.")
leaderElectionNamespace = flag.String("leader-election-namespace", "", "Namespace where the leader election resource lives. Defaults to the pod namespace if not set.")
leaderElectionLeaseDuration = flag.Duration("leader-election-lease-duration", 15*time.Second, "Duration, in seconds, that non-leader candidates will wait to force acquire leadership. Defaults to 15 seconds.")
leaderElectionRenewDeadline = flag.Duration("leader-election-renew-deadline", 10*time.Second, "Duration, in seconds, that the acting leader will retry refreshing leadership before giving up. Defaults to 10 seconds.")
leaderElectionRetryPeriod = flag.Duration("leader-election-retry-period", 5*time.Second, "Duration, in seconds, the LeaderElector clients should wait between tries of actions. Defaults to 5 seconds.")
metricsAddress = flag.String("metrics-address", "", "(deprecated) The TCP network address where the prometheus metrics endpoint will listen (example: `:8080`). The default is empty string, which means metrics endpoint is disabled. Only one of `--metrics-address` and `--http-endpoint` can be set.")
httpEndpoint = flag.String("http-endpoint", "", "The TCP network address where the HTTP server for diagnostics, including metrics and leader election health check, will listen (example: `:8080`). The default is empty string, which means the server is disabled. Only one of `--metrics-address` and `--http-endpoint` can be set.")
metricsPath = flag.String("metrics-path", "/metrics", "The HTTP path where prometheus metrics will be exposed. Default is `/metrics`.")
)
var (
	// version reported by --version; presumably overridden at build time
	// via -ldflags — verify the build scripts.
	version = "unknown"
)
// main parses flags, connects to the Kubernetes API and the CSI driver,
// validates the driver's capabilities, then runs the PV monitor controller,
// optionally under leader election. Every setup failure exits the process.
func main() {
	klog.InitFlags(nil)
	flag.Set("logtostderr", "true")
	flag.Parse()
	if *showVersion {
		fmt.Println(os.Args[0], version)
		return
	}
	klog.Infof("Version: %s", version)
	// The two metrics endpoint flags are mutually exclusive.
	if *metricsAddress != "" && *httpEndpoint != "" {
		klog.Error("only one of `--metrics-address` and `--http-endpoint` can be set.")
		os.Exit(1)
	}
	addr := *metricsAddress
	if addr == "" {
		addr = *httpEndpoint
	}
	// Create the client config. Use kubeconfig if given, otherwise assume in-cluster.
	config, err := buildConfig(*kubeconfig)
	if err != nil {
		klog.Error(err.Error())
		os.Exit(1)
	}
	if *workerThreads == 0 {
		klog.Error("option -worker-threads must be greater than zero")
		os.Exit(1)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		klog.Error(err.Error())
		os.Exit(1)
	}
	factory := informers.NewSharedInformerFactory(clientset, *resync)
	// The driver name is filled in below, once it is known.
	metricsManager := metrics.NewCSIMetricsManager("" /* driverName */)
	// Connect to CSI.
	csiConn, err := connection.Connect(*csiAddress, metricsManager, connection.OnConnectionLoss(connection.ExitOnConnectionLoss()))
	if err != nil {
		klog.Error(err.Error())
		os.Exit(1)
	}
	// Block until the driver responds to Probe (with per-call timeout).
	err = rpc.ProbeForever(csiConn, *timeout)
	if err != nil {
		klog.Error(err.Error())
		os.Exit(1)
	}
	// Find driver name.
	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
	defer cancel()
	storageDriver, err := rpc.GetDriverName(ctx, csiConn)
	if err != nil {
		klog.Error(err.Error())
		os.Exit(1)
	}
	klog.V(2).Infof("CSI driver name: %q", storageDriver)
	metricsManager.SetDriverName(storageDriver)
	// Prepare HTTP endpoint for metrics + leader election healthz
	mux := http.NewServeMux()
	if addr != "" {
		metricsManager.RegisterToServer(mux, *metricsPath)
		go func() {
			klog.Infof("ServeMux listening at %q", addr)
			err := http.ListenAndServe(addr, mux)
			if err != nil {
				klog.Fatalf("Failed to start HTTP server at specified address (%q) and metrics path (%q): %s", addr, *metricsPath, err)
			}
		}()
	}
	// The monitor needs the controller service, VolumeCondition, and at
	// least one of ListVolumes / GetVolume; otherwise it cannot work.
	supportsService, err := supportsPluginControllerService(ctx, csiConn)
	if err != nil {
		klog.Error(err.Error())
		os.Exit(1)
	}
	if !supportsService {
		klog.V(2).Infof("CSI driver does not support Plugin Controller Service, exiting")
		os.Exit(1)
	}
	supportControllerListVolumes, err := supportControllerListVolumes(ctx, csiConn)
	if err != nil {
		klog.Error(err.Error())
		os.Exit(1)
	}
	supportControllerGetVolume, err := supportControllerGetVolume(ctx, csiConn)
	if err != nil {
		klog.Error(err.Error())
		os.Exit(1)
	}
	supportControllerVolumeCondition, err := supportControllerVolumeCondition(ctx, csiConn)
	if err != nil {
		klog.Error(err.Error())
		os.Exit(1)
	}
	if (!supportControllerListVolumes && !supportControllerGetVolume) || !supportControllerVolumeCondition {
		klog.V(2).Infof("CSI driver does not support Controller ListVolumes and GetVolume service or does not implement VolumeCondition, exiting")
		os.Exit(1)
	}
	option := monitorcontroller.PVMonitorOptions{
		DriverName:                storageDriver,
		ContextTimeout:            *timeout,
		EnableNodeWatcher:         *enableNodeWatcher,
		SupportListVolume:         supportControllerListVolumes,
		ListVolumesInterval:       *listVolumesInterval,
		PVWorkerExecuteInterval:   *monitorInterval,
		VolumeListAndAddInterval:  *volumeListAndAddInterval,
		NodeWorkerExecuteInterval: *monitorInterval,
		NodeListAndAddInterval:    *nodeListAndAddInterval,
	}
	// Events emitted by the monitor are attributed to this controller.
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: clientset.CoreV1().Events(v1.NamespaceAll)})
	eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("csi-pv-monitor-controller-%s", option.DriverName)})
	monitorController := monitorcontroller.NewPVMonitorController(clientset, csiConn, factory.Core().V1().PersistentVolumes(),
		factory.Core().V1().PersistentVolumeClaims(), factory.Core().V1().Pods(), factory.Core().V1().Nodes(), factory.Core().V1().Events(), eventRecorder, &option)
	// run starts the informers and the controller; it blocks until the
	// context is cancelled.
	run := func(ctx context.Context) {
		stopCh := ctx.Done()
		factory.Start(stopCh)
		monitorController.Run(int(*workerThreads), stopCh)
	}
	if !*enableLeaderElection {
		run(context.TODO())
	} else {
		// Name of config map with leader election lock
		lockName := "external-health-monitor-leader-" + storageDriver
		le := leaderelection.NewLeaderElection(clientset, lockName, run)
		if *httpEndpoint != "" {
			le.PrepareHealthCheck(mux, leaderelection.DefaultHealthCheckTimeout)
		}
		if *leaderElectionNamespace != "" {
			le.WithNamespace(*leaderElectionNamespace)
		}
		le.WithLeaseDuration(*leaderElectionLeaseDuration)
		le.WithRenewDeadline(*leaderElectionRenewDeadline)
		le.WithRetryPeriod(*leaderElectionRetryPeriod)
		if err := le.Run(); err != nil {
			klog.Fatalf("failed to initialize leader election: %v", err)
		}
	}
}
// buildConfig returns a Kubernetes client config built from the given
// kubeconfig path, falling back to in-cluster configuration when the path
// is empty.
func buildConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig == "" {
		return rest.InClusterConfig()
	}
	return clientcmd.BuildConfigFromFlags("", kubeconfig)
}
// supportControllerListVolumes reports whether the CSI controller service
// advertises the LIST_VOLUMES capability.
func supportControllerListVolumes(ctx context.Context, csiConn *grpc.ClientConn) (bool, error) {
	caps, err := rpc.GetControllerCapabilities(ctx, csiConn)
	if err != nil {
		return false, fmt.Errorf("failed to get controller capabilities: %v", err)
	}
	supported := caps[csi.ControllerServiceCapability_RPC_LIST_VOLUMES]
	return supported, nil
}
// TODO: move this to csi-lib-utils
// supportControllerGetVolume reports whether the CSI controller service
// advertises the GET_VOLUME RPC capability.
func supportControllerGetVolume(ctx context.Context, csiConn *grpc.ClientConn) (bool, error) {
	client := csi.NewControllerClient(csiConn)
	rsp, err := client.ControllerGetCapabilities(ctx, &csi.ControllerGetCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, capability := range rsp.GetCapabilities() {
		// Generated protobuf getters are nil-receiver safe, so a nil
		// capability simply yields a nil rpcCap here.
		rpcCap := capability.GetRpc()
		if rpcCap != nil && rpcCap.GetType() == csi.ControllerServiceCapability_RPC_GET_VOLUME {
			return true, nil
		}
	}
	return false, nil
}
// TODO: move this to csi-lib-utils
// supportControllerVolumeCondition reports whether the CSI controller
// service advertises the VOLUME_CONDITION RPC capability.
func supportControllerVolumeCondition(ctx context.Context, csiConn *grpc.ClientConn) (bool, error) {
	client := csi.NewControllerClient(csiConn)
	rsp, err := client.ControllerGetCapabilities(ctx, &csi.ControllerGetCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, capability := range rsp.GetCapabilities() {
		// Generated protobuf getters are nil-receiver safe, so a nil
		// capability simply yields a nil rpcCap here.
		rpcCap := capability.GetRpc()
		if rpcCap != nil && rpcCap.GetType() == csi.ControllerServiceCapability_RPC_VOLUME_CONDITION {
			return true, nil
		}
	}
	return false, nil
}
// TODO: move this to csi-lib-utils
// supportsPluginControllerService reports whether the CSI identity service
// advertises the CONTROLLER_SERVICE plugin capability.
func supportsPluginControllerService(ctx context.Context, csiConn *grpc.ClientConn) (bool, error) {
	client := csi.NewIdentityClient(csiConn)
	rsp, err := client.GetPluginCapabilities(ctx, &csi.GetPluginCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, capability := range rsp.GetCapabilities() {
		// Generated protobuf getters are nil-receiver safe, so a nil
		// capability simply yields a nil service here.
		svc := capability.GetService()
		if svc != nil && svc.GetType() == csi.PluginCapability_Service_CONTROLLER_SERVICE {
			return true, nil
		}
	}
	return false, nil
}
|
package main
import (
"../common"
"cloud.google.com/go/pubsub"
"context"
"fmt"
"google.golang.org/api/option"
"io/ioutil"
"log"
"os"
"strconv"
"time"
)
// main is a small CLI that publishes hexapod orders (walk, reset,
// rotate, arms) to the "servo" Pub/Sub topic, or subscribes to the
// "heartbeat" subscription. A message sequence number is persisted in
// seq.txt between runs.
func main() {
	ctx := context.Background()
	c, err := NewOrderChannel(ctx, "hexapod", "servo")
	if err != nil {
		log.Fatalf("Failed to create channel: %v", err)
		return
	}
	if len(os.Args) < 2 {
		log.Fatalf("param")
		return
	}
	// Load the persisted sequence number; the file must already exist.
	var sequence int
	data, err := ioutil.ReadFile(`seq.txt`)
	if err != nil {
		log.Fatalf("failed to read seq.txt: %v", err)
		return
	}
	sequence, err = strconv.Atoi(string(data))
	if err != nil {
		log.Fatalf("failed to read seq.txt %v", err)
		return
	}
	switch os.Args[1] {
	case "heartbeat":
		// PubSub: block printing heartbeat messages until Receive returns.
		ctx := context.Background()
		projectID := "hexapod"
		subscription := "heartbeat"
		client, err := pubsub.NewClient(ctx, projectID, option.WithCredentialsFile("cred.json"))
		if err != nil {
			log.Fatalf("Failed to create client: %v", err)
		}
		sub := client.Subscription(subscription)
		_ = sub.Receive(ctx, func(c context.Context, m *pubsub.Message) {
			fmt.Println(string(m.Data))
			m.Ack()
		})
	case "walk":
		err = c.Publish(ctx, &common.Message{
			Now:         time.Now(),
			Sequence:    sequence,
			MessageType: common.MessageTypeWalk,
			Arms:        nil,
		})
	case "reset":
		// Jump the sequence forward so a reset always supersedes
		// in-flight orders.
		sequence += 100
		// All six arms reset to zero degrees.
		arms := make([]*common.Arm, 6)
		for i := range arms {
			arms[i] = &common.Arm{Degrees: []float64{0, 0}}
		}
		err = c.Publish(ctx, &common.Message{
			Now:         time.Now(),
			Sequence:    sequence,
			MessageType: common.MessageTypeReset,
			Arms:        arms,
		})
	case "rotate":
		err = c.Publish(ctx, &common.Message{
			Now:         time.Now(),
			Sequence:    sequence,
			MessageType: common.MessageTypeRotate,
			Arms:        nil,
		})
	case "arms":
		// Eight angle arguments are required: os.Args[2] .. os.Args[9].
		// The original indexed them without a length check and panicked
		// when arguments were missing.
		if len(os.Args) < 10 {
			log.Fatalf("param")
			return
		}
		d := make([]float64, 8)
		for i := range d {
			// bitSize must be 32 or 64 for ParseFloat; the original
			// passed 10, which is not a valid float size.
			d[i], _ = strconv.ParseFloat(os.Args[i+2], 64)
		}
		err = c.Publish(ctx, &common.Message{
			Now:         time.Now(),
			Sequence:    sequence,
			MessageType: common.MessageTypeArms,
			Arms: []*common.Arm{
				{Degrees: []float64{d[2], d[1]}},
				{Degrees: []float64{d[3], d[0]}},
				{Degrees: []float64{d[4], d[1]}},
				{Degrees: []float64{d[5], d[0]}},
				{Degrees: []float64{d[6], d[1]}},
				{Degrees: []float64{d[7], d[0]}},
			},
		})
	default:
		log.Fatalf("param")
	}
	if err != nil {
		log.Fatalf("Failed to publish: %v", err)
		return
	}
	// Persist the next sequence number for the following invocation.
	sequence++
	err = ioutil.WriteFile("seq.txt", []byte(fmt.Sprintf("%d", sequence)), 0644)
	if err != nil {
		log.Fatalf("Failed to write seq: %v", err)
		return
	}
	return
}
// OrderChannel publishes order messages to a single Pub/Sub topic.
type OrderChannel struct {
	index int // always initialized to 0; not read anywhere in this file
	topic *pubsub.Topic
}
// NewOrderChannel opens a Pub/Sub client for projectID using the
// credentials in cred.json and binds it to topicName.
func NewOrderChannel(ctx context.Context, projectID, topicName string) (*OrderChannel, error) {
	client, err := pubsub.NewClient(ctx, projectID, option.WithCredentialsFile("cred.json"))
	if err != nil {
		return nil, err
	}
	return &OrderChannel{
		index: 0,
		topic: client.Topic(topicName),
	}, nil
}
// Publish sends the serialized message to the topic and waits for the
// server's acknowledgement before returning.
func (o *OrderChannel) Publish(ctx context.Context, message *common.Message) error {
	result := o.topic.Publish(ctx, &pubsub.Message{
		Data: message.Message(),
	})
	if _, err := result.Get(ctx); err != nil {
		return err
	}
	return nil
}
|
package api
import (
"gf-init/app/service"
"gf-init/library/response"
"github.com/gogf/gf/net/ghttp"
)
// User is the exported handler set for user-related routes.
var User = userApi{}

type userApi struct{}

// Index is a demonstration route handler for output "Hello World!".
func (*userApi) Index(r *ghttp.Request) {
	if err := service.User.GetAllUsers(); err != nil {
		response.JsonExit(r, 1, err.Error())
		return
	}
	response.JsonExit(r, 0, "ok")
}
|
package schema
import (
"strings"
)
// StringContains reports whether subs occurs within s (case-sensitive).
func StringContains(s, subs string) bool {
	return strings.Index(s, subs) >= 0
}
// StringContainsIgnoreCase reports whether subs occurs within s,
// ignoring case (both operands are lower-cased before the check).
func StringContainsIgnoreCase(s, subs string) bool {
	return strings.Contains(strings.ToLower(s), strings.ToLower(subs))
}
|
package log
import "context"
// constant
const key = "log_fields"
// ToContext returns a context carrying the package logger's current
// fields, delegating to the package-level logger l. The commented-out
// block below is the former inline implementation, kept for reference.
func ToContext(ctx context.Context) context.Context {
	return l.ToContext(ctx)
	//
	//fields := logger.GetFields()
	//
	//ctxFields := fieldsFromContext(ctx)
	//
	//if ctxFields == nil {
	//	ctxFields = map[string]interface{}{}
	//}
	//
	//for k, v := range fields {
	//	ctxFields[k] = v
	//}
	//
	//return context.WithValue(ctx, key, ctxFields)
}
// FromContext returns a Logger pre-populated with the fields stored in
// ctx, delegating to the package-level logger l. The commented-out
// lines are the former inline implementation.
func FromContext(ctx context.Context) Logger {
	return l.FromContext(ctx)
	//fields := fieldsFromContext(ctx)
	//return l.WithFields(fields)
}
// fieldsFromContext extracts the Fields stored in ctx under the package
// key. It returns an empty Fields for a nil context and nil when
// nothing usable is stored.
func fieldsFromContext(ctx context.Context) Fields {
	if ctx == nil {
		return Fields{}
	}
	// Single lookup with a checked type assertion. The original looked
	// the key up twice and asserted unconditionally, which would panic
	// if a value of a different type was stored under the same key.
	if fields, ok := ctx.Value(key).(Fields); ok {
		return fields
	}
	return nil
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package width_test
import (
"fmt"
"gx/ipfs/QmVcxhXDbXjNoAdmYBWbY1eU67kQ8eZUHjG4mAYZUtZZu3/go-text/width"
)
// ExampleTransformer_fold demonstrates width.Fold, which maps
// half-/fullwidth variants to their canonical forms.
// (The // Output: comment is checked by the example test runner.)
func ExampleTransformer_fold() {
	s := "abヲ₩○¥A"
	f := width.Fold.String(s)
	fmt.Printf("%U: %s\n", []rune(s), s)
	fmt.Printf("%U: %s\n", []rune(f), f)
	// Output:
	// [U+0061 U+0062 U+FF66 U+FFE6 U+FFEE U+FFE5 U+FF21]: abヲ₩○¥A
	// [U+0061 U+0062 U+30F2 U+20A9 U+25CB U+00A5 U+0041]: abヲ₩○¥A
}
// ExampleTransformer_widen demonstrates width.Widen, which maps runes
// to their fullwidth forms where one exists.
func ExampleTransformer_widen() {
	s := "ab¥ヲ₩○"
	w := width.Widen.String(s)
	fmt.Printf("%U: %s\n", []rune(s), s)
	fmt.Printf("%U: %s\n", []rune(w), w)
	// Output:
	// [U+0061 U+0062 U+00A5 U+FF66 U+20A9 U+FFEE]: ab¥ヲ₩○
	// [U+FF41 U+FF42 U+FFE5 U+30F2 U+FFE6 U+25CB]: ab¥ヲ₩○
}
// ExampleTransformer_narrow demonstrates width.Narrow, which maps runes
// to their halfwidth forms, including ambiguous-width characters.
func ExampleTransformer_narrow() {
	s := "abヲ₩○¥A"
	n := width.Narrow.String(s)
	fmt.Printf("%U: %s\n", []rune(s), s)
	fmt.Printf("%U: %s\n", []rune(n), n)
	// Ambiguous characters with a halfwidth equivalent get mapped as well.
	s = "←"
	n = width.Narrow.String(s)
	fmt.Printf("%U: %s\n", []rune(s), s)
	fmt.Printf("%U: %s\n", []rune(n), n)
	// Output:
	// [U+0061 U+0062 U+30F2 U+FFE6 U+25CB U+FFE5 U+FF21]: abヲ₩○¥A
	// [U+0061 U+0062 U+FF66 U+20A9 U+FFEE U+00A5 U+0041]: abヲ₩○¥A
	// [U+2190]: ←
	// [U+FFE9]: ←
}
|
package udp
import (
"github.com/golang/glog"
"net"
"sync"
)
// clientConn wraps a connected *net.UDPConn so Dial's result satisfies
// net.Conn.
type clientConn struct {
	*net.UDPConn
}
// Dial connects a UDP client socket to address and wraps it as a
// net.Conn. On failure the returned conn is nil (the original returned
// a clientConn wrapping a nil *net.UDPConn alongside the error).
func Dial(network, address string) (conn net.Conn, err error) {
	raddr, err := net.ResolveUDPAddr(network, address)
	if err != nil {
		return nil, err
	}
	c, err := net.DialUDP(network, nil, raddr)
	if err != nil {
		return nil, err
	}
	return &clientConn{c}, nil
}
// serverConn is a per-remote-address virtual connection multiplexed on
// the listener's single UDP socket.
type serverConn struct {
	*net.UDPConn // shared listening socket; writes go through it
	raddr *net.UDPAddr // the remote peer this connection represents
	input chan []byte // datagrams routed here by the listener's read loop
	error chan error // read errors routed here by the listener's read loop
	ln *Listener // owning listener, for deregistration on Close
}
// RemoteAddr returns the peer address this virtual connection serves.
func (c *serverConn) RemoteAddr() net.Addr {
	return c.raddr
}
// Read blocks until the listener's read loop routes either the next
// datagram or a read error to this connection.
//
// Bug fix: the original returned len(s) even when the datagram was
// longer than b — a count larger than the buffer violates the io.Reader
// contract and can make callers index out of range. We now return the
// number of bytes actually copied (the excess of an oversized datagram
// is dropped, as before — it was never retrievable).
func (c *serverConn) Read(b []byte) (n int, err error) {
	select {
	case err = <-c.error:
		glog.V(1).Infoln("udp serverConn read", err)
		return
	case s := <-c.input:
		glog.V(3).Infoln("udp serverConn read", s)
		n = copy(b, s)
		return n, nil
	}
}
// Write sends b to this connection's peer through the shared UDP socket.
func (c *serverConn) Write(b []byte) (n int, err error) {
	glog.V(3).Infoln("udp serverConn write", c.raddr, b)
	return c.WriteToUDP(b, c.raddr)
}
// Close deregisters this virtual connection from the listener's table.
// The shared UDP socket stays open for the other connections.
func (c *serverConn) Close() error {
	glog.V(2).Infoln("udp serverConn close")
	c.ln.mutex.Lock()
	delete(c.ln.conns, c.RemoteAddr().String())
	c.ln.mutex.Unlock()
	return nil
}
// Listener accepts virtual UDP "connections" by demultiplexing a single
// UDP socket per remote address.
type Listener struct {
	udpConn *net.UDPConn
	conns map[string]*serverConn // remote address string -> connection
	mutex sync.Mutex // guards conns
	done chan struct{} // signals the background read loop to stop
	new chan *serverConn // newly seen remotes, consumed by Accept
	newErr chan error // read errors with no remote, consumed by Accept
}
// Listen binds a UDP socket on address and starts a background loop
// that routes each incoming datagram to the serverConn for its source
// address, announcing previously unseen remotes on ln.new so Accept can
// hand them out.
func Listen(network, address string) (ln *Listener, err error) {
	ln = &Listener{
		conns: make(map[string]*serverConn),
		done: make(chan struct{}),
		new: make(chan *serverConn),
		newErr: make(chan error),
	}
	laddr, err := net.ResolveUDPAddr(network, address)
	if err != nil {
		return
	}
	ln.udpConn, err = net.ListenUDP(network, laddr)
	if err != nil {
		return
	}
	var conn *serverConn
	go func() {
		for {
			// Non-blocking stop check; the loop otherwise parks in
			// ReadFromUDP below.
			select {
			case <-ln.done:
				return
			default:
			}
			// allocate memory every time may be a bad ideal
			b := make([]byte, 65535)
			n, raddr, err := ln.udpConn.ReadFromUDP(b)
			if raddr == nil {
				// NOTE(review): this send blocks until someone calls
				// Accept — verify an error storm cannot stall the loop.
				ln.newErr <- err
				continue
			}
			ln.mutex.Lock()
			conn = ln.conns[raddr.String()]
			ln.mutex.Unlock()
			if conn == nil {
				// First datagram from this remote: create the virtual
				// connection and hand it to Accept (blocking send).
				conn = &serverConn{ln.udpConn, raddr, make(chan []byte), make(chan error), ln}
				ln.new <- conn
			}
			if err != nil {
				conn.error <- err
			} else {
				// NOTE(review): unbuffered send — a connection nobody
				// reads from blocks the whole listener loop; confirm
				// this backpressure is intended.
				conn.input <- b[:n]
			}
		}
	}()
	return
}
// Accept blocks until the read loop reports either a new remote
// (returned as a net.Conn and registered in the connection table) or a
// read error.
func (ln *Listener) Accept() (conn net.Conn, err error) {
	glog.V(2).Infoln("udp listener waiting to accept")
	select {
	case conn = <-ln.new:
	case err = <-ln.newErr:
	}
	if err != nil {
		glog.V(1).Infoln("udp listener fail to accept", err)
		return
	}
	ln.mutex.Lock()
	ln.conns[conn.RemoteAddr().String()] = conn.(*serverConn)
	ln.mutex.Unlock()
	glog.V(2).Infoln("udp listener accepted", conn.RemoteAddr())
	return
}
// Close stops the background read loop and closes the UDP socket.
//
// Bug fix: the original sent on ln.done, which blocks until the loop
// reaches its select — but the loop is normally parked inside
// ReadFromUDP, so Close could deadlock. Closing the channel makes the
// stop signal non-blocking, and closing the socket unblocks the pending
// read. (Close must not be called twice, as before.)
func (ln *Listener) Close() (err error) {
	glog.V(2).Infoln("udp listener closing")
	close(ln.done)
	return ln.udpConn.Close()
}
// Addr returns the local address of the underlying UDP socket.
func (ln *Listener) Addr() net.Addr {
	return ln.udpConn.LocalAddr()
}
|
// findDisappearedNumbers returns every value in [1, len(nums)] that is
// absent from nums (values are known to lie in that range). It works in
// O(n) time and O(1) extra space by cyclically swapping each value v
// into slot v-1, then reporting slots whose value does not match.
// Note: nums is rearranged in place.
func findDisappearedNumbers(nums []int) []int {
	for i := range nums {
		// Keep placing the value at i into its home slot until slot i
		// holds a value that is already home (or a duplicate of it).
		for nums[nums[i]-1] != nums[i] {
			home := nums[i] - 1
			nums[i], nums[home] = nums[home], nums[i]
		}
	}
	missing := []int{}
	for idx, val := range nums {
		if val != idx+1 {
			missing = append(missing, idx+1)
		}
	}
	return missing
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry_test
import (
"fmt"
"testing"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/telemetry"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/testutils"
)
// TestBuiltinFunctionsUsage checks that running `select id + 1 - 2`
// is reported as one PlusInt and one MinusInt in the builtin-function
// telemetry once the session closes.
func TestBuiltinFunctionsUsage(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_non_prepared_plan_cache=0") // affect this UT
	// Clear builtin functions usage
	telemetry.GlobalBuiltinFunctionsUsage.Dump()
	usage := telemetry.GlobalBuiltinFunctionsUsage.Dump()
	require.Equal(t, map[string]uint32{}, usage)
	tk.MustExec("create table t (id int)")
	tk.MustQuery("select id + 1 - 2 from t")
	// Should manually invoke `Session.Close()` to report the usage information
	tk.Session().Close()
	usage = telemetry.GlobalBuiltinFunctionsUsage.Dump()
	require.Equal(t, map[string]uint32{"PlusInt": 1, "MinusInt": 1}, usage)
}
// withMockTiFlash sets the mockStore to have N TiFlash stores (naming as tiflash0, tiflash1, ...).
// Each store gets an "engine: tiflash" label and a peer of the
// bootstrap region so TiFlash reads can be routed to it.
func withMockTiFlash(nodes int) mockstore.MockTiKVStoreOption {
	return mockstore.WithMultipleOptions(
		mockstore.WithClusterInspector(func(c testutils.Cluster) {
			mockCluster := c.(*unistore.Cluster)
			_, _, region1 := mockstore.BootstrapWithSingleStore(c)
			tiflashIdx := 0
			for tiflashIdx < nodes {
				store2 := c.AllocID()
				peer2 := c.AllocID()
				addr2 := fmt.Sprintf("tiflash%d", tiflashIdx)
				mockCluster.AddStore(store2, addr2, &metapb.StoreLabel{Key: "engine", Value: "tiflash"})
				mockCluster.AddPeer(region1, store2, peer2)
				tiflashIdx++
			}
		}),
		mockstore.WithStoreType(mockstore.EmbedUnistore),
	)
}
// TestTiflashUsage checks the TiFlash table-scan telemetry counters:
// two scans are issued, the second with fastscan enabled, and the
// counters are asserted after the session closes.
func TestTiflashUsage(t *testing.T) {
	store := testkit.CreateMockStore(t, withMockTiFlash(1))
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t (id int)")
	tk.MustExec("alter table t set tiflash replica 1")
	// Mark the replica as available directly in the info schema so the
	// optimizer may choose TiFlash without waiting for replication.
	dom := domain.GetDomain(tk.Session())
	is := dom.InfoSchema()
	db, _ := is.SchemaByName(model.NewCIStr("test"))
	for _, tblInfo := range db.Tables {
		if tblInfo.Name.L == "t" {
			tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{
				Count:     1,
				Available: true,
			}
		}
	}
	// NOTE(review): this resets CurrentTiFlashPushDownCount, yet the
	// assertions below read CurrentTiflashTableScanCount — confirm the
	// intended counter is being reset.
	telemetry.CurrentTiFlashPushDownCount.Swap(0)
	telemetry.CurrentTiflashTableScanWithFastScanCount.Swap(0)
	require.Equal(t, telemetry.CurrentTiflashTableScanCount.String(), "0")
	require.Equal(t, telemetry.CurrentTiflashTableScanWithFastScanCount.String(), "0")
	tk.MustExec("set session tidb_isolation_read_engines='tiflash';")
	tk.MustQuery(`select count(*) from t`)
	tk.MustExec(`set @@session.tiflash_fastscan=ON`)
	tk.MustExec(`set session tidb_isolation_read_engines="tiflash";`)
	tk.MustQuery(`select count(*) from test.t`)
	tk.Session().Close()
	require.Equal(t, telemetry.CurrentTiflashTableScanCount.String(), "2")
	require.Equal(t, telemetry.CurrentTiflashTableScanWithFastScanCount.String(), "1")
}
|
// Copyright 2013 The Agostle Authors. All rights reserved.
// Use of this source code is governed by an Apache 2.0
// license that can be found in the LICENSE file.
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"strconv"
"strings"
//"encoding/json"
"net/http"
"sync"
"time"
"github.com/mreiferson/go-httpclient"
)
// client is the shared HTTP client; the aggressive connect/request
// timeouts keep a hung temperature source from blocking callers.
var client = &http.Client{Transport: &httpclient.Transport{
	DisableCompression: true,
	ConnectTimeout: 1 * time.Second,
	RequestTimeout: 5 * time.Second,
	ResponseHeaderTimeout: 3 * time.Second,
	MaxIdleConnsPerHost: 1,
}}
// retriever caches the last temperatures scraped from url; the embedded
// mutex serializes Get calls since temps is reused between fetches.
type retriever struct {
	url string
	temps []float32
	sync.Mutex
}
// Get refreshes and returns the temperature readings from r.url.
// The returned slice is r.temps itself, so it is only valid until the
// next call.
func (r *retriever) Get() ([]float32, error) {
	r.Lock()
	defer r.Unlock()
	var err error
	r.temps, err = retrieve(r.temps, r.url)
	return r.temps, err
}
// retrieve fetches url and scans the HTML line by line with a small
// state machine: (0) wait for `<div id="content">`, (1) wait for the
// "<h2>TS-05" heading, (2) wait for <tbody> and start buffering,
// (3) buffer until </tbody>. The buffered fragment is then parsed into
// temperatures via readCells, reusing the temps slice.
func retrieve(temps []float32, url string) ([]float32, error) {
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	scn := bufio.NewScanner(resp.Body)
	scn.Split(bufio.ScanLines)
	buf := bytes.NewBuffer(make([]byte, 0, 128))
	state := 0
Loop:
	for scn.Scan() {
		switch state {
		case 0:
			if bytes.Index(scn.Bytes(), []byte(`<div id="content">`)) >= 0 {
				state++
			}
		case 1:
			if bytes.Index(scn.Bytes(), []byte("<h2>TS-05")) >= 0 {
				state++
			}
		case 2:
			// Keep only what follows <tbody> on this line.
			if i := bytes.Index(scn.Bytes(), []byte("<tbody>")); i >= 0 {
				buf.Write(scn.Bytes()[i+7:])
				state++
			}
		case 3:
			// Keep only what precedes </tbody>, then stop scanning.
			if i := bytes.Index(scn.Bytes(), []byte("</tbody>")); i >= 0 {
				buf.Write(scn.Bytes()[:i])
				break Loop
			}
			buf.Write(scn.Bytes())
		}
	}
	if buf.Len() > 0 {
		if temps, err = readCells(temps, buf.String()); err != nil {
			return nil, err
		}
	}
	if err = scn.Err(); err != nil && err != io.EOF {
		return nil, err
	}
	return temps, nil
}
// readCells parses the <td> cells of a tbody fragment into float32
// temperatures, reusing the temps slice. Empty cells, "-", and
// unparseable cells yield 0. It returns an error only when the fragment
// contains no cells at all or a <td has no closing '>'.
func readCells(temps []float32, tbody string) ([]float32, error) {
	// Trim the fragment to the first row: start at the first <td and cut
	// at the first </tr>, when present.
	if i := strings.Index(tbody, "<td"); i < 0 {
		log.Printf("tbody=%q", tbody)
	} else {
		tbody = tbody[i:]
		if j := strings.Index(tbody, "</tr>"); j >= 0 {
			tbody = tbody[:j]
		}
	}
	log.Printf("tr=%q", tbody)
	temps = temps[:0]
	parseCell := func(cell string) float32 {
		if len(cell) == 0 || cell == "-" {
			return 0
		}
		f, err := parseFloat(cell)
		if err != nil {
			log.Printf("error parsing %q as float: %v", cell, err)
			return 0
		}
		return f
	}
	for len(tbody) > 0 {
		i := strings.Index(tbody, "<td")
		if i < 0 {
			if len(temps) == 0 {
				return nil, fmt.Errorf("no <td in %s", tbody)
			}
			// Bug fix: the original fell through here with i == -1 and
			// sliced tbody[i+3:], mis-slicing (or panicking) on trailing
			// text after the last cell. Trailing text means we're done.
			break
		}
		tbody = tbody[i+3:]
		if i = strings.Index(tbody, ">"); i < 0 {
			return nil, fmt.Errorf("no > after <td in %s", tbody)
		}
		tbody = tbody[i+1:]
		if i = strings.Index(tbody, "</td>"); i < 0 {
			// Unterminated final cell: take the rest as the cell text.
			temps = append(temps, parseCell(tbody))
			break
		}
		temps = append(temps, parseCell(tbody[:i]))
		tbody = tbody[i+5:]
	}
	return temps, nil
}
// parseFloat parses a table cell as a float32, accepting a single
// decimal comma in place of a point and ignoring surrounding space.
func parseFloat(cell string) (float32, error) {
	normalized := strings.TrimSpace(strings.Replace(cell, ",", ".", 1))
	f, err := strconv.ParseFloat(normalized, 32)
	return float32(f), err
}
|
package main
import "fmt"
// getHello returns a personalized greeting, or a generic introduction
// when name is empty.
func getHello(name string) string {
	if name == "" {
		return "Hallo, salam kenal!"
	}
	return "Hallo " + name
}
// main demonstrates getHello with and without a name.
func main() {
	for _, name := range []string{"", "Eko"} {
		fmt.Println(getHello(name))
	}
}
|
package webwx
import (
"fmt"
"reflect"
"regexp"
"testing"
)
// TestUtilGenDeviceId checks that generated device ids are exactly 16
// characters long.
func TestUtilGenDeviceId(t *testing.T) {
	if s := genDeviceId(); len(s) != 16 {
		t.Error("Invalid deviceId:", s, len(s))
	}
}
// TestUtilGetUserAgent just logs the user agent string for inspection;
// it asserts nothing.
func TestUtilGetUserAgent(t *testing.T) {
	t.Log(getUserAgent())
}
// TestRegexp exercises the login-response and sync-check regexps
// against captured server payloads, logging the submatches.
func TestRegexp(t *testing.T) {
	loginXML := `<error><ret>0</ret><message></message><skey>@crypt_24c998a9_f90604f28a133860a55b9954db925a2a</skey><wxsid>Hp6P6xBuGnZJS+j8</wxsid><wxuin>1377554769</wxuin><pass_ticket>COk9zr04ui3%2BiirzqXU8XP5x5OJf7p2Cb8H2qt2%2BtpwARpT2doI6WJqncBf3HjkO</pass_ticket><isgrayscale>1</isgrayscale></error>`
	re := regexp.MustCompile(`<error><ret>(\d+)</ret><message></message><skey>([\S]+)</skey><wxsid>([\S]+)</wxsid><wxuin>(\d+)</wxuin><pass_ticket>([\S]+)</pass_ticket><isgrayscale>(\d+)</isgrayscale></error>`)
	groups := re.FindStringSubmatch(loginXML)
	t.Log("match len:", len(groups))
	for _, g := range groups {
		t.Log(g)
	}
	syncJS := `window.synccheck={retcode:"0",selector:"2"}`
	re = regexp.MustCompile(`\{retcode:"(\d+)"\s*,\s*selector:"(\d+)"}`)
	groups = re.FindStringSubmatch(syncJS)
	t.Log("match len:", len(groups))
	for _, g := range groups {
		t.Log(g)
	}
}
// TestEmptyStruct logs the zero-value fields of BaseRequest.
// NOTE(review): %s on a non-string field would print a %!s verb error —
// assumes Uin/Sid/Skey/DeviceID are strings; verify against BaseRequest.
func TestEmptyStruct(t *testing.T) {
	r := BaseRequest{}
	s := fmt.Sprintf("%s, %s, %s, %s", r.Uin, r.Sid, r.Skey, r.DeviceID)
	t.Log(s)
}
// TestJsLoginResp records sample jslogin responses (success and 408
// timeout); it is currently skipped and only logs the timeout sample.
func TestJsLoginResp(t *testing.T) {
	samples := []string{
		`window.QRLogin.code = 200; window.QRLogin.uuid = "Qbdd7BUqXQ==";`,
		`window.code=408;`,
	}
	t.Skip(samples[1])
}
// TestLoggerReflect walks the package logger with reflection, logging
// each level of indirection (pointer/interface -> Elem, struct ->
// first field) until a non-composite value is reached. Diagnostic only.
func TestLoggerReflect(t *testing.T) {
	if log == nil {
		t.Skip("[WebWx] logger not inited, ignore..")
		return
	}
	// get access
	v := reflect.ValueOf(log)
	for {
		t.Log(v.Type(), v.Kind(), v.CanSet(), v.CanInterface(), v.CanAddr())
		//t := v.Type()
		//if t.String() == "seelog.logLevelConstraints" {
		//	break
		//}
		k := v.Kind()
		if k == reflect.Ptr || k == reflect.Interface {
			v = v.Elem()
		} else if k == reflect.Struct {
			v = v.Field(0)
		} else {
			break
		}
	}
	t.Log(v.Type(), v.Kind(), v.CanSet(), v.CanInterface(), v.CanAddr())
}
|
package main
import (
"bytes"
"flag"
"io/ioutil"
"net"
"net/http"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Sirupsen/logrus"
)
// ipSlice is a sortable collection of IP addresses, ordered by
// ipCompare (IPv4 before IPv6, then bytewise).
type ipSlice []net.IP

// Len implements sort.Interface.
func (slice ipSlice) Len() int {
	return len(slice)
}

// Less implements sort.Interface using ipCompare's ordering.
func (slice ipSlice) Less(a, b int) bool {
	return ipCompare(slice[a], slice[b]) < 0
}

// Swap implements sort.Interface.
func (slice ipSlice) Swap(a, b int) {
	slice[a], slice[b] = slice[b], slice[a]
}
var (
	// needUpdateAllowedIpList is set when any dynamic source (local,
	// nat, auto) contributed addresses, so the list must be refreshed.
	needUpdateAllowedIpList = false
	// globalAllowedIPs caches the last computed ipSlice.
	globalAllowedIPs atomic.Value
	globalAllowedIPsMutex = sync.Mutex{}
	// globalAllowedIPsNextUpdateTime holds the time.Time of the next
	// scheduled refresh.
	globalAllowedIPsNextUpdateTime atomic.Value
	localIPNetworks = []net.IPNet{ // additional filter to ip.IsGlobalUnicast, issue https://github.com/golang/go/issues/11772
		parseNet("10.0.0.0/8"),
		parseNet("172.16.0.0/12"),
		parseNet("192.168.0.0/16"),
		parseNet("FC00::/7"),
	}
)
// getAllowIPs returns the current allowed-IP list. With a zero refresh
// interval it re-reads the list on every call; otherwise it caches the
// list, refreshing after allowIPRefreshInterval with double-checked
// locking on the next-update timestamp.
func getAllowIPs() ipSlice {
	if !flag.Parsed() {
		logrus.Debug("Try get allowed ips before parse options")
		return nil
	}
	if *allowIPRefreshInterval == 0 {
		res := forceReadAllowedIPs()
		logrus.Infof("Update allowed ips to: %v", res)
		return res
	}
	if nextUpdateTime, ok := globalAllowedIPsNextUpdateTime.Load().(time.Time); !ok || nextUpdateTime.Before(time.Now()) {
		globalAllowedIPsMutex.Lock()
		defer globalAllowedIPsMutex.Unlock()
		// second check after get mutex. It can be updated in other thread
		if nextUpdateTime, ok := globalAllowedIPsNextUpdateTime.Load().(time.Time); !ok || nextUpdateTime.Before(time.Now()) {
			res := forceReadAllowedIPs()
			logrus.Infof("Update allowed ips to: %v", res)
			globalAllowedIPs.Store(res)
			globalAllowedIPsNextUpdateTime.Store(time.Now().Add(*allowIPRefreshInterval))
		}
	}
	// NOTE(review): unchecked assertion — panics if globalAllowedIPs was
	// never stored; verify the first refresh always precedes this load.
	ips := globalAllowedIPs.Load().(ipSlice)
	return ips
}
// getLocalIPs collects the IPs of all local interfaces, logging each
// address as it is seen and once more as a summary at Info level.
func getLocalIPs() (res ipSlice) {
	addresses, err := net.InterfaceAddrs()
	if err != nil {
		logrus.Error("Can't get local ip addresses:", err)
		return nil
	}
	res = make([]net.IP, 0, len(addresses))
	for _, addr := range addresses {
		logrus.Info("Local ip: ", addr.String())
		// Interface addresses come in CIDR form ("10.0.0.1/24").
		ip, _, err := net.ParseCIDR(addr.String())
		if err == nil {
			res = append(res, ip)
		} else {
			logrus.Errorf("Can't parse local ip '%v': %v", addr.String(), err)
		}
	}
	// logrus levels grow with verbosity, so >= InfoLevel means Info or
	// more verbose.
	if logrus.GetLevel() >= logrus.InfoLevel {
		ipStrings := make([]string, len(res))
		for i, addr := range res {
			ipStrings[i] = addr.String()
		}
		logrus.Info("Local ip:", ipStrings)
	}
	return res
}
// getIpByExternalRequest asks http://ifconfig.io/ip for this host's
// public address over IPv4 and IPv6 in parallel. The result always has
// length 2: index 0 is the tcp4 answer, index 1 the tcp6 answer; a
// failed lookup leaves a nil entry.
func getIpByExternalRequest() (res ipSlice) {
	fGetIp := func(network string) net.IP {
		// Force the dial onto the requested IP family.
		client := http.Client{Transport: &http.Transport{
			Dial: func(_supress_network, addr string) (net.Conn, error) {
				return net.Dial(network, addr)
			},
		},
		}
		client.Timeout = *getIPByExternalRequestTimeout
		resp, err := client.Get("http://ifconfig.io/ip")
		if resp != nil && resp.Body != nil {
			defer resp.Body.Close()
		}
		if err != nil {
			logrus.Debugf("Can't request to http://ifconfig.io/ip (%v): %v", network, err)
			return nil
		}
		respBytes, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			logrus.Debugf("Can't read response from http://ifconfig.io/ip (%v): %v", network, err)
			return nil
		}
		ip := net.ParseIP(strings.TrimSpace(string(respBytes)))
		logrus.Debugf("Detected ip by http://ifconfig.io/ip (%v): %v", network, ip)
		return ip
	}
	res = make(ipSlice, 2)
	wg := &sync.WaitGroup{}
	wg.Add(2)
	go func() {
		res[0] = fGetIp("tcp4")
		wg.Done()
	}()
	go func() {
		res[1] = fGetIp("tcp6")
		wg.Done()
	}()
	wg.Wait()
	return res
}
// forceReadAllowedIPs rebuilds the allowed-IP list from the
// comma-separated allowIPsString option. Entries may be literal IPs or
// the keywords "local" (interface addresses), "nat" (external lookup)
// and "auto" (bound addresses, falling back to local/external
// detection). The result is sorted and de-duplicated.
func forceReadAllowedIPs() ipSlice {
	var allowedIPs ipSlice
	for _, allowed := range strings.Split(*allowIPsString, ",") {
		allowed = strings.TrimSpace(allowed)
		switch {
		case allowed == "local":
			logrus.Debug("Detect local ips")
			needUpdateAllowedIpList = true
			localIPs := getLocalIPs()
			logrus.Debug("Detect local ips:", localIPs)
			allowedIPs = append(allowedIPs, localIPs...)
		case allowed == "nat":
			logrus.Debug("Detect nated ips")
			needUpdateAllowedIpList = true
			allowedIPs = append(allowedIPs, getIpByExternalRequest()...)
		case allowed == "auto":
			logrus.Debug("Autodetect ips")
			// Inspect the bind addresses to learn which families are in
			// use and whether any bind is unspecified (0.0.0.0 / ::).
			hasUnspecifiedIpv4 := false
			hasUnspecifiedIpv6 := false
			hasIpv4 := false
			hasIpv6 := false
			var autoAllowedIps ipSlice
			for _, tcpAddr := range bindTo {
				switch {
				case tcpAddr.IP.Equal(net.IPv4zero):
					hasUnspecifiedIpv4 = true
					hasIpv4 = true
				case tcpAddr.IP.Equal(net.IPv6unspecified):
					hasUnspecifiedIpv6 = true
					hasIpv6 = true
				case tcpAddr.IP == nil:
					// No IP at all counts as unspecified in both families.
					hasUnspecifiedIpv4 = true
					hasUnspecifiedIpv6 = true
					hasIpv4 = true
					hasIpv6 = true
				default:
					if len(tcpAddr.IP) == net.IPv4len {
						hasIpv4 = true
						logrus.Debugf("Add binded ipv4 to allowed: %v", tcpAddr.IP)
						autoAllowedIps = append(autoAllowedIps, tcpAddr.IP)
					} else {
						hasIpv6 = true
						logrus.Debugf("Add binded ipv6 to allowed: %v", tcpAddr.IP)
						autoAllowedIps = append(autoAllowedIps, tcpAddr.IP)
					}
				}
			}
			// Unspecified binds pull in every matching local address.
			var localIPs ipSlice
			if hasUnspecifiedIpv6 || hasUnspecifiedIpv4 {
				needUpdateAllowedIpList = true
				logrus.Debug("Has unspecified ip addresses, autodetect all local ips.")
				localIPs = getLocalIPs()
				for _, ip := range localIPs {
					if hasUnspecifiedIpv4 && len(ip) == net.IPv4len ||
						hasUnspecifiedIpv6 && len(ip) == net.IPv6len {
						autoAllowedIps = append(autoAllowedIps, ip)
					}
				}
			}
			// Without a public IPv4 locally, fall back to asking an
			// external service for the NATed addresses.
			hasPublicIPv4 := false
			for _, ip := range autoAllowedIps {
				if ip.To4() != nil && isPublicIp(ip) {
					hasPublicIPv4 = true
					break
				}
			}
			if !hasPublicIPv4 {
				needUpdateAllowedIpList = true
				sort.Sort(autoAllowedIps)
				logrus.Debug("Can't find local public ipv4 address. Try detect ip by external request. Local addresses:", localIPs)
				externalIPs := getIpByExternalRequest()
				for _, ip := range externalIPs {
					if ip.To4() != nil && hasIpv4 || ip.To4() == nil && hasIpv6 {
						logrus.Debug("IP add allowed by external request:", ip)
						autoAllowedIps = append(autoAllowedIps, ip)
					} else {
						logrus.Debug("IP skip allowed by external request (ip family):", ip)
					}
				}
			}
			sort.Sort(autoAllowedIps)
			logrus.Debugf("Add auto-allowed ips: %v", autoAllowedIps)
			allowedIPs = append(allowedIPs, autoAllowedIps...)
		case net.ParseIP(allowed) != nil:
			allowedIPs = append(allowedIPs, net.ParseIP(allowed))
		}
	}
	// Sort, then drop nils and adjacent duplicates.
	sort.Sort(allowedIPs)
	cleanedAllowedIPs := ipSlice{}
	prevIP := net.IP{}
	for _, ip := range allowedIPs {
		if ip == nil {
			continue
		}
		if ip.Equal(prevIP) {
			continue
		}
		cleanedAllowedIPs = append(cleanedAllowedIPs, ip)
		prevIP = ip
	}
	allowedIPs = make(ipSlice, len(cleanedAllowedIPs))
	copy(allowedIPs, cleanedAllowedIPs)
	logrus.Info("Detected allowed IPs:", allowedIPs)
	if needUpdateAllowedIpList {
		logrus.Infof("Next update allowed ip list: %v (after %v)", time.Now().Add(*allowIPRefreshInterval), *allowIPRefreshInterval)
	} else {
		logrus.Info("No need update alowed ip list")
	}
	return allowedIPs
}
func ipCompare(a, b net.IP) int {
// normalize ips
if ipv4 := a.To4(); ipv4 != nil {
a = ipv4
}
if ipv4 := b.To4(); ipv4 != nil {
b = ipv4
}
switch {
case len(a) == 0 && len(b) == 0:
return 0
case len(a) < len(b):
return -1
case len(a) > len(b):
return 1
case a.Equal(b):
return 0
default:
return bytes.Compare([]byte(a), []byte(b))
}
}
// slice must be sorted
// ipContains reports whether ip occurs in the sorted slice, using a
// binary search under the ipCompare ordering.
func ipContains(slice ipSlice, ip net.IP) bool {
	n := len(slice)
	i := sort.Search(n, func(k int) bool { return ipCompare(slice[k], ip) >= 0 })
	return i < n && ipCompare(ip, slice[i]) == 0
}
// isPublicIp reports whether ip is a non-empty global-unicast address
// that does not fall inside the private/ULA ranges in localIPNetworks.
func isPublicIp(ip net.IP) bool {
	if len(ip) == 0 || !ip.IsGlobalUnicast() {
		return false
	}
	for _, network := range localIPNetworks {
		if network.Contains(ip) {
			return false
		}
	}
	return true
}
// parseNet parses a CIDR string into a net.IPNet; on any failure it
// logs the problem and returns the zero value.
func parseNet(s string) net.IPNet {
	_, ipnet, err := net.ParseCIDR(s)
	switch {
	case err != nil:
		logrus.Errorf("Can't parse cidr '%v': %v", s, err)
	case ipnet == nil:
		logrus.Errorf("Can't parse cidr '%v', nil result.", s)
	default:
		return *ipnet
	}
	return net.IPNet{}
}
// parseAddressList parses a comma-separated list of bind addresses.
// Each entry may be a host:port TCP address or a bare IP, which gets
// defaultPort. Unparseable entries are logged and skipped.
func parseAddressList(addresses string, defaultPort int) (res []net.TCPAddr) {
	for _, addrS := range strings.Split(addresses, ",") {
		addrTcp, err := net.ResolveTCPAddr("tcp", addrS)
		if err == nil {
			logrus.Debugf("Parse bind tcp addr '%v' -> '%v'", addrS, addrTcp)
		} else {
			// Fall back to treating the entry as a bare IP address.
			addrIp, err := net.ResolveIPAddr("ip", addrS)
			if addrIp != nil && err == nil {
				addrTcp = &net.TCPAddr{
					IP: addrIp.IP,
					Port: defaultPort,
				}
				logrus.Debugf("Parse bind ip addr '%v' -> '%v'", addrS, addrTcp)
			} else {
				logrus.Errorf("Can't parse bind address '%v'", addrS)
			}
		}
		if addrTcp != nil {
			// Normalize IPv4-mapped addresses to their 4-byte form.
			ipv4 := addrTcp.IP.To4()
			if ipv4 != nil {
				addrTcp.IP = ipv4
			}
			res = append(res, *addrTcp)
		}
	}
	return res
}
|
package models
import (
"github.com/astaxie/beego/orm"
_ "github.com/go-sql-driver/mysql"
"strconv"
"strings"
"time"
)
// AddTopic inserts a new topic and increments the topic counters of its
// category and of every given label. Labels are stored in the packed
// form "$id1#$id2#". Counter-update errors are carried in the returned
// err (last one wins); the insert error aborts immediately.
func AddTopic(title, category, summery, content string, labels []string) error {
	label := ""
	if len(labels) > 0 {
		label = "$" + strings.Join(labels, "#$") + "#"
	}
	o := orm.NewOrm()
	topic := &Topic{
		Title: title,
		Category: category,
		Summery: summery,
		Lables: label,
		Content: content,
		Created: time.Now().Format("2006-01-02 15:04:05"),
		Updated: time.Now().Format("2006-01-02 15:04:05"),
	}
	_, err := o.Insert(topic)
	if err != nil {
		return err
	}
	// Bump the category's topic counter.
	cate := new(Category)
	qs := o.QueryTable("category")
	err = qs.Filter("id", category).One(cate)
	if err == nil {
		cate.TopicCount++
		_, err = o.Update(cate)
	}
	// Bump each label's topic counter.
	for i, n := 0, len(labels); i < n; i++ {
		labelOne := new(Label)
		qs := o.QueryTable("label")
		err = qs.Filter("id", labels[i]).One(labelOne)
		if err == nil {
			labelOne.TopicCount++
			_, err = o.Update(labelOne)
		}
	}
	return err
}
// GetAllNewTopics returns the six most recently created topics.
func GetAllNewTopics() (topics []*Topic, err error) {
	topics = make([]*Topic, 0)
	_, err = orm.NewOrm().QueryTable("topic").Limit(6, 0).OrderBy("-created").All(&topics)
	return topics, err
}
// GetAllViewsTopics returns the six most viewed topics.
func GetAllViewsTopics() (topics []*Topic, err error) {
	topics = make([]*Topic, 0)
	_, err = orm.NewOrm().QueryTable("topic").Limit(6, 0).OrderBy("-views").All(&topics)
	return topics, err
}
// GetAllArchiveTopics returns every topic, newest first.
func GetAllArchiveTopics() (topics []*Topic, err error) {
	topics = make([]*Topic, 0)
	_, err = orm.NewOrm().QueryTable("topic").OrderBy("-created").All(&topics)
	return topics, err
}
/**
 * GetAllTopics returns a page of topics.
 * @category filter by category id (applied only when isDesc)
 * @label filter by label id (applied only when isDesc)
 * @isDesc whether to order by creation time, newest first
 * @start page start offset
 * @offset page size
 */
func GetAllTopics(category, label string, isDesc bool, start, offset int) (topics []*Topic, err error) {
	o := orm.NewOrm()
	topics = make([]*Topic, 0)
	qs := o.QueryTable("topic")
	if isDesc {
		if len(category) > 0 {
			qs = qs.Filter("category", category).Limit(offset, start)
		}
		if len(label) > 0 {
			qs = qs.Filter("lables__contains", "$"+label+"#").Limit(offset, start)
		}
		// NOTE(review): Limit is applied again here after the filter
		// branches above — confirm the repeated Limit is intentional.
		_, err = qs.Limit(offset, start).OrderBy("-created").All(&topics)
	} else {
		// NOTE(review): category/label filters are skipped in this
		// branch — verify the asymmetry is intended.
		_, err = qs.Limit(offset, start).All(&topics)
	}
	// Unpack the stored label format "$a#$b#" into "a b ".
	for _, v := range topics {
		v.Lables = strings.Replace(strings.Replace(v.Lables, "#", " ", -1), "$", "", -1)
	}
	return topics, err
}
// DelTopic deletes the topic with the given string id and decrements
// the topic counters of its former category and labels.
func DelTopic(tid string) error {
	tidNum, err := strconv.ParseInt(tid, 10, 64)
	if err != nil {
		return err
	}
	var oldCate string
	var oldLabel string
	o := orm.NewOrm()
	topic := &Topic{Id: tidNum}
	if o.Read(topic) == nil {
		// Remember the category/labels before deleting the row.
		oldCate = topic.Category
		oldLabel = topic.Lables
		_, err = o.Delete(topic)
		if err != nil {
			return err
		}
	}
	if len(oldCate) > 0 {
		cate := new(Category)
		qs := o.QueryTable("category")
		err = qs.Filter("id", oldCate).One(cate)
		if err == nil {
			cate.TopicCount--
			_, err = o.Update(cate)
		}
	}
	if len(oldLabel) > 0 {
		// Unpack "$a#$b#" -> "a b " and split; the trailing empty
		// element is skipped by the n-1 loop bound.
		label := strings.Replace(strings.Replace(oldLabel, "#", " ", -1), "$", "", -1)
		labels := strings.Split(label, " ")
		for i, n := 0, len(labels); i < n-1; i++ {
			labelOne := new(Label)
			qs := o.QueryTable("label")
			err = qs.Filter("id", labels[i]).One(labelOne)
			if err == nil {
				labelOne.TopicCount--
				_, err = o.Update(labelOne)
			}
		}
	}
	return err
}
// GetTopic loads a topic by its string id, bumps its view counter
// (best effort — the update error is not checked, as before) and
// unpacks its label string ("$a#$b#" -> "a b").
func GetTopic(tid string) (*Topic, error) {
	tidNum, err := strconv.ParseInt(tid, 10, 64)
	if err != nil {
		return nil, err
	}
	o := orm.NewOrm()
	topic := new(Topic)
	qs := o.QueryTable("topic")
	err = qs.Filter("id", tidNum).One(topic)
	if err != nil {
		return nil, err
	}
	topic.Views++
	_, err = o.Update(topic)
	topic.Lables = strings.Replace(strings.Replace(
		topic.Lables, "#", " ", -1), "$", "", -1)
	// Drop the trailing separator left by the unpacking above. (The
	// original expressed this as an empty `if n == 0 {}` branch with the
	// work in the else; same behavior, clearer shape.)
	if n := len(topic.Lables); n > 0 {
		topic.Lables = topic.Lables[:n-1]
	}
	return topic, nil
}
/*func GetTopicLabels(labelsId []string) ([]*Label, error) {
o := orm.NewOrm()
Labels := make([]*Label, 0)
qs := o.QueryTable("label")
var err error
for i, n := 0, len(labelsId); i < n; i++ {
err = qs.Filter("id", labelsId[i]).One(Labels[i])
if err != nil {
return nil, err
}
}
return Labels, err
}*/
// EditTopic updates a topic's fields and rebalances the topic counters:
// the old category/labels are decremented, the new ones incremented.
func EditTopic(tid, title, category, summery, content string, labels []string) error {
	tidNum, err := strconv.ParseInt(tid, 10, 64)
	label := ""
	if len(labels) > 0 {
		label = "$" + strings.Join(labels, "#$") + "#"
	}
	var oldCate, oldLabel string
	o := orm.NewOrm()
	topic := &Topic{Id: tidNum}
	if o.Read(topic) == nil {
		// Remember the previous category/labels for counter updates.
		oldCate = topic.Category
		oldLabel = topic.Lables
		topic.Title = title
		topic.Category = category
		topic.Lables = label
		topic.Summery = summery
		topic.Content = content
		topic.Updated = time.Now().Format("2006-01-02 15:04:05")
		_, err = o.Update(topic)
		if err != nil {
			return err
		}
	}
	// Decrement the old category's counter.
	if len(oldCate) > 0 {
		cate := new(Category)
		qs := o.QueryTable("category")
		err = qs.Filter("id", oldCate).One(cate)
		if err == nil {
			cate.TopicCount--
			_, err = o.Update(cate)
		}
	}
	// Decrement the old labels' counters ("$a#$b#" -> "a b "; the
	// trailing empty split element is skipped by the n-1 bound).
	if len(oldLabel) > 0 {
		label := strings.Replace(strings.Replace(oldLabel, "#", " ", -1), "$", "", -1)
		labels := strings.Split(label, " ")
		for i, n := 0, len(labels); i < n-1; i++ {
			labelOne := new(Label)
			qs := o.QueryTable("label")
			err = qs.Filter("id", labels[i]).One(labelOne)
			if err == nil {
				labelOne.TopicCount--
				_, err = o.Update(labelOne)
			}
		}
	}
	// Increment the new category's counter.
	cate := new(Category)
	qs := o.QueryTable("category")
	err = qs.Filter("id", category).One(cate)
	if err == nil {
		cate.TopicCount++
		_, err = o.Update(cate)
	}
	// Increment the new labels' counters.
	for i, n := 0, len(labels); i < n; i++ {
		labelOne := new(Label)
		qs := o.QueryTable("label")
		err = qs.Filter("id", labels[i]).One(labelOne)
		if err == nil {
			labelOne.TopicCount++
			_, err = o.Update(labelOne)
		}
	}
	return err
}
// GetAllTopicsCount counts topics, optionally restricted to a category
// or, failing that, to a label (category takes precedence).
func GetAllTopicsCount(category, label string) (int, error) {
	qs := orm.NewOrm().QueryTable("topic")
	var (
		cnt int64
		err error
	)
	switch {
	case len(category) > 0:
		cnt, err = qs.Filter("category", category).Count()
	case len(label) > 0:
		cnt, err = qs.Filter("lables__contains", "$"+label+"#").Count()
	default:
		cnt, err = qs.Count()
	}
	return int(cnt), err
}
|
//converter converts bitmaps into an encoded video.
package converter
import (
"os"
"fmt"
"math"
"image"
"image/color"
"image/png"
"log"
"os/exec"
)
// Circle describes a circle by its center (X, Y) and radius R.
type Circle struct {
	X, Y, R float64
}

// Brightness returns 255 when (x, y) lies inside or on the circle and
// 0 outside — a hard-edged disc mask.
func (c *Circle) Brightness(x, y float64) uint8 {
	dx := c.X - x
	dy := c.Y - y
	// Normalized distance from the center; > 1 means outside.
	if math.Sqrt(dx*dx+dy*dy)/c.R > 1 {
		return 0
	}
	return 255
}
// createSampleImage renders one 1000x1000 animation frame — three
// discs (used as the R, G and B channels) spiraling toward the center
// as serialNum grows toward 100 — and writes it as PNG to name.
func createSampleImage(name string, serialNum int) {
	var w, h int = 1000, 1000
	var hw, hh float64 = float64(w / 2), float64(h / 2)
	// Orbit radius shrinks linearly with the frame number.
	r := 300.0 * (1 - float64(serialNum)/100.0)
	θ := 2 * math.Pi / 3
	cr := &Circle{hw - r*math.Sin(0), hh - r*math.Cos(0), 60}
	cg := &Circle{hw - r*math.Sin(θ), hh - r*math.Cos(θ), 60}
	cb := &Circle{hw - r*math.Sin(-θ), hh - r*math.Cos(-θ), 60}
	m := image.NewRGBA(image.Rect(0, 0, w, h))
	for x := 0; x < w; x++ {
		for y := 0; y < h; y++ {
			c := color.RGBA{
				cr.Brightness(float64(x), float64(y)),
				cg.Brightness(float64(x), float64(y)),
				cb.Brightness(float64(x), float64(y)),
				255,
			}
			m.Set(x, y, c)
		}
	}
	// Bug fix: O_TRUNC added — without it, rewriting over an existing
	// larger file left stale trailing bytes after the PNG data.
	f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	// The encode error was previously dropped silently.
	if err := png.Encode(f, m); err != nil {
		fmt.Println(err)
	}
}
// createImageSeries writes num frames named <name>00001.png,
// <name>00002.png, … by rendering one sample image per frame number.
func createImageSeries(name string, num int) {
	for frame := 1; frame <= num; frame++ {
		createSampleImage(fmt.Sprintf("%s%05d.png", name, frame), frame)
	}
}
// createVideo assembles the <name>%05d.png frame sequence into
// <name>.mp4 at 24 fps via ffmpeg, aborting the process on failure.
func createVideo(name string) {
	args := []string{"-f", "image2", "-r", "24", "-i", name + "%05d.png", "-vcodec", "mpeg4", "-y", name + ".mp4"}
	if err := exec.Command("ffmpeg", args...).Run(); err != nil {
		log.Fatal(err)
	}
}
// createSampleVideo encodes the fixed test_images/image%05d.png
// sequence into new_movie.mp4 at 1 fps via ffmpeg, aborting on failure.
func createSampleVideo() {
	args := []string{"-f", "image2", "-r", "1", "-i", "test_images/image%05d.png", "-vcodec", "mpeg4", "-y", "new_movie.mp4"}
	if err := exec.Command("ffmpeg", args...).Run(); err != nil {
		log.Fatal(err)
	}
}
// createCircleVideo renders the 100-frame circle animation and encodes
// it into <name>.mp4.
func createCircleVideo(name string) {
	createImageSeries(name, 100)
	createVideo(name)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.