text stringlengths 11 4.05M |
|---|
package saetoauthv2
import (
"fmt"
)
// NewAuthV2 constructs an AuthV2 client preconfigured for the Sina Weibo
// v2 API (https://api.weibo.com/2/): JSON responses with decoding enabled,
// 30-second request and connect timeouts, and TLS peer verification off.
// NOTE(review): the Println below looks like leftover debug output in
// library code — consider removing it.
func NewAuthV2(ClientID, ClientSecret, AccessToken, RefreshToken string) (auth *AuthV2) {
fmt.Println("NewAuthV2")
auth = &AuthV2{
ClientID:ClientID,
ClientSecret:ClientSecret,
AccessToken:AccessToken,
RefreshToken:RefreshToken,
Host:"https://api.weibo.com/2/",
TimeOut:30,
ConnectTimeOut:30,
// SslVerifyPeer false disables certificate verification — kept as-is,
// but flagged: this weakens transport security.
SslVerifyPeer:false,
Format:"json",
DecodeJson:true,
UserAgent:"Sae T OAuth2 v0.1",
}
return
}
// AccessTokenURL reports the fixed Weibo OAuth2 token-exchange endpoint.
func (a *AuthV2) AccessTokenURL() string {
	const endpoint = "https://api.weibo.com/oauth2/access_token"
	return endpoint
}
// AuthorizeURL reports the fixed Weibo OAuth2 user-authorization endpoint.
func (a *AuthV2) AuthorizeURL() string {
	const endpoint = "https://api.weibo.com/oauth2/authorize"
	return endpoint
}
func (this *AuthV2) GetAuthorizeURL( url, responseType, state, display string) string {
if responseType == "" {
responseType = "code"
}
return this.AuthorizeURL() + "?" + "client_id=" + this.ClientID + "&redirect_uri=" + url + "&response_type=" + responseType + "&state=" + state + "&display=" + display
} |
package parsing
import (
"github.com/s2gatev/sqlmorph/ast"
)
// LimitWithoutNumberError is the panic message used when LIMIT is not
// followed by a numeric literal.
const LimitWithoutNumberError = "LIMIT statement must be followed by a number."
// LimitState parses LIMIT SQL clauses along with the value.
// ... LIMIT 10 ...
type LimitState struct {
BaseState
}
// Name returns the SQL keyword this parser state handles.
func (s *LimitState) Name() string {
	const keyword = "LIMIT"
	return keyword
}
// Parse consumes a LIMIT clause from tokenizer and stores its numeric
// value on result (which must implement ast.HasLimit). It returns
// (result, false) without consuming anything when the next token is not
// LIMIT, and panics via wrongTokenPanic when LIMIT is not followed by a
// literal.
func (s *LimitState) Parse(result ast.Node, tokenizer *Tokenizer) (ast.Node, bool) {
target := result.(ast.HasLimit)
// Peek for the LIMIT keyword; push the token back if it isn't ours.
if token, _ := tokenizer.ReadToken(); token != LIMIT {
tokenizer.UnreadToken()
return result, false
}
// The value after LIMIT must be a literal (the number).
if token, limit := tokenizer.ReadToken(); token == LITERAL {
target.SetLimit(limit)
} else {
wrongTokenPanic(LimitWithoutNumberError, limit)
}
return result, true
}
|
package core
import (
"monitoring/internal"
"fmt"
"os"
"time"
)
// initHost loads host, CPU, and root-filesystem disk information and
// prints a one-time summary table to stdout. It returns 0 on success;
// load failures are delegated to internal.CheckErr.
// Fix: "Kernel" was previously misspelled "Kernal" in the output.
func (r *System)initHost() int {
internal.CheckErr(r.Host.new(),"couldn't load host info")
internal.CheckErr(r.CPU.PollingInfo(), "couldn't load cpu info")
internal.CheckErr(r.Disk.PollingInfo("/"), "couldn't load disk info")
fmt.Fprintf(os.Stdout,"System Initialization\n" +
"Host\t\t%v\n" +
"Uptime\t\t%v\n" +
"BootTime\t%v\n" +
"OS/Platform\t%v/%v\n" +
"Kernel\t\t%v\n" +
"CPU Vendor\t%v\n" +
"Core\t\t%v\n" +
"Model\t\t%v\n" +
"Disk\n%v\n",r.Host.Info.Hostname, r.Host.Info.Uptime, r.Host.Info.BootTime, r.Host.Info.OS, r.Host.Info.Platform,
r.Host.Info.KernelVersion, r.CPU.Info[0].VendorID, r.CPU.Info[0].Cores, r.CPU.Info[0].ModelName, r.Disk)
return 0
}
// Collect polls system metrics in an endless loop, signalling on c after
// each poll and sleeping five seconds between iterations. It never
// returns; run it in its own goroutine.
// NOTE(review): there is no stop mechanism — consider a context or done
// channel if shutdown matters.
func (r *System)Collect(c chan bool) {
for {
r.Polling()
//fmt.Fprintf(os.Stdout, "%d.polling : %v\n CPU Usage: %v\n", cnt, r.Timestamp, r.CPU.Usage)
c <- true
time.Sleep(5 * time.Second)
}
}
/*
Create a function that takes a division equation as a string and checks whether the division yields a whole number with no decimal remainder.
Examples
validDivision("6/3") ➞ true
validDivision("30/25") ➞ false
validDivision("0/3") ➞ true
Notes
Return "invalid" if division by zero.
*/
package main
import "fmt"
// main exercises divisible against the example cases from the problem
// statement above; any mismatch panics via assert.
func main() {
assert(divisible("6/3") == true)
assert(divisible("30/25") == false)
assert(divisible("0/3") == true)
assert(divisible("13/12") == false)
assert(divisible("329/329") == true)
// Division by zero must yield the string "invalid", not a bool.
assert(divisible("0/0") == "invalid")
assert(divisible("10/0") == "invalid")
assert(divisible("20/5") == true)
}
// assert panics when the supplied condition is false; it serves as a
// minimal in-program test harness for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// divisible parses a division expression of the form "a/b" and reports
// whether a divides evenly by b. It returns the string "invalid" when the
// input cannot be parsed as two integers or the divisor is zero.
func divisible(s string) interface{} {
	var dividend, divisor int
	parsed, _ := fmt.Sscanf(s, "%d/%d", &dividend, &divisor)
	switch {
	case parsed != 2, divisor == 0:
		return "invalid"
	default:
		return dividend%divisor == 0
	}
}
|
package errutil
import (
"fmt"
)
// First returns first non-nil error out of errs, or nil.
func First(errs ...error) error {
for _, e := range errs {
if e != nil {
return e
}
}
return nil
}
// FatalIf panics if err is not nil.
func FatalIf(err error) {
if err == nil {
return
}
panic(fmt.Sprintf("FATAL: %v", err))
}
|
// Copyright © 2014 Alienero. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package comet
import (
"fmt"
"github.com/Alienero/spp"
"github.com/golang/glog"
)
// PackQueue serializes writes of spp packs over a single TCP connection
// and funnels read results and write-side errors through channels.
type PackQueue struct {
// writeError records the first write failure; once set, WritePack
// fails fast with it.
writeError error
// errorChan notifies a pending ReadPack of a write-side failure.
errorChan chan error
writeChan chan *spp.Pack
readChan chan *packAndErr
// rw is the underlying pack-framed connection.
rw *spp.Conn
}
// packAndErr couples a pack read from the connection with its read error
// so both can travel through a single channel.
type packAndErr struct {
pack *spp.Pack
err error
}
// NewPackQueue wraps rw in a PackQueue. The write channel capacity comes
// from the package-level Conf (WirteLoopChanNum); the read and error
// channels get a single buffered slot so senders do not block.
func NewPackQueue(rw *spp.Conn) *PackQueue {
return &PackQueue{
rw: rw,
writeChan: make(chan *spp.Pack, Conf.WirteLoopChanNum),
readChan: make(chan *packAndErr, 1),
errorChan: make(chan error, 1),
}
}
// writeLoop drains writeChan and writes each pack to the connection.
// It must run in its own goroutine. The loop exits when a nil pack is
// received (closing writeChan yields nil — see Close) or when a write
// fails; a failure is recorded in writeError and forwarded on errorChan
// so a pending ReadPack observes it.
func (queue *PackQueue) writeLoop() {
// defer recover()
var err error
loop:
for {
select {
case pack := <-queue.writeChan:
// nil is the shutdown signal (received from a closed channel).
if pack == nil {
break loop
}
err = queue.rw.WritePack(pack)
if err != nil {
// Record the failure; readers are notified below.
queue.writeError = err
break loop
}
}
}
// Wake a pending ReadPack so the caller sees the write error.
if err != nil {
queue.errorChan <- err
}
}
// WritePack queues pack for the background writeLoop, or returns the
// previously recorded write error without queueing.
// NOTE(review): the send can block once writeChan is full and writeLoop
// has already exited — confirm callers handle that.
func (queue *PackQueue) WritePack(pack *spp.Pack) error {
if queue.writeError != nil {
return queue.writeError
}
queue.writeChan <- pack
return nil
}
// ReadPack reads one pack from the connection, racing the read against a
// write-side error notification: whichever arrives first determines the
// result.
// NOTE(review): each call spawns a reader goroutine; when errorChan wins
// the race, that goroutine parks its result in the buffered readChan.
// Verify that a subsequent call (or shutdown) always consumes it so the
// goroutine cannot leak.
func (queue *PackQueue) ReadPack() (pack *spp.Pack, err error) {
go func() {
p := new(packAndErr)
p.pack, p.err = queue.rw.ReadPack()
queue.readChan <- p
}()
select {
case err = <-queue.errorChan:
// Write side failed first; report its error.
case pAndErr := <-queue.readChan:
pack = pAndErr.pack
err = pAndErr.err
}
return
}
// ReadPackInLoop starts a goroutine that reads packs continuously and
// delivers each (with its error) on the returned channel, whose capacity
// comes from Conf.ReadPackLoop. The loop stops — and the channel is
// closed — when a byte arrives on fin.
// Call at most once per queue: the reader goroutine owns rw while running.
func (queue *PackQueue) ReadPackInLoop(fin <-chan byte) <-chan *packAndErr {
ch := make(chan *packAndErr, Conf.ReadPackLoop)
go func() {
// defer recover()
p := new(packAndErr)
loop:
for {
p.pack, p.err = queue.rw.ReadPack()
select {
case ch <- p:
// if p.err != nil {
// break loop
// }
// Delivered; keep reading. Note the loop continues even after a
// read error — the consumer decides when to stop via fin.
case <-fin:
glog.Info("Recive fin (read loop chan)")
break loop
}
// Fresh struct each iteration so the consumer's copy is never reused.
p = new(packAndErr)
}
close(ch)
}()
return ch
}
// Close shuts the queue down by closing all of its channels; closing
// writeChan makes writeLoop receive a nil pack and exit.
// It never fails; the error return exists for io.Closer-style call sites.
// NOTE(review): a WritePack racing with Close will panic on the closed
// channel — confirm callers serialize shutdown.
func (queue *PackQueue) Close() error {
close(queue.writeChan)
close(queue.readChan)
close(queue.errorChan)
return nil
}
// buffer is a sequential cursor over a byte slice with bounds-checked
// string and byte reads.
type buffer struct {
	index int
	data  []byte
}

// newBuffer wraps data in a buffer whose cursor starts at offset zero.
func newBuffer(data []byte) *buffer {
	return &buffer{data: data}
}

// readString consumes length bytes at the cursor and returns them as a
// string; when fewer bytes remain it fails without advancing.
func (b *buffer) readString(length int) (s string, err error) {
	end := b.index + length
	if end > len(b.data) {
		err = fmt.Errorf("Out of range error:%v", length)
		return
	}
	s = string(b.data[b.index:end])
	b.index = end
	return
}

// readByte consumes and returns the single byte at the cursor; when the
// buffer is exhausted it fails without advancing.
func (b *buffer) readByte() (c byte, err error) {
	if b.index >= len(b.data) {
		err = fmt.Errorf("Out of range error")
		return
	}
	c = b.data[b.index]
	b.index++
	return
}
|
/*
* @lc app=leetcode.cn id=929 lang=golang
*
* [929] 独特的电子邮件地址
*/
package main
import (
"strings"
)
// @lc code=start
// numUniqueEmails counts distinct delivery addresses among emails after
// normalizing the local part: everything after the first '+' is dropped
// and '.' characters are removed. The domain part is kept verbatim.
// (LeetCode 929; inputs are assumed to contain exactly one '@'.)
func numUniqueEmails(emails []string) int {
	seen := map[string]struct{}{}
	for _, raw := range emails {
		parts := strings.Split(raw, "@")
		local, domain := parts[0], parts[1]
		if plus := strings.Index(local, "+"); plus != -1 {
			local = local[:plus]
		}
		local = strings.ReplaceAll(local, ".", "")
		seen[local+"@"+domain] = struct{}{}
	}
	return len(seen)
}
// func main() {
// fmt.Println(numUniqueEmails([]string{"test.email+alex@leetcode.com", "test.e.mail+bob.cathy@leetcode.com", "testemail+david@lee.tcode.com"}),
// numUniqueEmails([]string{"a@leetcode.com", "b@leetcode.com", "c@leetcode.com"}))
// }
// @lc code=end
|
package web
import (
"net/http"
"fmt"
)
func handleError(err error, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
fmt.Fprintf(w, err.Error())
}
func handleNotFound(w http.ResponseWriter, r *http.Request) {
http.NotFound(w, r)
} |
package dushengchen
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
/**
https://leetcode.com/submissions/detail/740046542/
Runtime: 20 ms, faster than 32.24% of Go online submissions for Lowest Common Ancestor of a Binary Tree.
Memory Usage: 7.1 MB, less than 81.52% of Go online submissions for Lowest Common Ancestor of a Binary Tree.
*/
// lowestCommonAncestor returns the lowest common ancestor of p and q in
// the tree rooted at root: the node isFather reports once both targets
// are located. When isFather does not find both, root itself is returned.
func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {
if pf, qf, n := isFather(root, p, q); pf && qf {
return n
}
return root
}
// isFather reports, for the subtree rooted at root, whether p was found,
// whether q was found, and — at the first node where both are present —
// that node (the LCA). Below that point the non-nil node is propagated
// upward unchanged.
// NOTE(review): matching is by Val, not node identity; this assumes
// values are unique in the tree — confirm with callers.
func isFather(root, p, q *TreeNode) (bool, bool, *TreeNode) {
if root == nil || q == nil || p == nil {
return false, false, nil
}
// If the left subtree already contains both, its answer is final.
pf1, qf1, n := isFather(root.Left, p, q)
if pf1 && qf1 {
return pf1 ,qf1, n
}
// Likewise for the right subtree.
pf2, qf2, n := isFather(root.Right, p, q)
if pf2 && qf2 {
return pf2 ,qf2, n
}
// Combine child results with the current node itself.
pf := pf1 || pf2 || root.Val == p.Val
qf := qf1 || qf2 || root.Val == q.Val
if pf && qf {
return true, true, root
}
return pf, qf, nil
}
package option
import "fmt"
// Search holds the parameters accumulated by SearchOption functions.
type Search struct {
	title string
	limit int
}

// SearchOption mutates a Search; it is the functional-option type
// consumed by Option.
type SearchOption func(*Search)

// SearchTitle returns an option that sets the search title.
func SearchTitle(title string) SearchOption {
	return func(s *Search) { s.title = title }
}

// SearchLimit returns an option that caps the number of results.
func SearchLimit(limit int) SearchOption {
	return func(s *Search) { s.limit = limit }
}

// Option demonstrates the functional-option pattern: it folds every
// option into a zero-valued Search and prints the outcome.
func Option(options ...SearchOption) {
	var opt Search
	for _, apply := range options {
		apply(&opt)
	}
	fmt.Println(opt)
	// use opt
}
|
package collectors
import (
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/bosh-prometheus/bosh_exporter/deployments"
)
// DeploymentsCollector exposes BOSH deployment metadata (releases,
// stemcells, per-VM-type instance counts) as Prometheus gauges, together
// with gauges recording when and how long the last scrape took.
type DeploymentsCollector struct {
deploymentReleaseInfoMetric *prometheus.GaugeVec
deploymentStemcellInfoMetric *prometheus.GaugeVec
deploymentInstanceCountMetric *prometheus.GaugeVec
lastDeploymentsScrapeTimestampMetric prometheus.Gauge
lastDeploymentsScrapeDurationSecondsMetric prometheus.Gauge
}
// NewDeploymentsCollector builds the collector's gauge vectors and scrape
// bookkeeping gauges. All metrics share the environment/bosh_name/
// bosh_uuid constant labels identifying the scraped BOSH director.
func NewDeploymentsCollector(
namespace string,
environment string,
boshName string,
boshUUID string,
) *DeploymentsCollector {
// Constant-1 info metric: one series per (deployment, release, version).
deploymentReleaseInfoMetric := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: "deployment",
Name: "release_info",
Help: "Labeled BOSH Deployment Release Info with a constant '1' value.",
ConstLabels: prometheus.Labels{
"environment": environment,
"bosh_name": boshName,
"bosh_uuid": boshUUID,
},
},
[]string{"bosh_deployment", "bosh_release_name", "bosh_release_version"},
)
// Constant-1 info metric: one series per (deployment, stemcell, version, os).
deploymentStemcellInfoMetric := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: "deployment",
Name: "stemcell_info",
Help: "Labeled BOSH Deployment Stemcell Info with a constant '1' value.",
ConstLabels: prometheus.Labels{
"environment": environment,
"bosh_name": boshName,
"bosh_uuid": boshUUID,
},
},
[]string{"bosh_deployment", "bosh_stemcell_name", "bosh_stemcell_version", "bosh_stemcell_os_name"},
)
// Gauge counting instances per (deployment, vm_type).
deploymentInstanceCountMetric := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: "deployment",
Name: "instance_count",
Help: "Number of instances in this deployment",
ConstLabels: prometheus.Labels{
"environment": environment,
"bosh_name": boshName,
"bosh_uuid": boshUUID,
},
},
[]string{"bosh_deployment", "bosh_vm_type"},
)
lastDeploymentsScrapeTimestampMetric := prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: "",
Name: "last_deployments_scrape_timestamp",
Help: "Number of seconds since 1970 since last scrape of Deployments metrics from BOSH.",
ConstLabels: prometheus.Labels{
"environment": environment,
"bosh_name": boshName,
"bosh_uuid": boshUUID,
},
},
)
lastDeploymentsScrapeDurationSecondsMetric := prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: "",
Name: "last_deployments_scrape_duration_seconds",
Help: "Duration of the last scrape of Deployments metrics from BOSH.",
ConstLabels: prometheus.Labels{
"environment": environment,
"bosh_name": boshName,
"bosh_uuid": boshUUID,
},
},
)
collector := &DeploymentsCollector{
deploymentReleaseInfoMetric: deploymentReleaseInfoMetric,
deploymentStemcellInfoMetric: deploymentStemcellInfoMetric,
deploymentInstanceCountMetric: deploymentInstanceCountMetric,
lastDeploymentsScrapeTimestampMetric: lastDeploymentsScrapeTimestampMetric,
lastDeploymentsScrapeDurationSecondsMetric: lastDeploymentsScrapeDurationSecondsMetric,
}
return collector
}
// Collect refreshes the per-deployment metric vectors from the given
// deployments snapshot and emits every metric on ch, updating the scrape
// timestamp and duration gauges as it finishes.
func (c *DeploymentsCollector) Collect(deployments []deployments.DeploymentInfo, ch chan<- prometheus.Metric) error {
	var begun = time.Now()

	// Reset ALL per-deployment vectors so series belonging to deleted
	// deployments do not linger between scrapes. Fix: the original reset
	// only the release and stemcell vectors, leaving stale
	// instance_count series behind.
	c.deploymentReleaseInfoMetric.Reset()
	c.deploymentStemcellInfoMetric.Reset()
	c.deploymentInstanceCountMetric.Reset()

	for _, deployment := range deployments {
		c.reportDeploymentReleaseInfoMetrics(deployment, ch)
		c.reportDeploymentStemcellInfoMetrics(deployment, ch)
		c.reportDeploymentInstanceCountMetrics(deployment, ch)
	}

	c.deploymentReleaseInfoMetric.Collect(ch)
	c.deploymentStemcellInfoMetric.Collect(ch)
	c.deploymentInstanceCountMetric.Collect(ch)

	c.lastDeploymentsScrapeTimestampMetric.Set(float64(time.Now().Unix()))
	c.lastDeploymentsScrapeTimestampMetric.Collect(ch)

	c.lastDeploymentsScrapeDurationSecondsMetric.Set(time.Since(begun).Seconds())
	c.lastDeploymentsScrapeDurationSecondsMetric.Collect(ch)

	return nil
}
// Describe sends the descriptors of every metric this collector can emit
// on ch, as required by the prometheus.Collector contract.
func (c *DeploymentsCollector) Describe(ch chan<- *prometheus.Desc) {
c.deploymentReleaseInfoMetric.Describe(ch)
c.deploymentStemcellInfoMetric.Describe(ch)
c.deploymentInstanceCountMetric.Describe(ch)
c.lastDeploymentsScrapeTimestampMetric.Describe(ch)
c.lastDeploymentsScrapeDurationSecondsMetric.Describe(ch)
}
// reportDeploymentReleaseInfoMetrics sets a constant-1 release_info
// series for each release of the deployment. The ch parameter is unused
// here; the vector is emitted later by Collect.
func (c *DeploymentsCollector) reportDeploymentReleaseInfoMetrics(
deployment deployments.DeploymentInfo,
ch chan<- prometheus.Metric,
) {
for _, release := range deployment.Releases {
c.deploymentReleaseInfoMetric.WithLabelValues(
deployment.Name,
release.Name,
release.Version,
).Set(float64(1))
}
}
// reportDeploymentStemcellInfoMetrics sets a constant-1 stemcell_info
// series for each stemcell of the deployment. The ch parameter is unused
// here; the vector is emitted later by Collect.
func (c *DeploymentsCollector) reportDeploymentStemcellInfoMetrics(
deployment deployments.DeploymentInfo,
ch chan<- prometheus.Metric,
) {
for _, stemcell := range deployment.Stemcells {
c.deploymentStemcellInfoMetric.WithLabelValues(
deployment.Name,
stemcell.Name,
stemcell.Version,
stemcell.OSName,
).Set(float64(1))
}
}
// reportDeploymentInstanceCountMetrics sets one instance_count series per
// VM type, counting the deployment's instances grouped by VMType. The ch
// parameter is unused here; the vector is emitted later by Collect.
func (c *DeploymentsCollector) reportDeploymentInstanceCountMetrics(
	deployment deployments.DeploymentInfo,
	ch chan<- prometheus.Metric,
) {
	countByVMType := make(map[string]int)
	for _, instance := range deployment.Instances {
		countByVMType[instance.VMType]++
	}
	for vmType, n := range countByVMType {
		c.deploymentInstanceCountMetric.WithLabelValues(
			deployment.Name,
			vmType,
		).Set(float64(n))
	}
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"time"
"cloud.google.com/go/storage"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
// Runtime configuration, read from the environment at process start.
// fileName is the fixed bucket object for which signed URLs are issued.
var (
bucketName = os.Getenv("BUCKET_NAME")
googleAccessID = os.Getenv("GOOGLE_ACCESS_ID")
privateKeyPath = os.Getenv("PRIVATE_KEY_PATH")
originAllowed = os.Getenv("ORIGIN_ALLOWED")
port = os.Getenv("PORT")
fileName = "demo.mp4"
)
// signURL is the JSON response payload carrying a GET signed URL and a
// PUT ("post") signed URL for the same bucket object.
type signURL struct {
SignURL string `json:"signURL"`
PostSignURL string `json:"postSignURL"`
}
// main wires the /api/v1/sign_url route behind CORS middleware and serves
// HTTP on $PORT (defaulting to 8080). ListenAndServe only returns on
// error, which is logged fatally.
func main() {
router := mux.NewRouter()
api := router.PathPrefix("/api/v1/").Subrouter()
api.HandleFunc("/sign_url", indexHandel)
headersOk := handlers.AllowedHeaders([]string{"X-Requested-With"})
originsOk := handlers.AllowedOrigins([]string{originAllowed})
methodsOk := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "OPTIONS"})
http.Handle("/", handlers.CORS(originsOk, headersOk, methodsOk)(router))
if port == "" {
port = "8080"
log.Printf("Defaulting to port %s", port)
}
log.Printf("Listening on port %s", port)
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
}
// indexHandel responds with a JSON document containing a 15-minute GET
// signed URL and a 12-hour PUT (video/mp4) signed URL for the configured
// bucket object. (The misspelled name is kept: main registers this exact
// symbol as the route handler.)
// Fixes: the private-key read error and the json.Marshal error were
// silently ignored; the local variable shadowed the signURL type.
func indexHandel(w http.ResponseWriter, r *http.Request) {
	pkey, err := ioutil.ReadFile(privateKeyPath)
	if err != nil {
		// Without the key nothing can be signed; fail the request instead
		// of signing with a nil key.
		log.Printf("reading private key: %+v", err)
		http.Error(w, "internal error", http.StatusInternalServerError)
		return
	}
	opts := &storage.SignedURLOptions{
		GoogleAccessID: googleAccessID,
		PrivateKey:     pkey,
		Method:         "GET",
		Expires:        time.Now().Add(15 * time.Minute),
	}
	optsPut := &storage.SignedURLOptions{
		GoogleAccessID: googleAccessID,
		PrivateKey:     pkey,
		ContentType:    "video/mp4",
		Method:         "PUT",
		Expires:        time.Now().Add(12 * time.Hour),
	}
	url, err := storage.SignedURL(bucketName, fileName, opts)
	if err != nil {
		log.Printf("err: %+v", err)
	}
	postURL, err := storage.SignedURL(bucketName, fileName, optsPut)
	if err != nil {
		log.Printf("err: %+v", err)
	}
	payload := signURL{
		SignURL:     url,
		PostSignURL: postURL,
	}
	js, err := json.Marshal(payload)
	if err != nil {
		log.Printf("marshaling response: %+v", err)
		http.Error(w, "internal error", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(js)
}
|
package ptr
// String returns a pointer to a fresh copy of x; handy for APIs that
// model optional fields as *string.
func String(x string) *string {
	v := x
	return &v
}
|
// Web Server
package main
import (
"fmt"
"log"
"net/http"
)
// main serves HTTP on localhost:8000, routing every path to handler.
// ListenAndServe only returns on error, which is logged fatally.
func main() {
http.HandleFunc("/", handler) // every request is dispatched to handler
log.Fatal(http.ListenAndServe("localhost:8000", nil))
}
// handler each request
func handler(w http.ResponseWriter, r *http.Request) {
fmt.Printf("URL.Path = %q", r.URL.Path)
// write a response
//fmt.Fprintf(w, "URL.Path = %q\n", r.URL.Path)
// redirect to
http.Redirect(w, r, "http://www.google.es", 301)
}
|
// Copyright (c) 2020 Hirotsuna Mizuno. All rights reserved.
// Use of this source code is governed by the MIT license that can be found in
// the LICENSE file.
package speedio
import (
"fmt"
"sync"
"time"
"github.com/tunabay/go-infounit"
)
// limiter is a token-bucket byte-rate limiter guarded by mu.
type limiter struct {
rate float64 // bytes per sec ( = bps / 8 )
burst float64 // bucket capacity in bytes ( rate * resolution )
minPartial int // smallest partial grant in bytes ( rate * maxWait )
rateCoef float64 // time.Second / rate; converts a byte deficit to a wait
lastTime time.Time // time of the last grant (may be pre-charged into the future)
lastToken float64 // tokens (bytes) remaining as of lastTime
mu sync.RWMutex
}
// newLimiter creates a limiter for the given bit rate.
//
// resolution is the period over which transfer is totaled when deciding
// whether the rate is exceeded. For example, at 1 kbit/s with a 3s
// resolution, a 3 kbit burst is allowed after 2 idle seconds, whereas a
// 1s resolution always caps each second at 1 kbit.
//
// maxWait bounds how long a transfer may be delayed when the rate is
// exceeded; once it elapses, only the portion transferable at that moment
// is granted.
func newLimiter(rate infounit.BitRate, resolution, maxWait time.Duration) (*limiter, error) {
l := &limiter{}
if err := l.set(time.Time{}, rate, resolution, maxWait); err != nil {
return nil, err
}
return l, nil
}
//
// set validates and applies new rate parameters: the bit rate becomes
// bytes/sec, resolution becomes the burst (bucket) size, and maxWait
// becomes the minimum partial grant. When reconfiguring a live limiter
// (both lastTime and tc non-zero), tokens accrued up to tc under the OLD
// rate are credited first.
func (l *limiter) set(tc time.Time, rate infounit.BitRate, resolution, maxWait time.Duration) error {
	switch {
	case rate < 0:
		return fmt.Errorf("%w: negative bit rate %v", ErrInvalidParameter, rate)
	case rate == 0:
		return fmt.Errorf("%w: zero bit rate", ErrInvalidParameter)
	case resolution < 0:
		return fmt.Errorf("%w: negative resolution %s", ErrInvalidParameter, resolution)
	case resolution == 0:
		return fmt.Errorf("%w: zero resolution", ErrInvalidParameter)
	case maxWait < 0:
		return fmt.Errorf("%w: negative max-wait %s", ErrInvalidParameter, maxWait)
	case maxWait == 0:
		return fmt.Errorf("%w: zero max-wait", ErrInvalidParameter)
	}
	newRate := float64(rate) / 8
	newBurst := newRate * resolution.Seconds()
	newMinPartial := int(newRate * maxWait.Seconds())
	switch {
	case infounit.ByteCount(newBurst) < 1:
		return fmt.Errorf("%w: rate and/or resolution is too small: rate=%v, reso=%s", ErrInvalidParameter, rate, resolution)
	case newMinPartial < 1:
		return fmt.Errorf("%w: rate and/or max-wait is too small: rate=%v, wait=%s", ErrInvalidParameter, rate, maxWait)
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if !l.lastTime.IsZero() && !tc.IsZero() {
		// Credit tokens accrued under the old rate BEFORE moving lastTime.
		// Fix: the original assigned l.lastTime = tc first, which made the
		// elapsed interval tc.Sub(l.lastTime) always zero, so no tokens
		// were ever credited on reconfiguration.
		l.lastToken += tc.Sub(l.lastTime).Seconds() * l.rate
		if l.burst < l.lastToken {
			l.lastToken = l.burst
		}
		l.lastTime = tc
	}
	l.rate = newRate
	l.burst = newBurst
	l.minPartial = newMinPartial
	l.rateCoef = float64(time.Second) / l.rate
	return nil
}
// refund returns bc unused bytes to the token bucket, e.g. when a granted
// transfer was not fully performed.
func (l *limiter) refund(bc int) {
	l.mu.Lock()
	l.lastToken += float64(bc)
	l.mu.Unlock()
}
// request asks, at time tc, to transfer bc bytes. It returns how long the
// caller must wait and how many bytes are granted (possibly fewer than bc
// when only a partial transfer fits).
func (l *limiter) request(tc time.Time, bc int) (time.Duration, int) {
l.mu.Lock()
defer l.mu.Unlock()
// Tokens accrued since the last grant, capped at the burst size.
allowed := l.lastToken + l.rate*tc.Sub(l.lastTime).Seconds()
if l.burst < allowed {
allowed = l.burst
}
allowedBytes := int(allowed)
switch {
case bc <= allowedBytes:
// The whole request fits: grant immediately, keep the remainder.
l.lastTime = tc
l.lastToken = allowed - float64(bc)
return 0, bc
case l.minPartial <= allowedBytes:
// Enough tokens for a partial grant of at least minPartial bytes.
l.lastTime = tc
l.lastToken = allowed - float64(allowedBytes)
return 0, allowedBytes
}
// Not enough tokens: compute the wait until wsz bytes become available
// and pre-charge the bucket by advancing lastTime into the future.
wsz := l.minPartial
if bc < wsz {
wsz = bc
}
d := time.Duration(l.rateCoef * (float64(wsz) - allowed))
l.lastTime = tc.Add(d)
l.lastToken = 0
return d, wsz
}
|
// Copyright 2019 Kuei-chun Chen. All rights reserved.
package mdb
import (
"context"
"fmt"
"log"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// ChangeStream defines what to watch — the whole client, one database, or
// one collection — optionally filtered by an aggregation pipeline.
type ChangeStream struct {
collection string
database string
pipeline []bson.D
}
// callback receives each decoded change-stream document.
type callback func(bson.M)
// SetCollection sets the collection to watch.
func (cs *ChangeStream) SetCollection(collection string) {
cs.collection = collection
}
// SetDatabase sets the database to watch.
func (cs *ChangeStream) SetDatabase(database string) {
cs.database = database
}
// SetPipeline sets the aggregation pipeline used to filter events.
func (cs *ChangeStream) SetPipeline(pipeline []bson.D) {
cs.pipeline = pipeline
}
// SetPipelineString parses a pipeline given in string form via
// MongoPipeline; an empty string clears the pipeline.
func (cs *ChangeStream) SetPipelineString(pipe string) {
var pipeline = []bson.D{}
if pipe != "" {
pipeline = MongoPipeline(pipe)
}
cs.pipeline = pipeline
}
// NewChangeStream gets a new, unconfigured ChangeStream.
func NewChangeStream() *ChangeStream {
return &ChangeStream{}
}
// Watch opens a change stream scoped to the collection, database, or
// whole client (narrowest configured scope wins), decodes each event, and
// invokes cb with it. Updates request the full document ("updateLookup").
// It blocks until the stream ends; open failures panic and decode/stream
// errors terminate via log.Fatal.
func (cs *ChangeStream) Watch(client *mongo.Client, cb callback) {
var err error
var ctx = context.Background()
var cur *mongo.ChangeStream
fmt.Println("pipeline", cs.pipeline)
opts := options.ChangeStream()
opts.SetFullDocument("updateLookup")
// Scope selection: collection requires both names, database just one.
if cs.collection != "" && cs.database != "" {
fmt.Println("Watching", cs.database+"."+cs.collection)
var coll = client.Database(cs.database).Collection(cs.collection)
if cur, err = coll.Watch(ctx, cs.pipeline, opts); err != nil {
panic(err)
}
} else if cs.database != "" {
fmt.Println("Watching", cs.database)
var db = client.Database(cs.database)
if cur, err = db.Watch(ctx, cs.pipeline, opts); err != nil {
panic(err)
}
} else {
fmt.Println("Watching all")
if cur, err = client.Watch(ctx, cs.pipeline, opts); err != nil {
panic(err)
}
}
defer cur.Close(ctx)
var doc bson.M
for cur.Next(ctx) {
if err = cur.Decode(&doc); err != nil {
log.Fatal(err)
}
cb(doc)
}
if err = cur.Err(); err != nil {
log.Fatal(err)
}
}
|
package main
import (
"context"
"fmt"
"time"
"github.com/yandex-cloud/examples/serverless/serverless_voximplant/scheme"
"github.com/yandex-cloud/ydb-go-sdk"
"github.com/yandex-cloud/ydb-go-sdk/table"
)
// listDocs returns up to 10 doctors that have schedule entries matching
// the requested specialty and date, optionally narrowed by place. It
// first collects distinct doctor_ids from the schedule table, then loads
// id/name rows from the doctors table ordered by name.
func listDocs(ctx context.Context, req *doctorsRequest) ([]*scheme.Doctor, error) {
// Validate required request fields up front.
switch {
case len(req.Spec) == 0:
return nil, newErrorBadRequest("bad request: require `specId`")
case len(req.Date) == 0:
return nil, newErrorBadRequest("bad request: require `date`")
}
date, err := time.Parse(dateLayout, req.Date)
if err != nil {
return nil, err
}
var query string
params := table.NewQueryParameters()
// Two query variants: with and without the optional place filter.
if len(req.Place) > 0 {
query = `DECLARE $place_id AS Utf8;
DECLARE $spec_id AS Utf8;
DECLARE $date AS Date;
SELECT DISTINCT doctor_id FROM schedule
WHERE spec_id = $spec_id AND ` + "`date`" + ` = $date AND place_id = $place_id
LIMIT 10`
params.Add(table.ValueParam("$spec_id", ydb.UTF8Value(req.Spec)))
params.Add(table.ValueParam("$date", ydb.DateValue(ydb.Time(date).Date())))
params.Add(table.ValueParam("$place_id", ydb.UTF8Value(req.Place)))
} else {
query = `DECLARE $spec_id AS Utf8;
DECLARE $date AS Date;
SELECT DISTINCT doctor_id FROM schedule
WHERE spec_id = $spec_id AND ` + "`date`" + ` = $date
LIMIT 10`
params.Add(table.ValueParam("$spec_id", ydb.UTF8Value(req.Spec)))
params.Add(table.ValueParam("$date", ydb.DateValue(ydb.Time(date).Date())))
}
// Phase 1: gather matching doctor ids in a read-only transaction.
var docIDs []string
err = table.Retry(ctx, sessPool, table.OperationFunc(func(ctx context.Context, session *table.Session) error {
txc := table.TxControl(table.BeginTx(table.WithOnlineReadOnly(table.WithInconsistentReads())), table.CommitTx())
_, res, err := session.Execute(ctx, txc, query, params, table.WithQueryCachePolicy(table.WithQueryCachePolicyKeepInCache()))
if err != nil {
return err
}
defer res.Close()
docIDs = make([]string, 0, res.RowCount())
for res.NextSet() {
for res.NextRow() {
if res.SeekItem("doctor_id") {
docIDs = append(docIDs, res.OUTF8())
}
}
}
return nil
}))
if err != nil {
return nil, err
}
// No schedule matches: return an empty (non-nil) slice, not an error.
if len(docIDs) == 0 {
return []*scheme.Doctor{}, nil
}
// Phase 2: resolve the ids into doctor records.
query = `DECLARE $ids AS List<Utf8>;
SELECT id, name FROM doctors
WHERE id IN $ids
ORDER BY name
LIMIT 10`
var ids []ydb.Value
for _, id := range docIDs {
ids = append(ids, ydb.UTF8Value(id))
}
params = table.NewQueryParameters(table.ValueParam("$ids", ydb.ListValue(ids...)))
var docs []*scheme.Doctor
err = table.Retry(ctx, sessPool, table.OperationFunc(func(ctx context.Context, session *table.Session) error {
txc := table.TxControl(table.BeginTx(table.WithOnlineReadOnly(table.WithInconsistentReads())), table.CommitTx())
_, res, err := session.Execute(ctx, txc, query, params, table.WithQueryCachePolicy(table.WithQueryCachePolicyKeepInCache()))
if err != nil {
return err
}
defer res.Close()
docs = make([]*scheme.Doctor, 0, res.RowCount())
for res.NextSet() {
for res.NextRow() {
docs = append(docs, new(scheme.Doctor).FromYDB(res))
}
}
return nil
}))
if err != nil {
return nil, err
}
return docs, nil
}
// getDoc fetches a single doctor record by id, returning an error when no
// exactly-one row matches.
// NOTE(review): this query declares $id as UTF8 while listDocs uses Utf8
// — confirm YQL treats the two spellings identically.
func getDoc(ctx context.Context, id string) (result *scheme.Doctor, err error) {
query := `DECLARE $id as UTF8;
SELECT * FROM doctors
WHERE id = $id
LIMIT 1;`
params := table.NewQueryParameters(table.ValueParam("$id", ydb.UTF8Value(id)))
err = table.Retry(ctx, sessPool, table.OperationFunc(func(ctx context.Context, session *table.Session) error {
txc := table.TxControl(table.BeginTx(table.WithOnlineReadOnly(table.WithInconsistentReads())), table.CommitTx())
_, res, err := session.Execute(ctx, txc, query, params, table.WithQueryCachePolicy(table.WithQueryCachePolicyKeepInCache()))
if err != nil {
return err
}
defer res.Close()
if res.RowCount() != 1 {
return fmt.Errorf("no such doctor")
}
res.NextSet()
res.NextRow()
result = new(scheme.Doctor).FromYDB(res)
return nil
}))
return result, err
}
|
package constants
const (
// CacheDuration is the cache TTL as a duration string.
// NOTE(review): consumers presumably parse this with
// time.ParseDuration — confirm.
CacheDuration = "10s"
)
|
package isValidBST
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// isValidBST reports whether the tree rooted at root is a valid binary
// search tree. It performs an iterative in-order traversal and checks
// that the visited values are strictly increasing (duplicates are
// rejected). A nil tree is valid.
// (A previous bounds-propagating recursive attempt was incorrect and was
// replaced by this traversal-based check.)
func isValidBST(root *TreeNode) bool {
	if root == nil {
		return true
	}
	var (
		values []int
		stack  []*TreeNode
	)
	node := root
	for node != nil || len(stack) > 0 {
		// Descend as far left as possible, stacking ancestors.
		for node != nil {
			stack = append(stack, node)
			node = node.Left
		}
		// Visit the deepest unvisited node, then explore its right side.
		last := len(stack) - 1
		top := stack[last]
		stack = stack[:last]
		values = append(values, top.Val)
		node = top.Right
	}
	return isSorted(values)
}

// isSorted reports whether nums is strictly increasing; an empty slice
// counts as sorted.
func isSorted(nums []int) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] >= nums[i] {
			return false
		}
	}
	return true
}
|
package poker
import (
"encoding/json"
"fmt"
"io"
)
type Player struct {
Name string
Wins int
}
type League []Player
func (l League) Find(name string) *Player {
for i, p := range l {
if p.Name == name {
return &l[i]
}
}
return nil
}
func LeagueFromReader(r io.Reader) ([]Player, error) {
var league []Player
err := json.NewDecoder(r).Decode(&league)
if err != nil {
_ = fmt.Errorf("error parsing json database. %v", err)
}
return league, err
}
|
package telegraph
import "testing"
// Negative-path tests: every call below is expected to fail, and the
// resulting error message is logged. demoAccount is defined elsewhere in
// the package.
// Fix: each test previously used t.Error(), which does NOT stop the test,
// so the subsequent err.Error() call nil-panicked whenever the expected
// error was absent — crashing the run instead of reporting a failure.
// t.Fatal stops the test immediately.
func TestContentFormatByWTF(t *testing.T) {
_, err := ContentFormat(42)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestCreateInvalidAccount(t *testing.T) {
_, err := CreateAccount("", "", "")
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestCreateInvalidPage(t *testing.T) {
newPage := &Page{
AuthorURL: "lolwat",
}
_, err := demoAccount.CreatePage(newPage, false)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestEditInvalidAccountInfo(t *testing.T) {
var update Account
_, err := demoAccount.EditAccountInfo(&update)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestEditInvalidPage(t *testing.T) {
update := &Page{
AuthorURL: "lolwat",
}
_, err := demoAccount.EditPage(update, false)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidAccountInfo(t *testing.T) {
var account Account
_, err := account.GetAccountInfo([]string{"short_name", "page_count"})
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidPageList(t *testing.T) {
var account Account
_, err := account.GetPageList(0, 3)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidPageListByOffset(t *testing.T) {
var account Account
_, err := account.GetPageList(-42, 3)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidPageListByLimit(t *testing.T) {
var account Account
_, err := account.GetPageList(0, 9000)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidPage(t *testing.T) {
_, err := GetPage("lolwat", true)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidViewsByPage(t *testing.T) {
_, err := GetViews("lolwat", 2016, 12, 0, -1)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidViewsByHour(t *testing.T) {
_, err := GetViews("Sample-Page-12-15", 42, 0, 0, 0)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidViewsByDay(t *testing.T) {
_, err := GetViews("Sample-Page-12-15", 23, 42, 0, 0)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidViewsByMonth(t *testing.T) {
_, err := GetViews("Sample-Page-12-15", 23, 24, 22, 0)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestGetInvalidViewsByYear(t *testing.T) {
_, err := GetViews("Sample-Page-12-15", 23, 24, 12, 1980)
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
func TestRevokeInvalidAccessToken(t *testing.T) {
var account Account
_, err := account.RevokeAccessToken()
if err == nil {
t.Fatal("expected error")
}
t.Log(err.Error())
}
|
package econtext
import (
"github.com/labstack/echo"
"golang.org/x/net/context"
)
const (
// ckey is the Echo-context key under which the context.Context binding is stored.
ckey = "echo.Context"
// contextkey is the context.Context value key under which the Echo context is stored.
contextkey = "context.Context"
)
// FromContext extracts the golang.org/x/net/context.Context previously
// bound to the Echo context via Set, or nil when no binding exists.
func FromContext(c echo.Context) context.Context {
	bound, ok := c.Get(ckey).(context.Context)
	if !ok {
		return nil
	}
	return bound
}
// ToContext extracts the bound Echo context from a golang.org/x/net/context.Context
// if one has been set, or the empty Echo context if one is not available.
// NOTE(review): this asserts *echo.Context under contextkey, but Set only
// stores the binding on the Echo side under ckey — verify that the ctx
// type returned by Set exposes a *echo.Context for contextkey, otherwise
// this always returns the empty context.
func ToContext(ctx context.Context) echo.Context {
if c, ok := ctx.Value(contextkey).(*echo.Context); ok {
return *c
}
return echo.Context{}
}
// Set makes a two-way binding between the given Echo request context and the
// given golang.org/x/net/context.Context. Returns the fresh context.Context that contains
// this binding. Using the ToContext and FromContext functions will allow you to convert
// between one and the other.
//
// Note that since context.Context's are immutable, you will have to call this
// function to "re-bind" the request's canonical context.Context if you ever
// decide to change it.
//
// NOTE(review): ctx below is a package-local type not visible in this
// excerpt; confirm it implements context.Context and serves both
// directions of the binding.
func Set(c *echo.Context, context context.Context) context.Context {
ctx := ctx{c, context}
c.Set(ckey, ctx)
return ctx
}
|
package lib
import (
"fmt"
"io/ioutil"
"math"
"sort"
)
// Searcher holds the loaded corpus text, its per-work index, and the
// n-gram indexes built over it by Load.
type Searcher struct {
CompleteWorks *string
WorksIndex *[]Work
NGramRules *[]*NGramRule
//SuffixArray *suffixarray.Index
}
// SearchResultHighlight is one match with surrounding context: the text
// just before the matched token, the token itself, and the text after.
type SearchResultHighlight struct {
SubContentBefore string
Token string
SubContentAfter string
}
// SearchResult aggregates the highlights and relevance score for one work.
type SearchResult struct {
Name string
Highlights []SearchResultHighlight
Score int
}
// SearchResponse wraps a query with its timing and results.
// NOTE(review): all fields are unexported, so this will not marshal to
// JSON as-is — confirm intended use.
type SearchResponse struct {
query string
timeTook int
results SearchResult
}
// Load reads the corpus at filename, normalizes newlines, builds the
// works index, and constructs trigram, bigram, and unigram indexes over
// every token of every work.
func (s *Searcher) Load(filename string) error {
dat, err := ioutil.ReadFile(filename)
if err != nil {
return fmt.Errorf("Load: %w", err)
}
// Normalize CRLF/CR to LF so byte offsets are stable across platforms.
completeWorks := string(CrossPlatformNewlineRegexp.ReplaceAll(dat, []byte("\n")))
s.CompleteWorks = &completeWorks
worksIndex := BuildWorksIndex(&completeWorks)
s.WorksIndex = worksIndex
nGramRules := []*NGramRule{}
nGramRules = append(nGramRules, &NGramRule{"trigram", &NGramMap{}, 3, 8, nil})
nGramRules = append(nGramRules, &NGramRule{"bigram", &NGramMap{}, 2, 4, nil})
nGramRules = append(nGramRules, &NGramRule{"unigram", &NGramMap{}, 1, 1, nil})
// Slice each work out of the full text (a work ends where the next one
// starts, or at end-of-corpus) and feed its tokens to every rule.
for wIndex, work := range *worksIndex {
wStart := work.start
var wEnd int
if wIndex+1 < len(*worksIndex) {
wEnd = (*worksIndex)[wIndex+1].start
} else {
wEnd = len(completeWorks)
}
wContent := completeWorks[wStart:wEnd]
tokens := *TokenizeWithIndex(wStart, &wContent)
for ti, _ := range tokens {
for _, rule := range nGramRules {
rule.AppendToken(wIndex, ti, &tokens)
}
}
}
for _, rule := range nGramRules {
rule.Finalize()
}
s.NGramRules = &nGramRules
//s.SuffixArray = suffixarray.New(dat)
return nil
}
// SearchResultMapValue accumulates one work's match score and merged match
// positions while a query is being processed.
type SearchResultMapValue struct {
	score int
	poss  *[]NGramPos
}

// SearchResultMap keys accumulated results by work index.
type SearchResultMap = map[int]*SearchResultMapValue

// SearchResultContentLen is the total highlight-context budget (in bytes) that
// FinalizeSearchResults splits across a work's matches.
var SearchResultContentLen = 300
// Max returns the larger of a and b.
func Max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// Min returns the smaller of a and b.
func Min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// FinalizeSearchResults turns the accumulated per-work match map into a slice
// of SearchResult values with highlight snippets, sorted by descending score.
func (s *Searcher) FinalizeSearchResults(m *SearchResultMap) *[]SearchResult {
	results := []SearchResult{}
	completeWorks := *s.CompleteWorks
	for workIndex, searchResult := range *m {
		poss := searchResult.poss
		work := (*s.WorksIndex)[workIndex]
		// Split the total context budget evenly across this work's matches
		// (half before, half after each match). Assumes poss is non-empty —
		// AppendResultPoss only creates entries with at least one position.
		offsetLen := int(math.Floor(float64(SearchResultContentLen) / float64(len(*poss)) / 2))
		highlights := []SearchResultHighlight{}
		for _, pos := range *poss {
			// Clamp the context window to the corpus bounds.
			offsetStart := Max(pos.start-offsetLen, 0)
			offsetEnd := Min(pos.end+offsetLen, len(completeWorks))
			highlights = append(highlights, SearchResultHighlight{
				completeWorks[offsetStart:pos.start],
				completeWorks[pos.start:pos.end],
				completeWorks[pos.end:offsetEnd],
			})
		}
		results = append(results, SearchResult{
			Name:       work.name,
			Highlights: highlights,
			Score:      searchResult.score,
		})
	}
	sort.Slice(results, func(a, b int) bool {
		// Order by desc
		return results[a].Score > results[b].Score
	})
	return &results
}
// maxCandidateLen caps how many merged match positions are kept per work.
var maxCandidateLen = 5

// hasIntersection reports whether the two (closed) position ranges overlap,
// i.e. either range's start falls inside the other.
func hasIntersection(p1, p2 NGramPos) bool {
	return (p1.start <= p2.start && p2.start <= p1.end) ||
		(p2.start <= p1.start && p1.start <= p2.end)
}
// AppendOrMerge folds pos into poss: the first existing range it overlaps is
// widened in place to cover both; otherwise pos is appended. Returns the same
// slice pointer it was given.
func AppendOrMerge(poss *[]NGramPos, pos NGramPos) *[]NGramPos {
	existing := *poss
	for idx := range existing {
		if !hasIntersection(existing[idx], pos) {
			continue
		}
		existing[idx] = NGramPos{
			workIndex: existing[idx].workIndex,
			start:     Min(existing[idx].start, pos.start),
			end:       Max(existing[idx].end, pos.end),
		}
		return poss
	}
	*poss = append(*poss, pos)
	return poss
}
// AppendResultPoss runs one n-gram rule over the query tokens and folds every
// hit into resultsMap: known works get their score bumped and the position
// merged; new works get a fresh single-position entry. Returns the same map
// pointer it was given.
func AppendResultPoss(resultsMap *SearchResultMap, queryTokens *[]string, ruleP *NGramRule) *SearchResultMap {
	// Work on a shallow copy of the rule struct; only read access is needed.
	rule := *ruleP
	for _, qt := range CreateNGrams(*queryTokens, rule.n) {
		for _, pos := range *rule.SearchToken(qt) {
			if searchResult, ok := (*resultsMap)[pos.workIndex]; ok {
				poss := searchResult.poss
				searchResult.score = searchResult.score + rule.score
				poss = AppendOrMerge(poss, pos)
				// NOTE(review): this break only exits the inner position loop,
				// so later n-grams can still grow poss past maxCandidateLen —
				// confirm whether a hard cap was intended.
				if maxCandidateLen <= len(*poss) {
					break
				}
			} else {
				poss := []NGramPos{pos}
				(*resultsMap)[pos.workIndex] = &SearchResultMapValue{rule.score, &poss}
			}
		}
	}
	return resultsMap
}
// Search tokenizes the query, accumulates per-work scores and positions by
// running every n-gram rule over it, then returns the ranked, highlighted
// results.
func (s *Searcher) Search(query string) *[]SearchResult {
	tokens := Tokenize(query)
	acc := SearchResultMap{}
	for _, rule := range *s.NGramRules {
		AppendResultPoss(&acc, &tokens, rule)
	}
	//idxs := s.SuffixArray.Lookup([]byte(query), -1)
	//for _, idx := range idxs {
	//	results = append(results, s.CompleteWorks[idx-250:idx+250])
	//}
	return s.FinalizeSearchResults(&acc)
}
|
package dao
import (
"database/sql"
"github.com/google/wire"
"github.com/Eric-WangHaitao/Go-0712/Week04/internal/model"
"log"
)
// UserRepository is the persistence port for users.
type UserRepository interface {
	AddUser()
}

// userRepo implements UserRepository on top of an embedded *sql.DB handle.
type userRepo struct {
	*sql.DB
}
// AddUser creates a hard-coded demo user and logs it.
// NOTE(review): nothing is written to the embedded *sql.DB yet — stub only.
func (u *userRepo) AddUser() {
	user := &model.User{}
	user.Id = 1
	user.Name = "xiaoming"
	log.Println("add user :" + user.Name)
}
// NewUserRepo builds a userRepo bound to the given database handle.
// Fix: the handle was previously dropped (`return &userRepo{}`), leaving the
// embedded *sql.DB nil and breaking the wire dependency injection.
func NewUserRepo(db *sql.DB) *userRepo {
	return &userRepo{DB: db}
}
// UserSet is the wire provider set: it supplies NewUserRepo and binds the
// concrete *userRepo to the UserRepository interface.
var UserSet = wire.NewSet(NewUserRepo, wire.Bind(new(UserRepository), new(*userRepo)))
|
package domain
import (
"errors"
"fmt"
)
// errors
// NOTE(review): Go convention is lowercase, unpunctuated error strings; the
// messages are left unchanged because callers may depend on the exact text.
var (
	// ErrValidSessionNotFound is returned when a valid session is not found
	ErrValidSessionNotFound = errors.New("Valid session not found")
	// ErrSessionExpired is returned when the requested session has expired
	ErrSessionExpired = errors.New("Session is expired")
)
// log messages
// NOTE(review): "occured" is misspelled ("occurred") in two messages below;
// left unchanged because log scrapers or tests may match the exact strings.
var (
	SessionExpired                = "Auth failed because of an expired session"
	SessionDoesNotExist           = "Auth failed because of an invalid session"
	SessionUnexpectedError        = "An unexpected error occured while checking the session."
	SessionCreationFailed         = "An unexpected error occured creating a session"
	RequestIsMissingSessionCookie = "Unauthorized: Request is missing a session cookie"
	SessionCreated                = "New Session Created"
	SessionDestroyed              = "Session Was Destroyed"
	SessionRefreshed              = "Session was refreshed with the refresh API"
	SessionConcurrentLogin        = "User logged in again with a concurrent active session"
)
// Temporary logging stuff. we should turn this into a callback.

// LogFields carries structured key/value pairs attached to a log line.
type LogFields map[string]string

// LogService is the minimal logging interface the domain layer needs.
type LogService interface {
	Info(message string, fields LogFields)
	WarnError(message string, err error, fields LogFields)
}
// put this in mock?

// FmtLogger is a trivial LogService that writes to stdout via fmt; the bool
// underlying type only exists to give the methods a receiver.
type FmtLogger bool

// Info prints an info-level line with its fields.
func (l FmtLogger) Info(message string, fields LogFields) {
	fmt.Printf("INFO: %s %v\n", message, fields)
}

// WarnError prints a warn-level line with the error and fields.
func (l FmtLogger) WarnError(message string, err error, fields LogFields) {
	fmt.Printf("WARN: %s %v %v\n", message, err, fields)
}
|
// This file contains a bit reworked methods from:
// https://github.com/bwmarrin/dgvoice/blob/master/dgvoice.go
//
// License:
// https://github.com/bwmarrin/dgvoice/blob/master/LICENSE
package main
import (
"bufio"
"encoding/binary"
"fmt"
"github.com/bwmarrin/discordgo"
"github.com/layeh/gopus"
"io"
"net/http"
"os/exec"
"strconv"
"sync"
"time"
)
const (
	channels  int = 2                   // 1 for mono, 2 for stereo
	frameRate int = 48000               // audio sampling rate
	frameSize int = 960                 // uint16 size of each audio frame
	maxBytes  int = (frameSize * 2) * 2 // max size of opus data
)

var (
	opusEncoder *gopus.Encoder // shared encoder, (re)created by each SendPCM run
	run         *exec.Cmd      // currently running ffmpeg process, if any
	sendpcm     bool           // true while a SendPCM loop is active; guarded by mu
	send        chan []int16   // PCM frames flowing from ffmpeg to the encoder loop
	mu          sync.Mutex     // protects sendpcm
	lock        sync.Mutex     // held while playback is paused
)
// SendPCM receives PCM data on the provided channel, encodes it to Opus and
// sends the packets to Discordgo. Only one instance runs at a time; extra
// calls return immediately. It exits when pcm is closed or encoding fails.
func SendPCM(v *discordgo.VoiceConnection, pcm <-chan []int16) {
	// make sure this only runs one instance at a time.
	mu.Lock()
	if sendpcm || pcm == nil {
		mu.Unlock()
		return
	}
	sendpcm = true
	mu.Unlock()
	// Fix: reset the guard under the same mutex that protects it; the previous
	// unlocked write raced with the check above.
	defer func() {
		mu.Lock()
		sendpcm = false
		mu.Unlock()
	}()
	var err error
	opusEncoder, err = gopus.NewEncoder(frameRate, channels, gopus.Audio)
	if err != nil {
		fmt.Println("NewEncoder Error:", err)
		return
	}
	for {
		// Busy-wait (1s granularity) while playback is paused.
		// NOTE(review): `pause` is read here without synchronization — consider
		// an atomic.Bool or guarding it with a mutex.
		if pause {
			lock.Lock()
			for pause {
				time.Sleep(time.Second * 1)
			}
			lock.Unlock()
		}
		// read pcm from chan, exit if channel is closed.
		recv, ok := <-pcm
		if !ok {
			fmt.Println("PCM Channel closed.")
			return
		}
		// try encoding pcm frame with Opus
		opus, err := opusEncoder.Encode(recv, frameSize, maxBytes)
		if err != nil {
			fmt.Println("Encoding Error:", err)
			return
		}
		// Wait until the voice connection is ready to accept opus packets.
		for !v.Ready || v.OpusSend == nil {
			fmt.Printf("Discordgo not ready for opus packets. %+v : %+v", v.Ready, v.OpusSend)
			time.Sleep(1 * time.Second)
		}
		// send encoded opus data to the sendOpus channel
		v.OpusSend <- opus
	}
}
// PlayAudioFile streams videoURL through ffmpeg (s16le PCM, volume-normalized)
// and feeds the decoded frames to SendPCM until EOF or an error occurs.
// NOTE(review): the filename parameter is unused here — confirm whether it can
// be dropped from the API.
func PlayAudioFile(v *discordgo.VoiceConnection, filename string, videoURL string) {
	v.Speaking(true)
	defer v.Speaking(false)
	run = exec.Command("ffmpeg", "-i", videoURL, "-af", "dynaudnorm", "-f", "s16le", "-ar", strconv.Itoa(frameRate), "-ac", strconv.Itoa(channels), "pipe:1")
	ffmpegout, err := run.StdoutPipe()
	if err != nil {
		fmt.Println("StdoutPipe Error:", err)
		return
	}
	// Buffer ffmpeg's stdout so the frame-sized binary.Read calls below do not
	// cost one syscall per frame.
	ffmpegbuf := bufio.NewReaderSize(ffmpegout, 65536)
	err = run.Start()
	if err != nil {
		fmt.Println("RunStart Error:", err)
		return
	}
	// Reap the child asynchronously so it does not linger as a zombie.
	defer func() {
		go run.Wait()
	}()
	if send == nil {
		send = make(chan []int16, 2)
	}
	go SendPCM(v, send)
	for {
		// Read one full frame of interleaved samples; EOF ends playback.
		audiobuf := make([]int16, frameSize*channels)
		err = binary.Read(ffmpegbuf, binary.LittleEndian, &audiobuf)
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return
		}
		if err != nil {
			fmt.Println("error reading from ffmpeg stdout :", err)
			return
		}
		send <- audiobuf
	}
}
// PlayAudioFileVK fetches the given VK audio URL over HTTP, pipes the response
// body into ffmpeg (s16le PCM, volume-normalized) and feeds the decoded frames
// to SendPCM until EOF or an error occurs.
func PlayAudioFileVK(v *discordgo.VoiceConnection, filename string) {
	v.Speaking(true)
	defer v.Speaking(false)
	client := &http.Client{}
	query := filename
	req, err := http.NewRequest("GET", query, nil)
	if err != nil {
		fmt.Println("Couldn't query VK song URL")
		return
	}
	// VK refuses requests without a browser-like User-Agent.
	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36")
	resp, err := client.Do(req)
	if err != nil {
		ChMessageSend(textChannelID, "Sorry, couldn't fetch the response for some reason. Try again later ;)")
		return
	}
	defer resp.Body.Close()
	// Feed the HTTP body straight into ffmpeg's stdin.
	run = exec.Command("ffmpeg", "-i", "pipe:0", "-af", "dynaudnorm", "-f", "s16le", "-ar", strconv.Itoa(frameRate), "-ac", strconv.Itoa(channels), "pipe:1")
	run.Stdin = resp.Body
	ffmpegout, err := run.StdoutPipe()
	if err != nil {
		fmt.Println("StdoutPipe Error:", err)
		return
	}
	// Buffer ffmpeg's stdout so frame-sized reads stay cheap.
	ffmpegbuf := bufio.NewReaderSize(ffmpegout, 65536)
	err = run.Start()
	if err != nil {
		fmt.Println("RunStart Error:", err)
		return
	}
	// Reap the child asynchronously so it does not linger as a zombie.
	defer func() {
		go run.Wait()
	}()
	if send == nil {
		send = make(chan []int16, 2)
	}
	go SendPCM(v, send)
	for {
		// Read one full frame of interleaved samples; EOF ends playback.
		audiobuf := make([]int16, frameSize*channels)
		err = binary.Read(ffmpegbuf, binary.LittleEndian, &audiobuf)
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return
		}
		if err != nil {
			fmt.Println("error reading from ffmpeg stdout :", err)
			return
		}
		send <- audiobuf
	}
}
// KillPlayer stops the currently running ffmpeg process, if any.
// Fix: guard against a nil command/process, which previously caused a
// nil-pointer panic when nothing had been played yet.
func KillPlayer() {
	if run == nil || run.Process == nil {
		return
	}
	run.Process.Kill()
}
|
package main
import (
"bytes"
"encoding/json"
"github.com/pkg/errors"
"github.com/sethgrid/pester"
"io/ioutil"
"log"
"math/rand"
"net/http"
"time"
)
// eventAPI receives event outcome updates.
const eventAPI = "http://127.0.0.1:8080/event/update"

// betsAPI lists the currently active bets.
const betsAPI = "http://127.0.0.1:8081/bets?status=active"

// eventUpdateDto is the JSON payload posted to eventAPI.
type eventUpdateDto struct {
	Id      string `json:"id"`
	Outcome string `json:"outcome"`
}

// betDto is one element of the JSON array returned by betsAPI.
type betDto struct {
	SelectionId string `json:"selectionId"`
}
// getActiveBets fetches the active bets from betsAPI and decodes them.
// Fixes: the response body is now closed (it previously leaked the
// connection), and the Unmarshal error no longer shadows the predeclared
// `error` identifier.
func getActiveBets(httpClient pester.Client) ([]betDto, error) {
	httpResponse, err := httpClient.Get(betsAPI)
	if err != nil {
		return nil, err
	}
	defer httpResponse.Body.Close()
	bodyContent, err := ioutil.ReadAll(httpResponse.Body)
	if err != nil {
		return nil, err
	}
	var dc []betDto
	if err := json.Unmarshal(bodyContent, &dc); err != nil {
		return nil, err
	}
	return dc, nil
}
// publishUpdates POSTs the given event update to eventAPI as JSON.
// Fixes: the POST error was previously wrapped via the wrong variable (`err`,
// which is nil at that point, and errors.WithMessage(nil, ...) returns nil —
// so POST failures were silently swallowed); the response body is now closed
// so the HTTP connection can be reused.
func publishUpdates(eventUpdate eventUpdateDto) error {
	eventUpdateJson, err := json.Marshal(eventUpdate)
	if err != nil {
		return errors.WithMessage(err, "failed to marshal an event update")
	}
	resp, err := http.Post(eventAPI, "application/json",
		bytes.NewBuffer(eventUpdateJson))
	if err != nil {
		return errors.WithMessage(err, "failed to post event update")
	}
	defer resp.Body.Close()
	log.Printf("Sent %s", eventUpdateJson)
	return nil
}
// main resolves every distinct selection found among the active bets to a
// random won/lost outcome and publishes the updates.
func main() {
	rand.Seed(time.Now().UnixNano())
	httpClient := pester.New()
	activeBets, err := getActiveBets(*httpClient)
	if err != nil {
		log.Fatalf("retrive active bets: %s", err)
	}
	// Deduplicate selection ids via a set so each event is updated once.
	matches := make(map[string]bool)
	for _, bet := range activeBets {
		matches[bet.SelectionId] = true
	}
	for key := range matches { // idiomatic form of `for key, _ := range`
		var outcome string
		if rand.Float64() > 0.5 {
			outcome = "lost"
		} else {
			outcome = "won"
		}
		eventUpdate := &eventUpdateDto{
			Id:      key,
			Outcome: outcome,
		}
		err := publishUpdates(*eventUpdate)
		if err != nil {
			log.Fatalln(err)
		}
	}
}
|
package ipproxy
import (
"context"
"fmt"
"net"
"github.com/google/netstack/tcpip"
"github.com/google/netstack/tcpip/buffer"
"github.com/google/netstack/tcpip/network/ipv4"
"github.com/google/netstack/tcpip/transport/udp"
"github.com/getlantern/errors"
"github.com/getlantern/eventual"
)
// onUDP dispatches an inbound IP packet to the udpConn for its fourtuple,
// lazily creating the conn on first sight of the flow.
func (p *proxy) onUDP(pkt ipPacket) {
	ft := pkt.ft()
	conn := p.udpConns[ft]
	if conn == nil {
		var err error
		conn, err = p.startUDPConn(ft)
		if err != nil {
			log.Error(err)
			return
		}
		p.udpConns[ft] = conn
		p.addUDPConn()
	}
	// Hand the raw packet to the netstack endpoint backing this flow.
	conn.channelEndpoint.InjectInbound(ipv4.ProtocolNumber, tcpip.PacketBuffer{
		Data: buffer.View(pkt.raw).ToVectorisedView(),
	})
}
// startUDPConn creates the netstack plumbing for a new UDP flow: a udpConn
// wrapping the shared origin machinery, an asynchronous dial of the real
// upstream, route entries for both directions, and the two copy goroutines.
// The conn is returned before the dial completes; copyToUpstream blocks on the
// eventual upstream value.
func (p *proxy) startUDPConn(ft fourtuple) (*udpConn, error) {
	upstreamValue := eventual.NewValue()
	downstreamIPAddr := tcpip.Address(net.ParseIP(ft.src.ip).To4())
	conn := &udpConn{
		origin: *newOrigin(p, udp.NewProtocol(), ft.dst, upstreamValue, func(o *origin) error {
			return nil
		}),
		ft: ft,
	}
	// Dial the upstream in the background; consumers wait on upstreamValue.
	go func() {
		upstreamAddr := fmt.Sprintf("%v:%d", ft.dst.ip, ft.dst.port)
		upstream, err := p.opts.DialUDP(context.Background(), "udp", upstreamAddr)
		if err != nil {
			upstreamValue.Cancel()
			conn.closeNow()
			log.Errorf("Unable to dial upstream %v: %v", upstreamAddr, err)
			// NOTE(review): execution falls through and Sets a nil upstream
			// right after Cancel — a `return` here looks intended; confirm
			// against eventual.Value semantics before changing.
		}
		upstreamValue.Set(upstream)
	}()
	if err := conn.init(udp.ProtocolNumber, tcpip.FullAddress{nicID, "", ft.dst.port}); err != nil {
		conn.closeNow()
		return nil, errors.New("Unable to initialize UDP connection for %v: %v", ft, err)
	}
	// to our NIC and routes packets to the downstreamIPAddr as well,
	upstreamSubnet, _ := tcpip.NewSubnet(conn.ipAddr, tcpip.AddressMask(conn.ipAddr))
	downstreamSubnet, _ := tcpip.NewSubnet(downstreamIPAddr, tcpip.AddressMask(downstreamIPAddr))
	conn.stack.SetRouteTable([]tcpip.Route{
		{
			Destination: upstreamSubnet,
			Gateway:     "",
			NIC:         nicID,
		},
		{
			Destination: downstreamSubnet,
			Gateway:     "",
			NIC:         nicID,
		},
	})
	// Pump packets in both directions until the conn closes.
	go conn.copyToUpstream(&tcpip.FullAddress{0, "", ft.dst.port})
	go conn.copyFromUpstream(tcpip.WriteOptions{To: &tcpip.FullAddress{0, downstreamIPAddr, ft.src.port}})
	return conn, nil
}
// udpConn is a single proxied UDP flow: the shared origin machinery plus the
// fourtuple that identifies it in the proxy's conn table.
type udpConn struct {
	origin
	ft fourtuple
}
// reapUDP closes and forgets conns that have been idle longer than the
// configured IdleTimeout. Deleting from the map while ranging over it is safe
// in Go. NOTE(review): assumes the caller serializes access to p.udpConns.
func (p *proxy) reapUDP() {
	for ft, conn := range p.udpConns {
		if conn.timeSinceLastActive() > p.opts.IdleTimeout {
			// Close asynchronously so a slow close does not stall the reaper.
			go conn.closeNow()
			delete(p.udpConns, ft)
			p.removeUDPConn()
		}
	}
}
// closeUDP synchronously closes every tracked UDP conn and empties the table;
// used during proxy shutdown.
func (p *proxy) closeUDP() {
	for ft, conn := range p.udpConns {
		conn.closeNow()
		delete(p.udpConns, ft)
		p.removeUDPConn()
	}
}
|
package suggest
import (
"fmt"
"testing"
)
// TestSuggest verifies that a plain-English query yields at least one
// suggestion without error.
func TestSuggest(t *testing.T) {
	const query = "apples and oranges"
	suggestions, err := Suggest(query)
	if err != nil {
		t.Error(err)
	}
	if len(suggestions) == 0 {
		t.Error("expected suggestions, got none")
	}
}
// ExampleSuggest demonstrates calling Suggest and checking that it returned
// something.
func ExampleSuggest() {
	suggestions, err := Suggest("apples and oranges")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(suggestions) > 0)
	// Output: true
}
|
package main
// 剑指 Offer II 083. 没有重复元素集合的全排列
// 给定一个不含重复数字的整数数组 nums ,返回其 所有可能的全排列 。可以 按任意顺序 返回答案。
// 输入:nums = [1,2,3]
// 输出:[[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]
// main exercises permute on a tiny two-element input.
func main() {
	permute([]int{1, 0})
}
// permute returns every permutation of nums (which must contain no duplicate
// values), in the order produced by a depth-first search that always tries
// indices in ascending order.
func permute(nums []int) [][]int {
	var answer [][]int
	visited := make([]bool, len(nums))
	dfs(&answer, nums, &visited, &[]int{}, 0)
	return answer
}

// dfs extends the partial permutation in res with every not-yet-used element,
// recording a finished permutation once its length reaches len(nums).
func dfs(answer *[][]int, nums []int, visited *[]bool, res *[]int, length int) {
	if length == len(nums) {
		// Deep-copy the current permutation: res is shared mutable state and
		// will be rewritten by subsequent backtracking steps.
		snapshot := make([]int, length)
		copy(snapshot, *res)
		*answer = append(*answer, snapshot)
		return
	}
	for idx := 0; idx < len(nums); idx++ {
		if (*visited)[idx] {
			continue
		}
		// Take nums[idx], mark it used, and recurse one level deeper.
		(*visited)[idx] = true
		*res = append(*res, nums[idx])
		dfs(answer, nums, visited, res, length+1)
		// Backtrack: drop the element and clear its used mark.
		*res = (*res)[:len(*res)-1]
		(*visited)[idx] = false
	}
}
|
/*
Crie uma função que retorna uma função.
Atribua a função retornada a uma variável.
Chame a função retornada.
*/
package main
import (
"fmt"
"math"
)
// main obtains the cubing closure and prints 2 cubed.
func main() {
	cube := retornaNúmeroAoCubo()
	fmt.Println("2 elevado ao cubo é:", cube(2.0))
}
// retornaNúmeroAoCubo returns a closure that raises its argument to the third
// power.
func retornaNúmeroAoCubo() func(num float64) float64 {
	cubo := func(num float64) float64 {
		return math.Pow(num, 3.0)
	}
	return cubo
}
|
package export
import (
"fmt"
"github.com/shopspring/decimal"
"github.com/tealeg/xlsx"
"strconv"
"time"
)
// CreateWalletLogFileRequest describes one wallet-log export job.
type CreateWalletLogFileRequest struct {
	Sheet           string      // worksheet name
	Title           string      // file title
	Timezone        string      // timezone label appended to the title
	IsDivideHundred bool        // divide amounts by 100 (currencies stored in cents)
	Content         []WalletLog // rows to export
}

// WalletLog is a single wallet ledger entry.
type WalletLog struct {
	Id           int64
	BusinessNo   string
	ChangeAmount int64
	AfterBalance int64
	OpType       string
	OrderType    string
	Remark       string
	CreateTime   int64 // unix seconds
}

// walletLogHead lists the column headers, in output order:
// business no, change amount, balance after, debit/credit type, order type,
// remark, creation time.
var walletLogHead = []string{
	"业务单号",
	"变动金额",
	"变动后余额",
	"出入账类型",
	"订单类型",
	"备注",
	"创建时间",
}
// CreateWalletLogFile appends a wallet-log worksheet to file: a merged title
// row, a styled header row, then one row per WalletLog entry. It returns the
// same *xlsx.File, or an error if the sheet cannot be added.
func CreateWalletLogFile(file *xlsx.File, req *CreateWalletLogFileRequest) (*xlsx.File, error) {
	orderLis := req.Content
	// Create the worksheet.
	sheet, err := file.AddSheet(req.Sheet)
	if err != nil {
		return nil, err
	}
	// Title style: large bold centered font.
	titleStyle := xlsx.NewStyle()
	titleStyle.Font = xlsx.Font{
		Size:      18,
		Name:      "宋体",
		Bold:      true,
		Italic:    false,
		Underline: false,
	}
	titleStyle.Alignment = xlsx.Alignment{
		Horizontal: "center",
		Vertical:   "center",
	}
	// Title row, merged across 21 columns, annotated with the timezone.
	titleRow := sheet.AddRow()
	titleRow.SetHeightCM(2)
	titleCell := titleRow.AddCell()
	titleCell.HMerge = 20
	titleCell.Value = fmt.Sprintf("%s(时区:%s)", req.Title, req.Timezone)
	titleCell.SetStyle(titleStyle)
	// Header row with a smaller bold font.
	headStyle := xlsx.NewStyle()
	headStyle.Font = xlsx.Font{
		Size: 13,
		Name: "宋体",
		Bold: true,
	}
	headRow := sheet.AddRow()
	headRow.SetHeightCM(0.7)
	for _, v := range walletLogHead {
		cell := headRow.AddCell()
		cell.SetStyle(headStyle)
		cell.Value = v
	}
	// Data rows, one per ledger entry, in walletLogHead column order.
	for _, order := range orderLis {
		contentRow := sheet.AddRow()
		contentRow.SetHeightCM(0.7)
		// Business number.
		orderNo := contentRow.AddCell()
		orderNo.Value = order.BusinessNo
		// Change amount.
		changeAmount := contentRow.AddCell()
		// Balance after the change.
		afterBalance := contentRow.AddCell()
		// Amounts stored in cents are converted to currency units (÷100, 2dp).
		if req.IsDivideHundred {
			changeAmount.Value = decimal.NewFromInt(order.ChangeAmount).Div(decimal.NewFromInt(100)).Round(2).String()
			afterBalance.Value = decimal.NewFromInt(order.AfterBalance).Div(decimal.NewFromInt(100)).Round(2).String()
		} else {
			changeAmount.Value = strconv.FormatInt(order.ChangeAmount, 10)
			afterBalance.Value = strconv.FormatInt(order.AfterBalance, 10)
		}
		// Debit/credit type.
		opType := contentRow.AddCell()
		opType.Value = order.OpType
		// Order type.
		orderType := contentRow.AddCell()
		orderType.Value = order.OrderType
		// Remark.
		remark := contentRow.AddCell()
		remark.Value = order.Remark
		// Creation time, rendered with the package-level TimeFormat.
		createTime := contentRow.AddCell()
		createTime.Value = time.Unix(order.CreateTime, 0).Format(TimeFormat)
	}
	return file, nil
}
|
package pgsql
import (
"database/sql"
"database/sql/driver"
"strconv"
)
// Factory constructors. Each Int4RangeFromXxx returns a driver.Valuer that
// encodes a Go 2-element array as the PostgreSQL int4range literal "[lo,hi)";
// each Int4RangeToXxx returns an sql.Scanner that decodes an int4range into
// the pointed-to array.

// Int4RangeFromIntArray2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]int.
func Int4RangeFromIntArray2(val [2]int) driver.Valuer {
	return int4RangeFromIntArray2{val: val}
}

// Int4RangeToIntArray2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]int and sets it to val.
func Int4RangeToIntArray2(val *[2]int) sql.Scanner {
	return int4RangeToIntArray2{val: val}
}

// Int4RangeFromInt8Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]int8.
func Int4RangeFromInt8Array2(val [2]int8) driver.Valuer {
	return int4RangeFromInt8Array2{val: val}
}

// Int4RangeToInt8Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]int8 and sets it to val.
func Int4RangeToInt8Array2(val *[2]int8) sql.Scanner {
	return int4RangeToInt8Array2{val: val}
}

// Int4RangeFromInt16Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]int16.
func Int4RangeFromInt16Array2(val [2]int16) driver.Valuer {
	return int4RangeFromInt16Array2{val: val}
}

// Int4RangeToInt16Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]int16 and sets it to val.
func Int4RangeToInt16Array2(val *[2]int16) sql.Scanner {
	return int4RangeToInt16Array2{val: val}
}

// Int4RangeFromInt32Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]int32.
func Int4RangeFromInt32Array2(val [2]int32) driver.Valuer {
	return int4RangeFromInt32Array2{val: val}
}

// Int4RangeToInt32Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]int32 and sets it to val.
func Int4RangeToInt32Array2(val *[2]int32) sql.Scanner {
	return int4RangeToInt32Array2{val: val}
}

// Int4RangeFromInt64Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]int64.
func Int4RangeFromInt64Array2(val [2]int64) driver.Valuer {
	return int4RangeFromInt64Array2{val: val}
}

// Int4RangeToInt64Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]int64 and sets it to val.
func Int4RangeToInt64Array2(val *[2]int64) sql.Scanner {
	return int4RangeToInt64Array2{val: val}
}

// Int4RangeFromUintArray2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]uint.
func Int4RangeFromUintArray2(val [2]uint) driver.Valuer {
	return int4RangeFromUintArray2{val: val}
}

// Int4RangeToUintArray2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]uint and sets it to val.
func Int4RangeToUintArray2(val *[2]uint) sql.Scanner {
	return int4RangeToUintArray2{val: val}
}

// Int4RangeFromUint8Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]uint8.
func Int4RangeFromUint8Array2(val [2]uint8) driver.Valuer {
	return int4RangeFromUint8Array2{val: val}
}

// Int4RangeToUint8Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]uint8 and sets it to val.
func Int4RangeToUint8Array2(val *[2]uint8) sql.Scanner {
	return int4RangeToUint8Array2{val: val}
}

// Int4RangeFromUint16Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]uint16.
func Int4RangeFromUint16Array2(val [2]uint16) driver.Valuer {
	return int4RangeFromUint16Array2{val: val}
}

// Int4RangeToUint16Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]uint16 and sets it to val.
func Int4RangeToUint16Array2(val *[2]uint16) sql.Scanner {
	return int4RangeToUint16Array2{val: val}
}

// Int4RangeFromUint32Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]uint32.
func Int4RangeFromUint32Array2(val [2]uint32) driver.Valuer {
	return int4RangeFromUint32Array2{val: val}
}

// Int4RangeToUint32Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]uint32 and sets it to val.
func Int4RangeToUint32Array2(val *[2]uint32) sql.Scanner {
	return int4RangeToUint32Array2{val: val}
}

// Int4RangeFromUint64Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]uint64.
func Int4RangeFromUint64Array2(val [2]uint64) driver.Valuer {
	return int4RangeFromUint64Array2{val: val}
}

// Int4RangeToUint64Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]uint64 and sets it to val.
func Int4RangeToUint64Array2(val *[2]uint64) sql.Scanner {
	return int4RangeToUint64Array2{val: val}
}

// Int4RangeFromFloat32Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]float32.
func Int4RangeFromFloat32Array2(val [2]float32) driver.Valuer {
	return int4RangeFromFloat32Array2{val: val}
}

// Int4RangeToFloat32Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]float32 and sets it to val.
func Int4RangeToFloat32Array2(val *[2]float32) sql.Scanner {
	return int4RangeToFloat32Array2{val: val}
}

// Int4RangeFromFloat64Array2 returns a driver.Valuer that produces a PostgreSQL int4range from the given Go [2]float64.
func Int4RangeFromFloat64Array2(val [2]float64) driver.Valuer {
	return int4RangeFromFloat64Array2{val: val}
}

// Int4RangeToFloat64Array2 returns an sql.Scanner that converts a PostgreSQL int4range into a Go [2]float64 and sets it to val.
func Int4RangeToFloat64Array2(val *[2]float64) sql.Scanner {
	return int4RangeToFloat64Array2{val: val}
}
// int4RangeFromIntArray2 adapts a [2]int to driver.Valuer, encoding it as the
// half-open PostgreSQL range literal "[lo,hi)".
type int4RangeFromIntArray2 struct {
	val [2]int
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromIntArray2) Value() (driver.Value, error) {
	buf := make([]byte, 0, 24)
	buf = append(buf, '[')
	buf = strconv.AppendInt(buf, int64(v.val[0]), 10)
	buf = append(buf, ',')
	buf = strconv.AppendInt(buf, int64(v.val[1]), 10)
	buf = append(buf, ')')
	return buf, nil
}
// int4RangeToIntArray2 adapts a *[2]int to sql.Scanner.
type int4RangeToIntArray2 struct {
	val *[2]int
}

// Scan implements sql.Scanner. A SQL NULL leaves the target untouched; an
// empty (unbounded) bound leaves that element as 0. Bounds are parsed with
// bitSize 32 to match the int4range value domain.
func (v int4RangeToIntArray2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = int(lo)
	v.val[1] = int(hi)
	return nil
}
// int4RangeFromInt8Array2 adapts a [2]int8 to driver.Valuer, encoding the
// half-open range literal "[lo,hi)".
type int4RangeFromInt8Array2 struct {
	val [2]int8
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromInt8Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToInt8Array2 adapts a *[2]int8 to sql.Scanner.
type int4RangeToInt8Array2 struct {
	val *[2]int8
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are parsed with bitSize 8 to match
// the destination type.
func (v int4RangeToInt8Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 8); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 8); err != nil {
			return err
		}
	}
	v.val[0] = int8(lo)
	v.val[1] = int8(hi)
	return nil
}
// int4RangeFromInt16Array2 adapts a [2]int16 to driver.Valuer, encoding the
// half-open range literal "[lo,hi)".
type int4RangeFromInt16Array2 struct {
	val [2]int16
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromInt16Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToInt16Array2 adapts a *[2]int16 to sql.Scanner.
type int4RangeToInt16Array2 struct {
	val *[2]int16
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are parsed with bitSize 16 to match
// the destination type.
func (v int4RangeToInt16Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 16); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 16); err != nil {
			return err
		}
	}
	v.val[0] = int16(lo)
	v.val[1] = int16(hi)
	return nil
}
// int4RangeFromInt32Array2 adapts a [2]int32 to driver.Valuer, encoding the
// half-open range literal "[lo,hi)".
type int4RangeFromInt32Array2 struct {
	val [2]int32
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromInt32Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToInt32Array2 adapts a *[2]int32 to sql.Scanner.
type int4RangeToInt32Array2 struct {
	val *[2]int32
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are parsed with bitSize 32 to match
// the destination type.
func (v int4RangeToInt32Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = int32(lo)
	v.val[1] = int32(hi)
	return nil
}
// int4RangeFromInt64Array2 adapts a [2]int64 to driver.Valuer, encoding the
// half-open range literal "[lo,hi)".
type int4RangeFromInt64Array2 struct {
	val [2]int64
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromInt64Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, v.val[0], 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, v.val[1], 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToInt64Array2 adapts a *[2]int64 to sql.Scanner.
type int4RangeToInt64Array2 struct {
	val *[2]int64
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are parsed with bitSize 32 (not 64)
// because int4range values always fit int32.
func (v int4RangeToInt64Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = lo
	v.val[1] = hi
	return nil
}
// int4RangeFromUintArray2 adapts a [2]uint to driver.Valuer, encoding the
// half-open range literal "[lo,hi)".
type int4RangeFromUintArray2 struct {
	val [2]uint
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromUintArray2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, uint64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, uint64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToUintArray2 adapts a *[2]uint to sql.Scanner.
type int4RangeToUintArray2 struct {
	val *[2]uint
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are parsed unsigned with bitSize 32.
func (v int4RangeToUintArray2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = uint(lo)
	v.val[1] = uint(hi)
	return nil
}
// int4RangeFromUint8Array2 adapts a [2]uint8 to driver.Valuer, encoding the
// half-open range literal "[lo,hi)".
type int4RangeFromUint8Array2 struct {
	val [2]uint8
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromUint8Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, uint64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, uint64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToUint8Array2 adapts a *[2]uint8 to sql.Scanner.
type int4RangeToUint8Array2 struct {
	val *[2]uint8
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are parsed unsigned with bitSize 8
// to match the destination type.
func (v int4RangeToUint8Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 8); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 8); err != nil {
			return err
		}
	}
	v.val[0] = uint8(lo)
	v.val[1] = uint8(hi)
	return nil
}
// int4RangeFromUint16Array2 adapts a [2]uint16 to driver.Valuer, encoding the
// half-open range literal "[lo,hi)".
type int4RangeFromUint16Array2 struct {
	val [2]uint16
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromUint16Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, uint64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, uint64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToUint16Array2 adapts a *[2]uint16 to sql.Scanner.
type int4RangeToUint16Array2 struct {
	val *[2]uint16
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are parsed unsigned with bitSize 16
// to match the destination type.
func (v int4RangeToUint16Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 16); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 16); err != nil {
			return err
		}
	}
	v.val[0] = uint16(lo)
	v.val[1] = uint16(hi)
	return nil
}
// int4RangeFromUint32Array2 adapts a [2]uint32 to driver.Valuer, encoding the
// half-open range literal "[lo,hi)".
type int4RangeFromUint32Array2 struct {
	val [2]uint32
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromUint32Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, uint64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, uint64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToUint32Array2 adapts a *[2]uint32 to sql.Scanner.
type int4RangeToUint32Array2 struct {
	val *[2]uint32
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are parsed unsigned with bitSize 32
// to match the destination type.
func (v int4RangeToUint32Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = uint32(lo)
	v.val[1] = uint32(hi)
	return nil
}
// int4RangeFromUint64Array2 adapts a [2]uint64 to driver.Valuer, encoding the
// half-open range literal "[lo,hi)".
type int4RangeFromUint64Array2 struct {
	val [2]uint64
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromUint64Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, v.val[0], 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, v.val[1], 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToUint64Array2 adapts a *[2]uint64 to sql.Scanner.
type int4RangeToUint64Array2 struct {
	val *[2]uint64
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are parsed with bitSize 32 (not 64)
// because int4range values always fit 32 bits.
func (v int4RangeToUint64Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = lo
	v.val[1] = hi
	return nil
}
// int4RangeFromFloat32Array2 adapts a [2]float32 to driver.Valuer, encoding
// the half-open range literal "[lo,hi)". Fractional parts are truncated
// toward zero by the int64 conversion, as int4range only holds integers.
type int4RangeFromFloat32Array2 struct {
	val [2]float32
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromFloat32Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToFloat32Array2 adapts a *[2]float32 to sql.Scanner.
type int4RangeToFloat32Array2 struct {
	val *[2]float32
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are integers (parsed with bitSize 32)
// then converted to float32.
func (v int4RangeToFloat32Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = float32(lo)
	v.val[1] = float32(hi)
	return nil
}
// int4RangeFromFloat64Array2 adapts a [2]float64 to driver.Valuer, encoding
// the half-open range literal "[lo,hi)". Fractional parts are truncated
// toward zero by the int64 conversion, as int4range only holds integers.
type int4RangeFromFloat64Array2 struct {
	val [2]float64
}

// Value implements driver.Valuer; it never fails.
func (v int4RangeFromFloat64Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int4RangeToFloat64Array2 adapts a *[2]float64 to sql.Scanner.
type int4RangeToFloat64Array2 struct {
	val *[2]float64
}

// Scan implements sql.Scanner. SQL NULL leaves the target untouched; an empty
// bound leaves that element as 0. Bounds are integers (parsed with bitSize 32)
// then converted to float64.
func (v int4RangeToFloat64Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = float64(lo)
	v.val[1] = float64(hi)
	return nil
}
|
package logic
import (
ccmd "github.com/pip-services3-go/pip-services3-commons-go/commands"
cconv "github.com/pip-services3-go/pip-services3-commons-go/convert"
"github.com/pip-services3-go/pip-services3-commons-go/run"
cvalid "github.com/pip-services3-go/pip-services3-commons-go/validate"
)
// AppExampleCommandSet exposes the AppExampleController operations as
// pip-services commands.
type AppExampleCommandSet struct {
	ccmd.CommandSet
	controller *AppExampleController // target the commands delegate to
}
// NewAppExampleCommandSet creates the command set, wires in a controller,
// and registers its commands.
func NewAppExampleCommandSet() *AppExampleCommandSet {
	c := &AppExampleCommandSet{
		CommandSet: *ccmd.NewCommandSet(),
		controller: NewAppExampleController(),
	}
	// Register the commands; without this the set is empty and "greeting"
	// can never be dispatched through the command set.
	c.AddCommand(c.greetingCommand())
	return c
}
// greetingCommand builds the "greeting" command: it validates that a string
// "name" argument is present and delegates to the controller's Greeting.
func (c *AppExampleCommandSet) greetingCommand() ccmd.ICommand {
	return ccmd.NewCommand(
		"greeting",
		cvalid.NewObjectSchema().
			WithRequiredProperty("name", cconv.String),
		func(correlationId string, args *run.Parameters) (interface{}, error) {
			name := args.GetAsString("name")
			return c.controller.Greeting(name)
		},
	)
}
|
package main
import (
"fmt"
"sort"
)
// main demonstrates deterministic iteration over a Go map: collect the keys,
// sort them, then visit the map in key order.
func main() {
	mapTest := make(map[int]int, 5)
	mapTest[1] = 1000
	mapTest[3] = 4
	mapTest[4] = 3
	mapTest[2] = 2
	// Map iteration order is randomized in Go, so sort the keys first.
	keys := make([]int, 0, len(mapTest))
	for k := range mapTest { // idiomatic: drop the redundant ", _"
		keys = append(keys, k)
	}
	sort.Ints(keys)
	fmt.Println(keys)
	for _, k := range keys {
		fmt.Printf("k=%v,v=%v\n", k, mapTest[k])
	}
}
|
package luasrc
import (
"github.com/davyxu/tabtoy/v3/gen"
"github.com/davyxu/tabtoy/v3/model"
"strings"
"text/template"
)
// UsefulFunc holds helper functions exposed to the Lua code-generation
// templates; entries are registered in this package's init.
var UsefulFunc = template.FuncMap{}
// WrapValue converts a cell value into its Lua literal representation.
// Array-typed cells become a Lua table literal ("{a,b,...}"); an empty array
// cell yields an empty table. Scalar cells are wrapped by gen.WrapSingleValue.
func WrapValue(globals *model.Globals, value string, valueType *model.TypeDefine) string {
	// Scalar: delegate directly (early return removes the unreachable
	// trailing "return value" the original carried after the if/else).
	if !valueType.IsArray() {
		return gen.WrapSingleValue(globals, valueType, value)
	}
	var sb strings.Builder
	sb.WriteString("{")
	// An empty cell exports an empty array, unless defaults are filled in
	// upstream.
	if value != "" {
		for index, elementValue := range strings.Split(value, valueType.ArraySplitter) {
			if index > 0 {
				sb.WriteString(",")
			}
			sb.WriteString(gen.WrapSingleValue(globals, valueType, elementValue))
		}
	}
	sb.WriteString("}")
	return sb.String()
}
// init registers the template helpers used by the Lua exporter.
func init() {
	// WrapTabValue renders the cell at (row, col) of dataTable as a Lua
	// literal, using the full header list for type information.
	UsefulFunc["WrapTabValue"] = func(globals *model.Globals, dataTable *model.DataTable, allHeaders []*model.TypeDefine, row, col int) (ret string) {
		// Look up the complete header (iteration follows the full header list).
		header := allHeaders[col]
		if header == nil {
			return ""
		}
		// Fetch the value from the cell.
		valueCell := dataTable.GetCell(row, col)
		if valueCell != nil {
			return WrapValue(globals, valueCell.Value, header)
		} else {
			// This table has no data for the column: export the empty value.
			return WrapValue(globals, "", header)
		}
	}
}
|
package leetcode
/*We distribute some number of candies, to a row of n = num_people people in the following way:
We then give 1 candy to the first person, 2 candies to the second person,
and so on until we give n candies to the last person.
Then, we go back to the start of the row, giving n + 1 candies to the first person,
n + 2 candies to the second person, and so on until we give 2 * n candies to the last person.
This process repeats (with us giving one more candy each time,
and moving to the start of the row after we reach the end) until we run out of candies.
The last person will receive all of our remaining candies
(not necessarily one more than the previous gift).
Return an array (of length num_people and sum candies)
that represents the final distribution of candies.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/distribute-candies-to-people
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
// distributeCandies hands out candies in increasing amounts (1, 2, 3, ...)
// cycling over num_people until the supply runs out; the final recipient
// takes whatever remains. It returns the per-person totals.
func distributeCandies(candies int, num_people int) []int {
	result := make([]int, num_people)
	give := 1
	for candies > 0 {
		amount := give
		if amount > candies {
			// Not enough left for a full gift: hand over the remainder.
			amount = candies
		}
		result[(give-1)%num_people] += amount
		candies -= amount
		give++
	}
	return result
}
|
/*
Copyright 2018 The HAWQ Team.
*/
// Api versions allow the api contract for a resource to be changed while keeping
// backward compatibility by support multiple concurrent versions
// of the same resource
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/hawq-cn/apiserver-example/pkg/apis/core
// +k8s:defaulter-gen=TypeMeta
// +groupName=core.hawq.org
package v1alpha1 // import "github.com/hawq-cn/apiserver-example/pkg/apis/core/v1alpha1"
|
package hardcoding
import "github.com/chitoku-k/ejaculation-counter/reactor/repository"
var (
	// ThroughVariants lists historical spelling variants of the English word
	// "through" as attested in Middle English sources, including forms with
	// thorn ("þ"), yogh ("ȝ"), eth ("ð") and superscript letters. The list
	// is ordered roughly alphabetically; hyphen-prefixed/suffixed forms and
	// occasional duplicate entries are preserved exactly as authored.
	ThroughVariants = []string{
		"doruȝ-",
		"dorw",
		"dorwe",
		"dorwgh",
		"dourȝh",
		"drowgȝ",
		"durghe",
		"durwe",
		"-thogh",
		"thorch",
		"thorew",
		"thorewe",
		"thorffe",
		"thorg",
		"Thorgh",
		"thorgh",
		"-thorgh",
		"thoʳgh",
		"thorghe",
		"thorght",
		"thorghw",
		"thorghwe",
		"thorgth",
		"thorh",
		"thoro",
		"thorogh",
		"thoroghe",
		"thoroght",
		"-thoroght",
		"thorohe",
		"thoroo",
		"thorou",
		"Thorough",
		"thorough",
		"thorough-",
		"thoroughe",
		"thorought",
		"Thorouh",
		"thorouȝ",
		"thorouȝh",
		"Thorow",
		"thorow",
		"thorow-",
		"Thorowe",
		"thorowe",
		"thorowg",
		"thorowgh",
		"thorowghe",
		"thorowght",
		"thorowh",
		"thorowth",
		"thorowut",
		"thorowȝ",
		"thorowȝt",
		"thorrou",
		"thorrow",
		"thorth",
		"thorthe",
		"thoru",
		"thoru-",
		"thorue",
		"thorugh",
		"-thorugh",
		"thorughe",
		"thorught",
		"-thorught",
		"Thoruh",
		"thoruh",
		"thoruh-",
		"thorur",
		"thoruth",
		"Thoruȝ",
		"thoruȝ",
		"thoruȝh",
		"thorv",
		"Thorw",
		"thorw",
		"-thorw",
		"thoʳw",
		"Thorwe",
		"thorwe",
		"thorwgh",
		"thorwh",
		"thorwȝ",
		"-thorwȝ",
		"thoʳwȝ",
		"thorwȝe",
		"Thorȝ",
		"thorȝ",
		"Thorȝe",
		"thorȝe",
		"thorȝh",
		"thorȝoh",
		"thorȝt",
		"thorȝw",
		"thorȝwe",
		"thour",
		"thour",
		"thoure",
		"thourgh",
		"-thourgh",
		"thourghe",
		"thourght-",
		"thourh",
		"thourhe",
		"thourow",
		"thourr",
		"thourth",
		"thourw",
		"thourw",
		"thourwg",
		"thourȝ",
		"thourȝ",
		"thourȝe",
		"thow",
		"thowe",
		"thowffe",
		"thowr",
		"thowrgh",
		"thowrow",
		"thowur",
		"thrawth",
		"threw",
		"thro",
		"thro-",
		"-thro",
		"throch",
		"throcht",
		"throgh",
		"throghe",
		"throghet",
		"throght",
		"throght",
		"throghte",
		"throighe",
		"throu",
		"throuche",
		"throue",
		"throug",
		"through",
		"through-",
		"throughe",
		"throught",
		"throuh",
		"throuȝ",
		"throuȝe",
		"throuȝht",
		"throve",
		"throw",
		"throw-",
		"tʰrow",
		"throwe",
		"thʳowe",
		"throwe",
		"throwg",
		"throwgh",
		"throwght",
		"throwh",
		"throwr",
		"throwth",
		"throwȝ",
		"throwȝe",
		"throȝ",
		"-throȝe",
		"throȝe",
		"throȝgh",
		"throȝghe",
		"throȝh",
		"throȝhe",
		"throȝt",
		"thruch",
		"thrue-",
		"thrug-",
		"Thrugh",
		"thrugh",
		"thrughe",
		"thrught",
		"thrughte",
		"thruh",
		"thruth",
		"thruȝ",
		"thruȝe",
		"thruȝhe",
		"thrvoo",
		"thrw",
		"thrwe",
		"thrwgh",
		"thrwght",
		"thrygh",
		"thuht",
		"thur",
		"thurch",
		"thurew",
		"thurg",
		"thurge",
		"thurge-",
		"thurgeh",
		"Thurgh",
		"thurgh",
		"thurgh-",
		"-thurgh",
		"thurgh",
		"thurghe",
		"thurght",
		"thurghte",
		"thurgth",
		"thurgwe",
		"Thurh",
		"thurh",
		"thurhe",
		"thurhge",
		"thurhgh",
		"thuro",
		"thurow",
		"thurowe",
		"thurth",
		"thurthe",
		"thuru",
		"thurv",
		"thurw",
		"-thurw",
		"thurwe",
		"Thurȝ",
		"thurȝ",
		"thurȝe",
		"Thurȝh",
		"thurȝh",
		"Thurȝhg",
		"thurȝt",
		"thurȝth",
		"thwrgh",
		"thwrw",
		"torgh",
		"torghe",
		"torw",
		"-torwe",
		"trghug",
		"trogh",
		"troght",
		"trough",
		"trow",
		"trowe",
		"trowffe",
		"trowgh",
		"trowght",
		"trugh",
		"trughe",
		"trught",
		"twrw",
		"yerowe",
		"yhorh",
		"yhoru",
		"yhrow",
		"yhurgh",
		"yhurght",
		"yora",
		"yorch",
		"yorgh",
		"yorghe",
		"yorh",
		"yoro",
		"yorou",
		"yoroue",
		"yorough",
		"yorour",
		"yorow",
		"yorow-",
		"yorowe",
		"yorowe",
		"yoru",
		"yorugh",
		"yoruh",
		"yoruȝ",
		"yorw",
		"yorwe",
		"yorȝ",
		"your",
		"yourch",
		"yourgh",
		"yourghe",
		"yourh",
		"yourw-",
		"yourȝ",
		"yowr",
		"yowrw",
		"yoȝou",
		"yrogh",
		"yrou-",
		"yrow",
		"yrugh",
		"yruȝ",
		"yurch",
		"yurg-",
		"yurgh",
		"yurghe",
		"yurght",
		"yurh",
		"yurhg",
		"yurht",
		"yurowe",
		"yurth",
		"yurthe",
		"yuru",
		"yurw",
		"yurwh",
		"yurȝ",
		"yurȝe",
		"ðoru",
		"þarȝ",
		"þerew",
		"þᵉrew",
		"þerow",
		"þerue-",
		"þhorow",
		"þhurȝ",
		"þor",
		"þorch",
		"þore",
		"þoreu",
		"þorew",
		"þorewe",
		"þorewȝ",
		"þoreȝ",
		"þorg",
		"-þorgh",
		"þorgh",
		"þorghe",
		"þorght",
		"þorghȝ",
		"þorguh",
		"þorgȝ",
		"þorh",
		"þoro",
		"þorogh",
		"þoroghe",
		"þorou",
		"þoroᵘ",
		"þoroue",
		"þorough",
		"þorought",
		"þorouh",
		"þorour",
		"-þorouȝ",
		"þorouȝ",
		"þorouȝe",
		"þorouȝh",
		"þorᵒuȝt",
		"þorow",
		"-þorow",
		"þᵒrow",
		"þorow",
		"þorowe",
		"þorowgh",
		"þorowghe",
		"þorowh",
		"þorowth",
		"þorowþ",
		"þorouwȝ",
		"þoroȝ",
		"þorrow",
		"þorrughe",
		"þorth",
		"þoru",
		"-þoru",
		"þorue",
		"þorug",
		"þorugh",
		"þorught",
		"þorugȝ",
		"þoruh",
		"þoruhg",
		"þoruth",
		"þoruþ",
		"þoruȝ",
		"-þoruȝ",
		"þoruȝe",
		"þoruȝh",
		"þoruȝt",
		"þorv",
		"þorw",
		"þorw-",
		"-þorw",
		"þorwe",
		"þorwgh",
		"þorwgȝ",
		"þorwh",
		"-þorwh",
		"þorwhe",
		"þorwth",
		"þorwtȝ",
		"þorwȝ",
		"þorwȝe",
		"þorþ",
		"þorȝ",
		"þorȝe",
		"þorȝh",
		"þorȝhȝ",
		"þorȝt",
		"þough",
		"þour",
		"þoᵘr",
		"þour",
		"þourg",
		"þourgh",
		"þourght",
		"þourgȝ",
		"þourh",
		"þourh",
		"þourow",
		"þourt",
		"þourth",
		"þouruȝ",
		"þourw",
		"þourw-",
		"-þourw",
		"þourwe",
		"þourþ",
		"þourȝ",
		"t-þourȝ",
		"þourȝ",
		"þourȝe",
		"þourȝh",
		"þourȝt",
		"þourȝw",
		"þouȝ",
		"þouȝt",
		"þowr",
		"þowre",
		"þro",
		"þrogh",
		"þroghe",
		"þrorow",
		"þrorowe",
		"þroth",
		"þrou",
		"þrough",
		"þrought",
		"þroughte-",
		"þrouh",
		"þrouhe",
		"þrouht",
		"þrouȝ",
		"þrouȝe",
		"þrouȝh",
		"þrouȝt",
		"þrouȝte",
		"þrouȝth",
		"þrow",
		"þʳow",
		"þrowe",
		"þrowgh",
		"þrowghe",
		"þrowh",
		"-þrowþ",
		"þrowȝ",
		"þrowȝe",
		"þroȝ",
		"þroȝe",
		"þroȝgh",
		"þroȝh",
		"þroȝt",
		"þroȝth",
		"þrugh",
		"-þruh",
		"þruȝ",
		"þruȝe",
		"þur",
		"þurch",
		"þureh",
		"þureȝ",
		"þurf",
		"þurg",
		"þurgh",
		"-þurgh",
		"þurghe",
		"þurght",
		"þurghȝ",
		"þurgȝh",
		"þurh",
		"þurʰ",
		"þurhg",
		"þurht",
		"þuro",
		"þurow",
		"þurru",
		"þurth",
		"þurthe",
		"þuru",
		"þuruch",
		"þurugh",
		"þurughe",
		"þurut",
		"þuruȝ",
		"þurw",
		"þurw-",
		"þurwe",
		"þurwȝ",
		"þurwȝ",
		"þurþ",
		"þurȝ",
		"þurȝe",
		"þurȝg",
		"þurȝh",
		"þurȝhg",
		"þurȝt",
		"þurȝth",
		"þwrgh",
		"ȝorgh",
		"ȝoru",
		"ȝoruȝ",
		"ȝoruȝt",
		"ȝorw",
		"ȝour",
		"ȝowr",
		"ȝurch",
	}
)
// throughRepository is the in-memory implementation backed by the static
// ThroughVariants list.
type throughRepository struct {
}

// NewThroughRepository returns a repository.ThroughRepository serving the
// hard-coded variant list.
func NewThroughRepository() repository.ThroughRepository {
	return &throughRepository{}
}

// Get returns the full list of "through" spelling variants.
// NOTE(review): the backing slice is returned directly — callers can mutate
// the shared data; confirm that is acceptable.
func (*throughRepository) Get() []string {
	return ThroughVariants
}
|
package domain
import "github.com/hyeyoom/go-web-app-boilerplate/domain/base"
// Product is the product domain entity; common fields (id, timestamps, etc. —
// whatever base.DefaultModel declares) come from the embedded model.
type Product struct {
	base.DefaultModel
	Name string
}

// ProductRepository abstracts product persistence.
type ProductRepository interface {
	Create(*Product)
}
|
package main
import (
_ "github.com/jcallow/covid19map/internal/controllers"
)
// main is intentionally empty: the blank controllers import above is pulled
// in solely for its package side effects (its init functions).
func main() {
}
|
package main
import (
"fmt"
"time"
)
var locales map[string]map[string]string // locale code -> message key -> translated text; populated in main
// main demonstrates a tiny i18n lookup table: it registers English and
// Chinese message maps, then prints a few messages and the localized time.
func main() {
	locales = make(map[string]map[string]string)
	english := map[string]string{
		"pea":       "pea",
		"bean":      "bean",
		"how old":   "I am %d years old",
		"time_zone": "America/Chicago",
	}
	locales["en"] = english
	// Chinese translations, registered under both "cn" and "zh-CN".
	chinese := map[string]string{
		"pea":       "豌豆",
		"bean":      "毛豆",
		"how old":   "我今年%d岁了",
		"time_zone": "Asia/Shanghai",
	}
	locales["cn"] = chinese
	locales["zh-CN"] = chinese
	lang := "zh-CN"
	fmt.Println(msg(lang, "pea"))
	fmt.Println(msg(lang, "bean"))
	fmt.Printf(msg(lang, "how old")+"\n", 30)
	loc, _ := time.LoadLocation(msg(lang, "time_zone"))
	now := time.Now().In(loc)
	fmt.Println(now.Format(time.RFC3339))
}
// msg looks up key in the translation table for locale, returning the empty
// string when either the locale or the key is unknown.
func msg(locale, key string) string {
	translations, ok := locales[locale]
	if !ok {
		return ""
	}
	// A missing key yields the map's zero value, "".
	return translations[key]
}
|
package service
import (
"fmt"
"github.com/parnurzeal/gorequest"
"net/http"
)
// proxy routes outbound requests through the local mesher sidecar, which
// listens on 127.0.0.1:30101.
var proxy = gorequest.New().Proxy("http://127.0.0.1:30101")
// Greeting calls the provider demo-mesher-server's /demo/hello API through
// the mesher proxy and returns the raw response body. It fails when the
// transport reports errors or the response status is outside the 2xx range.
func Greeting() ([]byte, error) {
	resp, body, errs := proxy.Get("http://demo-mesher-server:8090/demo/hello").EndBytes()
	if errs != nil {
		// gorequest collects transport errors in a slice. Format them
		// directly — the original fmt.Errorf(fmt.Sprintf(...)) double
		// formatting is redundant and trips vet's printf check.
		return nil, fmt.Errorf("do request catch a err:%#v", errs)
	}
	if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
		return nil, fmt.Errorf("request status not ok, %d", resp.StatusCode)
	}
	return body, nil
}
|
package main
import (
"bufio"
"encoding/json"
"log"
"fmt"
"os"
"os/user"
"github.com/tskinn/pomogo"
)
// main reads two JSON-encoded pomogo tasks from stdin (old task first, new
// task second, one per line), then opens a client connection and starts a
// session for the current OS user on the new task.
func main() {
	reader := bufio.NewReader(os.Stdin)
	// get old task
	// NOTE(review): ReadLine errors are ignored here and below; a truncated
	// or missing line surfaces only as a JSON unmarshal failure.
	oldRaw, _, _ := reader.ReadLine()
	oldTask := pomogo.Task{}
	err := json.Unmarshal(oldRaw, &oldTask)
	if err != nil {
		log.Println(err)
		return
	}
	// get new task
	newRaw, _, _ := reader.ReadLine()
	newTask := pomogo.Task{}
	err = json.Unmarshal(newRaw, &newTask)
	if err != nil {
		log.Println(err)
		return
	}
	fmt.Println(string(newRaw))
	// Resolve the OS user; fall back to "unknown" when lookup fails.
	username := "unknown"
	osUser, err := user.Current()
	if err == nil {
		username = osUser.Username
	} else {
		//log it
	}
	client := pomogo.Client{}
	err = client.Connect()
	if err != nil {
		// do soemthing
		return
	}
	err = client.SessionStart(username, newTask.UUID)
	if err != nil {
		return
	}
	// user has session and newTask
	// send start session rpc?
	log.Printf("%v\n", newTask)
}
|
package types
import (
"encoding/json"
"fmt"
"time"
)
// Timestamp is a time.Time that unmarshals from a JSON integer Unix
// timestamp (seconds) instead of an RFC 3339 string.
type Timestamp struct {
	time.Time
}

// UnmarshalJSON decodes an int64 Unix-seconds value into the embedded
// time.Time. It returns the decode error unchanged (previously the error was
// also printed to stdout — logging belongs to the caller).
func (p *Timestamp) UnmarshalJSON(data []byte) error {
	var raw int64
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	// Assign directly; the original "*&p.Time = ..." was a needless
	// take-address-then-dereference of the same field.
	p.Time = time.Unix(raw, 0)
	return nil
}
|
package main
import (
"encoding/json"
"fmt"
"sort"
"testing"
)
// TestSortMap prints map values in sorted-key order (maps iterate randomly,
// so the keys are collected and sorted first).
func TestSortMap(t *testing.T) {
	m := map[string]string{
		"1": "222",
		"2": "2212",
		"3": "2232",
	}
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Println(m[k])
	}
}
// TestSortedMap exercises the MapList container: three Keyer elements are
// pushed and then visited via Walk with a printing callback.
// NOTE(review): no assertions — this only demonstrates the API; Walk's
// visiting order is presumably insertion or key order, confirm in MapList.
func TestSortedMap(t *testing.T) {
	m := NewMapList()
	var a, b, c Keyer
	a = &Element{
		key:   "aaa",
		value: "aavvv",
	}
	b = &Element{
		key:   "bbbb",
		value: "bbvvv",
	}
	c = &Element{
		key:   "ccc",
		value: "cccvvv",
	}
	m.Push(a)
	m.Push(b)
	m.Push(c)
	cb := func(data Keyer) {
		fmt.Print(data.GetVal())
		fmt.Println(data.GetKey())
	}
	m.Walk(cb)
}

// TestKeyType shows that a comparable struct (KeyStruct) can serve as a map
// key.
func TestKeyType(t *testing.T) {
	m := make(map[KeyStruct]int)
	key1 := KeyStruct{a: 1}
	m[key1] = 1
	fmt.Println(m)
}
// TestCopy deep-copies a map via a JSON round trip and shows that mutating
// the copy leaves the original untouched.
func TestCopy(t *testing.T) {
	src := make(map[string]int)
	src["a"] = 111
	encoded, _ := json.Marshal(src)
	dst := make(map[string]int)
	_ = json.Unmarshal(encoded, &dst)
	fmt.Println(dst)
	dst["a"] = 2222
	fmt.Println(src)
	fmt.Println(dst)
}
package main
import "fmt"
// sum returns the total of its variadic arguments; with no arguments the
// result is 0.
func sum(xi ...int) int {
	total := 0
	for _, n := range xi {
		total += n
	}
	return total
}
// main prints the sums of a few example argument lists (18, 13, 9).
func main() {
	fmt.Println(sum(2, 4, 5, 4, 3))
	fmt.Println(sum(5, 0, 8))
	fmt.Println(sum(5, 4))
}
|
package service
import (
"github.com/makishi00/go-vue-bbs/model"
"github.com/makki0205/gojwt"
)
// Token is the package-level token service singleton.
var Token = token{}

// token groups token persistence helpers backed by the package-level db
// handle.
type token struct {
}

// Store persists the given token record and returns it (with any fields the
// ORM filled in during Create).
func (t *token) Store(token model.Token) model.Token {
	db.Create(&token)
	return token
}

// ExistByToken reports whether any token row has the given body.
func (t *token) ExistByToken(token string) bool {
	var tokens []model.Token
	db.Where("body = ?", token).Find(&tokens)
	return len(tokens) != 0
}

// ExistTokenById decodes the JWT and reports whether any token row exists
// for the user id in its payload.
// NOTE(review): an undecodable JWT panics here — consider returning false or
// an error; confirm callers rely on the panic.
func (t *token) ExistTokenById(token string) bool {
	var tokens []model.Token
	payload, err := jwt.Decode(token)
	if err != nil{
		panic(err)
	}
	db.Where("user_id = ?", payload["id"]).Find(&tokens)
	return len(tokens) != 0
}

// DeleteByUserId deletes all token rows belonging to userId.
// NOTE(review): tokens is never populated before Delete, so this looks like
// it always returns false — verify whether callers use the return value.
func (t *token) DeleteByUserId(userId int) bool {
	var tokens []model.Token
	db.Where("user_id = ?", userId).Delete(&tokens)
	return len(tokens) != 0
}
package leetcode
// Trie stores lowercase ASCII words. Each node keeps 26 child slots, a flag
// e marking the end of a complete word, and a flag p marking that some word
// passes through (set on every child along an inserted path).
type Trie struct {
	t []*Trie
	e bool
	p bool
}

// Constructor initializes an empty trie node (LeetCode-mandated name).
func Constructor() Trie {
	return Trie{t: make([]*Trie, 26)}
}

// Insert adds word to the trie, creating nodes as needed. Every node on the
// path is marked as a live prefix and the final node as a complete word.
func (tr *Trie) Insert(word string) {
	node := tr
	for i := 0; i < len(word); i++ {
		idx := word[i] - 'a'
		if node.t[idx] == nil {
			child := Constructor()
			node.t[idx] = &child
		}
		node.t[idx].p = true
		node = node.t[idx]
	}
	node.e = true
}

// Search reports whether word was previously inserted as a complete word.
func (tr *Trie) Search(word string) bool {
	node := tr
	for i := 0; i < len(word); i++ {
		next := node.t[word[i]-'a']
		if next == nil {
			return false
		}
		node = next
	}
	return node.e
}

// StartsWith reports whether any inserted word begins with prefix.
func (tr *Trie) StartsWith(prefix string) bool {
	node := tr
	for i := 0; i < len(prefix); i++ {
		next := node.t[prefix[i]-'a']
		if next == nil {
			return false
		}
		node = next
	}
	return node.p
}
|
package action_fanout_service
import (
"ms/sun_old/base"
"ms/sun/shared/helper"
"ms/sun/servises/mem_user_service"
"ms/sun/shared/x"
)
// resetActionFanoutForUser rebuilds the action fan-out rows for one user:
// it deletes the user's existing fan-out, then re-collects the recent
// actions of every followed user and mass-replaces them in the database.
func resetActionFanoutForUser(userId int) {
	var toSaveArr []x.ActionFanout
	// Drop the stale fan-out first; the rebuild below repopulates it.
	x.NewActionFanout_Deleter().ForUserId_Eq(userId).Delete(base.DB)
	um, ok := mem_user_service.GetForUser(userId)
	if !ok {
		// User not in the in-memory service: nothing to rebuild.
		return
	}
	followedIds := um.GetFollowed()
	for _, uid := range followedIds {
		fum, ok := mem_user_service.GetForUser(uid)
		if ok {
			acts := fum.GetLastActions()
			for _, act := range acts {
				if act == nil {
					continue
				}
				r := x.ActionFanout{
					// OrderId orders the feed by the action's creation time.
					OrderId:     helper.NanoRowIdAtSec(act.CreatedTime),
					ForUserId:   userId,
					ActionId:    act.ActionId,
					ActorUserId: act.ActorUserId,
				}
				toSaveArr = append(toSaveArr, r)
			}
		}
	}
	x.MassReplace_ActionFanout(toSaveArr, base.DB)
}
// ResetActionFanoutAll rebuilds the action fan-out for every user in the
// users table, one user at a time.
// NOTE(review): the selector error is silently swallowed — a failed user
// query makes this a no-op with no trace; consider logging it.
func ResetActionFanoutAll() {
	uids, err := x.NewUser_Selector().Select_UserId().GetIntSlice(base.DB)
	if err != nil {
		return
	}
	for _, uid := range uids {
		resetActionFanoutForUser(uid)
	}
}
|
package routes
import (
"github.com/gorilla/mux"
"github.com/ipastushenko/simple-chat/server/controllers/session"
)
// appendAuthAuthRouter registers routes that require an authenticated
// session (sign-out).
func appendAuthAuthRouter(router *mux.Router) {
	router.Handle("/auth/sign_out", session.NewSignOutHandler()).Methods("GET")
}

// appendAnonymousAuthRouter registers routes available without a session
// (sign-in).
func appendAnonymousAuthRouter(router *mux.Router) {
	router.Handle("/auth/sign_in", session.NewSignInHandler()).Methods("POST")
}
|
package mining
import (
"encoding/json"
"strconv"
"github.com/yggie/github-data-challenge-2014/models"
)
// EventsResult accumulates the events extracted from a GitHub event feed;
// currently only push events are collected.
type EventsResult struct {
	PushEvents []*models.PushEvent
}

// AddPushEvent appends a push event to the result.
func (r *EventsResult) AddPushEvent(event *models.PushEvent) {
	r.PushEvents = append(r.PushEvents, event)
}
// ParseEvents decodes a GitHub events JSON array and collects the events we
// handle (currently only "PushEvent") into an EventsResult.
func ParseEvents(data []byte) *EventsResult {
	result := EventsResult{}
	var rawEvents []interface{}
	if err := json.Unmarshal(data, &rawEvents); err != nil {
		// Malformed input yields an empty result. Previously the error was
		// silently discarded with the same net effect; now it is explicit.
		return &result
	}
	for _, element := range rawEvents {
		// Elements are expected to be JSON objects; a non-object element
		// panics here, as in the rest of this package's converters.
		elem := element.(map[string]interface{})
		switch elem["type"] {
		case "PushEvent":
			result.AddPushEvent(ToPushEvent(elem))
		}
	}
	return &result
}
// ToEvent converts a decoded JSON event object into a models.Event.
// All converters in this file panic when an expected field is missing or has
// an unexpected type — callers feed them raw GitHub API payloads.
func ToEvent(data map[string]interface{}) *models.Event {
	id, err := strconv.ParseInt(data["id"].(string), 10, 64)
	if err != nil {
		panic(err)
	}
	return &models.Event{
		Id:         id,
		EventType:  data["type"].(string),
		CreatedAt:  data["created_at"].(string),
		User:       ToUser(data["actor"].(map[string]interface{})),
		Repository: ToRepository(data["repo"].(map[string]interface{})),
	}
}

// ToPushEvent converts a decoded JSON push-event object into a
// models.PushEvent. JSON numbers decode as float64, so the +0.5 before the
// integer conversion rounds (non-negative) values to the nearest integer.
func ToPushEvent(data map[string]interface{}) *models.PushEvent {
	payload := data["payload"].(map[string]interface{})
	return &models.PushEvent{
		Event:   ToEvent(data),
		Size:    int(payload["size"].(float64) + 0.5),
		PushId:  int64(payload["push_id"].(float64) + 0.5),
		Commits: ToCommits(payload["commits"].([]interface{})),
	}
}

// ToUser converts a decoded JSON actor object into a models.User.
func ToUser(data map[string]interface{}) *models.User {
	return &models.User{
		Id:         int64(data["id"].(float64) + 0.5),
		Login:      data["login"].(string),
		GravatarId: data["gravatar_id"].(string),
		AvatarUrl:  data["avatar_url"].(string),
	}
}

// ToRepository converts a decoded JSON repo object into a models.Repository.
func ToRepository(data map[string]interface{}) *models.Repository {
	return &models.Repository{
		Id:   int64(data["id"].(float64) + 0.5),
		Name: data["name"].(string),
		Url:  data["url"].(string),
	}
}

// ToCommit converts a decoded JSON commit object into a models.Commit,
// including its nested author.
func ToCommit(data map[string]interface{}) *models.Commit {
	author := data["author"].(map[string]interface{})
	return &models.Commit{
		Sha:      data["sha"].(string),
		Message:  data["message"].(string),
		Distinct: data["distinct"].(bool),
		Author: &models.CommitAuthor{
			Name:  author["name"].(string),
			Email: author["email"].(string),
		},
	}
}

// ToCommits converts a decoded JSON commit array into models.Commit values.
func ToCommits(data []interface{}) []*models.Commit {
	commits := make([]*models.Commit, len(data))
	for index, element := range data {
		commits[index] = ToCommit(element.(map[string]interface{}))
	}
	return commits
}
|
package postgres_backend
import (
"bytes"
"fmt"
"github.com/straumur/straumur"
"strings"
)
// writeArray renders a Postgres array-containment predicate of the form
// "key @> ARRAY[$n, $n+1, ...]::text[]", appending each element of arr to
// args. paramCount is the next free positional-parameter index; the updated
// index and the predicate text are returned.
func writeArray(paramCount int, args *[]interface{}, key string, arr []string) (int, string) {
	var b bytes.Buffer
	fmt.Fprintf(&b, "%s @> ARRAY[", key)
	for i, elem := range arr {
		if i > 0 {
			b.WriteString(", ")
		}
		fmt.Fprintf(&b, "$%d", paramCount)
		*args = append(*args, elem)
		paramCount++
	}
	b.WriteString("]::text[]")
	return paramCount, b.String()
}
// buildSelectQuery translates a straumur.Query into a parameterized SQL
// select over the event table plus its argument list. Conditions are joined
// with " and "; results are always ordered newest-first.
func buildSelectQuery(q straumur.Query) (string, []interface{}) {
	var buffer bytes.Buffer
	args := []interface{}{}
	paramCount := 1
	delimiter := " and "
	writeDelimiter := false
	buffer.WriteString("select * from event")
	// Early exit, query is empty.
	if q.IsEmpty() {
		buffer.WriteString(" order by created desc;")
		return buffer.String(), args
	}
	buffer.WriteString(" where ")
	// "a OR b" in the key field becomes: key in ($1, $2).
	if q.Key != "" {
		buffer.WriteString("key in (")
		keys := strings.Split(q.Key, "OR")
		for arrIdx, s := range keys {
			args = append(args, strings.TrimSpace(s))
			buffer.WriteString(fmt.Sprintf("$%d", paramCount))
			paramCount++
			if arrIdx+1 < len(keys) {
				buffer.WriteString(", ")
			}
		}
		buffer.WriteString(")")
		writeDelimiter = true
	}
	if q.Origin != "" {
		if writeDelimiter {
			buffer.WriteString(delimiter)
		}
		buffer.WriteString(fmt.Sprintf("origin = $%d", paramCount))
		args = append(args, q.Origin)
		paramCount++
		writeDelimiter = true
	}
	if !q.From.IsZero() {
		if writeDelimiter {
			buffer.WriteString(delimiter)
		}
		buffer.WriteString(fmt.Sprintf("created >= $%d", paramCount))
		args = append(args, q.From)
		paramCount++
		writeDelimiter = true
	}
	if !q.To.IsZero() {
		if writeDelimiter {
			buffer.WriteString(delimiter)
		}
		buffer.WriteString(fmt.Sprintf("created < $%d", paramCount))
		args = append(args, q.To)
		paramCount++
		writeDelimiter = true
	}
	// Array containment fields. Iterate a fixed-order slice rather than a
	// map: map iteration order is random, which made the generated SQL and
	// its parameter order nondeterministic between runs.
	arrayFields := []struct {
		column string
		values []string
	}{
		{"entities", q.Entities},
		{"actors", q.Actors},
	}
	for _, f := range arrayFields {
		if len(f.values) > 0 {
			if writeDelimiter {
				buffer.WriteString(delimiter)
			}
			nextParam, s := writeArray(paramCount, &args, f.column, f.values)
			paramCount = nextParam
			buffer.WriteString(s)
			writeDelimiter = true
		}
	}
	//todo add sort to query
	buffer.WriteString(" order by created desc;")
	return buffer.String(), args
}
|
package PDU
import (
"github.com/andrewz1/gosmpp/Data"
"github.com/andrewz1/gosmpp/Exception"
"github.com/andrewz1/gosmpp/Utils"
)
// EnquireLinkResp is the SMPP enquire_link_resp PDU: the keep-alive
// acknowledgement. It carries no body of its own.
type EnquireLinkResp struct {
	Response
}

// NewEnquireLinkResp allocates and initializes an EnquireLinkResp.
func NewEnquireLinkResp() *EnquireLinkResp {
	a := &EnquireLinkResp{}
	a.Construct()
	return a
}

// Construct initializes the embedded Response and sets the command id.
func (c *EnquireLinkResp) Construct() {
	defer c.SetRealReference(c)
	c.Response.Construct()
	c.SetCommandId(Data.ENQUIRE_LINK_RESP)
}

// GetInstance returns a fresh EnquireLinkResp (factory hook for the PDU
// dispatch machinery).
func (c *EnquireLinkResp) GetInstance() (IPDU, error) {
	return NewEnquireLinkResp(), nil
}

// SetBody is a no-op: enquire_link_resp has an empty body.
func (c *EnquireLinkResp) SetBody(buf *Utils.ByteBuffer) (*Exception.Exception, IPDU) {
	return nil, nil
}

// GetBody is a no-op: enquire_link_resp has an empty body.
func (c *EnquireLinkResp) GetBody() (*Utils.ByteBuffer, *Exception.Exception, IPDU) {
	return nil, nil, nil
}
|
package main
import (
"image"
"image/png"
"log"
"net/http"
"os"
)
// endpoint serves hit-counter images: one counter per request path, rendered
// onto the configured source image.
type endpoint struct {
	// store in a git repo?
	// might help to survive cloudburst
	counts map[string]int // per-URL-path hit counts (in-memory only)
	source image.Image    // base image the counter is drawn onto
}
// http://localhost:8080/counter/${{identifier}}
// ServeHTTP increments the hit counter for the request path and responds
// with a PNG rendering of the new count (produced by the package's counter
// helper).
//
// NOTE(review): counts is read and written without synchronization, but
// net/http serves requests concurrently — this is a data race; guard the map
// with a mutex. Also log.Fatal terminates the whole process on a single
// failed encode, and the fallback png.Encode error is ignored.
func (e *endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	e.counts[r.URL.Path]++
	out := counter(e.source, e.counts[r.URL.Path])
	err := png.Encode(w, out)
	if err != nil {
		f, _ := os.Create("brokencounter.png")
		png.Encode(f, out) // yeah, right, this is gonna work...
		log.Fatal("what have i done")
	}
	return
}
|
package main
import "fmt"
// T is a demo record combining a name, a port, and a lifecycle State.
type T struct {
	Name  string
	Port  int
	State State
}
// State enumerates the lifecycle states of a T.
type State int

// iota + 1 keeps the zero value out of the valid range, so an uninitialized
// State stringifies as "Unknown".
const (
	Running State = iota + 1
	Stopped
	Rebooting
	Terminated
)

// String implements fmt.Stringer for State.
func (s State) String() string {
	switch s {
	case Running:
		return "Running"
	case Stopped:
		return "Stopped"
	case Rebooting:
		return "Rebooting"
	case Terminated:
		return "Terminated"
	}
	return "Unknown"
}
func main() {
	t := T{Name: "example", Port: 6666}
	// State is left at its zero value (0), which String() maps to "Unknown",
	// so this prints: "t {Name:example Port:6666 State:Unknown}"
	fmt.Printf("t %+v\n", t)
}
package article
import (
"context"
"errors"
"github.com/jmoiron/sqlx"
"time"
)
// pgStore is the Postgres-backed article storage, built on sqlx.
type pgStore struct {
	db *sqlx.DB
}

// NewPgStorage wraps db in a pgStore.
func NewPgStorage(db *sqlx.DB) *pgStore {
	return &pgStore{db: db}
}
// UpdateArticle rewrites body, title and preface of the article identified
// by article.Id and bumps updated_at. It returns the article's id and any
// execution error.
// NOTE(review): the id is returned even when err is non-nil; callers should
// check err before using it.
func (p pgStore) UpdateArticle(ctx context.Context, article *Article) (id string, err error) {
	query := `
	update ad.articles
	set body = :body,
	title = :title,
	preface = :preface,
	updated_at = now()
	where id = :id
	`
	_, err = p.db.NamedExecContext(ctx, query, article)
	return article.Id, err
}
// SaveArticle inserts a new article row and returns the id echoed back by
// the database. An empty id (no row returned) is reported as an error.
func (p pgStore) SaveArticle(ctx context.Context, article *Article) (id string, err error) {
	query := `
	insert into ad.articles
	(id, body, title, preface, user_id, created_at, updated_at, deleted_at)
	values (:id, :body, :title, :preface, :user_id, default, null, null)
	returning id
	`
	rows, err := p.db.NamedQueryContext(ctx, query, article)
	if err != nil {
		// Previously the error was ignored and the nil rows iterated.
		return "", err
	}
	defer rows.Close() // release the connection even on early return
	for rows.Next() {
		if err = rows.Scan(&id); err != nil {
			return "", err
		}
	}
	if err = rows.Err(); err != nil {
		return "", err
	}
	if id == "" {
		return "", errors.New("error while adding new article")
	}
	return id, nil
}
// GetArticleById loads the full article row (including body) for id.
func (p pgStore) GetArticleById(ctx context.Context, id string) (*Article, error) {
	query := `
	select id, body, title, preface, user_id, created_at, updated_at, deleted_at
	from ad.articles
	where id = $1
	`
	var article Article
	err := p.db.GetContext(ctx, &article, query, id)
	if err != nil {
		return nil, err
	}
	return &article, nil
}

// GetArticleIdsByPeriod returns the ids of articles created inside the
// [from, to] interval (inclusive on both ends, per SQL "between").
func (p pgStore) GetArticleIdsByPeriod(ctx context.Context, from, to time.Time) ([]string, error) {
	query := `
	select id
	from ad.articles
	where created_at between $1 and $2
	`
	var ids = make([]string, 0)
	err := p.db.SelectContext(ctx, &ids, query, from, to)
	if err != nil {
		return nil, err
	}
	return ids, nil
}

// GetArticlesByPeriod returns article summaries (no body column) created
// inside the [from, to] interval.
func (p pgStore) GetArticlesByPeriod(ctx context.Context, from, to time.Time) ([]Article, error) {
	query := `
	select id, preface, title, user_id, created_at, updated_at, deleted_at
	from ad.articles
	where created_at between $1 and $2
	`
	var articles = make([]Article, 0)
	err := p.db.SelectContext(ctx, &articles, query, from, to)
	if err != nil {
		return nil, err
	}
	return articles, nil
}
// DeleteArticleById soft-deletes the article by stamping its deletion time.
// The placeholder is now $1: every other query in this store uses Postgres
// positional parameters, and the Postgres driver does not understand "?".
// NOTE(review): this writes is_deleted while the read paths select
// deleted_at — confirm which column the schema actually uses.
func (p pgStore) DeleteArticleById(ctx context.Context, id string) error {
	query := `
	update ad.articles
	set is_deleted = now()
	where id = $1
	`
	_, err := p.db.ExecContext(ctx, query, id)
	return err
}
|
package application
import (
"fmt"
"github.com/akosgarai/opengl_playground/examples/model-loading/pkg/interfaces"
"github.com/go-gl/glfw/v3.3/glfw"
"github.com/go-gl/mathgl/mgl32"
)
const (
	// DEBUG is the key that triggers debug output (H).
	DEBUG = glfw.KeyH
)
// Camera abstracts a movable viewpoint: it supplies the view/projection
// matrices and position used for rendering, and movement/orientation hooks.
type Camera interface {
	Log() string
	GetViewMatrix() mgl32.Mat4
	GetProjectionMatrix() mgl32.Mat4
	Walk(float32)
	Strafe(float32)
	Lift(float32)
	UpdateDirection(float32, float32)
	GetPosition() mgl32.Vec3
}

// Mesh is anything that can be drawn with a shader and advanced in time.
type Mesh interface {
	Draw(interfaces.Shader)
	Update(float64)
}

// Application owns the window, the optional camera, the shader->meshes
// registry, input state, and the registered light sources.
type Application struct {
	window    Window
	camera    Camera
	cameraSet bool // true once SetCamera has been called; guards camera use
	// shaderMap groups the meshes rendered by each shader.
	shaderMap map[interfaces.Shader][]Mesh
	// Input state: currently pressed mouse buttons and cursor position.
	mouseDowns map[glfw.MouseButton]bool
	MousePosX  float64
	MousePosY  float64
	// Registered light sources, per kind, consumed in Draw's uniform setup.
	directionalLightSources []DirectionalLightSource
	pointLightSources       []PointLightSource
	spotLightSources        []SpotLightSource
	keyDowns                map[glfw.Key]bool // currently pressed keys
}

// Window is the subset of the GLFW window API the application needs.
type Window interface {
	GetCursorPos() (float64, float64)
	SetKeyCallback(glfw.KeyCallback) glfw.KeyCallback
	SetMouseButtonCallback(glfw.MouseButtonCallback) glfw.MouseButtonCallback
	ShouldClose() bool
	SwapBuffers()
}
// New returns an application instance with empty registries; the window and
// camera are attached later via SetWindow / SetCamera.
func New() *Application {
	return &Application{
		cameraSet:               false,
		shaderMap:               make(map[interfaces.Shader][]Mesh),
		mouseDowns:              make(map[glfw.MouseButton]bool),
		directionalLightSources: []DirectionalLightSource{},
		pointLightSources:       []PointLightSource{},
		spotLightSources:        []SpotLightSource{},
		keyDowns:                make(map[glfw.Key]bool),
	}
}
// Log returns the string representation of this object (including the
// camera's log when one is set).
func (a *Application) Log() string {
	logString := "Application:\n"
	if a.cameraSet {
		logString += " - camera : " + a.camera.Log() + "\n"
	}
	return logString
}

// SetWindow updates the window with the new one.
func (a *Application) SetWindow(w Window) {
	a.window = w
}

// GetWindow returns the current window of the application.
func (a *Application) GetWindow() Window {
	return a.window
}

// SetCamera updates the camera with the new one and marks it usable.
func (a *Application) SetCamera(c Camera) {
	a.cameraSet = true
	a.camera = c
}

// GetCamera returns the current camera of the application.
func (a *Application) GetCamera() Camera {
	return a.camera
}

// AddShader method inserts the new shader to the shaderMap with an empty
// mesh list.
func (a *Application) AddShader(s interfaces.Shader) {
	a.shaderMap[s] = []Mesh{}
}

// AddMeshToShader attaches the mesh to a shader.
func (a *Application) AddMeshToShader(m Mesh, s interfaces.Shader) {
	a.shaderMap[s] = append(a.shaderMap[s], m)
}
// Draw calls Draw function in every drawable item. It loops on the shaderMap
// (shaders). For each shader, first set it to used state, setup camera
// related uniforms, then setup light related uniforms. Then we can pass the
// shader to the mesh for drawing.
func (a *Application) Draw() {
	for s := range a.shaderMap { // idiomatic: drop the redundant ", _"
		s.Use()
		if a.cameraSet {
			s.SetUniformMat4("view", a.camera.GetViewMatrix())
			s.SetUniformMat4("projection", a.camera.GetProjectionMatrix())
			cameraPos := a.camera.GetPosition()
			s.SetUniform3f("viewPosition", cameraPos.X(), cameraPos.Y(), cameraPos.Z())
		}
		a.lightHandler(s)
		for index := range a.shaderMap[s] {
			a.shaderMap[s][index].Draw(s)
		}
	}
}
// lightHandler sets up every light-related uniform for the given shader:
// directional, point and spot sources in turn.
func (a *Application) lightHandler(s interfaces.Shader) {
	a.setupDirectionalLightForShader(s)
	a.setupPointLightForShader(s)
	a.setupSpotLightForShader(s)
}
// setupDirectionalLightForShader sets the directional-light uniforms.
// It iterates over the directional sources and sets each uniform whose name
// is not empty.
func (a *Application) setupDirectionalLightForShader(s interfaces.Shader) {
	for _, source := range a.directionalLightSources {
		if source.DirectionUniformName != "" {
			direction := source.LightSource.GetDirection()
			s.SetUniform3f(source.DirectionUniformName, direction.X(), direction.Y(), direction.Z())
		}
		if source.AmbientUniformName != "" {
			ambient := source.LightSource.GetAmbient()
			s.SetUniform3f(source.AmbientUniformName, ambient.X(), ambient.Y(), ambient.Z())
		}
		if source.DiffuseUniformName != "" {
			diffuse := source.LightSource.GetDiffuse()
			s.SetUniform3f(source.DiffuseUniformName, diffuse.X(), diffuse.Y(), diffuse.Z())
		}
		if source.SpecularUniformName != "" {
			specular := source.LightSource.GetSpecular()
			// Fixed copy-paste bug: the specular values were written to the
			// diffuse uniform name, clobbering diffuse and leaving the
			// specular uniform unset.
			s.SetUniform3f(source.SpecularUniformName, specular.X(), specular.Y(), specular.Z())
		}
	}
}
// setupPointLightForShader sets the point-light uniforms. It iterates over
// the point light sources and sets every uniform whose name is not empty.
func (a *Application) setupPointLightForShader(s interfaces.Shader) {
	for _, source := range a.pointLightSources {
		if source.PositionUniformName != "" {
			position := source.LightSource.GetPosition()
			s.SetUniform3f(source.PositionUniformName, position.X(), position.Y(), position.Z())
		}
		if source.AmbientUniformName != "" {
			ambient := source.LightSource.GetAmbient()
			s.SetUniform3f(source.AmbientUniformName, ambient.X(), ambient.Y(), ambient.Z())
		}
		if source.DiffuseUniformName != "" {
			diffuse := source.LightSource.GetDiffuse()
			s.SetUniform3f(source.DiffuseUniformName, diffuse.X(), diffuse.Y(), diffuse.Z())
		}
		if source.SpecularUniformName != "" {
			specular := source.LightSource.GetSpecular()
			// Fixed copy-paste bug: specular values previously went to the
			// diffuse uniform name.
			s.SetUniform3f(source.SpecularUniformName, specular.X(), specular.Y(), specular.Z())
		}
		// Attenuation coefficients.
		if source.ConstantTermUniformName != "" {
			s.SetUniform1f(source.ConstantTermUniformName, source.LightSource.GetConstantTerm())
		}
		if source.LinearTermUniformName != "" {
			s.SetUniform1f(source.LinearTermUniformName, source.LightSource.GetLinearTerm())
		}
		if source.QuadraticTermUniformName != "" {
			s.SetUniform1f(source.QuadraticTermUniformName, source.LightSource.GetQuadraticTerm())
		}
	}
}
// setupSpotLightForShader sets the spot-light uniforms. It iterates over the
// spot light sources and sets every uniform whose name is not empty.
func (a *Application) setupSpotLightForShader(s interfaces.Shader) {
	for _, source := range a.spotLightSources {
		if source.DirectionUniformName != "" {
			direction := source.LightSource.GetDirection()
			s.SetUniform3f(source.DirectionUniformName, direction.X(), direction.Y(), direction.Z())
		}
		if source.PositionUniformName != "" {
			position := source.LightSource.GetPosition()
			s.SetUniform3f(source.PositionUniformName, position.X(), position.Y(), position.Z())
		}
		if source.AmbientUniformName != "" {
			ambient := source.LightSource.GetAmbient()
			s.SetUniform3f(source.AmbientUniformName, ambient.X(), ambient.Y(), ambient.Z())
		}
		if source.DiffuseUniformName != "" {
			diffuse := source.LightSource.GetDiffuse()
			s.SetUniform3f(source.DiffuseUniformName, diffuse.X(), diffuse.Y(), diffuse.Z())
		}
		if source.SpecularUniformName != "" {
			specular := source.LightSource.GetSpecular()
			// Fixed copy-paste bug: specular values previously went to the
			// diffuse uniform name.
			s.SetUniform3f(source.SpecularUniformName, specular.X(), specular.Y(), specular.Z())
		}
		// Attenuation coefficients.
		if source.ConstantTermUniformName != "" {
			s.SetUniform1f(source.ConstantTermUniformName, source.LightSource.GetConstantTerm())
		}
		if source.LinearTermUniformName != "" {
			s.SetUniform1f(source.LinearTermUniformName, source.LightSource.GetLinearTerm())
		}
		if source.QuadraticTermUniformName != "" {
			s.SetUniform1f(source.QuadraticTermUniformName, source.LightSource.GetQuadraticTerm())
		}
		// Cone angles (inner and outer cutoff).
		if source.CutoffUniformName != "" {
			s.SetUniform1f(source.CutoffUniformName, source.LightSource.GetCutoff())
		}
		if source.OuterCutoffUniformName != "" {
			s.SetUniform1f(source.OuterCutoffUniformName, source.LightSource.GetOuterCutoff())
		}
	}
}
// AddDirectionalLightSource sets up a directional light source.
// It takes a DirectionalLight input that contains the model related info,
// and it also takes a [4]string with the uniform names used in the shaders:
// 'DirectionUniformName', 'AmbientUniformName', 'DiffuseUniformName',
// 'SpecularUniformName'. They have to be in this order.
func (a *Application) AddDirectionalLightSource(lightSource DirectionalLight, uniformNames [4]string) {
	a.directionalLightSources = append(a.directionalLightSources, DirectionalLightSource{
		LightSource:          lightSource,
		DirectionUniformName: uniformNames[0],
		AmbientUniformName:   uniformNames[1],
		DiffuseUniformName:   uniformNames[2],
		SpecularUniformName:  uniformNames[3],
	})
}
// AddPointLightSource sets up a point light source. It takes a PointLight
// input that contains the model related info, plus the uniform names as a [7]string.
// The order has to be the following: 'PositionUniformName', 'AmbientUniformName', 'DiffuseUniformName',
// 'SpecularUniformName', 'ConstantTermUniformName', 'LinearTermUniformName', 'QuadraticTermUniformName'.
func (a *Application) AddPointLightSource(lightSource PointLight, uniformNames [7]string) {
	a.pointLightSources = append(a.pointLightSources, PointLightSource{
		LightSource:              lightSource,
		PositionUniformName:      uniformNames[0],
		AmbientUniformName:       uniformNames[1],
		DiffuseUniformName:       uniformNames[2],
		SpecularUniformName:      uniformNames[3],
		ConstantTermUniformName:  uniformNames[4],
		LinearTermUniformName:    uniformNames[5],
		QuadraticTermUniformName: uniformNames[6],
	})
}
// AddSpotLightSource sets up a spot light source. It takes a SpotLight input
// that contains the model related info, and it also contains the uniform names in [10]string format.
// The order has to be the following: 'PositionUniformName', 'DirectionUniformName', 'AmbientUniformName',
// 'DiffuseUniformName', 'SpecularUniformName', 'ConstantTermUniformName', 'LinearTermUniformName',
// 'QuadraticTermUniformName', 'CutoffUniformName', 'OuterCutoffUniformName'.
func (a *Application) AddSpotLightSource(lightSource SpotLight, uniformNames [10]string) {
	var sSource SpotLightSource
	sSource.LightSource = lightSource
	sSource.PositionUniformName = uniformNames[0]
	sSource.DirectionUniformName = uniformNames[1]
	sSource.AmbientUniformName = uniformNames[2]
	sSource.DiffuseUniformName = uniformNames[3]
	sSource.SpecularUniformName = uniformNames[4]
	sSource.ConstantTermUniformName = uniformNames[5]
	sSource.LinearTermUniformName = uniformNames[6]
	sSource.QuadraticTermUniformName = uniformNames[7]
	sSource.CutoffUniformName = uniformNames[8]
	// Bug fix: the outer cutoff previously reused uniformNames[8] (the cutoff
	// name), leaving the tenth array entry unused.
	sSource.OuterCutoffUniformName = uniformNames[9]
	a.spotLightSources = append(a.spotLightSources, sSource)
}
// KeyCallback is responsible for the keyboard event handling.
// The DEBUG key dumps the application log; every other key updates the key state map.
func (a *Application) KeyCallback(w *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) {
	if key == DEBUG {
		if action != glfw.Release {
			fmt.Printf("%s\n", a.Log())
		}
		return
	}
	a.SetKeyState(key, action)
}
// SetKeyState stores whether the given key is currently held down: any action
// other than a release counts as pressed.
func (a *Application) SetKeyState(key glfw.Key, action glfw.Action) {
	a.keyDowns[key] = action != glfw.Release
}
// GetKeyState reports whether the given key is currently held down.
func (a *Application) GetKeyState(key glfw.Key) bool {
	pressed := a.keyDowns[key]
	return pressed
}
// Update calls the Update function in every drawable item, forwarding the
// elapsed time dt.
func (a *Application) Update(dt float64) {
	for _, items := range a.shaderMap {
		for i := range items {
			items[i].Update(dt)
		}
	}
}
// MouseButtonCallback is responsible for the mouse button event handling:
// it records the cursor position and updates the button state map.
func (a *Application) MouseButtonCallback(w *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) {
	a.MousePosX, a.MousePosY = w.GetCursorPos()
	a.SetButtonState(button, action)
}
// SetButtonState stores whether the given mouse button is currently held down:
// any action other than a release counts as pressed.
func (a *Application) SetButtonState(button glfw.MouseButton, action glfw.Action) {
	a.mouseDowns[button] = action != glfw.Release
}
// GetMouseButtonState reports whether the given mouse button is currently held down.
func (a *Application) GetMouseButtonState(button glfw.MouseButton) bool {
	pressed := a.mouseDowns[button]
	return pressed
}
|
package decode
import (
	"fmt"
	"testing"
)
// TestEnDe round-trips a message through RSA: encrypt with the public key,
// then decrypt with the private key, printing the intermediate results.
func TestEnDe(t *testing.T) {
	utils := RSAUtils()
	fmt.Println(utils.init())
	fmt.Println(utils)
	// Public-key encryption, private-key decryption.
	encrypted, err := utils.RsaEncrypt([]byte("hello world"), utils.PublicKey)
	fmt.Println(err)
	decrypted, err := utils.RsaDecrypt(encrypted, utils.PrivateKey)
	fmt.Println(string(decrypted), err)
}
|
package main
import (
"context"
"fmt"
proto "micro_test/proto"
micro_client "github.com/micro/go-micro/client"
)
// main calls the remote waiter service and prints the response.
func main() {
	// Create a new service. Optionally include some options here.
	//service := micro.NewService(micro.Name("server.client"))
	/*serviceName := "server.client"
	service := micro.NewService(
		micro.Name(serviceName),
		micro.Server(
			server.NewServer(
				server.Name(serviceName),
				server.Address(":7777"),
			),
		),
	)
	service.Init()*/
	// NOTE(review): cli is the zero value (nil interface); the commented-out
	// micro.NewService block above suggests a real client was intended here.
	// Calling through a nil client will likely fail — confirm before use.
	var cli micro_client.Client
	// Create new greeter client
	server := proto.NewWaiterService("server", cli)
	// Call the greeter
	//rsp, err := server.GetPddSessionsInfo(context.TODO(), &proto.RequestGetPddSessionsInfo{Id: 20})
	rsp, err := server.GetPddSessionsList(context.TODO(), &proto.RequestGetPddSessionsList{Page: 1, PerPage:2})
	if err != nil {
		fmt.Println(err)
	}
	// Print response
	fmt.Println(fmt.Sprintf("%+v", rsp))
}
package main
import (
"fmt"
"reflect"
)
// main demonstrates reflect.TypeOf and integer-to-string conversion, then
// breaks 107653 seconds down into days/hours/minutes/seconds.
func main() {
	a := 1
	var b string
	fmt.Println(reflect.TypeOf(a)) // print the dynamic type: int
	fmt.Println(reflect.TypeOf(b)) // string
	// Bug fix: string(a) converts the int to the rune U+0001 ("\x01"),
	// not to the text "1". Format the number instead; the printed type
	// (string) is unchanged.
	c := fmt.Sprintf("%d", a)
	fmt.Println(reflect.TypeOf(c))
	// Exercise A: express 107653 seconds as days, hours, minutes, seconds.
	time := 107653
	e := time / 60 / 60 / 24
	fmt.Println(e)
	fmt.Println("天:", time/60/60/24%365)
	fmt.Println("时:", time/60/60%24)
	fmt.Println("分:", time/60%60)
	fmt.Println("秒:", time%60)
}
package font
// Weight represents a CSS-style font-weight value.
type Weight string

// Supported font-weight values: keywords and the numeric 100–900 scale.
// The constants stay untyped strings so they assign freely to both
// string and Weight.
const (
	WeightNormal  = "normal"
	WeightBold    = "bold"
	WeightBolder  = "bolder"
	WeightLighter = "lighter"
	WeightInitial = "initial"
	WeightInherit = "inherit"
	Weight100     = "100"
	Weight200     = "200"
	Weight300     = "300"
	Weight400     = "400"
	Weight500     = "500"
	Weight600     = "600"
	Weight700     = "700"
	Weight800     = "800"
	Weight900     = "900"
)

// String returns the weight as a plain string.
func (w Weight) String() string {
	return string(w)
}
|
package util
import (
"github.com/satori/go.uuid"
)
// CreateUUID generates a random (version 4) UUID and returns its canonical
// string representation, or the generation error.
func CreateUUID() (string, error) {
	id, err := uuid.NewV4()
	if err != nil {
		return "", err
	}
	return id.String(), nil
}
// ValidUUID reports whether the given string is a well-formed UUID in the
// canonical 8-4-4-4-12 hexadecimal form (e.g.
// "550e8400-e29b-41d4-a716-446655440000").
// The original implementation was a stub that accepted every input.
func ValidUUID(uuid string) bool {
	if len(uuid) != 36 {
		return false
	}
	for i := 0; i < len(uuid); i++ {
		c := uuid[i]
		switch i {
		case 8, 13, 18, 23:
			if c != '-' {
				return false
			}
		default:
			isHex := (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')
			if !isHex {
				return false
			}
		}
	}
	return true
}
|
package camt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document03900101 is the XML document wrapper for the camt.039.001.01
// (Case Status Report) message.
type Document03900101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:camt.039.001.01 Document"`
	// Message is the single camt.039.001.01 payload of the document.
	Message *CaseStatusReport `xml:"camt.039.001.01"`
}
// AddMessage allocates a fresh CaseStatusReport, stores it as the document
// payload and returns it for further population.
func (d *Document03900101) AddMessage() *CaseStatusReport {
	d.Message = &CaseStatusReport{}
	return d.Message
}
// CaseStatusReport is the camt.039.001.01 message body.
//
// Scope
// The Case Status Report message is sent by a case assignee to a case creator or case assigner.
// This message is used to report on the status of a case.
// Usage
// A Case Status Report message is sent in reply to a Case Status Report Request message. This message
// - covers one and only one case at a time. (If a case assignee needs to report on several cases, then multiple Case Status Report messages must be sent.)
// - may be forwarded to subsequent case assigner(s) until it reaches the end point
// - is able to indicate the fact that a case has been assigned to a party downstream in the payment processing chain
// - may not be used in place of a Resolution Of Investigation (except for the condition given in the next bullet point) or Notification Of Case Assignment message
// - may be skipped and replaced by a Resolution Of Investigation message if at the moment when the request for a investigation status arrives, the assignee has obtained a solution. (In this case a Resolution Of Investigation message can be sent in lieu of a Case Status Report and the case may be closed.)
type CaseStatusReport struct {
	// Specifies generic information about an investigation report.
	Header *iso20022.ReportHeader `xml:"Hdr"`
	// Identifies the case.
	Case *iso20022.Case `xml:"Case"`
	// Defines the status of the case.
	Status *iso20022.CaseStatus `xml:"Sts"`
	// Identifies the last assignment performed.
	NewAssignment *iso20022.CaseAssignment `xml:"NewAssgnmt,omitempty"`
}
// AddHeader allocates the report header, attaches it and returns it.
func (c *CaseStatusReport) AddHeader() *iso20022.ReportHeader {
	c.Header = &iso20022.ReportHeader{}
	return c.Header
}

// AddCase allocates the case identification, attaches it and returns it.
func (c *CaseStatusReport) AddCase() *iso20022.Case {
	c.Case = &iso20022.Case{}
	return c.Case
}

// AddStatus allocates the case status, attaches it and returns it.
func (c *CaseStatusReport) AddStatus() *iso20022.CaseStatus {
	c.Status = &iso20022.CaseStatus{}
	return c.Status
}

// AddNewAssignment allocates the latest assignment, attaches it and returns it.
func (c *CaseStatusReport) AddNewAssignment() *iso20022.CaseAssignment {
	c.NewAssignment = &iso20022.CaseAssignment{}
	return c.NewAssignment
}
|
package router
import (
"dappapi/middleware"
"github.com/gin-gonic/gin"
)
// InitRouter builds the gin engine: global middleware first, then the auth
// middleware, then the system routes.
func InitRouter() *gin.Engine {
	r := gin.New()
	middleware.InitMiddleware(r)
	// NOTE(review): the error from AuthInit is silently discarded; if it can
	// fail, authMiddleware may be unusable here — confirm and handle explicitly.
	authMiddleware, _ := middleware.AuthInit()
	// Register the system routes.
	InitSysRouter(r, authMiddleware)
	return r
}
|
package service
import (
"bytes"
"encoding/json"
"fmt"
"github.com/bearname/videohost/internal/common/infrarstructure/amqp"
"github.com/bearname/videohost/internal/common/util"
"github.com/bearname/videohost/internal/video-scaler/domain"
"github.com/bearname/videohost/internal/videoserver/domain/model"
log "github.com/sirupsen/logrus"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
)
// VideoScaleServiceImpl transcodes uploaded videos into additional HLS
// renditions and reports progress to the video server (HTTP) and the
// message broker (AMQP).
type VideoScaleServiceImpl struct {
	messageBroker *amqp.RabbitMqService
	// token caches the access/refresh token pair obtained from the auth server.
	token *domain.Token
	videoServerAddress string
	authServerAddress string
}
// NewVideoScaleService wires up a scaler that talks to the given message
// broker, video server and auth server. The token starts out empty.
func NewVideoScaleService(service *amqp.RabbitMqService, videoServerAddress string, authServerAddress string) *VideoScaleServiceImpl {
	return &VideoScaleServiceImpl{
		messageBroker:      service,
		token:              domain.NewToken("", ""),
		videoServerAddress: videoServerAddress,
		authServerAddress:  authServerAddress,
	}
}
// PrepareToStream makes sure every requested HLS quality exists for the given
// video: it probes the source resolution, fetches the video record from the
// video server, writes the master playlist header, and transcodes every
// quality that is not yet available. Returns false on any failure (details
// are logged).
func (s *VideoScaleServiceImpl) PrepareToStream(videoId string, inputVideoPath string, allNeededQualities []domain.Quality, ownerId string) bool {
	const extension = ".mp4"
	log.Info(inputVideoPath)
	// TODO(review): the probe script path is hard-coded to a developer
	// machine; it should come from configuration.
	output, err := exec.Command("C:\\Users\\mikha\\go\\src\\videohost\\bin\\video-scaler\\resolution.bat", inputVideoPath).Output()
	if err != nil {
		log.Error(err.Error())
		return false
	}
	split := strings.Split(string(output), "\n")
	height, ok := s.getDimension(split, "height")
	if !ok {
		log.Error("Failed get resolution")
		return false
	}
	if !domain.IsSupportedQuality(height) {
		log.Error("Not supported quality")
		return false
	}
	client := &http.Client{}
	token, ok := util.InitAccessToken(client, s.authServerAddress)
	if !ok {
		return false
	}
	s.token = token
	response, err := util.GetRequest(&http.Client{}, s.videoServerAddress+"/api/v1/videos/"+videoId, s.token.RefreshToken)
	if err != nil {
		return false
	}
	var video model.Video
	err = json.Unmarshal(response, &video)
	if err != nil {
		log.Error(err)
		return false
	}
	// Replace the trailing "index.mp4" with the master playlist name.
	filename := inputVideoPath[0:len(inputVideoPath)-len("index.mp4")] + "index.m3u8"
	file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		log.Error(err)
		return false
	}
	// Bug fix: the playlist handle was previously never closed (leak).
	defer file.Close()
	_, err = file.WriteString("#EXTM3U\n")
	if err != nil {
		log.Error(err)
		return false
	}
	availableVideoQualities := strings.Split(video.Quality, ",")
	for _, quality := range allNeededQualities {
		if !util.Contains(availableVideoQualities, quality.String()) {
			s.prepareToStreamByQuality(videoId, inputVideoPath, extension, quality, ownerId)
		}
	}
	return true
}
// prepareToStreamByQuality transcodes one rendition, records the new quality
// on the video server and publishes a "video-scaled" event on success.
// Failures are logged; the caller continues with the remaining qualities.
func (s *VideoScaleServiceImpl) prepareToStreamByQuality(videoId string, inputVideoPath string, extension string, quality domain.Quality, ownerId string) {
	err := s.scaleVideoToQuality(inputVideoPath, extension, quality)
	if err != nil {
		log.Error("Failed prepare to stream file " + inputVideoPath + " in quality " + quality.String() + "p")
	} else {
		// Event payload: "<videoId>,<quality>,<ownerId>".
		body := videoId + "," + quality.String() + "," + ownerId
		fmt.Println(body)
		log.Info("Success prepare to stream file " + inputVideoPath + " in quality " + quality.String() + "p")
		ok := s.addVideoQuality(videoId, quality)
		log.Info(s.getResultMessage(ok))
		err = s.messageBroker.Publish("events_topic", "events.video-scaled", body)
		if err != nil {
			// Bug fix: the log message was missing its closing quote.
			log.Error("Failed publish event 'video-scaled'")
		}
	}
}
// addVideoQuality tells the video server (PUT .../add-quality) that the given
// rendition now exists. Returns true only when the server answers 200 OK.
func (s *VideoScaleServiceImpl) addVideoQuality(videoId string, quality domain.Quality) bool {
	buf := struct {
		Quality int `json:"quality"`
	}{Quality: quality.Values()}
	marshal, err := json.Marshal(buf)
	if err != nil {
		return false
	}
	request, err := http.NewRequest("PUT", s.videoServerAddress+"/api/v1/videos/"+videoId+"/add-quality", bytes.NewBuffer(marshal))
	if err != nil {
		log.Error(err)
		return false
	}
	client := &http.Client{}
	token, ok := util.InitAccessToken(client, s.authServerAddress)
	if !ok {
		return false
	}
	s.token = token
	request.Header.Add("Authorization", "Bearer "+s.token.AccessToken)
	response, err := client.Do(request)
	if err != nil {
		log.Error(err)
		return false
	}
	defer response.Body.Close()
	if response.StatusCode == http.StatusUnauthorized {
		// NOTE(review): on 401 a fresh admin token is fetched but the request
		// is never retried, so this call still ends in the failure branch
		// below — confirm whether a retry was intended.
		token, err = util.GetAdminAccessToken(client, s.authServerAddress)
		if err != nil {
			log.Error(err)
			return false
		}
		s.token = token
	}
	if response.StatusCode != http.StatusOK {
		// NOTE(review): this message ("get id of owner") looks copied from a
		// different call site; the failing operation here is add-quality.
		log.Error("failed get id of owner of the video ")
		return false
	}
	return true
}
// getResultMessage renders the outcome of an add-quality attempt as a log line.
func (s *VideoScaleServiceImpl) getResultMessage(quality bool) string {
	if quality {
		return "Add video quality success"
	}
	return "Add video quality failed"
}
// scaleVideoToQuality transcodes the source video into the given quality as an
// HLS rendition and appends the corresponding variant entry to the master
// index.m3u8 playlist next to the output file.
func (s *VideoScaleServiceImpl) scaleVideoToQuality(inputVideoPath string, extension string, quality domain.Quality) error {
	outputVideoPath := s.getOutputVideoPath(inputVideoPath, extension, quality)
	log.Info("prepare video to stream on quality " + quality.String() + "p")
	// Directory of the output file, including the trailing backslash.
	root := outputVideoPath[0 : strings.LastIndex(outputVideoPath, "\\")+1]
	outputHls := root + "index-" + quality.String() + `.m3u8`
	// Cleanup: the original code ran strings.ReplaceAll(path, "\\", "\\") on
	// both paths, which is a no-op and has been removed.
	err := s.prepareToStream(inputVideoPath, outputHls, quality)
	if err != nil {
		return err
	}
	resolution := domain.QualityToResolution(quality)
	// TODO(review): the media UUID in this playlist entry is hard-coded —
	// confirm it should be derived from the video instead.
	data := "#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=6221600,CODECS=\"mp4a.40.2,avc1.640028\",RESOLUTION=" + resolution.String() + ",NAME=\"" + quality.String() + "\"\n" +
		"/media/a7e608d9-bc76-11eb-afc7-e4e74940035b/" + quality.String() + "/stream/\n"
	file, err := os.OpenFile(root+"index.m3u8", os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	// Bug fix: the playlist handle was previously never closed (leak).
	defer file.Close()
	_, err = file.WriteString(data)
	return err
}
// getDimension extracts the integer value from the probe-output line at
// split[1], which is assumed to look like "key=value\r" (trailing character
// stripped — presumably a Windows CR; TODO confirm against resolution.bat).
// It returns (0, false) on any malformed input instead of panicking, which
// the original did for short slices or lines without '='.
func (s *VideoScaleServiceImpl) getDimension(split []string, key string) (int, bool) {
	if len(split) < 2 {
		return 0, false
	}
	value := strings.Split(split[1], "=")
	if len(value) < 2 || value[0] != key {
		return 0, false
	}
	fmt.Println("'" + value[1] + "'")
	if len(value[1]) == 0 {
		return 0, false
	}
	number := value[1][0 : len(value[1])-1]
	atoi, err := strconv.Atoi(number)
	if err != nil {
		return 0, false
	}
	return atoi, true
}
// prepareToStream shells out to ffmpeg to produce an HLS rendition of
// videoPath at the resolution matching the requested quality.
func (s *VideoScaleServiceImpl) prepareToStream(videoPath string, output string, quality domain.Quality) error {
	resolution := domain.QualityToResolution(quality)
	fmt.Println(resolution)
	cmd := exec.Command("ffmpeg", "-i", videoPath, "-profile:v", "baseline", "-level", "3.0", "-s", resolution.String(),
		"-start_number", "0", "-hls_time", "10", "-hls_list_size", "0", "-f", "hls", output)
	return cmd.Run()
}
// getOutputVideoPath derives the per-quality output filename by inserting
// "-<quality>p" before the extension.
func (s *VideoScaleServiceImpl) getOutputVideoPath(videoPath string, extension string, quality domain.Quality) string {
	base := videoPath[:len(videoPath)-len(extension)]
	return base + "-" + quality.String() + "p" + extension
}
|
package main
import (
"fmt"
)
// TrackType identifies the media kind of an MP4 track.
type TrackType uint32

// Track kinds; UnknownTrack is the zero value.
const (
	UnknownTrack TrackType = iota
	AudioTrack
	VideoTrack
	SubtitleTrack
)
// encryption scheme type
// Protection scheme fourCC codes (Common Encryption schemes).
var (
	encryptionSchemeTypeCENC uint32 = 0x63656E63 // "cenc"
	encryptionSchemeTypeCENS uint32 = 0x63656E73 // "cens"
	encryptionSchemeTypeCBCS uint32 = 0x63626373 // "cbcs" (comment previously mis-read "cecs")
	encryptionSchemeTypeCBC1 uint32 = 0x63626331 // "cbc1"
)
// atom is a parsed MP4 box header: its fourCC type plus the header and body
// sizes (total size = headerSize + bodySize).
type atom struct {
	atomType uint32
	bodySize int64 // body size
	headerSize uint32
}
// String implements fmt.Stringer for debug output.
func (a *atom) String() string {
	return fmt.Sprintf("Atom type:%s. Atom size:%d", a.Type(), a.Size())
}

// Type renders the numeric atom type as its fourCC string.
func (a *atom) Type() string {
	return int2String(a.atomType)
}

// Size is the total atom size: header plus body.
func (a *atom) Size() int64 {
	return int64(a.headerSize) + a.bodySize
}
// ISO/IEC 14496-12 Part 12: ISO base media file format
// basic copy from https://github.com/mozilla/mp4parse-rust/blob/master/mp4parse/src/boxes.rs
var (
fourCCftyp uint32 = 0x66747970 // "ftyp"
fourCCstyp uint32 = 0x73747970 // "styp"
fourCCmoov uint32 = 0x6d6f6f76 // "moov"
fourCCsidx uint32 = 0x73696478 // "sidx"
fourCCssix uint32 = 0x73736978 // "ssix"
fourCCmdat uint32 = 0x6D646174 // "mdat"
fourCCmvex uint32 = 0x6d766578 // "mvex"
fourCCmehd uint32 = 0x6d656864 // "mehd"
fourCCmeta uint32 = 0x6d657461 // "meta"
fourCCtrex uint32 = 0x74726578 // "trex"
fourCCleva uint32 = 0x6c657661 // "leva"
fourCCmoof uint32 = 0x6D6F6F66 // "moof" fragment-movie ->
fourCCmfhd uint32 = 0x6D666864 // "mfhd"
fourCCtraf uint32 = 0x74726166 // "traf"
fourCCtfhd uint32 = 0x74666864 // "tfhd"
fourCCtrun uint32 = 0x7472756E // "trun"
fourCCsbgp uint32 = 0x73626770 // "sbgp"
fourCCsgpd uint32 = 0x73677064 // "sgpd"
fourCCsenc uint32 = 0x73656e63 // "senc"
fourCCsubs uint32 = 0x73756273 // "subs"
fourCCsaiz uint32 = 0x7361697A // "saiz"
fourCCsaio uint32 = 0x7361696F // "saio"
fourCCtfdt uint32 = 0x74666474 // "tfdt" <- fragment-movie
fourCCmfra uint32 = 0x6D667261 // "mfra"
fourCCfree uint32 = 0x66726565 // "free"
fourCCskip uint32 = 0x736b6970 // "skip"
fourCCpdin uint32 = 0x7064696e // "pdin"
fourCCuuid uint32 = 0x75756964 // "uuid"
fourCCudta uint32 = 0x75647461 // "udta"
fourCCprft uint32 = 0x70726674 // "prft"
fourCCmvhd uint32 = 0x6d766864 // "mvhd"
fourCCtrak uint32 = 0x7472616b // "trak"
fourCCtkhd uint32 = 0x746b6864 // "tkhd"
fourCCedts uint32 = 0x65647473 // "edts"
fourCCmdia uint32 = 0x6d646961 // "mdia"
fourCCmdhd uint32 = 0x6d646864 // "mdhd"
fourCChdlr uint32 = 0x68646c72 // "hdlr"
fourCCminf uint32 = 0x6d696e66 // "minf"
fourCCelng uint32 = 0x656c6e67 // "elng"
fourCCvmhd uint32 = 0x766D6864 // "vmhd"
fourCCsmhd uint32 = 0x736D6864 // "smhd"
// fourCChmhd uint32 = 0x686D6864 // "hmhd"
// fourCCnmhd uint32 = 0x6E6D6864 // "nmhd"
fourCCdinf uint32 = 0x64696E66 // "dinf"
fourCCstbl uint32 = 0x7374626c // "stbl"
fourCCstsd uint32 = 0x73747364 // "stsd"
fourCCstts uint32 = 0x73747473 // "stts"
fourCCstsc uint32 = 0x73747363 // "stsc"
fourCCstsz uint32 = 0x7374737a // "stsz"
fourCCstz2 uint32 = 0x73747a32 // "stz2"
fourCCstco uint32 = 0x7374636f // "stco"
fourCCco64 uint32 = 0x636f3634 // "co64"
fourCCstss uint32 = 0x73747373 // "stss"
fourCCctts uint32 = 0x63747473 // "ctts"
fourCCcslg uint32 = 0x63736c67 // "cslg"
fourCCstsh uint32 = 0x73747368 // "stsh"
fourCCpadb uint32 = 0x70616462 // "padb"
fourCCstdp uint32 = 0x73746470 // "stdp"
fourCCsdtp uint32 = 0x73647470 // "sdtp"
fourCCcolr uint32 = 0x636f6c72 // "colr"
fourCCclap uint32 = 0x636c6170 // "clap"
fourCCpasp uint32 = 0x70617370 // "pasp"
avc1SampleEntry uint32 = 0x61766331 // "avc1" video sample entry ->
avc2SampleEntry uint32 = 0x61766332 // "avc2"
avc3SampleEntry uint32 = 0x61766333 // "avc3"
avc4SampleEntry uint32 = 0x61766334 // "avc4"
encvSampleEntry uint32 = 0x656e6376 // "protectedInfo" encrypted video sample entry
hev1SampleEntry uint32 = 0x68657631 // "hev1"
hvc1SampleEntry uint32 = 0x68766331 // "hvc1"
hVC1SampleEntry uint32 = 0x48564331 // "HVC1"
dvavSampleEntry uint32 = 0x64766176 // "dvav"
dva1SampleEntry uint32 = 0x64766131 // "dva1"
dvheSampleEntry uint32 = 0x64766865 // "dvhe"
dvh1SampleEntry uint32 = 0x64766831 // "dvh1"
vp08SampleEntry uint32 = 0x76703038 // "vp08"
vp09SampleEntry uint32 = 0x76703039 // "vp09"
av01SampleEntry uint32 = 0x61763031 // "av01"
s263SampleEntry uint32 = 0x73323633 // "s263"
h263SampleEntry uint32 = 0x48323633 // "H263"
s264SampleEntry uint32 = 0x73323634 // "s264"
mp4vSampleEntry uint32 = 0x6d703476 // "mp4v"
jpegSampleEntry uint32 = 0x6a706567 // "jpeg"
jPEGSampleEntry uint32 = 0x4a504547 // "JPEG"
div3SampleEntry uint32 = 0x64697633 // "div3"
dIV3SampleEntry uint32 = 0x44495633 // "DIV3" <- video sample entry
fourCCav1c uint32 = 0x61763143 // "av1C" -> video codec configuration record
fourCCavcC uint32 = 0x61766343 // "avcC"
fourCCdvcC uint32 = 0x64766343 // "dvcC"
fourCCdvvC uint32 = 0x64767643 // "dvvC"
fourCCvpcC uint32 = 0x76706343 // "vpcC"
fourCChvcC uint32 = 0x68766343 // "hvcC" <- video codec configuration record
flaCSampleEntry uint32 = 0x664c6143 // "fLaC" audio sample entry ->
opusSampleEntry uint32 = 0x4f707573 // "Opus"
mp4aSampleEntry uint32 = 0x6d703461 // "mp4a"
encaSampleEntry uint32 = 0x656e6361 // "enca" encrypted audio sample entry
mp3SampleEntry uint32 = 0x2e6d7033 // ".mp3"
lpcmSampleEntry uint32 = 0x6c70636d // "lpcm"
alacSampleEntry uint32 = 0x616c6163 // "alac"
ac3SampleEntry uint32 = 0x61632d33 // "ac-3"
ac4SampleEntry uint32 = 0x61632d34 // "ac-4"
ec3SampleEntry uint32 = 0x65632d33 // "ec-3"
mlpaSampleEntry uint32 = 0x6D6C7061 // "mlpa"
dtscSampleEntry uint32 = 0x64747363 // "dtsc"
dtseSampleEntry uint32 = 0x64747365 // "dtse"
dtshSampleEntry uint32 = 0x64747368 // "dtsh"
dtslSampleEntry uint32 = 0x6474736c // "dtsl"
samrSampleEntry uint32 = 0x73616d72 // "samr"
sawbSampleEntry uint32 = 0x73617762 // "sawb"
sowtSampleEntry uint32 = 0x736f7774 // "sowt"
twosSampleEntry uint32 = 0x74776f73 // "twos"
alawSampleEntry uint32 = 0x616c6177 // "alaw"
ulawSampleEntry uint32 = 0x756c6177 // "ulaw"
sounSampleEntry uint32 = 0x736f756e // "soun" <- audio sample entry
tx3gSampleEntry uint32 = 0x74783367 // "tx3g" subtitle sample entry ->
stppSampleEntry uint32 = 0x73747070 // "stpp"
wvttSampleEntry uint32 = 0x77767474 // "wvgtt"
TTMLSampleEntry uint32 = 0x54544d4c // "TTML"
c608SampleEntry uint32 = 0x63363038 // "c608" <- subtitle sample entry
fourCCesds uint32 = 0x65736473 // "esds" audio sample descriptors ->
fourCCdfla uint32 = 0x64664c61 // "dfLa"
fourCCdops uint32 = 0x644f7073 // "dOps"
fourCCalac uint32 = 0x616C6163 // "alac" - Also used by ALACSampleEntry
fourCCddts uint32 = 0x64647473 // "ddts"
fourCCdac3 uint32 = 0x64616333 // "dac3"
fourCCdec3 uint32 = 0x64656333 // "dec3"
fourCCdac4 uint32 = 0x64616334 // "dac4"
fourCCwave uint32 = 0x77617665 // "wave" - quicktime atom
fourCCdmlp uint32 = 0x646D6C70 // "dmlp" <- audio sample descriptors
// protection information boxes
fourCCpssh uint32 = 0x70737368 // "pssh"
fourCCsinf uint32 = 0x73696e66 // "sinf"
fourCCfrma uint32 = 0x66726d61 // "frma"
fourCCschm uint32 = 0x7363686d // "schm"
fourCCschi uint32 = 0x73636869 // "schi"
// fourCCtenc uint32 = 0x74656e63 // "tenc"
// fourCCctts uint32 = 0x63747473 // "ctts"
// fourCCuuid uint32 = 0x75756964 // "uuid"
// fourCCmhdr uint32 = 0x6d686472 // "mhdr"
// fourCCkeys uint32 = 0x6b657973 // "keys"
// fourCCilst uint32 = 0x696c7374 // "ilst"
// fourCCdata uint32 = 0x64617461 // "Data"
// fourCCname uint32 = 0x6e616d65 // "name"
// fourCCitif uint32 = 0x69746966 // "itif"
// fourCCudta uint32 = 0x75647461 // "udta"
// AlbumEntry uint32 = 0xa9616c62 // "©alb"
// ArtistEntry uint32 = 0xa9415254 // "©ART"
// ArtistLowercaseEntry uint32 = 0xa9617274 // "©art"
// AlbumArtistEntry uint32 = 0x61415254 // "aART"
// CommentEntry uint32 = 0xa9636d74 // "©cmt"
// DateEntry uint32 = 0xa9646179 // "©day"
// TitleEntry uint32 = 0xa96e616d // "©nam"
// CustomGenreEntry uint32 = 0xa967656e // "©gen"
// StandardGenreEntry uint32 = 0x676e7265 // "gnre"
// TrackNumberEntry uint32 = 0x74726b6e // "trkn"
// DiskNumberEntry uint32 = 0x6469736b // "disk"
// ComposerEntry uint32 = 0xa9777274 // "©wrt"
// EncoderEntry uint32 = 0xa9746f6f // "©too"
// EncodedByEntry uint32 = 0xa9656e63 // "©enc"
// TempoEntry uint32 = 0x746d706f // "tmpo"
// CopyrightEntry uint32 = 0x63707274 // "cprt"
// CompilationEntry uint32 = 0x6370696c // "cpil"
// CoverArtEntry uint32 = 0x636f7672 // "covr"
// AdvisoryEntry uint32 = 0x72746e67 // "rtng"
// RatingEntry uint32 = 0x72617465 // "rate"
// GroupingEntry uint32 = 0xa9677270 // "©grp"
// MediaTypeEntry uint32 = 0x7374696b // "stik"
// PodcastEntry uint32 = 0x70637374 // "pcst"
// CategoryEntry uint32 = 0x63617467 // "catg"
// KeywordEntry uint32 = 0x6b657977 // "keyw"
// PodcastUrlEntry uint32 = 0x7075726c // "purl"
// PodcastGuidEntry uint32 = 0x65676964 // "egid"
// DescriptionEntry uint32 = 0x64657363 // "desc"
// LongDescriptionEntry uint32 = 0x6c646573 // "ldes"
// LyricsEntry uint32 = 0xa96c7972 // "©lyr"
// TVNetworkNameEntry uint32 = 0x74766e6e // "tvnn"
// TVShowNameEntry uint32 = 0x74767368 // "tvsh"
// TVEpisodeNameEntry uint32 = 0x7476656e // "tven"
// TVSeasonNumberEntry uint32 = 0x7476736e // "tvsn"
// TVEpisodeNumberEntry uint32 = 0x74766573 // "tves"
// PurchaseDateEntry uint32 = 0x70757264 // "purd"
// GaplessPlaybackEntry uint32 = 0x70676170 // "pgap"
// OwnerEntry uint32 = 0x6f776e72 // "ownr"
// HDVideoEntry uint32 = 0x68647664 // "hdvd"
// SortNameEntry uint32 = 0x736f6e6d // "sonm"
// SortAlbumEntry uint32 = 0x736f616c // "soal"
// SortArtistEntry uint32 = 0x736f6172 // "soar"
// SortAlbumArtistEntry uint32 = 0x736f6161 // "soaa"
// SortComposerEntry uint32 = 0x736f636f // "soco"
)
// boxFtyp holds the "ftyp"/"styp" file type box: brand and compatibility info.
type boxFtyp struct {
	majorBrand uint32
	minorVersion uint32
	compatibleBrands []uint32
	isQuickTimeFormat bool
}

// boxSidx holds the "sidx" segment index box.
// (Field name "earlistPresentationTime" carries a historical typo of
// "earliest"; renaming would touch other blocks, so it is kept.)
type boxSidx struct {
	referenceID uint32
	timeScale uint32
	earlistPresentationTime uint64
	firstTime uint64
	// referenceCount is the number of entries in reference.
	referenceCount uint16
	reference []struct {
		referenceType uint8 // reference_type 1 bit
		referenceSize uint32 // reference_size 31 bit
		subSegmentDuration uint32
		startWithSAP uint8 // starts_with_SAP 1 bit
		sapType uint8 // SAP_type 3 bit
		sapDeltaTime uint32 // SAP_delta_time 28 bit
	}
}
// String implements fmt.Stringer, dumping the segment index header and every
// reference entry in a human-readable form.
func (p *boxSidx) String() string {
	refs := ""
	for i := uint16(0); i < p.referenceCount; i++ {
		r := p.reference[i]
		refs += fmt.Sprintf(" referenceType:%-2d referenceSize:%-10d subSegmentDuration:%-10d startWithSAP:%-1d sapType:%-2d sapDeltaTime:%-10d\n",
			r.referenceType, r.referenceSize, r.subSegmentDuration, r.startWithSAP,
			r.sapType, r.sapDeltaTime)
	}
	return fmt.Sprintf("\n[Segment Index]:\n{ ReferenceID:%d\n Time Scale:%d EarlistPresentationTime:%d\n "+
		"firstTime:%d\n%s}", p.referenceID, p.timeScale, p.earlistPresentationTime, p.firstTime, refs)
}
// boxSsix holds the "ssix" sub-segment index box.
type boxSsix struct {
	sugSegmentCount uint32 // is ranges' len
	ranges []struct {
		rangeCount uint32 // is rangeSize's len
		rangeSize []struct {
			level uint8
			size uint32
		}
	}
}
// type boxMfra struct {
// trfa []boxTraf
// mfro *boxMfro
// }
// boxMvhd holds the "mvhd" movie header box.
type boxMvhd struct {
	version int
	creationTime uint64 // uint32 : Version == 0
	modificationTime uint64 // uint32 : Version == 0
	timeScale uint32
	duration uint64 // uint32 : Version == 0
	rate uint32 // 0x00010000
	volume uint16 // 0x0100
	reserved1 [10]uint8 // bit(16) reserved = 0; int(32)[2] reserved = 0; int(32)[9]
	matrix [9]uint32 // int(32)[9] matrix = { 0x00010000,0,0,0,0x00010000,0,0,0,0x40000000 };
	reserved2 [24]uint8 // bit(32)[6] pre_defined = 0;
	nextTrackId uint32
}
// boxMvex holds the "mvex" movie extends box and its children.
type boxMvex struct {
	fragmentDuration uint64 // uint32 if Version == 0
	trex []boxTrex
	leva *boxLeva
}
// boxTrex holds the "trex" track extends box (per-track fragment defaults).
type boxTrex struct {
	trackId uint32
	defaultSampleDescriptionIndex uint32
	defaultSampleDuration uint32
	defaultSampleSize uint32
	defaultSampleFlags uint32
}
// boxLeva holds the "leva" level assignment box.
type boxLeva struct {
	levelCount uint8
	levels []struct {
		trackId uint32
		paddingFlag uint8 // 1 bit
		assignmentType uint8 // 7bit
		groupingType uint32 // assignmentType == 0 || 1
		groupingTypeParameter uint32 // assignmentType == 1
		subTrackId uint32 // assignmentType == 4
	}
}
// boxTrak aggregates everything parsed for one "trak" box: track header
// values plus pointers to its child sample-table and protection boxes.
type boxTrak struct {
	id uint32 // track id
	trackEnabled bool // is track enabled
	trackType TrackType
	quickTimeFormat bool // only for audio
	movie *MovieInfo
	creationTime uint64 // in seconds since midnight, Jan. 1, 1904, in UTC time
	modificationTime uint64 // in seconds since midnight, Jan. 1, 1904, in UTC time
	// the duration of media. If edit list box exist, the value of this field is equal to
	// the sum of the durations of all the track’s edits.
	duration uint64
	sampleNumber uint64
	timeOffset int64
	timeScale uint32
	language uint16 // ISO-639-2/T language code
	extLanguage string
	// for visual tracks
	flagTrackSizeIsAspectRatio bool
	width uint32
	height uint32
	format uint32 // fourCC format, i.e. unencrypted sample entry/ Coding name
	protection []*ProtectedInformation
	edts *boxEdts
	// mdia *boxMdia
	audioEntry *audioSampleEntry
	videoEntry *videoSampleEntry
	stts *boxStts
	ctts *boxCtts
	cslg *boxCslg
	stsc *boxStsc
	stsz *boxStsz
	stco *boxStco
	syncSamples []uint32
	stss *boxStss
	stsh *boxStsh
	samplePriority []uint16 // degradation priority of each sample. If existed, len(samplePriority) == sample_count of stsz box
	sampleDependency *boxSdtp
	subs *boxSubs
	sbgp *boxSbgp
	sgpd *boxSgpd
	saio *boxSaio
	saiz *boxSaiz
	senc *boxSenc
}
// boxMdia holds the "mdia" media box: media header values plus child boxes.
type boxMdia struct {
	// media header
	creationTime uint64 // in seconds since midnight, Jan. 1, 1904, in UTC time
	modificationTime uint64 // in seconds since midnight, Jan. 1, 1904, in UTC time
	timeScale uint32
	duration uint64 // in timeScale
	language uint16 // unsigned int(5)[3], ISO-639-2/T language code
	stbl *boxStbl
	ctts *boxCtts
	extLanguageTag string
}
// PSSH holds a "pssh" protection system specific header box.
type PSSH struct {
	SystemId []byte // uuid, 128 bits (16 bytes)
	KIdCount uint32 // number of KId
	KId [][]byte // unsigned int(8)[16] KID
	DataSize uint32
	Data []byte // len(Data) == DataSize
}
// boxTkhd holds the "tkhd" track header box.
type boxTkhd struct {
	trackId uint32
	creationTime uint64 // if Version == 1 else uint32; in seconds since midnight, Jan. 1, 1904, in UTC time
	modificationTime uint64 // if Version == 1 else uint32; in seconds since midnight, Jan. 1, 1904, in UTC time
	duration uint64
	volume uint16 // if track_is_audio 0x0100 else 0
	width uint32
	height uint32
	flagTrackEnabled bool
	flagTrackInMovie bool
	flagTrackInPreview bool
}
type boxMinf struct {
dinf *boxDinf
stbl *boxStbl
}
type dataEntry struct {
entryFlag uint32
content string
}
type boxDinf struct {
entryCount uint32
dataEntries map[uint32]*dataEntry
}
type boxStbl struct {
stsd *boxStsd
stts *boxStts
stsc *boxStsc
stco *boxStco
stsz *boxStsz
stss *boxStss
ctts *boxCtts
saio []*boxSaio
saiz []*boxSaiz
sbgp *boxSbgp
sgpd *boxSgpd
subs *boxSubs
}
// boxStsd is the sample description box ("stsd"): at most one audio and
// one video sample entry, plus protection info for encrypted tracks.
type boxStsd struct {
	version          uint8
	entryCount       uint32
	audioSampleEntry *audioSampleEntry
	videoSampleEntry *videoSampleEntry
	protectedInfo    *ProtectedInformation
}
// audioSampleEntry holds the fields parsed from an audio sample entry in
// "stsd", including QuickTime-specific (qttf*) extensions and per-codec
// decoder descriptors.
type audioSampleEntry struct {
	qttfBytesPerSample   uint32
	qttfSamplesPerPacket uint32
	qttfBytesPerPacket   uint32
	qttfBytesPerFrame    uint32
	quickTimeVersion     int
	codec                CodecType
	channelCount         uint16
	sampleRate           uint32
	sampleSize           uint16
	originalFormat       uint32
	protectedInfo        ProtectedInformation
	format               uint32                    // need to be specific, now it represent the entryType
	descriptorsRawData   map[CodecType][]byte      // raw Data of descriptor
	decoderDescriptors   map[CodecType]interface{} // store the descriptor in specific struct
}
// videoSampleEntry holds the fields parsed from a video sample entry in
// "stsd": dimensions, optional colour/aspect/aperture child boxes, and
// per-codec decoder configuration records.
type videoSampleEntry struct {
	originalFormat     uint32
	codec              CodecType
	dataReferenceIndex uint16
	width              uint16
	height             uint16
	depth              uint16
	format             uint32 // need to be specific, now it represent the entryType
	// ColourInformationBox, if has
	colourType              uint32
	colorPrimaries          uint16
	transferCharacteristics uint16
	matrixCoefficients      uint16
	fullRangeFlag           bool
	iCCProfile              []byte
	// PixelAspectRatioBox, if has
	hSpacing uint32
	vSpacing uint32
	// CleanApertureBox, if has
	cleanApertureWidthN  uint32
	cleanApertureWidthD  uint32
	cleanApertureHeightN uint32
	cleanApertureHeightD uint32
	horizOffN            uint32
	horizOffD            uint32
	vertOffN             uint32
	vertOffD             uint32
	protectedInfo        *ProtectedInformation // information of encv
	configurationRecordsRawData map[CodecType][]byte      // raw Data of decoderConfigurationRecord
	decoderConfigurationRecords map[CodecType]interface{} // key: codec type. value: parsed of decoderConfigurationRecord
}
// String returns a human-readable summary of the video track entry
// (formats, codec, dimensions and pixel aspect spacing).
func (v *videoSampleEntry) String() string {
	const layout = "\n[Video Track Information]:\n{\n Original Format:%s\n " +
		"RealFormat:%s\n Codec:%s\n Width:%d, Height:%d\n hSpacing:%d, vSpacing:%d\n}\n"
	return fmt.Sprintf(layout,
		int2String(v.originalFormat), int2String(v.format), codecString[v.codec],
		v.width, v.height, v.hSpacing, v.vSpacing)
}
// ProtectedInformation aggregates the protection scheme data parsed from
// the "sinf"/"schm"/"tenc" boxes of an encrypted sample entry.
type ProtectedInformation struct {
	DataFormat             uint32 // coding name fourcc
	SchemeType             uint32 // 4CC identifying the scheme
	SchemeVersion          uint32 // scheme Version
	TencVersion            uint8  // Version if "tenc"
	DefaultCryptByteBlock  uint8  // 4 bits
	DefaultSkipByteBlock   uint8  // 4 bits
	DefaultIsProtected     uint8  // least significant bit: 1 byte
	DefaultPerSampleIVSize uint8  // least significant bit 1 byte
	DefaultKID             []byte // 16 bytes
	// if DefaultIsProtected == 1 && DefaultPerSampleIVSize == 0 ->
	DefaultConstantIVSize uint8  // least significant bit 1 byte
	DefaultConstantIV     []byte // size: DefaultConstantIVSize bytes
}
// String returns a one-line summary of the protection scheme type and
// whether the content is flagged as protected.
func (p *ProtectedInformation) String() string {
	scheme := int2String(p.SchemeType)
	return fmt.Sprintf(" SchemeType:%s is_protected:%d\n", scheme, p.DefaultIsProtected)
}
// boxStts is the decoding time-to-sample box ("stts"), stored as
// parallel run-length arrays of entryCount elements.
type boxStts struct {
	entryCount  uint32
	sampleCount []uint32
	sampleDelta []uint32
}

// boxStsc is the sample-to-chunk box ("stsc"); the three slices are
// parallel arrays of entryCount elements.
type boxStsc struct {
	entryCount             uint32
	firstChunk             []uint32
	samplePerChunk         []uint32
	sampleDescriptionIndex []uint32
}

// boxStsz is the sample size box; atomType records whether it came from
// "stsz" or the compact "stz2" variant.
type boxStsz struct {
	atomType    uint32 // fourCCstsz/forCCstz2
	sampleSize  uint32 // stsz
	fieldSize   uint8  // stz2
	sampleCount uint32
	entrySize   []uint32
}

// boxStco is the chunk offset box. Offsets are stored as uint64, which
// presumably lets the same struct hold 64-bit "co64" data — confirm at
// the parse site.
type boxStco struct {
	entryCount  uint32
	chunkOffset []uint64
}
// boxTraf is a track fragment box ("traf") together with its parsed
// child boxes.
type boxTraf struct {
	tfhd                *boxtfhd
	subs                *boxSubs
	baseMediaDecodeTime uint64     // Track fragment decode time
	trun                []*boxTrun // 0 or more
	sbgp                *boxSbgp   // 0 or more
	sgpd                *boxSgpd   // 0 or more, with one for each 'sbgp'
	saio                []*boxSaio // 0 or more
	saiz                []*boxSaiz // 0 or more
	senc                *boxSenc
	psshs               []PSSH
}

// boxtfhd is the track fragment header ("tfhd"); optional fields are
// pointers that stay nil unless the matching flag bit is set.
type boxtfhd struct {
	trackId                uint32
	flags                  uint32
	baseDataOffset         *uint64 // if flags & 0x000001
	sampleDescriptionIndex *uint32 // if flags & 0x000002
	defaultSampleDuration  *uint32 // if flags & 0x000008
	defaultSampleSize      *uint32 // if flags & 0x000010
	defaultSampleFlags     *uint32 // if flags & 0x000020
	// NOTE(review): ISO/IEC 14496-12 defines default-base-is-moof as flag
	// 0x020000, not the absence of 0x000001 alone — verify against the
	// parser before relying on this comment.
	defaultBaseIsMoof bool // if flags & 0x000001 == 0
}
// trunSample is one sample record inside a "trun"; each field is present
// only when the corresponding trun flag bit is set.
type trunSample struct {
	sampleDuration              *uint32
	sampleSize                  *uint32
	sampleFlags                 *uint32
	sampleCompositionTimeOffset *int32 // unsigned if version == 0
}

// boxTrun is the track fragment run box ("trun"); dataOffset and
// firstSampleFlags are optional and stay nil when absent.
type boxTrun struct {
	sampleCount      uint32
	dataOffset       *uint32
	firstSampleFlags *uint32
	samples          []*trunSample
}

// sample to group
// boxSbgp is the sample-to-group box ("sbgp").
type boxSbgp struct {
	groupingType          uint32
	groupingTypeParameter *uint32  // if version == 1
	entryCount            uint32
	sampleCount           []uint32 // len(sampleCount) == entryCount
	groupDescriptionIndex []uint32 // len(groupDescriptionIndex) == entryCount
}
// cencSampleEncryptionInformationGroupEntry is a "seig" sample group
// entry describing the CENC encryption parameters shared by a group of
// samples.
type cencSampleEncryptionInformationGroupEntry struct {
	cryptByteBlock  uint8
	skipByteBlock   uint8
	isProtected     bool
	perSampleIVSize uint8
	kID             []byte // 16 byte
	constantIV      []byte // if isProtected && perSampleIVSize
}

// SampleGroupDescription
// boxSgpd is the sample group description box ("sgpd").
type boxSgpd struct {
	groupingType                  uint32  // only support "seig" currently
	defaultLength                 *uint32 // if version == 1
	defaultSampleDescriptionIndex *uint32 // if version >= 2
	entryCount                    uint32
	descriptionLength             *uint32                                      // if version ==1 && defaultLength == 0
	cencGroupEntries              []*cencSampleEncryptionInformationGroupEntry // len(cencGroupEntries) == entryCount
}
// subSampleEncryption gives the clear/protected byte split for one
// sub-sample of an encrypted sample.
type subSampleEncryption struct {
	bytesOfClearData     uint16
	bytesOfProtectedData uint32
}

// sampleEncryption holds one sample's IV and its optional sub-sample map.
type sampleEncryption struct {
	IV             []byte
	subSampleCount uint16
	subSamples     []subSampleEncryption
}

// boxSenc is the sample encryption box ("senc").
type boxSenc struct {
	flags       uint32
	sampleCount uint32
	samples     []*sampleEncryption
}

// struct of "subs"
// subSampleInfo describes one sub-sample: its size, priority and codec
// specific parameters.
type subSampleInfo struct {
	subSampleSize           uint32
	subSamplePriority       uint8
	discardable             uint8
	codecSpecificParameters uint32
}

// subSampleEntry groups the sub-samples belonging to one sample.
type subSampleEntry struct {
	sampleDelta    uint32
	subSampleCount uint16
	subSamples     []*subSampleInfo
}

// boxSubs is the sub-sample information box ("subs").
type boxSubs struct {
	flags      uint32
	entryCount uint32
	entries    []*subSampleEntry
}
// boxSaio is the sample auxiliary information offsets box ("saio").
type boxSaio struct {
	auxInfoType          *uint32
	auxInfoTypeParameter *uint32
	entryCount           uint32
	offset               []uint64 // len(offset) == entryCount
}

// boxSaiz is the sample auxiliary information sizes box ("saiz").
type boxSaiz struct {
	auxInfoType           *uint32
	auxInfoTypeParameter  *uint32
	defaultSampleInfoSize uint8
	sampleCount           uint32
	sampleInfoSize        []uint8 // len(sampleInfoSize) == sampleCount
}

// boxEdts holds the edit list entries ("edts"/"elst") as parallel arrays.
type boxEdts struct {
	entryCount   uint32
	editDuration []uint64 // if Version == 0, uint32
	mediaTime    []int64  // if Version == 0, int32
	mediaRate    []float32
}
// boxStss is the sync sample box ("stss") listing random-access sample
// numbers.
type boxStss struct {
	entryCount   uint32
	sampleNumber []uint32
}

// boxCtts is the composition time-to-sample box ("ctts") as parallel
// run-length arrays.
type boxCtts struct {
	entryCount   uint32
	sampleCount  []uint32
	sampleOffset []int32 // signed if version == 1
}

// CompositionToDecodeBox
// boxCslg carries the composition-to-decode timing bounds ("cslg").
type boxCslg struct {
	compositionToDTSShift        int64
	leastDecodeToDisplayDelta    int64
	greatestDecodeToDisplayDelta int64
	compositionStartTime         int64
	compositionEndTime           int64
}

// shadow sync table, for seeking or for similar purposes
type boxStsh struct {
	entryCount           uint32
	shadowedSampleNumber []uint32 // size is entryCount
	syncSampleNumber     []uint32 // size is entryCount
}

// boxSdtp is the independent and disposable samples box ("sdtp").
type boxSdtp struct {
	// all parameter's length is sample_count in stsz box
	isLeading           []uint8
	sampleDependsOn     []uint8
	sampleIsDependedOn  []uint8
	sampleHasRedundancy []uint8
}
//
// type sphatial struct {
// Spherical bool
// Stitched bool
// StitchingSoftware bool
// }
|
package problem
// Main runs the three demo functions in sequence.
// NOTE(review): notSync, syncWithMutex and syncWithChan are defined
// elsewhere in this package; judging by their names they demonstrate
// unsynchronized vs mutex vs channel based synchronization — confirm in
// their definitions.
func Main() {
	notSync()
	syncWithMutex()
	syncWithChan()
}
|
// Copyright (c) OpenFaaS Author(s) 2018. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package handlers
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
// Test_MakeNotifierWrapper_ReceivesHttpStatusInNotifier verifies that a
// status code written explicitly by the wrapped handler is propagated to
// both the response recorder and the notifier.
func Test_MakeNotifierWrapper_ReceivesHttpStatusInNotifier(t *testing.T) {
	notifier := &testNotifier{}
	handlerVisited := false
	handlerWant := http.StatusAccepted

	handler := MakeNotifierWrapper(func(w http.ResponseWriter, r *http.Request) {
		handlerVisited = true
		w.WriteHeader(handlerWant)
	}, []HTTPNotifier{notifier})

	req := httptest.NewRequest(http.MethodGet, "/", nil)
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)

	if !handlerVisited {
		t.Errorf("expected handler to have been visited")
	}
	if notifier.StatusReceived == 0 {
		// Fatalf replaces the former Errorf + redundant t.Fail() + return.
		t.Fatalf("notifier wanted a status, but got none")
	}
	// Call Result() once instead of twice.
	if got := rec.Result().StatusCode; got != handlerWant {
		t.Errorf("recorder status want: %d, got %d", handlerWant, got)
	}
	if notifier.StatusReceived != handlerWant {
		t.Errorf("notifier status want: %d, got %d", handlerWant, notifier.StatusReceived)
	}
}
// Test_MakeNotifierWrapper_ReceivesDefaultHttpStatusWhenNotSet verifies
// that when the wrapped handler never calls WriteHeader, the notifier
// observes the implicit 200 OK.
func Test_MakeNotifierWrapper_ReceivesDefaultHttpStatusWhenNotSet(t *testing.T) {
	notifier := &testNotifier{}
	handlerVisited := false
	handlerWant := http.StatusOK

	handler := MakeNotifierWrapper(func(w http.ResponseWriter, r *http.Request) {
		handlerVisited = true
	}, []HTTPNotifier{notifier})

	req := httptest.NewRequest(http.MethodGet, "/", nil)
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)

	if !handlerVisited {
		t.Errorf("expected handler to have been visited")
	}
	if notifier.StatusReceived == 0 {
		// Fatalf replaces the former Errorf + redundant t.Fail() + return.
		t.Fatalf("notifier wanted a status, but got none")
	}
	// Call Result() once instead of twice.
	if got := rec.Result().StatusCode; got != handlerWant {
		t.Errorf("recorder status want: %d, got %d", handlerWant, got)
	}
	if notifier.StatusReceived != handlerWant {
		t.Errorf("notifier status want: %d, got %d", handlerWant, notifier.StatusReceived)
	}
}
// testNotifier is a stub HTTPNotifier that records the last status code
// it was notified with, so tests can assert on it.
type testNotifier struct {
	StatusReceived int
}

// Notify about service metrics; only the status code is retained.
func (tf *testNotifier) Notify(method string, URL string, originalURL string, statusCode int, duration time.Duration) {
	tf.StatusReceived = statusCode
}
|
// Wallet system
package ds
// Wallet holds a simple integer balance.
type Wallet struct {
	bal int
}

// balance returns the current balance of w.
func balance(w *Wallet) int {
	// Pointer fields are auto-dereferenced in Go; (*w).bal was redundant.
	return w.bal
}

// deposit adds amount to w's balance.
func deposit(w *Wallet, amount int) {
	w.bal += amount
}
|
package util_test
import(
"testing"
"util"
"fmt"
"time"
)
// Test_ParseDate verifies that util.ParseDate maps a "yyyyMM" string to
// the last day of that month, including February leap-year handling.
func Test_ParseDate(t *testing.T) {
	cases := []struct {
		in    string
		year  int
		month int
		day   int
	}{
		{"201301", 2013, 1, 31},
		{"190002", 1900, 2, 28}, // 1900: divisible by 100 but not 400 -> not leap
		{"200002", 2000, 2, 29}, // 2000: divisible by 400 -> leap
		{"200402", 2004, 2, 29},
	}
	for _, c := range cases {
		d := util.ParseDate(c.in)
		fmt.Println(d)
		if d.Year() == c.year && int(d.Month()) == c.month && d.Day() == c.day {
			t.Log("Success to parse: ", c.in)
		} else {
			t.Error("Cannot parse: ", c.in)
		}
	}
}
// Test_IsLeapYear checks util.IsLeapYear against known leap and non-leap
// years, using a table instead of four copy-pasted blocks.
func Test_IsLeapYear(t *testing.T) {
	cases := []struct {
		year int
		want bool
	}{
		{1900, false}, // century year not divisible by 400
		{2000, true},  // divisible by 400
		{2008, true},
		{1993, false},
	}
	for _, c := range cases {
		if util.IsLeapYear(c.year) == c.want {
			t.Log("Success to judge the leap year: ", c.year)
		} else {
			t.Error("Cannot judge the leap year: ", c.year)
		}
	}
}
// Test_LastDay checks util.LastDay for a plain and a leap-year February.
func Test_LastDay(t *testing.T) {
	cases := []struct {
		year  int
		month int
		want  int
	}{
		{2001, 2, 28},
		{2000, 2, 29}, // leap-year February
	}
	for _, c := range cases {
		ret := util.LastDay(c.year, c.month)
		if ret == c.want {
			t.Log("Success to get the right last day: ", c.year, c.month, ret)
		} else {
			t.Error("Wrong to get the last day: ", c.year, c.month, ret)
		}
	}
}
// Test_FormatDate verifies util.FormatDate renders yyyy-MM-dd.
func Test_FormatDate(t *testing.T) {
	date, err := time.Parse("20060102", "20141127")
	// The parse error was previously discarded; a bad fixture would have
	// produced a confusing failure downstream.
	if err != nil {
		t.Fatal("Fail to parse fixture date: ", err)
	}
	if result := util.FormatDate(date); result == "2014-11-27" {
		t.Log("Success to format date")
	} else {
		t.Error("Fail to format date as: yyyy-MM-dd")
	}
}
|
package main
import "fmt"
// number returns the fixed sample value fed into the switch in main.
// number supplies the value the switch in main branches on.
func number() int {
	const sample = 75
	return sample
}
func main(){
switch num:=number(); {
case num < 50:
fmt.Println("num less then 50")
fallthrough
case num < 100:
fmt.Println("num less then 100")
fallthrough
case num < 200:
fmt.Println("num less then 200")
}
} |
// Package imageutil is a collection of low-level image processing tools.
package imageutil
|
package main
import (
"io"
)
// Register day 9 with the puzzle dispatcher; the closure routes to the
// part-1 or part-2 solver depending on the part2 flag.
var _ = declareDay(9, func(part2 bool, inputReader io.Reader) interface{} {
	if part2 {
		return day09Part2(inputReader)
	}
	return day09Part1(inputReader)
})
// day09Part1 boots the intcode computer from inputReader, runs it
// concurrently, and returns its single output for input value 1.
func day09Part1(inputReader io.Reader) interface{} {
	var c computer // renamed: the original local shadowed its own type name
	c.init(inputReader)
	go c.run()
	return c.singleInOut(1)
}
// day09Part2 boots the intcode computer from inputReader, runs it
// concurrently, and returns its single output for input value 2.
func day09Part2(inputReader io.Reader) interface{} {
	var c computer // renamed: the original local shadowed its own type name
	c.init(inputReader)
	go c.run()
	return c.singleInOut(2)
}
|
/*
Challenge
Given a colour raster image* with the same width and height, output the image transformed under Arnold's cat map. (*details see below)
Definition
Given the size of the image N we assume that the coordinates of a pixel are given as numbers between 0 and N-1.
Arnold's cat map is then defined as follows:
A pixel at coordinates [x,y] is moved to [(2*x + y) mod N, (x + y) mod N].
This is nothing but a linear transform on torus: The yellow, violet and green part get mapped back onto the initial square due to the mod N.
visualization
This map (let's call it f) has following properties:
It is bijective, that means reversible: It is a linear transformation with the matrix [[2,1],[1,1]]. Since it has determinant 1 and only integer entries, the inverse also has only integer entries and is given by [[1,-1],[-1,2]]; this means it is also bijective on integer coordinates.
It is a torsion element of the group of bijective maps of N x N images, that means if you apply it sufficiently many times, you will get the original image back: f(f(...f(x)...)) = x The amount of times the map applied to itself results in the identity is guaranteed to be less or equal to 3*N. In the following you can see the image of a cat after a given number of iterated applications of Arnold's cat map, and an animation of what a repeated application looks like:
multiple repeated applications
Details
Your program does not necessarily have to deal with images, but 2D-arrays/matrices, strings or similar 2D-structures are acceptable too.
It does not matter whether your (0,0) point is on the bottom left or on the top left. (Or in any other corner, if this is more convenient in your language.) Please specify what convention you use in your submission.
Testcases
In matrix form ([1,2,3,4] is the top row, 1 has index (0,0), 2 has index (1,0), 5 has index (0,1))
1 2 3 4
5 6 7 8
9 10 11 12
13 14 15 16
maps to:
1 14 11 8
12 5 2 15
3 16 9 6
10 7 4 13
--------------------
1 2 3
4 5 6
7 8 9
map to:
1 8 6
9 4 2
5 3 7
As image (bottom left is (0,0)):
*/
package main
import (
"flag"
"fmt"
"image"
"image/draw"
_ "image/gif"
_ "image/jpeg"
"image/png"
"log"
"os"
"path/filepath"
)
// flags holds the parsed command-line options.
var flags struct {
	prefix string // output file-name prefix (-p)
	outdir string // output directory (-o)
}
// main loads the input image named by the first positional argument and
// writes successive applications of Arnold's cat map to the output dir.
func main() {
	parseflags()
	m, err := loadimage(flag.Arg(0))
	ck(err)
	iterate(m, flags.prefix, flags.outdir)
}
// ck aborts the program with a fatal log message when err is non-nil;
// a nil error is a no-op.
func ck(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
// parseflags registers and parses the command-line flags into the
// package-level flags struct; it exits via usage() when no positional
// image argument is given.
func parseflags() {
	flag.StringVar(&flags.prefix, "p", "", "image prefix")
	flag.StringVar(&flags.outdir, "o", ".", "output directory")
	flag.Parse()
	if flag.NArg() < 1 {
		usage()
	}
}
// usage prints the command usage and flag defaults to stderr, then
// exits with status 2.
func usage() {
	fmt.Fprintln(os.Stderr, "usage: [options] <image>")
	flag.PrintDefaults()
	os.Exit(2)
}
// loadimage opens and decodes the named image file (any registered
// format) and returns it converted to RGBA.
func loadimage(name string) (*image.RGBA, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	src, _, err := image.Decode(f)
	if err != nil {
		return nil, err
	}
	return conv(src), nil
}
func conv(m image.Image) *image.RGBA {
r := m.Bounds()
p := image.NewRGBA(r)
draw.Draw(p, r, m, image.ZP, draw.Src)
return p
}
// iterate repeatedly applies the cat map, writing each intermediate
// image, until either a square image cycles back to the original or the
// map reaches a fixed point (a == b after an application).
func iterate(m *image.RGBA, prefix, outdir string) {
	a := conv(m) // working copy: map source
	b := conv(m) // working copy: map destination
	r := a.Bounds()
	w := r.Dx()
	h := r.Dy()
	// iteration 0 is the unmodified input image
	writeimage(m, prefix, outdir, 0)
	for i := 1; ; i++ {
		catmap(a, b)
		writeimage(b, prefix, outdir, i)
		// convergence of same size image is guaranteed
		// otherwise find fixed point
		if (w == h && equal(m, b)) || equal(a, b) {
			break
		}
		// ping-pong the buffers so the next pass reads the latest image
		a, b = b, a
	}
}
func writeimage(m *image.RGBA, prefix, outdir string, iter int) (err error) {
defer func() {
if err != nil {
fmt.Println(err)
}
}()
name := fmt.Sprintf("%v/%v%v.png", outdir, prefix, iter)
name = filepath.Clean(name)
fmt.Println("writing", name)
dir := filepath.Dir(name)
os.MkdirAll(dir, 0755)
f, err := os.Create(name)
if err != nil {
return err
}
err = png.Encode(f, m)
xerr := f.Close()
if err == nil {
err = xerr
}
return err
}
func catmap(a, b *image.RGBA) {
r := a.Bounds()
for y := r.Min.Y; y < r.Max.Y; y++ {
for x := r.Min.X; x < r.Max.X; x++ {
nx := (2*x + y) % r.Dx()
ny := (x + y) % r.Dy()
b.SetRGBA(nx, ny, a.RGBAAt(x, y))
}
}
}
func equal(a, b *image.RGBA) bool {
r := a.Bounds()
s := b.Bounds()
if r != s {
return false
}
for y := r.Min.Y; y < r.Max.Y; y++ {
for x := r.Min.X; x < r.Max.X; x++ {
u := a.RGBAAt(x, y)
v := b.RGBAAt(x, y)
if u != v {
return false
}
}
}
return true
}
|
package db_mysql
import (
"database/sql"
"fmt"
"github.com/astaxie/beego"
_ "github.com/go-sql-driver/mysql"
)
// Db is the shared database handle populated by ConnectDB.
var Db *sql.DB

// ConnectDB opens the MySQL connection described by the beego application
// configuration and stores the handle in the package-level Db variable.
// It panics when the handle cannot be created.
func ConnectDB() {
	config := beego.AppConfig
	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8",
		config.String("db_user"),
		config.String("db_password"),
		config.String("db_ip"),
		config.String("db_name"))
	db, err := sql.Open(config.String("db_driverName"), dsn)
	if err != nil {
		fmt.Println(err.Error())
		panic("数据库错误")
	}
	Db = db
}
|
package main
import (
"fmt"
"image"
"image/color"
"image/png"
"math/rand"
"os"
"runtime/trace"
"strings"
"sync"
log "github.com/cihub/seelog"
"github.com/grindlemire/seezlog"
"github.com/jessevdk/go-flags"
"github.com/pkg/profile"
)
// Opts defines the renderer's command-line options; the struct tags are
// consumed by the go-flags parser.
type Opts struct {
	File         string  `short:"f" long:"file" default:"output.png" description:"File name to output to"`
	Complexity   int     `short:"c" long:"complexity" default:"4" description:"Complexity of the fractal"`
	MaxIteration int     `short:"i" long:"maxIterations" default:"1000" description:"Max number of iterations to run in fractal calculation"`
	MoveX        float64 `short:"x" default:"0" description:"x movement"`
	MoveY        float64 `short:"y" default:"0" description:"y movement"`
	Zoom         float64 `short:"z" default:"1" description:"zoom level"`
	Width        int     `long:"width" default:"2048" description:"width of image"`
	Height       int     `long:"height" default:"2048" description:"height of image"`
	// profiling switches (mutually exclusive in practice; see getProfiler)
	Mem   bool `long:"mem" description:"memory profile"`
	CPU   bool `long:"cpu" description:"cpu profile"`
	Trace bool `long:"trace" description:"trace profile"`
	Block bool `long:"block" description:"block profile"`
}
// opts receives the parsed command-line values; parser binds the flag
// definitions declared on Opts to it.
var opts Opts
var parser = flags.NewParser(&opts, flags.Default)
// main parses flags, sets up logging and optional profiling, renders the
// fractal, and writes it as PNG to opts.File.
func main() {
	logger, err := seezlog.SetupConsoleLogger(seezlog.Info)
	if err != nil {
		fmt.Printf("Error creating logger: %v\n", err)
		exit(1)
	}
	err = log.ReplaceLogger(logger)
	if err != nil {
		fmt.Printf("Error replacing logger: %s\n", err)
		exit(1)
	}
	defer log.Flush()
	_, err = parser.Parse()
	if err != nil {
		if !isUsage(err) {
			log.Error("Error parsing arguments: ", err)
			exit(1)
		}
		exit(0)
	}
	// LaunchServer()
	f, err := os.Create(opts.File)
	if err != nil {
		log.Error("Error creating file: ", err)
		exit(1)
	}
	// The output file was previously never closed; release it once the
	// image has been written. (exit() bypasses defers via os.Exit, but
	// the process teardown closes the descriptor on those paths.)
	defer f.Close()
	p := getProfiler()
	if p != nil {
		defer profile.Start(p, profile.ProfilePath("./")).Stop()
	}
	if opts.Trace {
		// tf renamed so it no longer shadows the PNG output file f.
		tf, err := os.Create("./out.trace")
		if err != nil {
			log.Error("Error creating file: ", err)
			exit(1)
		}
		trace.Start(tf)
		defer trace.Stop()
	}
	// img, err := executeAlgorithm(opts)
	// img, err := executeColumnParallelAlgorithm(opts)
	img, err := executeBufferedColumnWorkersAlgorithm(opts)
	// img, err := executeWorkersAlgorithm(opts)
	// img, err := executeBufferedWorkersAlgorithm(opts)
	// img, err := executePixelParallelAlgorithm(opts)
	if err != nil {
		log.Error("Error executing algorithm: ", err)
		exit(1)
	}
	err = png.Encode(f, img)
	if err != nil {
		log.Error("Error encoding fractal to file: ", err)
		exit(1)
	}
}
// getProfiler maps the profiling command-line flags to the matching
// profile mode, checking CPU, then memory, then block; it returns nil
// when no profiling flag was set.
func getProfiler() func(p *profile.Profile) {
	switch {
	case opts.CPU:
		return profile.CPUProfile
	case opts.Mem:
		return profile.MemProfile
	case opts.Block:
		return profile.BlockProfile
	default:
		return nil
	}
}
// executeAlgorithm renders the fractal sequentially, one pixel at a time.
func executeAlgorithm(opts Opts) (img image.Image, err error) {
	canvas := createPNG(opts)
	for x := 0; x < opts.Width; x++ {
		for y := 0; y < opts.Height; y++ {
			canvas.Set(x, y, getMandelbrotColor(x, y, opts))
		}
	}
	return canvas, nil
}
// executePixelParallelAlgorithm renders with one goroutine per pixel.
// Each goroutine writes a distinct pixel of the shared canvas.
func executePixelParallelAlgorithm(opts Opts) (img image.Image, err error) {
	canvas := createPNG(opts)
	var wg sync.WaitGroup
	wg.Add(opts.Width * opts.Height)
	for x := 0; x < opts.Width; x++ {
		for y := 0; y < opts.Height; y++ {
			go func(x, y int) {
				defer wg.Done()
				canvas.Set(x, y, getMandelbrotColor(x, y, opts))
			}(x, y)
		}
	}
	wg.Wait()
	return canvas, nil
}
// executeColumnParallelAlgorithm renders with one goroutine per image
// column.
func executeColumnParallelAlgorithm(opts Opts) (img image.Image, err error) {
	canvas := createPNG(opts)
	var wg sync.WaitGroup
	wg.Add(opts.Width)
	for x := 0; x < opts.Width; x++ {
		go func(x int) {
			defer wg.Done()
			for y := 0; y < opts.Height; y++ {
				canvas.Set(x, y, getMandelbrotColor(x, y, opts))
			}
		}(x)
	}
	wg.Wait()
	return canvas, nil
}
// executeWorkersAlgorithm renders with a fixed pool of 8 workers fed one
// pixel at a time over an unbuffered channel.
func executeWorkersAlgorithm(opts Opts) (img image.Image, err error) {
	canvas := createPNG(opts)
	var wg sync.WaitGroup
	jobs := make(chan map[string]int)
	for n := 0; n < 8; n++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for px := range jobs {
				x, y := px["x"], px["y"]
				canvas.Set(x, y, getMandelbrotColor(x, y, opts))
			}
		}()
	}
	for x := 0; x < opts.Width; x++ {
		for y := 0; y < opts.Height; y++ {
			jobs <- map[string]int{"x": x, "y": y}
		}
	}
	close(jobs)
	wg.Wait()
	return canvas, nil
}
// executeBufferedWorkersAlgorithm is executeWorkersAlgorithm with a
// channel buffered for every pixel, so the producer never blocks.
func executeBufferedWorkersAlgorithm(opts Opts) (img image.Image, err error) {
	canvas := createPNG(opts)
	var wg sync.WaitGroup
	jobs := make(chan map[string]int, opts.Width*opts.Height)
	for n := 0; n < 8; n++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for px := range jobs {
				x, y := px["x"], px["y"]
				canvas.Set(x, y, getMandelbrotColor(x, y, opts))
			}
		}()
	}
	for x := 0; x < opts.Width; x++ {
		for y := 0; y < opts.Height; y++ {
			jobs <- map[string]int{"x": x, "y": y}
		}
	}
	close(jobs)
	wg.Wait()
	return canvas, nil
}
// executeBufferedColumnWorkersAlgorithm renders with 8 workers consuming
// column indices from a channel buffered for the whole width.
func executeBufferedColumnWorkersAlgorithm(opts Opts) (img image.Image, err error) {
	canvas := createPNG(opts)
	var wg sync.WaitGroup
	cols := make(chan int, opts.Width)
	for n := 0; n < 8; n++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for x := range cols {
				for y := 0; y < opts.Height; y++ {
					canvas.Set(x, y, getMandelbrotColor(x, y, opts))
				}
			}
		}()
	}
	for x := 0; x < opts.Width; x++ {
		cols <- x
	}
	close(cols)
	wg.Wait()
	return canvas, nil
}
// createPNG allocates the RGBA canvas the renderer draws into.
// NOTE(review): the palette built below is never used or returned — 1000
// random colors are generated and discarded on every call. It looks like
// dead code, but it is also this file's only use of math/rand, so
// removing it requires dropping that import as well.
func createPNG(opts Opts) (m *image.RGBA) {
	palette := []color.RGBA{}
	for i := 0; i < 1000; i++ {
		c := color.RGBA{
			R: uint8(rand.Intn(255)),
			G: uint8(rand.Intn(255)),
			B: uint8(rand.Intn(255)),
			A: 255,
		}
		palette = append(palette, c)
	}
	r := image.Rect(0, 0, opts.Width, opts.Height)
	m = image.NewRGBA(r)
	return m
}
// transformColor maps an escape-iteration count to a pixel color: points
// in the set are black, the first half of the range ramps red up from
// black, and the rest ramps from red toward white.
func transformColor(i int, opts Opts) color.RGBA {
	// Gradient position; exceeds 255 for large i, truncated on the uint8
	// conversion below exactly as in the per-branch original.
	c := (float64(i) / float64(opts.MaxIteration-1)) * (255) * 15
	switch {
	case i == opts.MaxIteration:
		// never escaped: inside the set, render black
		return color.RGBA{
			R: uint8(0),
			G: uint8(0),
			B: uint8(0),
			A: uint8(255),
		}
	case i < opts.MaxIteration/2-1:
		// first half: approach red from black
		return color.RGBA{
			R: uint8(c),
			G: uint8(0),
			B: uint8(0),
			A: uint8(255),
		}
	default:
		// second half: approach white from red
		return color.RGBA{
			R: uint8(255),
			G: uint8(c),
			B: uint8(c),
			A: uint8(255),
		}
	}
}
// getMandelbrotColor computes the escape-time iteration count for the
// pixel at (i, j) and maps it to a color via transformColor.
func getMandelbrotColor(i, j int, opts Opts) color.RGBA {
	iteration := 0
	// Map the pixel to a point c = (cx, cy) in the complex plane,
	// applying zoom and pan; the x axis is scaled by an extra 1.5.
	cx := 1.5*(float64(i)-float64(opts.Width)/2.0)/(.5*float64(opts.Width)*opts.Zoom) + opts.MoveX
	cy := (float64(j)-float64(opts.Height)/2.0)/(0.5*opts.Zoom*float64(opts.Height)) + opts.MoveY
	newX := float64(0)
	newY := float64(0)
	oldX := float64(0)
	oldY := float64(0)
	// Iterate z = z^2 + c until |z|^2 exceeds the escape threshold
	// (opts.Complexity) or the iteration budget is exhausted.
	for ((newX*newX)+(newY*newY) < float64(opts.Complexity)) && (iteration < opts.MaxIteration) {
		oldX = newX
		oldY = newY
		newX = oldX*oldX - oldY*oldY + cx
		newY = 2.0*oldX*oldY + cy
		iteration++
	}
	return transformColor(iteration, opts)
}
// exit flushes buffered log output and terminates the process with the
// given status. The explicit Flush is needed because os.Exit skips
// deferred calls.
func exit(status int) {
	log.Flush()
	os.Exit(status)
}
func isUsage(err error) bool {
return strings.HasPrefix(err.Error(), "Usage:")
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google
import (
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/apigee"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/apigee/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/apigee/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/apikeys"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/apikeys/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/apikeys/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/assuredworkloads"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/assuredworkloads/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/assuredworkloads/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/bigquery"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/bigquery/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/bigquery/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/bigqueryreservation"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/bigqueryreservation/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/bigqueryreservation/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/billingbudgets"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/billingbudgets/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/billingbudgets/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/binaryauthorization"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/binaryauthorization/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/binaryauthorization/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudbuild"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudbuild/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudbuild/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudbuildv2"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudbuildv2/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudbuildv2/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/clouddeploy"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/clouddeploy/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/clouddeploy/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudfunctions"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudfunctions/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudfunctions/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudidentity"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudidentity/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudidentity/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudkms"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudkms/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudkms/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudresourcemanager"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudresourcemanager/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudresourcemanager/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudscheduler"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudscheduler/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/cloudscheduler/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/compute"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/compute/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/compute/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/configcontroller/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/containeranalysis"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/containeranalysis/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/containeranalysis/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/containeraws"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/containeraws/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/containeraws/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/containerazure"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/containerazure/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/containerazure/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/datafusion/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/datafusion/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/dataplex"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/dataplex/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/dataplex/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/dataproc"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/dataproc/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/dataproc/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/dlp"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/dlp/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/dlp/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/eventarc"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/eventarc/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/eventarc/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/filestore"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/filestore/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/filestore/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/firebase/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/firebase/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/firebaserules"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/firebaserules/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/firebaserules/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/gkehub/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/gkehub/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iam"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iam/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iam/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iap"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iap/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iap/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/identitytoolkit"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/identitytoolkit/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/identitytoolkit/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/logging"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/logging/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/logging/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/monitoring"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/monitoring/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/monitoring/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/networkconnectivity"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/networkconnectivity/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/networkconnectivity/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/networksecurity/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/networksecurity/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/networkservices"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/networkservices/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/networkservices/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/orgpolicy"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/orgpolicy/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/orgpolicy/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/osconfig"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/osconfig/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/osconfig/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/privateca"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/privateca/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/privateca/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/pubsub"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/pubsub/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/pubsub/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/recaptchaenterprise"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/recaptchaenterprise/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/recaptchaenterprise/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/run/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/storage"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/storage/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/storage/beta"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/vpcaccess"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/vpcaccess/alpha"
_ "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/vpcaccess/beta"
)
|
package main
// main demonstrates Go string slicing: half-open index ranges that share
// the original string's bytes.
func main() {
	str := "abcdefg"
	head := str[:3] // "abc"
	mid := str[1:4] // "bcd"
	tail := str[2:] // "cdefg"
	println(head, mid, tail)
}
|
// Package for declaring types that will be used by various other packages. This is useful
// for preventing import cycles. For example, pkg/pods depends on pkg/auth. If both
// wish to use pods.ID, an import cycle is created.
package types
// PodID uniquely identifies a pod as an opaque string.
type PodID string

// String implements fmt.Stringer by returning the identifier's raw text.
func (p PodID) String() string {
	return string(p)
}
|
// break on switch statements.
package main
import "fmt"
// main demonstrates the two meanings of break inside a switch that sits in
// a loop: a bare break exits only the switch, while a labeled break exits
// the enclosing for loop.
func main() {
loop:
	for i := 0; i < 10; i++ {
		switch i {
		case 2:
			// bare break: leaves the switch, the loop continues with i=3
			fmt.Printf("%d break from switch\n", i)
			break
		case 3:
			// labeled break: terminates the for loop entirely
			fmt.Printf("%d break from loop\n", i)
			break loop
		default:
			fmt.Println(i)
		}
	}
}
|
package module
import (
"fmt"
"io"
"os"
"buddin.us/eolian/dsp"
"github.com/mitchellh/mapstructure"
)
// init registers the "Debug" module constructor. The module config accepts
// an optional RateDivisor (defaults to 10) and an optional Output writer
// (defaults to os.Stdout); a mapstructure decode failure aborts construction.
func init() {
	Register("Debug", func(c Config) (Patcher, error) {
		// Optional settings decoded from the raw module config map.
		var config struct {
			RateDivisor int
			Output      io.Writer
		}
		if err := mapstructure.Decode(c, &config); err != nil {
			return nil, err
		}
		if config.RateDivisor == 0 {
			config.RateDivisor = 10
		}
		if config.Output == nil {
			config.Output = os.Stdout
		}
		return newDebug(config.Output, config.RateDivisor)
	})
}
// debug is a pass-through module that periodically prints the current
// sample value to an output writer.
type debug struct {
	IO
	in     *In       // the single "input" port being observed
	rate   int       // number of samples between prints
	tick   int       // samples seen since the last print, wraps modulo rate
	output io.Writer // destination for printed sample values
}
// newDebug builds a debug module writing to w. With spacing of
// dsp.SampleRate/rate samples between prints, roughly `rate` values are
// emitted per second of audio processed.
func newDebug(w io.Writer, rate int) (*debug, error) {
	m := &debug{
		in:     NewIn("input", dsp.Float64(0)),
		rate:   dsp.SampleRate / rate,
		output: w,
	}
	return m, m.Expose("Debug", []*In{m.in}, []*Out{{Name: "output", Provider: dsp.Provide(m)}})
}
// Process copies the input into out and prints one sample per d.rate-sized
// window; the tick counter persists across frames so spacing stays even.
func (d *debug) Process(out dsp.Frame) {
	d.in.Process(out)
	for i := range out {
		// Print only when the counter wraps to zero (once every d.rate samples).
		if d.tick == 0 {
			fmt.Fprintf(d.output, "%v\n", float64(out[i]))
		}
		d.tick = (d.tick + 1) % d.rate
	}
}
|
package sshkeymanager
import (
"strings"
)
// User is one account entry parsed from /etc/passwd.
type User struct {
	Name  string // login name (field 1)
	UID   string // numeric user id, kept as text (field 3)
	Home  string // home directory (field 6)
	Shell string // login shell (field 7)
}

// users is package-level state appended to by GetUsers.
// NOTE(review): results accumulate across calls — consider a local slice.
var users []User
// GetUsers opens an SSH session, reads /etc/passwd on the remote host, and
// returns the parsed user entries. Malformed lines (fewer than the seven
// colon-separated passwd fields) are skipped.
//
// Fixes over the previous version: results are collected in a local slice
// instead of the package-level `users` var (which duplicated entries on
// every call), the empty-line check runs before splitting, and short lines
// no longer cause an index-out-of-range panic.
func (c *Client) GetUsers() ([]User, error) {
	if err := c.NewSession(); err != nil {
		return nil, err
	}
	defer c.CloseSession()

	raw, err := c.Ses.CombinedOutput("cat /etc/passwd")
	if err != nil {
		return nil, err
	}

	var result []User
	for _, line := range strings.Split(string(raw), "\n") {
		if len(line) == 0 {
			continue
		}
		fields := strings.Split(line, ":")
		// A valid passwd line has exactly 7 fields; skip anything shorter.
		if len(fields) < 7 {
			continue
		}
		result = append(result, User{
			Name:  fields[0],
			UID:   fields[2],
			Home:  fields[5],
			Shell: fields[6],
		})
	}
	return result, nil
}
|
package main
import (
"fmt"
"sort"
)
// arrayPairSum sorts nums ascending (in place, like the original) and sums
// the smaller element of each consecutive pair, which maximizes the total
// of per-pair minimums.
func arrayPairSum(nums []int) int {
	sort.Ints(nums)
	total := 0
	// step through even indices; each nums[i] is the minimum of its pair
	for i := 0; i+1 < len(nums); i += 2 {
		total += nums[i]
	}
	return total
}
// main prints the pair-sum for the classic LeetCode 561 example (expects 4).
func main() {
	fmt.Println(arrayPairSum([]int{1, 4, 3, 2}))
}
|
package main
import (
"context"
"fmt"
"log"
"net/http"
"github.com/sfomuseum/go-flags/flagset"
"github.com/aaronland/go-http-server"
)
func NewHandler() http.Handler {
fn := func(rsp http.ResponseWriter, req *http.Request) {
msg := fmt.Sprintf("Hello, %s", req.Host)
rsp.Write([]byte(msg))
}
h := http.HandlerFunc(fn)
return h
}
// main wires the greeting handler into an aaronland/go-http-server instance.
// The server address comes from the -server-uri flag, which may be overridden
// by AARONLAND_-prefixed environment variables.
func main() {
	var server_uri string

	fs := flagset.NewFlagSet("server")
	fs.StringVar(&server_uri, "server-uri", "http://localhost:8080", "A valid aaronland/go-http-server URI.")
	flagset.Parse(fs)

	// Apply env-var overrides after flag parsing, so env wins over defaults.
	err := flagset.SetFlagsFromEnvVars(fs, "AARONLAND")
	if err != nil {
		log.Fatalf("Failed to set flags from environment variables, %v", err)
	}

	ctx := context.Background()
	s, err := server.NewServer(ctx, server_uri)
	if err != nil {
		log.Fatalf("Unable to create server (%s), %v", server_uri, err)
	}

	mux := http.NewServeMux()
	handler := NewHandler()
	mux.Handle("/", handler)

	log.Printf("Listening on %s", s.Address())
	// Blocks until the server stops; any startup/serve error is fatal.
	err = s.ListenAndServe(ctx, mux)
	if err != nil {
		log.Fatalf("Failed to start server, %v", err)
	}
}
|
package optionsgen_test
import (
"testing"
testcase "github.com/kazhuravlev/options-gen/options-gen/testdata/case-09-custom-validator"
"github.com/stretchr/testify/assert"
)
// TestOptionsWithCustomValidator checks that options generated for
// case-09 honor the case's custom validator: the second argument 19
// validates while 17 is rejected.
// NOTE(review): the exact threshold semantics live in the testdata package.
func TestOptionsWithCustomValidator(t *testing.T) {
	t.Run("valid options", func(t *testing.T) {
		opts := testcase.NewOptions(100, 19)
		assert.NoError(t, opts.Validate())
	})
	t.Run("invalid options", func(t *testing.T) {
		opts := testcase.NewOptions(100, 17)
		assert.Error(t, opts.Validate())
	})
}
|
/*
Create a function that takes an array of strings and return an array, sorted from shortest to longest.
Examples
sortByLength(["Google", "Apple", "Microsoft"])
➞ ["Apple", "Google", "Microsoft"]
sortByLength(["Leonardo", "Michelangelo", "Raphael", "Donatello"])
➞ ["Raphael", "Leonardo", "Donatello", "Michelangelo"]
sortByLength(["Turing", "Einstein", "Jung"])
➞ ["Jung", "Turing", "Einstein"]
Notes
All test cases contain arrays with strings of different lengths, so you won't have to deal with multiple strings of the same length.
*/
package main
import (
"fmt"
"reflect"
"sort"
)
// main runs the sort-by-length examples from the prompt plus two extra
// cases; each test call asserts that lensort produces the expected order.
func main() {
	test([]string{"Google", "Apple", "Microsoft"}, []string{"Apple", "Google", "Microsoft"})
	test([]string{"Leonardo", "Michelangelo", "Raphael", "Donatello"}, []string{"Raphael", "Leonardo", "Donatello", "Michelangelo"})
	test([]string{"Turing", "Einstein", "Jung"}, []string{"Jung", "Turing", "Einstein"})
	test([]string{"Tatooine", "Hoth", "Yavin", "Dantooine"}, []string{"Hoth", "Yavin", "Tatooine", "Dantooine"})
	test([]string{"Mario", "Bowser", "Link"}, []string{"Link", "Mario", "Bowser"})
}
// test sorts s by length, prints the result, and panics (via assert) if it
// does not deep-equal the expected slice r.
func test(s, r []string) {
	got := lensort(s)
	fmt.Println(got)
	assert(reflect.DeepEqual(got, r))
}
// assert panics when the condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// lensort returns a copy of s ordered from shortest to longest string.
// The stable sort keeps equal-length strings in their original order, and
// the input slice is never mutated.
func lensort(s []string) []string {
	out := make([]string, len(s))
	copy(out, s)
	sort.SliceStable(out, func(a, b int) bool {
		return len(out[a]) < len(out[b])
	})
	return out
}
|
--- caddy.go.orig 2022-09-22 16:12:41 UTC
+++ caddy.go
@@ -824,6 +824,10 @@ func InstanceID() (uuid.UUID, error) {
return uuid.ParseBytes(uuidFileBytes)
}
+// VersionString uses a predefined version string to short-circuit
+// the Version() function below, to simplify vendor packaging.
+var VersionString string
+
// Version returns the Caddy version in a simple/short form, and
// a full version string. The short form will not have spaces and
// is intended for User-Agent strings and similar, but may be
@@ -841,6 +845,11 @@ func InstanceID() (uuid.UUID, error) {
//
// This function is experimental and subject to change or removal.
func Version() (simple, full string) {
+ // Use VersionString if provided (eg. through -ldflags -X)
+ if VersionString != "" {
+ return VersionString, VersionString
+ }
+
// the currently-recommended way to build Caddy involves
// building it as a dependency so we can extract version
// information from go.mod tooling; once the upstream
|
package wip
// Attachment is an uploaded file as returned by the WIP API.
// NOTE(review): field shapes mirror the JSON payload; aspect_ratio suggests
// attachments are images, but the API may accept other types — confirm.
type Attachment struct {
	ID          string  `json:"id"`
	URL         string  `json:"url"`
	AspectRatio float32 `json:"aspect_ratio"`
	Filename    string  `json:"filename"`
	Size        uint32  `json:"size"` // size in bytes, presumably — verify against API docs
	MimeType    string  `json:"mime_type"`
	CreatedAt   string  `json:"created_at"`
	UpdatedAt   string  `json:"updated_at"`
	CreatedBy   string  `json:"user"` // note: JSON key is "user", not "created_by"
}
|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "as IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"context"
"errors"
"fmt"
"bscp.io/pkg/dal/table"
"bscp.io/pkg/kit"
"bscp.io/pkg/logs"
pbbase "bscp.io/pkg/protocol/core/base"
pbtset "bscp.io/pkg/protocol/core/template-set"
pbds "bscp.io/pkg/protocol/data-service"
"bscp.io/pkg/search"
"bscp.io/pkg/types"
)
// CreateTemplateSet create template set.
func (s *Service) CreateTemplateSet(ctx context.Context, req *pbds.CreateTemplateSetReq) (*pbds.CreateResp, error) {
kt := kit.FromGrpcContext(ctx)
if _, err := s.dao.TemplateSet().GetByUniqueKey(
kt, req.Attachment.BizId, req.Attachment.TemplateSpaceId, req.Spec.Name); err == nil {
return nil, fmt.Errorf("template set's same name %s already exists", req.Spec.Name)
}
if req.Spec.Public == true {
req.Spec.BoundApps = []uint32{}
}
templateSet := &table.TemplateSet{
Spec: req.Spec.TemplateSetSpec(),
Attachment: req.Attachment.TemplateSetAttachment(),
Revision: &table.Revision{
Creator: kt.User,
Reviser: kt.User,
},
}
id, err := s.dao.TemplateSet().Create(kt, templateSet)
if err != nil {
logs.Errorf("create template set failed, err: %v, rid: %s", err, kt.Rid)
return nil, err
}
resp := &pbds.CreateResp{Id: id}
return resp, nil
}
// ListTemplateSets list template set.
func (s *Service) ListTemplateSets(ctx context.Context, req *pbds.ListTemplateSetsReq) (*pbds.ListTemplateSetsResp, error) {
kt := kit.FromGrpcContext(ctx)
opt := &types.BasePage{Start: req.Start, Limit: uint(req.Limit), All: req.All}
if err := opt.Validate(types.DefaultPageOption); err != nil {
return nil, err
}
searcher, err := search.NewSearcher(req.SearchFields, req.SearchValue, search.TemplateSet)
if err != nil {
return nil, err
}
details, count, err := s.dao.TemplateSet().List(kt, req.BizId, req.TemplateSpaceId, searcher, opt)
if err != nil {
logs.Errorf("list template sets failed, err: %v, rid: %s", err, kt.Rid)
return nil, err
}
resp := &pbds.ListTemplateSetsResp{
Count: uint32(count),
Details: pbtset.PbTemplateSets(details),
}
return resp, nil
}
// UpdateTemplateSet update template set.
func (s *Service) UpdateTemplateSet(ctx context.Context, req *pbds.UpdateTemplateSetReq) (*pbbase.EmptyResp, error) {
kt := kit.FromGrpcContext(ctx)
var (
hasInvisibleApp bool
invisibleApps []uint32
err error
)
if req.Spec.Public == false {
invisibleApps, err = s.dao.TemplateBindingRelation().ListTemplateSetInvisibleApps(kt, req.Attachment.BizId,
req.Id, req.Spec.BoundApps)
if err != nil {
logs.Errorf("update template set failed, err: %v, rid: %s", err, kt.Rid)
return nil, err
}
if len(invisibleApps) > 0 {
hasInvisibleApp = true
if !req.Force {
return nil, errors.New("template set is bound to unnamed app, please unbind first")
}
}
}
if len(req.Spec.TemplateIds) > 0 {
if err := s.dao.Validator().ValidateTemplatesExist(kt, req.Spec.TemplateIds); err != nil {
return nil, err
}
}
if _, err := s.dao.TemplateSet().GetByUniqueKeyForUpdate(
kt, req.Attachment.BizId, req.Attachment.TemplateSpaceId, req.Id, req.Spec.Name); err == nil {
return nil, fmt.Errorf("template set's same name %s already exists", req.Spec.Name)
}
templateSet := &table.TemplateSet{
ID: req.Id,
Spec: req.Spec.TemplateSetSpec(),
Attachment: req.Attachment.TemplateSetAttachment(),
Revision: &table.Revision{
Reviser: kt.User,
},
}
if req.Spec.Public == true {
templateSet.Spec.BoundApps = []uint32{}
}
tx := s.dao.GenQuery().Begin()
// 1. update template set
if err = s.dao.TemplateSet().UpdateWithTx(kt, tx, templateSet); err != nil {
logs.Errorf("update template set failed, err: %v, rid: %s", err, kt.Rid)
tx.Rollback()
return nil, err
}
// 2. delete template set for invisible apps if exists
if hasInvisibleApp {
if err = s.dao.TemplateBindingRelation().DeleteTmplSetForInvisibleAppsWithTx(kt, tx, req.Attachment.BizId,
req.Id, invisibleApps); err != nil {
logs.Errorf("delete template set for invisible apps failed, err: %v, rid: %s", err, kt.Rid)
tx.Rollback()
return nil, err
}
}
tx.Commit()
return new(pbbase.EmptyResp), nil
}
// DeleteTemplateSet delete template set.
func (s *Service) DeleteTemplateSet(ctx context.Context, req *pbds.DeleteTemplateSetReq) (*pbbase.EmptyResp, error) {
kt := kit.FromGrpcContext(ctx)
r := &pbds.ListTemplateSetBoundCountsReq{
BizId: req.Attachment.BizId,
TemplateSpaceId: req.Attachment.TemplateSpaceId,
TemplateSetIds: []uint32{req.Id},
}
boundCnt, err := s.ListTemplateSetBoundCounts(ctx, r)
if err != nil {
logs.Errorf("delete template set failed, err: %v, rid: %s", err, kt.Rid)
return nil, err
}
var hasUnnamedApp bool
if len(boundCnt.Details) > 0 {
if boundCnt.Details[0].BoundUnnamedAppCount > 0 {
hasUnnamedApp = true
if !req.Force {
return nil, errors.New("template set is bound to unnamed app, please unbind first")
}
}
}
tx := s.dao.GenQuery().Begin()
// 1. delete template set
templateSet := &table.TemplateSet{
ID: req.Id,
Attachment: req.Attachment.TemplateSetAttachment(),
}
if err = s.dao.TemplateSet().DeleteWithTx(kt, tx, templateSet); err != nil {
logs.Errorf("delete template set failed, err: %v, rid: %s", err, kt.Rid)
tx.Rollback()
return nil, err
}
// 2. delete bound unnamed app if exists
if hasUnnamedApp {
if err = s.dao.TemplateBindingRelation().DeleteTmplSetWithTx(kt, tx, req.Attachment.BizId, req.Id); err != nil {
logs.Errorf("delete template set failed, err: %v, rid: %s", err, kt.Rid)
tx.Rollback()
return nil, err
}
}
tx.Commit()
return new(pbbase.EmptyResp), nil
}
// ListAppTemplateSets list app template set.
func (s *Service) ListAppTemplateSets(ctx context.Context, req *pbds.ListAppTemplateSetsReq) (
*pbds.ListAppTemplateSetsResp, error) {
kt := kit.FromGrpcContext(ctx)
details, err := s.dao.TemplateSet().ListAppTmplSets(kt, req.BizId, req.AppId)
if err != nil {
logs.Errorf("list template sets failed, err: %v, rid: %s", err, kt.Rid)
return nil, err
}
resp := &pbds.ListAppTemplateSetsResp{
Details: pbtset.PbTemplateSets(details),
}
return resp, nil
}
// ListTemplateSetsByIDs list template set by ids.
func (s *Service) ListTemplateSetsByIDs(ctx context.Context, req *pbds.ListTemplateSetsByIDsReq) (
*pbds.ListTemplateSetsByIDsResp, error) {
kt := kit.FromGrpcContext(ctx)
if err := s.dao.Validator().ValidateTemplateSetsExist(kt, req.Ids); err != nil {
return nil, err
}
details, err := s.dao.TemplateSet().ListByIDs(kt, req.Ids)
if err != nil {
logs.Errorf("list template sets failed, err: %v, rid: %s", err, kt.Rid)
return nil, err
}
resp := &pbds.ListTemplateSetsByIDsResp{
Details: pbtset.PbTemplateSets(details),
}
return resp, nil
}
// ListTemplateSetsOfBiz list template sets of one biz.
func (s *Service) ListTemplateSetsOfBiz(ctx context.Context, req *pbds.ListTemplateSetsOfBizReq) (
*pbds.ListTemplateSetsOfBizResp, error) {
kt := kit.FromGrpcContext(ctx)
tmplSets, err := s.dao.TemplateSet().ListAllTemplateSetsOfBiz(kt, req.BizId)
if err != nil {
logs.Errorf("list template sets of biz failed, err: %v, rid: %s", err, kt.Rid)
return nil, err
}
if len(tmplSets) == 0 {
return &pbds.ListTemplateSetsOfBizResp{}, nil
}
// get the map of template space id => template set detail
tmplSetsMap := make(map[uint32]*pbtset.TemplateSetOfBizDetail)
for _, t := range tmplSets {
if _, ok := tmplSetsMap[t.Attachment.TemplateSpaceID]; !ok {
tmplSetsMap[t.Attachment.TemplateSpaceID] = &pbtset.TemplateSetOfBizDetail{}
}
tmplSetsMap[t.Attachment.TemplateSpaceID].TemplateSets = append(
tmplSetsMap[t.Attachment.TemplateSpaceID].TemplateSets,
&pbtset.TemplateSetOfBizDetail_TemplateSetOfBiz{
TemplateSetId: t.ID,
TemplateSetName: t.Spec.Name,
})
}
tmplSpaceIDs := make([]uint32, 0, len(tmplSetsMap))
for tmplSpaceID := range tmplSetsMap {
tmplSpaceIDs = append(tmplSpaceIDs, tmplSpaceID)
}
tmplSpaces, err := s.dao.TemplateSpace().ListByIDs(kt, tmplSpaceIDs)
if err != nil {
logs.Errorf("list template sets of biz failed, err: %v, rid: %s", err, kt.Rid)
return nil, err
}
details := make([]*pbtset.TemplateSetOfBizDetail, 0)
for _, t := range tmplSpaces {
details = append(details, &pbtset.TemplateSetOfBizDetail{
TemplateSpaceId: t.ID,
TemplateSpaceName: t.Spec.Name,
TemplateSets: tmplSetsMap[t.ID].TemplateSets,
})
}
resp := &pbds.ListTemplateSetsOfBizResp{
Details: details,
}
return resp, nil
}
|
package main_test
import (
. "github.com/dgruber/playascii"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Verifies that the generated index.html template embeds a reference to
// the "demo.cast" asciinema recording and that generation succeeds.
var _ = Describe("Template", func() {
	Context("HTML generation", func() {
		It("should generate the index.html", func() {
			index, err := CreateIndexTemplate()
			Ω(err).Should(BeNil())
			Ω(index).Should(ContainSubstring("demo.cast"))
		})
	})
})
|
/*
* Copyright (c) 2020. Ant Group. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
package daemon
import (
"fmt"
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/config"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/pkg/nydussdk"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/pkg/nydussdk/model"
)
const (
	// APISocketFileName is the default file name of a daemon's API socket.
	APISocketFileName = "api.sock"
	// SharedNydusDaemonID identifies the single daemon used in shared mode.
	SharedNydusDaemonID = "shared_daemon"
)

// NewDaemonOpt mutates a Daemon during construction; see NewDaemon.
type NewDaemonOpt func(d *Daemon) error

// Daemon describes one nydusd process managed by the snapshotter.
type Daemon struct {
	ID             string
	SnapshotID     string
	ConfigDir      string // directory holding config.json
	SocketDir      string // directory holding the API socket
	LogDir         string // directory holding stderr.log
	LogLevel       string
	CacheDir       string
	SnapshotDir    string
	Pid            int
	ImageID        string
	DaemonMode     string  // multiple/shared/prefetch, compared against config constants
	ApiSock        *string // optional explicit API socket path override
	RootMountPoint *string // set when snapshots mount under a shared root
}
// SharedMountPoint returns the snapshot's mount path under the shared root
// mount point: <RootMountPoint>/<SnapshotID>/fs.
// NOTE(review): dereferences RootMountPoint — callers must ensure it is set.
func (d *Daemon) SharedMountPoint() string {
	root := *d.RootMountPoint
	return filepath.Join(root, d.SnapshotID, "fs")
}
// MountPoint returns where this daemon's snapshot is exposed. When a shared
// root mount point is configured the path is rooted at "/"; otherwise it
// lives under the snapshot directory.
func (d *Daemon) MountPoint() string {
	if d.RootMountPoint == nil {
		return filepath.Join(d.SnapshotDir, d.SnapshotID, "fs")
	}
	return filepath.Join("/", d.SnapshotID, "fs")
}
// BootstrapFile returns the path of this daemon's bootstrap (meta) file.
func (d *Daemon) BootstrapFile() (string, error) {
	return GetBootstrapFile(d.SnapshotDir, d.SnapshotID)
}
// ConfigFile returns the path of the daemon's JSON configuration file.
func (d *Daemon) ConfigFile() string {
	const name = "config.json"
	return filepath.Join(d.ConfigDir, name)
}
// APISock returns the daemon's API socket path, preferring an explicit
// override and falling back to <SocketDir>/api.sock.
func (d *Daemon) APISock() string {
	if d.ApiSock == nil {
		return filepath.Join(d.SocketDir, APISocketFileName)
	}
	return *d.ApiSock
}
// LogFile returns the path of the file capturing the daemon's stderr.
func (d *Daemon) LogFile() string {
	const name = "stderr.log"
	return filepath.Join(d.LogDir, name)
}
// CheckStatus queries the daemon over its API socket and returns the
// reported DaemonInfo.
func (d *Daemon) CheckStatus() (model.DaemonInfo, error) {
	client, err := nydussdk.NewNydusClient(d.APISock())
	if err != nil {
		return model.DaemonInfo{}, errors.Wrap(err, "failed to check status, client has not been initialized")
	}
	return client.CheckStatus()
}
// SharedMount asks the (shared) nydusd daemon, via its API socket, to mount
// this daemon's bootstrap at its mount point using its config file.
func (d *Daemon) SharedMount() error {
	client, err := nydussdk.NewNydusClient(d.APISock())
	if err != nil {
		return errors.Wrap(err, "failed to mount")
	}
	bootstrap, err := d.BootstrapFile()
	if err != nil {
		return err
	}
	return client.SharedMount(d.MountPoint(), bootstrap, d.ConfigFile())
}
// SharedUmount asks the (shared) nydusd daemon, via its API socket, to
// unmount this daemon's mount point.
func (d *Daemon) SharedUmount() error {
	client, err := nydussdk.NewNydusClient(d.APISock())
	if err != nil {
		// Fixed copy-paste: this error previously said "failed to mount".
		return errors.Wrap(err, "failed to umount")
	}
	return client.Umount(d.MountPoint())
}
// IsMultipleDaemon reports whether the daemon runs in multiple mode
// (one daemon per snapshot).
func (d *Daemon) IsMultipleDaemon() bool {
	return d.DaemonMode == config.DaemonModeMultiple
}

// IsSharedDaemon reports whether the daemon runs in shared mode
// (one daemon serving all snapshots).
func (d *Daemon) IsSharedDaemon() bool {
	return d.DaemonMode == config.DaemonModeShared
}

// IsPrefetchDaemon reports whether the daemon runs in prefetch mode.
func (d *Daemon) IsPrefetchDaemon() bool {
	return d.DaemonMode == config.DaemonModePrefetch
}
// NewDaemon builds a Daemon with a fresh ID and the default daemon mode,
// then applies the given options in order; the first failing option aborts
// construction.
func NewDaemon(opt ...NewDaemonOpt) (*Daemon, error) {
	d := &Daemon{
		Pid:        0,
		ID:         newID(),
		DaemonMode: config.DefaultDaemonMode,
	}
	for _, apply := range opt {
		if err := apply(d); err != nil {
			return nil, err
		}
	}
	return d, nil
}
// GetBootstrapFile locates the bootstrap (meta) file for the snapshot `id`
// under dir. The current layout <id>/fs/image/image.boot is checked first,
// falling back to the legacy <id>/fs/image.boot for backward compatibility.
// The returned error wraps the last stat failure.
func GetBootstrapFile(dir, id string) (string, error) {
	// the meta file is stored to <snapshotid>/fs/image/image.boot
	bootstrap := filepath.Join(dir, id, "fs", "image", "image.boot")
	_, err := os.Stat(bootstrap)
	if err == nil {
		return bootstrap, nil
	}
	if os.IsNotExist(err) {
		// for backward compatibility check meta file from legacy location
		bootstrap = filepath.Join(dir, id, "fs", "image.boot")
		_, err = os.Stat(bootstrap)
		if err == nil {
			return bootstrap, nil
		}
	}
	// Simplified from errors.Wrap(err, fmt.Sprintf(...)) to Wrapf.
	return "", errors.Wrapf(err, "failed to find bootstrap file for ID %s", id)
}
|
// +build ignore
package main
import (
"log"
"net/http"
"strings"
)
const dir = "."
// main serves the current directory on :8080, disabling caching and forcing
// the application/wasm content type for .wasm files so browsers can
// instantiate them via streaming compilation.
func main() {
	fs := http.FileServer(http.Dir(dir))
	log.Print("Serving " + dir + " on http://localhost:8080")
	err := http.ListenAndServe(":8080", http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		resp.Header().Add("Cache-Control", "no-cache")
		if strings.HasSuffix(req.URL.Path, ".wasm") {
			resp.Header().Set("content-type", "application/wasm")
		}
		fs.ServeHTTP(resp, req)
	}))
	// ListenAndServe only returns on failure; the error was silently
	// discarded before, leaving e.g. a busy port undiagnosed.
	if err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
type DiskStats struct {
Name string
ReadsCompleted, WritesCompleted uint64
}
func GetDiskStats() ([]DiskStats, error) {
file, err := os.Open("/proc/diskstats")
if err != nil {
return nil, fmt.Errorf("Could not read /proc/diskstats, found error: %s", err)
}
defer file.Close()
return getDiskStats(file)
}
func getDiskStats(out io.Reader) ([]DiskStats, error) {
scanner := bufio.NewScanner(out)
var diskStats []DiskStats
for scanner.Scan() {
fields := strings.Fields(scanner.Text())
if len(fields) < 14 {
continue
}
name := fields[2]
readsCompleted, err := strconv.ParseUint(fields[3], 10, 64)
if err != nil {
return nil, fmt.Errorf("Failed to parse reads completed, found error: %s", err)
}
writesCompleted, err := strconv.ParseUint(fields[7], 10, 64)
if err != nil {
return nil, fmt.Errorf("Failed to parse writes completed, found error: %s", err)
}
diskStats = append(diskStats, DiskStats{
Name: name,
WritesCompleted: writesCompleted,
ReadsCompleted: readsCompleted,
})
}
err := scanner.Err()
if err != nil {
return nil, fmt.Errorf("Could not use the scanner, found error: %s", err)
}
return diskStats, nil
}
|
/*
Your challenge for today is to create a program which is password protected, and wont open unless the correct user and password is given.
For extra credit, have the user and password in a seperate .txt file.
For even more extra credit, break into your own program :)
*/
package main
import (
"crypto/sha512"
"crypto/subtle"
"fmt"
"strings"
)
// main repeatedly prompts for a username and password until auth accepts
// the pair, then prints FREEDOM.
func main() {
	for {
		// Read errors are deliberately ignored: a failed read simply
		// produces an empty credential and the loop re-prompts.
		user, _ := input("Username")
		pass, _ := input("Password")
		if auth(user, pass) {
			break
		}
	}
	fmt.Println("FREEDOM")
}
// auth accepts exactly the user "luser" whose password's SHA-512 hex digest
// matches the embedded constant. Both comparisons use constant time and are
// combined with bitwise AND so timing does not reveal which check failed.
func auth(name, pass string) bool {
	digest := fmt.Sprintf("%x", sha512.Sum512([]byte(pass)))
	userOK := subtle.ConstantTimeCompare([]byte(name), []byte("luser"))
	passOK := subtle.ConstantTimeCompare([]byte(digest), []byte("197bcc298645b84bd95bba43749f708745375ce30262d3ecdc8439605d157786a6f9d8bac4a8dd31e9e68364820e3fc39ab6a15a75988d994f3a6361d0d37309"))
	return userOK&passOK != 0
}
// input prints a prompt, reads one whitespace-delimited token from stdin,
// and returns it trimmed along with any scan error.
func input(prompt string) (string, error) {
	fmt.Printf("%s: ", prompt)
	var v string
	_, err := fmt.Scan(&v)
	return strings.TrimSpace(v), err
}
|
package main
import "fmt"
func findMaxConsecutiveOnes(nums []int) int {
count := 0
max := 0
for _, v := range nums {
if v == 1 {
count++
}
if v == 0 {
if count > max {
max = count
}
count = 0
}
}
if count > max {
max = count
}
return max
}
// main prints the longest run of consecutive 1s in the sample slice
// (expects 9: the trailing run).
func main() {
	nums := []int{1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
	fmt.Println(findMaxConsecutiveOnes(nums))
}
|
package myos
import (
"fmt"
"io"
"reflect"
"strings"
"testing"
)
// TestUnicode prints the rune and byte views of a mixed ASCII/CJK string to
// illustrate that ranging a string yields runes at byte offsets, then shows
// the reflect.Type of a string and of strings.Builder.
func TestUnicode(t *testing.T) {
	str := "Go 爱好者 "
	fmt.Printf("The string: %q\n", str)
	fmt.Printf(" => runes(char): %q\n", []rune(str))
	// rune == int32, i.e. each rune occupies four bytes in a []rune
	fmt.Printf(" => runes(hex): %x\n", []rune(str))
	fmt.Printf(" => bytes(hex): [% x]\n", []byte(str))
	for i, c := range str {
		fmt.Printf("%d: %q [% x]\n", i, c, []byte(string(c)))
	}
	restype := reflect.TypeOf(str)
	fmt.Println(restype)
	var builder1 strings.Builder
	restype = reflect.TypeOf(builder1)
	fmt.Println(restype)
}
// TestString exercises strings.Builder's write, Grow, and Reset methods,
// printing the length and contents after each stage. Note that Grow only
// reserves capacity — Len() is unchanged — and Reset empties the builder.
func TestString(t *testing.T) {
	var builder1 strings.Builder
	builder1.WriteString("hello world!!!")
	fmt.Printf("the first output(%d):\n%q\n", builder1.Len(), builder1.String())
	fmt.Println()
	builder1.WriteByte(' ')
	builder1.WriteString("it minimizes memory copying.the zero value is ready to use")
	builder1.Write([]byte{'\n', '\n'})
	builder1.WriteString("Do not copy a non-zero Builder.")
	fmt.Printf("the second output(%d):\n\"%s\"\n", builder1.Len(), builder1.String())
	fmt.Println("Grow the builder")
	builder1.Grow(10)
	fmt.Printf("The length of contents in the builder is %d.\n", builder1.Len())
	fmt.Println()
	fmt.Println("Reset the bulder")
	builder1.Reset()
	fmt.Printf("the third outpt(%d):\n%q\n", builder1.Len(), builder1.String())
}
// TestStringBuiler demonstrates strings.Builder's copy check: once a
// Builder has been used, calling Grow/Write on a copy panics. The panicking
// calls are left commented out; copying becomes legal again after Reset.
func TestStringBuiler(t *testing.T) {
	var builder1 strings.Builder
	builder1.Grow(1)
	// passing by value into a function copies the builder
	f1 := func(b strings.Builder) {
		//b.Grow(1)
	}
	f1(builder1)
	ch1 := make(chan strings.Builder, 1)
	ch1 <- builder1
	// passing by value through a channel also copies it
	builder2 := <-ch1
	//builder2.Grow(1)
	_ = builder2
	// plain assignment copies it as well
	builder3 := builder1
	//builder3.Grow(1)
	_ = builder3
	f2 := func(bp *strings.Builder) {
		(*bp).Grow(1) // via pointer: no panic here, but not safe for concurrent use
		builder4 := *bp
		//builder4.Grow(1) // dereferencing and copying would still panic
		_ = builder4
	}
	f2(&builder1)
	builder1.Reset()
	builder5 := builder1
	builder5.Grow(1)
	builder5.WriteString("hello")
}
// TestStringsReader shows how strings.Reader tracks its reading index:
// Read advances it, ReadAt does not, and Seek moves it explicitly. The
// index is derived as Size() - Len() throughout.
func TestStringsReader(t *testing.T) {
	reader1 := strings.NewReader(
		"NewReader returns a new Reader reading from s. " +
			"It is similar to bytes.NewBufferString but more efficient and read-only.")
	fmt.Printf("the size of reader:%d\n", reader1.Size())
	fmt.Printf("The reading index in reader:%d\n", (reader1.Size() - int64(reader1.Len())))
	buf1 := make([]byte, 47)
	n, _ := reader1.Read(buf1)
	fmt.Printf("%d bytes were read.(call Read)\n", n)
	fmt.Printf("The reading index in reader:%d\n", reader1.Size()-int64(reader1.Len()))
	fmt.Println()
	// ReadAt reads at an absolute offset and leaves the reading index alone.
	buf2 := make([]byte, 21)
	offset1 := int64(64)
	n, _ = reader1.ReadAt(buf2, offset1)
	fmt.Printf("%d bytes were read.(call ReadAt,offset:%d)\n", n, offset1)
	fmt.Printf("The reading index in reader:%d\n", reader1.Size()-int64(reader1.Len()))
	fmt.Println()
	// Seek relative to the current position; compare its return value with
	// the index computed from Size()/Len() before the seek.
	offset2 := int64(17)
	expectedIndex := reader1.Size() - int64(reader1.Len()) + offset2
	fmt.Printf("Seek with offset %d and whence %d ...\n", offset2, io.SeekCurrent)
	readingIndex, _ := reader1.Seek(offset2, io.SeekCurrent)
	fmt.Printf("The reading index in reader:%d(returned by Seek)\n", readingIndex)
	fmt.Printf("The reading index in reader:%d(computed by me)\n", expectedIndex)
	n, _ = reader1.Read(buf2)
	fmt.Printf("%d bytes were read.(call Read)\n", n)
	fmt.Printf("the reading index in reader: %d\n", reader1.Size()-int64(reader1.Len()))
}
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/ddl/util/callback"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/external"
"github.com/pingcap/tidb/util/domainutil"
"github.com/stretchr/testify/require"
)
const repairTableLease = 600 * time.Millisecond
// TestRepairTable exercises `admin repair table` end to end: the
// repair-mode and repair-list preconditions, validation of the supplied
// CREATE TABLE against the stored definition (columns, types, indexes),
// and a successful repair that must preserve table/column/index IDs.
func TestRepairTable(t *testing.T) {
	// Make infoschema serve the "repaired" create-table info for this test.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable", `return(true)`))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable"))
	}()
	store, dom := testkit.CreateMockStoreAndDomainWithSchemaLease(t, repairTableLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Test repair table when TiDB is not in repair mode.
	tk.MustExec("CREATE TABLE t (a int primary key nonclustered, b varchar(10));")
	tk.MustGetErrMsg("admin repair table t CREATE TABLE t (a float primary key, b varchar(5));", "[ddl:8215]Failed to repair table: TiDB is not in REPAIR MODE")
	// Test repair table when the repaired list is empty.
	domainutil.RepairInfo.SetRepairMode(true)
	tk.MustGetErrMsg("admin repair table t CREATE TABLE t (a float primary key, b varchar(5));", "[ddl:8215]Failed to repair table: repair list is empty")
	// Test repair table when its database isn't in repairInfo.
	domainutil.RepairInfo.SetRepairTableList([]string{"test.other_table"})
	tk.MustGetErrMsg("admin repair table t CREATE TABLE t (a float primary key, b varchar(5));", "[ddl:8215]Failed to repair table: database test is not in repair")
	// Test repair table when the table isn't in repairInfo.
	tk.MustExec("CREATE TABLE other_table (a int, b varchar(1), key using hash(b));")
	tk.MustGetErrMsg("admin repair table t CREATE TABLE t (a float primary key, b varchar(5));", "[ddl:8215]Failed to repair table: table t is not in repair")
	// Test user can't access the repaired table while it is in repair.
	tk.MustGetErrMsg("select * from other_table", "[schema:1146]Table 'test.other_table' doesn't exist")
	// Test create statement using the same name as a table in repair.
	tk.MustGetErrMsg("CREATE TABLE other_table (a int);", "[ddl:1103]Incorrect table name 'other_table'%!(EXTRA string=this table is in repair)")
	// Test column lost in repair table.
	tk.MustGetErrMsg("admin repair table other_table CREATE TABLE other_table (a int, c char(1));", "[ddl:8215]Failed to repair table: Column c has lost")
	// Test column type should be the same.
	tk.MustGetErrMsg("admin repair table other_table CREATE TABLE other_table (a bigint, b varchar(1), key using hash(b));", "[ddl:8215]Failed to repair table: Column a type should be the same")
	// Test index lost in repair table.
	tk.MustGetErrMsg("admin repair table other_table CREATE TABLE other_table (a int unique);", "[ddl:8215]Failed to repair table: Index a has lost")
	// Test index type should be the same.
	tk.MustGetErrMsg("admin repair table other_table CREATE TABLE other_table (a int, b varchar(2) unique)", "[ddl:8215]Failed to repair table: Index b type should be the same")
	// Test sub create statement in repair statement with the same name.
	tk.MustExec("admin repair table other_table CREATE TABLE other_table (a int);")
	// Test whether repair table name is case-sensitive.
	domainutil.RepairInfo.SetRepairMode(true)
	domainutil.RepairInfo.SetRepairTableList([]string{"test.other_table2"})
	tk.MustExec("CREATE TABLE otHer_tAblE2 (a int, b varchar(1));")
	tk.MustExec("admin repair table otHer_tAblE2 CREATE TABLE otHeR_tAbLe (a int, b varchar(2));")
	repairTable := external.GetTableByName(t, tk, "test", "otHeR_tAbLe") //nolint:typecheck
	require.Equal(t, "otHeR_tAbLe", repairTable.Meta().Name.O)
	// Test memory and system database is not for repair.
	domainutil.RepairInfo.SetRepairMode(true)
	domainutil.RepairInfo.SetRepairTableList([]string{"test.xxx"})
	tk.MustGetErrMsg("admin repair table performance_schema.xxx CREATE TABLE yyy (a int);", "[ddl:8215]Failed to repair table: memory or system database is not for repair")
	// Test the repair detail.
	turnRepairModeAndInit(true)
	defer turnRepairModeAndInit(false)
	// Domain reload the tableInfo and add it into repairInfo.
	tk.MustExec("CREATE TABLE origin (a int primary key nonclustered auto_increment, b varchar(10), c int);")
	// Repaired tableInfo has been filtered by `domain.InfoSchema()`, so get it in repairInfo.
	originTableInfo, _ := domainutil.RepairInfo.GetRepairedTableInfoByTableName("test", "origin")
	// Install a DDL hook that validates the repair job while it runs:
	// it must reuse the original table ID and run in StateNone.
	hook := &callback.TestDDLCallback{Do: dom}
	var repairErr error
	hook.OnJobRunBeforeExported = func(job *model.Job) {
		if job.Type != model.ActionRepairTable {
			return
		}
		if job.TableID != originTableInfo.ID {
			repairErr = errors.New("table id should be the same")
			return
		}
		if job.SchemaState != model.StateNone {
			repairErr = errors.New("repair job state should be the none")
			return
		}
		// Test whether it's readable, when repaired table is still stateNone.
		tk := testkit.NewTestKit(t, store)
		tk.MustExec("use test")
		_, repairErr = tk.Exec("select * from origin")
		// Repaired tableInfo has been filtered by `domain.InfoSchema()`, here will get an error cause user can't get access to it.
		if repairErr != nil && terror.ErrorEqual(repairErr, infoschema.ErrTableNotExists) {
			repairErr = nil
		}
	}
	originalHook := dom.DDL().GetHook()
	defer dom.DDL().SetHook(originalHook)
	dom.DDL().SetHook(hook)
	// Exec the repair statement to override the tableInfo.
	tk.MustExec("admin repair table origin CREATE TABLE origin (a int primary key nonclustered auto_increment, b varchar(5), c int);")
	require.NoError(t, repairErr)
	// Check the repaired tableInfo is exactly the same with old one in tableID, indexID, colID.
	// testGetTableByName will extract the Table from `domain.InfoSchema()` directly.
	repairTable = external.GetTableByName(t, tk, "test", "origin") //nolint:typecheck
	require.Equal(t, originTableInfo.ID, repairTable.Meta().ID)
	require.Equal(t, 3, len(repairTable.Meta().Columns))
	require.Equal(t, originTableInfo.Columns[0].ID, repairTable.Meta().Columns[0].ID)
	require.Equal(t, originTableInfo.Columns[1].ID, repairTable.Meta().Columns[1].ID)
	require.Equal(t, originTableInfo.Columns[2].ID, repairTable.Meta().Columns[2].ID)
	require.Equal(t, 1, len(repairTable.Meta().Indices))
	require.Equal(t, originTableInfo.Columns[0].ID, repairTable.Meta().Indices[0].ID)
	require.Equal(t, originTableInfo.AutoIncID, repairTable.Meta().AutoIncID)
	// The repaired definition (b varchar(5)) must be the one in effect now.
	require.Equal(t, mysql.TypeLong, repairTable.Meta().Columns[0].GetType())
	require.Equal(t, mysql.TypeVarchar, repairTable.Meta().Columns[1].GetType())
	require.Equal(t, 5, repairTable.Meta().Columns[1].GetFlen())
	require.Equal(t, mysql.TypeLong, repairTable.Meta().Columns[2].GetType())
	// Exec the show create table statement to make sure new tableInfo has been set.
	result := tk.MustQuery("show create table origin")
	require.Equal(t, "CREATE TABLE `origin` (\n `a` int(11) NOT NULL AUTO_INCREMENT,\n `b` varchar(5) DEFAULT NULL,\n `c` int(11) DEFAULT NULL,\n PRIMARY KEY (`a`) /*T![clustered_index] NONCLUSTERED */\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", result.Rows()[0][1])
}
// turnRepairModeAndInit toggles TiDB repair mode, seeding the repair list
// with "test.origin" when enabling and clearing it when disabling.
func turnRepairModeAndInit(on bool) {
	var repairList []string
	if on {
		repairList = []string{"test.origin"}
	}
	domainutil.RepairInfo.SetRepairMode(on)
	domainutil.RepairInfo.SetRepairTableList(repairList)
}
// TestRepairTableWithPartition checks repair validation for partitioned
// tables: range-partition definitions must match the stored ones, hash
// partition counts must be identical, and a successful repair keeps the
// original table and partition IDs.
func TestRepairTableWithPartition(t *testing.T) {
	// Make infoschema serve the "repaired" create-table info for this test.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable", `return(true)`))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable"))
	}()
	store := testkit.CreateMockStoreWithSchemaLease(t, repairTableLease)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists origin")
	turnRepairModeAndInit(true)
	defer turnRepairModeAndInit(false)
	// Domain reload the tableInfo and add it into repairInfo.
	tk.MustExec("create table origin (a int not null) partition by RANGE(a) (" +
		"partition p10 values less than (10)," +
		"partition p30 values less than (30)," +
		"partition p50 values less than (50)," +
		"partition p70 values less than (70)," +
		"partition p90 values less than (90));")
	// Test for some old partition has lost.
	tk.MustGetErrMsg("admin repair table origin create table origin (a int not null) partition by RANGE(a) ("+
		"partition p10 values less than (10),"+
		"partition p30 values less than (30),"+
		"partition p50 values less than (50),"+
		"partition p90 values less than (90),"+
		"partition p100 values less than (100));", "[ddl:8215]Failed to repair table: Partition p100 has lost")
	// Test for some partition changed the condition.
	tk.MustGetErrMsg("admin repair table origin create table origin (a int not null) partition by RANGE(a) ("+
		"partition p10 values less than (10),"+
		"partition p20 values less than (25),"+
		"partition p50 values less than (50),"+
		"partition p90 values less than (90));", "[ddl:8215]Failed to repair table: Partition p20 has lost")
	// Test for some partition changed the partition name.
	tk.MustGetErrMsg("admin repair table origin create table origin (a int not null) partition by RANGE(a) ("+
		"partition p10 values less than (10),"+
		"partition p30 values less than (30),"+
		"partition pNew values less than (50),"+
		"partition p90 values less than (90));", "[ddl:8215]Failed to repair table: Partition pnew has lost")
	originTableInfo, _ := domainutil.RepairInfo.GetRepairedTableInfoByTableName("test", "origin")
	// A valid repair: the table is renamed and p70 is dropped from the
	// stored definition.
	tk.MustExec("admin repair table origin create table origin_rename (a int not null) partition by RANGE(a) (" +
		"partition p10 values less than (10)," +
		"partition p30 values less than (30)," +
		"partition p50 values less than (50)," +
		"partition p90 values less than (90));")
	repairTable := external.GetTableByName(t, tk, "test", "origin_rename") //nolint:typecheck
	require.Equal(t, originTableInfo.ID, repairTable.Meta().ID)
	require.Equal(t, 1, len(repairTable.Meta().Columns))
	require.Equal(t, originTableInfo.Columns[0].ID, repairTable.Meta().Columns[0].ID)
	require.Equal(t, 4, len(repairTable.Meta().Partition.Definitions))
	// Kept partitions retain their original IDs; p90 (index 4 before the
	// repair) is now at index 3.
	require.Equal(t, originTableInfo.Partition.Definitions[0].ID, repairTable.Meta().Partition.Definitions[0].ID)
	require.Equal(t, originTableInfo.Partition.Definitions[1].ID, repairTable.Meta().Partition.Definitions[1].ID)
	require.Equal(t, originTableInfo.Partition.Definitions[2].ID, repairTable.Meta().Partition.Definitions[2].ID)
	require.Equal(t, originTableInfo.Partition.Definitions[4].ID, repairTable.Meta().Partition.Definitions[3].ID)
	// Test hash partition.
	tk.MustExec("drop table if exists origin")
	domainutil.RepairInfo.SetRepairMode(true)
	domainutil.RepairInfo.SetRepairTableList([]string{"test.origin"})
	tk.MustExec("create table origin (a varchar(1), b int not null, c int, key idx(c)) partition by hash(b) partitions 30")
	// Test partition num in repair should be exactly same with old one, other wise will cause partition semantic problem.
	tk.MustGetErrMsg("admin repair table origin create table origin (a varchar(2), b int not null, c int, key idx(c)) partition by hash(b) partitions 20", "[ddl:8215]Failed to repair table: Hash partition num should be the same")
	originTableInfo, _ = domainutil.RepairInfo.GetRepairedTableInfoByTableName("test", "origin")
	tk.MustExec("admin repair table origin create table origin (a varchar(3), b int not null, c int, key idx(c)) partition by hash(b) partitions 30")
	repairTable = external.GetTableByName(t, tk, "test", "origin") //nolint:typecheck
	require.Equal(t, originTableInfo.ID, repairTable.Meta().ID)
	require.Equal(t, 30, len(repairTable.Meta().Partition.Definitions))
	require.Equal(t, originTableInfo.Partition.Definitions[0].ID, repairTable.Meta().Partition.Definitions[0].ID)
	require.Equal(t, originTableInfo.Partition.Definitions[1].ID, repairTable.Meta().Partition.Definitions[1].ID)
	require.Equal(t, originTableInfo.Partition.Definitions[29].ID, repairTable.Meta().Partition.Definitions[29].ID)
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
// TreeNode is a binary tree node holding an int payload.
type TreeNode struct {
	left, right *TreeNode // child pointers; nil marks a missing child
	data        int       // node payload
}

// serialize encodes the tree as a comma-separated preorder listing, with
// "#" marking every nil child. A nil root encodes to the empty string.
func serialize(root *TreeNode) string {
	if root == nil {
		return ""
	}
	var parts []string
	var walk func(*TreeNode)
	walk = func(n *TreeNode) {
		if n == nil {
			parts = append(parts, "#")
			return
		}
		parts = append(parts, strconv.Itoa(n.data))
		walk(n.left)
		walk(n.right)
	}
	walk(root)
	return strings.Join(parts, ",")
}
// deserializeInternal rebuilds a tree from its preorder token list.
// currPos tracks the next token to consume; "#" denotes a nil child.
// It returns nil for a nil child, for exhausted input, or for a token
// that is not a valid integer.
func deserializeInternal(treeStrParts []string, currPos *int) *TreeNode {
	// Bounds guard: the previous code indexed treeStrParts[*currPos]
	// unconditionally and panicked on truncated/malformed input.
	// (This also covers the old len(treeStrParts) == 0 check.)
	if *currPos >= len(treeStrParts) {
		return nil
	}
	if treeStrParts[*currPos] == "#" {
		*currPos++
		return nil
	}
	nodeData, err := strconv.Atoi(treeStrParts[*currPos])
	if err != nil {
		fmt.Printf("Invalid data: %v\n", treeStrParts[*currPos])
		return nil
	}
	newNode := &TreeNode{data: nodeData}
	*currPos++
	newNode.left = deserializeInternal(treeStrParts, currPos)
	newNode.right = deserializeInternal(treeStrParts, currPos)
	return newNode
}
// deserialize reconstructs a binary tree from the comma-separated
// preorder encoding produced by serialize.
func deserialize(treeStr string) *TreeNode {
	var pos int
	parts := strings.Split(treeStr, ",")
	return deserializeInternal(parts, &pos)
}
// test1 round-trips a small fixed tree through serialize/deserialize and
// prints both encodings so they can be compared by eye.
func test1() {
	leaf2 := TreeNode{data: 2}
	leaf4 := TreeNode{data: 4}
	leaf5 := TreeNode{data: 5}
	rightChild := TreeNode{data: 3, left: &leaf4, right: &leaf5}
	root := TreeNode{data: 1, left: &leaf2, right: &rightChild}

	encoded := serialize(&root)
	fmt.Printf("Serialized form: %v\n", encoded)
	decoded := deserialize(encoded)
	fmt.Printf("Serialized form of deserialized tree: %v\n", serialize(decoded))
}
// main prints a banner and runs the round-trip demo.
func main() {
	fmt.Println("Serialize/deserialize binary tree")
	test1()
}
|
package pg
import (
"encoding/json"
"fmt"
"strings"
. "grm-service/dbcentral/pg"
"applications/data-collection/types"
)
// MetaDB wraps the shared metadata database connection (MetaCentralDB)
// and adds data-collection operations on the meta_object table.
type MetaDB struct {
	MetaCentralDB
}
// AddDataObject inserts a new vector-data (Shape) record into meta_object.
//
// data is the object uuid, name the display name, dataset/device/user
// identify the origin, and shpType is the shapefile type. The row is
// seeded with a default "Shape" meta_json template (only the name field
// is filled in) and a fixed placeholder envelope polygon.
// It returns any error from the INSERT.
func (db MetaDB) AddDataObject(data, name, dataset, device, user, shpType string) error {
	// esc doubles single quotes so an interpolated value cannot terminate
	// the surrounding SQL string literal (guards against broken statements
	// and basic SQL injection). NOTE(review): parameterized queries would
	// be safer still if db.Conn supports them — confirm and migrate.
	esc := func(s string) string { return strings.ReplaceAll(s, "'", "''") }
	// Default metadata document for a Shape object; only "name" is filled.
	// NOTE(review): a name containing a double quote would still corrupt
	// this JSON document — confirm upstream validation.
	metajson := fmt.Sprintf(`
{
"full_valid" : false,
"label" : "矢量数据",
"metadata" : [
{
"group" : "Basic Information",
"label" : "基本信息",
"value" : [
{
"classify" : "false",
"name" : "create_time",
"required" : "false",
"system" : "false",
"title" : "创建时间",
"type" : "time",
"value" : "1000-01-01"
},
{
"classify" : "true",
"name" : "data_time",
"required" : "true",
"system" : "false",
"title" : "数据时间",
"type" : "date",
"value" : "1000-01-01"
},
{
"classify" : "false",
"name" : "description",
"required" : "false",
"system" : "false",
"title" : "描述",
"type" : "string",
"value" : ""
},
{
"classify" : "false",
"name" : "envelope",
"required" : "true",
"system" : "false",
"title" : "地理坐标",
"type" : "array",
"value" : ""
},
{
"classify" : "false",
"name" : "feature_class_count",
"required" : "true",
"system" : "true",
"title" : "类型数量",
"type" : "int",
"value" : ""
},
{
"classify" : "false",
"name" : "feature_nums",
"required" : "true",
"system" : "true",
"title" : "要素个数",
"type" : "bigint",
"value" : ""
},
{
"classify" : "false",
"name" : "field_list",
"required" : "false",
"system" : "true",
"title" : "字段列表",
"type" : "string",
"value" : ""
},
{
"classify" : "false",
"name" : "file_size",
"required" : "false",
"system" : "true",
"title" : "文件大小",
"type" : "bigint",
"value" : ""
},
{
"classify" : "false",
"name" : "name",
"required" : "true",
"system" : "false",
"title" : "矢量名称",
"type" : "string",
"value" : "%s"
},
{
"classify" : "false",
"name" : "north_east_x",
"required" : "true",
"system" : "true",
"title" : "东北图廓角点X坐标",
"type" : "int",
"value" : ""
},
{
"classify" : "false",
"name" : "north_east_y",
"required" : "true",
"system" : "true",
"title" : "东北图廓角点Y坐标",
"type" : "int",
"value" : ""
},
{
"classify" : "false",
"name" : "north_west_x",
"required" : "true",
"system" : "true",
"title" : "西北图廓角点X坐标",
"type" : "int",
"value" : ""
},
{
"classify" : "false",
"name" : "north_west_y",
"required" : "true",
"system" : "true",
"title" : "西北图廓角点Y坐标",
"type" : "int",
"value" : ""
},
{
"classify" : "false",
"name" : "path",
"required" : "true",
"system" : "true",
"title" : "路径",
"type" : "string",
"value" : ""
},
{
"classify" : "false",
"name" : "ref_system",
"required" : "true",
"system" : "false",
"title" : "参考系",
"type" : "string",
"value" : ""
},
{
"classify" : "true",
"name" : "shp_type",
"required" : "true",
"system" : "true",
"title" : "矢量类型",
"type" : "string",
"value" : ""
},
{
"classify" : "false",
"name" : "south_east_x",
"required" : "true",
"system" : "true",
"title" : "东南图廓角点X坐标",
"type" : "int",
"value" : ""
},
{
"classify" : "false",
"name" : "south_east_y",
"required" : "true",
"system" : "true",
"title" : "东南图廓角点Y坐标",
"type" : "int",
"value" : ""
},
{
"classify" : "false",
"name" : "south_west_x",
"required" : "true",
"system" : "true",
"title" : "西南图廓角点X坐标",
"type" : "int",
"value" : ""
},
{
"classify" : "false",
"name" : "south_west_y",
"required" : "true",
"system" : "true",
"title" : "西南图廓角点Y坐标",
"type" : "int",
"value" : ""
}
]
}
],
"type" : "Shape"
}`, name)
	// Fixed placeholder extent polygon (raw SQL expression, interpolated
	// unquoted on purpose — it is a constant, not user input).
	envelope := `ST_GeomFromGeoJSON('{
"type": "Polygon",
"coordinates": [
[
[116.28852713281697, 40.22718090480737],
[116.87148916894972, 40.22980208808957],
[116.83097708398881, 41.06531175295575],
[116.25556814844191, 41.06065225097328],
[116.28852713281697, 40.22718090480737]
]
]
}')`
	sql := fmt.Sprintf(`insert into meta_object(uuid,name,meta_type,create_time,dataset,device,load_user,
data_time,ref_system,shp_type,path,file_size,meta_json,envelope)
values('%s','%s','Shape',current_timestamp,'%s','%s',
'%s','1000-01-01','GCS_WGS_1984','%s','',-1,'%s',%s)`,
		esc(data), esc(name), esc(dataset), esc(device), esc(user), esc(shpType), esc(metajson), envelope)
	_, err := db.Conn.Exec(sql)
	return err
}
// UpdateDataMeta merges the metadata entries in req into the stored
// meta_json document of the meta_object row identified by dataId, then
// writes the merged document back. Metadata whose name exactly matches
// one of the table's first-class columns is also written to that column,
// and req.DisplayField (when set) updates display_field.
//
// It returns any query/scan/unmarshal/marshal/exec error; a dataId with
// no matching row is not an error.
func (db *MetaDB) UpdateDataMeta(dataId string, req *types.UpdateMetaRequest) error {
	// First-class columns of meta_object that may be updated alongside
	// meta_json. Exact-name lookup replaces the previous substring test
	// (strings.LastIndex against one big string), which wrongly matched
	// names such as "type" inside "projection_type" and produced invalid
	// SET clauses.
	metaColumns := map[string]bool{
		"name": true, "path": true, "file_size": true, "create_time": true,
		"data_time": true, "projection_type": true, "resolution": true,
		"size": true, "ref_system": true, "coord_unit": true,
		"thumb_path": true, "snap_path": true, "shp_type": true,
		"feature_nums": true, "sat_type": true, "sensor": true,
		"description": true,
	}
	// esc doubles single quotes so interpolated values cannot terminate
	// the surrounding SQL string literal (the marshaled JSON in particular
	// may contain quotes).
	esc := func(s string) string { return strings.ReplaceAll(s, "'", "''") }
	sql := fmt.Sprintf(`select meta_json from meta_object where uuid = '%s'`, esc(dataId))
	rows, err := db.Conn.Query(sql)
	if err != nil {
		return err
	}
	defer rows.Close()
	var metaJSON string
	if rows.Next() {
		if err = rows.Scan(&metaJSON); err != nil {
			fmt.Printf("rows.Scan error: %s\n", err.Error())
			return err
		}
		var metas types.DataMeta
		if err := json.Unmarshal([]byte(metaJSON), &metas); err != nil {
			fmt.Printf("Failed to parse meta json : %s\n", err.Error())
			return err
		}
		for iGroup := range metas.MetaData {
			// When a group filter is given, only merge into that group.
			if len(req.Group) > 0 && metas.MetaData[iGroup].Group != req.Group {
				continue
			}
			for _, meta := range req.Metas {
				exists := false
				for jData := range metas.MetaData[iGroup].Value {
					if metas.MetaData[iGroup].Value[jData].Name != meta.Name {
						continue
					}
					exists = true
					if metas.MetaData[iGroup].Value[jData].Value != meta.Value {
						metas.MetaData[iGroup].Value[jData].Value = meta.Value
						// Flip Modified to true unless it already says so.
						// It may be stored either as a bool or as the
						// string "true" depending on the producer.
						switch modified := metas.MetaData[iGroup].Value[jData].Modified.(type) {
						case bool:
							if !modified {
								metas.MetaData[iGroup].Value[jData].Modified = true
							}
						case string:
							if modified != "true" {
								metas.MetaData[iGroup].Value[jData].Modified = true
							}
						}
					}
					break
				}
				// Unknown names are appended as new metadata entries.
				if !exists {
					metas.MetaData[iGroup].Value = append(metas.MetaData[iGroup].Value, meta)
				}
			}
		}
		ret, err := json.Marshal(metas)
		if err != nil {
			return err
		}
		// Persist the merged document and any matching first-class columns
		// in a single UPDATE.
		updateSQL := fmt.Sprintf(`update meta_object set meta_json = '%s'`, esc(string(ret)))
		for _, meta := range req.Metas {
			if metaColumns[meta.Name] {
				// NOTE(review): meta.Value is interpolated unescaped, as
				// before; moving to parameterized queries would be safer.
				updateSQL = fmt.Sprintf(`%s, %s = '%s'`, updateSQL, meta.Name, meta.Value)
			}
		}
		if req.DisplayField != "" {
			updateSQL = fmt.Sprintf(`%s, display_field = '%s'`, updateSQL, esc(req.DisplayField))
		}
		updateSQL = fmt.Sprintf(`%s where uuid = '%s'`, updateSQL, esc(dataId))
		if _, err = db.Conn.Exec(updateSQL); err != nil {
			return err
		}
	}
	if err := rows.Err(); err != nil {
		return err
	}
	return nil
}
// DelDataObject removes the meta_object row identified by colId.
// It returns any error from the DELETE.
func (db *MetaDB) DelDataObject(colId string) error {
	// Double single quotes so colId cannot break out of the SQL literal.
	sql := fmt.Sprintf(`delete from meta_object where uuid = '%s'`,
		strings.ReplaceAll(colId, "'", "''"))
	_, err := db.Conn.Exec(sql)
	return err
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.