text stringlengths 11 4.05M |
|---|
package sweep
// ResponseHeader represents a command response header.
// The field order mirrors the device wire format and must not change.
type ResponseHeader struct {
Cmd [2]byte
CmdStatus Int2
CmdSum byte
}
// CommandParamPacket is a command sent to the device together with a
// two-byte parameter.
type CommandParamPacket struct {
Cmd [2]byte
CmdParam [2]byte
}
// ResponseParamA represents the first part of a response parameter header.
type ResponseParamA struct {
Cmd [2]byte
CmdParam [2]byte
}
// ResponseParamB represents the second part of a response parameter header.
type ResponseParamB struct {
CmdStatus Int2
CmdSum byte
}
// ResponseDevice is the device-information response.
// NOTE(review): field semantics are inferred from their names; confirm
// against the device protocol documentation.
type ResponseDevice struct {
Cmd [2]byte
BitRate [6]byte
LaserState byte
Mode byte
Diagnostic byte
MotorSpeed Int2
SampleRate Int4
}
// ResponseVersion is the version-information response: model, protocol
// and firmware revisions, hardware version and serial number.
type ResponseVersion struct {
Cmd [2]byte
Model [5]byte
ProtocolMajor byte
ProtocolMinor byte
FirmwareMajor byte
FirmwareMinor byte
HardwareVersion byte
SerialNum [8]byte
}
// ResponseMotorReady reports whether the motor is ready.
type ResponseMotorReady struct {
Cmd [2]byte
MotorReady Int2
}
// ResponseMotorInfo reports the current motor speed.
type ResponseMotorInfo struct {
Cmd [2]byte
MotorSpeed Int2
}
// ResponseSampleRate reports the current sample rate.
type ResponseSampleRate struct {
Cmd [2]byte
SampleRate Int2
}
// Valid SyncFlags for ResponseScanPacket.
const (
// FlagSync is the sync bit of SyncFlags.
FlagSync = 1 << iota
// FlagCommunicationFail is the communication-failure bit of SyncFlags.
FlagCommunicationFail
)
// ResponseScanPacket represents a response scan packet.
// Angle is expressed in 1/16ths of a degree (see AngleDeg).
type ResponseScanPacket struct {
SyncFlags byte
Angle uint16
Distance uint16
SignalStrength byte
Checksum byte
}
// Checksum computes the one-byte checksum of the header: the low six
// bits of the summed command bytes, offset by ASCII '0' (0x30).
func (h *ResponseHeader) Checksum() byte {
	sum := h.Cmd[0] + h.Cmd[1]
	return (sum & 0x3F) + 0x30
}
// Checksum computes the one-byte checksum of the parameter header: the
// low six bits of the summed command bytes, offset by ASCII '0' (0x30).
func (p *ResponseParamA) Checksum() byte {
	sum := p.Cmd[0] + p.Cmd[1]
	return (sum & 0x3F) + 0x30
}
// AngleDeg returns the angle of the scan in degrees. The raw Angle
// field counts 1/16ths of a degree.
func (p *ResponseScanPacket) AngleDeg() float64 {
	const ticksPerDegree = 16.0
	return float64(p.Angle) / ticksPerDegree
}
|
package camt053
import (
"bufio"
"os"
"testing"
)
// TestRead streams the sample CAMT.053 file through Read and validates
// the electronic sequence number range and the account IBAN of every
// statement the callback receives.
func TestRead(t *testing.T) {
	f, e := os.Open("./CAMT053.xml")
	if e != nil {
		t.Fatal(e)
	}
	defer f.Close()
	buf := bufio.NewReader(f)
	if e := Read(buf, func(head GrpHdr, stmt Stmt) error {
		if stmt.ElctrncSeqNb < 16141 || stmt.ElctrncSeqNb > 16206 {
			t.Fatalf("Invalid ElctrncSeqNb")
		}
		// Use a format verb instead of string concatenation so Fatalf
		// receives a constant format string (go vet printf clean).
		if stmt.Acct.Id.IBAN != "NL59RABO3181240869" && stmt.Acct.Id.IBAN != "NL17RABO0310029597" {
			t.Fatalf("Invalid IBAN=%s", stmt.Acct.Id.IBAN)
		}
		return nil
	}); e != nil {
		t.Fatal(e)
	}
}
// TestFilterPaymentsReceived checks that exactly three received
// payments are extracted from the sample file and that the first
// payment's fields match the expected values.
func TestFilterPaymentsReceived(t *testing.T) {
	f, e := os.Open("./CAMT053.xml")
	if e != nil {
		t.Fatal(e)
	}
	defer f.Close()
	buf := bufio.NewReader(f)
	p, e := FilterPaymentsReceived(buf)
	if e != nil {
		t.Fatal(e)
	}
	// Fatalf (not Errorf) here: indexing p[0] below would panic if the
	// slice is short. The original Errorf call also had an argument but
	// no format verb, which go vet flags.
	if len(p) != 3 {
		t.Fatalf("Payments received = %d, want 3", len(p))
	}
	if p[0].IBAN != "NL62ABNA0408441224" {
		t.Errorf("Payment(%s) invalid IBAN=%s", p[0].Id, p[0].IBAN)
	}
	if p[0].Amount != "4425.25" {
		t.Errorf("Payment(%s) invalid amount=%s", p[0].Id, p[0].Amount)
	}
	if p[0].Comment != "2016Q3-0004" {
		t.Errorf("Payment(%s) invalid comment=%s", p[0].Id, p[0].Comment)
	}
	if p[0].Date != "2016-08-15" {
		t.Errorf("Payment(%s) invalid date=%s", p[0].Id, p[0].Date)
	}
	if p[0].Name != "XS NEWS B V" {
		t.Errorf("Payment(%s) invalid name=%s", p[0].Id, p[0].Name)
	}
}
|
package main
import (
"github.com/gorilla/websocket"
"context"
"net/http"
"encoding/json"
mon "part5/internal/monitor"
"io/ioutil"
"part5/internal/incident"
"log"
)
// upgrader converts plain HTTP requests into websocket connections.
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
// NOTE(review): accepting every Origin disables the browser's
// same-origin protection for websocket handshakes; fine for local
// tooling, not for untrusted deployments — confirm intent.
CheckOrigin: func(_ *http.Request) bool {
return true
},
}
// serveMonitors upgrades the incoming HTTP request to a websocket and
// hands the connection to the monitor loop, which streams real-time
// incidents to the client until the connection ends.
func serveMonitors(w http.ResponseWriter, r *http.Request) {
	ws, upgradeErr := upgrader.Upgrade(w, r, nil)
	if upgradeErr != nil {
		log.Println(upgradeErr)
		return
	}
	defer ws.Close()
	mon.Run(context.Background(), mon.Monitor{WSConn: ws})
}
// saveIncomingIncidents decodes an incident from the request body,
// persists it, broadcasts it to connected monitors, and echoes the
// stored payload back with 201 Created.
func saveIncomingIncidents(w http.ResponseWriter, r *http.Request) {
	var evt incident.Incident
	// Check the read error instead of discarding it: a truncated body
	// should be a client error, not a confusing unmarshal failure.
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if err := json.Unmarshal(reqBody, &evt); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if err := incident.Persist(&evt); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	mon.Broadcast(evt)
	storedPayload, err := json.Marshal(evt)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	w.Write(storedPayload)
}
|
package main
import (
"fmt"
"os"
)
// main validates the command line and prints the Damerau-Levenshtein
// distance between the two argument strings.
func main() {
	args := os.Args
	if len(args) != 3 {
		fmt.Println("Usage: dldist string1 string2")
		fmt.Println(" The program will compute the Damerau-Levenshtein distance between string1 and string2.")
		return
	}
	fmt.Println(DLDist(args[1], args[2]))
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package enterprise
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/arc/arcent"
"chromiumos/tast/local/arc/playstore"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/retry"
"chromiumos/tast/testing"
)
// init registers the ARCAvailableAppInstall test with the tast framework,
// with one parameterized variant per Android container flavor.
func init() {
testing.AddTest(&testing.Test{
Func: ARCAvailableAppInstall,
LacrosStatus: testing.LacrosVariantUnneeded,
Desc: "Checks that available apps can be installed in Play Store",
Contacts: []string{"mhasank@chromium.org", "arc-commercial@google.com"},
Attr: []string{"group:mainline"},
SoftwareDeps: []string{"chrome", "play_store"},
Timeout: 15 * time.Minute,
VarDeps: []string{
arcent.LoginPoolVar,
},
// Two variants: container-based ARC (android_p) and ARCVM.
Params: []testing.Param{
{
ExtraSoftwareDeps: []string{"android_p"},
ExtraAttr: []string{"informational"},
},
{
Name: "vm",
ExtraSoftwareDeps: []string{"android_vm"},
ExtraAttr: []string{"informational"},
}},
})
}
// ARCAvailableAppInstall verifies that allow-listed app can be installed in Play Store.
// The whole Chrome-login/ARC-boot/Play-Store sequence runs inside
// testing.Poll so transient failures can be retried via retry.Loop;
// rl.Retry marks a step retryable, rl.Exit aborts immediately.
func ARCAvailableAppInstall(ctx context.Context, s *testing.State) {
const (
bootTimeout = 4 * time.Minute
testPackage = "com.google.android.calculator"
defaultUITimeout = time.Minute
)
packages := []string{testPackage}
rl := &retry.Loop{Attempts: 1,
MaxAttempts: 2,
DoRetries: true,
Fatalf: s.Fatalf,
Logf: s.Logf}
// Pick GAIA credentials from the shared login pool variable.
creds, err := chrome.PickRandomCreds(s.RequiredVar(arcent.LoginPoolVar))
if err != nil {
rl.Exit("get login creds", err)
}
login := chrome.GAIALogin(creds)
// Reserve one minute of the deadline for cleanup (deferred Close/Stop
// calls run against cleanupCtx, which keeps the original deadline).
cleanupCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, time.Minute)
defer cancel()
// Serve a policy that marks the test package as "available" for install.
fdms, err := arcent.SetupPolicyServerWithArcApps(ctx, s.OutDir(), creds.User, packages, arcent.InstallTypeAvailable)
if err != nil {
rl.Exit("setup fake policy server", err)
}
defer fdms.Stop(cleanupCtx)
if err := testing.Poll(ctx, func(ctx context.Context) (retErr error) {
cr, err := chrome.New(
ctx,
login,
chrome.ARCSupported(),
chrome.UnRestrictARCCPU(),
chrome.DMSPolicy(fdms.URL),
chrome.ExtraArgs(arc.DisableSyncFlags()...))
if err != nil {
return rl.Retry("connect to Chrome", err)
}
defer cr.Close(cleanupCtx)
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
return rl.Retry("create test API connection", err)
}
a, err := arc.NewWithTimeout(ctx, s.OutDir(), bootTimeout)
if err != nil {
return rl.Retry("start ARC by policy", err)
}
defer a.Close(cleanupCtx)
if err := arcent.ConfigureProvisioningLogs(ctx, a); err != nil {
return rl.Exit("configure provisioning logs", err)
}
if err := arcent.WaitForProvisioning(ctx, a, rl.Attempts); err != nil {
return rl.Retry("wait for provisioning", err)
}
d, err := a.NewUIDevice(ctx)
if err != nil {
return rl.Exit("initialize UI Automator", err)
}
defer d.Close(cleanupCtx)
if err := arcent.EnsurePlayStoreNotEmpty(ctx, tconn, cr, a, d, s.OutDir(), rl.Attempts); err != nil {
return rl.Exit("verify Play Store is not empty", err)
}
if err := playstore.OpenAppPage(ctx, a, testPackage); err != nil {
return rl.Exit("open app page", err)
}
// The app page must expose an enabled Install button for an
// "available" (not force-installed) package.
if installButton, err := arcent.WaitForInstallButton(ctx, d); err != nil {
return rl.Exit("find the install button", err)
} else if enabled, err := installButton.IsEnabled(ctx); err != nil {
return rl.Exit("check install button state", err)
} else if !enabled {
return rl.Exit("verify install button is enabled", nil)
}
return nil
}, nil); err != nil {
s.Fatal("Available app install test failed: ", err)
}
}
|
package config
import (
"encoding/json"
"log"
"os"
)
// Save save current status and config to file.
// It is a convenience wrapper around SaveStatus and SaveConfig.
func Save() {
SaveStatus()
SaveConfig()
}
// SaveStatus writes the current status as indented JSON to status.json.
// Failures are logged rather than fatal so the caller keeps running.
func SaveStatus() {
	b, err := json.MarshalIndent(status, "", " ")
	if err != nil {
		log.Println("Failed to save status...")
		return
	}
	// 0644: status.json is plain data, the execute bit (0755) was wrong.
	// Also surface the previously ignored write error.
	if err := os.WriteFile("status.json", b, 0644); err != nil {
		log.Println("Failed to write status.json:", err)
	}
}
// SaveConfig writes the current config as indented JSON to config.json.
// Failures are logged rather than fatal so the caller keeps running.
func SaveConfig() {
	b, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		log.Println("Failed to save config...")
		return
	}
	// 0644: config.json is plain data, the execute bit (0755) was wrong.
	// Also surface the previously ignored write error.
	if err := os.WriteFile("config.json", b, 0644); err != nil {
		log.Println("Failed to write config.json:", err)
	}
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transfer
import (
"fmt"
"path"
"strings"
"github.com/uber/kraken/core"
"github.com/uber/kraken/lib/backend/namepath"
"github.com/uber/kraken/lib/store"
)
// testTransferer is an in-memory/CAS-backed ImageTransferer used in tests:
// blobs live in cas, tags live in the tags map keyed by tagPather blob path.
type testTransferer struct {
tagPather namepath.Pather
tags map[string]core.Digest
cas *store.CAStore
}
// NewTestTransferer creates a Transferer which stores blobs in cas and tags in
// memory for testing purposes.
// It panics if the DockerTag path format cannot be constructed, which is
// acceptable here because this constructor is test-only.
func NewTestTransferer(cas *store.CAStore) ImageTransferer {
tagPather, err := namepath.New("", namepath.DockerTag)
if err != nil {
panic(err)
}
return &testTransferer{
tagPather: tagPather,
tags: make(map[string]core.Digest),
cas: cas,
}
}
// Stat returns blob info from local cache.
// The namespace parameter is accepted for interface compatibility but
// unused: the CAS is keyed by digest only.
func (t *testTransferer) Stat(namespace string, d core.Digest) (*core.BlobInfo, error) {
fi, err := t.cas.GetCacheFileStat(d.Hex())
if err != nil {
return nil, fmt.Errorf("stat cache file: %w", err)
}
return core.NewBlobInfo(fi.Size()), nil
}
// Download returns a reader over the cached blob for digest d.
// namespace is unused; the CAS is keyed by digest only.
func (t *testTransferer) Download(namespace string, d core.Digest) (store.FileReader, error) {
return t.cas.GetCacheFileReader(d.Hex())
}
// Upload stores blob in the cache under digest d.
// namespace is unused; the CAS is keyed by digest only.
func (t *testTransferer) Upload(namespace string, d core.Digest, blob store.FileReader) error {
return t.cas.CreateCacheFile(d.Hex(), blob)
}
// GetTag resolves tag to a digest, or ErrTagNotFound if the tag has not
// been stored via PutTag.
func (t *testTransferer) GetTag(tag string) (core.Digest, error) {
p, err := t.tagPather.BlobPath(tag)
if err != nil {
return core.Digest{}, err
}
d, ok := t.tags[p]
if !ok {
return core.Digest{}, ErrTagNotFound
}
return d, nil
}
// PutTag records tag -> d in memory, keyed by the tag's blob path.
func (t *testTransferer) PutTag(tag string, d core.Digest) error {
p, err := t.tagPather.BlobPath(tag)
if err != nil {
return err
}
t.tags[p] = d
return nil
}
// ListTags returns every stored tag whose blob path starts with prefix
// (prefix is resolved relative to the tag pather's base path).
func (t *testTransferer) ListTags(prefix string) ([]string, error) {
	prefix = path.Join(t.tagPather.BasePath(), prefix)
	var tags []string
	// The loop variable is named blobPath (not "path") to avoid shadowing
	// the imported path package inside the loop body.
	for blobPath := range t.tags {
		if !strings.HasPrefix(blobPath, prefix) {
			continue
		}
		tag, err := t.tagPather.NameFromBlobPath(blobPath)
		if err != nil {
			// %w keeps the underlying error inspectable via errors.Is/As.
			return nil, fmt.Errorf("invalid tag path %s: %w", blobPath, err)
		}
		tags = append(tags, tag)
	}
	return tags, nil
}
|
package bitmap
import (
"errors"
"io"
)
var (
// errOutOfRange reports a bit position outside [0, size).
errOutOfRange = errors.New("out of range")
// errAssignSize reports an Assign between bitmaps of different sizes.
errAssignSize = errors.New("size not match")
)
// Bitmap is a fixed-size bit set backed by a byte slice. Bit 0 is the
// most significant bit of val[0] (big-endian bit order within a byte).
type Bitmap struct {
val []byte
size int
}
// NewEmptyBitmap returns an all-zero bitmap holding size bits.
// A size of 0 yields an empty bitmap with no backing storage.
func NewEmptyBitmap(size int) *Bitmap {
if size == 0 {
return new(Bitmap)
}
return &Bitmap{
val: make([]byte, (size-1)/8+1),
size: size,
}
}
// NewBitmap wraps data (without copying) as a bitmap of len(data)*8 bits.
func NewBitmap(data []byte) *Bitmap {
b := new(Bitmap)
b.val = data
b.size = len(data) * 8
return b
}
// Len returns the number of bits in the bitmap.
func (b *Bitmap) Len() int {
return b.size
}
// Truncate shrinks the bitmap to size bits. It panics with errOutOfRange
// if size exceeds the current length. The backing slice is reallocated
// only when at least one whole byte can be dropped.
func (b *Bitmap) Truncate(size int) {
if size > b.size {
panic(errOutOfRange)
}
if (b.size-size)/8 > 0 {
val := make([]byte, (size-1)/8+1)
copy(val, b.val)
b.val = val
}
b.size = size
}
// Assign copies o's bits into b. Both bitmaps must have the same size,
// otherwise errAssignSize is returned and b is unchanged.
func (b *Bitmap) Assign(o *Bitmap) error {
if o.size != b.size {
return errAssignSize
}
copy(b.val, o.val)
return nil
}
// bitMask returns the mask selecting bit pos within its byte; bit 0 maps
// to the most significant bit of the byte.
func bitMask(pos int) byte {
	return 1 << (7 - uint(pos)%8)
}

// SetBitOn sets the bit at pos to 1. It returns errOutOfRange when pos
// is outside [0, Len()).
func (b *Bitmap) SetBitOn(pos int) error {
	if pos < 0 || pos >= b.size {
		return errOutOfRange
	}
	b.val[pos/8] |= bitMask(pos)
	return nil
}

// SetBitOff clears the bit at pos. It returns errOutOfRange when pos is
// outside [0, Len()).
func (b *Bitmap) SetBitOff(pos int) error {
	if pos < 0 || pos >= b.size {
		return errOutOfRange
	}
	b.val[pos/8] &= ^bitMask(pos)
	return nil
}

// SetRangeBitOn sets every bit in the half-open range [begin, end) to 1.
// It returns errOutOfRange when the range extends outside the bitmap.
func (b *Bitmap) SetRangeBitOn(begin, end int) error {
	if begin < 0 || end > b.size {
		return errOutOfRange
	}
	for pos := begin; pos < end; pos++ {
		b.val[pos/8] |= bitMask(pos)
	}
	return nil
}

// GetBit reports whether the bit at pos is set. It returns errOutOfRange
// when pos is outside [0, Len()).
func (b *Bitmap) GetBit(pos int) (bool, error) {
	if pos < 0 || pos >= b.size {
		return false, errOutOfRange
	}
	return b.val[pos/8]&bitMask(pos) != 0, nil
}
// MustGetBit is like GetBit but panics on an out-of-range position.
func (b *Bitmap) MustGetBit(pos int) bool {
	on, err := b.GetBit(pos)
	if err != nil {
		// Panic with the underlying error (consistent with
		// MustCountBitOn) instead of the bare string "MustGetBit",
		// which hid the failure reason.
		panic("MustGetBit: " + err.Error())
	}
	return on
}

// CountBitOn returns the number of set bits in the half-open range
// [begin, end). It returns errOutOfRange when the range extends outside
// the bitmap.
func (b *Bitmap) CountBitOn(begin, end int) (int, error) {
	if begin < 0 || end > b.size {
		return 0, errOutOfRange
	}
	count := 0
	for i := begin; i < end; i++ {
		if b.MustGetBit(i) {
			count++
		}
	}
	return count, nil
}

// MustCountBitOn is like CountBitOn but panics on an invalid range.
func (b *Bitmap) MustCountBitOn(begin, end int) int {
	count, err := b.CountBitOn(begin, end)
	if err != nil {
		panic(err.Error())
	}
	return count
}
// Bytes exposes the backing byte slice (shared, not copied).
func (b *Bitmap) Bytes() []byte {
	return b.val
}

// Not returns a new bitmap with every bit of b inverted.
func (b *Bitmap) Not() *Bitmap {
	out := NewEmptyBitmap(b.size)
	for i, v := range b.val {
		out.val[i] = ^v
	}
	return out
}

// And returns the bitwise AND of b and o as a new bitmap, or nil when
// the sizes differ.
func (b *Bitmap) And(o *Bitmap) *Bitmap {
	if b.size != o.size {
		return nil
	}
	out := NewEmptyBitmap(b.size)
	for i, v := range b.val {
		out.val[i] = v & o.val[i]
	}
	return out
}

// XOR returns the bitwise XOR of b and o as a new bitmap, or nil when
// the sizes differ.
func (b *Bitmap) XOR(o *Bitmap) *Bitmap {
	if b.size != o.size {
		return nil
	}
	out := NewEmptyBitmap(b.size)
	for i, v := range b.val {
		out.val[i] = v ^ o.val[i]
	}
	return out
}
// ReadFrom fills the backing slice from r.
// io.ReadFull is used instead of a single r.Read call: Read is allowed
// to return fewer bytes than requested without an error, which would
// silently leave the bitmap partially populated.
func (b *Bitmap) ReadFrom(r io.Reader) error {
	_, err := io.ReadFull(r, b.val)
	return err
}

// WriteTo writes the backing slice to w. io.Writer guarantees a non-nil
// error on short writes, so checking err alone is sufficient.
func (b *Bitmap) WriteTo(w io.Writer) error {
	_, err := w.Write(b.val)
	return err
}
|
// 06 Pivot root
package main
import (
"fmt"
"os"
"os/exec"
"syscall"
)
// main dispatches to run (parent: set up namespaces and re-exec) or
// child (inside the namespaces: pivot root and exec the command).
func main() {
	// Guard the argument access: the original indexed os.Args[1]
	// unconditionally and panicked with "index out of range" when the
	// program was started without a subcommand.
	if len(os.Args) < 2 {
		fmt.Println("Usage: <run|child> command [args...]")
		os.Exit(1)
	}
	switch os.Args[1] {
	case "run":
		run()
	case "child":
		child()
	default:
		// Fixed typo: "Unknow" -> "Unknown".
		panic(fmt.Sprintf("Unknown command %s", os.Args[1]))
	}
}
// run re-executes this binary as "child" inside fresh UTS, mount, IPC,
// PID, network and user namespaces, mapping the current user/group to
// root inside the container.
func run() {
// /proc/self/exe re-runs the current binary; "child" replaces "run"
// so the re-executed process takes the child() path.
cmd := exec.Command("/proc/self/exe", append([]string{"child"}, os.Args[2:]...)...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = []string{"PS1=marcel:$(pwd) #", "PATH=/bin:/usr/bin/", "TERM=xterm"}
cmd.SysProcAttr = &syscall.SysProcAttr{
// One clone flag per namespace the child should own.
Cloneflags: syscall.CLONE_NEWUTS |
syscall.CLONE_NEWNS |
syscall.CLONE_NEWIPC |
syscall.CLONE_NEWPID |
syscall.CLONE_NEWNET |
syscall.CLONE_NEWUSER,
// Map the invoking user to uid 0 inside the user namespace.
UidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: os.Getuid(),
Size: 1,
},
},
// Likewise map the invoking group to gid 0.
GidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: os.Getgid(),
Size: 1,
},
},
}
if err := cmd.Run(); err != nil {
panic(err)
}
}
// child runs inside the new namespaces: it mounts /proc, bind-mounts
// /dev/urandom, pivots into the rootfs, and execs the requested command.
// Helpers (checkRootFS, mountProc, bindMountDeviceNode, pivotRoot) are
// defined elsewhere in this package.
func child() {
fmt.Printf("Namespace initialization\n")
// NOTE(review): hard-coded rootfs path ties this demo to one machine.
rootfsPath := "/home/stoakes/go/src/recreez-votre-docker/steps/06_pivot-root/centos"
checkRootFS(rootfsPath)
// Mount /proc inside the rootfs.
if err := mountProc(rootfsPath); err != nil {
fmt.Printf("Error mounting /proc - %s\n", err)
os.Exit(1)
}
// Bind-mount /dev/urandom from the host; failure is non-fatal.
if err := bindMountDeviceNode("/dev/urandom", rootfsPath+"/dev/urandom"); err != nil {
fmt.Printf("Error running bind mount urandom: %s", err)
}
// Pivot the root filesystem into the rootfs.
if err := pivotRoot(rootfsPath); err != nil {
fmt.Printf("Error running pivot_root - %s\n", err)
os.Exit(1)
}
fmt.Printf("Running %v as PID %d\n", os.Args[2:], os.Getpid())
cmd := exec.Command(os.Args[2], os.Args[3:]...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
panic(err)
}
// Once the command has finished: unmount.
// NOTE(review): unmount errors are deliberately ignored here (cleanup
// on the way out); confirm that is acceptable.
syscall.Unmount("proc", 0)
syscall.Unmount("dev/urandom", 0)
}
|
package mapreduce
//
// any additional state that you want to add to type WorkerInfo
//
// WorkerInfoImpl holds per-worker state for the master; currently empty.
type WorkerInfoImpl struct {
//status bool // 1 means busy, 0 means available
}
//
// run the MapReduce job across all the workers
//
// RunMasterImpl schedules all map tasks, waits for them to finish, then
// schedules all reduce tasks. Failed jobs are pushed back onto the
// jobnum channels for reassignment; completed-job counters are guarded
// by lock1 (map) and lock2 (reduce). Workers are recycled through
// mr.registerChannel after every attempt, success or failure.
func (mr *MapReduce) RunMasterImpl() {
mr.Workers = make(map[string]*WorkerInfo)
// Mapjob runs one map task on worker_temp and returns whether the RPC
// succeeded. On failure the job number is re-queued.
Mapjob := func(job_num int, worker_temp string) bool {
args := &DoJobArgs{}
args.File = mr.file
args.Operation = Map
args.JobNumber = job_num
args.NumOtherPhase = mr.nReduce
var reply DoJobReply
success := call(worker_temp, "Worker.DoJob", args, &reply)
if success {
mr.impl.lock1.Lock()
mr.impl.countmap++
mr.impl.lock1.Unlock()
mr.registerChannel <- worker_temp
return true
} else {
// Re-queue the failed job, then return the worker to the pool.
mr.impl.jobnum1 <- job_num
mr.registerChannel <- worker_temp
return false
}
}
// Reducejob is the reduce-phase twin of Mapjob.
Reducejob := func(job_num int, worker_temp string) bool {
args := &DoJobArgs{}
args.File = mr.file
args.Operation = Reduce
args.JobNumber = job_num
args.NumOtherPhase = mr.nMap
var reply DoJobReply
success := call(worker_temp, "Worker.DoJob", args, &reply)
if success {
mr.impl.lock2.Lock()
mr.impl.countreduce++
mr.impl.lock2.Unlock()
mr.registerChannel <- worker_temp
return true
} else {
mr.impl.jobnum2 <- job_num
mr.registerChannel <- worker_temp
return false
}
}
// Seed the map-phase job queue.
for i := 0; i < mr.nMap; i++ {
mr.impl.jobnum1 <- i
}
// NOTE(review): this loop busy-waits (select with `default: continue`)
// while re-checking the counter under lock; it burns CPU until all map
// jobs finish. A blocking receive would be gentler — confirm intent.
for {
mr.impl.lock1.Lock()
//fmt.Println(mr.impl.countmap)
if (mr.impl.countmap >= mr.nMap) {
mr.impl.lock1.Unlock()
break
}
mr.impl.lock1.Unlock()
select {
case x := <-mr.impl.jobnum1:
worker_temp := <-mr.registerChannel
go Mapjob(x, worker_temp)
default:
continue
}
}
// Seed the reduce-phase job queue.
for i := 0; i < mr.nReduce; i++ {
mr.impl.jobnum2 <- i
}
// Same busy-wait pattern as the map phase, for reduce jobs.
for {
mr.impl.lock2.Lock()
if (mr.impl.countreduce >= mr.nReduce) {
mr.impl.lock2.Unlock()
break
}
mr.impl.lock2.Unlock()
select {
case x := <-mr.impl.jobnum2:
worker_temp := <-mr.registerChannel
go Reducejob(x, worker_temp)
default:
continue
}
}
return
}
|
package main
import "fmt"
// Number pairs two integer factors.
type Number struct {
	a int
	b int
}

// Multi returns the product of the two factors.
func (num *Number) Multi() int {
	product := num.a * num.b
	return product
}
func main() {
num:=Number{
a: 2,
b: 6,
}
fmt.Println(num.Multi())
} |
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package boot
import (
"testing"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.dev/gvisor/runsc/config"
)
// TestGetMountAccessType checks that mount access type is derived from
// the pod mount-hint annotations: "container" share maps to exclusive,
// "pod"/"shared" map to shared, and a source mismatch falls back to the
// config default (shared).
func TestGetMountAccessType(t *testing.T) {
const source = "foo"
for _, tst := range []struct {
name string
annotations map[string]string
want config.FileAccessType
}{
{
name: "container=exclusive",
annotations: map[string]string{
MountPrefix + "mount1.source": source,
MountPrefix + "mount1.type": "bind",
MountPrefix + "mount1.share": "container",
},
want: config.FileAccessExclusive,
},
{
name: "pod=shared",
annotations: map[string]string{
MountPrefix + "mount1.source": source,
MountPrefix + "mount1.type": "bind",
MountPrefix + "mount1.share": "pod",
},
want: config.FileAccessShared,
},
{
name: "shared=shared",
annotations: map[string]string{
MountPrefix + "mount1.source": source,
MountPrefix + "mount1.type": "bind",
MountPrefix + "mount1.share": "shared",
},
want: config.FileAccessShared,
},
{
// Hint source does not match the mount, so the hint is ignored
// and the config default (shared) wins.
name: "default=shared",
annotations: map[string]string{
MountPrefix + "mount1.source": source + "mismatch",
MountPrefix + "mount1.type": "bind",
MountPrefix + "mount1.share": "container",
},
want: config.FileAccessShared,
},
} {
t.Run(tst.name, func(t *testing.T) {
spec := &specs.Spec{Annotations: tst.annotations}
podHints, err := NewPodMountHints(spec)
if err != nil {
t.Fatalf("newPodMountHints failed: %v", err)
}
conf := &config.Config{FileAccessMounts: config.FileAccessShared}
mnt := &specs.Mount{Source: source}
if got := getMountAccessType(conf, mnt, podHints.FindMount(mnt)); got != tst.want {
t.Errorf("getMountAccessType(), got: %v, want: %v", got, tst.want)
}
})
}
}
|
package ship
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"sync"
"time"
)
// Server serves the build/deploy dashboard and API. All mutable state
// (Builders, Requests, apps) is mutated only from the single goroutine
// draining the feed channel (see initialize), which serializes access.
type Server struct {
Host string
Root string
Builds string
Builders map[string]*Builder
Requests map[string]*Requests
apps map[string]map[string]*App
once sync.Once
feed chan func()
overview *template.Template
}
// Requests is the per-project request history plus its open log file.
type Requests struct {
Name string
Root string
Builds []*Build
Deployments []*Deploy
output *os.File
stream *bufio.Writer
}
// App describes one deployed application instance as reported by the
// instance itself (see the /app/instance handler).
type App struct {
Name string `json:"app"`
URL string `json:"url"`
Version string `json:"md5"`
}
// initialize loads persisted state from disk, parses the overview
// template, and starts the single goroutine that serializes all state
// mutations by draining s.feed. Called once via sync.Once.
func (s *Server) initialize() {
s.Builders = make(map[string]*Builder)
s.Requests = make(map[string]*Requests)
s.apps = make(map[string]map[string]*App)
s.readBuilds()
s.readRequests()
s.readApps()
var err error
s.overview, err = template.New("overview").Parse(htmlOverview)
if err != nil {
log.Fatal(err)
}
// process events
s.feed = make(chan func())
go func() {
for f := range s.feed {
f()
}
}()
}
// readBuilds scans <root>/builds: directories are treated as corrupted
// in-progress builds and removed; *.json files are loaded as builder
// definitions; *.build and *.gz artifacts are kept as-is.
func (s *Server) readBuilds() {
s.Builds = path.Join(s.Root, "builds")
// make sure the root directory for builds exists
os.MkdirAll(s.Builds, 0755)
// inspect it
entries, err := ioutil.ReadDir(s.Builds)
if err != nil {
log.Fatal(err)
}
for _, entry := range entries {
name := entry.Name()
// clean possibly corrupted builds
if entry.IsDir() {
if err := os.RemoveAll(path.Join(s.Builds, name)); err != nil {
log.Fatal(err)
}
log.Println("removed", name)
continue
}
// import builders
i := strings.Index(name, ".")
if i < 0 {
log.Println("unknown", name)
continue
}
switch name[i+1:] {
case "json":
s.readBuilder(name[:i])
case "build", "gz":
default:
log.Println("unknown", name)
}
}
}
// readBuilder loads (or refreshes) the builder definition from
// <builds>/<name>.json into s.Builders[name]. Fatal on any I/O or
// decode error, matching the rest of the startup path.
func (s *Server) readBuilder(name string) {
	b, ok := s.Builders[name]
	if !ok {
		b = &Builder{
			Name: name,
			Root: s.Root,
		}
		s.Builders[name] = b
	}
	file, err := os.Open(path.Join(s.Builds, name+".json"))
	if err != nil {
		log.Fatal(err)
	}
	// The original leaked this descriptor; close it once decoding ends.
	defer file.Close()
	if err := json.NewDecoder(file).Decode(b); err != nil {
		log.Fatal(err)
	}
}
// readRequests loads the request history from <root>/logs. Each log file
// holds one JSON record per line; *.build files append to r.Builds and
// *.deploy files to r.Deployments of the matching Requests entry.
func (s *Server) readRequests() {
	root := path.Join(s.Root, "logs")
	// make sure the root directory for requests exists
	os.MkdirAll(root, 0755)
	entries, err := ioutil.ReadDir(root)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		name := entry.Name()
		i := strings.Index(name, ".")
		if i < 0 {
			log.Println("unknown", name)
			continue
		}
		// Per-file work lives in a helper so its deferred Close fires at
		// the end of each iteration; the original never closed these
		// files, leaking one descriptor per log.
		s.readRequestLog(root, name, name[i+1:])
	}
}

// readRequestLog parses one request log file; kind selects the record
// type ("build" or "deploy", anything else is ignored). Fatal on parse
// or scan errors, matching the rest of the startup path.
func (s *Server) readRequestLog(root, name, kind string) {
	file, err := os.Open(path.Join(root, name))
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	lines := bufio.NewScanner(file)
	switch kind {
	case "build":
		for lines.Scan() {
			b := new(Build)
			if err = json.Unmarshal(lines.Bytes(), b); err != nil {
				break
			}
			r := s.get(b.Filename)
			r.Builds = append(r.Builds, b)
		}
	case "deploy":
		for lines.Scan() {
			d := new(Deploy)
			if err = json.Unmarshal(lines.Bytes(), d); err != nil {
				break
			}
			r := s.get(d.Filename)
			r.Deployments = append(r.Deployments, d)
		}
	}
	if err != nil {
		log.Fatal("failed to parse", file.Name(), err)
	}
	if err = lines.Err(); err != nil {
		log.Fatal(err)
	}
}
// readApps loads <root>/apps/<name> JSON files into s.apps; each file
// maps instance URLs to App records for the application <name>.
func (s *Server) readApps() {
	root := path.Join(s.Root, "apps")
	// make sure the root directory for requests exists
	os.MkdirAll(root, 0755)
	entries, err := ioutil.ReadDir(root)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		name := entry.Name()
		file, err := os.Open(path.Join(root, name))
		if err != nil {
			log.Fatal(err)
		}
		items := make(map[string]*App)
		err = json.NewDecoder(file).Decode(&items)
		// Close immediately (not deferred) so descriptors do not pile up
		// across loop iterations; the original never closed these files.
		file.Close()
		if err != nil {
			log.Fatal(err)
		}
		s.apps[name] = items
	}
}
// Write renders the overview page to w. The template executes on the
// feed goroutine so it reads server state without racing mutations;
// the done channel blocks the HTTP handler until rendering finishes.
func (s *Server) Write(w http.ResponseWriter) {
w.Header().Set("Content-Type", "text/html")
done := make(chan struct{})
s.feed <- func() {
err := s.overview.Execute(w, s)
if err != nil {
log.Println(err)
}
close(done)
}
<-done
}
// Start initializes the server once and registers all HTTP handlers:
// "/" (overview page), "/builds/" (artifact download), "/request/build"
// and "/request/deploy" (JSON request intake), and "/app/instance"
// (instance self-registration). It does not start listening itself.
func (s *Server) Start() (err error) {
s.once.Do(s.initialize)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
s.Write(w)
})
// Serve only .gz artifacts out of the builds tree.
http.HandleFunc("/builds/", func(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed)
return
}
if !strings.HasSuffix(r.URL.Path, ".gz") {
http.NotFound(w, r)
return
}
http.ServeFile(w, r, path.Join(s.Root, r.URL.Path))
})
// decode parses a POSTed JSON request into q and hands it to Process.
decode := func(w http.ResponseWriter, r *http.Request, q interface{}) {
if r.Method != "POST" {
http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed)
return
}
err := json.NewDecoder(r.Body).Decode(q)
r.Body.Close()
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
w.Header().Set("Content-Type", "text/plain")
err = s.Process(w, q)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
}
http.HandleFunc("/request/build", func(w http.ResponseWriter, r *http.Request) {
decode(w, r, new(Build))
})
http.HandleFunc("/request/deploy", func(w http.ResponseWriter, r *http.Request) {
decode(w, r, new(Deploy))
})
// Instances POST their own App record; it is stored in memory and the
// per-app registry is persisted to <root>/apps/<name>.
http.HandleFunc("/app/instance", func(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed)
return
}
item := new(App)
err := json.NewDecoder(r.Body).Decode(&item)
r.Body.Close()
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// State mutation and persistence run on the feed goroutine.
s.feed <- func() {
instances, ok := s.apps[item.Name]
if !ok {
instances = make(map[string]*App)
s.apps[item.Name] = instances
}
instances[item.URL] = item
body, err := json.MarshalIndent(&instances, "", "\t")
if err != nil {
log.Fatal(err)
}
err = ioutil.WriteFile(path.Join(s.Root, "apps", item.Name), body, 0666)
if err != nil {
log.Fatal(err)
}
}
})
return
}
// Process dispatches a decoded request to the matching handler, writing
// any textual output to w. Unknown request types yield an error.
func (s *Server) Process(w io.Writer, r interface{}) (err error) {
	switch req := r.(type) {
	case *Build:
		return s.makeBuilder(w, req)
	case *Deploy:
		return s.makeDeploy(w, req)
	default:
		return fmt.Errorf("unknown type of request: %T", req)
	}
}
// get returns the Requests record for name, creating and registering an
// empty one on first use.
func (s *Server) get(name string) *Requests {
	if existing, ok := s.Requests[name]; ok {
		return existing
	}
	fresh := &Requests{
		Root: path.Join(s.Root, "logs"),
		Name: name,
	}
	s.Requests[name] = fresh
	return fresh
}
// makeBuilder handles a build request: it allocates a fresh workspace,
// logs the request, runs the build, registers the resulting builder, and
// writes the build name to w.
func (s *Server) makeBuilder(w io.Writer, b *Build) (err error) {
s.once.Do(s.initialize)
// record the time when the request was received
b.When = time.Now().UTC()
// new workspace
dir, err := ioutil.TempDir(s.Builds, b.Filename+"-")
if err != nil {
return
}
// keep track of the build request (on the feed goroutine, which owns
// all server state)
s.feed <- func() {
r := s.get(b.Filename)
r.Builds = append(r.Builds, b)
r.save(b)
}
builder := &Builder{
Workspace: dir,
Root: s.Builds,
Build: b,
}
// build
name, err := builder.Make()
if err != nil {
return
}
// keep track of the build
s.feed <- func() {
s.Builders[name] = builder
}
io.WriteString(w, name)
return
}
// makeDeploy handles a deployment request: it POSTs the new version URL
// to every registered instance of the app whose host matches one of the
// requested targets, streams per-host results to w, and records the
// deployment in the request log.
func (s *Server) makeDeploy(w io.Writer, d *Deploy) (err error) {
	s.once.Do(s.initialize)
	hosts, ok := s.apps[d.Filename]
	// BUG FIX: the original returned when the app WAS known (`if ok`),
	// so a deployment either exited immediately or iterated a nil map —
	// every deploy was a no-op. Return only when there are no known
	// instances to deploy to.
	if !ok {
		return
	}
	// record the time when the request was received
	d.When = time.Now().UTC()
	r := struct {
		URL string `json:"url"`
		MD5 string `json:"md5"`
	}{
		URL: s.Host + "/versions/" + d.Version + ".gz",
		MD5: d.Version,
	}
	body, err := json.Marshal(&r)
	if err != nil {
		log.Fatal(err)
	}
	done := make(chan string, len(hosts))
	// update notifies one host and reports the outcome on done.
	update := func(host string) {
		resp, err := http.Post("http://"+host+"/deploy/new", "application/json", bytes.NewReader(body))
		if err != nil {
			log.Println(host, err)
			done <- host + " " + err.Error()
			return
		}
		// Close the body so the transport can reuse the connection.
		defer resp.Body.Close()
		io.Copy(os.Stdout, resp.Body)
		done <- host + " OK"
	}
	// send update requests to every instance matching a target
	n := 0
	for _, item := range d.Targets {
		for host := range hosts {
			if !strings.Contains(host, item) {
				continue
			}
			n++
			go update(host)
		}
	}
	// wait for the requests to complete, echoing results as they arrive
	lines := make([]string, n)
	for i := 0; i < n; i++ {
		lines[i] = <-done
		fmt.Fprintf(w, "%s\n", lines[i])
	}
	// keep track of the deployment request (on the feed goroutine)
	s.feed <- func() {
		r := s.get(d.Filename)
		d.Logs = lines
		r.Deployments = append(r.Deployments, d)
		r.save(d)
	}
	return
}
// save appends item as one JSON line to the day's log file. The file is
// named <name>-<yyyymmdd>.<kind> where kind is derived from the concrete
// type ("*ship.Build" -> "build"); the open file rolls over whenever the
// computed filename changes (new day or new kind).
func (r *Requests) save(item interface{}) {
// get type
kind := strings.ToLower(strings.TrimPrefix(fmt.Sprintf("%T", item), "*ship."))
// and type filename
name := fmt.Sprintf("%s-%s.%s", r.Name, time.Now().UTC().Format("20060102"), kind)
// to get the log file
filename := path.Join(r.Root, name)
if r.output == nil || r.output.Name() != filename {
// Roll over: flush and close the previous log before opening the new one.
if r.output != nil {
r.stream.Flush()
r.output.Close()
}
output, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
log.Fatal(err)
}
r.output = output
r.stream = bufio.NewWriter(r.output)
}
err := json.NewEncoder(r.stream).Encode(item)
if err != nil {
log.Fatal(err)
}
r.stream.Flush()
}
|
package test
import "testing"
// TestMain is the package's test entry point; it runs once, wrapping all
// tests between the "start" and "end" markers.
// NOTE(review): m.Run's result is discarded; the conventional form is
// os.Exit(m.Run()), though since Go 1.15 falling through also works.
func TestMain(m *testing.M) {
println("start")
m.Run()
println("end")
}
// Test1 is a trivial placeholder test.
func Test1(t *testing.T) {
println("test1")
}
// Test2 is a trivial placeholder test.
func Test2(t *testing.T) {
println("test2")
}
// Test3 prints whether the -test.short flag is set.
func Test3(t *testing.T) {
print(testing.Short())
}
package interfaces
import (
repo "github.com/salihkemaloglu/gignoxqc-beta-001/repositories"
)
// IUserRepository declares the user-repository operations; Login returns
// the authenticated user or an error.
type IUserRepository interface {
Login() (*repo.User, error)
}
|
package main
import "fmt"
// Channels are the pipes that connect concurrent goroutines: a value
// sent from one goroutine with `ch <-` is received in another with `<-ch`.
func main() {
	message := make(chan string)
	// Start a goroutine that sends a value into the channel.
	go func() {
		message <- "ping"
	}()
	// Receive the value; this blocks until the sender delivers it.
	received := <-message
	fmt.Println(received)
}
|
// Copyright 2017 Zhang Peihao <zhangpeihao@gmail.com>
package httpapi
import (
"io/ioutil"
"net/http"
"strings"
"github.com/golang/glog"
"github.com/zhangpeihao/shutdown"
"github.com/zhangpeihao/zim/pkg/broker"
"github.com/zhangpeihao/zim/pkg/protocol"
"github.com/zhangpeihao/zim/pkg/util"
)
// Subscribe registers handler for messages published under tag and
// blocks, dispatching queued commands to the handler until the broker's
// context is cancelled. The queue it installs in b.queues is fed by
// ServeHTTP.
func (b *BrokerImpl) Subscribe(tag string, handler broker.SubscribeHandler) error {
glog.Infof("broker::httpapi::Subscribe(%s)\n", tag)
defer glog.Infof("broker::httpapi::Subscribe(%s) done\n", tag)
if err := shutdown.ExitWaitGroupAdd(b.ctx, 1); err != nil {
glog.Errorf("broker::httpapi::Subscribe(%s) ExitWaitGroupAdd error: %s", tag, err)
return err
}
defer shutdown.ExitWaitGroupDone(b.ctx)
queue := make(chan *protocol.Command, b.queueSize)
b.Lock()
b.queues[tag] = queue
b.Unlock()
FOR_LOOP:
for {
select {
case cmd := <-queue:
func() {
// NOTE(review): RecoverFromPanic is called directly, not
// deferred — as written it cannot catch a panic raised by
// handler below. If that is its purpose, this likely needs
// `defer util.RecoverFromPanic()`; confirm its contract.
util.RecoverFromPanic()
handler(tag, cmd)
}()
case <-b.ctx.Done():
glog.Infof("broker::httpapi::Subscribe(%s) break by context", tag)
break FOR_LOOP
}
}
return nil
}
// ServeHTTP handles HTTP connections: the URL path (lowercased, slashes
// trimmed) selects a subscriber queue; the request body is parsed into a
// command and enqueued. Responds 404 for unknown tags, 400 for read or
// parse failures, 200 on success.
func (b *BrokerImpl) ServeHTTP(w http.ResponseWriter, r *http.Request) {
glog.Infoln("broker::httpapi::ServeHTTP()")
defer r.Body.Close()
tag := strings.ToLower(strings.Trim(r.URL.Path, "/"))
glog.Infoln("broker::httpapi::ServeHTTP() tag: ", tag)
if tag == "debug.html" {
if b.Debug {
// b.HandleDebug(w, r)
} else {
glog.Warningln("broker::httpapi::ServeHTTP() not in debug mode")
w.WriteHeader(404)
return
}
} else {
// Look up the subscriber queue under the broker lock.
b.Lock()
queue, ok := b.queues[tag]
b.Unlock()
if !ok {
glog.Warningln("broker::httpapi::ServeHTTP() no tag(", tag, ")")
w.WriteHeader(404)
return
}
if payload, err := ioutil.ReadAll(r.Body); err != nil {
glog.Warningf("broker::httpapi::ServeHTTP() Read payload error: %s\n",
err)
w.WriteHeader(400)
} else {
if cmd, err := ParseCommand(b.ctx, tag, r.Header, payload, b.timeout); err != nil {
glog.Warningf("broker::httpapi::ServeHTTP() ParseCommand error: %s\n",
err)
w.WriteHeader(400)
} else {
// NOTE(review): this send blocks when the queue is full,
// holding the HTTP handler — confirm that is intended.
queue <- cmd
w.WriteHeader(200)
}
}
}
}
|
package clientkube
import (
"context"
"log"
"os"
"testing"
"github.com/go-logr/stdr"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/bryanl/clientkube/pkg/cluster"
"github.com/bryanl/clientkube/pkg/mocks"
)
// TestMemoryStoreInformer_Watch verifies MemoryStoreInformer.Watch in two
// situations: before a resource is synced (the watch must be delegated to
// the cluster client) and after (the watch must be built from the store).
func TestMemoryStoreInformer_Watch(t *testing.T) {
	// Resource identity shared by every case.
	res := schema.GroupVersionResource{
		Group:    "group",
		Version:  "version",
		Resource: "resource",
	}
	stdLog := log.New(os.Stderr, "", log.LstdFlags)
	loggerOption := WithLogger(stdr.New(stdLog))
	tests := []struct {
		name string
		// options builds the informer Options (logger, optional mock store).
		options func(ctrl *gomock.Controller, options cluster.ListOptions) []Option
		// withInformer, when set, mutates the informer before Watch is called.
		withInformer func(informer *MemoryStoreInformer)
		listOptions  cluster.ListOptions
		// initClient builds the mock cluster client with its expectations.
		initClient func(ctrl *gomock.Controller, options cluster.ListOptions) cluster.Client
	}{
		{
			name: "watch for unsynced resource uses client for watch",
			options: func(ctrl *gomock.Controller, options cluster.ListOptions) []Option {
				return []Option{
					loggerOption,
				}
			},
			initClient: func(ctrl *gomock.Controller, options cluster.ListOptions) cluster.Client {
				// The client must receive the Watch call while unsynced.
				w := mocks.NewMockWatch(ctrl)
				client := mocks.NewMockClient(ctrl)
				client.EXPECT().Watch(gomock.Any(), res, options).Return(w, nil)
				return client
			},
			listOptions: cluster.ListOptions{},
		},
		{
			name: "watch for synced resource builds watch from store",
			options: func(ctrl *gomock.Controller, options cluster.ListOptions) []Option {
				// The store must receive the Watch call once synced.
				s := mocks.NewMockStore(ctrl)
				s.EXPECT().
					Watch(res, options)
				return []Option{
					loggerOption,
					WithStore(s),
				}
			},
			withInformer: func(informer *MemoryStoreInformer) {
				// Mark the resource synced so Watch prefers the store.
				require.NoError(t, informer.SetSynced(res, nil))
			},
			initClient: func(ctrl *gomock.Controller, options cluster.ListOptions) cluster.Client {
				// No expectations: the client must not be consulted.
				client := mocks.NewMockClient(ctrl)
				return client
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ctx := context.Background()
			ctrl := gomock.NewController(t)
			client := test.initClient(ctrl, test.listOptions)
			options := test.options(ctrl, test.listOptions)
			msi := NewInformer(client, options...)
			if fn := test.withInformer; fn != nil {
				fn(msi)
			}
			_, err := msi.Watch(ctx, res, test.listOptions)
			require.NoError(t, err)
		})
	}
}
|
package main
import "fmt"
// main demonstrates Go's for loop (which also serves as the language's
// "while" loop). The previous version had every statement commented out,
// which left the "fmt" import unused and the file failing to compile.
func main() {
	// for
	for i := 0; i < 10; i++ {
		fmt.Println("E aew rapaziada", i)
	}
}
|
package configuration
import (
"os"
"strings"
"time"
errs "github.com/pkg/errors"
"github.com/spf13/viper"
)
const (
	// Constants for viper variable names. Will be used to set
	// default values as well as to get each value.
	varCleanTestDataEnabled = "clean.test.data"
	varDBLogsEnabled        = "enable.db.logs"
	varDeveloperModeEnabled = "developer.mode.enabled"
	varDiagnoseHTTPAddress  = "diagnose.http.address"
	varEnvironment          = "environment"
	varHTTPAddress          = "http.address"
	varLogJSON              = "log.json"
	varLogLevel             = "log.level"
	varMetricsHTTPAddress   = "metrics.http.address"
	// Postgres connection and pool settings
	varPostgresHost                 = "postgres.host"
	varPostgresPort                 = "postgres.port"
	varPostgresUser                 = "postgres.user"
	varPostgresDatabase             = "postgres.database"
	varPostgresPassword             = "postgres.password"
	varPostgresSSLMode              = "postgres.sslmode"
	varPostgresConnectionTimeout    = "postgres.connection.timeout"
	varPostgresTransactionTimeout   = "postgres.transaction.timeout"
	varPostgresConnectionRetrySleep = "postgres.connection.retrysleep"
	varPostgresConnectionMaxIdle    = "postgres.connection.maxidle"
	varPostgresConnectionMaxOpen    = "postgres.connection.maxopen"
	// Interval between jobs that refresh monitored IP ranges
	varMonitorIPDuration = "monitor.ip.duration"
	// ProxyURL the webhook requests are forwarded to
	varProxyURL = "proxy.url"
)
// New creates a configuration reader object using a configurable
// configuration file path. Values come from (highest precedence first)
// F8_-prefixed environment variables, the optional YAML config file, and
// the built-in defaults.
func New(configFilePath string) (*Config, error) {
	c := Config{
		v: viper.New(),
	}
	c.v.SetEnvPrefix("F8")
	c.v.AutomaticEnv()
	// Map config keys like "log.level" to env vars like F8_LOG_LEVEL.
	c.v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	c.v.SetTypeByDefaultValue(true)
	c.setConfigDefaults()
	if configFilePath != "" {
		c.v.SetConfigType("yaml")
		c.v.SetConfigFile(configFilePath)
		if err := c.v.ReadInConfig(); err != nil {
			// Wrap rather than reformat the error (errs.Errorf discarded the
			// cause chain and carried a stray trailing newline).
			return nil, errs.Wrapf(err, "failed to read config file %q", configFilePath)
		}
	}
	return &c, nil
}
// Config encapsulates the Viper configuration registry which stores the
// configuration data in-memory.
type Config struct {
	// v is the backing viper instance; every getter delegates to it.
	v *viper.Viper
}
// GetConfig is a wrapper over New which reads the configuration file path
// from the WEBHOOK_CONFIG_FILE_PATH environment variable.
func GetConfig() (*Config, error) {
	return New(getMainConfigFile())
}
// getMainConfigFile returns the configuration file path taken from the
// WEBHOOK_CONFIG_FILE_PATH environment variable (set by the operator or by
// main.go from the --config flag); empty when unset.
func getMainConfigFile() string {
	return os.Getenv("WEBHOOK_CONFIG_FILE_PATH")
}
// setConfigDefaults registers the default value for every known
// configuration key; explicit config-file or environment values override
// these.
func (c *Config) setConfigDefaults() {
	c.v.SetTypeByDefaultValue(true)
	c.v.SetDefault(varLogLevel, defaultLogLevel)
	c.v.SetDefault(varHTTPAddress, defaultHTTPAddress)
	c.v.SetDefault(varMetricsHTTPAddress, defaultMetricsHTTPAddress)
	c.v.SetDefault(varDeveloperModeEnabled,
		defaultDeveloperModeEnabled)
	c.v.SetDefault(varCleanTestDataEnabled, true)
	c.v.SetDefault(varDBLogsEnabled, false)
	//---------
	// Postgres
	//---------
	c.v.SetDefault(varPostgresHost, defaultPostgresHost)
	c.v.SetDefault(varPostgresPort, defaultPostgresPort)
	c.v.SetDefault(varPostgresUser, defaultPostgresUser)
	c.v.SetDefault(varPostgresDatabase, defaultPostgresDatabase)
	c.v.SetDefault(varPostgresPassword, defaultPostgresPassword)
	c.v.SetDefault(varPostgresSSLMode, defaultPostgresSSLMode)
	c.v.SetDefault(varPostgresConnectionTimeout,
		defaultPostgresConnectionTimeout)
	c.v.SetDefault(varPostgresConnectionMaxIdle,
		defaultPostgresConnectionMaxIdle)
	c.v.SetDefault(varPostgresConnectionMaxOpen,
		defaultPostgresConnectionMaxOpen)
	// Number of seconds to wait before trying to connect again
	c.v.SetDefault(varPostgresConnectionRetrySleep,
		defaultPostgresConnectionRetrySleep)
	// Timeout of a transaction in minutes
	// NOTE(review): this defaults the *transaction* timeout to the
	// *connection* timeout constant — looks like a copy-paste slip; confirm
	// whether a defaultPostgresTransactionTimeout constant was intended.
	c.v.SetDefault(varPostgresTransactionTimeout,
		defaultPostgresConnectionTimeout)
	// ProxyURL to forward webhook request
	c.v.SetDefault(varProxyURL, defaultProxyURL)
	// Monitor IP Duration for duration between job to update IP
	c.v.SetDefault(varMonitorIPDuration, defaultMonitorIPDuration)
}
// DeveloperModeEnabled returns `true` if development related features
// (e.g. the token generation endpoint) are enabled, as set via default,
// config file, or environment variable.
func (c *Config) DeveloperModeEnabled() bool {
	return c.v.GetBool(varDeveloperModeEnabled)
}

// GetEnvironment returns the current environment the application is deployed
// in, like 'production', 'prod-preview', 'local', etc., as the value of the
// environment variable `F8_ENVIRONMENT`. Defaults to "local" when unset.
func (c *Config) GetEnvironment() string {
	if c.v.IsSet(varEnvironment) {
		return c.v.GetString(varEnvironment)
	}
	return "local"
}
// IsLogJSON reports whether logs should be emitted in JSON format. An
// explicit log.json setting always wins; otherwise JSON is enabled except in
// developer mode.
func (c *Config) IsLogJSON() bool {
	if c.v.IsSet(varLogJSON) {
		return c.v.GetBool(varLogJSON)
	}
	return !c.DeveloperModeEnabled()
}
// GetHTTPAddress returns the HTTP address (as set via default, config file, or environment variable)
// that the wit server binds to (e.g. "0.0.0.0:8080")
func (c *Config) GetHTTPAddress() string {
	return c.v.GetString(varHTTPAddress)
}

// GetMetricsHTTPAddress returns the address the /metrics endpoint will be mounted.
// By default GetMetricsHTTPAddress is the same as GetHTTPAddress
func (c *Config) GetMetricsHTTPAddress() string {
	return c.v.GetString(varMetricsHTTPAddress)
}

// GetDiagnoseHTTPAddress returns the address of where to start the gops handler.
// By default GetDiagnoseHTTPAddress is 127.0.0.1:0 in devMode, but turned off in prod mode
// unless explicitly configured
func (c *Config) GetDiagnoseHTTPAddress() string {
	if c.v.IsSet(varDiagnoseHTTPAddress) {
		return c.v.GetString(varDiagnoseHTTPAddress)
	} else if c.DeveloperModeEnabled() {
		// Port 0 lets the OS pick a free port for the gops agent.
		return "127.0.0.1:0"
	}
	return ""
}

// GetLogLevel returns the logging level (as set via config file or environment variable)
func (c *Config) GetLogLevel() string {
	return c.v.GetString(varLogLevel)
}

// GetProxyURL returns the URL webhook requests are forwarded to
func (c *Config) GetProxyURL() string {
	return c.v.GetString(varProxyURL)
}

// GetMonitorIPDuration returns the duration between monitoring calls that
// update ip ranges from source
func (c *Config) GetMonitorIPDuration() time.Duration {
	return c.v.GetDuration(varMonitorIPDuration)
}
|
package colour
import (
"testing"
)
// TestColour verifies that New stores each channel in the matching field.
// The Green and Blue failure messages previously said "Red" (copy-paste).
func TestColour(t *testing.T) {
	colour := New(-0.5, 0.4, 1.7)
	if colour.Red != -0.5 {
		t.Error("Could not access the Red attribute of Colour.")
	}
	if colour.Green != 0.4 {
		t.Error("Could not access the Green attribute of Colour.")
	}
	if colour.Blue != 1.7 {
		t.Error("Could not access the Blue attribute of Colour.")
	}
}
// TestColourEqualMethod checks Equal against one equal pair and one unequal
// pair per channel.
func TestColourEqualMethod(t *testing.T) {
	cases := []struct {
		a, b     Colour
		expected bool
	}{
		{a: New(0, 0, 0), b: New(0, 0, 0), expected: true},
		{a: New(1, 0, 0), b: New(0, 0, 0), expected: false},
		{a: New(0, 1, 0), b: New(0, 0, 0), expected: false},
		{a: New(0, 0, 1), b: New(0, 0, 0), expected: false},
	}
	for _, c := range cases {
		if got := c.a.Equal(c.b); got != c.expected {
			t.Error("Failed Colour equality test.")
		}
	}
}
// TestColourAddMethod checks component-wise colour addition.
func TestColourAddMethod(t *testing.T) {
	var tests = []struct {
		a, b, expected Colour
	}{
		{
			a:        New(0, 0, 0),
			b:        New(0, 0, 0),
			expected: New(0, 0, 0),
		},
		{
			a:        New(0.9, 0.6, 0.75),
			b:        New(0.7, 0.1, 0.25),
			expected: New(1.6, 0.7, 1),
		},
	}
	for _, test := range tests {
		output := test.a.Add(test.b)
		if output.Equal(test.expected) != true {
			// "recieved" typo fixed in the failure message.
			t.Errorf(
				"Failed adding colours (%+v + %+v): expected %+v, received %+v",
				test.a, test.b, test.expected, output,
			)
		}
	}
}
// TestColourSubMethod checks component-wise colour subtraction.
func TestColourSubMethod(t *testing.T) {
	var tests = []struct {
		a, b     Colour
		expected Colour
	}{
		{
			a:        New(0, 0, 0),
			b:        New(0, 0, 0),
			expected: New(0, 0, 0),
		},
		{
			a:        New(0.9, 0.6, 0.75),
			b:        New(0.7, 0.1, 0.25),
			expected: New(0.2, 0.5, 0.5),
		},
	}
	for _, test := range tests {
		output := test.a.Sub(test.b)
		if output.Equal(test.expected) != true {
			// "recieved" typo fixed in the failure message.
			t.Errorf(
				"Failed subtracting colours (%+v - %+v): expected %+v, received %+v",
				test.a, test.b, test.expected, output,
			)
		}
	}
}
// TestColourScalarMultMethod checks scalar multiplication of each channel.
func TestColourScalarMultMethod(t *testing.T) {
	var tests = []struct {
		colour, expected Colour
		multiplier       float64
	}{
		{
			colour:     New(0, 0, 0),
			multiplier: 1,
			expected:   New(0, 0, 0),
		},
		{
			colour:     New(0.2, 0.3, 0.4),
			multiplier: 2,
			expected:   New(0.4, 0.6, 0.8),
		},
	}
	for _, test := range tests {
		output := test.colour.ScalarMult(test.multiplier)
		if output.Equal(test.expected) != true {
			// Message fixed: the operation is multiplication, not addition,
			// and "recieved" was misspelled.
			t.Errorf(
				"Failed scalar multiplying (%+v * %+v): expected %+v, received %+v",
				test.colour, test.multiplier, test.expected, output,
			)
		}
	}
}
// TestColourMultMethod checks the Hadamard (component-wise) product.
func TestColourMultMethod(t *testing.T) {
	var tests = []struct {
		a, b, expected Colour
	}{
		{
			a:        New(0, 0, 0),
			b:        New(0, 0, 0),
			expected: New(0, 0, 0),
		},
		{
			a:        New(1, 0.2, 0.4),
			b:        New(0.9, 1, 0.1),
			expected: New(0.9, 0.2, 0.04),
		},
	}
	for _, test := range tests {
		output := test.a.Mult(test.b)
		if output.Equal(test.expected) != true {
			// Message fixed: the operation is multiplication, not
			// subtraction, and "recieved" was misspelled.
			t.Errorf(
				"Failed multiplying colours (%+v * %+v): expected %+v, received %+v",
				test.a, test.b, test.expected, output,
			)
		}
	}
}
|
// Copyright 2021 Andrew Werner.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package btree
import (
"testing"
"github.com/ajwerner/btree/internal/ordered"
)
// TestBTree exercises basic set behavior: upsert three keys out of order and
// verify the iterator yields them sorted.
func TestBTree(t *testing.T) {
	// assertEq fails the test immediately on mismatch.
	assertEq := func(t *testing.T, exp, got int) {
		t.Helper()
		if exp != got {
			t.Fatalf("expected %d, got %d", exp, got)
		}
	}
	tree := MakeSet(ordered.Compare[int])
	// Insert out of order; iteration below must still be sorted.
	tree.Upsert(2)
	tree.Upsert(12)
	tree.Upsert(1)
	it := tree.Iterator()
	it.First()
	expected := []int{1, 2, 12}
	for _, exp := range expected {
		assertEq(t, exp, it.Cur())
		it.Next()
	}
}
|
package evaluator
import (
"fmt"
"github.com/grossamos/jam0001/shared"
)
// MissingArgumentError reports a required argument that was not supplied.
type MissingArgumentError struct {
	message string
	pos     shared.Position
}

// Error formats the error with its source line number.
func (mae *MissingArgumentError) Error() string {
	return fmt.Sprintf("MissingArgumentError (at line %d): %s", mae.pos.Line, mae.message)
}

// TypeError reports a value of an unexpected type.
type TypeError struct {
	message string
	pos     shared.Position
}

// Error formats the error with its source line number.
func (te *TypeError) Error() string {
	return fmt.Sprintf("TypeError (at line %d): %s", te.pos.Line, te.message)
}

// MathEvalError reports a failure while evaluating a math expression.
type MathEvalError struct {
	message string
	pos     shared.Position
}

// Error formats the error with its source line number.
func (err *MathEvalError) Error() string {
	return fmt.Sprintf("MathEvalError (at line %d): %s", err.pos.Line, err.message)
}

// EvalError reports a general evaluation failure.
type EvalError struct {
	message string
	pos     shared.Position
}

// Error formats the error with its source line number.
func (err *EvalError) Error() string {
	return fmt.Sprintf("EvalError (at line %d): %s", err.pos.Line, err.message)
}

// UnimplementedError reports use of a feature that is not implemented yet.
type UnimplementedError struct {
	message string
	pos     shared.Position
}

// Error formats the error with its source line number.
func (err *UnimplementedError) Error() string {
	return fmt.Sprintf("UnimplementedError (at line %d): %s", err.pos.Line, err.message)
}

// OutOfRangeError reports an index that exceeded its allowed bound r.
type OutOfRangeError struct {
	index, r int
	pos      shared.Position
}

// Error formats the error as "index > bound" with its source line number.
func (err *OutOfRangeError) Error() string {
	return fmt.Sprintf("OutOfRangeError (at line %d): %d > %d", err.pos.Line, err.index, err.r)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package taskswitchcuj
import (
"context"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/cuj"
)
// simpleWebsites are websites to be opened in individual browsers
// with no additional setup required.
// 1. Chromium issue tracker -- considerable amount of elements.
// 2. CrosVideo -- customizable video player.
// NOTE(review): the comment previously also listed WebGL Aquarium, but no
// such URL is present — confirm whether it was removed intentionally.
var simpleWebsites = []string{
	"https://bugs.chromium.org/p/chromium/issues/list",
	"https://crosvideo.appspot.com/?codec=h264_60&loop=true&mute=true",
}
// openChromeTabs opens Chrome tabs and returns the number of browser
// windows that were opened.
//
// This function opens an individual window for each URL in simpleWebsites
// plus a large Google Slides deck, and one extra window with several tabs
// to increase RAM pressure during the test.
func openChromeTabs(ctx context.Context, tconn, bTconn *chrome.TestConn, cs ash.ConnSource, bt browser.Type, tabletMode bool) (int, error) {
	const numExtraWebsites = 5
	// Also open a large slide deck for RAM pressure.
	slidesURL, err := cuj.GetTestSlidesURL(ctx)
	if err != nil {
		return 0, errors.Wrap(err, "failed to get Google Slides URL")
	}
	// Build a fresh slice instead of shadowing and appending to the
	// package-level simpleWebsites, which could alias its backing array.
	websites := make([]string, 0, len(simpleWebsites)+1)
	websites = append(websites, simpleWebsites...)
	websites = append(websites, slidesURL)
	// Keep track of the initial number of windows, to ensure
	// we open the right number of windows.
	ws, err := ash.GetAllWindows(ctx, tconn)
	if err != nil {
		return 0, errors.Wrap(err, "failed to get window list")
	}
	initialNumWindows := len(ws)
	// Open up a single window with a lot of tabs, to increase RAM pressure.
	tabs, err := cuj.NewTabs(ctx, cs, false, numExtraWebsites)
	if err != nil {
		return 0, errors.Wrap(err, "failed to bulk open tabs")
	}
	// Lacros specific setup to close "New Tab" window.
	if bt == browser.TypeLacros {
		// Don't include the "New Tab" window in the initial window count.
		initialNumWindows--
		if err := browser.CloseTabByTitle(ctx, bTconn, "New Tab"); err != nil {
			return 0, errors.Wrap(err, `failed to close "New Tab" tab`)
		}
	}
	// Open up an individual window for each website.
	taskSwitchTabs, err := cuj.NewTabsByURLs(ctx, cs, true, websites)
	if err != nil {
		return 0, err
	}
	tabs = append(tabs, taskSwitchTabs...)
	// Close all current connections to tabs, because we don't need them.
	for _, t := range tabs {
		if err := t.Conn.Close(); err != nil {
			return 0, errors.Wrapf(err, "failed to close connection to %s", t.URL)
		}
	}
	if !tabletMode {
		if err := ash.ForEachWindow(ctx, tconn, func(w *ash.Window) error {
			return ash.SetWindowStateAndWait(ctx, tconn, w.ID, ash.WindowStateNormal)
		}); err != nil {
			return 0, errors.Wrap(err, "failed to set each window to normal state")
		}
	}
	// Expected number of browser windows: one per opened website, plus the
	// window with many tabs.
	expectedNumBrowserWindows := len(websites) + 1
	if ws, err := ash.GetAllWindows(ctx, tconn); err != nil {
		return 0, errors.Wrap(err, "failed to get window list after opening Chrome tabs")
	} else if expectedNumWindows := expectedNumBrowserWindows + initialNumWindows; len(ws) != expectedNumWindows {
		// This was errors.Wrapf(err, ...), but err is nil on this branch, so
		// Wrapf returned nil and the mismatch was silently swallowed.
		return 0, errors.Errorf("unexpected number of windows open after launching Chrome tabs, got: %d, expected: %d", len(ws), expectedNumWindows)
	}
	return expectedNumBrowserWindows, nil
}
|
package http
import (
"marketplace/accounts/domain"
"marketplace/accounts/internal/usecase"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/go-pg/pg/v10"
"github.com/sirupsen/logrus"
)
// GetUserByIdResponse is the JSON shape for a single account. Password and
// Balance are tagged "-" so they are never serialized.
// NOTE(review): the handler below returns GetUserResponse, not this type —
// confirm whether GetUserByIdResponse is still used.
type GetUserByIdResponse struct {
	Id       int64        `json:"id"`
	Email    string       `json:"email"`
	Username string       `json:"username"`
	Password string       `json:"-"`
	Balance  float64      `json:"-"`
	Ads      []domain.Ads `json:"ads"`
	Admin    bool         `json:"admin"`
}
// GetUserByIdHandler returns a gin handler that loads the account whose id
// is given in the :id path parameter and writes it as JSON.
// Responses: 400 for a non-integer id, 404 when no account is returned,
// 500 on any other usecase error.
func GetUserByIdHandler(db *pg.DB, cmd usecase.GetUserByIdCmd) gin.HandlerFunc {
	return func(c *gin.Context) {
		userId := c.Param("id")
		intUserId, err := strconv.ParseInt(userId, 10, 64)
		if err != nil {
			logrus.WithError(err).Error("Bad request : The id param must be an integer")
			c.Status(http.StatusBadRequest)
			return
		}
		user, err := cmd(db, c, intUserId)
		// A nil user is checked before err so that "not found" results keep
		// mapping to 404 rather than 500.
		if user == nil {
			logrus.Error("This account can not be found.")
			c.Status(http.StatusNotFound)
			return
		}
		if err != nil {
			// "occured" typo fixed in the log message.
			logrus.WithError(err).Error("An error has occurred.")
			c.Status(http.StatusInternalServerError)
			return
		}
		c.JSON(http.StatusOK, GetUserResponse(*user))
	}
}
|
package main
import (
"database/sql"
"encoding/json"
"net/http"
"strings"
"time"
)
// setupRoutes configures the serve mux for the application:
//   /api/newapikey/     create a new API key from a JSON user
//   /api/{key}          GET  -> list all pairs for the key
//   /api/{key}/{name}   GET/PUT a single pair
//   /                   minimal landing page
func (app *Application) setupRoutes() {
	app.Routes = http.NewServeMux()
	app.Routes.Handle("/api/newapikey/",
		addHeaders(http.HandlerFunc(app.newAPIKey)))
	app.Routes.Handle("/api/", addHeaders(
		http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			// parts drops the leading "api" segment: parts[0] is the API
			// key, parts[1] (when present) is the pair key.
			parts := strings.Split(strings.Trim(req.URL.Path, "/"), "/")[1:]
			switch {
			case len(parts) == 1 && req.Method == http.MethodGet:
				app.listAllForKey(parts[0])(w, req)
			case len(parts) == 2 && req.Method == http.MethodGet:
				app.getValue(parts[0], parts[1])(w, req)
			case len(parts) == 2 && req.Method == http.MethodPut:
				app.putValue(parts[0], parts[1])(w, req)
			case req.Method == http.MethodOptions:
				return // status 200 with cors headers
			default:
				http.NotFound(w, req)
			}
		})))
	app.Routes.Handle("/", addHeaders(
		http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			w.Header().Set("Content-Type", "text/html")
			w.Write([]byte(`<html><body>see <a href="https://github.com/quillaja/kvss">https://github.com/quillaja/kvss</a></body></html>`))
		})))
}
// Applies standard headers to all responses.
func addHeaders(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Access-Control-Allow-Origin", "*")
w.Header().Add("Access-Control-Allow-Methods", "GET, POST, PUT")
w.Header().Add("Access-Control-Allow-Headers", "*")
w.Header().Add("Content-Type", "application/json")
h.ServeHTTP(w, req)
})
}
// dict is a convenience type for simpler JSONifying of arbitrary data.
type dict map[string]interface{}
// newAPIKey creates a new user with a freshly generated API key from the
// JSON request body and echoes the stored record (including the key) back
// as JSON.
func (app *Application) newAPIKey(w http.ResponseWriter, req *http.Request) {
	// get user name, email, and note from req body
	var user User
	dec := json.NewDecoder(req.Body)
	defer req.Body.Close()
	err := dec.Decode(&user)
	if err != nil {
		app.Log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		// Previously fell through here and inserted a zero-valued user
		// after already writing the 500 header.
		return
	}
	// generate key and set time fields
	user.Key = generateKey()
	user.Created = time.Now().UTC()
	user.Modified = time.Now().UTC()
	// insert
	_, err = app.DB.Exec(insertUser,
		user.Created,
		user.Modified,
		user.Name,
		user.Email,
		user.Key,
		user.Note)
	if err != nil {
		app.Log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// return response
	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	err = enc.Encode(user)
	if err != nil {
		app.Log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// listAllForKey returns a JSON array of key-value pairs associated with the
// given apikey. Any database error (including an unknown key) surfaces as
// 404; only encoding failures produce a 500.
func (app *Application) listAllForKey(apikey string) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		// get user owning the apikey
		var user User
		err := app.DB.Get(&user, getUser, apikey)
		if err != nil {
			app.Log.Println(err)
			http.NotFound(w, req)
			return
		}
		// get kv pair data
		pairs := []Pair{}
		err = app.DB.Select(&pairs, selectPairs, user.ID)
		if err != nil {
			app.Log.Println(err)
			http.NotFound(w, req)
			return
		}
		// write response
		enc := json.NewEncoder(w)
		enc.SetIndent("", " ")
		err = enc.Encode(pairs)
		if err != nil {
			app.Log.Println(err)
			w.WriteHeader(http.StatusInternalServerError)
		}
	}
}
// getValue returns a single key-value pair for the given apikey and key.
// Unknown keys and database errors surface as 404.
func (app *Application) getValue(apikey, key string) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		// get user owning the apikey
		var user User
		err := app.DB.Get(&user, getUser, apikey)
		if err != nil {
			app.Log.Println(err)
			http.NotFound(w, req)
			return
		}
		// get pair
		var pair Pair
		err = app.DB.Get(&pair, getPair, user.ID, key)
		if err != nil {
			app.Log.Println(err)
			http.NotFound(w, req)
			return
		}
		// write response
		enc := json.NewEncoder(w)
		enc.SetIndent("", " ")
		err = enc.Encode(dict{
			"key":      pair.Key,
			"value":    pair.Value,
			"apikey":   user.Key, // could probably get rid of this
			"created":  pair.Created,
			"modified": pair.Modified,
		})
		if err != nil {
			app.Log.Println(err)
			w.WriteHeader(http.StatusInternalServerError)
		}
	}
}
// putValue will update or create the key-value pair for the given apikey
// and key. The value must arrive as {"value": "<string>"} in the request
// body and may not exceed maxValueSize bytes.
func (app *Application) putValue(apikey, key string) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		// get user owning the apikey
		var user User
		err := app.DB.Get(&user, getUser, apikey)
		if err != nil {
			app.Log.Println(err)
			http.NotFound(w, req)
			return
		}
		// get pair
		// set a flag to "create" if the db says the key isn't found
		var pair Pair
		update := true
		err = app.DB.Get(&pair, getPair, user.ID, key)
		switch {
		case err == sql.ErrNoRows:
			// add new value
			update = false
			pair.Created = time.Now().UTC()
			pair.OwnerID = user.ID
			pair.Key = key
		case err != nil:
			app.Log.Println(err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		// attempt to read the value from the request body
		body := dict{}
		dec := json.NewDecoder(req.Body)
		defer req.Body.Close()
		err = dec.Decode(&body)
		if err != nil {
			app.Log.Println(err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		// value must be a string and less than a certain size to be accepted
		// by the API.
		value, ok := body["value"].(string)
		if !ok || len(value) > maxValueSize {
			w.WriteHeader(http.StatusUnprocessableEntity)
			w.Write([]byte("value is not a string or is longer than 4096 bytes"))
			return
		}
		pair.Value = value
		pair.Modified = time.Now().UTC()
		// do the update or create (an if/else reads better than the
		// previous switch on a boolean).
		if update {
			// UPDATE
			_, err = app.DB.Exec(updatePair, pair.Value, pair.Modified, pair.ID)
		} else {
			// INSERT
			_, err = app.DB.Exec(insertPair,
				pair.Created,
				pair.Modified,
				pair.OwnerID,
				pair.Key,
				pair.Value)
		}
		if err != nil {
			app.Log.Println(err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		// write the response
		enc := json.NewEncoder(w)
		enc.SetIndent("", " ")
		err = enc.Encode(dict{
			"key":      pair.Key,
			"value":    pair.Value,
			"apikey":   user.Key,
			"created":  pair.Created,
			"modified": pair.Modified,
		})
		if err != nil {
			app.Log.Println(err)
			w.WriteHeader(http.StatusInternalServerError)
		}
	}
}
// max length (bytes) allowed for "value"
// (keep in sync with the hard-coded "4096 bytes" message in putValue)
const maxValueSize = 4096

// queries ("?" placeholders, executed via app.DB)
const (
	getUser     = "SELECT * FROM apikey WHERE key=?"
	insertUser  = "INSERT INTO apikey (created, modified, name, email, key, note) VALUES (?, ?, ?, ?, ?, ?)"
	selectPairs = "SELECT * FROM kvpair WHERE owner_id=?"
	getPair     = "SELECT * FROM kvpair WHERE owner_id=? AND key=?"
	updatePair  = "UPDATE kvpair SET value=?, modified=? WHERE id=?"
	insertPair  = "INSERT INTO kvpair (created, modified, owner_id, key, value) VALUES (?,?,?,?,?)"
)
|
package leetcode_go
// findTilt returns the sum of tilts over every node of the tree, where a
// node's tilt is |sum(left subtree) - sum(right subtree)| (LeetCode 563).
func findTilt(root *TreeNode) int {
	total := 0
	helperP563(root, &total)
	return total
}

// helperP563 walks the subtree rooted at root, accumulates each node's tilt
// into *tiltSum, and returns the sum of all values in the subtree.
func helperP563(root *TreeNode, tiltSum *int) int {
	if root == nil {
		return 0
	}
	left := helperP563(root.Left, tiltSum)
	right := helperP563(root.Right, tiltSum)
	diff := left - right
	if diff < 0 {
		diff = -diff
	}
	*tiltSum += diff
	return left + right + root.Val
}
|
package main
import (
"os"
"path/filepath"
"github.com/shyang107/paw"
"github.com/shyang107/paw/filetree"
"github.com/urfave/cli"
)
// checkArgs resolves the command-line arguments into pdopt: no argument
// uses the current directory as root; one argument is either a root
// directory or a single file; multiple arguments are added as individual
// paths (bad ones are skipped).
func checkArgs(c *cli.Context, pdopt *filetree.PrintDirOption) {
	switch c.NArg() {
	case 0:
		lg.WithField("arg", c.Args().Get(0)).Trace("no argument")
		path, err := filepath.Abs(".")
		if err != nil {
			paw.Error.Println(err)
		}
		pdopt.SetRoot(path)
	case 1:
		// Trace message fixed: it previously said "no argument",
		// copy-pasted from the case above.
		lg.WithField("arg", c.Args().Get(0)).Trace("one argument")
		path, err := filepath.Abs(c.Args().Get(0))
		if err != nil {
			paw.Error.Println(err)
		}
		fi, err := os.Stat(path)
		if err != nil {
			paw.Error.Println(err)
			os.Exit(1)
		}
		if fi.IsDir() {
			pdopt.SetRoot(path)
		} else {
			pdopt.AddPath(path)
		}
	default: // > 1
		lg.WithField("arg", c.Args()).Trace("multi-arguments")
		for i := 0; i < c.NArg(); i++ {
			path, err := filepath.Abs(c.Args().Get(i))
			if err != nil {
				paw.Error.Println(err)
				continue
			}
			pdopt.AddPath(path)
		}
	}
}
|
package chpool
import (
"context"
"github.com/vahid-sohrabloo/chconn/v2"
)
// insertStmt wraps a chconn.InsertStmt obtained from a pooled connection so
// that finishing the statement also returns the connection to the pool.
type insertStmt struct {
	chconn.InsertStmt
	conn Conn
}

// Flush sends the buffered insert data, then releases the pooled connection.
func (s *insertStmt) Flush(ctx context.Context) error {
	// A nil conn means the statement is not bound to a pooled connection.
	if s.conn == nil {
		return nil
	}
	// NOTE(review): s.conn is released but never set to nil, so a Flush
	// followed by Close would call Release twice — confirm the pool
	// tolerates a double release.
	defer s.conn.Release()
	return s.InsertStmt.Flush(ctx)
}

// Close closes the statement and releases the pooled connection.
func (s *insertStmt) Close() {
	if s.conn == nil {
		return
	}
	s.InsertStmt.Close()
	s.conn.Release()
}
|
// project euler (projecteuler.net) problem 1
// solution by Kevin Retzke (retzkek@gmail.com) April 2012
package main
import (
"fmt"
)
// natmult returns the sum of every non-negative integer below max that is a
// multiple of at least one of the given bases. Bases equal to zero are
// skipped so a stray zero cannot cause a division-by-zero panic.
func natmult(bases []int, max int) int {
	result := 0
	for i := 0; i < max; i++ {
		for _, b := range bases {
			if b == 0 {
				// i%0 would panic; a zero base matches nothing.
				continue
			}
			if i%b == 0 {
				result += i
				break
			}
		}
	}
	return result
}
// main checks natmult against the known small example, then prints the
// Project Euler problem 1 answer for multiples of 3 and 5 below 1000.
func main() {
	bases := []int{3, 5}
	// test case
	if r := natmult(bases, 10); r == 23 {
		fmt.Println("Test: pass")
	} else {
		fmt.Printf("Test: fail (r=%v)\n", r)
	}
	// challenge
	fmt.Printf("Result: %v\n", natmult(bases, 1000))
}
|
package biz
import (
"context"
pb "edu/api/sys/v1"
"edu/service/sys/internal/model"
"github.com/golang/protobuf/ptypes"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/timestamppb"
)
// ListPost returns a paged list of posts matching the filters in req,
// wrapped in a generic ApiReply whose Data holds one Any-encoded pb.Post
// per row.
func (uc *AdminUsecase) ListPost(c context.Context, token string, req *pb.ListPostRequest) (reply *pb.ApiReply, err error) {
	// Token validation is currently disabled:
	// out, err := uc.mw.ValidationToken(token)
	// if err != nil {
	// 	return
	// }
	// dp := out.(*ssopb.DataPermission)
	var query model.SysPost
	query.PostId = int(req.PostId)
	query.PostCode = req.PostCode
	query.PostName = req.PostName
	query.Status = req.Status
	result, count, err := uc.d.GetPostPage(&query, int(req.PageSize), int(req.PageIndex))
	if err != nil {
		return nil, err
	}
	list := make([]*anypb.Any, 0, len(result))
	for _, it := range result {
		post := &pb.Post{
			PostId:    int32(it.PostId),
			PostName:  it.PostName,
			PostCode:  it.PostCode,
			Sort:      int32(it.Sort),
			Status:    it.Status,
			Remark:    it.Remark,
			CreatedAt: timestamppb.New(it.CreatedAt),
		}
		wrapped, errMarshal := ptypes.MarshalAny(post)
		if errMarshal != nil {
			return nil, errMarshal
		}
		list = append(list, wrapped)
	}
	reply = &pb.ApiReply{
		Code:    0,
		Message: "OK",
		Count:   int32(count),
		Data:    list,
	}
	return reply, nil
}
|
package utils
// Plural returns the English plural suffix for n items: "" for exactly one
// item and "s" otherwise. The previous `n > 1` test produced "0 item" —
// English uses the plural for zero ("0 items").
func Plural(n int32) string {
	if n == 1 {
		return ""
	}
	return "s"
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import kruntime "k8s.io/apimachinery/pkg/runtime"
//const (
// defaultRootDir = "/var/lib/kubelet"
//
// // When these values are updated, also update test/e2e/framework/util.go
// defaultPodInfraContainerImageName = "gcr.io/google_containers/pause"
// defaultPodInfraContainerImageVersion = "3.0"
// defaultPodInfraContainerImage = defaultPodInfraContainerImageName +
// "-" + runtime.GOARCH + ":" +
// defaultPodInfraContainerImageVersion
//
// // From pkg/kubelet/rkt/rkt.go to avoid circular import
// defaultRktAPIServiceEndpoint = "localhost:15441"
//
// AutoDetectCloudProvider = "auto-detect"
//
// defaultIPTablesMasqueradeBit = 14
// defaultIPTablesDropBit = 15
//)
//var (
// zeroDuration = metav1.Duration{}
// // Refer to [Node Allocatable](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node-allocatable.md) doc for more information.
// defaultNodeAllocatableEnforcement = []string{"pods"}
//)
// addDefaultingFuncs registers this package's generated defaulting
// functions with the given scheme.
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
	return RegisterDefaults(scheme)
}
// SetDefaults_AuthConfiguration applies default values to obj.
//
// It is currently a no-op: the body previously consisted of ~60 lines of
// defaulting logic copied from the kube-proxy configuration defaults, all
// commented out. The dead commented-out code has been removed for
// readability; recover it from version control (or the upstream kube-proxy
// defaults) if individual pieces are needed again.
func SetDefaults_AuthConfiguration(obj *AuthConfiguration) {
}
//func boolVar(b bool) *bool {
// return &b
//}
//var (
// defaultCfg = KubeletConfiguration{}
//)
|
package main
import(
"net/http"
"html/template"
"os"
"path"
"fmt"
"math/rand"
)
//global variables
var Israndom bool = true
var Masterindex int = 0
// Question carries the question text rendered into question.html.
type Question struct {
	Thequestion string
}
// func InitializeVariables {
//@ToDo read text file and initialize variables
// Read a slice from a jason file
// import (
// "encoding/json"
// "fmt"
// "io/ioutil"
// )
// data, err := ioutil.ReadFile("data.json")
// if err != nil {
// fmt.Println(err)
// return
// }
// fmt.Print("data: ",string(data))
// var slice []string
// err = json.Unmarshal(data, &slice)
// if err != nil {
// fmt.Println(err)
// return
// }
// fmt.Printf("slice: %q\n",slice)
//}
// f, err := os.Open("data/list.txt") // For read access.
// if err != nil {
// // Failed to open file, log / handle error
// log.Fatal("Open Filename: ", err)
// panic(err)
// fmt.Println(err)
// return
// }
// defer f.Close()
// Here you may read from f
// }
// GetIndex returns the next question index in [0, thelength-1].
// With userandomindex true a random index is drawn; otherwise the
// index advances sequentially from currentindex, wrapping to 0.
func GetIndex(thelength int, userandomindex bool, currentindex int) int {
	if userandomindex {
		return rand.Intn(thelength)
	}
	next := currentindex + 1
	if next >= thelength {
		next = 0
	}
	return next
}
// GetQuestion picks a coaching question (texts are in Swedish) and
// returns it together with the index it was found at.
func GetQuestion(currentindex int, userandomindex bool) (string, int) {
	questions := [...]string{"Vad är ditt nästa steg?", "Vad är det värsta som kan hända?", "Hur ser ditt drömmål ut?", "Hur känner du inför uppgiften?", "Kan du utveckla?"}
	i := GetIndex(len(questions), userandomindex, currentindex)
	return questions[i], i
}
// main wires up the HTTP handlers and serves on $PORT (default 8080).
func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	fmt.Println(port)
	// handle the specific requests
	http.HandleFunc("/coachquestion", ShowQuestion)
	// handle all other requests: serve files from the "public" directory
	http.Handle("/", http.FileServer(http.Dir("public")))
	// ListenAndServe blocks until the server stops; the original
	// discarded its error, hiding e.g. "address already in use".
	if err := http.ListenAndServe(":"+port, nil); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
// ShowAbout is a placeholder handler for an "about" page.
// @ToDo: not implemented yet; currently writes nothing to the response.
func ShowAbout (rw http.ResponseWriter, r *http.Request) {
	// @ToDo
}
// ShowQuestion renders public/question.html with the next coaching
// question, updating the package-level Masterindex as a side effect.
// NOTE(review): Masterindex is written without synchronization, so
// concurrent requests race on it — confirm whether that matters here.
func ShowQuestion(rw http.ResponseWriter, r *http.Request) {
	question, idx := GetQuestion(Masterindex, Israndom)
	Masterindex = idx
	templatePath := path.Join("public", "question.html")
	t, err := template.ParseFiles(templatePath)
	if err != nil {
		http.Error(rw, err.Error(), http.StatusInternalServerError)
		return
	}
	if err = t.Execute(rw, Question{question}); err != nil {
		http.Error(rw, err.Error(), http.StatusInternalServerError)
	}
}
|
package forum
import (
potato "github.com/rise-worlds/potato-go"
)
// NewVote is an action representing a simple vote to be broadcast
// through the chain network. The action runs under the voter's
// "active" permission.
func NewVote(voter potato.AccountName, proposalName potato.Name, voteValue uint8, voteJSON string) *potato.Action {
	data := potato.NewActionData(Vote{
		Voter:        voter,
		ProposalName: proposalName,
		Vote:         voteValue,
		VoteJSON:     voteJSON,
	})
	return &potato.Action{
		Account: ForumAN,
		Name:    ActN("vote"),
		Authorization: []potato.PermissionLevel{
			{Actor: voter, Permission: potato.PermissionName("active")},
		},
		ActionData: data,
	}
}
// Vote represents the `poc.forum::vote` action.
type Vote struct {
	Voter        potato.AccountName `json:"voter"`         // account casting the vote
	ProposalName potato.Name        `json:"proposal_name"` // proposal being voted on
	Vote         uint8              `json:"vote"`          // numeric vote value
	VoteJSON     string             `json:"vote_json"`     // free-form vote metadata
}
|
package main
import (
"fmt"
"net/http"
"github.com/OrbitalbooKING/booKING/server/controllers"
"github.com/gin-gonic/gin"
"github.com/OrbitalbooKING/booKING/server/services"
)
// main boots the booKING server: static file handler, database
// connection, CSV fixtures, admin account, then the gin controllers.
func main() {
	r := gin.Default()
	// Serve the compiled frontend for any non-API path.
	http.Handle("/", http.FileServer(http.Dir("./build")))
	// Fail fast if the database is unreachable; nothing below works
	// without it. (The original put the success path in an else after
	// panic; early-exit keeps the happy path unindented.)
	if err := services.ConnectDataBase(); err != nil {
		fmt.Println("Database not connected successfully. " + err.Error())
		panic(err)
	}
	fmt.Println("Database connected successfully.")
	services.LoadAllCSV()
	services.CreateAdminAccount()
	controllers.StartAll(r)
}
|
package middleware
import (
"net/http"
"net/http/httputil"
"fmt"
"context"
"encoding/json"
)
// AuthResponse is the JSON body returned by the auth service.
type AuthResponse struct {
	UserId string `json:"user_id"`
}
// AuthMiddleWare authenticates a request by forwarding its
// Authorization header to the local auth service and, on success,
// stores the resolved user id in the request context under "uid".
func AuthMiddleWare(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		client := &http.Client{}
		req, err := http.NewRequest("POST", "http://localhost/api/v1/users/auth", nil)
		if err != nil {
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		req.Header.Set("Authorization", r.Header.Get("Authorization"))
		resp, err := client.Do(req)
		if err != nil {
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		defer resp.Body.Close()
		// The original decoded the body regardless of status, so a
		// rejected token could still yield a (zero-valued) user id.
		if resp.StatusCode != http.StatusOK {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		var a AuthResponse
		err = json.NewDecoder(resp.Body).Decode(&a)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// NOTE(review): plain string context key — downstream readers
		// must use the same literal "uid".
		newCtx := context.WithValue(r.Context(), "uid", a.UserId)
		next.ServeHTTP(w, r.WithContext(newCtx))
	})
}
// RequestDump logs the full incoming request (including body) to
// stdout, then hands the request on to the next handler.
func RequestDump(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		raw, err := httputil.DumpRequest(r, true)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Println(string(raw))
		next.ServeHTTP(w, r)
	})
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"sort"
"strings"
)
// roadTrip parses a semicolon-separated list of "name,distance" stops
// (with a trailing separator), sorts the distances, and returns the
// comma-joined gaps between consecutive stops.
func roadTrip(s string) string {
	stops := strings.Split(s, ";")
	dists := make([]int, len(stops)-1)
	var d int
	for i := range dists {
		fmt.Sscan(strings.Split(stops[i], ",")[1], &d)
		dists[i] = d
	}
	sort.Ints(dists)
	gaps := make([]string, len(dists))
	prev := 0
	for i, dist := range dists {
		gaps[i] = fmt.Sprint(dist - prev)
		prev = dist
	}
	return strings.Join(gaps, ",")
}
// main reads the file named by the first CLI argument and prints the
// roadTrip result for each input line.
func main() {
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		fmt.Println(roadTrip(scanner.Text()))
	}
	// Scan returning false can mean EOF or a read failure; the
	// original dropped the failure case.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
|
package util
import "encoding/json"
import "net/http"
// ResponseType defines the body of an HTTP response. Field names are
// serialized in Portuguese: erro (error flag), mensagem (message),
// dados (payload).
type ResponseType struct {
	Erro     bool        `json:"erro"`
	Mensagem string      `json:"mensagem"`
	Dados    interface{} `json:"dados"`
}
// NewResponseType builds a response body for answering the request.
func NewResponseType(hasError bool, message string, data interface{}) ResponseType {
	var r ResponseType
	r.Erro = hasError
	r.Mensagem = message
	r.Dados = data
	return r
}
// Respond writes the JSON-encoded response body with the given HTTP
// status code.
func Respond(w http.ResponseWriter, httpStatus int, response ResponseType) {
	// Headers must be set before WriteHeader: once the status line is
	// written the header map is committed, so the original's
	// Content-Type was silently dropped.
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(httpStatus)
	json.NewEncoder(w).Encode(response)
}
|
package closureBase
import "fmt"
// a returns a counter closure: each call increments the captured
// counter, prints it, and returns it. (The closure captures the
// variable itself, not a copy.)
func a() func() int {
	counter := 0
	return func() int {
		counter++
		fmt.Println(counter)
		return counter
	}
}
// AClosure demonstrates that each call to a() yields an independent
// counter: both closures print 1, 2, 3.
func AClosure() {
	first := a()
	for i := 0; i < 3; i++ {
		first()
	}
	second := a()
	for i := 0; i < 3; i++ {
		second()
	}
}
// Test prints the address and value of a local variable, then returns
// a closure printing the same captured variable — both lines show the
// identical address, demonstrating shared storage.
func Test() func() {
	val := 100
	fmt.Printf("fis - > x (%p) = %d\n", &val, val)
	closure := func() {
		fmt.Printf("second -> x (%p) = %d\n", &val, val)
	}
	return closure
}
// add returns an accumulator seeded with base; each call adds its
// argument to the captured base and returns the running total.
// (The closure captures the function parameter itself.)
func add(base int) func(int) int {
	return func(delta int) int {
		base += delta
		return base
	}
}
// AddClosure shows two independent accumulators created by add: the
// first prints 15 then 21, the second starts fresh and prints 16.
func AddClosure() {
	acc1 := add(10)
	fmt.Println(acc1(5))
	fmt.Println(acc1(6))
	acc2 := add(10)
	fmt.Println(acc2(6))
}
// test01 returns an add and a subtract closure that share the same
// captured base — base acts like a tiny piece of shared state, so
// calls to either closure affect the other's result.
func test01(base int) (func(int) int, func(int) int) {
	plus := func(n int) int {
		base += n
		return base
	}
	minus := func(n int) int {
		base -= n
		return base
	}
	return plus, minus
}
// Test01 demonstrates the shared base of test01's closures:
// f1(2) prints 7, then f2(3) prints 4.
func Test01() {
	adder, subber := test01(5)
	fmt.Println("f1 ---> ", adder(2))
	fmt.Println("f2 ---> ", subber(3))
}
|
package mondohttp
import (
"net/url"
"strconv"
)
// ProductionAPI is the base URL of Mondo's production API.
const ProductionAPI string = "https://api.getmondo.co.uk/"

// StagingAPI is the usual base URL of Mondo's staging API (commonly available
// during hackathons). Not guaranteed to be reachable outside those events.
const StagingAPI string = "https://staging-api.gmon.io/"
// auth returns the Authorization header key/value pair for the given
// credential string. (Param renamed so it no longer shadows the
// function's own name.)
func auth(token string) (string, string) {
	return "Authorization", token
}
// formContentType returns the Content-Type header pair for URL-encoded
// form submissions.
func formContentType() (string, string) {
	const key, value = "Content-Type", "application/x-www-form-urlencoded"
	return key, value
}
func appendPaginationParams(query *url.Values, since, before string, limit int) {
if since != "" {
query.Set("since", since)
}
if before != "" {
query.Set("before", before)
}
if limit != 0 {
query.Set("limit", strconv.Itoa(limit))
}
}
func appendQueryMap(query *url.Values, prefix, suffix string, params map[string]string) {
for key, value := range params {
query.Add(prefix+key+suffix, value)
}
}
|
// +build !linux !static

package cgo
// #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -ldl
import "C"
|
package individualparsers
import (
"strings"
)
// PowershellKeyword detects pastes that mention PowerShell.
type PowershellKeyword struct{}

// Match reports whether the paste mentions "powershell", ignoring
// case and stripping cmd.exe-style caret (^) obfuscation first.
func (b PowershellKeyword) Match(content []byte) (bool, error) {
	normalized := strings.Replace(strings.ToLower(string(content)), "^", "", -1)
	return strings.Contains(normalized, "powershell"), nil
}

// Normalize returns the content unchanged, classified as
// non-actionable.
func (b PowershellKeyword) Normalize(content []byte) (int, []byte, error) {
	return KeyNonActionable, content, nil
}
|
package server
import (
"strconv"
"strings"
"net/http"
"encoding/json"
"github.com/google/uuid"
"github.com/gorilla/mux"
"toggl-card/internal/card"
"toggl-card/internal/deck"
)
// api is the concrete Server implementation wrapping a configured router.
type api struct {
	router http.Handler
}
// Server is the interface that wraps the router method
type Server interface {
	// Router exposes the configured http.Handler for serving.
	Router() http.Handler
}
// New implements Server iface and creates a router to handle api endpoints
func New() Server {
	router := mux.NewRouter()
	server := &api{}
	// Endpoints to be handled by the router
	router.HandleFunc("/deck", server.Open).Methods("GET")
	router.HandleFunc("/deck/create", server.Create).Methods("POST")
	router.HandleFunc("/deck/{uuid}/draw", server.Draw).Methods("GET")
	server.router = router
	return server
}
// Router returns the http.Handler backing this api instance.
func (a *api) Router() http.Handler { return a.router }
// Create a deck of cards accepting shuffle and custom cards parameters.
// With a "cards" query parameter a partial deck is built from the
// listed codes (unknown codes are silently skipped); otherwise a full
// deck is created, shuffled when shuffle=true. The new deck is stored
// in deck.Decks and its id/shuffled/remaining summary is returned.
// NOTE(review): when custom cards are given, the shuffle parameter is
// ignored (NewPartial is always called with false) — confirm intended.
// NOTE(review): deck.Decks is appended without synchronization —
// concurrent Create calls would race; confirm single-threaded use.
func (a *api) Create(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	shuffle := r.URL.Query().Get("shuffle")
	cardsStr := r.URL.Query().Get("cards")
	var d deck.Deck
	if len(cardsStr) != 0 {
		cardsLst := strings.Split(cardsStr, ",")
		var cards []card.Card
		cardCodes := card.AllCodes()
		// Keep only codes that map to known cards.
		for _, code := range cardsLst {
			if v, ok := cardCodes[code]; ok {
				cards = append(cards, v)
			}
		}
		d = deck.NewPartial(false, cards)
	} else {
		if shuffle == "true" {
			d = deck.New(true)
			d.Shuffle()
		} else {
			d = deck.New(false)
		}
	}
	deck.Decks = append(deck.Decks, d)
	json.NewEncoder(w).Encode(struct {
		ID        uuid.UUID `json:"deck_id"`
		Shuffled  bool      `json:"shuffled"`
		Remaining int       `json:"remaining"`
	}{d.ID, d.Shuffled, d.Remaining})
}
// Open a given deck by its UUID (query parameter) and return all its
// properties as JSON; 404 for an unknown deck, 400 when the parameter
// is missing.
func (a *api) Open(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	// Renamed from "uuid"/"deck": the originals shadowed the imported
	// uuid package and the deck package inside the loop.
	id := r.URL.Query().Get("uuid")
	if len(id) == 0 {
		http.Error(w, "400. Missing UUID parameter.", 400)
		return
	}
	for _, d := range deck.Decks {
		if d.ID.String() == id {
			json.NewEncoder(w).Encode(d)
			return
		}
	}
	http.Error(w, "404. Deck with provided UUID could not be found.", 404)
}
// Draw n cards from a Deck that must exist or be passed over
func (a *api) Draw(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
vars := mux.Vars(r)
uuid := vars["uuid"]
count := r.URL.Query().Get("count")
n, countErr := strconv.Atoi(count)
if countErr == nil && n > 0 {
for i, d := range deck.Decks {
if d.ID.String() == uuid {
drawn, drawErr := d.Draw(n)
// Update deck from Decks slice with remaining cards
deck.Decks[i].Cards = d.Cards
deck.Decks[i].Remaining = len(d.Cards)
if drawErr == nil {
json.NewEncoder(w).Encode(struct {
Cards []card.Card `json:"cards"`
}{drawn})
return
}
http.Error(w, "400. Not enough cards in the deck", 400)
return
}
}
http.Error(w, "404. Deck with provided UUID could not be found.", 404)
} else {
http.Error(w, "400. Count parameter with number of cards to be drawn needs to be provided.", 400)
}
} |
package server
import (
pb "github.com/1851616111/xchain/pkg/protos"
sliceutil "github.com/1851616111/xchain/pkg/util/slice"
"log"
"os"
)
var (
	// endPointLog writes endpoint/event diagnostics to stderr.
	endPointLog = log.New(os.Stderr, "[Event]", log.LstdFlags)
)
// newEndPointManager returns an EndPointManager with empty
// ID<->address lookup maps ready for use.
func newEndPointManager() *EndPointManager {
	return &EndPointManager{
		IDToAddress: map[string]string{},
		AddressToID: map[string]string{},
	}
}
// findNewEndPointHandler invokes handler for every endpoint in epList
// that is not yet registered with the manager.
func (m *EndPointManager) findNewEndPointHandler(epList []*pb.EndPoint, handler func(*pb.EndPoint)) {
	for _, candidate := range epList {
		if m.ifExistEndPoint(candidate) {
			continue
		}
		handler(candidate)
	}
}
// ifExistEndPoint reports whether the endpoint's ID is already
// registered. (Parameter renamed from epList — it is a single
// endpoint, not a list.)
func (m *EndPointManager) ifExistEndPoint(ep *pb.EndPoint) bool {
	_, exist := m.IDToAddress[ep.Id]
	return exist
}
// addEndPoint registers ep in the matching validator/non-validator
// list and records its ID<->address mapping.
func (m *EndPointManager) addEndPoint(ep pb.EndPoint) {
	switch ep.Type {
	case pb.EndPoint_VALIDATOR:
		m.ValidatorList = append(m.ValidatorList, ep.Id)
	case pb.EndPoint_NON_VALIDATOR:
		m.NonValidateList = append(m.NonValidateList, ep.Id)
	}
	m.IDToAddress[ep.Id] = ep.Address
	m.AddressToID[ep.Address] = ep.Id
}
// delEndPoint removes the endpoint with the given ID from both lists
// and from the lookup maps; unknown IDs are a no-op.
func (m *EndPointManager) delEndPoint(delID string) {
	address, exist := m.IDToAddress[delID]
	if !exist {
		return
	}
	// Mutating a slice while ranging over it skips the element that
	// shifts into the removed slot; IDs are keys in IDToAddress and so
	// unique, so stop after the first hit.
	for idx, id := range m.ValidatorList {
		if id == delID {
			m.ValidatorList = append(m.ValidatorList[:idx], m.ValidatorList[idx+1:]...)
			break
		}
	}
	for idx, id := range m.NonValidateList {
		if id == delID {
			m.NonValidateList = append(m.NonValidateList[:idx], m.NonValidateList[idx+1:]...)
			break
		}
	}
	delete(m.IDToAddress, delID)
	delete(m.AddressToID, address)
}
// list returns every registered endpoint: all validators first, then
// all non-validators, rebuilt from the ID lists and the address map.
func (m *EndPointManager) list() []*pb.EndPoint {
	if Is_Develop_Mod {
		endPointLog.Printf("validate:%v\n", m.ValidatorList)
		endPointLog.Printf("non-validate:%v\n", m.NonValidateList)
	}
	validateEPs, nonValidateEPs := []*pb.EndPoint{}, []*pb.EndPoint{}
	// Collect validator endpoints via sliceutil.RangeSlice.
	// NOTE(review): exec is shared by both RangeSlice calls —
	// presumably it allows aborting the iteration; confirm semantics.
	rangeValidateFunc := func(idx int, id string) error {
		validateEPs = append(validateEPs, &pb.EndPoint{
			Id:      id,
			Address: m.IDToAddress[id],
			Type:    pb.EndPoint_VALIDATOR,
		})
		return nil
	}
	exec := true
	sliceutil.RangeSlice(m.ValidatorList, &exec, rangeValidateFunc)
	// Collect non-validator endpoints the same way.
	rangeNonValidateFunc := func(idx int, id string) error {
		nonValidateEPs = append(nonValidateEPs, &pb.EndPoint{
			Id:      id,
			Address: m.IDToAddress[id],
			Type:    pb.EndPoint_NON_VALIDATOR,
		})
		return nil
	}
	sliceutil.RangeSlice(m.NonValidateList, &exec, rangeNonValidateFunc)
	return append(validateEPs, nonValidateEPs...)
}
// ListWithOutLocalEP returns l without the local node's endpoint.
// The original removed the element with append on the input slice,
// which rewrites the caller's backing array; build the result without
// touching l instead.
func ListWithOutLocalEP(l []*pb.EndPoint) []*pb.EndPoint {
	if len(l) == 0 {
		return l
	}
	local := node.GetLocalEndPoint()
	for idx, ep := range l {
		if ep.Id == local.Id {
			out := make([]*pb.EndPoint, 0, len(l)-1)
			out = append(out, l[:idx]...)
			return append(out, l[idx+1:]...)
		}
	}
	return l
}
|
package resolver
import (
"fmt"
"sort"
"github.com/aws/aws-sdk-go/aws"
"github.com/opsee/basic/schema"
opsee_aws_cloudwatch "github.com/opsee/basic/schema/aws/cloudwatch"
opsee "github.com/opsee/basic/service"
opsee_types "github.com/opsee/protobuf/opseeproto/types"
"golang.org/x/net/context"
)
// metricList orders schema.Metrics by ascending timestamp for sort.Sort.
type metricList []*schema.Metric

func (l metricList) Len() int           { return len(l) }
func (l metricList) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
func (l metricList) Less(i, j int) bool { return l[i].Timestamp.Millis() < l[j].Timestamp.Millis() }
// GetMetricStatistics fetches CloudWatch datapoints for the given
// region/input via the Bezos proxy service and converts them into
// schema.Metrics sorted by ascending timestamp.
// NOTE(review): only each datapoint's Average is used as the value
// even when input.Statistics names a different statistic; the
// Statistic field is merely labeled from the request — confirm this
// is intended.
func (c *Client) GetMetricStatistics(ctx context.Context, user *schema.User, region string, input *opsee_aws_cloudwatch.GetMetricStatisticsInput) (*schema.CloudWatchResponse, error) {
	resp, err := c.Bezos.Get(ctx, &opsee.BezosRequest{User: user, Region: region, VpcId: "global", Input: &opsee.BezosRequest_Cloudwatch_GetMetricStatisticsInput{input}})
	if err != nil {
		return nil, err
	}
	output := resp.GetCloudwatch_GetMetricStatisticsOutput()
	if output == nil {
		return nil, fmt.Errorf("error decoding aws response")
	}
	metrics := make([]*schema.Metric, len(output.Datapoints))
	for i, d := range output.Datapoints {
		// Label each metric with the first requested statistic, if any.
		var statistic string
		if len(input.Statistics) > 0 {
			statistic = input.Statistics[0]
		}
		metrics[i] = &schema.Metric{
			Name: aws.StringValue(input.MetricName),
			// we really need support for other things?
			Value:     aws.Float64Value(d.Average),
			Timestamp: d.Timestamp,
			Unit:      aws.StringValue(d.Unit),
			Statistic: statistic,
		}
	}
	sort.Sort(metricList(metrics))
	return &schema.CloudWatchResponse{
		Namespace: aws.StringValue(input.Namespace),
		Metrics:   metrics,
	}, nil
}
// QueryCheckMetrics queries the Marktricks (kairosdb-style) service
// for one named metric of one check between ts0 and ts1, grouped by
// the region tag and reduced with the given aggregator, then flattens
// the nested query results into a slice of schema.Metrics.
// NOTE(review): Unit is hard-coded to "milliseconds" — confirm every
// check metric really is a duration.
func (c *Client) QueryCheckMetrics(ctx context.Context, user *schema.User, checkId, metricName string, ts0, ts1 *opsee_types.Timestamp, aggregator *opsee.Aggregator) ([]*schema.Metric, error) {
	req := &opsee.QueryMetricsRequest{
		Metrics: []*opsee.QueryMetric{
			&opsee.QueryMetric{
				Name: metricName,
				GroupBy: []*opsee.GroupBy{
					&opsee.GroupBy{
						Name: "tag",
						Tags: []string{"region"},
					},
				},
				// Restrict results to the requested check.
				Tags: map[string]*opsee.StringList{
					"check": &opsee.StringList{Values: []string{checkId}},
				},
				Aggregators: []*opsee.Aggregator{
					aggregator,
				},
			},
		},
		CacheTime:     0,
		StartAbsolute: ts0,
		EndAbsolute:   ts1,
	}
	r, err := c.Marktricks.QueryMetrics(ctx, req)
	if err != nil {
		return nil, err
	}
	// convert kairosdb types to schema.Metric
	var m []*schema.Metric
	for _, query := range r.Queries {
		for _, result := range query.Results {
			// Keep only the first value of each result tag.
			var tags []*schema.Tag
			for k, v := range result.Tags {
				if len(v.Values) > 0 {
					tags = append(tags, &schema.Tag{k, v.Values[0]})
				}
			}
			// One schema.Metric per datapoint, sharing the tag set.
			for _, dp := range result.Values {
				nm := &schema.Metric{
					Name:      result.Name,
					Value:     float64(dp.Value),
					Timestamp: dp.Timestamp,
					Unit:      "milliseconds",
					Tags:      tags,
				}
				m = append(m, nm)
			}
		}
	}
	return m, nil
}
|
// Copyright 2018 Aleksandr Demakin. All rights reserved.
package pjw
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestPJW exercises the PJW hash: size/block-size accessors, the
// zero-valued empty sum, incremental writes vs one write of the same
// bytes, Reset behavior, and Sum appending to an existing prefix.
func TestPJW(t *testing.T) {
	const (
		hello = "Hello"
		world = ", world!"
	)
	r := require.New(t)
	pjw := New()
	// 32-bit digest: 4-byte sum, 1-byte blocks, zero initial state.
	r.Equal(4, pjw.Size())
	r.Equal(1, pjw.BlockSize())
	r.Equal(uint32(0), pjw.Sum32())
	r.Equal([]byte{0, 0, 0, 0}, pjw.Sum(nil))
	pjw.Reset()
	written, err := pjw.Write([]byte(hello))
	r.NoError(err)
	r.Equal(len(hello), written)
	r.Equal(uint32(0x004ec32f), pjw.Sum32())
	r.Equal([]byte{0, 0x4e, 0xc3, 0x2f}, pjw.Sum(nil))
	// Continuing the stream must accumulate state.
	written, err = pjw.Write([]byte(world))
	r.NoError(err)
	r.Equal(len(world), written)
	r.Equal(uint32(0x0925c3c1), pjw.Sum32())
	r.Equal([]byte{0x09, 0x25, 0xc3, 0xc1}, pjw.Sum(nil))
	pjw.Reset()
	r.Equal(uint32(0), pjw.Sum32())
	// A one-shot write of the concatenation matches the incremental sum.
	written, err = pjw.Write([]byte(hello + world))
	r.NoError(err)
	r.Equal(len(hello)+len(world), written)
	r.Equal(uint32(0x925c3c1), pjw.Sum32())
	// Sum appends the digest to the provided prefix slice.
	r.Equal([]byte{0x01, 0x00, 0xff, 0x09, 0x25, 0xc3, 0xc1}, pjw.Sum([]byte{0x01, 0x00, 0xff}))
}
|
package solutions
// buildTree reconstructs a binary tree from its preorder and inorder
// traversals (LeetCode 105); values are assumed unique, as the
// problem guarantees.
func buildTree(preorder []int, inorder []int) *TreeNode {
	if len(preorder) == 0 || len(inorder) == 0 {
		return nil
	}
	// The first preorder value is the root; its index in inorder
	// splits the left and right subtrees.
	position := -1
	for i, number := range inorder {
		if preorder[0] == number {
			position = i
			break // the original scanned the whole slice needlessly
		}
	}
	if position < 0 {
		// Inconsistent traversals: the original would panic on the
		// slice bounds below; fail soft instead.
		return nil
	}
	return &TreeNode{
		preorder[0],
		buildTree(preorder[1:position+1], inorder[:position]),
		buildTree(preorder[position+1:], inorder[position+1:]),
	}
}
|
package main
import (
"fmt"
"math"
// "encoding/hex"
"encoding/binary"
"go.bug.st/serial.v1"
// "go.bug.st/serial.v1/enumerator"
// "github.com/bugst/go-serial"
// "github.com/bugst/go-serial/enumerator"
)
// Global variable holding the selected serial port for the LPMS9 IMU.
var g_port_lpms9 serial.Port
// serial_lpms9_open opens the named serial port with the LPMS9's
// fixed settings: 921600 baud, 8 data bits, no parity, one stop bit.
func serial_lpms9_open(name string) (serial.Port, error) {
	cfg := serial.Mode{
		BaudRate: 921600,
		Parity:   serial.NoParity,
		DataBits: 8,
		StopBits: serial.OneStopBit,
	}
	return serial.Open(name, &cfg)
}
// serial_lpms9_read starts a goroutine that frames bytes arriving on
// the port, then processes each complete frame until the reader
// signals a stop.
func serial_lpms9_read(port serial.Port) {
	out := make(chan []byte, 32)
	stop := make(chan string)
	// Start the receiver goroutine. The original passed the global
	// g_port_lpms9 here despite taking port as a parameter; use the
	// parameter so the function reads from the port it was given.
	go recv_lpms9(port, out, stop)
	// Consume framed messages until the receiver stops.
	for {
		select {
		case msg := <-out:
			process_lpms9(msg)
		case <-stop:
			return
		}
	}
}
// recv_lpms9 reads raw bytes from the serial port and splits them
// into LPMS9 frames (0x3A header byte, little-endian body length at
// bytes 5:7, 0x0D 0x0A trailer), sending each complete frame on out.
// On a read error it signals stop and returns.
// NOTE(review): frame[0] is read before checking len(frame) > 0, so a
// zero-byte first read would panic — confirm the port never returns
// n == 0 with a nil error.
func recv_lpms9(port serial.Port, out chan <- []byte, stop chan <- string) {
	frame := make([]byte, 0)
	buf := make([]byte, 10000000)
	for {
		// time.Sleep(3000* time.Millisecond)
		n, err := port.Read(buf)
		if err != nil {
			stop <- "stop"
			return
		}
		frame = append(frame, buf[:n]...)
		// Framing: resynchronize to the 0x3A start-of-frame byte.
		if frame[0] != 0x3A {
			for i := 0; i < len(frame); i++ {
				if (frame[i] == 0x3A) {
					frame = frame[i:len(frame)]
					break;
				}
			}
		}
		if len(frame) < 11 {
			// Not enough data buffered for a full frame yet.
			continue
		}
		body_len := binary.LittleEndian.Uint16(frame[5:7])
		if len(frame) < int(body_len + 11) {
			// fmt.Println("len: ", len(frame), " body_len ", int(body_len + 10))
			continue
		}
		// fmt.Println("len: ", body_len, " frame: ", hex.EncodeToString(frame[:body_len + 7]))
		// Verify the 0x0D 0x0A frame trailer before forwarding.
		if frame[body_len + 9] == 0x0D &&
			frame[body_len + 10] == 0x0A {
			// Forward the complete frame.
			out <- frame[:body_len + 11]
		}
		// fmt.Println("frame2: ", hex.EncodeToString(frame[:body_len + 11]))
		// Advance past this frame to the next one.
		frame = frame[body_len + 11 :]
	}
}
// process_lpms9 decodes one framed LPMS9 message. For message type
// 0x09 it extracts the yaw angle (radians converted to degrees), a
// linear-acceleration component and the z gyro rate, then appends a
// timestamped line to sensor_lpms9.dat.
// NOTE(review): the field offsets (71:75, 83:87, 31:35) follow the
// LPMS9 wire format — confirm against the sensor manual.
func process_lpms9(data []byte) {
	if len(data) < 11 {
		return
	}
	switch data[3] {
	case 0x09:
		// The original checked only len >= 11 but reads up to byte
		// 87 below; a short type-0x09 frame would panic.
		if len(data) < 87 {
			return
		}
		angel := math.Float32frombits(binary.LittleEndian.Uint32(data[71:75])) * 180 / math.Pi
		angel_a := math.Float32frombits(binary.LittleEndian.Uint32(data[83:87]))
		gyr_z := math.Float32frombits(binary.LittleEndian.Uint32(data[31:35]))
		s := fmt.Sprintf("%d %.0f %.2f %.2f\n", NowAsUnixMilli(), angel, angel_a, gyr_z)
		write_data("sensor_lpms9.dat", s)
	}
}
|
package main
import "fmt"
// main demonstrates pointer basics: taking addresses with &,
// dereferencing with *, and how writing through a pointer mutates the
// original variable while earlier value copies are unaffected.
func main() {
	v := 43   // v contains int 43
	w := v    // w contains int 43 (a copy, not an alias)
	vw := &v  // vw contains address of v
	ww := &w  // ww contains address of w
	wv := *&v // wv contains int 43 (dereference of v's own address)
	*vw = 57  // changes v to int 57, but w (a copy) remains 43
	fmt.Println("'v'\t", v)
	fmt.Println("w := v", w)
	fmt.Println("'vw := &v' assigns to 'vw' the address of 'v':", vw)
	fmt.Println("'ww := &w' assigns to 'ww' the address of 'w':", ww)
	fmt.Printf("'vw' points to the address of v, and is therefore of type %T:", vw)
	fmt.Println("'*vw' points to the value stored 'vw' which is an address containing '43':", *vw)
	fmt.Println("'*ww' points to the value stored in the address of w which is assigned the value of 'v' which is '43':", *ww)
	fmt.Println("\n\n,'wv := *&v': assigns to 'wv' a pointer to the value in the address of v thus returning '43':", wv)
	fmt.Println("\n, '*vw = 57' assigns the value '57' to the address &vw therefore changing the value in the original location 'v'. '*vw' is '57':", *vw, "and 'v' is also '57':", v)
	fmt.Println("\n, However, 'w' still contains the original value of 'v':", w)
	fmt.Println("\n, And 'wv' which contains the value in the address of 'v' (*&v) is changed to '57':", *&v)
}
|
// Package intl provides utilties for internationalization.
package intl
import "strings"
// An L10NString is a string which should be localized. Defined as its
// own type so that you can't pass a variable of type string as the
// fmt argument to L10N.Fmt, but you can still pass a string literal
// (untyped constants convert implicitly; typed strings do not).
type L10NString string
// An L10N is a localization; it can be used to translate strings to
// the target locale. The zero value assumes format strings are
// already in the target locale, providing no translations.
type L10N struct {
	// Mapping from source code format strings to format strings
	// for the target locale. Strings absent from the map are used
	// untranslated by Fmt.
	FmtStrings map[L10NString]L10NString
}
// Fmt formats a message for the target locale. 'fmt' may contain
// format specifiers:
//
// %% emits a literal '%'
//
// %0, %1, ... %9 each emit args[i] where i is the number after
// the %.
//
// If the character after the % is anything else, or if the % is
// at the end of the string, it is emitted literally.
//
// Before formatting, the format string is first translated to the
// target locale; strings unknown to the receiver are used as-is.
func (l L10N) Fmt(fmt L10NString, args ...string) string {
	if translated, ok := l.FmtStrings[fmt]; ok {
		return format(translated, args...)
	}
	return format(fmt, args...)
}
// format is like L10N.Fmt, but it doesn't translate the format string
// first. It walks the string %-to-%: "%%" emits '%', "%0".."%9" emit
// the corresponding argument, and anything else (including a trailing
// '%') is emitted literally.
func format(f L10NString, args ...string) string {
	var b strings.Builder
	s := string(f)
	i := strings.Index(s, "%")
	// Optimization: avoid a copy if there are no format specifiers:
	if i == -1 {
		return s
	}
	for i != -1 {
		// Explicitly check for the last char so we don't get
		// an out of bounds panic below:
		if i == len(s)-1 {
			break
		}
		b.WriteString(s[:i])
		c := s[i+1]
		if c == '%' {
			b.WriteByte('%')
		} else if c >= '0' && c <= '9' && int(c-'0') < len(args) {
			// Guard against specifiers beyond the supplied args: the
			// original indexed args[c-'0'] unconditionally and
			// panicked on e.g. "%3" with two arguments; such a
			// specifier now falls through and is emitted literally.
			b.WriteString(args[c-'0'])
		} else {
			b.WriteString(s[i : i+2])
		}
		s = s[i+2:]
		i = strings.Index(s, "%")
	}
	b.WriteString(s)
	return b.String()
}
|
package ecs
import "fmt"
// Entity is a reference to an entity in a Core: the owning Core
// pointer plus the entity's 1-based ID within it.
type Entity struct {
	co *Core
	id EntityID
}
// NilEntity is the zero of Entity, representing "no entity, in no Core".
// Its accessor methods return zero values rather than panicking.
var NilEntity = Entity{}
// String renders the entity for debugging: "Nil<>[id]" when it has no
// Core, "corePtr<>[id]" when it has a Core but the zero ID (no type
// row to show), and "corePtr<type>[id]" otherwise.
func (ent Entity) String() string {
	if ent.co == nil {
		return fmt.Sprintf("Nil<>[%v]", ent.id)
	}
	// The original repeated `ent.co == nil` here, leaving this branch
	// dead and letting a zero ID fall through to index types[-1]
	// below; checking the zero ID is what makes it reachable.
	if ent.id == 0 {
		return fmt.Sprintf("%p<>[%v]", ent.co, ent.id)
	}
	return fmt.Sprintf("%p<%v>[%v]",
		ent.co,
		ent.co.types[ent.id-1],
		ent.id,
	)
}
// Type returns the type of the referenced entity, or NoType if the
// reference is empty.
func (ent Entity) Type() ComponentType {
	if ent.co != nil && ent.id != 0 {
		return ent.co.types[ent.id-1]
	}
	return NoType
}
// ID returns the ID of the referenced entity; it SHOULD only be
// called in a context where the caller is sure of ownership; when in
// doubt, use Core.Deref(ent) instead.
func (ent Entity) ID() EntityID {
	if ent.co != nil {
		return ent.id
	}
	return 0
}
// Deref unpacks an Entity reference, returning its ID; it panics if
// the Core doesn't own the Entity.
func (co *Core) Deref(e Entity) EntityID {
	switch e.co {
	case co:
		return e.id
	case nil:
		panic("nil entity")
	default:
		panic("foreign entity")
	}
}
// Ref returns an Entity reference to the given ID; the zero ID maps
// to NilEntity, representing "no entity, in this Core" (it will
// Deref() to EntityID 0).
func (co *Core) Ref(id EntityID) Entity {
	if id != 0 {
		return Entity{co, id}
	}
	return NilEntity
}
// AddEntity adds an entity to the core and returns a reference to it.
// It MAY re-use a previously-destroyed slot (one whose type is still
// NoType) and MAY invoke all allocators to grow storage when no free
// slot exists.
func (co *Core) AddEntity(nt ComponentType) Entity {
	id := co.allocate()
	co.SetType(id, nt)
	return Entity{co, id}
}
// Add sets bits in the entity's type, calling any creators that are
// newly satisfied by the new type; a nil/zero reference is a no-op.
func (ent Entity) Add(t ComponentType) {
	if ent.co == nil || ent.id == 0 {
		return
	}
	ent.co.SetType(ent.id, ent.co.types[ent.id-1]|t)
}
// Delete clears bits in the entity's type, calling any destroyers
// that are no longer satisfied by the new type (which may be NoType);
// a nil/zero reference is a no-op.
func (ent Entity) Delete(t ComponentType) {
	if ent.co == nil || ent.id == 0 {
		return
	}
	ent.co.SetType(ent.id, ent.co.types[ent.id-1] & ^t)
}
// Destroy sets the entity's type to NoType, invoking any destroyers
// that match the prior type.
func (ent Entity) Destroy() {
	if ent.co != nil && ent.id > 0 {
		ent.co.SetType(ent.id, NoType)
	}
}
// SetType sets the entity's type; may invoke creators and destroyers
// as appropriate. A nil/zero reference is a no-op.
func (ent Entity) SetType(t ComponentType) {
	if ent.co == nil || ent.id == 0 {
		return
	}
	ent.co.SetType(ent.id, t)
}
// allocate returns a usable entity ID: a recycled NoType slot when
// the free counter says one exists, otherwise a freshly appended slot
// (which also notifies every registered allocator).
func (co *Core) allocate() EntityID {
	if co.free > 0 {
		for i, t := range co.types {
			if t == NoType {
				co.free--
				return EntityID(i + 1)
			}
		}
	}
	co.types = append(co.types, NoType)
	id := EntityID(len(co.types))
	for _, ef := range co.allocators {
		ef.f(id, NoType)
	}
	return id
}
// Type returns the entity's type.
// Indexes types[id-1] directly, so callers must pass a valid
// allocated ID (id 0 would index out of range).
func (co *Core) Type(id EntityID) ComponentType { return co.types[id-1] }
// SetType changes an entity's type, calling any relevant lifecycle
// functions. The sequence is: NoType->something runs the NoType
// creators; newly-gained bits run matching creators; lost bits run
// matching destroyers; something->NoType runs the NoType destroyers
// and returns the slot to the free pool. After every callback `new`
// is re-read from types[i] because a callback may itself change the
// entity's type.
func (co *Core) SetType(id EntityID, new ComponentType) {
	i := id - 1
	old := co.types[i]
	if old == new {
		return
	}
	co.types[i] = new
	// Entity coming into existence: fire unconditional (NoType) creators.
	if old == NoType {
		for _, ef := range co.creators {
			if ef.t == NoType {
				ef.f(id, new)
				new = co.types[i]
			}
		}
	}
	// Bits gained: fire creators whose mask is newly fully satisfied.
	if new & ^old != 0 {
		for _, ef := range co.creators {
			if new.HasAll(ef.t) && !old.HasAll(ef.t) {
				ef.f(id, new)
				new = co.types[i]
			}
		}
	}
	// Bits lost: fire destroyers whose mask is no longer satisfied.
	if old & ^new != 0 {
		for _, ef := range co.destroyers {
			if old.HasAll(ef.t) && !new.HasAll(ef.t) {
				ef.f(id, new)
				new = co.types[i]
			}
		}
	}
	// Entity fully destroyed: fire unconditional (NoType) destroyers
	// and count the slot as free for allocate() to recycle.
	if new == NoType {
		for _, ef := range co.destroyers {
			if ef.t == NoType {
				ef.f(id, new)
				new = co.types[i]
			}
		}
		co.free++
	}
}
|
package main
import (
"log"
"net/http"
"github.com/16francs/examin_go/config"
"github.com/16francs/examin_go/infrastructure/router"
)
// main configures logging, loads the environment, builds the router,
// and serves HTTP on the configured port.
func main() {
	// Logging setup.
	config.Logger()
	// Environment variables.
	env, err := config.LoadEnv()
	if err != nil {
		log.Fatalf("alert: %s", err)
	}
	// The original named this variable "router", shadowing the
	// imported router package; use a distinct name.
	r := router.Router()
	if err := http.ListenAndServe(":"+env.Port, r); err != nil {
		log.Fatalf("alert: %v", err)
	}
}
|
package openrtb_ext
// ExtImpAvocet defines the contract for bidrequest.imp[i].ext.prebid.bidder.avocet
type ExtImpAvocet struct {
	Placement     string `json:"placement,omitempty"`      // Avocet placement identifier
	PlacementCode string `json:"placement_code,omitempty"` // alternative placement code
}
|
package main
import "sort"
// Leetcode m16.24. (medium)
// pairSums returns all pairs from nums summing to target, found by
// sorting and walking two pointers inward; each element is consumed
// by at most one pair. Note: nums is sorted in place.
func pairSums(nums []int, target int) (res [][]int) {
	sort.Ints(nums)
	lo, hi := 0, len(nums)-1
	for lo < hi {
		switch sum := nums[lo] + nums[hi]; {
		case sum == target:
			res = append(res, []int{nums[lo], nums[hi]})
			lo++
			hi--
		case sum > target:
			hi--
		default:
			lo++
		}
	}
	return
}
|
package node
// Wait delivers a single *Resp from Close to blocked Wait callers via
// an unbuffered channel.
type Wait struct {
	c chan *Resp
}
// NewWait creates a Wait backed by an unbuffered response channel.
func NewWait() *Wait {
	w := Wait{c: make(chan *Resp)}
	return &w
}
// Close hands resp to every goroutine currently blocked in Wait: each
// unbuffered send succeeds only while a receiver is ready, and the
// default case exits as soon as no receiver is waiting.
// NOTE(review): the channel is never closed, so a Wait call arriving
// after Close returns will block forever — confirm that is intended.
func (w *Wait) Close(resp *Resp) {
	for {
		select {
		case w.c <- resp:
			continue
		default:
			return
		}
	}
}
// Wait blocks until a response is delivered via Close and returns it.
func (w *Wait) Wait() *Resp {
	return <-w.c
}
|
package repository
import (
"encoding/json"
"github.com/yerlan-tleubekov/go-redis/internal/models"
)
// IUser describes user persistence: create a user and fetch one by
// its login.
type IUser interface {
	CreateUser(*models.User) error
	GetUser(string) (*models.User, error)
}
// CreateUser serializes the user to JSON and stores it in the cache
// under the user's login.
func (repo *Repository) CreateUser(user *models.User) error {
	payload, err := json.Marshal(user)
	if err != nil {
		return err
	}
	return repo.memCache.SetUser(user.Login, string(payload))
}
// GetUser loads the user stored under login from the cache.
func (repo *Repository) GetUser(login string) (*models.User, error) {
	return repo.memCache.GetUser(login)
}
|
package helper
import (
"flag"
"fmt"
"log"
"path/filepath"
"regexp"
)
// Input holds the resolved absolute paths of the source workbook and
// the output workbook.
type Input struct {
	inPutFile  string // absolute path of the input .xlsx
	outPutFile string // absolute path of the output .xlsx
}
// ProcessArgv parses the command line: the first positional argument
// is the input .xlsx file; -s optionally names the output file
// (default: "output-<input>" beside the input). It validates both
// names and exits via log.Fatalln on any problem.
func (i *Input) ProcessArgv() {
	// Get argv from flag
	savedFile := flag.String("s", "dir", "-s saved file")
	flag.Parse()
	inputs := flag.Args()
	// Check main input
	if len(inputs) < 1 {
		log.Fatalln("Please input your file .xlsx")
	}
	// Make absolute path for main file
	fullPathMainFile, err := filepath.Abs(inputs[0])
	if err != nil {
		log.Fatalln(err)
	}
	i.inPutFile = fullPathMainFile
	mainFileName := filepath.Base(i.inPutFile)
	// Check for valid file name
	checkValidXLSXFileName(&mainFileName)
	// Get dir of main file
	dirPathMainFile := filepath.Dir(i.inPutFile)
	// If no input for saved file
	if *savedFile == "dir" {
		// filepath.Join is portable where the original's
		// `fmt.Sprintf("%s/%s-%s", ...)` hard-coded the separator.
		*savedFile = filepath.Join(dirPathMainFile, "output-"+mainFileName)
	} else {
		// If input for saved file is given, validate its base name.
		outPutFileName := filepath.Base(*savedFile)
		checkValidXLSXFileName(&outPutFileName)
	}
	// Make absolute path for saved file
	fullPathSavedDir, err := filepath.Abs(*savedFile)
	if err != nil {
		log.Fatalln(err)
	}
	i.outPutFile = fullPathSavedDir
	fmt.Println("Input excel file is:", i.inPutFile)
	fmt.Println("Output excel file is:", i.outPutFile)
	if i.inPutFile == i.outPutFile {
		log.Fatalln("The input and output file can not be the same.")
	}
}
// checkValidXLSXFileName exits the program unless fileName is a plain
// .xlsx name made of letters, digits, '_' or '-'.
func checkValidXLSXFileName(fileName *string) {
	// The original pattern "[a-zA-Z0-9_-]+.xlsx" was unanchored and
	// left the dot unescaped, so names like "report.xlsxbackup" or
	// "abcXxlsx" slipped through; anchor it and escape the dot.
	match, err := regexp.MatchString(`^[a-zA-Z0-9_-]+\.xlsx$`, *fileName)
	if err != nil {
		log.Fatalln(err)
	}
	if !match {
		log.Fatalln("The input and saved file must be *.xlsx")
	}
}
|
package main
import (
"fmt"
)
// rectangle is an axis-aligned rectangle with integer dimensions.
type rectangle struct {
	width, height int
}
// area returns width × height (pointer receiver).
func (r *rectangle) area() int {
	return r.height * r.width
}
// circumference returns the perimeter, 2(w+h) (value receiver).
func (r rectangle) circumference() int {
	return 2 * (r.width + r.height)
}
// main demonstrates that both value and pointer receivers are
// callable through either a value or a pointer.
func main() {
	rect := rectangle{width: 10, height: 5}
	fmt.Println("area: ", rect.area())
	fmt.Println("circumference: ", rect.circumference())
	p := &rect
	fmt.Println("area: ", p.area())
	fmt.Println("circumference: ", p.circumference())
}
|
package connection
import (
"sync"
"time"
"github.com/multivactech/MultiVAC/logger"
"github.com/multivactech/MultiVAC/model/shard"
"github.com/multivactech/MultiVAC/model/wire"
"github.com/multivactech/MultiVAC/p2p/peer"
)
// Multiplexer defines the data structure for handler: it maps a
// (command, shard) Tag to the channels registered for it.
type Multiplexer struct {
	msgHandlers map[Tag][]chan *MessageAndReply
}
// Tag is the key of multiplexer map.
type Tag struct {
	Msg   string      // message command name
	Shard shard.Index // shard the message belongs to
}
// MessagesAndReceiver defines the data structure for Register handler:
// one receiving channel and the tags it should be registered under.
// NOTE(review): Channels is a single channel despite the plural name.
type MessagesAndReceiver struct {
	Tags     []Tag
	Channels chan *MessageAndReply
}
// MessageAndReply defines the data structure that the p2p network modules will
// send to other modules by channel.
type MessageAndReply struct {
	Msg   wire.Message // the received wire message
	Reply peer.Reply   // handle for replying to the sending peer
}
// MakeMultiplexer creates an empty Multiplexer.
func MakeMultiplexer() *Multiplexer {
	return &Multiplexer{
		msgHandlers: make(map[Tag][]chan *MessageAndReply),
	}
}
// Handle will find the channels which the message should be transmitted
// according to the command of message and the given shard index,
// and then put the message into the channels.
// It returns true only when every registered channel accepted the message;
// false when no channel is registered for the tag, or when any channel stays
// full for 3 seconds.
func (m *Multiplexer) Handle(message wire.Message, pr peer.Reply) bool {
	shard := getMsgShard(message)
	channels := m.getChannels(Tag{
		Msg:   message.Command(),
		Shard: shard,
	})
	if channels != nil {
		logger.ConnServerLogger().Debugf("success get channel")
		messageAndReply := &MessageAndReply{
			Msg:   message,
			Reply: pr,
		}
		for _, h := range channels {
			select {
			case h <- messageAndReply:
				logger.ConnServerLogger().Debugf("successfully send message to the given channel")
			case <-time.After(3 * time.Second):
				// NOTE(review): returning here means channels earlier in
				// the slice already received the message, so delivery can
				// be partial when one subscriber is stuck.
				logger.ConnServerLogger().Errorf("channel seems full,time out!"+
					"len:%v,cap:%v,message:%v", len(h), cap(h), messageAndReply.Msg)
				return false
			}
		}
		return true
	}
	logger.ConnServerLogger().Debugf("failed to get channel,message:%v,shard:%v", message.Command(), shard)
	return false
}
// mtx guards msgHandlers in RegisterChannels and getChannels.
// NOTE(review): a package-level lock shared by all Multiplexer instances —
// consider moving it into Multiplexer.
var mtx sync.RWMutex
// RegisterChannels is used to register the channels for message reception to the Map
// so that the network provider can easily find the channel and then put the message
// into the channel and pass it to other modules.
// The same channel is registered at most once per tag; duplicates are ignored.
func (m *Multiplexer) RegisterChannels(dispatch *MessagesAndReceiver) {
	mtx.Lock()
	defer mtx.Unlock()
	for _, tag := range dispatch.Tags {
		if channels, ok := m.msgHandlers[tag]; ok {
			if exist(dispatch.Channels, channels) {
				logger.ConnServerLogger().Debugf("channel for message %v is already exist", dispatch.Tags)
			} else {
				channels = append(channels, dispatch.Channels)
				m.msgHandlers[tag] = channels
			}
		} else {
			// First registration for this tag.
			m.msgHandlers[tag] = []chan *MessageAndReply{dispatch.Channels}
		}
	}
}
// exist reports whether handler is already present in handlers.
// (Ranging over a nil or empty slice is a no-op, so no length guard is needed.)
func exist(handler chan *MessageAndReply, handlers []chan *MessageAndReply) bool {
	for _, registered := range handlers {
		if registered == handler {
			return true
		}
	}
	return false
}
// getChannels returns the channels registered for tag, or nil when none.
func (m *Multiplexer) getChannels(tag Tag) []chan *MessageAndReply {
	mtx.RLock()
	defer mtx.RUnlock()
	// A missing key yields the nil slice callers already expect.
	return m.msgHandlers[tag]
}
|
package frequency
// Queries processes the list of queries provided.
// Each query is a pair {op, x}:
//
//	1: insert x
//	2: delete one occurrence of x (count never goes below zero)
//	3: append 1 to the result if some value occurs exactly x times, else 0
func Queries(q [][]int32) []int32 {
	var res []int32
	counts := map[int32]int32{}
	for _, v := range q {
		switch v[0] {
		case 1:
			// Insert: a missing key reads as zero, so the explicit
			// existence check the original carried is unnecessary.
			counts[v[1]]++
		case 2:
			// Delete: reduce count (won't go below zero).
			if counts[v[1]] > 0 {
				counts[v[1]]--
			}
		case 3:
			// Frequency lookup: O(n) scan over current counts.
			var found int32
			for _, c := range counts {
				if c == v[1] {
					found = 1
					break
				}
			}
			res = append(res, found)
		}
	}
	return res
}
// FreqMap answers the same queries as Queries but in O(1) per query by
// additionally indexing values by their current frequency.
//
// The naive approach (Queries) gets terminated due to timeout: its frequency
// lookup iterates over all map entries (105490 ns/op with a 230-entry queue);
// keeping a frequency map makes it about 5x faster.
func FreqMap(q [][]int32) []int32 {
	var res []int32
	counts := map[int32]int32{}
	// frequencies[f] is the set of values currently occurring exactly f times.
	frequencies := map[int32]map[int32]bool{}
	for _, v := range q {
		entry := v[1]
		switch v[0] {
		case 1:
			// Insert: move entry one frequency bucket up.
			if count, ok := counts[entry]; ok {
				delete(frequencies[count], entry)
				counts[entry]++
				if frequencies[count+1] == nil {
					frequencies[count+1] = make(map[int32]bool)
				}
				frequencies[count+1][entry] = true
			} else {
				counts[entry] = 1
				if frequencies[1] == nil {
					frequencies[1] = make(map[int32]bool)
				}
				frequencies[1][entry] = true
			}
		case 2:
			// Delete: move entry one bucket down (won't go below zero).
			if count, ok := counts[entry]; ok && count > 0 {
				delete(frequencies[count], entry)
				counts[entry]--
				if count > 1 {
					// frequencies[count-1] was created when entry passed
					// through that bucket on the way up, so no nil check.
					frequencies[count-1][entry] = true
				}
			}
		case 3:
			// Frequency lookup is now a single bucket-size check.
			if len(frequencies[entry]) > 0 {
				res = append(res, 1)
			} else {
				res = append(res, 0)
			}
		}
	}
	return res
}
|
package main
import "fmt"
// main iterates over an int slice and prints each index/value pair:
//
//	Index: 0 Value: 10
//	Index: 1 Value: 20
//	Index: 2 Value: 30
//	Index: 3 Value: 40
func main() {
	values := []int{10, 20, 30, 40}
	for i, v := range values {
		fmt.Printf("Index: %d Value: %d\n", i, v)
	}
}
|
/*
Copyright 2023 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package web
import (
"net/http"
"github.com/gravitational/trace"
"github.com/julienschmidt/httprouter"
apiclient "github.com/gravitational/teleport/api/client"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/reversetunnel"
"github.com/gravitational/teleport/lib/web/ui"
)
// getUserGroups returns one page of user-group resources for the given site,
// converted to their UI representation.
func (h *Handler) getUserGroups(_ http.ResponseWriter, r *http.Request, params httprouter.Params, sctx *SessionContext, site reversetunnel.RemoteSite) (any, error) {
	// Get a client to the Auth Server with the logged in user's identity. The
	// identity of the logged in user is used to fetch the list of nodes.
	clt, err := sctx.GetUserClient(r.Context(), site)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Translate the request's query parameters into a paginated
	// ListResources request scoped to user groups.
	req, err := convertListResourcesRequest(r, types.KindUserGroup)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	page, err := apiclient.GetResourcePage[types.UserGroup](r.Context(), clt, req)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// The access checker supplies the caller's roles so the UI layer can
	// decide what the user may do with each group.
	accessChecker, err := sctx.GetUserAccessChecker()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	userGroups, err := ui.MakeUserGroups(site.GetName(), page.Resources, accessChecker.Roles())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return listResourcesGetResponse{
		Items:      userGroups,
		StartKey:   page.NextKey,
		TotalCount: page.Total,
	}, nil
}
|
//目前的jsonrpc库是基于tcp协议实现的,暂时不支持使用http进行数据传输
package main
import (
"./rpcObjects"
"io"
"log"
"net"
"net/rpc"
"net/rpc/jsonrpc"
"os"
)
// main starts a JSON-RPC server over raw TCP on localhost:1234 and serves
// each incoming connection on its own goroutine.
func main() {
	calc := new(rpcObjects.Args) // object whose exported methods serve the RPC calls
	_ = rpc.Register(calc)       // register the RPC service (error deliberately ignored)
	listener, e := net.Listen("tcp", "localhost:1234") // start listening
	if e != nil {
		log.Fatalln("Starting RPC-server -listen error:", e)
	}
	_, _ = io.WriteString(os.Stdout, "start connection\n")
	for {
		conn, err := listener.Accept() // accept a client connection
		if err != nil {
			// NOTE(review): a persistent accept error turns this into a
			// busy loop — consider logging and backing off.
			continue
		}
		go func(conn net.Conn) { // handle each client concurrently
			_, _ = io.WriteString(os.Stdout, "new client in coming\n")
			jsonrpc.ServeConn(conn) // same as net/rpc: ultimately calls the RPC server's ServeCodec
		}(conn)
	}
}
|
package e2e
import (
"context"
"fmt"
"log"
"testing"
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
nc "github.com/openshift/windows-machine-config-operator/pkg/controller/windowsmachine/nodeconfig"
)
// Constants shared by the upgrade end-to-end tests.
const (
	// deploymentRetryInterval is the retry time for WMCO deployment to scale up/down
	deploymentRetryInterval = time.Second * 10
	// deploymentTimeout is the maximum duration to update WMCO deployment
	deploymentTimeout = time.Minute * 1
	// resourceName is the name of a resource in the watched namespace (e.g pod name, deployment name)
	resourceName = "windows-machine-config-operator"
	// resourceNamespace is the namespace the resources are deployed in
	resourceNamespace = "openshift-windows-machine-config-operator"
)
// upgradeTestSuite tests behaviour of the operator when an upgrade takes place.
// It first simulates an upgrade (operator scaled down, node version
// annotations invalidated, operator scaled back up), then runs the
// individual upgrade scenarios as subtests.
func upgradeTestSuite(t *testing.T) {
	testCtx, err := NewTestContext(t)
	require.NoError(t, err)
	// apply configuration steps before running the upgrade tests
	err = testCtx.configureUpgradeTest()
	require.NoError(t, err, "error configuring upgrade")
	t.Run("Operator version upgrade", testUpgradeVersion)
	t.Run("Version annotation tampering", testTamperAnnotation)
}
// testUpgradeVersion tests the upgrade scenario of the operator. The node version annotation is changed when
// the operator is shut-down. The function tests if the operator on restart deletes the machines and recreates
// them on version annotation mismatch.
func testUpgradeVersion(t *testing.T) {
	testCtx, err := NewTestContext(t)
	require.NoError(t, err)
	// Wait for the operator to reconcile the tampered nodes back to ready.
	err = testCtx.waitForWindowsNodes(gc.numberOfNodes, true, false, true)
	require.NoError(t, err, "windows node upgrade failed")
	// Test if the version annotation corresponds to the current operator version
	testVersionAnnotation(t)
}
// testTamperAnnotation tests if the operator deletes machines and recreates them, if the node annotation is
// changed to an invalid value with the expected annotation when the operator is in running state.
func testTamperAnnotation(t *testing.T) {
	testCtx, err := NewTestContext(t)
	require.NoError(t, err)
	// tamper the version annotation on every Windows node
	nodes, err := testCtx.kubeclient.CoreV1().Nodes().List(context.TODO(),
		metav1.ListOptions{LabelSelector: nc.WindowsOSLabel})
	require.NoError(t, err)
	for _, node := range nodes.Items {
		patchData := fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, nc.VersionAnnotation, "badVersion")
		_, err := testCtx.kubeclient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
		// require.NoError already fails the test on error, so the original
		// trailing "if err == nil { break }" unconditionally stopped the
		// loop after the first node; all nodes must be tampered.
		require.NoError(t, err)
	}
	err = testCtx.waitForWindowsNodes(gc.numberOfNodes, true, false, true)
	require.NoError(t, err, "windows node upgrade failed")
	// Test if the version annotation corresponds to the current operator version
	testVersionAnnotation(t)
}
// configureUpgradeTest carries out steps required before running tests for upgrade scenario.
// The steps include -
// 1. Scale down the operator to 0.
// 2. Change Windows node version annotation to an invalid value
// 3. Scale up the operator to 1
func (tc *testContext) configureUpgradeTest() error {
	// Scale down the WMCO deployment to 0
	if err := tc.scaleWMCODeployment(0); err != nil {
		return err
	}
	// Override the Windows Node Version Annotation
	nodes, err := tc.kubeclient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: nc.WindowsOSLabel})
	if err != nil {
		return err
	}
	if len(nodes.Items) != int(gc.numberOfNodes) {
		// errors.Wrapf(nil, ...) returns nil, which silently disabled this
		// check in the original; construct a real error instead.
		return errors.Errorf("unexpected number of nodes %v, expected %v", len(nodes.Items), gc.numberOfNodes)
	}
	for _, node := range nodes.Items {
		patchData := fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, nc.VersionAnnotation, "badVersion")
		_, err := tc.kubeclient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
		if err != nil {
			return err
		}
		log.Printf("Node Annotation changed to %v", node.Annotations[nc.VersionAnnotation])
	}
	// Scale up the WMCO deployment to 1
	if err := tc.scaleWMCODeployment(1); err != nil {
		return err
	}
	return nil
}
// scaleWMCODeployment scales the WMCO operator to the given replicas. If the deployment is managed by OLM, updating the
// replicas only scales the deployment to 0 or 1. If we want to scale the deployment to more than 1 replicas, we need to
// make changes in replicas defined in the corresponding CSV.
func (tc *testContext) scaleWMCODeployment(desiredReplicas int32) error {
	// update the windows-machine-config-operator deployment to the desired replicas - 0 or 1
	err := wait.Poll(deploymentRetryInterval, deploymentTimeout, func() (done bool, err error) {
		patchData := fmt.Sprintf(`{"spec":{"replicas":%v}}`, desiredReplicas)
		_, err = tc.kubeclient.AppsV1().Deployments(resourceNamespace).Patch(context.TODO(), resourceName,
			types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
		if err != nil {
			// Transient patch failures are swallowed so the poll retries.
			log.Printf("error patching operator deployment : %v", err)
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return err
	}
	// wait for the windows-machine-config-operator to scale up/down
	err = wait.Poll(deploymentRetryInterval, deploymentTimeout, func() (done bool, err error) {
		deployment, err := tc.kubeclient.AppsV1().Deployments(resourceNamespace).Get(context.TODO(), resourceName,
			metav1.GetOptions{})
		if err != nil {
			// Transient get failures are swallowed so the poll retries.
			log.Printf("error getting operator deployment: %v", err)
			return false, nil
		}
		return deployment.Status.ReadyReplicas == desiredReplicas, nil
	})
	return err
}
|
package main
import (
"fmt"
"gin-use/bootstrap"
"gin-use/configs"
_ "gin-use/docs"
"gin-use/src/global"
"gin-use/src/routes"
"github.com/gin-gonic/gin"
_ "github.com/joho/godotenv/autoload"
)
var (
	// r is built at package init but nothing below serves it; the engine
	// actually run comes from routes.InitRouter in main.
	// NOTE(review): looks like dead code — confirm nothing else in the
	// package references r before removing it.
	r = gin.Default()
)
// @title swagger 接口文档
// @version 2.0
// @description
// @contact.name
// @contact.url
// @contact.email
// @license.name MIT
// @license.url https://www.baidu.com
// @host 192.168.1.163:8081
// @BasePath
// main bootstraps the application, wires the routes and runs the HTTP server
// on the configured port.
func main() {
	// System initialization (config, logging, database pools, ...).
	bootstrap.Init()
	// Close the read and write database connections on exit.
	defer global.DB.DbRClose()
	defer global.DB.DbWClose()
	// Initialize the HTTP service.
	engine := routes.InitRouter()
	if err := engine.Run(fmt.Sprintf(":%s", configs.ProjectPort())); err != nil {
		global.Logger.Errorf("HTTP Server启动失败, err:%v", err)
	}
}
|
package health
import "time"
// Controller tracks a set of named watched states and can take a
// point-in-time health Report over them.
type Controller struct {
	states map[string]WatchedState // a dynamic state under a name (eg: states["twilio"] watches &twilio.State)
}
// NewController returns a Controller with an empty state registry.
func NewController() *Controller {
	return &Controller{states: make(map[string]WatchedState)}
}
// Register starts watching the string pointed to by value under name.
// The value observed at registration time is recorded as the expected value,
// so the state reports healthy until it diverges.
// NOTE(review): the original assigned the *string directly into the
// WatchedState map entry, which does not compile; capturing the current value
// as Expected is an assumption — confirm the intended semantics.
func (c *Controller) Register(name string, value *string) {
	c.states[name] = WatchedState{Value: value, Expected: *value}
}
// Report returns a snapshot of every registered state taken at the current
// time. Panics if any registered Value pointer is nil.
func (c *Controller) Report() *Report {
	now := time.Now()
	// make([]State) without a length does not compile; pre-size by the
	// number of registered states instead. The unused loop variable `name`
	// was also a compile error and is dropped.
	states := make([]State, 0, len(c.states))
	for _, watched := range c.states {
		states = append(states, State{
			Value:   *watched.Value,
			Healthy: watched.IsHealthy(),
		})
	}
	return &Report{
		At:     now,
		States: states,
	}
}
// WatchedState pairs a live string pointer with the value it is expected to
// hold while healthy.
type WatchedState struct {
	Value    *string // pointer to the value being watched; must not be nil
	Expected string  // value *Value should hold while healthy
}
// IsHealthy reports whether the watched value currently equals the expected
// value. Panics if Value is nil.
func (w *WatchedState) IsHealthy() bool {
	return *(w.Value) == w.Expected
}
// Report is a snapshot of all watched states at a moment in time.
// NOTE(review): At has no json tag and would serialize as "At" while States
// serializes as "states" — confirm whether that asymmetry is intended.
type Report struct {
	At     time.Time
	States []State `json:"states"`
}
// State is the reported snapshot of a single watched value.
type State struct {
	Value   string `json:"value"`
	Healthy bool   `json:"healthy"`
}
|
package main
import "testing"
// TestCalculateSystemEnergy checks the total system energy of the first
// example after 10 simulation steps.
func TestCalculateSystemEnergy(t *testing.T) {
	moons := []string{
		"<x=-1, y=0, z=2>",
		"<x=2, y=-10, z=-7>",
		"<x=4, y=-8, z=8>",
		"<x=3, y=5, z=-1>",
	}
	expected := 179
	if actual := calculateSystemEnergy(moons, 10); expected != actual {
		t.Errorf("Expected energy %v, but actual %v", expected, actual)
	}
}
// TestCalculateStepsToPreviousPositions checks the cycle length of the first
// example system.
func TestCalculateStepsToPreviousPositions(t *testing.T) {
	moons := []string{
		"<x=-1, y=0, z=2>",
		"<x=2, y=-10, z=-7>",
		"<x=4, y=-8, z=8>",
		"<x=3, y=5, z=-1>",
	}
	expected := 2772
	if actual := calculateStepsToPreviousPositions(moons); expected != actual {
		t.Errorf("Expected steps %v, but actual %v", expected, actual)
	}
}
// TestCalculateStepsToPreviousPositions_LargerExample checks the cycle length
// of the larger example system.
func TestCalculateStepsToPreviousPositions_LargerExample(t *testing.T) {
	moons := []string{
		"<x=-8, y=-10, z=0>",
		"<x=5, y=5, z=10>",
		"<x=2, y=-7, z=3>",
		"<x=9, y=-8, z=-3>",
	}
	expected := 4686774924
	if actual := calculateStepsToPreviousPositions(moons); expected != actual {
		t.Errorf("Expected steps %v, but actual %v", expected, actual)
	}
}
// TestCalculateSystemEnergy_LargerExample checks the total system energy of
// the larger example after 100 simulation steps.
func TestCalculateSystemEnergy_LargerExample(t *testing.T) {
	moons := []string{
		"<x=-8, y=-10, z=0>",
		"<x=5, y=5, z=10>",
		"<x=2, y=-7, z=3>",
		"<x=9, y=-8, z=-3>",
	}
	expected := 1940
	if actual := calculateSystemEnergy(moons, 100); expected != actual {
		t.Errorf("Expected energy %v, but actual %v", expected, actual)
	}
}
|
package metal
import (
"fmt"
// M "github.com/ionous/sashimi/compiler/model"
"github.com/ionous/sashimi/meta"
"github.com/ionous/sashimi/util/errutil"
"github.com/ionous/sashimi/util/ident"
)
// Keep fmt referenced even when debug prints are removed.
var _ = fmt.Println

// objectList holds the ids of the objects on the many side of a relation,
// together with the property on those objects that points back at p.src.
type objectList struct {
	panicValue
	targetProp ident.Id
	objs       []ident.Id
}
// the many side of a many-to-one, or one-to-many relation;
// returns a list.
// newManyValues scans every instance in the model and collects those whose
// far-side property currently points back at p.src. Panics when the
// property's relation is not declared in the model.
func newManyValues(p *propBase) (ret meta.Values) {
	var objs []ident.Id
	rel, ok := p.mdl.Relations[p.prop.Relation]
	if !ok {
		panic(fmt.Sprintf("missing relation '%s'", p.prop.Relation))
	}
	// check instance because newManyValues can be called by class values ( getZero )
	var targetProp ident.Id
	if _, ok := p.mdl.Instances[p.src]; ok {
		// FIX: would rather make this a datastore query;
		// ( would require changing from ObjectValue interface to a full shadow model. )
		targetProp = rel.GetOther(p.prop.Id)
		// use the meta interface in order to get latest data
		for i := 0; i < p.mdl.NumInstance(); i++ {
			target := p.mdl.InstanceNum(i)
			if t, ok := target.GetProperty(targetProp); ok {
				// collect targets whose far pointer references our source
				if v := t.GetValue(); p.src.Equals(v.GetObject()) {
					objs = append(objs, target.GetId())
				}
			}
		}
	}
	return &objectList{panicValue{p}, targetProp, objs}
}
// NumValue returns how many related objects are cached in this list.
func (p objectList) NumValue() int {
	return len(p.objs)
}

// ValueNum returns the i'th related object wrapped as a readable value.
func (p objectList) ValueNum(i int) meta.Value {
	return objectReadValue{p.panicValue, p.objs[i]}
}
// ClearValues detaches every related object by pointing its far side at the
// empty id, accumulating (rather than short-circuiting on) per-object errors,
// then empties the cached id list.
func (p *objectList) ClearValues() (err error) {
	for _, id := range p.objs {
		if v, e := p.mdl.getFarPointer(id, p.targetProp); e != nil {
			err = errutil.Append(err, e)
		} else {
			// possible, if unlikely, that its changed.
			if p.src.Equals(v.GetObject()) {
				v.SetObject(ident.Empty())
			}
		}
	}
	p.objs = nil
	return
}
// AppendObject writes *this* object into the destination
// ( and updates our list of objects )
// Appending an id already in the list is a no-op.
func (p *objectList) AppendObject(id ident.Id) (err error) {
	// have to use the meta interface in order to trigger shadow properties
	if v, e := p.mdl.getFarPointer(id, p.targetProp); e != nil {
		err = e
	} else {
		// linear de-dup scan; lists are expected to be small
		found := false
		for _, already := range p.objs {
			if already == id {
				found = true
				break
			}
		}
		if !found {
			if e := v.SetObject(p.src); e != nil {
				err = errutil.Append(err, e)
			} else {
				p.objs = append(p.objs, id)
			}
		}
	}
	return
}
|
package store
import (
"log"
"github.com/coreos/etcd/store/streams"
"sync"
"strconv"
"strings"
"fmt"
)
// PREFIX is the key-space prefix all stream node paths must carry.
// NOTE(review): Go convention would name this Prefix; renaming would break
// existing references.
const PREFIX string = "/2/"

// StreamsStore appends to and reads from append-only streams addressed by
// node path.
type StreamsStore interface {
	StreamAppend(nodePath string, value []byte) (*Event, error)
	StreamGet(nodePath string) (*Event, error)
}

// streamsStore caches open append streams by key, guarded by mutex.
type streamsStore struct {
	basedir string
	streams map[string]*streams.AppendStream
	mutex   sync.Mutex
}
// init prepares the store's stream cache and base directory.
// (This is a method on streamsStore, not the package init function.)
func (s *streamsStore) init(basedir string) {
	s.streams = make(map[string]*streams.AppendStream)
	s.basedir = basedir
}
// StreamAppend appends value to the stream at nodePath and returns a Create
// event whose node key is the entry's position (hex) under the stream path.
func (s *streamsStore) StreamAppend(nodePath string, value []byte) (*Event, error) {
	stream, err := s.getStream(nodePath)
	if err != nil {
		return nil, err
	}
	pos, err := stream.Append(value)
	if err != nil {
		return nil, err
	}
	// Entry keys encode the append position in hexadecimal.
	entryPath := nodePath + "/" + strconv.FormatInt(pos, 16)
	node := &NodeExtern{
		Key:           entryPath,
		ModifiedIndex: 0,
		CreatedIndex:  0,
	}
	return &Event{
		Action: Create,
		Node:   node,
	}, nil
}
// StreamGet serves reads under a stream's key space. The final path segment
// is either the literal "info", returning the stream's tail position (hex),
// or a hex offset, returning the entry stored at that position.
func (s *streamsStore) StreamGet(nodePath string) (*Event, error) {
	lastSlash := strings.LastIndex(nodePath, "/")
	if lastSlash == -1 {
		return nil, fmt.Errorf("Invalid node path")
	}
	streamPath := nodePath[:lastSlash]
	streamOffset := nodePath[lastSlash+1:]
	stream, err := s.getStream(streamPath)
	if err != nil {
		return nil, err
	}
	if streamOffset == "info" {
		tail := stream.GetTail()
		infoString := strconv.FormatInt(tail, 16)
		// Rebuild the full node key. The original concatenated
		// streamPath + "info" without the "/" separator, so the returned
		// key did not match the requested path.
		infoPath := streamPath + "/info"
		node := &NodeExtern{
			Key:           infoPath,
			Value:         &infoString,
			ModifiedIndex: 0,
			CreatedIndex:  0,
		}
		return &Event{
			Action: Get,
			Node:   node,
		}, nil
	}
	pos, err := strconv.ParseInt(streamOffset, 16, 64)
	if err != nil {
		return nil, fmt.Errorf("Not a valid offset")
	}
	value, err := stream.Read(pos)
	if err != nil {
		return nil, err
	}
	stringValue := string(value)
	node := &NodeExtern{
		Key:           nodePath,
		Value:         &stringValue,
		ModifiedIndex: 0,
		CreatedIndex:  0,
	}
	return &Event{
		Action: Get,
		Node:   node,
	}, nil
}
// getStream returns the cached AppendStream for key, opening and caching it
// on first use. The key must start with PREFIX; the remainder is the stream
// id on disk. Safe for concurrent use via s.mutex.
func (s *streamsStore) getStream(key string) (*streams.AppendStream, error) {
	log.Print("getStream", key)
	s.mutex.Lock()
	defer s.mutex.Unlock()
	stream, found := s.streams[key]
	if !found {
		if !strings.HasPrefix(key, PREFIX) {
			return nil, fmt.Errorf("Invalid prefix for stream")
		}
		streamId := key[len(PREFIX):]
		var err error
		stream, err = streams.NewAppendStream(s.basedir, streamId)
		if err != nil {
			log.Print("Error getting stream", err)
			return nil, err
		}
		// Cache for subsequent appends/reads on the same stream.
		s.streams[key] = stream
	}
	return stream, nil
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/service/elasticache"
"yunion.io/x/pkg/errors"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/multicloud"
)
func (region *SRegion) DescribeCacheParameters(parameterGroupId string) ([]*elasticache.Parameter, error) {
ecClient, err := region.getAwsElasticacheClient()
if err != nil {
return nil, errors.Wrap(err, "client.getAwsElasticacheClient")
}
input := elasticache.DescribeCacheParametersInput{}
if len(parameterGroupId) > 0 {
input.CacheParameterGroupName = ¶meterGroupId
}
marker := ""
maxrecords := (int64)(50)
input.MaxRecords = &maxrecords
parameters := []*elasticache.Parameter{}
for {
if len(marker) >= 0 {
input.Marker = &marker
}
out, err := ecClient.DescribeCacheParameters(&input)
if err != nil {
return nil, errors.Wrap(err, "ecClient.DescribeCacheParameters")
}
parameters = append(parameters, out.Parameters...)
if out.Marker != nil && len(*out.Marker) > 0 {
marker = *out.Marker
} else {
break
}
}
return parameters, nil
}
// SElasticacheParameter adapts one AWS elasticache parameter (within its
// parameter group) to the cloud-neutral parameter interface.
type SElasticacheParameter struct {
	multicloud.SElasticcacheParameterBase
	multicloud.AwsTags

	parameterGroup string
	parameter      *elasticache.Parameter
}
// GetId returns "<group>/<name>"; panics if parameter or its name is nil.
func (self *SElasticacheParameter) GetId() string {
	return fmt.Sprintf("%s/%s", self.parameterGroup, *self.parameter.ParameterName)
}

// GetName returns the raw AWS parameter name; panics if it is nil.
func (self *SElasticacheParameter) GetName() string {
	return *self.parameter.ParameterName
}

// GetGlobalId returns GetId; parameter ids are already globally unique.
func (self *SElasticacheParameter) GetGlobalId() string {
	return self.GetId()
}

// GetStatus always reports the parameter as available.
func (self *SElasticacheParameter) GetStatus() string {
	return api.ELASTIC_CACHE_PARAMETER_STATUS_AVAILABLE
}
// GetParameterKey returns the parameter name, or "" when absent.
func (self *SElasticacheParameter) GetParameterKey() string {
	if self.parameter == nil || self.parameter.ParameterName == nil {
		return ""
	}
	return *self.parameter.ParameterName
}

// GetParameterValue returns the current value, or "" when absent.
func (self *SElasticacheParameter) GetParameterValue() string {
	if self.parameter == nil || self.parameter.ParameterValue == nil {
		return ""
	}
	return *self.parameter.ParameterValue
}

// GetParameterValueRange returns the allowed values, or "" when absent.
func (self *SElasticacheParameter) GetParameterValueRange() string {
	if self.parameter == nil || self.parameter.AllowedValues == nil {
		return ""
	}
	return *self.parameter.AllowedValues
}

// GetDescription returns the AWS description, or "" when absent.
func (self *SElasticacheParameter) GetDescription() string {
	if self.parameter == nil || self.parameter.Description == nil {
		return ""
	}
	return *self.parameter.Description
}
// GetModifiable reports whether the parameter may be modified, defaulting to
// false when the parameter or its IsModifiable flag is absent.
func (self *SElasticacheParameter) GetModifiable() bool {
	// The original condition was inverted: it dereferenced IsModifiable
	// exactly when parameter or IsModifiable was nil (a nil-pointer panic)
	// and returned false for every valid parameter.
	if self.parameter == nil || self.parameter.IsModifiable == nil {
		return false
	}
	return *self.parameter.IsModifiable
}
// GetForceRestart reports whether applying this parameter requires a reboot,
// defaulting to false when the parameter or its change type is absent.
func (self *SElasticacheParameter) GetForceRestart() bool {
	if self.parameter == nil || self.parameter.ChangeType == nil {
		return false
	}
	return *self.parameter.ChangeType == "requires-reboot"
}
|
package main
// main demonstrates declaring a local named type and converting to it.
func main() {
	type num int
	// A bare conversion is not a valid Go statement ("num(5) evaluated but
	// not used" fails to compile); assign to the blank identifier instead.
	_ = num(5)
}
|
package handler
import (
"fmt"
"log"
"net/http"
"workshop/internal/api"
)
// Handler serves HTTP requests using the injected joke API client.
type Handler struct {
	jokeClient api.Client
}
// NewHandler wires the given joke API client into a request handler.
func NewHandler(jokeClient api.Client) *Handler {
	h := Handler{jokeClient: jokeClient}
	return &h
}
// Home fetches a joke from the configured client and writes its text to the
// response; on fetch failure it replies with HTTP 500.
func (h *Handler) Home(w http.ResponseWriter, r *http.Request) {
	log.Printf("Fetching joke")
	joke, err := h.jokeClient.GetJoke()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Only log a write error when one actually occurred; the original
	// logged "<nil>" on every successful request.
	if _, err := fmt.Fprint(w, joke.Joke); err != nil {
		log.Println(err)
	}
}
|
package cpu_test
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/bmizerany/assert"
"github.com/gonitor/gonitor/config"
)
// TestCpuRestGetSumPercent verifies GET /cpu/sum/percent responds with 200.
func TestCpuRestGetSumPercent(test *testing.T) {
	testRouter := config.SetupTestRouter()
	url := config.GetRestEndPoint("/cpu/sum/percent")
	req, _ := http.NewRequest("GET", url, nil)
	resp := httptest.NewRecorder()
	testRouter.ServeHTTP(resp, req)
	assert.Equal(test, resp.Code, 200)
}

// TestCpuRestGetCount verifies GET /cpu/count responds with 200.
func TestCpuRestGetCount(test *testing.T) {
	testRouter := config.SetupTestRouter()
	url := config.GetRestEndPoint("/cpu/count")
	req, _ := http.NewRequest("GET", url, nil)
	resp := httptest.NewRecorder()
	testRouter.ServeHTTP(resp, req)
	assert.Equal(test, resp.Code, 200)
}

// TestCpuRestGetSumTime verifies GET /cpu/sum/time responds with 200.
func TestCpuRestGetSumTime(test *testing.T) {
	testRouter := config.SetupTestRouter()
	url := config.GetRestEndPoint("/cpu/sum/time")
	req, _ := http.NewRequest("GET", url, nil)
	resp := httptest.NewRecorder()
	testRouter.ServeHTTP(resp, req)
	assert.Equal(test, resp.Code, 200)
}
// TestCpuRestGetInfo verifies GET /cpu/info responds with 200.
func TestCpuRestGetInfo(test *testing.T) {
	testRouter := config.SetupTestRouter()
	url := config.GetRestEndPoint("/cpu/info")
	req, _ := http.NewRequest("GET", url, nil)
	resp := httptest.NewRecorder()
	testRouter.ServeHTTP(resp, req)
	assert.Equal(test, resp.Code, 200)
}

// TestCpuRestGetPercent verifies GET /cpu/percent responds with 200.
func TestCpuRestGetPercent(test *testing.T) {
	testRouter := config.SetupTestRouter()
	url := config.GetRestEndPoint("/cpu/percent")
	req, _ := http.NewRequest("GET", url, nil)
	resp := httptest.NewRecorder()
	testRouter.ServeHTTP(resp, req)
	assert.Equal(test, resp.Code, 200)
}

// TestCpuRestGetTime verifies GET /cpu/time responds with 200.
func TestCpuRestGetTime(test *testing.T) {
	testRouter := config.SetupTestRouter()
	url := config.GetRestEndPoint("/cpu/time")
	req, _ := http.NewRequest("GET", url, nil)
	resp := httptest.NewRecorder()
	testRouter.ServeHTTP(resp, req)
	assert.Equal(test, resp.Code, 200)
}
|
package ent_ex
import (
"context"
"fmt"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"time"
"net/http"
"strings"
"way-jasy-cron/common/ecron"
"way-jasy-cron/common/email"
"way-jasy-cron/cron/ecode"
"way-jasy-cron/cron/internal/dao/mail"
"way-jasy-cron/cron/internal/model/ent"
)
// JobStatus enumerates the lifecycle states a job row can be in.
type JobStatus int

const (
	// JobDelete marks a job as deleted.
	JobDelete JobStatus = -1
	// JobRunning marks a job as actively scheduled.
	JobRunning JobStatus = 1
	// JobStopping marks a job as disabled.
	JobStopping JobStatus = 0
)
// ListJobReq is the HTTP form for listing jobs, with optional fuzzy filters.
type ListJobReq struct {
	ListBaseReq
	Name    string `form:"name"`
	Creator string `form:"creator"`
	Comment string `form:"comment"`
}

// ListJobOptions is the internal query options derived from ListJobReq.
type ListJobOptions struct {
	ListBaseOptions
	Name    string
	Creator string
	Comment string
}

// ListJobResp is the paginated job-list response payload.
type ListJobResp struct {
	ListBaseResp
	Jobs []*ent.Job `json:"jobs"`
}
// ToListJobOptions converts the request form into query options and fills in
// paging defaults via Completed.
func (r ListJobReq) ToListJobOptions() (o *ListJobOptions) {
	o = &ListJobOptions{
		ListBaseOptions: ListBaseOptions{
			PN: r.PN,
			PS: r.PS,
		},
		Name:    r.Name,
		Creator: r.Creator,
		Comment: r.Comment,
	}
	o.Completed()
	return
}
// CronResp reports the ids currently scheduled and their total count.
type CronResp struct {
	IDs   []int `json:"ids"`
	Total int   `json:"total"`
}

// closer disables or deletes a job (opt is a JobStatus value as int).
type closer func(ctx context.Context, id, opt int) error

// logger records an audit message attributed to operator.
type logger func(ctx context.Context, msg, operator string) error
// JobExecutor binds a job row to the collaborators needed to run it: an HTTP
// client to fire the request, a closer to disable the job, a mail manager for
// failure alerts and a logger for the audit trail.
type JobExecutor struct {
	job        *ent.Job
	httpClient *http.Client
	// Use the closer/logger types declared above instead of re-spelling the
	// same function signatures inline (the original did both, making the
	// declarations inconsistent).
	closer closer
	email  *mail.Manager
	logger logger
}
// Manager builds JobExecutors; it currently carries no state.
type Manager struct {
}

// Config is the (currently empty) TOML configuration for the job manager.
type Config struct {
}

// Filename returns the config file name the paladin loader reads.
func (c *Config) Filename() string {
	return "job.toml"
}

// New returns an empty Manager.
func New() *Manager {
	//config := &Config{}
	//utilpaladin.MustUnmarshalTOML(config)
	return &Manager{
	}
}
// Create assembles a JobExecutor for job with the given collaborators.
func (m *Manager) Create(
	job *ent.Job,
	closer closer,
	client *http.Client,
	email *mail.Manager,
	logger logger,
) *JobExecutor {
	return &JobExecutor{
		job:        job,
		closer:     closer,
		httpClient: client,
		email:      email,
		logger:     logger,
	}
}
// Validate checks that the job's cron spec parses; it returns an
// ecode.InvalidSpec-wrapped error otherwise.
func (j *JobExecutor) Validate() error {
	spec := j.job.Spec
	if _, err := ecron.ParseStandard(spec); err != nil {
		return errors.Wrap(ecode.InvalidSpec, "job spec is incorrect!")
	}
	return nil
}
// errMsg is the body template of the failure-alert mail; the verbs are the
// job id and name. (User-facing Chinese text, intentionally unchanged.)
const errMsg = `定时任务出错通知
您创建的定时任务 (id: %d, name: %s) 执行出错!!!
您的任务已被禁用!请联系相关人员进行处理`
// Run executes the job once: it audit-logs the start, fires the configured
// HTTP request and maintains the retry/remaining-run counters. On any error
// the deferred handler decrements Retry, disables the job once retries are
// exhausted and sends an alert mail; on success Retry is restored and the
// job is disabled when its run Count reaches zero.
func (j *JobExecutor) Run() {
	// Audit-log the run start; the logger's error is deliberately ignored.
	j.logger(context.TODO(), fmt.Sprintf("任务id(%d)开始执行", j.job.ID), j.job.Creator)
	var (
		err     error
		request *http.Request
	)
	defer func() {
		if err != nil {
			j.job.Retry--
			// NOTE(review): if Retry starts at 0 this decrements to -1 and
			// the job is never closed — confirm Retry is always >= 1.
			if j.job.Retry == 0 {
				log.Info("close the job:", j.job)
				if err = j.closer(context.TODO(), j.job.ID, int(JobStopping)); err != nil {
					log.Error("method: Run#ent_ex/job err:",err)
				}
			}
			m := email.NewEmail(j.email.Host, j.email.Username, j.email.Password, j.email.Port)
			m.WithInfo("定时任务失败报警",fmt.Sprintf(errMsg, j.job.ID, j.job.Name), []string{"306698601@qq.com"})
			if err := m.Send(); err != nil {
				log.Error("send mail notice err:", err)
			}
			return
		}
		// Successful run: restore the retry budget.
		j.job.Retry = j.job.RetryTemp
	}()
	request, err = http.NewRequest(j.job.Method, j.job.URL, strings.NewReader(j.job.Body))
	if err != nil {
		log.Error("NewRequest err:", err)
		return
	}
	// NOTE(review): the response body is neither read nor closed, and only
	// transport errors count as failure — HTTP error status codes do not.
	_, err = j.httpClient.Do(request)
	if err != nil {
		log.Error("do request err:", err)
		return
	}
	// One run consumed; close the job when its scheduled count is used up.
	j.job.Count--
	if j.job.Count == 0 {
		if err = j.closer(context.TODO(), j.job.ID, int(JobStopping)); err != nil {
			log.Error("method: Run#ent_ex/job err:",err)
		}
	}
	return
}
// EntryID returns the cron entry id derived from the job's database id.
// (Also makes both signatures gofmt-clean: the originals lacked the space
// before the opening brace.)
func (j *JobExecutor) EntryID() ecron.EntryID {
	return ecron.EntryID(j.job.ID)
}

// GetSpec returns the job's cron spec expression.
func (j *JobExecutor) GetSpec() string {
	return j.job.Spec
}
// ToDoList is a named item scheduled for a particular time.
type ToDoList struct {
	Name   string    `json:"name"`
	DoTime time.Time `json:"do_time"`
}
|
package main
import (
_ "namanerp/routers"
"namanerp/models/inventory"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
_ "github.com/go-sql-driver/mysql" // import your required driver
)
// init registers the MySQL driver, the default database and the inventory
// models with beego's ORM before main runs.
func init() {
	// orm.RegisterDriver("sqlite", orm.DRSqlite)
	// orm.RegisterDataBase("default", "sqlite3", "database/orm_test.db")
	orm.RegisterDriver("mysql", orm.DRMySQL)
	// NOTE(review): hardcoded DSN with an empty root password — move to
	// configuration before deploying.
	orm.RegisterDataBase("default", "mysql", "root:@/ayurvedic?charset=utf8")
	// orm.RegisterDriver("postgres", orm.DRPostgres)
	// orm.RegisterDataBase("default", "postgres", "user=postgres password=postgres dbname=testdb sslmode=disable")
	orm.RegisterModel(new(inventory.Products))
	orm.RegisterModel(new(inventory.ProductCategories))
	orm.RegisterModel(new(inventory.ProductTypes))
	// Debug logs every generated SQL statement; disable in production.
	orm.Debug = true
}
// main starts the beego HTTP server using the routes registered by the
// blank routers import.
func main() {
	beego.Run()
}
|
package rbootx
import "fmt"
// Memorizer is a bucketed key/value persistence backend.
type Memorizer interface {
	Save(bucket, key string, value []byte) error
	Find(bucket, key string) []byte
	FindAll(bucket string) map[string][]byte
	Update(bucket, key string, value []byte) error
	Remove(bucket, key string) error
}
// memorizers is the global registry of memorizer constructors by name.
var memorizers = make(map[string]func() Memorizer)

// RegisterMemorizer registers a memorizer constructor under name.
// It panics on an empty or duplicate name.
func RegisterMemorizer(name string, m func() Memorizer) {
	if name == "" {
		panic("RegisterMemorizer: memorizer must have a name")
	}
	if _, ok := memorizers[name]; ok {
		panic("RegisterMemorizer: memorizers named " + name + " already registered. ")
	}
	memorizers[name] = m
}
// DetectMemorizer resolves a memorizer constructor by name.
// An empty name succeeds only when exactly one memorizer is registered;
// otherwise the returned error describes why no unambiguous choice exists.
func DetectMemorizer(name string) (func() Memorizer, error) {
	if memo, ok := memorizers[name]; ok {
		return memo, nil
	}
	if len(memorizers) == 0 {
		return nil, fmt.Errorf("no memorizer available")
	}
	if name == "" {
		if len(memorizers) == 1 {
			// Exactly one candidate registered: return it.
			for _, memo := range memorizers {
				return memo, nil
			}
		}
		return nil, fmt.Errorf("multiple memorizers available; must choose one")
	}
	return nil, fmt.Errorf("unknown memorizer '%s'", name)
}
|
package controllers
import (
"encoding/json"
"net/http"
"strconv"
"project/config"
"project/models"
"github.com/labstack/echo"
)
/* POST /customer --> to add customer data
{
	"full_name": "Irvan Tristian",
	"mobile": "0811223456",
	"address": "Mountain View",
	"email": "irvan.t@google.com",
	"id_card": "1708194517081945"
}
*/
func CreateCustomerController(c echo.Context) error {
	customer := models.Customers{}
	// The bind error was silently discarded; a malformed payload now yields
	// a 400 instead of persisting a zero-valued customer.
	if err := c.Bind(&customer); err != nil {
		return c.JSON(http.StatusBadRequest, models.Response{
			Error: err.Error(),
		})
	}
	// Store the request data to the database
	added_customer, err := customer.SaveCustomer(config.DB)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, models.Response{
			Error: err.Error(),
		})
	}
	return c.JSON(http.StatusOK, models.Response{
		Message: "Customer data succesfully created",
		Data:    added_customer,
	})
}
// GET /customers --> to get all customers data
func GetAllCustomersController(c echo.Context) error {
	customer := models.Customers{}
	// Check whether customer data available
	counted_customers, err := customer.CountAllCustomers(config.DB)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, models.Response{
			Error: err.Error(),
		})
	}
	if counted_customers == 0 {
		// err is guaranteed nil on this path, so the original's
		// Error: err.Error() dereferenced a nil interface and panicked.
		return c.JSON(http.StatusOK, models.Response{
			Message: "There's no customer data yet in database",
		})
	}
	// Find all the available customers from database
	customers, err := customer.GetAllCustomers(config.DB, c)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, models.Response{
			Error: err.Error(),
		})
	}
	return c.JSON(http.StatusOK, models.Response{
		Message: "Get all the customers data successful",
		Data:    customers,
	})
}
// GetCustomerController handles GET /customer/:id: looks up one customer
// by its numeric primary key.
func GetCustomerController(c echo.Context) error {
	// The :id path segment must be an integer.
	id, convErr := strconv.Atoi(c.Param("id"))
	if convErr != nil {
		return c.JSON(http.StatusBadRequest, models.Response{
			Error: convErr.Error(),
		})
	}
	// Fetch the record with that primary key.
	customer := models.Customers{}
	the_customer, dbErr := customer.GetCustomer(config.DB, id)
	if dbErr != nil {
		return c.JSON(http.StatusInternalServerError, models.Response{
			Error: dbErr.Error(),
		})
	}
	return c.JSON(http.StatusOK, models.Response{
		Message: "Get a customer data succesful",
		Data:    the_customer,
	})
}
// UpdateCustomerController handles PUT /customer/:id. The body is decoded
// into a map so that only the fields present in the request are updated.
func UpdateCustomerController(c echo.Context) error {
	// Create a map to support attributes update and populate json data.
	// It will only update non-zero value fields.
	request_body := make(map[string]interface{})
	if err := json.NewDecoder(c.Request().Body).Decode(&request_body); err != nil {
		// BUG FIX: a malformed request body is a client error; report 400
		// (like the id check below) instead of 500.
		return c.JSON(http.StatusBadRequest, models.Response{
			Error: err.Error(),
		})
	}
	// Check id parameter
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return c.JSON(http.StatusBadRequest, models.Response{
			Error: err.Error(),
		})
	}
	// Update customer data in database
	customer := models.Customers{}
	updated_customer, err := customer.UpdateCustomer(config.DB, id, request_body)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, models.Response{
			Error: err.Error(),
		})
	}
	return c.JSON(http.StatusOK, models.Response{
		Message: "Update a customer data succesful",
		Data:    updated_customer,
	})
}
// DeleteCustomerController handles DELETE /customer/:id: removes the
// customer with the given primary key.
func DeleteCustomerController(c echo.Context) error {
	// Check id parameter
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return c.JSON(http.StatusBadRequest, models.Response{
			Error: err.Error(),
		})
	}
	// Delete requested data off the database
	customer := models.Customers{}
	deleted_customer, err := customer.DeleteCustomer(config.DB, id)
	if err != nil {
		// CONSISTENCY FIX: report failures in the Error field — every other
		// controller in this file uses Error, not Message, for errors.
		return c.JSON(http.StatusInternalServerError, models.Response{
			Error: err.Error(),
		})
	}
	return c.JSON(http.StatusOK, models.Response{
		Message: "Delete a customer data succesful",
		Data:    deleted_customer,
	})
}
|
/**
*
* By So http://sooo.site
* -----
* Don't panic.
* -----
*
*/
package v1
import (
"encoding/json"
"fmt"
"github.com/Git-So/blog-api/models"
"github.com/Git-So/blog-api/service"
"github.com/Git-So/blog-api/utils/api"
"github.com/Git-So/blog-api/utils/conf"
"github.com/Git-So/blog-api/utils/e"
"github.com/gin-gonic/gin"
"github.com/gookit/validate"
"github.com/wonderivan/logger"
)
// Subject is the request payload submitted to the subject endpoints.
// Which fields are validated depends on the scene (see ConfigValidation).
type Subject struct {
	ID      uint   `validate:"required|number"`
	Title   string `validate:"required"`
	State   uint   `validate:"number"`
	PageNum uint   `validate:"required|number"`
	Search  string
}
// ConfigValidation configures field display names, error messages, and
// per-endpoint validation scenes for the Subject payload.
func (subject Subject) ConfigValidation(v *validate.Validation) {
	// Field display names (used inside the error message templates).
	v.AddTranslates(validate.MS{
		"ID":    "专题ID",
		"Title": "专题标题",
		"State": "专题状态",
	})
	// Error message templates.
	v.AddMessages(validate.MS{
		"required":         "{field}不能为空",
		"number":           "{field}仅能为数字",
		"maxLen":           "{field}最大长度%d",
		"ID.required":      "专题ID错误",
		"PageNum.required": "页数错误",
	})
	// Validation scenes: which fields each handler validates.
	// NOTE(review): the update scene is named "UpdateArticle", not
	// "UpdateSubject"; UpdateSubject below depends on this exact string, so
	// renaming it would require changing both sites together.
	v.WithScenes(validate.SValues{
		"SubjectList": []string{
			"PageNum",
		},
		"SubjectInfo": []string{
			"ID",
		},
		"CreateSubject": []string{
			"Title",
		},
		"UpdateArticle": []string{
			"ID", "Title", "State",
		},
		"DeleteSubject": []string{
			"ID",
		},
	})
}
// CreateSubject 创建专题 — creates a new subject after checking that the
// title is not already taken.
func CreateSubject(c *gin.Context) {
	// Bind the request payload.
	var request Subject
	if err := c.ShouldBind(&request); err != nil {
		api.ErrValidate().Output(c)
		return
	}
	// Validate against the CreateSubject scene (Title required).
	v := validate.Struct(request, "CreateSubject")
	if !v.Validate() {
		api.ErrValidate(v.Errors.One()).Output(c)
		return
	}
	// Reject duplicate titles.
	stat, err := service.New(c).IsExistsSubjectByTitle(request.Title)
	if _, isErr := api.IsServiceError(c, err); isErr {
		return
	}
	if stat {
		api.Err(e.ErrExistsSubject).Output(c)
		return
	}
	// Create the subject.
	subject := &models.Subject{
		Title: request.Title,
	}
	err = service.New(c).CreateSubject(subject)
	// BUG FIX: IsServiceError returns (isNotFound, isErr) — see its other
	// call sites in this file — so the error flag is the SECOND value. The
	// old `isErr, _ :=` read the not-found flag as the error flag.
	if _, isErr := api.IsServiceError(c, err); isErr {
		return
	}
	api.Succ().SetMsg("专题创建成功").Output(c)
}
// UpdateSubject 更新专题 — updates an existing subject's title and state.
func UpdateSubject(c *gin.Context) {
	// Bind the request payload.
	var request *Subject
	if err := c.ShouldBind(&request); err != nil {
		api.ErrValidate().Output(c)
		return
	}
	// Validate against the "UpdateArticle" scene (see ConfigValidation).
	v := validate.Struct(request, "UpdateArticle")
	if !v.Validate() {
		api.ErrValidate(v.Errors.One()).Output(c)
		return
	}
	// Ensure the subject exists before updating.
	if request.ID > 0 {
		stat, err := service.New(c).IsExistsSubjectByID(request.ID)
		if _, isErr := api.IsServiceError(c, err); isErr {
			return
		}
		if !stat {
			api.Err(e.ErrNotFoundSubject).Output(c)
			return
		}
	}
	// Apply the update.
	subject := &models.Subject{
		ID:    request.ID,
		Title: request.Title,
		State: request.State,
	}
	err := service.New(c).UpdateSubject(subject)
	// BUG FIX: IsServiceError returns (isNotFound, isErr); the old
	// `isErr, _ :=` read the not-found flag as the error flag.
	if _, isErr := api.IsServiceError(c, err); isErr {
		return
	}
	api.Succ().SetMsg("专题更新成功").Output(c)
}
// DeleteSubject 删除专题 — deletes the subject identified by the request ID.
func DeleteSubject(c *gin.Context) {
	// Bind the request payload.
	var request *Subject
	if err := c.ShouldBind(&request); err != nil {
		api.ErrValidate().Output(c)
		return
	}
	// Validate against the DeleteSubject scene (ID required).
	if v := validate.Struct(request, "DeleteSubject"); !v.Validate() {
		api.ErrValidate(v.Errors.One()).Output(c)
		return
	}
	// Perform the deletion.
	err := service.New(c).DeleteSubject(request.ID)
	if _, isErr := api.IsServiceError(c, err); isErr {
		return
	}
	api.Succ().SetMsg("专题删除成功").Output(c)
}
// SubjectList 专题列表 — returns one page of subjects, optionally filtered
// by a search keyword.
func SubjectList(c *gin.Context) {
	// Build the request from path/query parameters. SECURITY/BUG FIX: the
	// search term is JSON-encoded instead of interpolated raw, so quotes or
	// backslashes in user input can no longer break (or inject into) the
	// JSON document below.
	searchJSON, err := json.Marshal(c.DefaultQuery("search", ""))
	if err != nil {
		api.ErrValidate().Output(c)
		return
	}
	var request *Subject
	var data = fmt.Sprintf(`{
	"PageNum":%v,
	"Search":%s
	}`,
		c.Param("PageNum"),
		searchJSON,
	)
	if err := json.Unmarshal([]byte(data), &request); err != nil {
		api.ErrValidate().Output(c)
		return
	}
	// Validate against the SubjectList scene (PageNum required).
	v := validate.Struct(request, "SubjectList")
	if !v.Validate() {
		api.ErrValidate(v.Errors.One()).Output(c)
		return
	}
	// Build the WHERE clause for the optional keyword filter.
	var whereKey string
	var whereVal []interface{}
	if len(request.Search) > 0 { // keyword filter
		whereKey += " title LIKE ? "
		whereVal = append(whereVal, "%"+request.Search+"%")
	}
	where := append([]interface{}{whereKey}, whereVal...)
	count, err := service.New(c).SubjectTotal(where)
	if _, isErr := api.IsServiceError(c, err); isErr {
		return
	}
	var subjectList []*models.Subject
	pageSize := conf.Get().Page.Subject
	if count > 0 {
		subjectList, err = service.New(c).GetSubjectList(request.PageNum, pageSize, where)
		// BUG FIX: this error was previously dropped on the floor.
		if _, isErr := api.IsServiceError(c, err); isErr {
			return
		}
	}
	response := map[string]interface{}{
		"count":       count,
		"pageSize":    pageSize,
		"pageNum":     request.PageNum,
		"subjectList": &subjectList,
		"search":      request.Search,
	}
	api.Succ().SetData(&response).Output(c)
}
// SubjectInfo 专题详情 — returns the detail of one subject by ID.
func SubjectInfo(c *gin.Context) {
	// Build the request from the ID path parameter.
	data := fmt.Sprintf(`{"ID":%s}`, c.Param("ID"))
	// BUG FIX: the request was declared as Article, but the "SubjectInfo"
	// validation scene is defined on Subject (see ConfigValidation above),
	// so validation targeted the wrong type.
	var request Subject
	if err := json.Unmarshal([]byte(data), &request); err != nil {
		logger.Warn(err)
		api.ErrValidate().Output(c)
		return
	}
	// Validate against the SubjectInfo scene (ID required).
	v := validate.Struct(request, "SubjectInfo")
	if !v.Validate() {
		api.ErrValidate(v.Errors.One()).Output(c)
		return
	}
	// Look up the subject, passing the caller's admin flag through.
	subjectInfo, err := service.New(c).GetSubjectInfoByID(request.ID, isAdmin(c))
	isNotFound, isErr := api.IsServiceError(c, err)
	if isErr {
		return
	}
	if isNotFound {
		api.New(e.ErrNotFoundData).Output(c)
		return
	}
	// response
	response := map[string]interface{}{
		"subjectInfo": subjectInfo,
	}
	api.Succ().SetData(&response).Output(c)
}
|
package main
import (
metrictools "../"
"encoding/json"
"fmt"
nsq "github.com/bitly/go-nsq"
"github.com/garyburd/redigo/redis"
"log"
"time"
)
// MetricDeliver defines a metric processing task: it consumes collectd
// JSON messages from nsq and persists the derived metric values to redis.
type MetricDeliver struct {
	dataService   *redis.Pool // redis pool for metric data (archive:*, per-metric hashes)
	configService *redis.Pool // redis pool for config data (trigger:* keys)
	writer        *nsq.Writer // nsq producer for trigger and archive messages
	triggerTopic  string      // topic for stale-trigger notifications
	archiveTopic  string      // topic for archive requests
	nsqdAddr      string      // nsqd address; not referenced in this file's visible code
}
// HandleMessage is MetricDeliver's nsq handler: it decodes a batch of
// collectd JSON entries and persists the metrics generated from each one.
func (m *MetricDeliver) HandleMessage(msg *nsq.Message) error {
	var collected []metrictools.CollectdJSON
	if err := json.Unmarshal(msg.Body, &collected); err != nil {
		// A malformed message is logged and acknowledged (nil return) so
		// nsq will not redeliver it forever.
		log.Println(err)
		return nil
	}
	for _, item := range collected {
		// Skip entries whose value/name/type arrays are inconsistent.
		if len(item.Values) != len(item.DataSetNames) || len(item.Values) != len(item.DataSetTypes) {
			continue
		}
		if err := m.persistData(item.GenerateMetricData()); err != nil {
			return err
		}
	}
	return nil
}
// persistData writes each metric to redis: the (timestamp, value) pair is
// appended to the metric's archive sorted set, and the metric's latest
// state is stored in a hash. Metrics older than 10 minutes since the last
// archive run are additionally published to the archive topic.
func (m *MetricDeliver) persistData(metrics []*metrictools.MetricData) error {
	var err error
	dataCon := m.dataService.Get()
	defer dataCon.Close()
	for _, metric := range metrics {
		var nvalue float64
		if metric.DataSetType == "counter" || metric.DataSetType == "derive" {
			// Counters/derives store a rate, not the raw value.
			nvalue, err = m.getRate(metric)
		} else {
			nvalue = metric.Value
			// BUG FIX: reset err on the gauge path. Previously a leftover
			// error from the prior iteration (e.g. redis.ErrNil from the
			// HGET below) leaked into this iteration's error handling.
			err = nil
		}
		if err != nil {
			if err.Error() == "ignore" {
				continue
			}
			if err != redis.ErrNil {
				log.Println("fail to get new value", err)
				break
			}
		}
		record := fmt.Sprintf("%d:%.2f", metric.Timestamp, nvalue)
		metricName := metric.Host + "_" + metric.GetMetricName()
		_, err = dataCon.Do("ZADD", "archive:"+metricName, metric.Timestamp, record)
		if err != nil {
			log.Println(err)
			break
		}
		var t int64
		t, err = redis.Int64(dataCon.Do("HGET", metricName, "archivetime"))
		if err != nil && err != redis.ErrNil {
			log.Println("fail to get archivetime", err)
			break
		}
		// Trigger an archive pass when the last one is 10+ minutes old.
		if time.Now().Unix()-t >= 600 {
			m.writer.Publish(m.archiveTopic, []byte(metricName))
		}
		_, err = dataCon.Do("HMSET", metricName, "value", metric.Value, "timestamp", metric.Timestamp, "rate_value", nvalue, "dstype", metric.DataSetType, "dsname", metric.DataSetName, "interval", metric.Interval, "host", metric.Host, "plugin", metric.Plugin, "plugin_instance", metric.PluginInstance, "type", metric.Type, "type_instance", metric.TypeInstance)
		if err != nil {
			log.Println("hmset", metricName, err)
			break
		}
		_, err = dataCon.Do("HSETNX", metricName, "ttl", metric.TTL)
		_, err = dataCon.Do("SADD", "host:"+metric.Host, metricName)
	}
	return err
}
// getRate computes the per-second rate of a counter/derive metric from the
// previously stored value and timestamp. When no previous sample can be
// parsed, the raw value is returned; negative rates are clamped to zero.
func (m *MetricDeliver) getRate(metric *metrictools.MetricData) (float64, error) {
	dataCon := m.dataService.Get()
	defer dataCon.Close()
	rst, err := redis.Values(dataCon.Do("HMGET", metric.Host+"_"+metric.GetMetricName(), "value", "timestamp"))
	if err != nil {
		return 0, err
	}
	var value float64
	var t int64
	var v float64
	_, err = redis.Scan(rst, &v, &t)
	if err == nil {
		// BUG FIX: guard against a zero time delta, which previously
		// produced +/-Inf (or NaN) from the division below.
		if metric.Timestamp == t {
			return 0, nil
		}
		value = (metric.Value - v) / float64(metric.Timestamp-t)
	} else {
		value = metric.Value
	}
	if value < 0 {
		value = 0
	}
	return value, nil
}
// ScanTrigger runs forever, and every 30 seconds republishes all triggers
// whose "last" timestamp is more than 60 seconds old.
func (m *MetricDeliver) ScanTrigger() {
	ticker := time.Tick(time.Second * 30)
	configCon := m.configService.Get()
	defer configCon.Close()
	for {
		// NOTE(review): KEYS scans the whole keyspace; SCAN would be safer
		// on large deployments.
		keys, err := redis.Strings(configCon.Do("KEYS", "trigger:*"))
		if err == nil {
			now := time.Now().Unix()
			for _, v := range keys {
				last, err := redis.Int64(configCon.Do("HGET", v, "last"))
				if err != nil && err != redis.ErrNil {
					continue
				}
				if now-last < 61 {
					continue
				}
				// NOTE(review): the Publish error is still discarded —
				// confirm whether failures should be logged or retried.
				_, _, err = m.writer.Publish(m.triggerTopic, []byte(v))
			}
		}
		// BUG FIX: previously a KEYS error hit `continue`, which skipped
		// the ticker receive and busy-looped against redis until it
		// recovered. Always wait for the next tick.
		<-ticker
	}
}
|
// Code generated; DANGER ZONE FOR EDITS
package data
import (
"bytes"
"encoding/json"
"fmt"
"gopkg.in/yaml.v2"
)
// BondDefinitionName is the canonical name of this definition type.
const BondDefinitionName = "bond"

// BondDefinitions maps definition keys (stringified ids — see Find) to
// their definitions.
type BondDefinitions map[string]BondDefinition

// Keys returns the map keys in unspecified order.
func (d BondDefinitions) Keys() []string {
	var keys []string
	for key := range d {
		keys = append(keys, key)
	}
	return keys
}

// Values returns the stored definitions in unspecified order.
func (d BondDefinitions) Values() []BondDefinition {
	var vals []BondDefinition
	for _, def := range d {
		vals = append(vals, def)
	}
	return vals
}
// Find returns the definition whose key equals the decimal string form of
// id, or the zero BondDefinition when no such key exists.
func (d BondDefinitions) Find(id int) (out BondDefinition) {
	// A direct map lookup replaces the original O(n) scan over every key;
	// a missing key yields the zero value, matching the old behavior.
	return d[fmt.Sprint(id)]
}
// Name returns the definition-type name ("bond") for this collection.
func (d BondDefinitions) Name() string {
	return BondDefinitionName
}
// BondDefinition is a single bond definition entry; the struct tags mirror
// the upstream JSON/YAML field names.
type BondDefinition struct {
	Blacklisted             bool              `json:"blacklisted" yaml:"blacklisted,omitempty"`
	DisplayProperties       DisplayProperties `json:"displayProperties" yaml:"displayProperties,omitempty"`
	Hash                    int               `json:"hash" yaml:"hash,omitempty"`
	Index                   int               `json:"index" yaml:"index,omitempty"`
	ProvidedUnlockHash      int               `json:"providedUnlockHash" yaml:"providedUnlockHash,omitempty"`
	ProvidedUnlockValueHash int               `json:"providedUnlockValueHash" yaml:"providedUnlockValueHash,omitempty"`
	Redacted                bool              `json:"redacted" yaml:"redacted,omitempty"`
}

// Name returns the definition-type name ("bond").
func (d BondDefinition) Name() string {
	return BondDefinitionName
}
// Json returns the definition serialized as compact JSON.
func (d BondDefinition) Json() ([]byte, error) {
	return json.Marshal(d)
}
// PrettyJson returns the definition serialized as indented JSON.
func (d BondDefinition) PrettyJson() ([]byte, error) {
	raw, err := d.Json()
	if err != nil {
		return nil, err
	}
	var out bytes.Buffer
	if err = json.Indent(&out, raw, "", " "); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// Yaml returns the definition serialized as YAML.
func (d BondDefinition) Yaml() ([]byte, error) {
	return yaml.Marshal(d)
}
|
package objs
import (
"fmt"
"strconv"
"strings"
)
// Ratio is an exact rational number Num/Den over int64.
type Ratio struct {
	Num int64
	Den int64
}

// ZeroRatio is the canonical zero value, 0/1.
var ZeroRatio = Ratio{0, 1}
// NewRatio builds the ratio n/d in lowest terms. A zero denominator is
// coerced to 1 rather than rejected.
func NewRatio(n int64, d int64) Ratio {
	den := d
	if den == 0 {
		den = 1
	}
	return Ratio{Num: n, Den: den}.Reduced()
}
// Whole returns the integer n as the ratio n/1.
func Whole(n int64) Ratio {
	return Ratio{n, 1}
}
// String renders the ratio as "num/den", or just "num" for whole values.
func (r Ratio) String() string {
	if r.Den != 1 {
		return fmt.Sprintf("%d/%d", r.Num, r.Den)
	}
	return fmt.Sprintf("%d", r.Num)
}
// ToFloat returns the ratio as the float64 quotient Num/Den.
func (r Ratio) ToFloat() float64 {
	return float64(r.Num) / float64(r.Den)
}
// ParseRatio parses "n" or "n/d" into a Ratio, reducing the result. On any
// parse failure it returns ZeroRatio and a descriptive error.
func ParseRatio(instr string) (Ratio, error) {
	parts := strings.Split(instr, "/")
	switch len(parts) {
	case 1:
		// No slash: parse the whole string as an integer.
		whole, err := strconv.ParseInt(instr, 10, 64)
		if err != nil {
			return ZeroRatio, fmt.Errorf("Improper whole number: %v", err)
		}
		return Whole(whole), nil
	case 2:
		num, err := strconv.ParseInt(parts[0], 10, 64)
		if err != nil {
			return ZeroRatio, fmt.Errorf("Improper numerator: %s", instr)
		}
		den, err := strconv.ParseInt(parts[1], 10, 64)
		if err != nil {
			return ZeroRatio, fmt.Errorf("Improper denominator: %s", instr)
		}
		return Ratio{num, den}.Reduced(), nil
	default:
		return ZeroRatio, fmt.Errorf("Improper division placement: %s", instr)
	}
}
// gcd returns the greatest common divisor of a and b as a non-negative
// value; gcd(0, 0) is 0.
//
// BUG FIX: the old implementation returned a*b whenever either argument was
// 1 (so gcd(1, 5) == 5), which made Reduced collapse 1/5 to 0/1 and turn
// 3/1 into 3/0. It also returned negative values for negative inputs,
// flipping reduced denominators negative. This iterative Euclid on absolute
// values fixes both.
func gcd(a int64, b int64) int64 {
	if a < 0 {
		a = -a
	}
	if b < 0 {
		b = -b
	}
	for b != 0 {
		a, b = b, a%b
	}
	return a
}
// Reduced returns the ratio in lowest terms. The sign multiplier m flips
// the divisor when the denominator is negative, so the sign ends up on the
// numerator and the denominator stays positive. A zero numerator always
// normalizes to 0/1, and a zero divisor is coerced to 1 to avoid dividing
// by zero.
// NOTE(review): correct sign normalization relies on gcd returning a
// non-negative value — verify against the gcd helper in this file.
func (r Ratio) Reduced() Ratio {
	var m int64 = 1
	if r.Den < 0 {
		m = -1
	}
	g := gcd(r.Num, r.Den) * m
	if g == 0 {
		g = 1
	}
	if r.Num == 0 {
		return Ratio{Num: 0, Den: 1}
	}
	return Ratio{Num: r.Num / g, Den: r.Den / g}
}
// Add returns the reduced sum r + oth via cross multiplication.
func (r Ratio) Add(oth Ratio) Ratio {
	num := r.Num*oth.Den + oth.Num*r.Den
	den := r.Den * oth.Den
	return Ratio{num, den}.Reduced()
}

// Multiply returns the reduced product r * oth.
func (r Ratio) Multiply(oth Ratio) Ratio {
	return Ratio{Num: r.Num * oth.Num, Den: r.Den * oth.Den}.Reduced()
}
// EqualTo reports whether two ratios are equal. Any two zero-numerator
// ratios compare equal regardless of denominator; otherwise both fields
// must match exactly (ratios are normally kept in reduced form).
func (r Ratio) EqualTo(oth Ratio) bool {
	bothZero := r.Num == 0 && oth.Num == 0
	return bothZero || (r.Num == oth.Num && r.Den == oth.Den)
}
// GreaterThan reports whether r > oth via cross multiplication.
// NOTE(review): this comparison is only correct when both denominators are
// positive; ratios produced by Reduced satisfy this, but hand-built Ratio
// values may not — confirm callers.
func (r Ratio) GreaterThan(oth Ratio) bool {
	return r.Num*oth.Den > r.Den*oth.Num
}
|
// Copyright (c) 2018 soren yang
//
// Licensed under the MIT License
// you may not use this file except in complicance with the License.
// You may obtain a copy of the License at
//
// https://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logger
import (
"github.com/sirupsen/logrus"
"github.com/lsytj0413/ena/logger/convert"
)
// LayoutFormatter is a logrus formatter driven by a layout pattern.
// Supported conversion verbs:
// %d dateformat
// %level
// %M method name
// %L line
// %msg
// %P package name
// %F file name
type LayoutFormatter struct {
	pattern string            // the raw layout pattern this formatter was built from
	c       convert.Converter // converter chain compiled from pattern
}
// hasCallerField reports whether the converter chain references any of the
// caller-info fields (package, file, method, or line).
func hasCallerField(c convert.Converter) bool {
	found := false
	visitor := convert.NewFieldVisitorForFunc(func(field convert.FieldConverter) bool {
		switch convert.FieldKey(field.Key()) {
		case convert.FieldKeyPackage, convert.FieldKeyFile, convert.FieldKeyMethod, convert.FieldKeyLine:
			found = true
			return false // caller info found; stop visiting
		}
		return true
	})
	c.Visit(visitor)
	return found
}
// NewLayoutFormatter compiles pattern into a converter chain and returns a
// formatter that uses it, or an error when the pattern cannot be parsed.
func NewLayoutFormatter(pattern string) (*LayoutFormatter, error) {
	c, err := convert.NewBuilder().Build(pattern)
	if err != nil {
		return nil, err
	}
	f := &LayoutFormatter{
		pattern: pattern,
		c:       c,
	}
	return f, nil
}
// Format implements logrus.Formatter: it maps the logrus entry onto an
// internal convert.Entry and renders it through the compiled converter.
// Caller fields default to placeHolder when no caller info was recorded.
func (l *LayoutFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	innerEntry := &convert.Entry{
		Time:    entry.Time,
		Message: entry.Message,
		Level:   entry.Level.String(),
		Package: placeHolder,
		File:    placeHolder,
		Method:  placeHolder,
		Line:    placeHolder,
	}
	// Fill in caller info when a hook stored it in the entry data.
	// NOTE(review): the unchecked type assertion panics if something else
	// stores a different type under loggerCallerKeyName — confirm the key
	// is private to this package.
	if d, ok := entry.Data[loggerCallerKeyName]; ok {
		s := d.(*source)
		innerEntry.Package = s.p
		innerEntry.File = s.f
		innerEntry.Method = s.m
		innerEntry.Line = s.l
	}
	return []byte(l.c.Convert(innerEntry)), nil
}
|
package main
import (
"net/http"
"os"
"path"
"path/filepath"
"github.com/packaged/logger/v2"
"github.com/packaged/logger/v2/ld"
"go.uber.org/zap"
cli "gopkg.in/alecthomas/kingpin.v2"
)
var (
	// configPath points at the YAML config file (-c/--config).
	configPath = cli.Flag("config", "Path to the config yaml").Short('c').String()
	// devEnvironment enables development mode (-d/--development).
	devEnvironment = cli.Flag("development", "Development Environment").Short('d').Bool()
	// verboseLog enables debug-level logging (-v/--verbose).
	verboseLog = cli.Flag("verbose", "Verbose logging").Short('v').Bool()
)

// logs is the process-wide logger, initialized in main.
var logs *logger.Logger
// main wires up logging, locates and loads the configuration file, then
// starts the SSH tunnel and the HTTP(S) proxy server.
func main() {
	cli.Parse()

	// Logger options: verbose enables debug; otherwise stack traces are
	// suppressed. Development mode switches to console encoding.
	var opts []logger.Option
	if *verboseLog {
		opts = append(opts, logger.Debug)
	} else {
		opts = append(opts, func(config *zap.Config) { logger.DisableStacktrace(config) })
	}
	if *devEnvironment {
		opts = append(opts, logger.WithConsoleEncoding)
	}
	logs = logger.Instance(opts...)

	// Candidate config locations: an explicit --config path (absolute or
	// relative to the CWD); otherwise config.yaml in the CWD and next to
	// the binary.
	var configPaths []string
	if *configPath != "" {
		if filepath.IsAbs(*configPath) {
			configPaths = append(configPaths, *configPath)
		} else if cwd, err := os.Getwd(); err == nil {
			configPaths = append(configPaths, path.Join(cwd, *configPath))
		}
	} else {
		if cwd, err := os.Getwd(); err == nil {
			configPaths = append(configPaths, path.Join(cwd, "config.yaml"))
		}
		if binPath, err := filepath.Abs(filepath.Dir(os.Args[0])); err == nil {
			configPaths = append(configPaths, path.Join(binPath, "config.yaml"))
		}
	}

	// Load the first candidate that exists and is a regular file.
	var cfg *Config
	for _, configFile := range configPaths {
		// BUG FIX: the old check (!os.IsNotExist(err) && !info.IsDir())
		// dereferenced a nil FileInfo when Stat failed with any error other
		// than "not exist" (e.g. permission denied), causing a panic.
		info, err := os.Stat(configFile)
		if err != nil || info.IsDir() {
			continue
		}
		cfg, err = LoadConfig(configFile)
		logs.FatalIf(err, "loading config")
		break
	}
	if cfg == nil {
		logs.Fatal("Config file not found")
	}

	go startSshTunnel(cfg)

	p := NewProxy(cfg)
	httpServer := http.Server{Addr: cfg.ListenAddress, Handler: p}
	logs.Debug("Listening", ld.TrustedString("host", cfg.ListenAddress))
	if cfg.Tls {
		// CONSISTENCY FIX: the duplicated cfg.Tls checks are merged, and we
		// return so control never falls through to the plaintext listener.
		logs.Debug("Serving with TLS")
		logs.FatalIf(httpServer.ListenAndServeTLS(cfg.TlsCertFile, cfg.TlsKeyFile), "serve")
		return
	}
	logs.FatalIf(httpServer.ListenAndServe(), "serve")
}
|
// Copyright (c) 2016, Samvel Khalatyan. All rights reserved.
//
// gh is the main command for GitHub cli
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"github.com/skhal/gh/env"
)
var (
	// commands is the registry of all subcommands, populated in init.
	commands []*Command
)

// init registers the built-in subcommands in display order.
func init() {
	commands = []*Command{
		cmdHelp,
		cmdAuth,
		cmdCfg,
		cmdEnv,
		cmdIssues,
	}
}
// main configures logging, picks the subcommand arguments (defaulting to
// "help"), and exits non-zero when the command fails.
func main() {
	// Debug builds log with file:line to stderr; otherwise logging is muted.
	if env.Debug {
		log.SetFlags(log.Lshortfile)
		log.SetOutput(os.Stderr)
	} else {
		log.SetOutput(ioutil.Discard)
	}
	args := os.Args[1:]
	if len(args) == 0 {
		args = []string{"help"}
	}
	if err := run(args); err != nil {
		fmt.Fprintf(os.Stderr, "%s: %s\n", os.Args[0], err)
		os.Exit(1)
	}
}
// run dispatches args[0] to the matching enabled command, parsing its flag
// set first when one is defined. Unknown or disabled commands return an
// error.
func run(args []string) error {
	name, rest := args[0], args[1:]
	for _, cmd := range commands {
		if cmd.Name != name {
			continue
		}
		if !cmd.Enabled {
			break // known but disabled: fall through to the error below
		}
		if cmd.Flags != nil {
			cmd.Flags.Parse(rest)
			rest = cmd.Flags.Args()
		}
		return cmd.Run(rest)
	}
	return fmt.Errorf("invalid command %q", name)
}
|
package main
import "fmt"
// main demonstrates function literals: an immediately-invoked literal, a
// function value in a variable, and a function returned from a function.
func main() {
	// Immediately-invoked function literal.
	func() {
		fmt.Println("He")
	}()
	// A function value assigned to a variable and invoked through it.
	sayTwo := func() {
		fmt.Println("Two")
	}
	sayTwo()
	fmt.Printf("%T, \n", sayTwo)
	fmt.Println(myCallback())
	cb := myCallback
	fmt.Printf("%T, %v\n", cb(), cb())
}
// myCallback returns a closure that always yields 32.
func myCallback() func() int {
	inner := func() int {
		return 32
	}
	return inner
}
|
package main
import "fmt"
// main demonstrates pointer basics: taking an address with &, storing it,
// and reading/writing through it with *.
func main() {
	a := 42
	fmt.Println(a)  // value of a: 42
	fmt.Println(&a) // address of a (varies per run)
	fmt.Printf("%T\n", a)  // type of a: int
	fmt.Printf("%T\n", &a) // type of &a: *int
	// Sharing an address:
	var b *int = &a
	fmt.Println(b) // prints the same address as &a
	c := &a
	fmt.Println(*c)  // dereference: the value stored at the address, 42
	fmt.Println(*&a) // take the address, then dereference it: 42
	*c = 43 // write through the pointer; a itself now holds 43
	fmt.Println(a) // 43
}
|
package dockercomposeservice
import (
"context"
"github.com/tilt-dev/tilt/internal/controllers/apicmp"
"github.com/tilt-dev/tilt/internal/dockercompose"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/logger"
)
// Sync all the project watches with the dockercompose objects
// we're currently tracking.
//
// Holds r.mu for the whole pass: starts a watch goroutine for every
// project hash present in r.results that has no running watch, and cancels
// watches whose hash no longer appears in any result.
func (r *Reconciler) manageOwnedProjectWatches(ctx context.Context) {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Snapshot of the watches currently running, keyed by project hash.
	running := map[string]bool{}
	for key := range r.projectWatches {
		running[key] = true
	}

	// Start a watch for each owned hash that is not running yet.
	owned := map[string]bool{}
	for _, result := range r.results {
		hash := result.ProjectHash
		owned[hash] = true
		if hash != "" && !running[hash] {
			// Derive a cancelable context so this watch can be torn down
			// independently when the project goes away.
			ctx, cancel := context.WithCancel(ctx)
			pw := &ProjectWatch{
				ctx:     ctx,
				cancel:  cancel,
				project: result.Spec.Project,
				hash:    hash,
			}
			r.projectWatches[hash] = pw
			go r.runProjectWatch(pw)
			running[hash] = true
		}
	}

	// Cancel and forget watches for projects we no longer own.
	for key := range r.projectWatches {
		if !owned[key] {
			r.projectWatches[key].cancel()
			delete(r.projectWatches, key)
		}
	}
}
// Stream events from the docker-compose project and
// fan them out to each service in the project.
//
// Runs until the event stream closes or the watch context is canceled; on
// exit it removes itself from r.projectWatches.
func (r *Reconciler) runProjectWatch(pw *ProjectWatch) {
	defer func() {
		r.mu.Lock()
		delete(r.projectWatches, pw.hash)
		r.mu.Unlock()
		pw.cancel()
	}()

	ctx := pw.ctx
	project := pw.project
	ch, err := r.dcc.StreamEvents(ctx, project)
	if err != nil {
		// TODO(nick): Figure out where this error should be published.
		return
	}

	for {
		select {
		case evtJson, ok := <-ch:
			if !ok {
				// Event stream closed by the sender.
				return
			}
			evt, err := dockercompose.EventFromJsonStr(evtJson)
			if err != nil {
				logger.Get(ctx).Debugf("[dcwatch] failed to unmarshal dc event '%s' with err: %v", evtJson, err)
				continue
			}
			// Only container events carry state we care about here.
			if evt.Type != dockercompose.TypeContainer {
				continue
			}
			containerJSON, err := r.dc.ContainerInspect(ctx, evt.ID)
			if err != nil {
				logger.Get(ctx).Debugf("[dcwatch] inspecting container: %v", err)
				continue
			}
			if containerJSON.ContainerJSONBase == nil || containerJSON.ContainerJSONBase.State == nil {
				logger.Get(ctx).Debugf("[dcwatch] inspecting container: no state found")
				continue
			}
			cState := containerJSON.ContainerJSONBase.State
			dcState := dockercompose.ToContainerState(cState)
			r.recordContainerEvent(evt, dcState)
		case <-ctx.Done():
			return
		}
	}
}
// Record the container event and re-reconcile the dockercompose service.
func (r *Reconciler) recordContainerEvent(evt dockercompose.Event, state *v1alpha1.DockerContainerState) {
	r.mu.Lock()
	defer r.mu.Unlock()

	result, ok := r.resultsByServiceName[evt.Service]
	if !ok {
		return
	}
	// Skip the requeue when the container state has not actually changed.
	if apicmp.DeepEqual(state, result.Status.ContainerState) {
		return
	}

	// Status is a value struct, so mutating a local copy is safe.
	newStatus := result.Status
	newStatus.ContainerID = evt.ID
	newStatus.ContainerState = state
	result.Status = newStatus
	r.requeuer.Add(result.Name)
}
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the GO_LICENSE file.
//go:build s390x || ppc64le || ppc64
// +build s390x ppc64le ppc64
package hmacsha512
// block is implemented in arch-specific assembly for the build targets in
// the constraints above (s390x, ppc64, ppc64le); there is no Go body.
// NOTE(review): presumably it folds p into dig's running hash state —
// confirm against the assembly sources.
//
//go:noescape
func block(dig *digest, p []byte)
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package systrap
import (
"math/rand"
"reflect"
"unsafe"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/sentry/platform/systrap/sysmsg"
)
// initStubProcess is defined in arch-specific assembly.
func initStubProcess()

// addrOfInitStubProcess returns the start address of initStubProcess.
//
// In Go 1.17+, Go references to assembly functions resolve to an ABIInternal
// wrapper function rather than the function itself. We must reference from
// assembly to get the ABI0 (i.e., primary) address.
func addrOfInitStubProcess() uintptr

// stubCall calls the stub at the given address with the given pid.
// Implemented in assembly (no Go body).
func stubCall(addr, pid uintptr)
// unsafeSlice returns a slice for the given address and length.
//
// The caller must guarantee that [addr, addr+length) stays valid and mapped
// for as long as the slice is used; no bounds or lifetime checks are done.
func unsafeSlice(addr uintptr, length int) (slice []byte) {
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&slice))
	sh.Data = addr
	sh.Len = length
	sh.Cap = length
	return
}
// prepareSeccompRules compiles stub process seccomp filters and fill
// the sock_fprog structure. So the stub process will only need to call
// seccomp system call to apply these filters.
//
// Memory layout written at stubSysmsgRules:
//   [linux.SockFprog header][BPF instruction array]
// The region is then mprotect'ed read-only.
//
//go:nosplit
func prepareSeccompRules(stubSysmsgStart, stubSysmsgRules, stubSysmsgRulesLen uintptr) {
	instrs := sysmsgThreadRules(stubSysmsgStart)
	progLen := len(instrs) * int(unsafe.Sizeof(linux.BPFInstruction{}))
	// The instruction array lives immediately after the SockFprog header.
	progPtr := stubSysmsgRules + unsafe.Sizeof(linux.SockFprog{})
	if progLen+int(unsafe.Sizeof(linux.SockFprog{})) > int(stubSysmsgRulesLen) {
		panic("not enough space for sysmsg seccomp rules")
	}
	// Build a slice view over the reserved region and copy the program in.
	var targetSlice []linux.BPFInstruction
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&targetSlice))
	sh.Data = progPtr
	sh.Cap = len(instrs)
	sh.Len = sh.Cap
	copy(targetSlice, instrs)
	// stubSysmsgRules and progPtr are addresses from a stub mapping which
	// is mapped once and never moved, so it is safe to use unsafe.Pointer
	// this way for them.
	sockProg := (*linux.SockFprog)(unsafe.Pointer(stubSysmsgRules))
	sockProg.Len = uint16(len(instrs))
	sockProg.Filter = (*linux.BPFInstruction)(unsafe.Pointer(progPtr))
	// Make the seccomp rules stub read-only.
	if _, _, errno := unix.RawSyscall(
		unix.SYS_MPROTECT,
		stubSysmsgRules,
		stubSysmsgRulesLen,
		unix.PROT_READ); errno != 0 {
		panic("mprotect failed: " + errno.Error())
	}
}
// stubInit allocates and initializes the stub memory region which includes:
// - the stub code to do initial initialization of a stub process.
// - the sysmsg signal handler code to notify sentry about new events such as
//   system calls, memory faults, etc.
// - precompiled seccomp rules to trap application system calls.
// - reserved space for stub-thread stack regions.
func stubInit() {
	// *--------stubStart-------------------*
	// |--------stubInitProcess-------------|
	// |  stub code to init stub processes  |
	// |--------stubSysmsgStart-------------|
	// |  sysmsg code                       |
	// |--------stubSysmsgRuleStart---------|
	// |  precompiled sysmsg seccomp rules  |
	// |--------guard page------------------|
	// |--------random gap------------------|
	// |                                    |
	// |--------stubSysmsgStack-------------|
	// |  Reserved space for per-thread     |
	// |  sysmsg stacks.                    |
	// |----------stubContextQueue----------|
	// | Shared ringbuffer queue for stubs  |
	// | to select the next context.        |
	// |--------stubThreadContextRegion-----|
	// | Reserved space for thread contexts |
	// *------------------------------------*

	// Grab the existing stub.
	procStubBegin := addrOfInitStubProcess()
	procStubLen := int(safecopy.FindEndAddress(procStubBegin) - procStubBegin)
	procStubSlice := unsafeSlice(procStubBegin, procStubLen)
	mapLen, _ := hostarch.PageRoundUp(uintptr(procStubLen))

	// All stub*-region globals below are computed as offsets from 0 first,
	// then rebased onto the final stubStart address once it is chosen.
	stubSysmsgStart = mapLen
	stubSysmsgLen := len(sysmsg.SighandlerBlob)
	mapLen, _ = hostarch.PageRoundUp(mapLen + uintptr(stubSysmsgLen))

	stubSysmsgRules = mapLen
	stubSysmsgRulesLen = hostarch.PageSize * 4
	mapLen += stubSysmsgRulesLen
	stubROMapEnd = mapLen
	// Add a guard page.
	mapLen += hostarch.PageSize
	stubSysmsgStack = mapLen

	// Allocate maxGuestThreads plus ONE because each per-thread stack
	// has to be aligned to sysmsg.PerThreadMemSize.
	// Look at sysmsg/sighandler.c:sysmsg_addr() for more details.
	mapLen, _ = hostarch.PageRoundUp(mapLen + sysmsg.PerThreadMemSize*(maxSystemThreads+1))

	// Allocate context queue region
	stubContextQueueRegion = mapLen
	stubContextQueueRegionLen, _ = hostarch.PageRoundUp(unsafe.Sizeof(contextQueue{}))
	mapLen += stubContextQueueRegionLen

	stubSpinningThreadQueueAddr = mapLen
	mapLen += sysmsg.SpinningQueueMemSize

	// Allocate thread context region
	stubContextRegion = mapLen
	stubContextRegionLen = sysmsg.AllocatedSizeofThreadContextStruct * (maxGuestContexts + 1)
	mapLen, _ = hostarch.PageRoundUp(mapLen + stubContextRegionLen)

	// Randomize stubStart address.
	// Any multiplication overflow here is harmless: the offset is reduced
	// modulo maxRandomOffset before use.
	randomOffset := uintptr(rand.Uint64() * hostarch.PageSize)
	maxRandomOffset := maxRandomOffsetOfStubAddress - mapLen
	stubStart = uintptr(0)
	for offset := uintptr(0); offset < maxRandomOffset; offset += hostarch.PageSize {
		stubStart = maxStubUserAddress + (randomOffset+offset)%maxRandomOffset
		// Map the target address for the stub.
		//
		// We don't use FIXED here because we don't want to unmap
		// something that may have been there already. We just walk
		// down the address space until we find a place where the stub
		// can be placed.
		addr, _, _ := unix.RawSyscall6(
			unix.SYS_MMAP,
			stubStart,
			stubROMapEnd,
			unix.PROT_WRITE|unix.PROT_READ,
			unix.MAP_PRIVATE|unix.MAP_ANONYMOUS,
			0 /* fd */, 0 /* offset */)
		if addr == stubStart {
			break
		}
		if addr != 0 {
			// Unmap the region we've mapped accidentally.
			unix.RawSyscall(unix.SYS_MUNMAP, addr, stubROMapEnd, 0)
		}
		stubStart = uintptr(0)
	}
	if stubStart == 0 {
		// This will happen only if we exhaust the entire address
		// space, and it will take a long, long time.
		panic("failed to map stub")
	}
	// Randomize stubSysmsgStack address.
	gap := uintptr(rand.Uint64()) * hostarch.PageSize % (maximumUserAddress - stubStart - mapLen)
	stubSysmsgStack += uintptr(gap)
	stubContextQueueRegion += uintptr(gap)
	stubContextRegion += uintptr(gap)

	// Copy the stub to the address.
	targetSlice := unsafeSlice(stubStart, procStubLen)
	copy(targetSlice, procStubSlice)
	stubInitProcess = stubStart

	// Rebase the precomputed offsets onto the chosen stubStart address.
	stubSysmsgStart += stubStart
	stubSysmsgStack += stubStart
	stubROMapEnd += stubStart
	stubContextQueueRegion += stubStart
	stubSpinningThreadQueueAddr += stubStart
	stubContextRegion += stubStart

	// Align stubSysmsgStack to the per-thread stack size.
	// Look at sysmsg/sighandler.c:sysmsg_addr() for more details.
	if offset := stubSysmsgStack % sysmsg.PerThreadMemSize; offset != 0 {
		stubSysmsgStack += sysmsg.PerThreadMemSize - offset
	}
	stubSysmsgRules += stubStart

	// Copy the sysmsg signal handler blob into place.
	targetSlice = unsafeSlice(stubSysmsgStart, stubSysmsgLen)
	copy(targetSlice, sysmsg.SighandlerBlob)

	// Initialize stub globals
	p := (*uint64)(unsafe.Pointer(stubSysmsgStart + uintptr(sysmsg.Sighandler_blob_offset____export_deep_sleep_timeout)))
	*p = deepSleepTimeout
	p = (*uint64)(unsafe.Pointer(stubSysmsgStart + uintptr(sysmsg.Sighandler_blob_offset____export_context_region)))
	*p = uint64(stubContextRegion)
	p = (*uint64)(unsafe.Pointer(stubSysmsgStart + uintptr(sysmsg.Sighandler_blob_offset____export_stub_start)))
	*p = uint64(stubStart)
	archState := (*sysmsg.ArchState)(unsafe.Pointer(stubSysmsgStart + uintptr(sysmsg.Sighandler_blob_offset____export_arch_state)))
	archState.Init()
	p = (*uint64)(unsafe.Pointer(stubSysmsgStart + uintptr(sysmsg.Sighandler_blob_offset____export_context_queue_addr)))
	*p = uint64(stubContextQueueRegion)
	p = (*uint64)(unsafe.Pointer(stubSysmsgStart + uintptr(sysmsg.Sighandler_blob_offset____export_spinning_queue_addr)))
	*p = uint64(stubSpinningThreadQueueAddr)

	prepareSeccompRules(stubSysmsgStart, stubSysmsgRules, stubSysmsgRulesLen)

	// Make the stub executable.
	if _, _, errno := unix.RawSyscall(
		unix.SYS_MPROTECT,
		stubStart,
		stubROMapEnd-stubStart,
		unix.PROT_EXEC|unix.PROT_READ); errno != 0 {
		panic("mprotect failed: " + errno.Error())
	}

	// Set the end.
	stubEnd = stubStart + mapLen + uintptr(gap)
	log.Debugf("stubStart %x stubSysmsgStart %x stubSysmsgStack %x, stubContextQueue %x, stubThreadContextRegion %x, mapLen %x", stubStart, stubSysmsgStart, stubSysmsgStack, stubContextQueueRegion, stubContextRegion, mapLen)
	log.Debugf(archState.String())
}
|
package handler
// Error codes returned to API callers. ErrCodeOK signals success; every
// other code identifies a failure category. Each code pairs with the
// ErrMsg* constant of the same suffix below.
const (
	ErrCodeOK               = 0
	ErrCodeApiGatewayFormat = 1001
	ErrCodeUnknownAction    = 1002
	ErrCodeInvalidRequest   = 1003
	ErrCodeImportFail       = 1004
	ErrCodeReadFile         = 1005
	ErrCodeJsonMarshal      = 1006
	ErrCodeReadDB           = 1007
	ErrCodeCloudIdMissed    = 1008
	ErrCodeUploadFile       = 1009
	ErrCodeMd5Check         = 1010
	ErrCodeDataNotFound     = 1011
	ErrCodeUnsupportedFunc  = 1012
	ErrCodeShellCmd         = 1013
	ErrCodeMkdir            = 1014
	ErrCodeActivatePlan     = 1015
)
// Human-readable (Chinese) messages matching the ErrCode* constants above.
// English translations are given in the trailing comments.
// NOTE(review): ErrMsgMkDir's casing differs from ErrCodeMkdir; renaming the
// exported constant would break callers, so it is left as-is.
const (
	ErrMsgOK               = "OK"
	ErrMsgApiGatewayFormat = "API Gateway格式解析错误" // failed to parse API Gateway format
	ErrMsgUnknownAction    = "未知的Action"          // unknown Action
	ErrMsgInvalidRequest   = "请求格式错误"             // malformed request
	ErrMsgImportFail       = "导入物料失败"             // failed to import material
	ErrMsgReadFile         = "读取文件失败"             // failed to read file
	ErrMsgJsonMarshal      = "JsonMarshal失败"      // JSON marshal failed
	ErrMsgReadDB           = "读取数据库失败"            // failed to read database
	ErrMsgCloudIdMissed    = "获取不到CloudId信息"      // CloudId info unavailable
	ErrMsgUploadFile       = "上传文件失败"             // failed to upload file
	ErrMsgMd5Check         = "Md5校验失败"            // MD5 check failed
	ErrMsgDataNotFound     = "未查到对应数据"            // no matching data found
	ErrMsgUnsupportedFunc  = "功能暂时不支持"            // feature not yet supported
	ErrMsgShellCmd         = "执行CMD命令失败"          // failed to run shell command
	ErrMsgMkDir            = "创建目录失败"             // failed to create directory
	ErrMsgActivatePlan     = "激活规划失败"             // failed to activate plan
)
type ErrorMsg struct {
Code int `json:"Code"`
Message string `json:"Message"`
}
//错误码返回体
func (e ErrorMsg) WithErrRsp(code int, msg string) interface{} {
e.Code = code
e.Message = msg
return struct {
Error ErrorMsg `json:Error`
}{e}
}
|
package controllers
import (
"github.com/astaxie/beego"
)
// IndexController serves the site's index (home) page.
// (Added 2015-10-04: index home page.)
type IndexController struct {
	beego.Controller
}
// GetIndexPage renders the static index page into the HTTP response.
// @router / [get]
func (i *IndexController) GetIndexPage() {
	w := i.Ctx.ResponseWriter
	StaticPageRender("./view/index.html", w)
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package reporters
import (
"context"
"fmt"
"regexp"
"strings"
"chromiumos/tast/errors"
)
var (
	// rTargetHosted matches "chromiumos"/"chromeos" (case-insensitive),
	// as found in /etc/lsb-release on ChromeOS devices.
	rTargetHosted = regexp.MustCompile(`(?i)chrom(ium|e)os`)
	// rDevNameStripper strips a trailing partition suffix (e.g. "p1" or
	// "3") from a block-device name, leaving the base device.
	rDevNameStripper = regexp.MustCompile(`p?[0-9]+$`)
)
// BootedFromRemovableDevice returns true if the root partition is on a removable device.
func (r *Reporter) BootedFromRemovableDevice(ctx context.Context) (bool, error) {
	part, err := RootPartition(ctx, r)
	if err != nil {
		return false, errors.Wrap(err, "failed to get root partition")
	}
	onRemovable, err := IsRemovableDevice(ctx, r, part)
	if err != nil {
		return false, errors.Wrapf(err, "failed to determine if %q is removable", part)
	}
	return onRemovable, nil
}
// RootPartition gets the root partition as reported by the 'rootdev -s' command.
func RootPartition(ctx context.Context, r *Reporter) (string, error) {
	out, err := r.CommandOutputLines(ctx, "rootdev", "-s")
	switch {
	case err != nil:
		return "", errors.Wrap(err, "failed to determine root partition")
	case len(out) == 0:
		return "", errors.New("root partition not found")
	}
	// rootdev prints the partition on the first line of output.
	return out[0], nil
}
// isTargetHosted determines if DUT is hosted by checking if /etc/lsb-release has chromiumos attributes.
func isTargetHosted(ctx context.Context, r *Reporter) (bool, error) {
	lines, err := r.CatFileLines(ctx, "/etc/lsb-release")
	if err != nil {
		return false, err
	}
	// If file is empty, then it's some kind of system error.
	if len(lines) == 0 {
		return false, nil
	}
	hosted := rTargetHosted.MatchString(lines[0])
	return hosted, nil
}
// IsRemovableDevice determines if the device is removable media by reading
// /sys/block/<base>/removable. Non-hosted (non-ChromeOS) targets always
// report false.
// TODO(aluo): deduplicate with utils.deviceRemovable.
func IsRemovableDevice(ctx context.Context, r *Reporter, device string) (bool, error) {
	hosted, err := isTargetHosted(ctx, r)
	if err != nil {
		return false, err
	}
	if !hosted {
		return false, nil
	}
	// Removes the partition portion of the device. The path is expected to
	// look like /dev/<name>; guard instead of panicking on short inputs.
	parts := strings.Split(device, "/")
	if len(parts) < 3 {
		return false, errors.Errorf("unexpected device path %q", device)
	}
	baseDev := rDevNameStripper.ReplaceAllString(parts[2], "")
	removable, err := r.CatFile(ctx, fmt.Sprintf("/sys/block/%s/removable", baseDev))
	if err != nil {
		return false, err
	}
	if removable != "0" && removable != "1" {
		// err is nil on this path, so create a fresh error rather than
		// wrapping a nil error as the original code did.
		return false, errors.Errorf("removable output %q is not 0 or 1", removable)
	}
	return removable == "1", nil
}
|
/*
* @Description:
* @Author: ccj
* @Date: 2020-12-28 21:34:19
* @LastEditTime: 2020-12-28 21:44:23
* @LastEditors:
*/
package basic
import(
"fmt"
"time"
)
// Learn4 demonstrates multiplexing two channels with select, exiting once
// no value arrives within the timeout window.
func Learn4() {
	first := make(chan int)
	second := make(chan int)
	go send(first, 0)
	go send(second, 10)
	// Give the senders a moment to start blocking on their sends.
	time.Sleep(time.Second)
	for {
		select {
		case v := <-first:
			fmt.Printf("get value %d from ch1\n", v)
		case v := <-second:
			fmt.Printf("get value %d from ch2\n", v)
		case <-time.After(22 * time.Second):
			fmt.Printf("time out\n")
			return
		}
	}
}
// send writes the ten consecutive integers begin..begin+9 to ch.
func send(ch chan int, begin int) {
	for offset := 0; offset < 10; offset++ {
		ch <- begin + offset
	}
}
|
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fsstress runs fsstress tool inside a docker container.
package fsstress
import (
"context"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/docker/docker/api/types/mount"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/pkg/test/testutil"
)
// init seeds math/rand so each test run picks a different fsstress seed.
func init() {
	rand.Seed(int64(time.Now().Nanosecond()))
}
// TestMain verifies the docker environment is usable before running the
// test binary.
func TestMain(m *testing.M) {
	dockerutil.EnsureSupportedDockerVersion()
	flag.Parse()
	os.Exit(m.Run())
}
// config holds the parameters for a single fsstress run.
type config struct {
	operations string        // -n: number of operations per process
	processes  string        // -p: number of concurrent fsstress processes
	target     string        // -d: directory inside the container to stress
	mounts     []mount.Mount // extra mounts to attach to the container
}
// fsstress runs the fsstress tool inside a docker container with the given
// configuration and fails the test on error or on any unexpected output.
func fsstress(t *testing.T, conf config) {
	ctx := context.Background()
	d := dockerutil.MakeContainer(ctx, t)
	defer d.CleanUp(ctx)

	const image = "basic/fsstress"
	seed := strconv.FormatUint(uint64(rand.Uint32()), 10)
	args := []string{"-d", conf.target, "-n", conf.operations, "-p", conf.processes, "-s", seed, "-X"}
	opts := dockerutil.RunOpts{
		Image:  image,
		Mounts: conf.mounts,
	}

	// Build the mount portion of the repro command line. The previous code
	// prefixed " -v " and then also emitted "-v " per mount, printing a
	// doubled "-v -v" for the first mount.
	var mounts string
	for _, m := range conf.mounts {
		mounts += fmt.Sprintf(" -v <any_dir>:%s", m.Target)
	}
	t.Logf("Repro: docker run --rm --runtime=%s%s gvisor.dev/images/%s %s", dockerutil.Runtime(), mounts, image, strings.Join(args, " "))

	out, err := d.Run(ctx, opts, args...)
	if err != nil {
		t.Fatalf("docker run failed: %v\noutput: %s", err, out)
	}
	// This is to catch cases where fsstress spews out error messages during clean
	// up but doesn't return error.
	if len(out) > 0 {
		t.Fatalf("unexpected output: %s", out)
	}
}
// TestFsstressGofer runs fsstress against the gofer-backed root filesystem.
func TestFsstressGofer(t *testing.T) {
	// This takes between 30-60s to run on my machine. Adjust as needed.
	cfg := config{
		operations: "500",
		processes:  "20",
		target:     "/test",
	}
	fsstress(t, cfg)
}
// TestFsstressGoferShared runs fsstress against a host directory bind-mounted
// into the container (shared gofer mode).
func TestFsstressGoferShared(t *testing.T) {
	tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "fsstress")
	if err != nil {
		t.Fatalf("ioutil.TempDir() failed: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	// This takes between 30-60s to run on my machine. Adjust as needed.
	bindMount := mount.Mount{
		Source: tmpDir,
		Target: "/test",
		Type:   "bind",
	}
	fsstress(t, config{
		operations: "500",
		processes:  "20",
		target:     "/test",
		mounts:     []mount.Mount{bindMount},
	})
}
// TestFsstressTmpfs runs fsstress against the in-memory /tmp filesystem.
func TestFsstressTmpfs(t *testing.T) {
	// This takes about 10s to run on my machine. Adjust as needed.
	cfg := config{
		operations: "5000",
		processes:  "20",
		target:     "/tmp",
	}
	fsstress(t, cfg)
}
|
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the NOTICE.md file.
*/
package cache
import (
"crypto/sha256"
"fmt"
"io"
"os"
"testing"
)
const (
	// sampleData is the canonical blob content shared by the cache tests.
	sampleData = "0123456789"
)
// TestDirectoryCache runs the shared cache test suite against the on-disk
// directory cache, once with an LRU large enough to hold everything and once
// with an LRU of a single entry. The two previously-duplicated factory
// closures are collapsed into one parameterized builder.
func TestDirectoryCache(t *testing.T) {
	// newDirCache returns a factory that builds a DirectoryCache backed by a
	// fresh temp dir, with the given in-memory LRU capacity.
	newDirCache := func(maxEntry int) func() (BlobCache, cleanFunc) {
		return func() (BlobCache, cleanFunc) {
			tmp, err := os.MkdirTemp("", "testcache")
			if err != nil {
				t.Fatalf("failed to make tempdir: %v", err)
			}
			c, err := NewDirectoryCache(tmp, DirectoryCacheConfig{
				MaxLRUCacheEntry: maxEntry,
				SyncAdd:          true,
			})
			if err != nil {
				t.Fatalf("failed to make cache: %v", err)
			}
			return c, func() { os.RemoveAll(tmp) }
		}
	}
	// with enough memory cache
	testCache(t, "dir-with-enough-mem", newDirCache(10))
	// with smaller memory cache
	testCache(t, "dir-with-small-mem", newDirCache(1))
}
// TestMemoryCache runs the shared cache test suite against the in-memory
// cache, which needs no cleanup.
func TestMemoryCache(t *testing.T) {
	testCache(t, "memory", func() (BlobCache, cleanFunc) { return NewMemoryCache(), func() {} })
}
// cleanFunc releases resources (e.g. temp dirs) owned by a test-created cache.
type cleanFunc func()
// testCache runs a table of add-then-check scenarios against caches produced
// by newCache: each scenario writes a set of blobs (keyed by content digest)
// and then runs its hit/miss checks against the populated cache.
func testCache(t *testing.T, name string, newCache func() (BlobCache, cleanFunc)) {
	tests := []struct {
		name   string
		blobs  []string // contents to add, keyed by digestFor(content)
		checks []check  // assertions to run after all blobs are added
	}{
		{
			name: "empty_data",
			blobs: []string{
				"",
			},
			checks: []check{
				hit(""),
				miss(sampleData),
			},
		},
		{
			name: "data",
			blobs: []string{
				sampleData,
			},
			checks: []check{
				hit(sampleData),
				miss("dummy"),
			},
		},
		{
			name: "manydata",
			blobs: []string{
				sampleData,
				"test",
			},
			checks: []check{
				hit(sampleData),
				miss("dummy"),
			},
		},
		{
			// Adding the same content twice must still yield a single hit.
			name: "dup_data",
			blobs: []string{
				sampleData,
				sampleData,
			},
			checks: []check{
				hit(sampleData),
			},
		},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("%s-%s", name, tt.name), func(t *testing.T) {
			c, clean := newCache()
			defer clean()
			for _, blob := range tt.blobs {
				d := digestFor(blob)
				w, err := c.Add(d)
				if err != nil {
					t.Fatalf("failed to add %v: %v", d, err)
				}
				// The full blob must be written and committed before the
				// entry is visible to Get.
				if n, err := w.Write([]byte(blob)); err != nil || n != len(blob) {
					w.Close()
					t.Fatalf("failed to write %v (len:%d): %v", d, len(blob), err)
				}
				if err := w.Commit(); err != nil {
					w.Close()
					t.Fatalf("failed to commit %v (len:%d): %v", d, len(blob), err)
				}
				w.Close()
			}
			for _, check := range tt.checks {
				check(t, c)
			}
		})
	}
}
// check is a single assertion run against a populated BlobCache.
type check func(*testing.T, BlobCache)
// digestFor returns the hex-encoded SHA-256 digest of content, used as the
// cache key for that content.
func digestFor(content string) string {
	h := sha256.New()
	h.Write([]byte(content))
	return fmt.Sprintf("%x", h.Sum(nil))
}
// hit returns a check asserting that sample (keyed by its digest) is present
// in the cache, readable both as a whole blob and as an interior chunk.
func hit(sample string) check {
	return func(t *testing.T, c BlobCache) {
		key := digestFor(sample)
		// Whole-blob read.
		testChunk(t, c, key, 0, sample)
		// Interior chunk read (middle third).
		third := len(sample) / 3
		testChunk(t, c, key, int64(third), sample[third:2*third])
	}
}
// testChunk asserts that the cache entry for key contains sample at the
// given offset.
func testChunk(t *testing.T, c BlobCache, key string, offset int64, sample string) {
	p := make([]byte, len(sample))
	r, err := c.Get(key)
	if err != nil {
		t.Errorf("missed %v", key)
		return
	}
	if n, err := r.ReadAt(p, offset); err != nil && err != io.EOF {
		t.Errorf("failed to fetch blob %q: %v", key, err)
		return
	} else if n != len(sample) {
		// Report the actual number of bytes read (n): the previous message
		// printed len(p), which always equals len(sample) by construction
		// and so hid the real mismatch.
		t.Errorf("fetched size %d; want %d", n, len(sample))
		return
	}
	if digestFor(sample) != digestFor(string(p)) {
		t.Errorf("fetched %q; want %q", string(p), sample)
	}
}
// miss returns a check asserting that sample's digest is NOT present in
// the cache.
func miss(sample string) check {
	return func(t *testing.T, c BlobCache) {
		d := digestFor(sample)
		// On the failing branch err is nil, so it must not be formatted
		// into the message (the previous message printed ": <nil>").
		if _, err := c.Get(d); err == nil {
			t.Errorf("hit blob %q but must be missed", d)
			return
		}
	}
}
|
package block
import (
"github.com/transmutate-io/cryptocore/tx"
"github.com/transmutate-io/cryptocore/types"
)
type (
	// ForwardBlockNavigator is implemented by blocks that expose the hash
	// of the following block.
	ForwardBlockNavigator interface {
		NextBlockHash() types.Bytes
	}
	// BackwardBlockNavigator is implemented by blocks that expose the hash
	// of the preceding block.
	BackwardBlockNavigator interface {
		PreviousBlockHash() types.Bytes
	}
	// ConfirmationCounter reports how many confirmations a block has.
	ConfirmationCounter interface {
		Confirmations() int
	}
	// TransactionsLister exposes the hashes of the block's transactions.
	TransactionsLister interface {
		TransactionsHashes() []types.Bytes
	}
	// TransactionsFetcher exposes the block's full transactions.
	TransactionsFetcher interface {
		Transactions() []tx.Tx
	}
	// Block is the minimal interface all blocks satisfy: identity (hash),
	// chain position (height) and timestamp.
	Block interface {
		Hash() types.Bytes
		Height() int
		Time() types.UnixTime
	}
)
|
package ntfy
import (
"encoding/json"
"testing"
"github.com/TwiN/gatus/v5/alerting/alert"
"github.com/TwiN/gatus/v5/core"
)
// TestAlertDefaultProvider_IsValid exercises AlertProvider.IsValid across
// valid and invalid combinations of URL, topic, priority and token.
func TestAlertDefaultProvider_IsValid(t *testing.T) {
	scenarios := []struct {
		name     string
		provider AlertProvider
		expected bool
	}{
		{
			name:     "valid",
			provider: AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 1},
			expected: true,
		},
		{
			name:     "no-url-should-use-default-value",
			provider: AlertProvider{Topic: "example", Priority: 1},
			expected: true,
		},
		{
			name:     "valid-with-token",
			provider: AlertProvider{Topic: "example", Priority: 1, Token: "tk_faketoken"},
			expected: true,
		},
		{
			name:     "invalid-token",
			provider: AlertProvider{Topic: "example", Priority: 1, Token: "xx_faketoken"},
			expected: false,
		},
		{
			name:     "invalid-topic",
			provider: AlertProvider{URL: "https://ntfy.sh", Topic: "", Priority: 1},
			expected: false,
		},
		{
			name:     "invalid-priority-too-high",
			provider: AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 6},
			expected: false,
		},
		{
			name:     "invalid-priority-too-low",
			provider: AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: -1},
			expected: false,
		},
		{
			name:     "no-priority-should-use-default-value",
			provider: AlertProvider{URL: "https://ntfy.sh", Topic: "example"},
			expected: true,
		},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.name, func(t *testing.T) {
			// Call IsValid exactly once so the value reported on failure is
			// the same value that was compared (the original called it twice).
			if valid := scenario.provider.IsValid(); valid != scenario.expected {
				t.Errorf("expected %t, got %t", scenario.expected, valid)
			}
		})
	}
}
// TestAlertProvider_buildRequestBody verifies the exact JSON payload built
// for both triggered and resolved alerts, and that the payload is valid JSON.
func TestAlertProvider_buildRequestBody(t *testing.T) {
	firstDescription := "description-1"
	secondDescription := "description-2"
	scenarios := []struct {
		Name         string
		Provider     AlertProvider
		Alert        alert.Alert
		Resolved     bool
		ExpectedBody string // exact JSON expected from buildRequestBody
	}{
		{
			Name:         "triggered",
			Provider:     AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 1},
			Alert:        alert.Alert{Description: &firstDescription, SuccessThreshold: 5, FailureThreshold: 3},
			Resolved:     false,
			ExpectedBody: `{"topic":"example","title":"Gatus: endpoint-name","message":"An alert has been triggered due to having failed 3 time(s) in a row with the following description: description-1","tags":["x"],"priority":1}`,
		},
		{
			Name:         "resolved",
			Provider:     AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 2},
			Alert:        alert.Alert{Description: &secondDescription, SuccessThreshold: 5, FailureThreshold: 3},
			Resolved:     true,
			ExpectedBody: `{"topic":"example","title":"Gatus: endpoint-name","message":"An alert has been resolved after passing successfully 5 time(s) in a row with the following description: description-2","tags":["white_check_mark"],"priority":2}`,
		},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			// The condition results' Success mirrors Resolved so the body
			// reflects a consistent endpoint state.
			body := scenario.Provider.buildRequestBody(
				&core.Endpoint{Name: "endpoint-name"},
				&scenario.Alert,
				&core.Result{
					ConditionResults: []*core.ConditionResult{
						{Condition: "[CONNECTED] == true", Success: scenario.Resolved},
						{Condition: "[STATUS] == 200", Success: scenario.Resolved},
					},
				},
				scenario.Resolved,
			)
			if string(body) != scenario.ExpectedBody {
				t.Errorf("expected:\n%s\ngot:\n%s", scenario.ExpectedBody, body)
			}
			out := make(map[string]interface{})
			if err := json.Unmarshal(body, &out); err != nil {
				t.Error("expected body to be valid JSON, got error:", err.Error())
			}
		})
	}
}
|
package _3_smoothSailing
import (
"fmt"
"math"
)
// main prints whether the sample ticket number is "lucky".
func main() {
	fmt.Println(isLucky(123042))
}
// isLucky reports whether the digit sum of the lower half of n equals the
// digit sum of the upper half. For an odd digit count, the middle digit is
// counted with the upper half (matching the original behaviour).
func isLucky(n int) bool {
	digits := int(math.Log10(float64(n))) + 1
	var lowSum, highSum int
	for i := 0; i < digits; i++ {
		d := n % 10
		if i < digits/2 {
			lowSum += d
		} else {
			highSum += d
		}
		n /= 10
	}
	return lowSum == highSum
}
package renderer
import (
"bytes"
"image"
"image/draw"
"strings"
"unicode"
"github.com/driusan/de/demodel"
"github.com/driusan/de/renderer"
"golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
// PHPSyntax is a syntax-highlighting renderer for PHP source files. It
// embeds the default size calculation and image mapping behaviour.
type PHPSyntax struct {
	renderer.DefaultSizeCalcer
	renderer.DefaultImageMapper
}
// InvalidateCache flushes the cached size calculation and image map so the
// next render recomputes them.
func (rd *PHPSyntax) InvalidateCache() {
	rd.DefaultSizeCalcer.InvalidateCache()
	rd.DefaultImageMapper.InvalidateCache()
}
// CanRender reports whether this renderer handles buf, based on a PHP file
// extension (.php or .inc).
func (rd *PHPSyntax) CanRender(buf *demodel.CharBuffer) bool {
	for _, ext := range []string{".php", ".inc"} {
		if strings.HasSuffix(buf.Filename, ext) {
			return true
		}
	}
	return false
}
// RenderInto draws buf into dst with PHP syntax highlighting, rendering only
// the region that intersects viewport. The selection (buf.Dot) is drawn with
// a highlighted background. Returns early once past the visible area.
func (rd *PHPSyntax) RenderInto(dst draw.Image, buf *demodel.CharBuffer, viewport image.Rectangle) error {
	bounds := dst.Bounds()
	writer := font.Drawer{
		Dst:  dst,
		Src:  &image.Uniform{renderer.TextColour},
		Dot:  fixed.P(bounds.Min.X, bounds.Min.Y+renderer.MonoFontAscent.Floor()),
		Face: renderer.MonoFontFace,
	}
	runes := bytes.Runes(buf.Buffer)
	// Lexer state: which construct the scan position is currently inside.
	var inLineComment, inMultilineComment, inString, inCharString bool
	// Some characters (like a terminating quote) only change the active colour
	// after being rendered.
	var nextColor image.Image
	for i, r := range runes {
		switch r {
		case '\n':
			// A newline terminates a // comment.
			if inLineComment && !inMultilineComment && !inString {
				inLineComment = false
				writer.Src = &image.Uniform{renderer.TextColour}
			}
		case '\'':
			if !IsEscaped(i, runes) {
				if inCharString {
					// end of a string, colourize the quote too.
					nextColor = &image.Uniform{renderer.TextColour}
					inCharString = false
				} else if !inLineComment && !inMultilineComment && !inString {
					inCharString = true
					writer.Src = &image.Uniform{renderer.StringColour}
				}
			}
		case '"':
			if !IsEscaped(i, runes) {
				if inString {
					inString = false
					nextColor = &image.Uniform{renderer.TextColour}
				} else if !inLineComment && !inMultilineComment && !inCharString {
					inString = true
					writer.Src = &image.Uniform{renderer.StringColour}
				}
			}
		case '/':
			// Comment openers: "//" (line) or "/*" (multiline).
			if i+2 < len(runes) && string(runes[i:i+2]) == "//" {
				if !inCharString && !inMultilineComment && !inString {
					inLineComment = true
					writer.Src = &image.Uniform{renderer.CommentColour}
				}
			} else if i+2 < len(runes) && string(runes[i:i+2]) == "/*" {
				if !inCharString && !inString {
					inMultilineComment = true
					writer.Src = &image.Uniform{renderer.CommentColour}
				}
			}
			// "*/" closes a multiline comment; restore colour after the '/'
			// itself has been drawn in comment colour.
			if i > 1 && inMultilineComment && i+1 < len(runes) && string(runes[i-1:i+1]) == "*/" {
				nextColor = &image.Uniform{renderer.TextColour}
				inMultilineComment = false
			}
		case ' ', '\t':
			if !inCharString && !inMultilineComment && !inString && !inLineComment {
				writer.Src = &image.Uniform{renderer.TextColour}
			}
		default:
			// Outside comments/strings, colour keywords and builtin types.
			if !inCharString && !inMultilineComment && !inString && !inLineComment {
				if IsLanguageKeyword(i, runes) {
					writer.Src = &image.Uniform{renderer.KeywordColour}
				} else if IsLanguageType(i, runes) {
					writer.Src = &image.Uniform{renderer.BuiltinTypeColour}
				} else if StartsLanguageDeliminator(r) {
					writer.Src = &image.Uniform{renderer.TextColour}
				}
			}
		}
		// Compute the cell occupied by this rune: tabs span 8 glyph widths,
		// newlines extend to the viewport's right edge.
		runeRectangle := image.Rectangle{}
		runeRectangle.Min.X = writer.Dot.X.Ceil()
		runeRectangle.Min.Y = writer.Dot.Y.Ceil() - renderer.MonoFontAscent.Floor() + 1
		switch r {
		case '\t':
			runeRectangle.Max.X = runeRectangle.Min.X + 8*renderer.MonoFontGlyphWidth.Ceil()
		case '\n':
			runeRectangle.Max.X = viewport.Max.X
		default:
			runeRectangle.Max.X = runeRectangle.Min.X + renderer.MonoFontGlyphWidth.Ceil()
		}
		runeRectangle.Max.Y = runeRectangle.Min.Y + renderer.MonoFontHeight.Ceil() + 1
		if runeRectangle.Min.Y > viewport.Max.Y {
			// exit the loop early since we're past the part that is being drawn.
			return nil
		}
		if runeRectangle.Intersect(viewport) != image.ZR {
			if uint(i) >= buf.Dot.Start && uint(i) <= buf.Dot.End {
				// it's in dot, so highlight the background
				draw.Draw(
					dst,
					image.Rectangle{
						runeRectangle.Min.Sub(viewport.Min),
						runeRectangle.Max.Sub(viewport.Min),
					},
					&image.Uniform{renderer.TextHighlight},
					image.ZP,
					draw.Src,
				)
			}
		}
		// Advance Dot; tabs and newlines are positioned but never drawn as
		// glyphs.
		switch r {
		case '\t':
			writer.Dot.X += renderer.MonoFontGlyphWidth * 8
			continue
		case '\n':
			writer.Dot.Y += renderer.MonoFontHeight
			writer.Dot.X = fixed.I(bounds.Min.X)
			continue
		}
		// Draw in viewport-relative coordinates, then restore Dot.
		writer.Dot.X -= fixed.I(viewport.Min.X)
		writer.Dot.Y -= fixed.I(viewport.Min.Y)
		writer.DrawString(string(r))
		writer.Dot.X += fixed.I(viewport.Min.X)
		writer.Dot.Y += fixed.I(viewport.Min.Y)
		if nextColor != nil {
			writer.Src = nextColor
			nextColor = nil
		}
	}
	return nil
}
// StartsLanguageDeliminator reports whether r can delimit a PHP token:
// operators, punctuation, brackets, or whitespace.
func StartsLanguageDeliminator(r rune) bool {
	switch r {
	case '+', '-', '*', '/', '%',
		'&', '|', '^',
		'<', '>', '=', '!',
		':', '.',
		'(', ')', '[', ']', '{', '}',
		',', ';':
		return true
	}
	return unicode.IsSpace(r)
}

// IsLanguageKeyword determines if a word starts a language keyword.
// List of PHP keywords taken from http://php.net/manual/en/reserved.keywords.php
//
// Keywords are bucketed by rune length, longest first. NOTE(review): each
// bucket requires len(runes) > pos+N+1 — i.e. a delimiter after the keyword
// plus one more rune beyond it — so a keyword at the very end of the buffer
// is not recognized; preserved as-is.
func IsLanguageKeyword(pos int, runes []rune) bool {
	// The candidate word must begin at a token boundary.
	if pos > 0 {
		prev := runes[pos-1]
		if !unicode.IsSpace(prev) && !StartsLanguageDeliminator(prev) {
			return false
		}
	}
	if len(runes) > pos+16 {
		if unicode.IsSpace(runes[pos+15]) || StartsLanguageDeliminator(runes[pos+15]) {
			if string(runes[pos:pos+15]) == "__halt_compiler" {
				return true
			}
		}
	}
	if len(runes) > pos+13 {
		if unicode.IsSpace(runes[pos+12]) || StartsLanguageDeliminator(runes[pos+12]) {
			switch string(runes[pos : pos+12]) {
			case "include_once", "require_once":
				return true
			}
		}
	}
	if len(runes) > pos+11 {
		if unicode.IsSpace(runes[pos+10]) || StartsLanguageDeliminator(runes[pos+10]) {
			switch string(runes[pos : pos+10]) {
			case "enddeclare", "endforeach", "implements", "instanceof":
				return true
			}
		}
	}
	if len(runes) > pos+10 {
		if unicode.IsSpace(runes[pos+9]) || StartsLanguageDeliminator(runes[pos+9]) {
			switch string(runes[pos : pos+9]) {
			case "endswitch", "insteadof", "interface", "namespace", "protected":
				return true
			}
		}
	}
	if len(runes) > pos+9 {
		if unicode.IsSpace(runes[pos+8]) || StartsLanguageDeliminator(runes[pos+8]) {
			switch string(runes[pos : pos+8]) {
			case "abstract", "callable", "continue", "endwhile", "function":
				return true
			}
		}
	}
	if len(runes) > pos+8 {
		if unicode.IsSpace(runes[pos+7]) || StartsLanguageDeliminator(runes[pos+7]) {
			switch string(runes[pos : pos+7]) {
			// "elseif" (6 runes) was previously listed here, but a 6-rune
			// word can never equal a 7-rune slice — dead case removed; it
			// is correctly matched by the 6-rune bucket below.
			case "declare", "default", "extends", "finally", "foreach",
				"private", "require", "include":
				return true
			}
		}
	}
	if len(runes) > pos+7 {
		if unicode.IsSpace(runes[pos+6]) || StartsLanguageDeliminator(runes[pos+6]) {
			switch string(runes[pos : pos+6]) {
			case "elseif", "endfor", "global", "public", "return", "static", "switch":
				return true
			}
		}
	}
	if len(runes) > pos+6 {
		if unicode.IsSpace(runes[pos+5]) || StartsLanguageDeliminator(runes[pos+5]) {
			switch string(runes[pos : pos+5]) {
			case "array", "break", "catch", "class", "clone", "const", "empty", "endif",
				"final", "isset", "print", "trait", "unset", "while", "yield":
				return true
			}
		}
	}
	if len(runes) > pos+5 {
		if unicode.IsSpace(runes[pos+4]) || StartsLanguageDeliminator(runes[pos+4]) {
			switch string(runes[pos : pos+4]) {
			case "case", "echo", "else", "eval", "exit", "goto", "list":
				return true
			}
		}
	}
	if len(runes) > pos+4 {
		if unicode.IsSpace(runes[pos+3]) || StartsLanguageDeliminator(runes[pos+3]) {
			switch string(runes[pos : pos+3]) {
			case "for", "die", "var", "try", "xor", "and", "new", "use":
				return true
			}
		}
	}
	if len(runes) > pos+3 {
		if unicode.IsSpace(runes[pos+2]) || StartsLanguageDeliminator(runes[pos+2]) {
			switch string(runes[pos : pos+2]) {
			case "if", "do", "or", "as":
				return true
			}
		}
	}
	return false
}
// IsLanguageType reports whether a builtin type name starts at pos.
//
// NOTE(review): the type lists below ("int8", "rune", "uint64", ...) are Go
// builtin types, not PHP's — this appears copied from a Go highlighter;
// confirm intent before changing. Also note the 5+ rune buckets test only
// unicode.IsSpace for the trailing delimiter while the shorter buckets accept
// any delimiter — preserved as-is.
func IsLanguageType(pos int, runes []rune) bool {
	if pos < 3 {
		return false
	}
	// The candidate word must begin at a token boundary.
	if !StartsLanguageDeliminator(runes[pos-1]) {
		return false
	}
	if len(runes) > pos+4 {
		if StartsLanguageDeliminator(runes[pos+3]) {
			switch string(runes[pos : pos+3]) {
			case "int":
				return true
			}
		}
	}
	if len(runes) > pos+5 {
		if StartsLanguageDeliminator(runes[pos+4]) {
			switch string(runes[pos : pos+4]) {
			case "int8", "bool", "byte", "rune", "uint":
				return true
			}
		}
	}
	if len(runes) > pos+6 {
		if unicode.IsSpace(runes[pos+5]) {
			switch string(runes[pos : pos+5]) {
			case "uint8", "int16", "int32", "int64":
				return true
			}
		}
	}
	if len(runes) > pos+7 {
		if unicode.IsSpace(runes[pos+6]) {
			switch string(runes[pos : pos+6]) {
			case "uint16", "uint32", "uint64":
				return true
			}
		}
	}
	return false
}
// IsEscaped reports whether the rune at pos is preceded by an odd number of
// consecutive backslashes (and is therefore escaped).
func IsEscaped(pos int, runes []rune) bool {
	if pos == 0 {
		return false
	}
	backslashes := 0
	for i := pos - 1; i >= 0 && runes[i] == '\\'; i-- {
		backslashes++
	}
	return backslashes%2 == 1
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.