text stringlengths 11 4.05M |
|---|
/*********************************************
*
* (C) 2019
* Fabian Salamanca - fabian@nuo.com.mx
*
*********************************************/
package main
// Network is the top-level container for unmarshalling a network
// configuration document (services plus interface/network definitions).
type Network struct {
Services []Services `json:"services"`
Networks []Networks `json:"networks"`
}
// Services describes one service entry: its type plus an address.
// NOTE(review): exact service semantics (DNS? NTP?) are not visible here — confirm against the producing data source.
type Services struct {
Type string `json:"type"`
Address string `json:"address"`
}
// Networks describes one network interface configuration, including its
// static routes.
type Networks struct {
Type string `json:"type"`
Netmask string `json:"netmask"`
IPAddress string `json:"ip_address"`
Routes []Routes `json:"routes"`
ID string `json:"id"`
}
// Routes is a single static route: destination network/netmask and gateway.
type Routes struct {
Netmask string `json:"netmask"`
Network string `json:"network"`
Gateway string `json:"gateway"`
}
// Keys holds one key payload as opaque string data.
type Keys struct {
Data string `json:"data"`
}
// Meta carries host metadata from the config drive: hostname and keys.
type Meta struct {
Hostname string `json:"hostname"`
Keys []Keys `json:"keys"`
}
|
package cmd
import (
"io"
"github.com/jpnauta/remote-structure-test/pkg/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
v string
)
// NewRootCommand builds the root cobra command for remote-structure-test.
// out receives command output; errOut receives log output.
//
// Fix: the second parameter was named "err" (an io.Writer), which was then
// shadowed by the real error inside the PersistentPreRunE closure — renamed
// for clarity (Go parameter names are not part of the call interface).
func NewRootCommand(out, errOut io.Writer) *cobra.Command {
	rootCmd := &cobra.Command{
		Use:   "remote-structure-test",
		Short: "remote-structure-test provides a framework to test the structure of a remote host",
		Long: `remote-structure-test provides a powerful framework to validate
the structure of a remote host.
These tests can be used to check the output of commands,
as well as verify contents of the filesystem.`,
	}
	rootCmd.PersistentPreRunE = func(cmd *cobra.Command, _ []string) error {
		// Configure logging before any subcommand runs; v is bound to the
		// --verbosity flag registered below.
		if err := SetUpLogs(errOut, v); err != nil {
			return err
		}
		// Past this point failures are runtime errors, not usage errors.
		rootCmd.SilenceUsage = true
		logrus.Infof("remote-structure-test %+v", version.GetVersion())
		return nil
	}
	rootCmd.SilenceErrors = true
	rootCmd.AddCommand(NewCmdVersion(rootCmd, out))
	rootCmd.AddCommand(NewCmdTest(rootCmd, out))
	rootCmd.PersistentFlags().StringVarP(&v, "verbosity", "v", logrus.WarnLevel.String(), "Log level (debug, info, warn, error, fatal, panic)")
	return rootCmd
}
// SetUpLogs directs logrus output to out and sets the log level from the
// given level string (e.g. "debug", "warn"). Returns an error when the
// level string cannot be parsed.
func SetUpLogs(out io.Writer, level string) error {
	logrus.SetOutput(out)
	// Bug fix: the function previously parsed the package-level flag value
	// v and silently ignored its own level parameter.
	lvl, err := logrus.ParseLevel(level)
	if err != nil {
		return errors.Wrap(err, "parsing log level")
	}
	logrus.SetLevel(lvl)
	return nil
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net/http"
"sync"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
apiv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"errors"
"github.com/golang/glog"
"github.com/tsenart/vegeta/lib"
"os"
"path/filepath"
"strings"
"syscall"
"os/signal"
)
// replicas describes one load scenario. loadbots/webserver are expressed
// as percentages of -max-replicas (runScaleTest divides by 100).
type replicas struct {
title string
loadbots int32
webserver int32
}
// resultEntry pairs a scenario title with its formatted result line for
// the final summary output.
type resultEntry struct {
title string
result string
}
// scenarios lists the load profiles executed in order, from an idle
// baseline up to 100%/100% of -max-replicas.
var scenarios = []replicas{
{
title: "Idle",
loadbots: 1,
webserver: 1,
},
{
title: "Under load",
loadbots: 1,
webserver: 10,
},
{
title: "Equal load",
loadbots: 10,
webserver: 10,
},
{
title: "Over load",
loadbots: 100,
webserver: 10,
},
{
title: "High load",
loadbots: 100,
webserver: 100,
},
}
// summary accumulates one resultEntry per completed scenario.
var summary []resultEntry
// kubeconfig is registered as a flag in main (path depends on homeDir).
var kubeconfig *string
// scaleTestNamespace is resolved by setNamespace().
var scaleTestNamespace string
// clientset is the shared Kubernetes client created in createKubernetesClient().
var clientset *kubernetes.Clientset
const (
// Markers bracketing the machine-readable summary in the log output.
summaryDataMarker = "GENERATING SUMMARY OUTPUT"
summaryEndDataMarker = "END SUMMARY DATA"
loadbotsName = "loadbots"
webserverName = "webserver"
maxScaleReplicas = 100
// iterations successful fetches are required; up to iterations*attempts tries.
iterations = 10
attempts = 3
)
var (
inCluster = flag.Bool("incluster", true, "Running aggregator inside Kubernetes")
selector = flag.String("selector", "app", "The label key as selector for pods")
loadbotsPort = flag.Int("loadbots-port", 8080, "Target port of selected pods")
maxReplicas = flag.Int("max-replicas", maxScaleReplicas, "Maximum replication count per service. Total replicas will be twice as much.")
sleep = flag.Duration("sleep", 1*time.Second, "The sleep period between aggregations")
)
// main wires up flags, the Kubernetes client and namespace, runs the scale
// test scenarios, prints the summary, scales both RCs back to zero, and
// then blocks until SIGINT/SIGTERM.
func main() {
// Default the kubeconfig path to ~/.kube/config when a home dir exists.
if home := homeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file when in-cluster false")
} else {
kubeconfig = flag.String("kubeconfig", "", "(optional) absolute path to the kubeconfig file when in-cluster false")
}
flag.Parse()
glog.Info("Creating Kubernetes client")
createKubernetesClient()
setNamespace()
glog.Info("Running preflight checks")
preflightChecks()
glog.Info("Finished preflight checks")
glog.Infof("Running scale test with max replicas %d", *maxReplicas)
runScaleTest()
showSummary()
// Scale both controllers down so the cluster returns to idle.
scaleReplicationController(scaleTestNamespace, loadbotsName, 0)
scaleReplicationController(scaleTestNamespace, webserverName, 0)
glog.Info("Aggregator finished work")
// NOTE(review): the signal wait happens after all work is done —
// presumably to keep the pod alive for log collection; confirm intent.
exitSignal := make(chan os.Signal)
signal.Notify(exitSignal, syscall.SIGINT, syscall.SIGTERM)
<-exitSignal
}
// createKubernetesClient initializes the package-level clientset either
// from the in-cluster service account or from the -kubeconfig file, then
// logs the server version. Panics (via panicOnError) on any failure.
func createKubernetesClient() {
// Human-readable variant for log messages only.
inClusterConf := ""
if *inCluster {
inClusterConf = "in"
} else {
inClusterConf = "out of"
}
glog.Infof("Creating %s cluster config", inClusterConf)
var clientsetError error
if *inCluster {
config, err := rest.InClusterConfig()
panicOnError(err)
clientset, clientsetError = kubernetes.NewForConfig(config)
} else {
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
panicOnError(err)
clientset, clientsetError = kubernetes.NewForConfig(config)
}
panicOnError(clientsetError)
// Probe the API server to fail fast when the connection is unusable.
v, err := clientset.Discovery().ServerVersion()
panicOnError(err)
glog.Infof("Running %s Kubernetes Cluster - version v%v.%v (%v) - platform %v",
inClusterConf, v.Major, v.Minor, v.GitVersion, v.Platform)
}
// panicOnError logs err via glog and panics; it is a no-op for nil.
func panicOnError(err error) {
	if err == nil {
		return
	}
	glog.Errorf("Panicing due to error: %s", err)
	panic(err.Error())
}
// setNamespace resolves the namespace the aggregator operates in, trying
// the POD_NAMESPACE environment variable first, then the mounted
// service-account namespace file, and finally falling back to "default".
func setNamespace() {
	scaleTestNamespace = os.Getenv("POD_NAMESPACE")
	if scaleTestNamespace == "" {
		if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
			if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
				scaleTestNamespace = ns
			}
		}
	}
	if scaleTestNamespace == "" {
		scaleTestNamespace = "default"
	}
	glog.Infof("Running aggregator in namespace %s", scaleTestNamespace)
}
// preflightChecks blocks until one loadbot and one webserver pod are
// Running, guaranteeing the baseline "Idle" scenario can start.
func preflightChecks() {
glog.Infof("Waiting for initial loadbot and webserver pods to be Running...")
waitForScaleTestServicesToBeRunning(1, 1)
}
// runScaleTest executes every scenario in order: scales the loadbot and
// webserver replication controllers to the scenario's percentage of
// -max-replicas, waits for the pods, then repeatedly fetches vegeta
// metrics from the loadbots until `iterations` successful rounds (or
// iterations*attempts tries) and records an aggregated summary line.
func runScaleTest() {
var successfullIterations int
// Track current sizes to skip redundant scale operations.
var currentLoadbots, currentWebservers int32
for _, s := range scenarios {
// Scenario values are percentages of -max-replicas.
loadbotReplicas := s.loadbots * int32(*maxReplicas) / 100
webserverReplicas := s.webserver * int32(*maxReplicas) / 100
// A scenario value of 1 means "exactly one replica", not 1%.
if s.loadbots != 1 {
time.Sleep(1 * time.Second)
if currentLoadbots != loadbotReplicas {
scaleReplicationController(scaleTestNamespace, loadbotsName, loadbotReplicas)
currentLoadbots = loadbotReplicas
}
} else {
currentLoadbots = 1
loadbotReplicas = 1
}
if s.webserver != 1 {
time.Sleep(1 * time.Second)
if currentWebservers != webserverReplicas {
scaleReplicationController(scaleTestNamespace, webserverName, webserverReplicas)
currentWebservers = webserverReplicas
}
} else {
currentWebservers = 1
webserverReplicas = 1
}
glog.Infof("Load scenario '%s': %d Loadbots - %d Webservers", s.title, loadbotReplicas, webserverReplicas)
waitForScaleTestServicesToBeRunning(currentLoadbots, currentWebservers)
// Settle time before sampling -- presumably to let traffic stabilize.
time.Sleep(5 * time.Second)
parts := []vegeta.Metrics{}
glog.V(3).Infof("[D] Getting %s pods", loadbotsName)
loadbots, err := getPods(loadbotsName)
if err != nil {
glog.Infof("Error getting loadbot pods: %s", err)
}
glog.V(3).Infof("[D] Got %d pods", len(loadbots))
podNames := ""
for ix := range loadbots {
podNames += loadbots[ix].Name + " "
}
glog.V(3).Infof("[D] %s", podNames)
successfullIterations = 0
// Retry loop: stop after `iterations` successes or after
// iterations*attempts total tries.
for i := 1; i <= iterations*attempts; i++ {
start := time.Now()
partsIteration := fetchResults(loadbots)
if len(partsIteration) == 0 {
glog.V(3).Info("[D] Failed to fetch results.")
} else {
successfullIterations++
parts = append(parts, partsIteration...)
latency := time.Since(start)
if successfullIterations >= iterations {
break
} else if latency < *sleep {
// Pace iterations to roughly one per -sleep period.
time.Sleep(*sleep - latency)
}
}
}
if len(parts) < iterations {
panicOnError(errors.New("failed to fetch results. Quitting aggregator"))
} else {
glog.V(4).Infof("[D] Fetched results:\n %s", parts)
queryPerSecond, success, latencyMean, latency99th := evaluateData(parts)
result := fmt.Sprintf("QPS: %-8.0f Success: %-8.2f%% Latency: %s (mean) %s (99th)",
queryPerSecond, success, latencyMean, latency99th)
summary = append(summary, resultEntry{
title: s.title,
result: result,
})
glog.Infof("Summary of load scenario '%s': %s", s.title, result)
}
}
}
// fetchResults queries every loadbot pod in parallel for its vegeta
// metrics JSON and returns the successfully decoded results. Failures are
// logged and skipped, so the returned slice may be shorter than loadbots.
func fetchResults(loadbots []*apiv1.Pod) []vegeta.Metrics {
parts := []vegeta.Metrics{}
// lock guards parts against concurrent appends from the goroutines.
lock := sync.Mutex{}
wg := sync.WaitGroup{}
wg.Add(len(loadbots))
for ix := range loadbots {
// ix is passed by value so each goroutine sees its own index.
go func(ix int) {
defer wg.Done()
pod := loadbots[ix]
var data []byte
if *inCluster {
// In-cluster: hit the pod IP directly.
url := fmt.Sprintf("http://%s:%d/", pod.Status.PodIP, *loadbotsPort)
resp, err := http.Get(url)
if err != nil {
glog.Infof("Error getting %s: %v", url, err)
return
}
defer resp.Body.Close()
if data, err = ioutil.ReadAll(resp.Body); err != nil {
glog.Infof("Error reading response of %s: %v", url, err)
return
}
} else {
// Out of cluster: go through the API server pod proxy.
var err error
url := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s:%d/proxy", scaleTestNamespace, pod.Name, *loadbotsPort)
data, err = clientset.Discovery().RESTClient().Get().AbsPath(url).DoRaw()
if err != nil {
glog.Infof("Error proxying to pod %s: %v", url, err)
return
}
}
var metrics vegeta.Metrics
if err := json.Unmarshal(data, &metrics); err != nil {
glog.Infof("Error decoding: %v\n", err)
return
}
lock.Lock()
defer lock.Unlock()
parts = append(parts, metrics)
}(ix)
}
wg.Wait()
return parts
}
// evaluateData aggregates per-loadbot vegeta metrics into cluster-wide
// figures: summed request rate, mean success percentage, and averaged
// mean/99th-percentile latencies.
func evaluateData(metrics []vegeta.Metrics) (queryPerSecond float64, success float64, latencyMean time.Duration, latency99th time.Duration) {
	// Robustness fix: an empty input previously caused an integer
	// division-by-zero panic (latency averages) and a NaN success rate.
	if len(metrics) == 0 {
		return 0, 0, 0, 0
	}
	var latencyMeans time.Duration
	var latency99ths time.Duration
	for _, v := range metrics {
		// Negative/zero rates are excluded from the QPS sum.
		if v.Rate > 0 {
			queryPerSecond += v.Rate
		}
		success += v.Success * 100
		latencyMeans += v.Latencies.Mean
		latency99ths += v.Latencies.P99
	}
	n := int64(len(metrics))
	success /= float64(n)
	latencyMean = time.Duration(latencyMeans.Nanoseconds() / n)
	latency99th = time.Duration(latency99ths.Nanoseconds() / n)
	return queryPerSecond, success, latencyMean, latency99th
}
// showSummary prints every recorded scenario result between the summary
// markers that downstream tooling scans for.
func showSummary() {
	glog.Info("Summary of load scenarios:")
	glog.Info(summaryDataMarker)
	for i, entry := range summary {
		glog.Infof("%d. %-10s: %s", i, entry.title, entry.result)
	}
	glog.Infof("%s\n", summaryEndDataMarker)
}
// getPods lists pods in the test namespace labeled <selector>=<appName>
// and returns only those that are Running and have a pod IP assigned.
func getPods(appName string) ([]*apiv1.Pod, error) {
loadbots := []*apiv1.Pod{}
pods, err := clientset.CoreV1().Pods(scaleTestNamespace).List(metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", *selector, appName),
})
if err != nil {
return loadbots, err
}
for ix := range pods.Items {
// Take the address of the slice element (not the loop variable).
pod := &pods.Items[ix]
if pod.Status.PodIP == "" || pod.Status.Phase != apiv1.PodRunning {
continue
}
loadbots = append(loadbots, pod)
}
return loadbots, nil
}
// waitForScaleTestServicesToBeRunning polls until at least targetLoadbots
// loadbot pods and targetWebserver webserver pods report phase Running,
// backing off exponentially between checks.
func waitForScaleTestServicesToBeRunning(targetLoadbots int32, targetWebserver int32) {
	waitTime := time.Second
	done := false
	for !done {
		loadbotPods, err := clientset.CoreV1().Pods(scaleTestNamespace).List(metav1.ListOptions{
			LabelSelector: fmt.Sprintf("%s=%s", *selector, loadbotsName),
		})
		if err != nil {
			// Bug fix: a failed List previously fell through and
			// dereferenced a possibly-nil pod list; back off and retry.
			glog.Infof("Error getting list of loadbots: %s", err)
			time.Sleep(waitTime)
			waitTime *= 2
			continue
		}
		webserverPods, err := clientset.CoreV1().Pods(scaleTestNamespace).List(metav1.ListOptions{
			LabelSelector: fmt.Sprintf("%s=%s", *selector, webserverName),
		})
		if err != nil {
			glog.Infof("Error getting list of webservers: %s", err)
			time.Sleep(waitTime)
			waitTime *= 2
			continue
		}
		// Fast path: not even enough pods listed yet.
		lines := int32(len(loadbotPods.Items) + len(webserverPods.Items))
		if lines < targetLoadbots+targetWebserver {
			glog.Infof("Pods status output too short. Waiting %v then checking again.", waitTime)
			time.Sleep(waitTime)
			waitTime *= 2
			continue
		}
		loadbotsRunning := false
		webserverRunning := false
		totalLoadbotsRunning := 0
		totalWebserverRunning := 0
		for _, p := range loadbotPods.Items {
			if p.Status.Phase == apiv1.PodRunning {
				totalLoadbotsRunning++
				if int32(totalLoadbotsRunning) >= targetLoadbots {
					loadbotsRunning = true
					break
				}
			}
		}
		for _, p := range webserverPods.Items {
			if p.Status.Phase == apiv1.PodRunning {
				totalWebserverRunning++
				if int32(totalWebserverRunning) >= targetWebserver {
					webserverRunning = true
					break
				}
			}
		}
		glog.V(3).Infof("[D] Running are %v/%v webserver and %v/%v loadbots", totalWebserverRunning, targetWebserver, totalLoadbotsRunning, targetLoadbots)
		if !loadbotsRunning || !webserverRunning {
			glog.V(2).Infof("Pods are not running. Waiting %v then checking again.", waitTime)
			time.Sleep(waitTime)
			waitTime *= 2
		} else {
			done = true
		}
	}
}
// scaleReplicationController sets the replica count of the named
// ReplicationController via a read-modify-update; errors are logged and
// returned.
func scaleReplicationController(namespace string, name string, replicas int32) error {
glog.Infof("Scaling %s to %d replicas", name, replicas)
rc, err := clientset.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
if err != nil {
glog.Infof("Error scaling %s to %d replicas: %s", name, replicas, err)
return err
}
// replicas is a local copy, so taking its address is safe here.
rc.Spec.Replicas = &replicas
_, err = clientset.CoreV1().ReplicationControllers(namespace).Update(rc)
if err != nil {
glog.Infof("Error scaling %s to %d replicas: %s", name, replicas, err)
return err
}
return nil
}
// homeDir returns the current user's home directory, preferring the POSIX
// HOME variable and falling back to USERPROFILE on Windows. May be empty.
func homeDir() string {
	home := os.Getenv("HOME")
	if home == "" {
		// Windows fallback.
		home = os.Getenv("USERPROFILE")
	}
	return home
}
|
package models
import(
"encoding/json"
)
/**
 * FileTypeEnum enumerates supported file types. Values start at 1
 * (`1 + iota`), so the zero value is not a valid member.
 */
type FileTypeEnum int
/**
 * Value collection for FileTypeEnum enum
 */
const (
FileType_KROWS FileTypeEnum = 1 + iota
FileType_KLOG
FileType_KFILESTREAM
FileType_KNOTSUPPORTEDTYPE
FileType_KFULLTEXT
)
// MarshalJSON encodes the enum as its JSON string value (e.g. "kRows").
func (r FileTypeEnum) MarshalJSON() ([]byte, error) {
	return json.Marshal(FileTypeEnumToValue(r))
}
// UnmarshalJSON decodes a JSON string into the matching enum value.
// Unrecognized-but-valid strings map to FileType_KROWS (see
// FileTypeEnumFromValue).
func (r *FileTypeEnum) UnmarshalJSON(data []byte) error {
	var s string
	// Bug fix: the unmarshal error was silently discarded, turning any
	// malformed input into FileType_KROWS instead of reporting it.
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*r = FileTypeEnumFromValue(s)
	return nil
}
// fileTypeEnumNames maps each enum member to its wire representation.
var fileTypeEnumNames = map[FileTypeEnum]string{
	FileType_KROWS:             "kRows",
	FileType_KLOG:              "kLog",
	FileType_KFILESTREAM:       "kFileStream",
	FileType_KNOTSUPPORTEDTYPE: "kNotSupportedType",
	FileType_KFULLTEXT:         "kFullText",
}

/**
 * FileTypeEnumToValue converts a FileTypeEnum to its string
 * representation; unknown values fall back to "kRows".
 */
func FileTypeEnumToValue(fileTypeEnum FileTypeEnum) string {
	if name, ok := fileTypeEnumNames[fileTypeEnum]; ok {
		return name
	}
	return "kRows"
}
/**
 * FileTypeEnumArrayToValue converts a slice of FileTypeEnum values to the
 * slice of their string representations, preserving order.
 */
func FileTypeEnumArrayToValue(fileTypeEnum []FileTypeEnum) []string {
	converted := make([]string, len(fileTypeEnum))
	for i, v := range fileTypeEnum {
		converted[i] = FileTypeEnumToValue(v)
	}
	return converted
}
// fileTypeEnumValues is the inverse of the string representation mapping.
var fileTypeEnumValues = map[string]FileTypeEnum{
	"kRows":             FileType_KROWS,
	"kLog":              FileType_KLOG,
	"kFileStream":       FileType_KFILESTREAM,
	"kNotSupportedType": FileType_KNOTSUPPORTEDTYPE,
	"kFullText":         FileType_KFULLTEXT,
}

/**
 * FileTypeEnumFromValue converts a string value to its enum
 * representation; unknown strings fall back to FileType_KROWS.
 */
func FileTypeEnumFromValue(value string) FileTypeEnum {
	if v, ok := fileTypeEnumValues[value]; ok {
		return v
	}
	return FileType_KROWS
}
|
package core
import (
"github.com/I-Reven/Hexagonal/src/application/core/service"
request "github.com/I-Reven/Hexagonal/src/domain/http"
"github.com/I-Reven/Hexagonal/src/framework/logger"
"github.com/gin-gonic/gin"
"github.com/juju/errors"
"net/http"
)
// Tracker is the HTTP handler wiring for track lookups: a logger, the
// track application service, and the bound request payload.
// NOTE(review): track is shared state written by Handler on every request
// -- confirm one Tracker instance is not reused across concurrent requests.
type Tracker struct {
log logger.Log
service service.TrackService
track request.Track
}
// Handler binds the incoming JSON body to a track request, validates it,
// fetches the track from the service, and returns it as JSON. Errors are
// logged and mapped to 400 (bind/validation) or 404 (lookup failure).
func (h *Tracker) Handler(ctx *gin.Context) {
	// NOTE(review): binding into the shared h.track field is unsafe if this
	// Tracker serves concurrent requests -- confirm instancing.
	if err := ctx.ShouldBindJSON(&h.track); err != nil {
		err = errors.NewBadRequest(err, "error.can-not-un-marshal-json")
		h.log.Error(err)
		ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	if err := h.track.Validate(); err != nil {
		err = errors.NewNotValid(err, "error.request is not valid")
		h.log.Error(err)
		ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	// Idiom fix: the local was named "Track" (exported style); Go locals
	// use lowerCamelCase.
	track, err := h.service.Get(h.track.TrackId)
	if err != nil {
		err = errors.NewNotSupported(err, "error.handler-get-error-from-get-track-service")
		h.log.Error(err)
		ctx.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
		return
	}
	ctx.JSON(http.StatusOK, track)
}
|
/* Copyright 2020 by n3o33 <discord n3o33#2384>
* Not proprietary and confidential,
* feel free to share, copy and change
*/
/*
* VERSION 1.0
*/
/* DESCRIPTION
This script will repatriate resources from planet to homeworld if a storage is full
@sendAllRes, true will send all res of planet, false just the res of full storage
@leaveDeuterium, amount of deuterium to keep on the planet when sending all resources
*/
//#################### CONFIG ################################
//############################################################
sendAllRes = true // true: ship every resource on the planet; false: only the resource whose storage is full
leaveDeuterium = 100000 // deuterium kept on the planet when sending all resources
//############################################################
// repatriate sends the given resources from a planet to the homeworld as a
// TRANSPORT fleet, sizing pathfinders/large/small cargos automatically.
func repatriate(res, from) {
myShips, _ = from.GetShips()
LogInfo("[STORAGE] resources to send " + res)
// Compute the cheapest ship mix able to carry res.Total().
pf, lc, sc, cargo = CalcFastCargoPF(myShips.Pathfinder, myShips.LargeCargo, myShips.SmallCargo, res.Total())
fleet = NewFleet()
fleet.SetOrigin(from.GetCoordinate())
fleet.SetDestination(GetHomeWorld().GetCoordinate())
fleet.SetMission(TRANSPORT)
fleet.SetSpeed(HUNDRED_PERCENT)
fleet.SetResources(res)
fleet.AddShips(PATHFINDER, pf)
fleet.AddShips(LARGECARGO, lc)
fleet.AddShips(SMALLCARGO, sc)
f, err = fleet.SendNow()
// NOTE(review): the fleet is logged before err is checked -- f may be
// empty/invalid when SendNow failed; confirm intended order.
LogInfo("[STORAGE] send fleet " + f)
if err != nil {
LogError("[STORAGE] send fleet error with " + err)
}
}
// checkIfStorageIsFull scans every planet; when a storage (metal, crystal
// or deuterium) is at capacity, it repatriates either all resources
// (sendAllRes) or just the full resource, provided a fleet slot is free.
func checkIfStorageIsFull() {
slots = GetSlots()
freeSlots = slots.Total - slots.InUse
// Only act when a slot beyond the reserved count is available.
if freeSlots > GetFleetSlotsReserved() {
celestials,_ = GetCelestials()
for celestial in celestials {
if celestial.GetType() == PLANET_TYPE {
resDetails, _ = celestial.GetResourcesDetails()
resToSend = NewResources(0,0,0)
LogInfo("[STORAGE] " + celestial.GetCoordinate() + " " + celestial.GetName())
if resDetails.Metal.Available >= resDetails.Metal.StorageCapacity {
LogInfo("[STORAGE] ---- metal full")
if sendAllRes {
// Send everything, but keep leaveDeuterium on the planet.
resToSend = NewResources(resDetails.Metal.Available, resDetails.Crystal.Available, resDetails.Deuterium.Available - leaveDeuterium)
} else {
resToSend = resToSend.Add(NewResources(resDetails.Metal.Available, 0, 0))
}
} else {
// NOTE(review): (100/StorageCapacity)*Available rounds to 0 under
// integer semantics; presumably (Available*100)/StorageCapacity was
// intended -- confirm the interpreter's division behavior.
LogInfo("[STORAGE] ---- metal storage is " + Round(((100/resDetails.Metal.StorageCapacity)*resDetails.Metal.Available)) + " %")
}
if resDetails.Crystal.Available >= resDetails.Crystal.StorageCapacity {
LogInfo("[STORAGE] ---- crystal full")
if sendAllRes {
resToSend = NewResources(resDetails.Metal.Available, resDetails.Crystal.Available, resDetails.Deuterium.Available - leaveDeuterium)
} else {
resToSend = resToSend.Add(NewResources(0, resDetails.Crystal.Available, 0))
}
} else {
LogInfo("[STORAGE] ---- crystal storage is " + Round(((100/resDetails.Crystal.StorageCapacity)*resDetails.Crystal.Available)) + " %")
}
if resDetails.Deuterium.Available >= resDetails.Deuterium.StorageCapacity {
LogInfo("[STORAGE] ---- deuterium full")
if sendAllRes {
resToSend = NewResources(resDetails.Metal.Available, resDetails.Crystal.Available, resDetails.Deuterium.Available - leaveDeuterium)
} else {
// NOTE(review): unlike the sendAllRes branch, this sends all
// deuterium without keeping leaveDeuterium -- confirm intended.
resToSend = resToSend.Add(NewResources(0, 0, resDetails.Deuterium.Available))
}
} else {
LogInfo("[STORAGE] ---- deuterium storage is " + Round(((100/resDetails.Deuterium.StorageCapacity)*resDetails.Deuterium.Available)) + " %")
}
if resToSend.Total() > 0 {
repatriate(resToSend, celestial)
}
}
}
}
}
// Script entry point: run the check once when the script is executed.
checkIfStorageIsFull()
|
package main
import (
_ "embed"
"flag"
"fmt"
"image"
"image/color"
"image/png"
"log"
"math/rand"
"os"
"strings"
"time"
"github.com/golang/freetype"
)
var cnt = flag.Int("cnt", 120, "总共的计算题数量")
//go:embed arabtype.ttf
var fontBytes []byte
// main parses the -cnt flag and renders the arithmetic worksheet image.
func main() {
flag.Parse()
createImage()
}
// createImage renders a 1920x1080 PNG (named after the current timestamp)
// containing the generated arithmetic problems, using the embedded TTF font.
func createImage() {
	// Image width and height in pixels.
	dx := 1920
	dy := 1080
	imgfile, err := os.Create(time.Now().Format("2006-01-02_150405") + ".png")
	if err != nil {
		log.Println("1", err)
		return
	}
	defer imgfile.Close()
	img := image.NewNRGBA(image.Rect(0, 0, dx, dy))
	// Parse the embedded font data (see the //go:embed fontBytes var).
	font, err := freetype.ParseFont(fontBytes)
	if err != nil {
		// Bug fix: execution previously continued with an invalid font,
		// crashing later inside freetype when drawing.
		log.Println("3.load front fail", err)
		return
	}
	f := freetype.NewContext()
	// Rendering parameters: DPI, font, size, clip region, target image.
	f.SetDPI(120)
	f.SetFont(font)
	f.SetFontSize(26)
	f.SetClip(img.Bounds())
	f.SetDst(img)
	// Draw text in black.
	f.SetSrc(image.NewUniform(color.Black))
	writeContent(f)
	// Encode the finished worksheet as PNG.
	err = png.Encode(imgfile, img)
	if err != nil {
		log.Fatal("4", err)
	}
}
// writeContent lays the generated formulas out in a 6-column grid (300px
// column pitch, 50px row pitch) and draws a dashed separator line every
// 30 problems.
func writeContent(f *freetype.Context) {
calcs := buildFormula(*cnt)
// Layout constants: column width, columns per row, vertical fudge offset.
w := 300
cyc := 6
adapter := 20
var h, v int
for i, clc := range calcs {
// Separator row before every 30th problem (except the first).
if i%30 == 0 && i != 0 {
pt := freetype.Pt(0, adapter+100+v*50+int(f.PointToFixed(26))>>8)
f.DrawString(strings.Repeat("-", 200), pt)
}
h = i % cyc
v = i / cyc
// Position of this formula; PointToFixed(26)>>8 converts the point
// size from 26.6 fixed point to whole pixels for the baseline.
pt := freetype.Pt(40+h*w, 60+v*50+int(f.PointToFixed(26))>>8)
_, err := f.DrawString(clc, pt)
if err != nil {
log.Fatal("5", err)
}
}
}
// CC generates a random operand pair for one operator.
type CC func() (int, int)

// buildFormula produces cnt arithmetic exercises of the form "a op b = ",
// choosing the operator randomly. Subtraction never goes negative.
func buildFormula(cnt int) []string {
	symbols := []string{"+", "-", "×", "÷"}
	md := map[string]CC{
		"×": func() (int, int) {
			// 0-99 times 1-9.
			return rand.Intn(100), rand.Intn(9) + 1
		},
		"÷": func() (int, int) {
			// Dividend derived from the divisor.
			// NOTE(review): a*(b+1) is not necessarily divisible by b —
			// confirm whether exact division was intended.
			a := rand.Intn(20) + 1
			b := rand.Intn(9) + 1
			return a * (b + 1), b
		},
		"+": func() (int, int) {
			return rand.Intn(100), rand.Intn(100)
		},
		"-": func() (int, int) {
			// Order operands so the difference is non-negative.
			a := rand.Intn(100)
			b := rand.Intn(100)
			if a > b {
				return a, b
			}
			return b, a
		},
	}
	rows := make([]string, 0, cnt)
	// Performance fix: the former per-formula 20ms sleeps (a leftover from
	// time-based re-seeding) added ~cnt*20ms of dead time; the PRNG is
	// seeded once here, so they are removed.
	rand.Seed(int64(time.Now().Nanosecond()))
	for i := 0; i < cnt; i++ {
		p := rand.Intn(4)
		a, b := md[symbols[p]]()
		rows = append(rows, fmt.Sprintf("%d %s %d = ", a, symbols[p], b))
	}
	return rows
}
|
package gube
import (
"fmt"
"io/ioutil"
"sync"
"github.com/ghodss/yaml"
"github.com/mandelsoft/filepath/pkg/filepath"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GardenSetConfig provides access to a named set of garden configurations
// with an optional default entry and an associated GitHub URL.
type GardenSetConfig interface {
GetConfig(name string) (GardenConfig, error)
GetNames() []string
GetConfigs() map[string]GardenConfig
GetGithubURL() string
GetDefault() string
}
// GardenConfig describes a single garden entry: metadata, a lazily
// created Garden client, a runtime.Object wrapper, and kubeconfig access
// (via the embedded KubeconfigProvider).
type GardenConfig interface {
GetName() string
GetDescription() string
GetGarden() (Garden, error)
GetRuntimeObject() runtime.Object
KubeconfigProvider
}
// NewDefaultGardenSetConfig wraps a single garden into a set configuration
// whose only — and default — entry is named "default".
func NewDefaultGardenSetConfig(g Garden) GardenSetConfig {
	defaultEntry := &GardenConfigImpl{
		Name:        "default",
		Description: "default garden",
		garden:      g,
	}
	set := &GardenSetConfigImpl{
		Default: "default",
		Gardens: []*GardenConfigImpl{defaultEntry},
	}
	return set
}
// NewGardenSetConfig loads a garden set configuration from the YAML file
// at path (interface-typed convenience wrapper around ReadGardenSetConfig).
func NewGardenSetConfig(path string) (GardenSetConfig, error) {
return ReadGardenSetConfig(path)
}
// ReadGardenSetConfig loads a garden set configuration from the YAML file
// at path and resolves the contained kubeconfig paths relative to it.
func ReadGardenSetConfig(path string) (*GardenSetConfigImpl, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		// Improvement: wrap errors with context so callers can tell which
		// file and which stage failed.
		return nil, fmt.Errorf("reading garden set config %q: %w", path, err)
	}
	config := &GardenSetConfigImpl{}
	if err := yaml.Unmarshal(data, config); err != nil {
		return nil, fmt.Errorf("parsing garden set config %q: %w", path, err)
	}
	config.SetPath(path)
	return config, nil
}
/////////////////////////////////////////////////////////////////////////////
// GardenSetConfigImpl is the YAML-backed implementation of GardenSetConfig.
// path records the file the config was loaded from (set via SetPath) and is
// used to resolve relative kubeconfig paths.
type GardenSetConfigImpl struct {
GithubURL string `yaml:"githubURL,omitempty" json:"githubURL,omitempty"`
Gardens []*GardenConfigImpl `yaml:"gardens,omitempty" json:"gardens,omitempty"`
Default string `yaml:"default,omitempty" json:"default,omitempty"`
path string
}
// SetPath records the config file location and makes every garden's
// kubeconfig path absolute relative to the config file's directory.
// NOTE(review): Dir2 comes from the mandelsoft filepath fork -- presumably
// a Dir variant; confirm its exact semantics.
func (this *GardenSetConfigImpl) SetPath(path string) {
this.path = path
dir := filepath.Dir2(path)
for _, g := range this.Gardens {
g.makeAbsolute(dir)
}
}
// GetDefault returns the name of the default garden (may be empty).
func (this *GardenSetConfigImpl) GetDefault() string {
return this.Default
}
// GetGithubURL returns the GitHub URL associated with this garden set.
func (this *GardenSetConfigImpl) GetGithubURL() string {
return this.GithubURL
}
// GetConfig returns the garden configuration with the given name; an
// empty name selects the configured default.
func (this *GardenSetConfigImpl) GetConfig(name string) (GardenConfig, error) {
	if name == "" {
		name = this.Default
	}
	if name == "" {
		// Idiom fix: error strings follow Go convention (lowercase, no
		// trailing punctuation); %q quotes the name safely below.
		return nil, fmt.Errorf("no garden name given")
	}
	for _, conf := range this.Gardens {
		if conf.Name == name {
			return conf, nil
		}
	}
	return nil, fmt.Errorf("garden %q not found", name)
}
// GetNames lists the names of all configured gardens in declaration order.
func (this *GardenSetConfigImpl) GetNames() []string {
	names := make([]string, 0, len(this.Gardens))
	for _, conf := range this.Gardens {
		names = append(names, conf.Name)
	}
	return names
}

// GetConfigs returns every garden configuration keyed by its name.
func (this *GardenSetConfigImpl) GetConfigs() map[string]GardenConfig {
	configs := make(map[string]GardenConfig, len(this.Gardens))
	for _, conf := range this.Gardens {
		configs[conf.Name] = conf
	}
	return configs
}
/////////////////////////////////////////////////////////////////////////////
// GardenConfigImpl is the YAML-backed implementation of GardenConfig.
// lock guards the lazily populated kubeconfig and garden fields;
// effectivePath is the absolute kubeconfig path computed by makeAbsolute.
type GardenConfigImpl struct {
Name string `yaml:"name,omitempty" json:"name,omitempty"`
KubeConfigPath string `yaml:"kubeconfig,omitempty" json:"kubeconfig,omitempty"`
Description string `yaml:"description,omitempty" json:"description,omitempty"`
lock sync.Mutex
kubeconfig []byte
garden Garden
effectivePath string
}
// makeAbsolute resolves a relative KubeConfigPath against dir into
// effectivePath; absolute paths are left untouched (effectivePath stays "").
func (this *GardenConfigImpl) makeAbsolute(dir string) {
this.lock.Lock()
defer this.lock.Unlock()
if !filepath.IsAbs(this.KubeConfigPath) {
this.effectivePath = filepath.Join(dir, this.KubeConfigPath)
}
}
// GetName returns the garden's configured name.
func (this *GardenConfigImpl) GetName() string {
return this.Name
}
// GetDescription returns the garden's configured description.
func (this *GardenConfigImpl) GetDescription() string {
return this.Description
}
// GetKubeconfig returns the kubeconfig bytes for this garden, caching the
// result. It first asks the garden object itself; when that yields nothing
// and a KubeConfigPath is configured, it falls back to reading the file.
func (this *GardenConfigImpl) GetKubeconfig() ([]byte, error) {
this.lock.Lock()
defer this.lock.Unlock()
if this.kubeconfig == nil {
var cfg []byte
g, err := this._getGarden()
if err != nil {
return nil, err
}
cfg, err = g.GetKubeconfig()
// NOTE(review): when the garden lookup failed (err != nil, cfg nil)
// the file read below overwrites err -- presumably an intentional
// fallback, but confirm that masking the first error is desired.
if cfg == nil && this.KubeConfigPath != "" {
path := this.effectivePath
if path == "" {
path = this.KubeConfigPath
}
cfg, err = ioutil.ReadFile(path)
}
if err != nil {
return nil, err
}
this.kubeconfig = cfg
}
return this.kubeconfig, nil
}
// GetGarden returns the (lazily created) garden client for this entry.
func (this *GardenConfigImpl) GetGarden() (Garden, error) {
	this.lock.Lock()
	defer this.lock.Unlock()
	return this._getGarden()
}

// _getGarden lazily instantiates the garden from the effective kubeconfig
// path. Callers must hold this.lock.
func (this *GardenConfigImpl) _getGarden() (Garden, error) {
	if this.garden == nil {
		path := this.effectivePath
		if path == "" {
			path = this.KubeConfigPath
		}
		g, err := NewGardenFromConfigfile(path)
		if err != nil {
			// Fix: wrap instead of discarding the underlying error so
			// callers can see why the creation failed.
			return nil, fmt.Errorf("cannot create garden object for %s(%s): %w", this.Name, path, err)
		}
		this.garden = g
	}
	return this.garden, nil
}
// gardenObject adapts a GardenConfigImpl to runtime.Object so it can be
// passed through Kubernetes machinery.
type gardenObject struct {
*GardenConfigImpl `json:",inline"`
}
// GetObjectKind returns nil.
// NOTE(review): runtime.Object callers usually expect a non-nil ObjectKind
// -- confirm consumers tolerate this.
func (this *gardenObject) GetObjectKind() schema.ObjectKind {
return nil
}
// DeepCopyObject returns nil; the wrapper is not copyable.
// NOTE(review): returning nil violates the usual DeepCopyObject contract.
func (this *gardenObject) DeepCopyObject() runtime.Object {
return nil
}
// GetRuntimeObject wraps this config in a runtime.Object adapter.
func (g *GardenConfigImpl) GetRuntimeObject() runtime.Object {
return &gardenObject{g}
}
|
package main
import (
	"os"

	"gorm.io/driver/mysql"
	"gorm.io/gorm"
)
// freak aborts the program by panicking when err is non-nil; a nil error
// is a no-op.
func freak(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// main opens a GORM MySQL session and prints the handle address; any
// connection error aborts via freak.
func main() {
	// Security fix: the DSN — including live database credentials — was
	// hard-coded in source. Read it from the environment instead; the
	// leaked credentials must be rotated regardless.
	dsn := os.Getenv("MYSQL_DSN")
	if dsn == "" {
		panic("MYSQL_DSN environment variable is not set")
	}
	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
	freak(err)
	println(db)
}
|
package seed
import (
crypto_rand "crypto/rand"
"encoding/binary"
math_rand "math/rand"
)
// init seeds math/rand from crypto/rand so the default PRNG sequence is
// not predictable from process start time.
func init() {
	var seedBytes [8]byte
	if _, err := crypto_rand.Read(seedBytes[:]); err != nil {
		panic("cannot seed math/rand package with cryptographically secure random number generator")
	}
	math_rand.Seed(int64(binary.LittleEndian.Uint64(seedBytes[:])))
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"github.com/bborbe/stringutil"
)
// main runs the dirof tool over CLI args (or stdin when none) and exits
// non-zero on failure.
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
err := do(os.Stdout, os.Stdin, os.Args[1:])
if err != nil {
fmt.Fprintf(os.Stderr, "dirof failed: %v", err)
os.Exit(1)
}
}
func do(writer io.Writer, reader io.Reader, args []string) error {
if len(args) > 0 {
return doWithArgs(writer, args)
} else {
return doWithReader(writer, reader)
}
}
// doWithArgs resolves each argument to its directory and writes the
// results space-separated to writer, stopping at the first error.
func doWithArgs(writer io.Writer, args []string) error {
	first := true
	for _, arg := range args {
		path, err := dirOf(arg)
		if err != nil {
			return err
		}
		// Bug fix: path was previously passed as the Fprintf format
		// string, so any '%' in a file name corrupted the output.
		if first {
			fmt.Fprintf(writer, "%s", path)
			first = false
		} else {
			fmt.Fprintf(writer, " %s", path)
		}
	}
	return nil
}
// doWithReader processes newline-separated paths from in, writing one
// resolved directory per line. A trailing partial line (data before EOF)
// is still processed; io.EOF terminates successfully.
func doWithReader(writer io.Writer, in io.Reader) error {
reader := bufio.NewReader(in)
for {
read, readErr := reader.ReadString('\n')
// Handle any data read even when an error accompanied it.
if len(read) > 0 {
if err := handleLine(writer, read); err != nil {
return err
}
}
if readErr != nil {
if readErr == io.EOF {
return nil
}
return readErr
}
}
}
// handleLine trims one input line, resolves it to its directory, and
// writes the result followed by a newline.
// NOTE(review): stringutil.Trim presumably behaves like
// strings.TrimSpace; confirm before replacing the dependency.
func handleLine(writer io.Writer, line string) error {
content, err := dirOf(stringutil.Trim(line))
if err != nil {
return err
}
fmt.Fprintf(writer, "%s\n", content)
return nil
}
// dirOf returns path itself when it names a directory, otherwise the
// directory containing it. Stat failures are returned with an empty path.
func dirOf(path string) (string, error) {
	info, err := os.Stat(path)
	if err != nil {
		return "", err
	}
	if !info.IsDir() {
		return filepath.Dir(path), nil
	}
	return path, nil
}
|
package main
import (
"encoding/json"
"expvar"
"fmt"
"net"
"net/http"
)
var (
counts = expvar.NewMap("counters")
)
// init pre-populates the expvar counters so /debug/vars has data to show.
func init() {
counts.Add("a", 10)
counts.Add("b", 10)
}
// main is an expvar demo: it prints a sample JSON map, serves the default
// mux (which includes /debug/vars) on localhost:9123, and blocks forever.
func main() {
	a := make(map[string]string)
	a["name"] = "zyf"
	// Improvement: the Marshal error was silently discarded.
	b, err := json.Marshal(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	sock, err := net.Listen("tcp", "localhost:9123")
	if err != nil {
		panic("sock error")
	}
	go func() {
		fmt.Println("HTTP now available at port 9123")
		http.Serve(sock, nil)
	}()
	// Typo fix: "htllo" -> "hello".
	fmt.Println("hello")
	// Block forever; the HTTP server runs in the goroutine above.
	select {}
}
|
package main
//731. 我的日程安排表 II
//实现一个 MyCalendar 类来存放你的日程安排。如果要添加的时间内不会导致三重预订时,则可以存储这个新的日程安排。
//
//MyCalendar 有一个 book(int start, int end)方法。它意味着在 start 到 end 时间内增加一个日程安排,注意,这里的时间是半开区间,即 [start, end), 实数x 的范围为, start <= x < end。
//
//当三个日程安排有一些时间上的交叉时(例如三个日程安排都在同一时间内),就会产生三重预订。
//
//每次调用 MyCalendar.book方法时,如果可以将日程安排成功添加到日历中而不会导致三重预订,返回 true。否则,返回 false 并且不要将该日程安排添加到日历中。
//
//请按照以下步骤调用MyCalendar 类: MyCalendar cal = new MyCalendar(); MyCalendar.book(start, end)
//
//
//示例:
//
//MyCalendar();
//MyCalendar.book(10, 20); // returns true
//MyCalendar.book(50, 60); // returns true
//MyCalendar.book(10, 40); // returns true
//MyCalendar.book(5, 15); // returns false
//MyCalendar.book(5, 10); // returns true
//MyCalendar.book(25, 55); // returns true
//解释:
//前两个日程安排可以添加至日历中。 第三个日程安排会导致双重预订,但可以添加至日历中。
//第四个日程安排活动(5,15)不能添加至日历中,因为它会导致三重预订。
//第五个日程安排(5,10)可以添加至日历中,因为它未使用已经双重预订的时间10。
//第六个日程安排(25,55)可以添加至日历中,因为时间 [25,40] 将和第三个日程安排双重预订;
//时间 [40,50] 将单独预订,时间 [50,55)将和第二个日程安排双重预订。
//
//
//提示:
//
//每个测试用例,调用MyCalendar.book函数最多不超过1000次。
//调用函数MyCalendar.book(start, end)时,start 和end 的取值范围为[0, 10^9]。
// Dynamically allocated segment tree with lazy propagation over [0, 1e9].
// lazy accumulates whole-range additions; cnt holds the range maximum
// booking count.
type pair struct {
cnt, lazy int
}
// MyCalendarTwo stores segment-tree nodes sparsely in a map keyed by node
// index (root = 1, children = 2i and 2i+1), so only touched ranges
// allocate entries.
type MyCalendarTwo struct {
Node map[int]pair
}
// Constructor returns an empty calendar with an initialized node map.
func Constructor() MyCalendarTwo {
return MyCalendarTwo{make(map[int]pair)}
}
// Book tentatively adds the half-open interval [start, end). If the
// booking would create a triple overlap (root max count > 2), the update
// is rolled back with val = -1 and false is returned.
func (this *MyCalendarTwo) Book(start int, end int) bool {
this.Update(start, end-1, 0, 1e9, 1, 1)
if this.Node[1].cnt > 2 {
this.Update(start, end-1, 0, 1e9, 1, -1)
return false
}
return true
}
// Update adds val to every point of [s, e] within the node covering
// [l, r] (node id index). Fully covered nodes only record the addition in
// cnt and lazy (lazy propagation); partially covered nodes recurse and
// recompute cnt from their children plus their own lazy offset.
func (this *MyCalendarTwo) Update(s, e, l, r, index, val int) {
// No overlap with this node's range.
if s > r || e < l {
return
}
// Node range fully inside [s, e]: apply lazily, do not descend.
if s <= l && r <= e {
node := this.Node[index]
node.cnt += val
node.lazy += val
this.Node[index] = node
return
}
mid := (l + r) >> 1
this.Update(s, e, l, mid, index*2, val)
this.Update(s, e, mid+1, r, index*2+1, val)
// Pull up: max of children plus the additions pending on this node.
node := this.Node[index]
node.cnt = node.lazy + max(this.Node[index*2].cnt, this.Node[index*2+1].cnt)
this.Node[index] = node
}
// max returns the larger of x and y.
func max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
// min returns the smaller of x and y.
func min(x, y int) int {
	if y < x {
		return y
	}
	return x
}
/**
* Your MyCalendarTwo object will be instantiated and called as such:
* obj := Constructor();
* param_1 := obj.Book(start,end);
*/
|
package reader
import (
"encoding/csv"
"io"
"os"
)
// CsvFromFileWithBreak reads the named CSV file record by record; the
// callback may return true as a break signal to stop early.
func CsvFromFileWithBreak(name string, f func(rcs []string) bool) error {
file, err := os.Open(name)
if err != nil {
return err
}
defer file.Close()
return CsvWithBreak(file, f)
}
// CsvWithBreak reads CSV records from r, invoking f for each; when f
// returns true (the break signal), reading stops early.
func CsvWithBreak(r io.Reader, f func(rcs []string) bool) error {
	cr := csv.NewReader(r)
	for {
		rcs, err := cr.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// Bug fix: f's boolean result was ignored, so the documented
		// break signal never actually stopped the loop.
		if f(rcs) {
			break
		}
	}
	return nil
}
// Csv parses all CSV records from r, passing each record to f.
func Csv(r io.Reader, f func(rcs []string)) error {
	reader := csv.NewReader(r)
	for {
		record, err := reader.Read()
		switch {
		case err == io.EOF:
			return nil
		case err != nil:
			return err
		}
		f(record)
	}
}
|
package cmd
import "github.com/bwmarrin/discordgo"
// Command works as an interface for other commands: every command can be
// executed against a Discord message and exposes its common metadata.
type Command interface {
Execute(s *discordgo.Session, m *discordgo.MessageCreate)
GetCommons() CommandCommon
}
// Commands is a struct to hold all bot commands grouped by category.
type Commands struct {
DummyCommands []CommandDummy
ColorCommands []CommandColor
DebugCommands []CommandDebug
ModerationCommands []CommandModeration
}
// CmdList is the shared registry of all commands, populated elsewhere.
var CmdList *Commands = &Commands{}
/*CommandCommon represents fields common between all commands
Fields are as follows:
Caller
Text used to call command
Response
Bot's response to command request
Description
Basic description of command result
Structure
Format for command
Channels
Channel(s) this command can be used in. Blank for any
RequiredPermissions
Channel permissions required to use command
RequiredUsers
UserID(s) required to use command
*/
type CommandCommon struct {
Caller string
Response string
Description string
Structure string
Action string
Channels []string
RequiredPermissions int
RequiredUsers []string
}
// Permission is a single Discord channel-permission bit; values are
// combined as a bitmask.
type Permission int

// all permissions, one bit each via 1 << iota; the trailing comments group
// them into the text/voice/manage/other categories used by the
// check*Perms helpers below. The noneN names are placeholders that keep
// the iota sequence aligned with Discord's reserved/unused bit positions.
const (
	createInstantInvite Permission = 1 << iota //other
	kickMembers                                //other
	banMembers                                 //other
	administrator                              //other
	manageChannels                             //manage
	manageGuild                                //manage
	addReactions                               //text
	viewAuditLog                               //other
	none1
	none2
	readMessages       //text
	sendMessages       //text
	sendTTSMessages    //text
	manageMessages     //text
	embedLinks         //text
	attachFiles        //text
	readMessageHistory //text
	mentionEveryone    //text
	useExternalEmojis  //text
	none3
	connect       //voice
	speak         //voice
	muteMembers   //voice
	deafenMembers //voice
	moveMembers   //voice
	useVAD        //voice
	changeNickname  //other
	manageNicknames //manage
	manageRoles     //manage
	manageWebhooks  //manage
	manageEmojis    //manage
	none4
)
// permissionMap maps each Permission bit to its human-readable name.
// The noneN placeholder bits are deliberately absent.
var permissionMap = map[Permission]string{
	createInstantInvite: "CreateInstantInvite",
	kickMembers:         "KickMembers",
	banMembers:          "BanMembers",
	administrator:       "Administrator",
	manageChannels:      "ManageChannels",
	manageGuild:         "ManageGuild",
	addReactions:        "AddReactions",
	viewAuditLog:        "ViewAuditLog",
	readMessages:        "ReadMessages",
	sendMessages:        "SendMessages",
	sendTTSMessages:     "SendTTSMessages",
	manageMessages:      "ManageMessages",
	embedLinks:          "EmbedLinks",
	attachFiles:         "AttachFiles",
	readMessageHistory:  "ReadMessageHistory",
	mentionEveryone:     "MentionEveryone",
	useExternalEmojis:   "UseExternalEmojis",
	connect:             "Connect",
	speak:               "Speak",
	muteMembers:         "MuteMembers",
	deafenMembers:       "DeafenMembers",
	moveMembers:         "MoveMembers",
	useVAD:              "UseVAD",
	changeNickname:      "ChangeNickname",
	manageNicknames:     "ManageNicknames",
	manageRoles:         "ManageRoles",
	manageWebhooks:      "ManageWebhooks",
	manageEmojis:        "ManageEmojis",
}
// canExecute reports whether the author of m may run this command in the
// channel it was sent from, based on the command's required permissions.
// TODO(review): RequiredUsers is declared but not yet checked here.
func (c CommandCommon) canExecute(s *discordgo.Session, m *discordgo.MessageCreate) bool {
	// Most commands won't have tied permissions, so this speeds most up.
	if c.RequiredPermissions == 0 && len(c.RequiredUsers) == 0 {
		return true
	}
	// Only query Discord once we know the command is actually restricted
	// (previously the lookup ran even for the unrestricted fast path).
	// The lookup error is deliberately ignored: on failure userPerms is 0,
	// so a restricted command is denied — this fails closed.
	userPerms, _ := s.UserChannelPermissions(m.Author.ID, m.ChannelID)
	return userPerms&c.RequiredPermissions == c.RequiredPermissions
}
func getRequiredPermissionNames(permissions int) (perms []string) {
for val, name := range permissionMap {
if val&Permission(permissions) != 0 {
perms = append(perms, name)
}
}
return
}
// sendErrorResponse tells the user the correct format for the command.
func (c CommandCommon) sendErrorResponse(s *discordgo.Session, channelID string) {
	response := "Must use correct format: " + c.Structure
	// Best effort: the send error is intentionally ignored.
	s.ChannelMessageSend(channelID, response)
	// (Removed a redundant bare `return` at the end of the function.)
}
// CheckPerm checks if a user has a specified permission bit set.
func CheckPerm(userPerms int, perm int) bool {
	// Idiom: return the boolean expression directly instead of the
	// if-true/return-false ladder.
	return userPerms&perm != 0
}
// checkOtherPerms reports whether userPerms grants every "other"-category
// permission that commandPerms demands.
func checkOtherPerms(userPerms Permission, commandPerms Permission) bool {
	others := []Permission{
		createInstantInvite,
		kickMembers,
		banMembers,
		administrator,
		viewAuditLog,
		changeNickname,
	}
	for _, p := range others {
		// Fail when the command requires a bit the user lacks.
		if commandPerms&p != 0 && userPerms&p == 0 {
			return false
		}
	}
	return true
}
// checkManagementPerms reports whether userPerms grants every
// management-category permission that commandPerms demands.
func checkManagementPerms(userPerms Permission, commandPerms Permission) bool {
	management := []Permission{
		manageChannels,
		manageEmojis,
		manageGuild,
		manageMessages,
		manageNicknames,
		manageRoles,
		manageWebhooks,
	}
	for _, p := range management {
		// Fail when the command requires a bit the user lacks.
		if commandPerms&p != 0 && userPerms&p == 0 {
			return false
		}
	}
	return true
}
// checkTextPerms reports whether userPerms grants every text-category
// permission that commandPerms demands.
func checkTextPerms(userPerms Permission, commandPerms Permission) bool {
	text := []Permission{
		addReactions,
		readMessages,
		sendMessages,
		sendTTSMessages,
		manageMessages,
		embedLinks,
		attachFiles,
		readMessageHistory,
		mentionEveryone,
		useExternalEmojis,
	}
	for _, p := range text {
		// Fail when the command requires a bit the user lacks.
		if commandPerms&p != 0 && userPerms&p == 0 {
			return false
		}
	}
	return true
}
// checkVoicePerms reports whether userPerms grants every voice-category
// permission that commandPerms demands.
func checkVoicePerms(userPerms Permission, commandPerms Permission) bool {
	voice := []Permission{
		connect,
		speak,
		muteMembers,
		deafenMembers,
		moveMembers,
		useVAD,
	}
	for _, p := range voice {
		// Fail when the command requires a bit the user lacks.
		if commandPerms&p != 0 && userPerms&p == 0 {
			return false
		}
	}
	return true
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
)
// Post is the demo JSON payload shape: a user name and sex.
// (Original comment "格式" = "format".)
type Post struct {
	User string
	Sex  string
}
// PostArr wraps a list of Post pointers for JSON marshalling.
type PostArr struct {
	Post []*Post
}
func reloactionExample(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Location", "http://127.0.0.1:8080/json")
w.WriteHeader(302)
}
// jsonExample writes a fixed PostArr payload (the same Post twice) as JSON.
func jsonExample(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	post := &Post{
		User: "Chen YuZhao",
		Sex:  "Male",
	}
	// Renamed from `PostArr` — the original variable shadowed the PostArr
	// type inside this function.
	arr := &PostArr{
		Post: []*Post{post, post},
	}
	log.Println(arr)
	j, err := json.Marshal(arr)
	if err != nil {
		// Previously the marshal error was discarded, silently writing an
		// empty body on failure.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, string(j))
}
// main wires up the demo HTTP routes and serves on 127.0.0.1:8080.
func main() {
	server := http.Server{
		Addr: "127.0.0.1:8080",
	}
	http.HandleFunc("/relocation", reloactionExample)
	http.HandleFunc("/json", jsonExample)
	// ListenAndServe always returns a non-nil error (bind failure or
	// shutdown); previously it was silently discarded.
	if err := server.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}
|
package datastructs
// Queue is a fixed-capacity FIFO queue backed by a slice.
// head indexes the next element to dequeue and tail the next free slot.
// Indices only advance (no wrap-around), so at most fixSize elements can
// pass through a Queue over its whole lifetime.
type Queue struct {
	array      []interface{}
	tail, head int
	fixSize    int
}

// NewQueue returns an empty Queue able to hold capacity elements.
// It panics if capacity is not positive.
func NewQueue(capacity int) *Queue {
	if capacity <= 0 {
		panic("Queue must have Capacity")
	}
	return &Queue{
		array:   make([]interface{}, capacity),
		fixSize: capacity,
	}
}

// Enqueue appends x at the back of the queue.
// It panics with "overflow" when the backing storage is exhausted.
func (q *Queue) Enqueue(x interface{}) {
	if q.tail == q.fixSize {
		panic("overflow")
	}
	q.array[q.tail] = x
	q.tail++
}

// Dequeue removes and returns the element at the front of the queue.
// It panics with "underflow" when the queue is empty.
func (q *Queue) Dequeue() interface{} {
	// Bug fix: the empty check must compare head against tail, not against
	// fixSize — the old check let callers "dequeue" slots that were never
	// enqueued (returning nil) until head reached capacity.
	if q.head == q.tail {
		panic("underflow")
	}
	x := q.array[q.head]
	q.array[q.head] = nil // drop the reference so the GC can reclaim it
	q.head++
	return x
}
|
package main
import (
"context"
vine "github.com/lack-io/vine/service"
log "github.com/lack-io/vine/service/logger"
"github.com/lack-io/vine/service/server"
"github.com/lack-io/vine/util/context/metadata"
proto "github.com/lack-io/vine-example/pubsub/proto"
)
// Sub is a subscriber receiver type: all of its methods are executed when
// a message is received on the topic it is registered for.
type Sub struct{}
// Process handles one pubsub event; the method name is arbitrary — every
// method on the registered receiver is wired up as a handler.
func (s *Sub) Process(ctx context.Context, event *proto.Event) error {
	meta, _ := metadata.FromContext(ctx)
	log.Infof("[pubsub.1] Received event %+v with metadata %+v\n", event, meta)
	// do something with event
	return nil
}
// subEv is a function-style subscriber, the alternative to a receiver type.
func subEv(ctx context.Context, event *proto.Event) error {
	meta, _ := metadata.FromContext(ctx)
	log.Infof("[pubsub.2] Received event %+v with metadata %+v\n", event, meta)
	// do something with event
	return nil
}
// main registers the two pubsub subscribers (one plain, one queue-scoped)
// and runs the service until it exits.
func main() {
	// create a service
	service := vine.NewService(
		vine.Name("go.vine.srv.pubsub"),
	)
	// parse command line
	service.Init()
	// register subscriber; registration errors were previously discarded,
	// leaving a silently non-functional subscriber.
	if err := vine.RegisterSubscriber("example.topic.pubsub.1", service.Server(), new(Sub)); err != nil {
		log.Fatal(err)
	}
	// register subscriber with queue, each message is delivered to a unique subscriber
	if err := vine.RegisterSubscriber("example.topic.pubsub.2", service.Server(), subEv, server.SubscriberQueue("queue.pubsub")); err != nil {
		log.Fatal(err)
	}
	if err := service.Run(); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"flag"
"net/http"
"os"
"os/user"
"runtime"
"github.com/fabric8-services/fabric8-common/log"
"github.com/fabric8-services/fabric8-common/metric"
"github.com/fabric8-services/fabric8-common/sentry"
"github.com/fabric8-services/fabric8-webhook/app"
"github.com/fabric8-services/fabric8-webhook/build"
"github.com/fabric8-services/fabric8-webhook/configuration"
"github.com/fabric8-services/fabric8-webhook/controller"
"github.com/fabric8-services/fabric8-webhook/verification"
"github.com/goadesign/goa"
goalogrus "github.com/goadesign/goa/logging/logrus"
"github.com/goadesign/goa/middleware"
"github.com/goadesign/goa/middleware/gzip"
"github.com/google/gops/agent"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// main wires configuration, logging, sentry, metrics and the webhook/status
// controllers together, then serves HTTP until the process exits.
func main() {
	// --------------------------------------------------------------------
	// Parse flags
	// --------------------------------------------------------------------
	var configFilePath string
	var printConfig bool
	flag.StringVar(&configFilePath, "config", "", "Path to the config file to read")
	flag.BoolVar(&printConfig, "printConfig", false, "Prints the config (including merged environment variables) and exits")
	flag.Parse()

	// Override default -config switch with environment variable only if -config switch was
	// not explicitly given via the command line.
	configSwitchIsSet := false
	flag.Visit(func(f *flag.Flag) {
		if f.Name == "config" {
			configSwitchIsSet = true
		}
	})
	if !configSwitchIsSet {
		if envConfigPath, ok := os.LookupEnv("F8_CONFIG_FILE_PATH"); ok {
			configFilePath = envConfigPath
		}
	}

	config, err := configuration.New(configFilePath)
	if err != nil {
		log.Panic(nil, map[string]interface{}{
			"config_file_path": configFilePath,
			"err":              err,
		}, "failed to setup the configuration")
	}

	if printConfig {
		// TODO(review): the -printConfig flag help promises to print the
		// config before exiting, but nothing is printed here — confirm
		// the intended behavior.
		os.Exit(0)
	}

	// Initialized developer mode flag and log level for the logger.
	log.InitializeLogger(config.IsLogJSON(), config.GetLogLevel())

	// Initialize sentry client.
	haltSentry, err := sentry.InitializeSentryClient(
		nil, // will use the `os.Getenv("Sentry_DSN")` instead
		sentry.WithRelease(app.Commit),
		sentry.WithEnvironment(config.GetEnvironment()),
	)
	if err != nil {
		log.Panic(nil, map[string]interface{}{
			"err": err,
		}, "failed to setup the sentry client")
	}
	defer haltSentry()

	printUserInfo()

	// Create service.
	service := goa.New("fabric8-webhook")

	// Mount middleware.
	service.Use(middleware.RequestID())
	// Use our own log request to inject identity id and modify other properties.
	service.Use(gzip.Middleware(9))
	service.Use(app.ErrorHandler(service, true))
	service.Use(middleware.Recover())

	// Record HTTP request metrics in prometheus.
	service.Use(
		metric.Recorder(
			"fabric8_webhook",
			metric.WithRequestDurationBucket(prometheus.ExponentialBuckets(0.05, 2, 8))))
	service.WithLogger(goalogrus.New(log.Logger()))
	// service.Use(metric.Recorder())

	// Mount the 'status' controller.
	statusCtrl := controller.NewStatusController(service)
	app.MountStatusController(service, statusCtrl)

	verificationSvc, err := verification.New(service, config.GetMonitorIPDuration())
	if err != nil {
		log.Panic(nil, map[string]interface{}{
			"err": err,
		}, "failed to setup the verification service")
	}

	buildSvc := build.New()
	// NOTE(review): removed a dead `if err != nil` check that used to sit
	// here — it re-tested the error from verification.New, which was
	// already handled above; build.New() itself returns no error.

	// Mount "webhook" controller.
	webhookCtrl := controller.NewWebhookController(service,
		config, verificationSvc, buildSvc)
	app.MountWebhookController(service, webhookCtrl)

	log.Logger().Infoln("Git Commit SHA: ", app.Commit)
	log.Logger().Infoln("UTC Build Time: ", app.BuildTime)
	log.Logger().Infoln("UTC Start Time: ", app.StartTime)
	log.Logger().Infoln("GOMAXPROCS: ", runtime.GOMAXPROCS(-1))
	log.Logger().Infoln("NumCPU: ", runtime.NumCPU())

	http.Handle("/api/", service.Mux)
	http.Handle("/favicon.ico", http.NotFoundHandler())

	if config.GetDiagnoseHTTPAddress() != "" {
		log.Logger().Infoln("Diagnose: ", config.GetDiagnoseHTTPAddress())
		// Start diagnostic http
		if err := agent.Listen(agent.Options{Addr: config.GetDiagnoseHTTPAddress(), ConfigDir: "/tmp/gops/"}); err != nil {
			log.Error(nil, map[string]interface{}{
				"addr": config.GetDiagnoseHTTPAddress(),
				"err":  err,
			}, "unable to connect to diagnose server")
		}
	}

	// Start/mount metrics http: on the main mux when the addresses match,
	// otherwise on a dedicated server in a goroutine.
	if config.GetHTTPAddress() == config.GetMetricsHTTPAddress() {
		http.Handle("/metrics", promhttp.Handler())
	} else {
		go func(metricAddress string) {
			mx := http.NewServeMux()
			mx.Handle("/metrics", promhttp.Handler())
			if err := http.ListenAndServe(metricAddress, mx); err != nil {
				log.Error(nil, map[string]interface{}{
					"addr": metricAddress,
					"err":  err,
				}, "unable to connect to metrics server")
				service.LogError("startup", "err", err)
			}
		}(config.GetMetricsHTTPAddress())
	}

	// Start http.
	if err := http.ListenAndServe(config.GetHTTPAddress(), nil); err != nil {
		log.Error(nil, map[string]interface{}{
			"addr": config.GetHTTPAddress(),
			"err":  err,
		}, "unable to connect to server")
		service.LogError("startup", "err", err)
	}
}
// printUserInfo logs the OS user (and, when resolvable, the group) the
// process is running as.
func printUserInfo() {
	u, err := user.Current()
	if err != nil {
		log.Warn(nil, map[string]interface{}{
			"err": err,
		}, "failed to get current user")
		return
	}
	log.Info(nil, map[string]interface{}{
		"username": u.Username,
		"uuid":     u.Uid,
	}, "Running as user name '%s' with UID %s.", u.Username, u.Uid)
	g, err := user.LookupGroupId(u.Gid)
	if err != nil {
		log.Warn(nil, map[string]interface{}{
			"err": err,
		}, "failed to lookup group")
		return
	}
	log.Info(nil, map[string]interface{}{
		"groupname": g.Name,
		"gid":       g.Gid,
	}, "Running as as group '%s' with GID %s.", g.Name, g.Gid)
}
|
package rst
import "testing"
// TestLastLineIndexOfFieldLists runs the RST state machine over a sample
// article and logs the index of the last line of its field lists.
func TestLastLineIndexOfFieldLists(t *testing.T) {
	path := "../userpages/content/articles/2012/07/13/javascript-drag-and-drop-draggable-movable-element%en.rst"
	r, err := NewRstStateMachine(path)
	if err != nil {
		// Fatal, not Error: continuing with a nil state machine would
		// panic on the r.Run() call below.
		t.Fatal(err)
	}
	r.Run()
	//r.DebugPrintRstBlocks()
	rfls, err := r.GetFieldLists()
	if err != nil {
		t.Fatal(err)
	}
	lastLineIndex := GetIndexOfLastLineOfFieldLists(rfls)
	// t.Log instead of the bare print/println builtins.
	t.Log("last:", lastLineIndex)
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package time
import (
"strconv"
)
// ClockID is a Linux clock identifier.
type ClockID int32

// These are the supported Linux clock identifiers.
const (
	Realtime ClockID = iota
	Monotonic
)

// String implements fmt.Stringer.String.
func (c ClockID) String() string {
	if c == Realtime {
		return "Realtime"
	}
	if c == Monotonic {
		return "Monotonic"
	}
	// Unknown identifiers render as their numeric value.
	return strconv.Itoa(int(c))
}
|
package rdbms
import (
"fmt"
domEntity "github.com/d3ta-go/ddd-mod-account/modules/account/domain/entity"
"github.com/d3ta-go/system/system/handler"
migRDBMS "github.com/d3ta-go/system/system/migration/rdbms"
"github.com/d3ta-go/system/system/utils"
"gorm.io/gorm"
)
// Seed20201119001InitTable seeds the initial account tables (default admin
// user and client app) via the embedded gorm migrator runner.
type Seed20201119001InitTable struct {
	migRDBMS.BaseGormMigratorRunner
}
// NewSeed20201119001InitTable constructs the seed runner and wires in the
// shared handler and its string ID.
func NewSeed20201119001InitTable(h *handler.Handler) (migRDBMS.IGormMigratorRunner, error) {
	runner := &Seed20201119001InitTable{}
	runner.SetHandler(h)
	runner.SetID("Seed20201119001InitTable")
	return runner, nil
}
// GetID get Seed20201119001InitTable ID.
// NOTE(review): this returns the Go type name via %T (e.g.
// "*rdbms.Seed20201119001InitTable"), which differs from the plain string
// set by SetID in the constructor — confirm which form consumers expect.
func (dmr *Seed20201119001InitTable) GetID() string {
	return fmt.Sprintf("%T", dmr)
}
// Run executes the seed: it (re)binds the gorm handle when one is supplied
// and, if a handle is available, inserts the default records.
func (dmr *Seed20201119001InitTable) Run(h *handler.Handler, dbGorm *gorm.DB) error {
	if dbGorm != nil {
		dmr.SetGorm(dbGorm)
	}
	if dmr.GetGorm() == nil {
		// No database handle: silently a no-op, matching the original.
		return nil
	}
	return dmr._seeds()
}
// RollBack reverses the seed: it (re)binds the gorm handle when one is
// supplied and, if a handle is available, removes the default records.
func (dmr *Seed20201119001InitTable) RollBack(h *handler.Handler, dbGorm *gorm.DB) error {
	if dbGorm != nil {
		dmr.SetGorm(dbGorm)
	}
	if dmr.GetGorm() == nil {
		// No database handle: silently a no-op, matching the original.
		return nil
	}
	return dmr._unSeeds()
}
// _seeds inserts the default admin user and its default client app, but
// only when both target tables already exist (the schema migration is
// expected to have run first).
func (dmr *Seed20201119001InitTable) _seeds() error {
	if dmr.GetGorm().Migrator().HasTable(&domEntity.SysUserEntity{}) &&
		dmr.GetGorm().Migrator().HasTable(&domEntity.SysUserClientAppsEntity{}) {
		// get default admin config
		cfg, err := dmr.GetHandler().GetDefaultConfig()
		if err != nil {
			return err
		}
		defaultAdmin := cfg.IAM.DefaultAdmin

		// create default user
		eUser := domEntity.SysUserEntity{
			UUID:     utils.GenerateUUID(),
			Username: defaultAdmin.Username,
			// NOTE(review): MD5 is not a safe password hash (fast,
			// unsalted); consider bcrypt/argon2 — confirm what the auth
			// layer expects before changing.
			Password:    utils.MD5([]byte(defaultAdmin.Password)),
			NickName:    defaultAdmin.NickName,
			Email:       defaultAdmin.Email,
			IsActive:    true,
			AuthorityID: defaultAdmin.AuthorityID,
		}
		eUser.CreatedBy = "system.d3tago@installation"
		if err := dmr.GetGorm().Create(&eUser).Error; err != nil {
			return err
		}

		// create default client app, owned by the user created above
		eCApp := domEntity.SysUserClientAppsEntity{
			UUID:          utils.GenerateUUID(),
			ClientAppCode: fmt.Sprintf("default-%s-app", defaultAdmin.Username),
			ClientAppName: fmt.Sprintf("Default %s App", defaultAdmin.NickName),
			ClientAppDesc: fmt.Sprintf("Default %s App Description", defaultAdmin.NickName),
			ClientKey:     utils.GenerateClientKey(),
			SecretKey:     utils.GenerateSecretKey(),
			IsActive:      true,
			UserID:        eUser.ID,
		}
		eCApp.CreatedBy = "system.d3tago@installation"
		if err := dmr.GetGorm().Create(&eCApp).Error; err != nil {
			return err
		}
	}
	return nil
}
// _unSeeds removes the default admin user and its client apps (hard
// deletes via Unscoped), but only when both target tables exist.
func (dmr *Seed20201119001InitTable) _unSeeds() error {
	if dmr.GetGorm().Migrator().HasTable(&domEntity.SysUserEntity{}) &&
		dmr.GetGorm().Migrator().HasTable(&domEntity.SysUserClientAppsEntity{}) {
		// find default user by the configured admin username
		cfg, err := dmr.GetHandler().GetDefaultConfig()
		if err != nil {
			return err
		}
		defaultAdmin := cfg.IAM.DefaultAdmin

		var eUser domEntity.SysUserEntity
		if err := dmr.GetGorm().Unscoped().Where(&domEntity.SysUserEntity{Username: defaultAdmin.Username}).First(&eUser).Error; err != nil {
			return err
		}

		// delete default client app (children first, then the owner)
		if err := dmr.GetGorm().Unscoped().Where(&domEntity.SysUserClientAppsEntity{UserID: eUser.ID}).Delete(&domEntity.SysUserClientAppsEntity{}).Error; err != nil {
			return err
		}

		// delete default user
		if err := dmr.GetGorm().Unscoped().Where(&domEntity.SysUserEntity{Username: eUser.Username}).Delete(&domEntity.SysUserEntity{}).Error; err != nil {
			return err
		}
	}
	return nil
}
|
package LeetCode
import "fmt"
// Code37 runs the LeetCode 37 sudoku solver against the canonical sample
// puzzle ('.' marks an empty cell).
func Code37() {
	puzzle := [][]byte{
		{'5', '3', '.', '.', '7', '.', '.', '.', '.'},
		{'6', '.', '.', '1', '9', '5', '.', '.', '.'},
		{'.', '9', '8', '.', '.', '.', '.', '6', '.'},
		{'8', '.', '.', '.', '6', '.', '.', '.', '3'},
		{'4', '.', '.', '8', '.', '3', '.', '.', '1'},
		{'7', '.', '.', '.', '2', '.', '.', '.', '6'},
		{'.', '6', '.', '.', '.', '.', '2', '8', '.'},
		{'.', '.', '.', '4', '1', '9', '.', '.', '5'},
		{'.', '.', '.', '.', '8', '.', '.', '7', '9'},
	}
	solveSudoku(puzzle)
}
/**
编写一个程序,通过已填充的空格来解决数独问题。
一个数独的解法需遵循如下规则:
数字 1-9 在每一行只能出现一次。
数字 1-9 在每一列只能出现一次。
数字 1-9 在每一个以粗实线分隔的 3x3 宫内只能出现一次。
空白格用 '.' 表示。
一个数独。
答案被标成红色。
Note:
给定的数独序列只包含数字 1-9 和字符 '.' 。
你可以假设给定的数独只有唯一解。
给定数独永远是 9x9 形式的。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/sudoku-solver
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
// solveSudoku fills board in place via backtracking (dfs_37 starting at
// cell 0) and prints the resulting board as raw byte values.
func solveSudoku(board [][]byte) {
	dfs_37(board, 0)
	fmt.Println(board)
}
/* dfs_37 backtracks over the board cell by cell; k is the index of the
current cell when the 9x9 board is flattened row-major. It returns true
once every cell has been filled consistently. */
func dfs_37(board [][]byte, k int) bool {
	if k == 81 {
		// All 81 cells processed: the board is solved.
		return true
	}
	row, col := k/9, k%9
	if board[row][col] != '.' {
		// Pre-filled cell: skip ahead.
		return dfs_37(board, k+1)
	}
	// Top-left corner of the 3x3 box containing (row, col).
	boxR, boxC := row-row%3, col-col%3
	// canPlace checks the sudoku constraints for digit d at (row, col).
	canPlace := func(d byte) bool {
		for i := 0; i < 9; i++ {
			if board[row][i] == d ||
				board[i][col] == d ||
				board[boxR+i/3][boxC+i%3] == d {
				return false
			}
		}
		return true
	}
	for d := byte('1'); d <= '9'; d++ {
		if !canPlace(d) {
			continue
		}
		board[row][col] = d
		if dfs_37(board, k+1) {
			return true
		}
	}
	// No digit worked: restore the cell and backtrack.
	board[row][col] = '.'
	return false
}
/*func solveSudoku2(board [][]byte) {
boardMap := make([][]map[byte]int, len(board))
for i := range boardMap {
boardMap[i] = make([]map[byte]int, len(board[0]))
}
for i := 0; i < len(board); i++ {
eachMap := make(map[byte]int, 0)
for j := 0; j < len(board[0]); j++ {
if board[i][j] != '.' {
eachMap[board[i][j]] = 1
} else {
eachMap[board[i][j]] = 0
}
boardMap[i][] = append(boardMap[i], eachMap)
}
}
fmt.Printf("%+v\n", boardMap)
solve := false
dfs_37(board, 0, 0, boardMap, solve)
fmt.Println(board)
}
func dfs_37(board [][]byte, x int, y int, boardMap [][]map[byte]int, solve bool) {
if solve {
return
}
if x >= 9 {
solve = true
return
}
if board[x][y] != '.' {
if y < 8 {
dfs_37(board, x, y+1, boardMap, solve)
} else if y == 8 {
dfs_37(board, x+1, 0, boardMap, solve)
}
return
} else {
for key := 1; key <= 9; key++ {
fmt.Println(key, key-'0')
if boardMap[x][y][byte(key)] == 1 {
continue
}
if check(board, x, y, 48+byte(key)) {
board[x][y] = '0' + byte(key)
if _, ok := boardMap[x][y][byte(key)]; ok {
}
boardMap[x][y][byte(key)] = 1
fmt.Println(board)
if y < 8 {
dfs_37(board, x, y+1, boardMap, solve)
return
} else if y == 8 {
dfs_37(board, x+1, 0, boardMap, solve)
return
}
if !solve {
board[x][y] = '.'
boardMap[x][y][byte(key)] = 0
}
}
}
}
}
func check(board [][]byte, x int, y int, s byte) bool {
for i := 1; i <= 9; i++ {
if board[x][i-1] == s {
return false
}
if board[i-1][y] == s {
return false
}
}
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
curX, curY := begin(x, y)
if board[curX+i][curY+j] == s {
return false
}
}
}
return true
}
func begin(i int, j int) (int, int) {
if i < 3 && j < 3 {
return 0, 0
}
if i < 3 && j >= 3 && j < 6 {
return 0, 3
}
if i < 3 && j >= 6 && j < 9 {
return 0, 6
}
if i >= 3 && i < 6 && j < 3 {
return 3, 0
}
if i >= 3 && i < 6 && j >= 3 && j < 6 {
return 3, 3
}
if i >= 3 && i < 6 && j >= 6 && j < 9 {
return 3, 6
}
if i >= 6 && i < 9 && j < 3 {
return 6, 0
}
if i >= 6 && i < 9 && j >= 3 && j < 6 {
return 6, 3
}
if i >= 6 && i < 9 && j >= 6 && j < 9 {
return 6, 6
}
return 0, 0
}
*/
|
package main
import (
"fmt"
"math/rand"
"time"
"xpfunds"
"xpfunds/simulate"
)
var (
	funds       []*xpfunds.Fund // fund universe, loaded once at startup
	maxDuration int             // longest Duration() across all funds
	maxMonths   = 60            // window passed to the weighted simulation — TODO confirm unit is months
	numFunds    = 1             // portfolio size being optimized
)
// main seeds the RNG, loads the fund universe, then hill-climbs forever,
// printing the best point found on each iteration.
func main() {
	rand.Seed(time.Now().UnixNano())
	funds = xpfunds.ReadFunds()
	for _, f := range funds {
		if d := f.Duration(); d > maxDuration {
			maxDuration = d
		}
	}
	// One weight per feature, per fund slot, initialized uniformly in (-1, 1).
	point := make([]float64, (funds[0].FeatureCount()+(&simulate.Weighted{}).FeatureCount())*numFunds)
	for i := range point {
		point[i] = rand.Float64()*2 - 1
	}
	step := 1.0
	for i := 0; ; i++ {
		start := time.Now()
		best, perf := bestInRegion(point)
		end := time.Now()
		fmt.Printf("%v\t%v\t%v\t%v\t%v\n", i, perf, end.Sub(start).String(), best, step)
		point = best
	}
}
// bestInRegion performs one pass of stochastic coordinate descent around
// point: each coordinate is perturbed once by a random step and the change
// is kept only when it improves the median simulated performance. It
// returns the (possibly improved) point and its performance.
func bestInRegion(point []float64) ([]float64, float64) {
	newPoint := make([]float64, len(point))
	copy(newPoint, point) // idiomatic copy instead of a manual element loop
	bestPerf := simulate.MedianPerformance(funds, maxDuration, maxMonths*2, numFunds, simulate.NewWeighted(maxMonths, newPoint))
	for i := 0; i < len(newPoint); i++ {
		step := rand.Float64()*2 - 1
		// Keep every coordinate strictly inside (-1, 1).
		if newPoint[i]+step <= -1 || newPoint[i]+step >= 1 {
			continue
		}
		newPoint[i] += step
		perf := simulate.MedianPerformance(funds, maxDuration, maxMonths*2, numFunds, simulate.NewWeighted(maxMonths, newPoint))
		if perf > bestPerf {
			bestPerf = perf
			continue
		}
		// No improvement: revert this coordinate's perturbation.
		newPoint[i] -= step
	}
	return newPoint, bestPerf
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package firmware
import (
"bufio"
"context"
"regexp"
"time"
"github.com/golang/protobuf/ptypes/empty"
"chromiumos/tast/errors"
"chromiumos/tast/remote/firmware"
"chromiumos/tast/remote/firmware/fixture"
"chromiumos/tast/ssh"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// lidSwitchTest enumerates the sub-scenarios of the ECLidSwitch test.
type lidSwitchTest int

const (
	checkKeyPresses lidSwitchTest = iota // watch for spurious key events on lid open/close
	bootWithLid                          // power off, then open lid to boot
	shutdownWithLid                      // close lid to shut down, open lid to boot
	unsuspendWithLid                     // suspend, then open lid to wake
)
// init registers the ECLidSwitch test and its four parameterized
// sub-scenarios with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         ECLidSwitch,
		Desc:         "Test EC Lid Switch",
		Contacts:     []string{"tij@google.com", "cros-fw-engprod@google.com"},
		Attr:         []string{"group:firmware"},
		Fixture:      fixture.NormalMode,
		HardwareDeps: hwdep.D(hwdep.ChromeEC(), hwdep.Lid()),
		ServiceDeps:  []string{"tast.cros.firmware.UtilsService"},
		Timeout:      10 * time.Minute,
		Params: []testing.Param{
			{
				Name:              "check_key_press",
				Val:               checkKeyPresses,
				ExtraHardwareDeps: hwdep.D(hwdep.Keyboard()),
				ExtraAttr:         []string{"firmware_ec"},
			},
			{
				Name: "open_lid_to_boot",
				Val:  bootWithLid,
				// Original test in suites: faft_ec, faft_ec_fw_qual, faft_ec_tot.
				ExtraAttr: []string{"firmware_ec"},
			},
			{
				Name:      "close_lid_to_shutdown",
				Val:       shutdownWithLid,
				ExtraAttr: []string{"firmware_unstable"},
			},
			{
				// powerd_dbus_suspend is not very stable so leaving this in unstable.
				// This wasn't a test case in autotest so it's hard to determine the expected amount of stability.
				Name:      "open_lid_to_unsuspend",
				Val:       unsuspendWithLid,
				ExtraAttr: []string{"firmware_unstable"},
			},
		},
	})
}
// Delays used between servo lid operations and subsequent checks.
const (
	lidDelay time.Duration = 1 * time.Second  // minimum time for a lid state change to register
	wakeDelay time.Duration = 10 * time.Second // "wake after delay" variant of each scenario
	noDelay time.Duration = lidDelay // Just wait for lid state to change, no additional delay.
	bootDelay time.Duration = 15 * time.Second // settle time after reboot before toggling the lid
)
// ECLidSwitch dispatches to one of the four lid-switch sub-scenarios
// selected by the test parameter, running each twice (immediate wake and
// delayed wake) where applicable.
func ECLidSwitch(ctx context.Context, s *testing.State) {
	h := s.FixtValue().(*fixture.Value).Helper
	if err := h.RequireServo(ctx); err != nil {
		s.Fatal("Failed to connect to servo: ", err)
	}
	if err := h.RequireRPCUtils(ctx); err != nil {
		s.Fatal("Requiring RPC utils: ", err)
	}
	testMethod := s.Param().(lidSwitchTest)
	switch testMethod {
	case checkKeyPresses:
		s.Log("Check for errant keypresses on lid open/close")
		if err := checkKeyPressesWithLidClosed(ctx, h); err != nil {
			s.Fatal("Error checking key presses: ", err)
		}
	case bootWithLid:
		s.Log("Power off DUT and wake immediately")
		if err := bootWithLidOpen(ctx, h, noDelay); err != nil {
			s.Fatal("Failed to poweroff and wake immediately: ", err)
		}
		s.Log("Power off DUT and wake after delay")
		if err := bootWithLidOpen(ctx, h, wakeDelay); err != nil {
			s.Fatal("Failed to poweroff and wake after delay: ", err)
		}
	case shutdownWithLid:
		s.Log("Close DUT lid and wake immediately")
		if err := shutdownWithLidClose(ctx, h, noDelay); err != nil {
			s.Fatal("Failed to close lid and wake immediately: ", err)
		}
		// Bug fix: this delayed variant previously logged/reported
		// "wake immediately", copy-pasted from the case above.
		s.Log("Close DUT lid and wake after delay")
		if err := shutdownWithLidClose(ctx, h, wakeDelay); err != nil {
			s.Fatal("Failed to close lid and wake after delay: ", err)
		}
	case unsuspendWithLid:
		s.Log("Suspend DUT and wake immediately")
		if err := suspendAndWakeWithLid(ctx, h, noDelay); err != nil {
			s.Fatal("Failed to suspend DUT and wake immediately: ", err)
		}
		s.Log("Suspend DUT and wake after delay")
		if err := suspendAndWakeWithLid(ctx, h, wakeDelay); err != nil {
			s.Fatal("Failed to suspend DUT and wake after delay: ", err)
		}
	}
}
// suspendAndWakeWithLid suspends the DUT via powerd, closes the lid, waits
// for the requested delay, then opens the lid and verifies the DUT
// returns to the S0 power state.
func suspendAndWakeWithLid(ctx context.Context, h *firmware.Helper, delay time.Duration) error {
	testing.ContextLog(ctx, "Suspending DUT")
	suspend := h.DUT.Conn().CommandContext(ctx, "powerd_dbus_suspend", "--delay=5")
	if err := suspend.Start(); err != nil {
		return errors.Wrap(err, "failed to suspend DUT")
	}
	testing.ContextLog(ctx, "Checking for S0ix or S3 powerstate")
	if err := h.WaitForPowerStates(ctx, firmware.PowerStateInterval, firmware.PowerStateTimeout, "S0ix", "S3"); err != nil {
		return errors.Wrap(err, "failed to get S0ix or S3 powerstate")
	}
	if err := h.Servo.CloseLid(ctx); err != nil {
		return err
	}
	// Used by main function to either immediately wake or wake after some delay.
	if err := testing.Sleep(ctx, delay); err != nil {
		return err
	}
	if err := h.Servo.OpenLid(ctx); err != nil {
		return err
	}
	testing.ContextLog(ctx, "Waiting for S0 powerstate")
	if err := h.WaitForPowerStates(ctx, firmware.PowerStateInterval, firmware.PowerStateTimeout, "S0"); err != nil {
		return errors.Wrap(err, "failed to get S0 powerstate")
	}
	return nil
}
// shutdownWithLidClose closes the lid to shut the DUT down, waits for the
// requested delay, then opens the lid and verifies the DUT boots back to
// S0 and is reachable over SSH.
func shutdownWithLidClose(ctx context.Context, h *firmware.Helper, delay time.Duration) (reterr error) {
	// WA to clean system state for lid misfunction if harmless. Root cause is still unknown.
	if err := h.DUT.Reboot(ctx); err != nil {
		return errors.Wrap(err, "failed to reboot DUT")
	}
	// Log variables from powerd files to monitor unexpected settings.
	logCmd := `d="/var/lib/power_manager"; for f in $(ls -A $d); do echo "$f: $(cat $d/$f)"; done`
	out, err := h.DUT.Conn().CommandContext(ctx, "sh", "-c", logCmd).Output(ssh.DumpLogOnError)
	if err != nil {
		return errors.Wrap(err, "failed to read files in /var/lib/power_manager")
	}
	testing.ContextLog(ctx, "Files in /var/lib/power_manager: ", string(out))
	// Delay a few seconds to ensure no lid state change from system.
	if err := testing.Sleep(ctx, bootDelay); err != nil {
		return err
	}
	if err := h.Servo.CloseLid(ctx); err != nil {
		return err
	}
	// This usually takes longer than usual to reach G3/S5, so increase timeout.
	testing.ContextLog(ctx, "Check for G3 or S5 powerstate")
	err = h.WaitForPowerStates(ctx, firmware.PowerStateInterval, 2*firmware.PowerStateTimeout, "G3", "S5")
	if err != nil {
		return errors.Wrap(err, "failed to get G3 or S5 powerstate")
	}
	// Wait the caller-requested delay before reopening (noDelay == lidDelay,
	// so the lid is always given time to register as closed).
	if err := testing.Sleep(ctx, delay); err != nil {
		return err
	}
	if err := h.Servo.OpenLid(ctx); err != nil {
		return err
	}
	testing.ContextLog(ctx, "Waiting for S0 powerstate")
	err = h.WaitForPowerStates(ctx, firmware.PowerStateInterval, firmware.PowerStateTimeout, "S0")
	if err != nil {
		return errors.Wrap(err, "failed to get S0 powerstate")
	}
	if err := h.WaitConnect(ctx); err != nil {
		return errors.Wrap(err, "failed to connect to DUT")
	}
	return nil
}
// bootWithLidOpen powers the DUT off via shutdown, closes the lid, waits
// for the requested delay, then opens the lid and verifies the DUT boots
// back to S0 and is reachable over SSH.
func bootWithLidOpen(ctx context.Context, h *firmware.Helper, delay time.Duration) error {
	testing.ContextLog(ctx, "Shutdown dut")
	// The shutdown is backgrounded with a 2s sleep so the SSH command can
	// return before the connection drops.
	if err := h.DUT.Conn().CommandContext(ctx, "sh", "-c", "(sleep 2; /sbin/shutdown -P now) &").Start(); err != nil {
		return errors.Wrap(err, "failed to run `/sbin/shutdown -P now` cmd")
	}
	testing.ContextLog(ctx, "Check for G3 or S5 powerstate")
	if err := h.WaitForPowerStates(ctx, firmware.PowerStateInterval, 2*firmware.PowerStateTimeout, "G3", "S5"); err != nil {
		return errors.Wrap(err, "failed to get G3 or S5 powerstate")
	}
	if err := h.Servo.CloseLid(ctx); err != nil {
		return err
	}
	// Used by main function to either immediately wake or wake after some delay.
	testing.ContextLogf(ctx, "Delay opening lid by %s", delay)
	if err := testing.Sleep(ctx, delay); err != nil {
		return err
	}
	if err := h.Servo.OpenLid(ctx); err != nil {
		return err
	}
	testing.ContextLog(ctx, "Waiting for S0 powerstate")
	if err := h.WaitForPowerStates(ctx, firmware.PowerStateInterval, firmware.PowerStateTimeout, "S0"); err != nil {
		return errors.Wrap(err, "failed to get S0 powerstate")
	}
	if err := h.WaitConnect(ctx); err != nil {
		return errors.Wrap(err, "failed to reconnect to dut")
	}
	return nil
}
// checkKeyPressesWithLidClosed verifies that closing and reopening the lid
// produces no spurious key events on the DUT's internal keyboard. It
// temporarily disables powerd's lid handling so the DUT does not suspend
// while the lid is closed, and restores the powerd config on exit.
func checkKeyPressesWithLidClosed(ctx context.Context, h *firmware.Helper) (reterr error) {
	res, err := h.RPCUtils.FindPhysicalKeyboard(ctx, &empty.Empty{})
	if err != nil {
		return errors.Wrap(err, "failed to find keyboard")
	}
	device := res.Path
	testing.ContextLogf(ctx, "Keyboard found at %v, checking for unexpected keypresses", device)
	// Tell powerd to ignore the lid switch so the DUT stays up when closed.
	powerdCmd := "mkdir -p /tmp/power_manager && " +
		"echo 0 > /tmp/power_manager/use_lid && " +
		"mount --bind /tmp/power_manager /var/lib/power_manager && " +
		"restart powerd"
	if err := h.DUT.Conn().CommandContext(ctx, "sh", "-c", powerdCmd).Run(ssh.DumpLogOnError); err != nil {
		return errors.Wrap(err, "failed to set use_lid")
	}
	defer func(ctx context.Context) {
		restartPowerd := "umount /var/lib/power_manager && restart powerd"
		if err := h.DUT.Conn().CommandContext(ctx, "sh", "-c", restartPowerd).Run(ssh.DumpLogOnError); err != nil {
			reterr = errors.Wrap(err, "failed to restore powerd settings")
		}
	}(ctx)
	cmd := h.DUT.Conn().CommandContext(ctx, "evtest", device)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return errors.Wrap(err, "failed to pipe stdout from 'evtest' cmd")
	}
	scanner := bufio.NewScanner(stdout)
	// The Start error was previously discarded; a failed start would make
	// readKeyPress silently report "no keypresses" forever.
	if err := cmd.Start(); err != nil {
		return errors.Wrap(err, "failed to start 'evtest' cmd")
	}
	testing.ContextLog(ctx, "Started piping output from 'evtest'")
	defer cmd.Abort()
	// Compile the event pattern once instead of once per line read.
	eventRe := regexp.MustCompile(`Event.*time.*code\s(\d*)\s\(\S+\)`)
	readKeyPress := func() error {
		text := make(chan string)
		go func() {
			defer close(text)
			for scanner.Scan() {
				text <- scanner.Text()
			}
		}()
		// NOTE(review): if the 5s timeout fires while the reader goroutine is
		// blocked sending on text, that goroutine lingers until evtest exits;
		// acceptable here since cmd.Abort() runs before the test returns.
		for {
			select {
			case <-time.After(5 * time.Second):
				return nil
			case out := <-text:
				if match := eventRe.FindStringSubmatch(out); match != nil {
					return errors.Errorf("unexpected key pressed detected: %s", match[0])
				}
			}
		}
	}
	// Make sure lid is open in case DUT is in closed lid state at test start.
	if err := h.Servo.OpenLid(ctx); err != nil {
		return errors.Wrap(err, "error opening lid")
	}
	// Delay by `lidDelay` to ensure lid is detected as open before re-closing.
	if err := testing.Sleep(ctx, lidDelay); err != nil {
		return errors.Wrap(err, "failed to sleep")
	}
	if err := h.Servo.CloseLid(ctx); err != nil {
		return errors.Wrap(err, "error closing lid")
	}
	testing.ContextLog(ctx, "Checking for unexpected keypresses on lid close")
	if err := readKeyPress(); err != nil {
		return errors.Wrap(err, "expected no keypresses with lid closed")
	}
	if err := h.Servo.OpenLid(ctx); err != nil {
		return errors.Wrap(err, "error opening lid")
	}
	testing.ContextLog(ctx, "Checking for unexpected keypresses on lid open")
	if err := readKeyPress(); err != nil {
		return errors.Wrap(err, "expected no keypresses with lid closed")
	}
	return nil
}
|
package testCommon
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
)
// TestConfig is the configuration used for unit tests.
type TestConfig struct {
	// ProjectRoot is the absolute path of the acr-builder checkout.
	// It is filled in by init rather than read from the config file.
	ProjectRoot string
}

// Config is the instance of the test config object loaded from the default location.
var Config TestConfig

// MultiStageExampleRoot is the multistage example's root directory
// (tests/resources/hello-multistage under the project root).
var MultiStageExampleRoot string
// init locates the project root, loads the shared test configuration from
// tests/resources/test_config.json and derives well-known resource paths.
// Any failure panics so that a misconfigured test run fails fast.
func init() {
	projectRoot, err := getProjectRoot()
	if err != nil {
		panic(err.Error())
	}
	cfg, err := loadFrom(path.Join(projectRoot, "tests", "resources", "test_config.json"))
	if err != nil {
		panic(err.Error())
	}
	cfg.ProjectRoot = projectRoot
	Config = *cfg
	MultiStageExampleRoot = filepath.Join(projectRoot, "tests", "resources", "hello-multistage")
}
// loadFrom reads and unmarshals the JSON test configuration at location.
func loadFrom(location string) (*TestConfig, error) {
	raw, err := ioutil.ReadFile(location)
	if err != nil {
		return nil, err
	}
	config := new(TestConfig)
	if err := json.Unmarshal(raw, config); err != nil {
		return nil, err
	}
	return config, nil
}
// getProjectRoot scans for the root of the acr-builder project by walking up
// from the current working directory until a path element named "acr-builder"
// is found. It returns an error when the working directory cannot be read or
// no such element exists.
func getProjectRoot() (string, error) {
	dir, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("Error getting pwd: %s", err)
	}
	for {
		parent, name := filepath.Split(dir)
		if name == "acr-builder" {
			return dir, nil
		}
		parent = filepath.Clean(parent)
		// filepath.Clean never yields "", so the original `parent == ""` guard
		// could not fire and the loop spun forever once it reached the
		// filesystem root. Stop (and report an error instead of panicking)
		// when walking up no longer changes the path.
		if parent == "" || parent == dir {
			return "", fmt.Errorf("no acr-builder directory find on pwd")
		}
		dir = parent
	}
}
|
package es
import (
"context"
"fmt"
"github.com/olivere/elastic/v7"
"strings"
)
// LogData is one log record destined for Elasticsearch: Topic selects the
// target index and Data is the document body passed to BodyJson.
type LogData struct {
	Topic string `json:"topic"`
	Data  string `json:"data"`
}

var (
	// client is the shared Elasticsearch client, set by Init.
	client *elastic.Client
	// esCh buffers records between producers (SendToESChan) and the single
	// consumer goroutine started by Init (sendToES).
	esCh = make(chan *LogData, 10000)
)
// Init connects to Elasticsearch at addr (defaulting the scheme to http://)
// and starts the background goroutine that drains esCh, ready to receive the
// data forwarded from Kafka.
func Init(addr string) (err error) {
	if !strings.HasPrefix(addr, "http") {
		addr = "http://" + addr
	}
	client, err = elastic.NewClient(elastic.SetURL(addr))
	if err != nil {
		// Return the error to the caller instead of panicking: the original
		// panicked here, which also made the `return` after it unreachable.
		return err
	}
	go sendToES()
	fmt.Println("connect to es success")
	return nil
}
// SendToESChan queues msg for asynchronous indexing into Elasticsearch.
// It blocks when the channel buffer (10000 entries) is full.
func SendToESChan(msg *LogData) {
	esCh <- msg
}
// 从通道中取数据发送到es
func sendToES() {
for {
select {
case msg :=<- esCh:
put1, err := client.Index().Index(msg.Topic).BodyJson(msg.Data).Do(context.Background())
if err != nil {
// Handler error
fmt.Println(err)
}
fmt.Printf("Indexed success %v to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
}
}
} |
package renderer
import (
"fmt"
"math"
"runtime"
"github.com/gmacd/rays/core"
"github.com/gmacd/rays/geom"
"github.com/gmacd/rays/raytracer"
)
// Tile dimensions used to split the canvas into per-goroutine work units.
const (
	chunkSizeX int = 16
	chunkSizeY int = 16
)

// Camera maps canvas pixel coordinates onto a world-space view rectangle.
type Camera struct {
	wx1, wy1 float64 // world coordinates of the pixel-(0,0) corner
	wx2, wy2 float64 // world coordinates of the opposite corner
	dx, dy   float64 // world-space step per pixel in x and y
	origin core.Vec3 // point rays are cast from
}
// NewCamera builds a Camera projecting the world rectangle (wx1,wy1)-(wx2,wy2)
// onto a canvas of the given pixel dimensions, casting rays from origin.
func NewCamera(wx1, wy1, wx2, wy2 float64, canvasWidth, canvasHeight int, origin core.Vec3) *Camera {
	return &Camera{
		wx1: wx1, wy1: wy1,
		wx2: wx2, wy2: wy2,
		dx:     (wx2 - wx1) / float64(canvasWidth),
		dy:     (wy2 - wy1) / float64(canvasHeight),
		origin: origin,
	}
}
// ScreenToWorld converts canvas pixel coordinates to the corresponding
// world-space point on the camera's view plane (z = 0).
func (camera *Camera) ScreenToWorld(x, y int) core.Vec3 {
	wx := camera.wx1 + float64(x)*camera.dx
	wy := camera.wy1 + float64(y)*camera.dy
	return core.NewVec3(wx, wy, 0)
}
// Render raytraces scene into canvas, splitting the canvas into
// chunkSizeX x chunkSizeY tiles and rendering each tile in its own goroutine.
// It blocks until every tile has finished.
func Render(scene *geom.Scene, canvas *core.Canvas) {
	fmt.Printf("Rendering with %v CPUs.\n", runtime.NumCPU())
	runtime.GOMAXPROCS(runtime.NumCPU())

	camera := NewCamera(-4, 3, 4, -3, canvas.Width, canvas.Height, core.NewVec3(0, 0, -5))

	numXChunks := canvas.Width / chunkSizeX
	numYChunks := canvas.Height / chunkSizeY
	remainderChunkSizeX := canvas.Width - numXChunks*chunkSizeX
	remainderChunkSizeY := canvas.Height - numYChunks*chunkSizeY

	numGoRoutinesSpawned := 0
	c := make(chan int, (numXChunks+1)*(numYChunks+1))

	// spawnRow launches one goroutine per tile covering rows [startY, endY),
	// including the partial-width tile at the right edge if any. The original
	// duplicated this loop for the full rows and the remainder row.
	spawnRow := func(startY, endY int) {
		for chunkX := 0; chunkX < numXChunks; chunkX++ {
			go renderChunk(
				scene, camera, canvas, c,
				chunkX*chunkSizeX, startY,
				(chunkX+1)*chunkSizeX, endY)
			numGoRoutinesSpawned++
		}
		if remainderChunkSizeX > 0 {
			go renderChunk(
				scene, camera, canvas, c,
				numXChunks*chunkSizeX, startY,
				canvas.Width, endY)
			numGoRoutinesSpawned++
		}
	}

	for chunkY := 0; chunkY < numYChunks; chunkY++ {
		spawnRow(chunkY*chunkSizeY, chunkY*chunkSizeY+chunkSizeY)
	}
	if remainderChunkSizeY > 0 {
		spawnRow(numYChunks*chunkSizeY, canvas.Height)
	}

	// Wait for every tile goroutine to signal completion.
	for i := 0; i < numGoRoutinesSpawned; i++ {
		<-c
	}
}
// renderChunk raytraces the half-open pixel rectangle [x1,x2) x [y1,y2) and
// signals completion by sending one value on c.
func renderChunk(scene *geom.Scene, camera *Camera, canvas *core.Canvas, c chan int, x1, y1, x2, y2 int) {
	origin := camera.origin
	for y := y1; y < y2; y++ {
		for x := x1; x < x2; x++ {
			dir := camera.ScreenToWorld(x, y).Sub(origin).Normal()
			raytrace(scene, core.NewRay(origin, dir), canvas, x, y, c)
		}
	}
	c <- 1
}
// raytrace traces one primary ray and writes the clamped colour to canvas
// pixel (x, y). The channel c is accepted for signature symmetry with
// renderChunk but is not used here.
func raytrace(scene *geom.Scene, ray core.Ray, canvas *core.Canvas, x, y int, c chan int) {
	_, _, _, colour := raytracer.Raytrace(scene, ray, 1.0)
	clamp := func(v float64) float64 { return math.Min(v, 1.0) }
	colour.R = clamp(colour.R)
	colour.G = clamp(colour.G)
	colour.B = clamp(colour.B)
	canvas.SetPixel(x, y, colour)
}
|
/*
* @Author: Sy.
* @Create: 2019-11-01 20:54:15
* @LastTime: 2019-11-16 17:09:35
* @LastEdit: Sy.
* @FilePath: \server\controllers\admin_controllers\admin_role_controller.go
* @Description: 角色
*/
package admin_controllers
import (
"strconv"
"strings"
"time"
"github.com/astaxie/beego"
"vue-typescript-beego-admin/server/models"
)
// RoleController handles the admin CRUD endpoints for roles and their auth
// (permission) assignments.
type RoleController struct {
	BaseController
}
// Info returns the auth (permission) ids attached to the role identified by
// the "id" request parameter, used to pre-select tree nodes in the UI.
func (_this *RoleController) Info() {
	id, _ := _this.GetInt("id", 0)
	roleAuth, _ := models.RoleAuthGetById(id)
	authIds := make([]int, 0, len(roleAuth))
	for _, auth := range roleAuth {
		authIds = append(authIds, auth.AuthId)
	}
	_this.ajaxObj(map[string]interface{}{"authIds": authIds}, "", MSG_OK)
}
// Edit creates a new role (when the "id" parameter is absent or <= 0) or
// updates an existing one, and rewrites the role's auth associations from the
// comma-separated auth-id list in "nodesData".
func (_this *RoleController) Edit() {
	role := new(models.Role)
	role.RoleName = strings.TrimSpace(_this.GetString("roleName"))
	role.Detail = strings.TrimSpace(_this.GetString("detail"))
	role.CreateTime = time.Now().Unix()
	role.UpdateTime = time.Now().Unix()
	role.Status = 1
	auths := strings.TrimSpace(_this.GetString("nodesData"))
	// saveAuths stores one RoleAuth row per submitted auth id.
	saveAuths := func(roleID int64) {
		ra := new(models.RoleAuth)
		for _, v := range strings.Split(auths, ",") {
			if v == "" {
				// strings.Split("", ",") yields one empty element; the
				// original inserted a bogus AuthId=0 row in that case.
				continue
			}
			aid, _ := strconv.Atoi(v)
			ra.AuthId = aid
			ra.RoleId = roleID
			models.RoleAuthAdd(ra)
		}
	}
	role_id, _ := _this.GetInt("id")
	if role_id <= 0 {
		// Create.
		role.CreateId = _this.userId
		role.UpdateId = _this.userId
		if id, err := models.RoleAdd(role); err != nil {
			_this.ajaxMsg(err.Error(), MSG_ERR)
		} else {
			saveAuths(id)
		}
		_this.ajaxMsg("", MSG_OK)
		// Without this return, control fell through into the update path
		// after a create, re-saving the role and duplicating its auth rows.
		// (If ajaxMsg ends the request via StopRun this is redundant but
		// harmless.)
		return
	}
	// Update.
	role.Id = role_id
	role.UpdateTime = time.Now().Unix()
	role.UpdateId = _this.userId
	if err := role.Update(); err != nil {
		_this.ajaxMsg(err.Error(), MSG_ERR)
	} else {
		// Drop the role's existing auth rows, then re-add the submitted set.
		models.RoleAuthDelete(role_id)
		saveAuths(int64(role_id))
	}
	_this.ajaxMsg("", MSG_OK)
}
// Del soft-deletes the role identified by the "id" parameter by setting its
// status to 0; the role's auth rows are intentionally kept.
func (_this *RoleController) Del() {
	role_id, _ := _this.GetInt("id")
	role, err := models.RoleGetById(role_id)
	if err != nil {
		// The original discarded this error and would dereference a nil role.
		_this.ajaxMsg(err.Error(), MSG_ERR)
		return
	}
	role.Status = 0
	role.Id = role_id
	role.UpdateTime = time.Now().Unix()
	if err := role.Update(); err != nil {
		_this.ajaxMsg(err.Error(), MSG_ERR)
		return
	}
	// 删除该角色权限 (auth rows kept on purpose — soft delete)
	// models.RoleAuthDelete(role_id)
	_this.ajaxMsg("", MSG_OK)
}
// List returns a paginated, optionally name-filtered list of active roles
// with formatted timestamps.
func (_this *RoleController) List() {
	page, err := _this.GetInt("page")
	if err != nil {
		page = 1
	}
	limit, err := _this.GetInt("limit")
	if err != nil {
		limit = 30
	}
	roleName := strings.TrimSpace(_this.GetString("roleName"))
	_this.pageSize = limit
	// Query filters: always restrict to active roles.
	filters := []interface{}{"status", 1}
	if roleName != "" {
		filters = append(filters, "role_name__icontains", roleName)
	}
	result, count := models.RoleGetList(page, _this.pageSize, filters...)
	list := make([]map[string]interface{}, len(result))
	for i, role := range result {
		list[i] = map[string]interface{}{
			"id":         role.Id,
			"roleName":   role.RoleName,
			"detail":     role.Detail,
			"createTime": beego.Date(time.Unix(role.CreateTime, 0), "Y-m-d H:i:s"),
			"updateTime": beego.Date(time.Unix(role.UpdateTime, 0), "Y-m-d H:i:s"),
		}
	}
	_this.ajaxList(count, list)
}
|
package model
// Instance is a database model linking a producer and a book for a given
// year; the `db` tags name the backing columns.
type Instance struct {
	ID         uint   `db:"id"`
	ProducerID uint   `db:"producer_id"`
	BookID     uint   `db:"book_id"`
	Year       string `db:"year"`
}
|
/**
条件语句
*/
package main
import "fmt"
// main demonstrates Go's conditional and flow-control statements:
// if/else, for, goto, switch (with fallthrough), labeled break and
// labeled continue.
func main() {
	//if else
	a:=false
	if a{
		fmt.Println(a)
	}else{
		fmt.Println(!a)
	}
	//for
	for i:=0;i<10 ;i++ {
		fmt.Print(i)
		if i%2==0{
			// goto can be used to break out of the loop
			goto breaks
		}
	}
	// goto target label
breaks:
	fmt.Println("结束循环")
	// switch on a variable
	b:="bb"
	switch b {
	case "bb":
		fmt.Println("bb")
		// fallthrough executes the next case regardless of its condition
		fallthrough
	case "aa":
		fmt.Println("aa")
	default:
		fmt.Println("default")
	}
	// expression switch: no variable after switch, each case is a condition
	o := 8
	switch {
	case o %2 == 0 && o % 4 ==0:
		fmt.Println("2")
	case o % 4 ==0:
		fmt.Println("4")
	}
	// break works like Java's break
	/**
	break may also take a label, exiting the labeled block; the label must be
	attached to the corresponding for, switch or select block
	*/
BREAK:
	for i:=0;i<10 ;i++ {
		if(i>=5){
			break BREAK
		}
	}
	// continue works like Java's continue
	/**
	a labeled continue resumes with the next iteration of the labeled loop
	*/
forloop1:
	for i := 0; i < 5; i++ {
		// forloop2:
		for j := 0; j < 5; j++ {
			if i == 2 && j == 2 {
				continue forloop1
			}
			fmt.Printf("%v-%v\n", i, j)
		}
	}
}
|
package crane
// Event describes an event sent to crane to trigger an action.
// Crane uses event category to find relevant event handler.
type Event struct {
	Category string `json:"category"` // selects which handler processes the event
	Commit   string `json:"commit"`   // commit reference carried with the event
}
|
package cryptoAPI
import (
"crypto/sha256"
"encoding/hex"
)
// GenerateSHA256Hash returns the hex-encoded SHA-256 digest of data.
func GenerateSHA256Hash(data string) string {
	// sha256.Sum256 replaces the New/Write/Sum three-step sequence.
	sum := sha256.Sum256([]byte(data))
	return hex.EncodeToString(sum[:])
}
|
package consul
import (
"fmt"
"github.com/saileifeng/pepsi/registry/consul/register"
"github.com/saileifeng/pepsi/registry/consul/resolver"
"google.golang.org/grpc"
"google.golang.org/grpc/balancer/roundrobin"
"google.golang.org/grpc/health/grpc_health_v1"
"log"
"net"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
)
// NewClietnConn dials a round-robin-balanced gRPC client connection to
// serviceName, resolving addresses through consul at consulAddr.
func NewClietnConn(consulAddr, serviceName string) *grpc.ClientConn {
	schema, err := resolver.StartConsulResolver(consulAddr, serviceName)
	log.Println("NewClietnConn schema :", schema)
	// A consul cluster that has not finished leader election can fail the
	// resolver creation (observed on older consul versions), so retry a few
	// times before giving up.
	for attempt := 0; attempt < 10 && err != nil; attempt++ {
		time.Sleep(time.Second)
		log.Println("retry NewClietnConn")
		schema, err = resolver.StartConsulResolver(consulAddr, serviceName)
	}
	if err != nil {
		log.Fatal("init consul resovler err", err.Error())
	}
	// Set up a connection to the server.
	conn, err := grpc.Dial(fmt.Sprintf("%s:///%s", schema, serviceName), grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name))
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	return conn
}
// Registry bundles what is needed to serve a consul-registered gRPC service:
// the TCP listener, the gRPC server and the consul registration handle.
type Registry struct {
	consulAddr, service string                    // consul address and service name
	port                int                       // actual listening port
	listener            net.Listener              // TCP listener the server serves on
	Server              *grpc.Server              // exported so callers can register services
	register            *register.ConsulRegister  // used to deregister on shutdown
}
//NewRegister 创建新的服务注册
func NewRegister(consulAddr,service string,port int) *Registry {
listener, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%v",port))
if err != nil {
log.Fatalln(err)
}
addrs := strings.Split(listener.Addr().String(),":")
port,err = strconv.Atoi(addrs[len(addrs)-1])
if err != nil {
log.Fatalln(err)
}
log.Println("start server port :",addrs[len(addrs)-1])
//consul service register
nr := register.NewConsulRegister(consulAddr,service,port)
nr.Register()
//start grpc server
serv := grpc.NewServer()
//registe health check
grpc_health_v1.RegisterHealthServer(serv, ®ister.HealthImpl{})
return &Registry{consulAddr:consulAddr,service:service,port:port,listener:listener,Server:serv,register:nr}
}
// Run serves gRPC on the registry's listener and installs a shutdown hook
// that deregisters from consul and stops the server on termination signals.
func (r *Registry) Run() {
	//server hook
	go func() {
		quit := make(chan os.Signal, 1)
		// SIGKILL can be neither caught nor ignored, so registering for it
		// was a no-op; notify only on the catchable termination signals.
		signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP, syscall.SIGQUIT)
		<-quit
		log.Println("do run hook")
		r.register.Deregister()
		r.Server.Stop()
	}()
	if err := r.Server.Serve(r.listener); err != nil {
		panic(err)
	}
}
|
package main
import (
"context"
"fmt"
pb "go-postgres/bidaspin"
"go-postgres/bidaspin/gift"
"go-postgres/bidaspin/redis"
"google.golang.org/grpc"
"log"
"net"
"strconv"
)
// port is the TCP address the gRPC server listens on.
const (
	port = ":50050"
)

// BidaSpinServer implements the BidaSpin gRPC service; spin totals are kept
// in redis keyed by the user id.
type BidaSpinServer struct {
	pb.BidaSpinServer
}
// UpdateTotalSpin adds rq.Count to the user's stored spin total in redis and
// returns the new total.
func (s *BidaSpinServer) UpdateTotalSpin(ctx context.Context, rq *pb.SpinRequest) (*pb.SpinResponse, error) {
	count := int(rq.Count)
	strUid := strconv.Itoa(int(rq.UserId))
	rdb := redis.RedisClient()
	if val, err := rdb.Get(ctx, strUid).Result(); err == nil {
		prev, _ := strconv.Atoi(val)
		count += prev
	}
	// The original left the Get-error branch empty, so a first-time user's
	// count was never persisted; store the total in both cases.
	if err := rdb.Set(ctx, strUid, strconv.Itoa(count), 0).Err(); err != nil {
		// Return the error over gRPC instead of panicking the whole server.
		return nil, err
	}
	return &pb.SpinResponse{
		Message: "success",
		Data:    strconv.Itoa(count),
	}, nil
}
// GetTotalSpin returns the user's stored spin total, initializing the redis
// key to "0" when it does not exist yet.
func (s *BidaSpinServer) GetTotalSpin(ctx context.Context, rq *pb.SpinRequest) (*pb.SpinResponse, error) {
	strUid := strconv.Itoa(int(rq.UserId))
	rdb := redis.RedisClient()
	val, err := rdb.Get(ctx, strUid).Result()
	if err != nil {
		val = "0"
		if err := rdb.Set(ctx, strUid, "0", 0).Err(); err != nil {
			// Return the error over gRPC instead of panicking the server.
			return nil, err
		}
	}
	return &pb.SpinResponse{
		Message: "success",
		Data:    val,
	}, nil
}
// DoSpin consumes one spin for the user if any remain, returning a random
// gift as JSON; otherwise it reports "Fail"/"Total spin invalid".
func (s *BidaSpinServer) DoSpin(ctx context.Context, rq *pb.SpinRequest) (*pb.SpinResponse, error) {
	strUid := strconv.Itoa(int(rq.UserId))
	rdb := redis.RedisClient()
	val, err := rdb.Get(ctx, strUid).Result()
	data := "Total spin invalid"
	msg := "Fail"
	if err == nil {
		if remaining, _ := strconv.Atoi(val); remaining > 0 {
			randomGift := gift.RandomGift()
			data = randomGift.GiftToJsonString()
			msg = "success"
			// Decrement the remaining spins.
			if err := rdb.Set(ctx, strUid, strconv.Itoa(remaining-1), 0).Err(); err != nil {
				// Return the error over gRPC instead of panicking the server.
				return nil, err
			}
			fmt.Println("Update total spin success")
		}
	}
	return &pb.SpinResponse{
		Message: msg,
		Data:    data,
	}, nil
}
// main starts the BidaSpin gRPC server on the configured port.
func main() {
	lis, err := net.Listen("tcp", port)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	pb.RegisterBidaSpinServer(s, &BidaSpinServer{})
	// Serve blocks until the server stops, so the original post-Serve
	// "ready" message could only ever print after shutdown; announce
	// readiness before serving instead.
	fmt.Println("BidaH5Spin server ready")
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package family
import (
"familyTree/src/person"
"testing"
"github.com/stretchr/testify/assert"
)
// peopleStub describes one family member used to seed the test tree.
type peopleStub struct {
	name   string
	gender string
	father string
	mother string
	spouse string
}

// people is the seed data: King Shan's immediate family.
var people = []peopleStub{
	{"King Shan", "Male", "", "", "Queen Anga"},
	{"Chit", "Male", "King Shan", "Queen Anga", "Amba"},
	{"Ish", "Male", "King Shan", "Queen Anga", ""},
	{"Vich", "Male", "King Shan", "Queen Anga", "Lika"},
	{"Aras", "Male", "King Shan", "Queen Anga", "Chitra"},
	// "Femal" in the original was a typo for "Female".
	{"Satya", "Female", "King Shan", "Queen Anga", "Vyan"},
}

// dummyTree is the shared family tree populated once in TestMain.
var dummyTree = New()
// TestMain seeds dummyTree from the people stubs before running the tests.
func TestMain(m *testing.M) {
	for _, stub := range people {
		member := person.NewPersonBuilder().SetName(stub.name).SetGender(stub.gender).Build()
		if stub.mother != "" {
			member = member.ToBuilder().SetMother(person.NewPerson(stub.mother, person.Female)).Build()
		}
		if stub.father != "" {
			member = member.ToBuilder().SetFather(person.NewPerson(stub.father, person.Male)).Build()
		}
		if stub.spouse != "" {
			// The spouse's gender is the opposite of the member's.
			gender := person.Male
			if stub.gender == person.Male {
				gender = person.Female
			}
			member = member.ToBuilder().SetSpouse(person.NewPerson(stub.spouse, gender)).Build()
		}
		dummyTree.Add(member)
	}
	m.Run()
}
// TestShouldFindMemberFromTree verifies lookup by name returns the member
// with parents intact.
func TestShouldFindMemberFromTree(t *testing.T) {
	// Renamed the local from "person" (it shadowed the person package) and
	// put testify arguments in their documented (expected, actual) order.
	member, _ := dummyTree.FindMemberByName("Vich")
	assert.NotNil(t, member)
	assert.Equal(t, "Vich", member.GetName())
	assert.Equal(t, "Male", member.GetGender())
	assert.Equal(t, "King Shan", member.GetFather().GetName())
	assert.Equal(t, "Queen Anga", member.GetMother().GetName())
}
// TestShouldAddChildToTheTree verifies a child added under a mother becomes
// findable with both parents set.
func TestShouldAddChildToTheTree(t *testing.T) {
	member := person.NewPersonBuilder().SetName("Vila").SetGender("Female").Build()
	dummyTree.AddChild("Lika", member)
	// Renamed the local from "person" (it shadowed the person package) and
	// put testify arguments in their documented (expected, actual) order.
	child, _ := dummyTree.FindMemberByName("Vila")
	assert.Equal(t, "Vila", child.GetName())
	assert.Equal(t, "Female", child.GetGender())
	assert.Nil(t, child.GetSpouse())
	assert.Equal(t, "Vich", child.GetFather().GetName())
	assert.Equal(t, "Lika", child.GetMother().GetName())
}
// TestShouldVerifySameNameIsNotAddedMulitpleTimes checks that adding a
// duplicate name is rejected.
func TestShouldVerifySameNameIsNotAddedMulitpleTimes(t *testing.T) {
	member := person.NewPersonBuilder().SetName("Chit").SetGender("Male").Build()
	err := dummyTree.Add(member)
	assert.NotNil(t, err)
	// testify takes (expected, actual); the original had them swapped, which
	// garbles the failure message.
	assert.Equal(t, "Person with the same name already exists", err.Error())
}
// TestShouldReturnErrorWhenPersonIsNotFound checks AddChild fails when the
// named parent does not exist.
func TestShouldReturnErrorWhenPersonIsNotFound(t *testing.T) {
	member := person.NewPersonBuilder().SetName("Test").SetGender("Male").Build()
	err := dummyTree.AddChild("dummy", member)
	assert.NotNil(t, err)
	// testify takes (expected, actual); the original had them swapped.
	assert.Equal(t, "PERSON_NOT_FOUND", err.Error())
}
// TestShouldReturnChildAdditionFailed checks AddChild reports failure when
// the child cannot be attached under the given parent.
func TestShouldReturnChildAdditionFailed(t *testing.T) {
	member := person.NewPersonBuilder().SetName("Test").SetGender("Male").Build()
	err := dummyTree.AddChild("King Shan", member)
	assert.NotNil(t, err)
	// testify takes (expected, actual); the original had them swapped.
	assert.Equal(t, "CHILD_ADDITION_FAILED", err.Error())
}
|
--- src/vendor/golang.org/x/net/route/zsys_dragonfly.go.orig 2019-10-17 22:02:09 UTC
+++ src/vendor/golang.org/x/net/route/zsys_dragonfly.go
@@ -46,8 +46,6 @@ const (
sysRTM_REDIRECT = 0x6
sysRTM_MISS = 0x7
sysRTM_LOCK = 0x8
- sysRTM_OLDADD = 0x9
- sysRTM_OLDDEL = 0xa
sysRTM_RESOLVE = 0xb
sysRTM_NEWADDR = 0xc
sysRTM_DELADDR = 0xd
@@ -89,6 +87,8 @@ const (
sizeofIfmaMsghdrDragonFlyBSD4 = 0x10
sizeofIfAnnouncemsghdrDragonFlyBSD4 = 0x18
+ sizeofIfaMsghdrDragonFlyBSD58 = 0x18
+
sizeofRtMsghdrDragonFlyBSD4 = 0x98
sizeofRtMetricsDragonFlyBSD4 = 0x70
|
package routes
import (
"encoding/json"
"net/http"
"github.com/cjburchell/survey/database"
"github.com/cjburchell/survey/models"
"github.com/cjburchell/uatu-go"
"github.com/gorilla/mux"
)
// Setup registers the survey REST routes and a JSON status endpoint on the
// given router.
func Setup(router *mux.Router, log log.ILog) {
	surveyRoute := router.PathPrefix("/survey").Subrouter()
	surveyRoute.HandleFunc("/{surveyId}", func(w http.ResponseWriter, r *http.Request) {
		handleGetSurvey(w, r, log)
	}).Methods("GET")
	surveyRoute.HandleFunc("/{surveyId}/results", func(w http.ResponseWriter, r *http.Request) {
		handleGetResults(w, r, log)
	}).Methods("GET")
	surveyRoute.HandleFunc("/{surveyId}/results/{questionId}", func(w http.ResponseWriter, r *http.Request) {
		handleGetResultsForQuestion(w, r, log)
	}).Methods("GET")
	surveyRoute.HandleFunc("/{surveyId}/answers", func(w http.ResponseWriter, r *http.Request) {
		handleSetAnswers(w, r, log)
	}).Methods("POST")
	surveyRoute.HandleFunc("/{surveyId}/count", func(w http.ResponseWriter, r *http.Request) {
		handleGetSurveyCount(w, r, log)
	}).Methods("GET")
	// Liveness probe: always replies with the JSON string "Ok".
	router.HandleFunc("/@status", func(w http.ResponseWriter, _ *http.Request) {
		reply, _ := json.Marshal("Ok")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write(reply)
	}).Methods("GET")
}
// handleGetSurvey serves GET /survey/{surveyId}: loads the survey from the
// database and writes it as JSON; a lookup failure yields 400.
func handleGetSurvey(writer http.ResponseWriter, request *http.Request, log log.ILog) {
	log.Debugf("handleGetQuestions %s", request.URL.String())
	surveyID := mux.Vars(request)["surveyId"]
	survey, err := database.GetSurvey(surveyID)
	if err != nil {
		log.Error(err, "Unable to get survey")
		writer.WriteHeader(http.StatusBadRequest)
		return
	}
	reply, err := json.Marshal(survey)
	if err != nil {
		log.Error(err, "Marshal Failed")
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	writer.Header().Set("Content-Type", "application/json")
	writer.WriteHeader(http.StatusOK)
	writer.Write(reply)
}
// handleGetSurveyCount serves GET /survey/{surveyId}/count: returns the
// number of submissions as {"count": N}.
func handleGetSurveyCount(writer http.ResponseWriter, request *http.Request, log log.ILog) {
	log.Debugf("handleGetSurveyCount %s", request.URL.String())
	surveyID := mux.Vars(request)["surveyId"]
	// Wrap the count in a one-field struct so it serializes as {"count": N}.
	result := struct {
		Count int `json:"count"`
	}{Count: database.GetSubmitCount(surveyID)}
	reply, err := json.Marshal(result)
	if err != nil {
		log.Error(err, "Marshal Failed")
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	writer.Header().Set("Content-Type", "application/json")
	writer.WriteHeader(http.StatusOK)
	writer.Write(reply)
}
// handleGetResults serves GET /survey/{surveyId}/results: returns aggregated
// results for every question as JSON. A nil result set yields 200 with no body.
func handleGetResults(writer http.ResponseWriter, request *http.Request, log log.ILog) {
	log.Debugf("handleGetResults %s", request.URL.String())
	surveyID := mux.Vars(request)["surveyId"]
	results, err := database.GetAllResults(surveyID)
	if err != nil {
		log.Error(err, "GetAllResults Failed")
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	if results == nil {
		writer.WriteHeader(http.StatusOK)
		return
	}
	reply, err := json.Marshal(results)
	if err != nil {
		log.Error(err, "Marshal Failed")
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	writer.Header().Set("Content-Type", "application/json")
	writer.WriteHeader(http.StatusOK)
	writer.Write(reply)
}
// handleGetResultsForQuestion serves GET /survey/{surveyId}/results/{questionId}:
// returns results for one question as JSON. A nil result set yields 200 with
// no body.
func handleGetResultsForQuestion(writer http.ResponseWriter, request *http.Request, log log.ILog) {
	log.Debugf("handleGetResultsForId %s", request.URL.String())
	vars := mux.Vars(request)
	results, err := database.GetResults(vars["surveyId"], vars["questionId"])
	if err != nil {
		log.Error(err, "GetAllResults Failed")
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	if results == nil {
		writer.WriteHeader(http.StatusOK)
		return
	}
	reply, err := json.Marshal(results)
	if err != nil {
		log.Error(err, "Marshal Failed")
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	writer.Header().Set("Content-Type", "application/json")
	writer.WriteHeader(http.StatusOK)
	writer.Write(reply)
}
// handleSetAnswers serves POST /survey/{surveyId}/answers: decodes the
// submitted answers, bumps the survey's submit counter, and increments the
// tally for each answered question. Replies 204 on success.
func handleSetAnswers(writer http.ResponseWriter, request *http.Request, log log.ILog) {
	log.Debugf("handleSetAnswers %s", request.URL.String())
	surveyID := mux.Vars(request)["surveyId"]
	var answers []models.Answer
	if err := json.NewDecoder(request.Body).Decode(&answers); err != nil {
		log.Error(err, "Unmarshal Failed")
		writer.WriteHeader(http.StatusBadRequest)
		return
	}
	if err := database.IncrementSubmitCount(surveyID); err != nil {
		log.Error(err, "Unable to Increment Result")
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	for _, answer := range answers {
		if err := database.IncrementResult(surveyID, answer.QuestionID, answer.Answer); err != nil {
			log.Error(err, "Unable to Increment Result")
			writer.WriteHeader(http.StatusInternalServerError)
			return
		}
	}
	writer.WriteHeader(http.StatusNoContent)
}
|
package main
import (
"encoding/csv"
"fmt"
"io"
"os"
"strings"
"github.com/fatih/color"
)
// generateFromFile reads a semicolon-separated CSV of (name;type;fields)
// records and runs the generator for each one. Unrecoverable input errors
// print a message and exit the process.
func generateFromFile(file string) {
	fail := func(msg string) {
		color.Red(msg)
		os.Exit(1)
	}
	if !strings.HasSuffix(file, ".csv") {
		fail("el archivo de importación de paquetes debe tener extensión .csv")
	}
	f, err := os.Open(file)
	if err != nil {
		fail(fmt.Sprintf("no se pudo abrir el archivo de importación de paquetes: %v", err))
	}
	defer f.Close()
	reader := csv.NewReader(f)
	reader.Comma = ';'
	reader.FieldsPerRecord = 3
	for {
		record, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			fail(fmt.Sprintf("error leyendo la línea del archivo de importación de paquetes: %v", err))
		}
		// Records without a fields column are skipped with a warning.
		if record[2] == "" {
			color.Red(fmt.Sprintf("no se procesó el modelo: %s porque no se recibieron campos", record[0]))
			continue
		}
		// n, t and fs are package-level inputs consumed by execute().
		n = record[0]
		t = record[1]
		fs = getFields(record[2])
		execute()
	}
}
|
package main
import "fmt"
// Name is a string-backed named type used to demonstrate switching over a
// defined type's constant values.
type Name string

const (
	NA Name = "1"
	YA Name = "2"
)
// ceshi prints a label for the Name value matching n; unmatched values print
// nothing.
func ceshi(n string) {
	name := Name(n)
	if name == NA {
		fmt.Println("aNa")
	} else if name == YA {
		fmt.Println(2)
	}
}
// main feeds a plain string to ceshi, which converts it to the Name type for
// the switch.
func main() {
	s := "1"
	ceshi(s)
}
|
//go:build !boringcrypto
// +build !boringcrypto
package noiseutil
import (
"github.com/flynn/noise"
)
// EncryptLockNeeded indicates if calls to Encrypt need a lock.
// In this non-boringcrypto build it is false; presumably the boringcrypto
// variant sets it to true — confirm against the boringcrypto build file.
const EncryptLockNeeded = false

// CipherAESGCM is the standard noise.CipherAESGCM when boringcrypto is not enabled.
var CipherAESGCM noise.CipherFunc = noise.CipherAESGCM
|
package clair
import (
"encoding/json"
"fmt"
"net/http"
v1 "github.com/coreos/clair/api/v1"
)
// getErrorFromResponse converts a Clair API response into an error carrying
// the HTTP status code and, when the body holds a Clair v1 error envelope,
// its message.
func getErrorFromResponse(resp *http.Response) error {
	var errorStruct struct {
		Error v1.Error
	}
	errMsg := "(no message returned)"
	// Attempt to unmarshal the body into a JSON structure, ignoring decode
	// errors. The original condition was inverted (`nil != err`): it only
	// consulted the decoded message when decoding had FAILED, so a
	// successfully decoded message was always discarded.
	if err := json.NewDecoder(resp.Body).Decode(&errorStruct); err == nil {
		if errorStruct.Error.Message != "" {
			errMsg = errorStruct.Error.Message
		}
	}
	return fmt.Errorf("status code %d, %s", resp.StatusCode, errMsg)
}
|
package main
import "fmt"
// generate returns the first numRows rows of Pascal's triangle. Each row's
// interior entries are the sum of the two entries above them.
func generate(numRows int) [][]int {
	result := make([][]int, 0, numRows)
	for i := 0; i < numRows; i++ {
		row := make([]int, i+1)
		row[0], row[i] = 1, 1
		for j := 1; j < i; j++ {
			prev := result[i-1]
			row[j] = prev[j-1] + prev[j]
		}
		result = append(result, row)
	}
	return result
}
// main prints the first five rows of Pascal's triangle.
func main() {
	fmt.Println(generate(5))
}
|
package testing
import (
"os"
"testing"
)
// PreCheckAcc only allows acceptance tests to run when explicitly enabled via
// the OF_ACC environment variable; otherwise it skips the calling test.
func PreCheckAcc(t *testing.T) {
	// Any non-empty value of OF_ACC enables the acceptance tests.
	if os.Getenv("OF_ACC") == "" {
		t.Skip("To enable acceptance tests please set environment variable OF_ACC=1")
	}
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexectestutils
import (
"context"
"fmt"
"math"
"math/rand"
"reflect"
"sort"
"strings"
"testing"
"testing/quick"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/coldataext"
"github.com/cockroachdb/cockroach/pkg/col/typeconv"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/json"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/timeofday"
"github.com/cockroachdb/errors"
"github.com/pmezard/go-difflib/difflib"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Tuple represents a row with any-type columns. Elements may be Go
// primitives, apd.Decimal values, byte slices, or tree.Datum values.
type Tuple []interface{}
// String renders the tuple as "[v1, v2, ...]", printing decimals via their
// String method, byte slices as raw strings, and everything else with %v.
func (t Tuple) String() string {
	var sb strings.Builder
	sb.WriteString("[")
	for i, v := range t {
		if i > 0 {
			sb.WriteString(", ")
		}
		switch d := v.(type) {
		case apd.Decimal:
			sb.WriteString(d.String())
		case *apd.Decimal:
			sb.WriteString(d.String())
		case []byte:
			sb.WriteString(string(d))
		default:
			sb.WriteString(fmt.Sprintf("%v", v))
		}
	}
	sb.WriteString("]")
	return sb.String()
}
// less reports whether t sorts before other, comparing column by column.
// Ordering rules, in priority order: nil sorts before any non-nil value;
// tree.Datum values compare via Datum.Compare; apd.Decimal values compare via
// CmpTotal; []byte compares as strings; remaining Go primitives compare
// natively. tupleFromOtherSet, when non-nil, supplies the datum types used to
// compare string columns (see the comment on Tuples.sort).
func (t Tuple) less(other Tuple, evalCtx *tree.EvalContext, tupleFromOtherSet Tuple) bool {
	for i := range t {
		// If either side is nil, we short circuit the comparison. For nil, we
		// define: nil < {any_none_nil}
		if t[i] == nil && other[i] == nil {
			continue
		} else if t[i] == nil && other[i] != nil {
			return true
		} else if t[i] != nil && other[i] == nil {
			return false
		}
		// Check whether we have datum-backed values.
		if d1, ok := t[i].(tree.Datum); ok {
			d2 := other[i].(tree.Datum)
			cmp := d1.Compare(evalCtx, d2)
			if cmp == 0 {
				continue
			}
			return cmp < 0
		}
		lhsVal := reflect.ValueOf(t[i])
		rhsVal := reflect.ValueOf(other[i])
		// apd.Decimal are not comparable, so we check that first.
		if lhsVal.Type().Name() == "Decimal" && lhsVal.CanInterface() {
			lhsDecimal := lhsVal.Interface().(apd.Decimal)
			rhsDecimal := rhsVal.Interface().(apd.Decimal)
			cmp := (&lhsDecimal).CmpTotal(&rhsDecimal)
			if cmp == 0 {
				continue
			} else if cmp == -1 {
				return true
			} else {
				return false
			}
		}
		// types.Bytes is represented as []uint8.
		if lhsVal.Type().String() == "[]uint8" {
			lhsStr := string(lhsVal.Interface().([]uint8))
			rhsStr := string(rhsVal.Interface().([]uint8))
			if lhsStr == rhsStr {
				continue
			} else if lhsStr < rhsStr {
				return true
			} else {
				return false
			}
		}
		// No need to compare these two elements when they are the same.
		if t[i] == other[i] {
			continue
		}
		// Fall back to kind-specific comparison for Go primitives.
		switch typ := lhsVal.Type().Name(); typ {
		case "int", "int16", "int32", "int64":
			return lhsVal.Int() < rhsVal.Int()
		case "uint", "uint16", "uint32", "uint64":
			return lhsVal.Uint() < rhsVal.Uint()
		case "float", "float64":
			return lhsVal.Float() < rhsVal.Float()
		case "bool":
			return !lhsVal.Bool() && rhsVal.Bool()
		case "string":
			lString, rString := lhsVal.String(), rhsVal.String()
			if tupleFromOtherSet != nil && len(tupleFromOtherSet) > i {
				if d, ok := tupleFromOtherSet[i].(tree.Datum); ok {
					// The tuple from the other set has a datum value, so we
					// will convert the string to datum. See the comment on
					// tuples.sort for more details.
					d1 := stringToDatum(lString, d.ResolvedType(), evalCtx)
					d2 := stringToDatum(rString, d.ResolvedType(), evalCtx)
					cmp := d1.Compare(evalCtx, d2)
					if cmp == 0 {
						continue
					}
					return cmp < 0
				}
			}
			return lString < rString
		default:
			colexecerror.InternalError(errors.AssertionFailedf("Unhandled comparison type: %s", typ))
		}
	}
	// All columns compared equal.
	return false
}
// clone returns a shallow copy of t (the elements themselves are shared).
func (t Tuple) clone() Tuple {
	out := make(Tuple, len(t))
	copy(out, t)
	return out
}
// Tuples represents a table with any-type columns: a slice of rows.
type Tuples []Tuple
// Clone returns a row-by-row copy of t; each row is cloned so the result
// shares no row storage with the original.
func (t Tuples) Clone() Tuples {
	out := make(Tuples, 0, len(t))
	for _, row := range t {
		out = append(out, row.clone())
	}
	return out
}
// String renders the tuples as a bracketed, comma-separated list, e.g.
// "[{1 2}, {3 4}]".
func (t Tuples) String() string {
	var b strings.Builder
	b.WriteString("[")
	for i, tup := range t {
		if i > 0 {
			b.WriteString(", ")
		}
		b.WriteString(tup.String())
	}
	b.WriteString("]")
	return b.String()
}
// sort returns a copy of sorted tuples. tupleFromOtherSet is any tuple that
// comes from other tuples and is used to determine the desired types.
//
// Currently, this function is only used in order to speed up the comparison of
// the expected tuple set with the actual one, and it is possible that we have
// tree.Datum in the latter but strings in the former. In order to use the same
// ordering when sorting the strings, we need to peek into the actual tuple to
// determine whether we want to convert the string to datum before comparison.
func (t Tuples) sort(evalCtx *tree.EvalContext, tupleFromOtherSet Tuple) Tuples {
	// Deep-copy first so that the receiver is left untouched.
	sorted := make(Tuples, len(t))
	for i, tup := range t {
		sorted[i] = make(Tuple, len(tup))
		copy(sorted[i], tup)
	}
	// Stable sort using the element-wise Tuple.less comparison.
	sort.SliceStable(sorted, func(i, j int) bool {
		return sorted[i].less(sorted[j], evalCtx, tupleFromOtherSet)
	})
	return sorted
}
// VerifierType determines how the expected and the actual tuples should be
// compared: either exactly in order, or as unordered multisets.
type VerifierType int

const (
	// OrderedVerifier compares the input and output tuples, returning an error
	// if they're not identical.
	OrderedVerifier VerifierType = iota
	// UnorderedVerifier compares the input and output tuples as sets, returning
	// an error if they aren't equal by set comparison (irrespective of order).
	UnorderedVerifier
)
// maybeHasNulls is a helper function that returns whether any of the columns in b
// (maybe) have nulls. A zero-length batch is treated as having no nulls.
func maybeHasNulls(b coldata.Batch) bool {
	if b.Length() == 0 {
		return false
	}
	width := b.Width()
	for col := 0; col < width; col++ {
		if b.ColVec(col).MaybeHasNulls() {
			return true
		}
	}
	return false
}
// TestRunner is the signature of RunTestsWithTyps that can be used to
// substitute it with RunTestsWithoutAllNullsInjection when applicable (i.e.
// when the all-nulls injection check would not change the operator's output).
type TestRunner func(*testing.T, *colmem.Allocator, []Tuples, [][]*types.T, Tuples, VerifierType, func([]colexecop.Operator) (colexecop.Operator, error))
// RunTests is a helper that automatically runs your tests with varied batch
// sizes and with and without a random selection vector.
// tups is the sets of input tuples.
// expected is the set of output tuples.
// constructor is a function that takes a list of input Operators and returns
// the operator to test, or an error.
//
// This is a convenience wrapper around RunTestsWithTyps with a nil type
// schema, meaning the input types are extrapolated from the tuples at runtime.
func RunTests(
	t *testing.T,
	allocator *colmem.Allocator,
	tups []Tuples,
	expected Tuples,
	verifier VerifierType,
	constructor func(inputs []colexecop.Operator) (colexecop.Operator, error),
) {
	RunTestsWithTyps(t, allocator, tups, nil /* typs */, expected, verifier, constructor)
}
// RunTestsWithTyps is the same as RunTests with an ability to specify the
// types of the input tuples.
// - typs is the type schema of the input tuples. Note that this is a multi-
//   dimensional slice which allows for specifying different schemas for each
//   of the inputs.
//
// In addition to running the standard checks (via
// RunTestsWithoutAllNullsInjection), this function runs the "all nulls
// injection" check: all input values are replaced with NULLs, and the output
// is expected to change (unless the input was already all-NULL).
func RunTestsWithTyps(
	t *testing.T,
	allocator *colmem.Allocator,
	tups []Tuples,
	typs [][]*types.T,
	expected Tuples,
	verifier VerifierType,
	constructor func(inputs []colexecop.Operator) (colexecop.Operator, error),
) {
	RunTestsWithoutAllNullsInjection(t, allocator, tups, typs, expected, verifier, constructor)

	{
		ctx := context.Background()
		evalCtx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings())
		defer evalCtx.Stop(ctx)
		log.Info(ctx, "allNullsInjection")
		// This test replaces all values in the input tuples with nulls and ensures
		// that the output is different from the "original" output (i.e. from the
		// one that is returned without nulls injection).
		onlyNullsInTheInput := true
	OUTER:
		// Scan every value of every input tuple; a single non-nil value is
		// enough to flip the flag and stop scanning.
		for _, tup := range tups {
			for i := 0; i < len(tup); i++ {
				for j := 0; j < len(tup[i]); j++ {
					if tup[i][j] != nil {
						onlyNullsInTheInput = false
						break OUTER
					}
				}
			}
		}
		// opConstructor builds the operator chain over the test inputs,
		// optionally injecting all-NULL values into every input.
		opConstructor := func(injectAllNulls bool) colexecop.Operator {
			inputSources := make([]colexecop.Operator, len(tups))
			var inputTypes []*types.T
			for i, tup := range tups {
				if typs != nil {
					inputTypes = typs[i]
				}
				input := NewOpTestInput(allocator, 1 /* batchSize */, tup, inputTypes).(*opTestInput)
				input.injectAllNulls = injectAllNulls
				inputSources[i] = input
			}
			op, err := constructor(inputSources)
			if err != nil {
				t.Fatal(err)
			}
			op.Init()
			return op
		}
		originalOp := opConstructor(false /* injectAllNulls */)
		opWithNulls := opConstructor(true /* injectAllNulls */)
		foundDifference := false
		// Run both operators in lockstep, batch by batch, and compare their
		// outputs until either a difference is found or both are exhausted.
		for {
			originalBatch := originalOp.Next(ctx)
			batchWithNulls := opWithNulls.Next(ctx)
			if originalBatch.Length() != batchWithNulls.Length() {
				foundDifference = true
				break
			}
			if originalBatch.Length() == 0 {
				break
			}
			var originalTuples, tuplesWithNulls Tuples
			for i := 0; i < originalBatch.Length(); i++ {
				// We checked that the batches have the same length.
				originalTuples = append(originalTuples, GetTupleFromBatch(originalBatch, i))
				tuplesWithNulls = append(tuplesWithNulls, GetTupleFromBatch(batchWithNulls, i))
			}
			if err := AssertTuplesSetsEqual(originalTuples, tuplesWithNulls, evalCtx); err != nil {
				// err is non-nil which means that the batches are different.
				foundDifference = true
				break
			}
		}
		if onlyNullsInTheInput {
			require.False(t, foundDifference, "since there were only "+
				"nulls in the input tuples, we expect for all nulls injection to not "+
				"change the output")
		} else {
			require.True(t, foundDifference, "since there were "+
				"non-nulls in the input tuples, we expect for all nulls injection to "+
				"change the output")
		}
		closeIfCloser(ctx, t, originalOp)
		closeIfCloser(ctx, t, opWithNulls)
	}
}
// closeIfCloser is a testing utility function that checks whether op is a
// colexecop.Closer and closes it if so.
//
// RunTests harness needs to do that once it is done with op. In non-test
// setting, the closing happens at the end of the query execution.
func closeIfCloser(ctx context.Context, t *testing.T, op colexecop.Operator) {
	closer, ok := op.(colexecop.Closer)
	if !ok {
		return
	}
	if err := closer.Close(ctx); err != nil {
		t.Fatal(err)
	}
}
// isOperatorChainResettable traverses the whole operator tree rooted at op and
// returns true if all nodes are resetters.
func isOperatorChainResettable(op execinfra.OpNode) bool {
	if _, ok := op.(colexecop.ResettableOperator); !ok {
		return false
	}
	// Recurse into every child; a single non-resettable node fails the whole
	// chain.
	numChildren := op.ChildCount(true /* verbose */)
	for i := 0; i < numChildren; i++ {
		if !isOperatorChainResettable(op.Child(i, true /* verbose */)) {
			return false
		}
	}
	return true
}
// RunTestsWithoutAllNullsInjection is the same as RunTests, but it skips the
// all nulls injection test. Use this only when the all nulls injection should
// not change the output of the operator under testing.
// NOTE: please leave a justification why you're using this variant of
// RunTests.
//
// In addition to the main verification pass (via RunTestsWithFn), this runs:
//   - a "verifySelAndNullResets" check (OrderedVerifier only) that makes sure
//     operators that own their batches are unaffected by downstream mutation;
//   - a "randomNullsInjection" check that ensures random NULLs in the input do
//     not cause a panic.
func RunTestsWithoutAllNullsInjection(
	t *testing.T,
	allocator *colmem.Allocator,
	tups []Tuples,
	typs [][]*types.T,
	expected Tuples,
	verifier VerifierType,
	constructor func(inputs []colexecop.Operator) (colexecop.Operator, error),
) {
	ctx := context.Background()
	// Default to unordered (set) verification; switch to ordered below.
	verifyFn := (*OpTestOutput).VerifyAnyOrder
	skipVerifySelAndNullsResets := true
	if verifier == OrderedVerifier {
		verifyFn = (*OpTestOutput).Verify
		// Note that this test makes sense only if we expect tuples to be
		// returned in the same order (otherwise the second batch's selection
		// vector or nulls info can be different and that is totally valid).
		skipVerifySelAndNullsResets = false
	}
	RunTestsWithFn(t, allocator, tups, typs, func(t *testing.T, inputs []colexecop.Operator) {
		op, err := constructor(inputs)
		if err != nil {
			t.Fatal(err)
		}
		out := NewOpTestOutput(op, expected)
		if err := verifyFn(out); err != nil {
			t.Fatal(err)
		}
		// If the whole operator chain supports Reset, verify that resetting
		// and re-running produces the same output.
		if isOperatorChainResettable(op) {
			log.Info(ctx, "reusing after reset")
			out.Reset(ctx)
			if err := verifyFn(out); err != nil {
				t.Fatal(err)
			}
		}
		closeIfCloser(ctx, t, op)
	})

	if !skipVerifySelAndNullsResets {
		log.Info(ctx, "verifySelAndNullResets")
		// This test ensures that operators that "own their own batches", such as
		// any operator that has to reshape its output, are not affected by
		// downstream modification of batches.
		// We run the main loop twice: once to determine what the operator would
		// output on its second Next call (we need the first call to Next to get a
		// reference to a batch to modify), and a second time to modify the batch
		// and verify that this does not change the operator output.
		// NOTE: this test makes sense only if the operator returns two non-zero
		// length batches (if not, we short-circuit the test since the operator
		// doesn't have to restore anything on a zero-length batch).
		var (
			secondBatchHasSelection, secondBatchHasNulls bool
			inputTypes                                   []*types.T
		)
		for round := 0; round < 2; round++ {
			inputSources := make([]colexecop.Operator, len(tups))
			for i, tup := range tups {
				if typs != nil {
					inputTypes = typs[i]
				}
				inputSources[i] = NewOpTestInput(allocator, 1 /* batchSize */, tup, inputTypes)
			}
			op, err := constructor(inputSources)
			if err != nil {
				t.Fatal(err)
			}
			// We might short-circuit, so defer the closing of the operator.
			op.Init()
			b := op.Next(ctx)
			if b.Length() == 0 {
				closeIfCloser(ctx, t, op)
				return
			}
			if round == 1 {
				// Deliberately flip the selection/nulls state observed in
				// round 0 to simulate downstream mutation of the batch.
				if secondBatchHasSelection {
					b.SetSelection(false)
				} else {
					b.SetSelection(true)
				}
				if secondBatchHasNulls {
					// ResetInternalBatch will throw away the null information.
					b.ResetInternalBatch()
				} else {
					for i := 0; i < b.Width(); i++ {
						b.ColVec(i).Nulls().SetNulls()
					}
				}
			}
			b = op.Next(ctx)
			if b.Length() == 0 {
				closeIfCloser(ctx, t, op)
				return
			}
			if round == 0 {
				// Record the second batch's selection/nulls state for round 1.
				secondBatchHasSelection = b.Selection() != nil
				secondBatchHasNulls = maybeHasNulls(b)
			}
			if round == 1 {
				// The operator must have restored its own state despite our
				// mutation of the previous batch.
				if secondBatchHasSelection {
					assert.NotNil(t, b.Selection())
				} else {
					assert.Nil(t, b.Selection())
				}
				if secondBatchHasNulls {
					assert.True(t, maybeHasNulls(b))
				} else {
					assert.False(t, maybeHasNulls(b))
				}
			}
			closeIfCloser(ctx, t, op)
		}
	}

	{
		log.Info(ctx, "randomNullsInjection")
		// This test randomly injects nulls in the input tuples and ensures that
		// the operator doesn't panic.
		inputSources := make([]colexecop.Operator, len(tups))
		var inputTypes []*types.T
		for i, tup := range tups {
			if typs != nil {
				inputTypes = typs[i]
			}
			input := NewOpTestInput(allocator, 1 /* batchSize */, tup, inputTypes).(*opTestInput)
			input.injectRandomNulls = true
			inputSources[i] = input
		}
		op, err := constructor(inputSources)
		if err != nil {
			t.Fatal(err)
		}
		op.Init()
		// Drain the operator fully; the only assertion is "no panic".
		for b := op.Next(ctx); b.Length() > 0; b = op.Next(ctx) {
		}
		closeIfCloser(ctx, t, op)
	}
}
// RunTestsWithFn is like RunTests, but the input function is responsible for
// performing any required tests. Please note that RunTestsWithFn is a worse
// testing facility than RunTests, because it can't get a handle on the operator
// under test and therefore can't perform as many extra checks. You should
// always prefer using RunTests over RunTestsWithFn.
// - tups is the sets of input tuples.
// - typs is the type schema of the input tuples. Note that this is a multi-
//   dimensional slice which allows for specifying different schemas for each
//   of the inputs. This can also be left nil in which case the types will be
//   determined at the runtime looking at the first input tuple, and if the
//   determination doesn't succeed for a value of the tuple (likely because
//   it's a nil), then that column will be assumed by default of type Int64.
// - test is a function that takes a list of input Operators and performs
//   testing with t.
func RunTestsWithFn(
	t *testing.T,
	allocator *colmem.Allocator,
	tups []Tuples,
	typs [][]*types.T,
	test func(t *testing.T, inputs []colexecop.Operator),
) {
	// Run tests over batchSizes of 1, (sometimes) a batch size that is small but
	// greater than 1, and a full coldata.BatchSize().
	batchSizes := make([]int, 0, 3)
	batchSizes = append(batchSizes, 1)
	smallButGreaterThanOne := int(math.Trunc(.002 * float64(coldata.BatchSize())))
	if smallButGreaterThanOne > 1 {
		batchSizes = append(batchSizes, smallButGreaterThanOne)
	}
	batchSizes = append(batchSizes, coldata.BatchSize())

	for _, batchSize := range batchSizes {
		for _, useSel := range []bool{false, true} {
			log.Infof(context.Background(), "batchSize=%d/sel=%t", batchSize, useSel)
			inputSources := make([]colexecop.Operator, len(tups))
			var inputTypes []*types.T
			if useSel {
				// A single PRNG suffices for all input sources; previously a
				// new one was created per input inside the loop
				// (loop-invariant work).
				rng, _ := randutil.NewPseudoRand()
				for i, tup := range tups {
					if typs != nil {
						inputTypes = typs[i]
					}
					inputSources[i] = newOpTestSelInput(allocator, rng, batchSize, tup, inputTypes)
				}
			} else {
				for i, tup := range tups {
					if typs != nil {
						inputTypes = typs[i]
					}
					inputSources[i] = NewOpTestInput(allocator, batchSize, tup, inputTypes)
				}
			}
			test(t, inputSources)
		}
	}
}
// RunTestsWithFixedSel is a helper that (with a given fixed selection vector)
// automatically runs your tests with varied batch sizes. Provide a test
// function that takes a list of input Operators, which will give back the
// tuples provided in batches.
func RunTestsWithFixedSel(
	t *testing.T,
	allocator *colmem.Allocator,
	tups []Tuples,
	typs []*types.T,
	sel []int,
	test func(t *testing.T, inputs []colexecop.Operator),
) {
	batchSizes := []int{1, 2, 3, 16, 1024}
	for _, batchSize := range batchSizes {
		log.Infof(context.Background(), "batchSize=%d/fixedSel", batchSize)
		inputs := make([]colexecop.Operator, len(tups))
		for i := range tups {
			inputs[i] = NewOpFixedSelTestInput(allocator, sel, batchSize, tups[i], typs)
		}
		test(t, inputs)
	}
}
// stringToDatum parses val as a SQL expression, type-checks it against typ,
// and evaluates it to a tree.Datum. Any failure is reported as an internal
// error (these are test inputs, so failures indicate test bugs).
func stringToDatum(val string, typ *types.T, evalCtx *tree.EvalContext) tree.Datum {
	parsed, err := parser.ParseExpr(val)
	if err != nil {
		colexecerror.InternalError(err)
	}
	semaCtx := tree.MakeSemaContext()
	checked, err := tree.TypeCheck(context.Background(), parsed, &semaCtx, typ)
	if err != nil {
		colexecerror.InternalError(err)
	}
	datum, err := checked.Eval(evalCtx)
	if err != nil {
		colexecerror.InternalError(err)
	}
	return datum
}
// setColVal is a test helper function to set the given value at the equivalent
// col[idx]. This function is slow due to reflection.
//
// The value's Go type is matched against the vector's canonical type family:
// bytes, decimal, and datum-backed families are handled specially; everything
// else is set via reflection.
func setColVal(vec coldata.Vec, idx int, val interface{}, evalCtx *tree.EvalContext) {
	canonicalTypeFamily := vec.CanonicalTypeFamily()
	if canonicalTypeFamily == types.BytesFamily {
		var (
			bytesVal []byte
			ok       bool
		)
		// Accept either a []byte directly or a string to be converted.
		bytesVal, ok = val.([]byte)
		if !ok {
			bytesVal = []byte(val.(string))
		}
		vec.Bytes().Set(idx, bytesVal)
	} else if canonicalTypeFamily == types.DecimalFamily {
		// setColVal is used in multiple places, therefore val can be either a float
		// or apd.Decimal.
		if decimalVal, ok := val.(apd.Decimal); ok {
			vec.Decimal()[idx].Set(&decimalVal)
		} else {
			floatVal := val.(float64)
			decimalVal, _, err := apd.NewFromString(fmt.Sprintf("%f", floatVal))
			if err != nil {
				colexecerror.InternalError(
					errors.AssertionFailedf("unable to set decimal %f: %v", floatVal, err))
			}
			// .Set is used here instead of assignment to ensure the pointer address
			// of the underlying storage for apd.Decimal remains the same. This can
			// cause the code that does not properly use execgen package to fail.
			vec.Decimal()[idx].Set(decimalVal)
		}
	} else if canonicalTypeFamily == typeconv.DatumVecCanonicalTypeFamily {
		// Datum-backed vectors accept several value representations; strings
		// are parsed into datums of the vector's type.
		switch v := val.(type) {
		case *coldataext.Datum:
			vec.Datum().Set(idx, v)
		case tree.Datum:
			vec.Datum().Set(idx, v)
		case string:
			vec.Datum().Set(idx, stringToDatum(v, vec.Type(), evalCtx))
		case json.JSON:
			vec.Datum().Set(idx, &tree.DJSON{JSON: v})
		default:
			colexecerror.InternalError(errors.AssertionFailedf("unexpected type %T of datum-backed value: %v", v, v))
		}
	} else {
		// Generic path: assign via reflection, converting val to the vector's
		// element type.
		reflect.ValueOf(vec.Col()).Index(idx).Set(reflect.ValueOf(val).Convert(reflect.TypeOf(vec.Col()).Elem()))
	}
}
// extrapolateTypesFromTuples determines the type schema based on the input
// tuples: for each column, the first non-nil value decides the type.
func extrapolateTypesFromTuples(tups Tuples) []*types.T {
	numCols := len(tups[0])
	typs := make([]*types.T, numCols)
	for colIdx := 0; colIdx < numCols; colIdx++ {
		// Default type for test cases is Int64 in case the entire column is
		// null and the type is indeterminate.
		typs[colIdx] = types.Int
		for _, tup := range tups {
			if tup[colIdx] == nil {
				continue
			}
			typs[colIdx] = typeconv.UnsafeFromGoType(tup[colIdx])
			break
		}
	}
	return typs
}
// opTestInput is an Operator that columnarizes test input in the form of
// tuples of arbitrary Go types. It's meant to be used in Operator unit tests
// in conjunction with OpTestOutput like the following:
//
// inputTuples := tuples{
//   {1,2,3.3,true},
//   {5,6,7.0,false},
// }
// tupleSource := NewOpTestInput(inputTuples, types.Bool)
// opUnderTest := newFooOp(tupleSource, ...)
// output := NewOpTestOutput(opUnderTest, expectedOutputTuples)
// if err := output.Verify(); err != nil {
//     t.Fatal(err)
// }
type opTestInput struct {
	colexecop.ZeroInputNode

	// allocator is used to create the output batch.
	allocator *colmem.Allocator

	// typs is the type schema; extrapolated from tuples in Init if nil.
	typs []*types.T

	// batchSize is the maximum number of tuples emitted per Next call.
	batchSize int
	// tuples are the remaining tuples to emit.
	tuples    Tuples
	// initialTuples are tuples passed in into the constructor, and we keep the
	// reference to them in order to be able to reset the operator.
	initialTuples Tuples
	// batch is the reused output batch.
	batch         coldata.Batch
	// useSel indicates whether a shuffled selection vector is applied.
	useSel        bool
	// rng drives the selection-vector shuffle (only set when useSel is true).
	rng           *rand.Rand
	selection     []int
	evalCtx       *tree.EvalContext

	// injectAllNulls determines whether opTestInput will replace all values in
	// the input tuples with nulls.
	injectAllNulls bool

	// injectRandomNulls determines whether opTestInput will randomly replace
	// each value in the input tuples with a null.
	injectRandomNulls bool
}

var _ colexecop.ResettableOperator = &opTestInput{}
// NewOpTestInput returns a new opTestInput with the given input tuples and the
// given type schema. If typs is nil, the input tuples are translated into
// types automatically, using simple rules (e.g. integers always become Int64).
func NewOpTestInput(
	allocator *colmem.Allocator, batchSize int, tuples Tuples, typs []*types.T,
) colexecop.Operator {
	return &opTestInput{
		allocator:     allocator,
		batchSize:     batchSize,
		tuples:        tuples,
		initialTuples: tuples,
		typs:          typs,
		evalCtx:       tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()),
	}
}
// newOpTestSelInput is like NewOpTestInput but additionally enables a shuffled
// selection vector driven by rng.
func newOpTestSelInput(
	allocator *colmem.Allocator, rng *rand.Rand, batchSize int, tuples Tuples, typs []*types.T,
) *opTestInput {
	return &opTestInput{
		allocator:     allocator,
		useSel:        true,
		rng:           rng,
		batchSize:     batchSize,
		tuples:        tuples,
		initialTuples: tuples,
		typs:          typs,
		evalCtx:       tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()),
	}
}
// Init implements the Operator interface: it resolves the type schema (if it
// was not provided) and allocates the reusable batch and selection vector.
func (s *opTestInput) Init() {
	if s.typs == nil {
		if len(s.tuples) == 0 {
			colexecerror.InternalError(errors.AssertionFailedf("empty tuple source with no specified types"))
		}
		s.typs = extrapolateTypesFromTuples(s.tuples)
	}
	s.batch = s.allocator.NewMemBatchWithMaxCapacity(s.typs)

	// Identity selection vector to start with.
	s.selection = make([]int, coldata.BatchSize())
	for i := 0; i < len(s.selection); i++ {
		s.selection[i] = i
	}
}
// Next implements the Operator interface: it converts the next (up to
// batchSize) tuples into a columnar batch, optionally applying a shuffled
// selection vector and injecting NULLs / garbage values.
func (s *opTestInput) Next(context.Context) coldata.Batch {
	if len(s.tuples) == 0 {
		return coldata.ZeroBatch
	}
	s.batch.ResetInternalBatch()
	// The last batch may be smaller than batchSize.
	batchSize := s.batchSize
	if len(s.tuples) < batchSize {
		batchSize = len(s.tuples)
	}
	tups := s.tuples[:batchSize]
	s.tuples = s.tuples[batchSize:]
	// Sanity-check that all tuples in this batch have the same arity.
	tupleLen := len(tups[0])
	for i := range tups {
		if len(tups[i]) != tupleLen {
			colexecerror.InternalError(errors.AssertionFailedf("mismatched tuple lens: found %+v expected %d vals",
				tups[i], tupleLen))
		}
	}

	if s.useSel {
		for i := range s.selection {
			s.selection[i] = i
		}
		// We have populated s.selection vector with possibly more indices than we
		// have actual tuples for, so some "default" tuples will be introduced but
		// will not be selected due to the length of the batch being equal to the
		// number of actual tuples.
		//
		// To introduce an element of chaos in the testing process we shuffle the
		// selection vector; however, in the real environment we expect that
		// indices in the selection vector to be in ascending order, so we sort
		// only those indices that correspond to the actual tuples. For example,
		// say we have 3 actual tuples, and after shuffling the selection vector
		// is [200, 50, 100, ...], so we sort only those 3 values to get to
		// [50, 100, 200, ...] in order to "scan" the selection vector in
		// sequential order.
		s.rng.Shuffle(len(s.selection), func(i, j int) {
			s.selection[i], s.selection[j] = s.selection[j], s.selection[i]
		})
		sort.Slice(s.selection[:batchSize], func(i, j int) bool {
			return s.selection[i] < s.selection[j]
		})
		// Any unused elements in the selection vector are set to a value larger
		// than the max batch size, so the test will panic if this part of the slice
		// is accidentally accessed.
		for i := range s.selection[batchSize:] {
			s.selection[batchSize+i] = coldata.BatchSize() + 1
		}

		s.batch.SetSelection(true)
		copy(s.batch.Selection(), s.selection)
	}

	// Reset nulls for all columns in this batch.
	for _, colVec := range s.batch.ColVecs() {
		if colVec.CanonicalTypeFamily() != types.UnknownFamily {
			colVec.Nulls().UnsetNulls()
		}
	}
	rng, _ := randutil.NewPseudoRand()

	for i := range s.typs {
		vec := s.batch.ColVec(i)
		// Automatically convert the Go values into exec.Type slice elements using
		// reflection. This is slow, but acceptable for tests.
		col := reflect.ValueOf(vec.Col())
		for j := 0; j < batchSize; j++ {
			// If useSel is false, then the selection vector will contain
			// [0, ..., batchSize] in ascending order.
			outputIdx := s.selection[j]
			injectRandomNull := s.injectRandomNulls && rng.Float64() < 0.5
			if tups[j][i] == nil || s.injectAllNulls || injectRandomNull {
				vec.Nulls().SetNull(outputIdx)
				if rng.Float64() < 0.5 {
					// With 50% probability we set garbage data in the value to make sure
					// that it doesn't affect the computation when the value is actually
					// NULL. For the other 50% of cases we leave the data unset which
					// exercises other scenarios (like division by zero when the value is
					// actually NULL).
					canonicalTypeFamily := vec.CanonicalTypeFamily()
					switch canonicalTypeFamily {
					case types.DecimalFamily:
						d := apd.Decimal{}
						_, err := d.SetFloat64(rng.Float64())
						if err != nil {
							colexecerror.InternalError(errors.AssertionFailedf("%v", err))
						}
						col.Index(outputIdx).Set(reflect.ValueOf(d))
					case types.BytesFamily:
						newBytes := make([]byte, rng.Intn(16)+1)
						rng.Read(newBytes)
						setColVal(vec, outputIdx, newBytes, s.evalCtx)
					case types.IntervalFamily:
						setColVal(vec, outputIdx, duration.MakeDuration(rng.Int63(), rng.Int63(), rng.Int63()), s.evalCtx)
					case typeconv.DatumVecCanonicalTypeFamily:
						// Datum-backed families need a valid datum of the
						// concrete type even for garbage values.
						switch vec.Type().Family() {
						case types.CollatedStringFamily:
							collatedStringType := types.MakeCollatedString(types.String, *randgen.RandCollationLocale(rng))
							randomBytes := make([]byte, rng.Intn(16)+1)
							rng.Read(randomBytes)
							d, err := tree.NewDCollatedString(string(randomBytes), collatedStringType.Locale(), &tree.CollationEnvironment{})
							if err != nil {
								colexecerror.InternalError(err)
							}
							setColVal(vec, outputIdx, d, s.evalCtx)
						case types.JsonFamily:
							newBytes := make([]byte, rng.Intn(16)+1)
							rng.Read(newBytes)
							j := json.FromString(string(newBytes))
							setColVal(vec, outputIdx, j, s.evalCtx)
						case types.TimeTZFamily:
							setColVal(vec, outputIdx, tree.NewDTimeTZFromOffset(timeofday.FromInt(rng.Int63()), rng.Int31()), s.evalCtx)
						case types.TupleFamily:
							setColVal(vec, outputIdx, stringToDatum("(NULL)", vec.Type(), s.evalCtx), s.evalCtx)
						default:
							colexecerror.InternalError(errors.AssertionFailedf("unexpected datum-backed type: %s", vec.Type()))
						}
					default:
						if val, ok := quick.Value(reflect.TypeOf(vec.Col()).Elem(), rng); ok {
							setColVal(vec, outputIdx, val.Interface(), s.evalCtx)
						} else {
							colexecerror.InternalError(errors.AssertionFailedf("could not generate a random value of type %s", vec.Type()))
						}
					}
				}
			} else {
				setColVal(vec, outputIdx, tups[j][i], s.evalCtx)
			}
		}
	}

	s.batch.SetLength(batchSize)
	return s.batch
}
// Reset implements the colexecop.Resetter interface: it rewinds the input so
// the original tuples are emitted again on subsequent Next calls.
func (s *opTestInput) Reset(context.Context) {
	s.tuples = s.initialTuples
}
// opFixedSelTestInput is like opTestInput but applies a fixed, caller-provided
// selection vector (rather than a randomly shuffled one).
type opFixedSelTestInput struct {
	colexecop.ZeroInputNode
	// allocator is used to create the output batch.
	allocator *colmem.Allocator

	// typs is the type schema; extrapolated from tuples in Init if nil.
	typs []*types.T

	// batchSize is the maximum number of tuples emitted per Next call.
	batchSize int
	// tuples are the full set of input tuples.
	tuples    Tuples
	// batch is the reused output batch.
	batch     coldata.Batch
	// sel is the fixed selection vector; may be nil.
	sel       []int
	evalCtx   *tree.EvalContext
	// idx is the index of the tuple to be emitted next. We need to maintain it
	// in case the provided selection vector or provided tuples (if sel is nil)
	// is longer than requested batch size.
	idx int
}

var _ colexecop.ResettableOperator = &opFixedSelTestInput{}
// NewOpFixedSelTestInput returns a new opFixedSelTestInput with the given
// input tuples and selection vector. The input tuples are translated into
// types automatically, using simple rules (e.g. integers always become Int64).
func NewOpFixedSelTestInput(
	allocator *colmem.Allocator, sel []int, batchSize int, tuples Tuples, typs []*types.T,
) colexecop.Operator {
	return &opFixedSelTestInput{
		allocator: allocator,
		batchSize: batchSize,
		sel:       sel,
		tuples:    tuples,
		typs:      typs,
		evalCtx:   tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()),
	}
}
// Init implements the Operator interface: it resolves the type schema,
// validates tuple arities reachable through sel, and — when sel is non-nil —
// eagerly converts all tuples into the batch's columns (Next then only copies
// chunks of the selection vector).
func (s *opFixedSelTestInput) Init() {
	if s.typs == nil {
		if len(s.tuples) == 0 {
			colexecerror.InternalError(errors.AssertionFailedf("empty tuple source with no specified types"))
		}
		s.typs = extrapolateTypesFromTuples(s.tuples)
	}

	s.batch = s.allocator.NewMemBatchWithMaxCapacity(s.typs)
	tupleLen := len(s.tuples[0])
	// Only tuples reachable through sel need to match the first tuple's arity.
	for _, i := range s.sel {
		if len(s.tuples[i]) != tupleLen {
			colexecerror.InternalError(errors.AssertionFailedf("mismatched tuple lens: found %+v expected %d vals",
				s.tuples[i], tupleLen))
		}
	}

	// Reset nulls for all columns in this batch.
	for i := 0; i < s.batch.Width(); i++ {
		s.batch.ColVec(i).Nulls().UnsetNulls()
	}

	if s.sel != nil {
		s.batch.SetSelection(true)
		// When non-nil selection vector is given, we convert all tuples into the
		// Go values at once, and we'll be copying an appropriate chunk of the
		// selection vector later in Next().
		for i := range s.typs {
			vec := s.batch.ColVec(i)
			// Automatically convert the Go values into exec.Type slice elements using
			// reflection. This is slow, but acceptable for tests.
			for j := 0; j < len(s.tuples); j++ {
				if s.tuples[j][i] == nil {
					vec.Nulls().SetNull(j)
				} else {
					setColVal(vec, j, s.tuples[j][i], s.evalCtx)
				}
			}
		}
	}
}
// Next implements the Operator interface. With a nil sel it converts the next
// chunk of tuples on demand; with a non-nil sel (tuples were converted in
// Init) it only copies the next chunk of the selection vector into the batch.
func (s *opFixedSelTestInput) Next(context.Context) coldata.Batch {
	var batchSize int
	if s.sel == nil {
		batchSize = s.batchSize
		if len(s.tuples)-s.idx < batchSize {
			// Fewer tuples remain than a full batch.
			batchSize = len(s.tuples) - s.idx
		}
		// When nil selection vector is given, we convert only the tuples that fit
		// into the current batch (keeping the s.idx in mind).
		for i := range s.typs {
			vec := s.batch.ColVec(i)
			vec.Nulls().UnsetNulls()
			for j := 0; j < batchSize; j++ {
				if s.tuples[s.idx+j][i] == nil {
					vec.Nulls().SetNull(j)
				} else {
					// Automatically convert the Go values into exec.Type slice elements using
					// reflection. This is slow, but acceptable for tests.
					setColVal(vec, j, s.tuples[s.idx+j][i], s.evalCtx)
				}
			}
		}
	} else {
		if s.idx == len(s.sel) {
			return coldata.ZeroBatch
		}
		batchSize = s.batchSize
		if len(s.sel)-s.idx < batchSize {
			batchSize = len(s.sel) - s.idx
		}
		// All tuples have already been converted to the Go values, so we only need
		// to set the right selection vector for s.batch.
		copy(s.batch.Selection(), s.sel[s.idx:s.idx+batchSize])
	}
	s.batch.SetLength(batchSize)
	s.idx += batchSize
	return s.batch
}
// Reset implements the colexecop.Resetter interface: it rewinds the emission
// cursor so Next starts over from the first tuple (or selection index).
func (s *opFixedSelTestInput) Reset(context.Context) {
	s.idx = 0
}
// OpTestOutput is a test verification struct that ensures its input batches
// match some expected output tuples.
type OpTestOutput struct {
	colexecop.OneInputNode
	// expected are the tuples the input is checked against.
	expected Tuples
	evalCtx  *tree.EvalContext

	// curIdx is the index of the next tuple to read from batch.
	curIdx int
	// batch is the most recently received input batch.
	batch  coldata.Batch
}
// NewOpTestOutput returns a new OpTestOutput, initialized with the given input
// to verify that the output is exactly equal to the expected tuples. Note that
// the input operator is initialized here.
func NewOpTestOutput(input colexecop.Operator, expected Tuples) *OpTestOutput {
	input.Init()
	out := &OpTestOutput{
		OneInputNode: colexecop.NewOneInputNode(input),
		expected:     expected,
		evalCtx:      tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()),
	}
	return out
}
// GetTupleFromBatch is a helper function that extracts a tuple at index
// tupleIdx from batch, resolving the selection vector and deep-copying values
// where the underlying storage would otherwise be aliased (bytes, decimals).
func GetTupleFromBatch(batch coldata.Batch, tupleIdx int) Tuple {
	ret := make(Tuple, batch.Width())
	out := reflect.ValueOf(ret)
	// Translate the logical index through the selection vector, if present.
	if sel := batch.Selection(); sel != nil {
		tupleIdx = sel[tupleIdx]
	}
	for colIdx := range ret {
		vec := batch.ColVec(colIdx)
		if vec.Nulls().NullAt(tupleIdx) {
			ret[colIdx] = nil
		} else {
			var val reflect.Value
			if colBytes, ok := vec.Col().(*coldata.Bytes); ok {
				// Copy the bytes out so the tuple does not alias the batch.
				val = reflect.ValueOf(append([]byte(nil), colBytes.Get(tupleIdx)...))
			} else if vec.CanonicalTypeFamily() == types.DecimalFamily {
				// Copy the decimal for the same aliasing reason.
				colDec := vec.Decimal()
				var newDec apd.Decimal
				newDec.Set(&colDec[tupleIdx])
				val = reflect.ValueOf(newDec)
			} else if vec.CanonicalTypeFamily() == typeconv.DatumVecCanonicalTypeFamily {
				// Unwrap the coldataext.Datum into the underlying tree.Datum.
				val = reflect.ValueOf(vec.Datum().Get(tupleIdx).(*coldataext.Datum).Datum)
			} else {
				val = reflect.ValueOf(vec.Col()).Index(tupleIdx)
			}
			out.Index(colIdx).Set(val)
		}
	}
	return ret
}
// next returns the next tuple produced by the input, pulling a fresh batch
// when the current one is exhausted. It returns nil once the input is drained.
func (r *OpTestOutput) next(ctx context.Context) Tuple {
	if r.batch == nil || r.curIdx >= r.batch.Length() {
		// Get a fresh batch.
		r.batch = r.Input.Next(ctx)
		if r.batch.Length() == 0 {
			return nil
		}
		r.curIdx = 0
	}
	tup := GetTupleFromBatch(r.batch, r.curIdx)
	r.curIdx++
	return tup
}
// Reset implements the Resetter interface: it resets the input (if it is
// resettable) and rewinds the output cursor.
func (r *OpTestOutput) Reset(ctx context.Context) {
	// Use a distinct name for the asserted input so the receiver r is not
	// shadowed inside the if-block (the original shadowed r, which was
	// confusing and error-prone).
	if resetter, ok := r.Input.(colexecop.Resetter); ok {
		resetter.Reset(ctx)
	}
	r.curIdx = 0
	r.batch = nil
}
// Verify ensures that the input to this OpTestOutput produced the same results
// and in the same order as the ones expected in the OpTestOutput's expected
// tuples, using a slow, reflection-based comparison method, returning an error
// if the input isn't equal to the expected.
func (r *OpTestOutput) Verify() error {
	ctx := context.Background()
	var actual Tuples
	// Drain the input into actual.
	for tup := r.next(ctx); tup != nil; tup = r.next(ctx) {
		actual = append(actual, tup)
	}
	return assertTuplesOrderedEqual(r.expected, actual, r.evalCtx)
}
// VerifyAnyOrder ensures that the input to this OpTestOutput produced the same
// results but in any order (meaning set comparison behavior is used) as the
// ones expected in the OpTestOutput's expected tuples, using a slow,
// reflection-based comparison method, returning an error if the input isn't
// equal to the expected.
func (r *OpTestOutput) VerifyAnyOrder() error {
	ctx := context.Background()
	var actual Tuples
	// Drain the input into actual.
	for tup := r.next(ctx); tup != nil; tup = r.next(ctx) {
		actual = append(actual, tup)
	}
	return AssertTuplesSetsEqual(r.expected, actual, r.evalCtx)
}
// tupleEquals checks that two tuples are equal, using a slow,
// reflection-based method to do the comparison. Reflection is used so that
// values can be compared in a type-agnostic way.
//
// NULLs compare equal only to NULLs; floats tolerate a small absolute error
// (and NaN == NaN); decimals in actual may be compared against floats in
// expected; datum-backed values may be compared against strings.
func tupleEquals(expected Tuple, actual Tuple, evalCtx *tree.EvalContext) bool {
	if len(expected) != len(actual) {
		return false
	}
	for i := 0; i < len(actual); i++ {
		if expected[i] == nil || actual[i] == nil {
			// nil (NULL) only equals nil.
			if expected[i] != nil || actual[i] != nil {
				return false
			}
		} else {
			// Special case for NaN, since it does not equal itself.
			if f1, ok := expected[i].(float64); ok {
				if f2, ok := actual[i].(float64); ok {
					if math.IsNaN(f1) && math.IsNaN(f2) {
						continue
					} else if !math.IsNaN(f1) && !math.IsNaN(f2) && math.Abs(f1-f2) < 1e-6 {
						// Approximate float equality with absolute tolerance.
						continue
					}
				}
			}
			// Special case for decimals.
			if d1, ok := actual[i].(apd.Decimal); ok {
				if f2, ok := expected[i].(float64); ok {
					d2, _, err := apd.NewFromString(fmt.Sprintf("%f", f2))
					if err == nil && d1.Cmp(d2) == 0 {
						continue
					} else {
						return false
					}
				}
			}
			// Special case for datum-backed types.
			if d1, ok := actual[i].(tree.Datum); ok {
				if d, ok := d1.(*coldataext.Datum); ok {
					d1 = d.Datum
				}
				var d2 tree.Datum
				switch d := expected[i].(type) {
				case *coldataext.Datum:
					d2 = d.Datum
				case tree.Datum:
					d2 = d
				case string:
					// Parse the expected string as a datum of the actual
					// value's resolved type.
					d2 = stringToDatum(d, d1.ResolvedType(), evalCtx)
				default:
					return false
				}
				if d1.Compare(evalCtx, d2) == 0 {
					continue
				}
				return false
			}
			// Default case: require equality after converting each value to
			// the other's type in both directions.
			if !reflect.DeepEqual(
				reflect.ValueOf(actual[i]).Convert(reflect.TypeOf(expected[i])).Interface(),
				expected[i],
			) || !reflect.DeepEqual(
				reflect.ValueOf(expected[i]).Convert(reflect.TypeOf(actual[i])).Interface(),
				actual[i],
			) {
				return false
			}
		}
	}
	return true
}
// makeError builds an error containing a unified diff of the expected vs the
// actual tuples, one numbered tuple per line.
func makeError(expected Tuples, actual Tuples) error {
	var expStr, actStr strings.Builder
	for i, tup := range expected {
		fmt.Fprintf(&expStr, "%d: %s\n", i, tup.String())
	}
	for i, tup := range actual {
		fmt.Fprintf(&actStr, "%d: %s\n", i, tup.String())
	}

	diff := difflib.UnifiedDiff{
		A:       difflib.SplitLines(expStr.String()),
		B:       difflib.SplitLines(actStr.String()),
		Context: 100,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		return errors.Errorf("expected didn't match actual, failed to make diff %s", err)
	}
	return errors.Errorf("expected didn't match actual. diff:\n%s", text)
}
// AssertTuplesSetsEqual asserts that two sets of tuples are equal.
func AssertTuplesSetsEqual(expected Tuples, actual Tuples, evalCtx *tree.EvalContext) error {
	if len(expected) != len(actual) {
		return makeError(expected, actual)
	}
	// Sort each side using a representative tuple from the opposite set so
	// that equal sets end up ordered identically before the element-wise
	// comparison.
	var witness Tuple
	if len(expected) > 0 {
		witness = expected[0]
	}
	actual = actual.sort(evalCtx, witness)
	if len(actual) > 0 {
		witness = actual[0]
	}
	expected = expected.sort(evalCtx, witness)
	return assertTuplesOrderedEqual(expected, actual, evalCtx)
}
// assertTuplesOrderedEqual asserts that two permutations of tuples are equal
// in order.
func assertTuplesOrderedEqual(expected Tuples, actual Tuples, evalCtx *tree.EvalContext) error {
	if len(expected) != len(actual) {
		return errors.Errorf("expected %+v, actual %+v", expected, actual)
	}
	for i := 0; i < len(expected); i++ {
		if tupleEquals(expected[i], actual[i], evalCtx) {
			continue
		}
		return makeError(expected, actual)
	}
	return nil
}
// FiniteBatchSource is an Operator that returns the same batch a specified
// number of times.
type FiniteBatchSource struct {
	colexecop.ZeroInputNode
	// repeatableBatch produces the batch emitted by every call to Next.
	repeatableBatch *colexecop.RepeatableBatchSource
	// usableCount is the number of remaining Next calls that return the
	// batch; when it reaches zero, Next returns coldata.ZeroBatch.
	usableCount int
}

// Static interface-compliance check.
var _ colexecop.Operator = &FiniteBatchSource{}
// NewFiniteBatchSource returns a new Operator initialized to return its input
// batch a specified number of times.
func NewFiniteBatchSource(
	allocator *colmem.Allocator, batch coldata.Batch, typs []*types.T, usableCount int,
) *FiniteBatchSource {
	src := &FiniteBatchSource{}
	src.repeatableBatch = colexecop.NewRepeatableBatchSource(allocator, batch, typs)
	src.usableCount = usableCount
	return src
}
// Init implements the Operator interface.
func (f *FiniteBatchSource) Init() {
	// Initialization is fully delegated to the wrapped repeatable source.
	f.repeatableBatch.Init()
}
// Next implements the Operator interface. It returns the wrapped batch until
// the usable-count budget is exhausted, then returns the zero batch.
func (f *FiniteBatchSource) Next(ctx context.Context) coldata.Batch {
	if f.usableCount <= 0 {
		return coldata.ZeroBatch
	}
	f.usableCount--
	return f.repeatableBatch.Next(ctx)
}
// Reset resets FiniteBatchSource to return the same batch usableCount number of
// times.
func (f *FiniteBatchSource) Reset(usableCount int) {
	// Only the countdown needs resetting; the wrapped source is stateless
	// with respect to how many times it has been Nexted.
	f.usableCount = usableCount
}
// finiteChunksSource is an Operator that returns a batch specified number of
// times. The first matchLen columns of the batch are incremented every time
// (except for the first) the batch is returned to emulate source that is
// already ordered on matchLen columns.
type finiteChunksSource struct {
	colexecop.ZeroInputNode
	repeatableBatch *colexecop.RepeatableBatchSource
	// usableCount is the number of batches left to produce.
	usableCount int
	// matchLen is the number of leading columns (read as int64 vectors in
	// Next) that are shifted on each emission after the first.
	matchLen int
	// adjustment[col] is the delta added to column col; it is computed from
	// the first emitted batch (last value - first value + 1).
	adjustment []int64
}

// Static interface-compliance check.
var _ colexecop.Operator = &finiteChunksSource{}
// NewFiniteChunksSource returns a new finiteChunksSource.
func NewFiniteChunksSource(
	allocator *colmem.Allocator, batch coldata.Batch, typs []*types.T, usableCount int, matchLen int,
) colexecop.Operator {
	source := &finiteChunksSource{
		matchLen:        matchLen,
		usableCount:     usableCount,
		repeatableBatch: colexecop.NewRepeatableBatchSource(allocator, batch, typs),
	}
	return source
}
// Init implements the Operator interface: it initializes the wrapped source
// and allocates the per-column adjustments (all zero, meaning "not yet
// computed" — see Next).
func (f *finiteChunksSource) Init() {
	f.repeatableBatch.Init()
	f.adjustment = make([]int64, f.matchLen)
}
// Next implements the Operator interface. It emits the underlying batch up to
// usableCount times; on every emission after the first, the first matchLen
// (int64) columns are shifted upward by a per-column adjustment so that
// consecutive batches appear ordered on those columns.
func (f *finiteChunksSource) Next(ctx context.Context) coldata.Batch {
	if f.usableCount > 0 {
		f.usableCount--
		batch := f.repeatableBatch.Next(ctx)
		// adjustment[0] == 0 means the adjustments have not been computed yet,
		// i.e. this is the first batch.
		if f.matchLen > 0 && f.adjustment[0] == 0 {
			// We need to calculate the difference between the first and the last
			// tuples in batch in first matchLen columns so that in the following
			// calls to Next() the batch is adjusted such that tuples in consecutive
			// batches are ordered on the first matchLen columns.
			for col := 0; col < f.matchLen; col++ {
				firstValue := batch.ColVec(col).Int64()[0]
				lastValue := batch.ColVec(col).Int64()[batch.Length()-1]
				f.adjustment[col] = lastValue - firstValue + 1
			}
		} else {
			for i := 0; i < f.matchLen; i++ {
				int64Vec := batch.ColVec(i).Int64()
				for j := range int64Vec {
					int64Vec[j] += f.adjustment[i]
				}
				// We need to update the adjustments because RepeatableBatchSource
				// returns the original batch that it was instantiated with, and we
				// want to have constantly non-decreasing vectors.
				firstValue := batch.ColVec(i).Int64()[0]
				lastValue := batch.ColVec(i).Int64()[batch.Length()-1]
				f.adjustment[i] += lastValue - firstValue + 1
			}
		}
		return batch
	}
	return coldata.ZeroBatch
}
// chunkingBatchSource is a batch source that takes unlimited-size columns and
// chunks them into BatchSize()-sized chunks when Nexted.
type chunkingBatchSource struct {
	colexecop.ZeroInputNode
	// allocator creates the reusable output batch in Init.
	allocator *colmem.Allocator
	// typs are the column types; Next maintains only the first len(typs)
	// vectors of the batch.
	typs []*types.T
	// cols are the full-length input columns that get windowed per chunk.
	cols []coldata.Vec
	// len is the total tuple count; curIdx is the next tuple to emit.
	len int
	curIdx int
	// batch is reused across calls to Next.
	batch coldata.Batch
}

// Static interface-compliance check.
var _ colexecop.ResettableOperator = &chunkingBatchSource{}
// NewChunkingBatchSource returns a new chunkingBatchSource with the given
// column types, columns, and length.
func NewChunkingBatchSource(
	allocator *colmem.Allocator, typs []*types.T, cols []coldata.Vec, len int,
) colexecop.ResettableOperator {
	source := &chunkingBatchSource{}
	source.allocator = allocator
	source.typs = typs
	source.cols = cols
	source.len = len
	return source
}
// Init implements the Operator interface: it allocates the reusable output
// batch and points its vectors (and null bitmaps) at the full input columns.
func (c *chunkingBatchSource) Init() {
	c.batch = c.allocator.NewMemBatchWithMaxCapacity(c.typs)
	for i := range c.cols {
		c.batch.ColVec(i).SetCol(c.cols[i].Col())
		c.batch.ColVec(i).SetNulls(c.cols[i].Nulls())
	}
}
// Next implements the Operator interface. It serves the input columns in
// consecutive BatchSize()-sized windows, returning ZeroBatch once all len
// tuples have been emitted.
func (c *chunkingBatchSource) Next(context.Context) coldata.Batch {
	if c.curIdx >= c.len {
		return coldata.ZeroBatch
	}
	// Explicitly set to false since this could be modified by the downstream
	// operators. This is sufficient because both the vectors and the nulls are
	// explicitly set below. ResetInternalBatch cannot be used here because we're
	// operating on Windows into the vectors.
	c.batch.SetSelection(false)
	lastIdx := c.curIdx + coldata.BatchSize()
	if lastIdx > c.len {
		lastIdx = c.len
	}
	for i := range c.typs {
		// Note that new vectors could be appended to the batch, but we are not
		// responsible for updating those, so we iterate only up to len(c.typs)
		// as per out initialization.
		c.batch.ColVec(i).SetCol(c.cols[i].Window(c.curIdx, lastIdx).Col())
		nullsSlice := c.cols[i].Nulls().Slice(c.curIdx, lastIdx)
		c.batch.ColVec(i).SetNulls(&nullsSlice)
	}
	c.batch.SetLength(lastIdx - c.curIdx)
	c.curIdx = lastIdx
	return c.batch
}
// Reset implements the ResettableOperator interface: it rewinds the source to
// the first tuple so the columns can be iterated again.
func (c *chunkingBatchSource) Reset(context.Context) {
	c.curIdx = 0
}
// MinBatchSize is the minimum acceptable size of batches for tests in colexec*
// packages. GenerateBatchSize uses it as the lower bound for randomized batch
// sizes.
const MinBatchSize = 3
// GenerateBatchSize generates somewhat random value to set coldata.BatchSize()
// to.
func GenerateBatchSize() int {
	if !envutil.EnvOrDefaultBool("COCKROACH_RANDOMIZE_BATCH_SIZE", true) {
		return coldata.BatchSize()
	}
	rng, _ := randutil.NewPseudoRand()
	// The candidate sizes are skewed toward very small batches: most unit
	// tests have little data, so small batches are what exercises the
	// multi-batch code paths. The default size and one random larger size
	// are included so that bigger-than-default batches are occasionally
	// exercised too.
	candidates := []int{
		MinBatchSize,
		MinBatchSize + 1,
		MinBatchSize + 2,
		coldata.BatchSize(),
		MinBatchSize + rng.Intn(coldata.MaxBatchSize-MinBatchSize),
	}
	return candidates[rng.Intn(len(candidates))]
}
// CallbackMetadataSource is a utility struct that implements the
// colexecop.MetadataSource interface by calling a provided callback.
type CallbackMetadataSource struct {
	// DrainMetaCb is invoked by DrainMeta; it must be set before use
	// (DrainMeta calls it unconditionally).
	DrainMetaCb func(context.Context) []execinfrapb.ProducerMetadata
}
// DrainMeta is part of the colexecop.MetadataSource interface. It forwards
// directly to the configured callback.
func (s CallbackMetadataSource) DrainMeta(ctx context.Context) []execinfrapb.ProducerMetadata {
	return s.DrainMetaCb(ctx)
}
|
package main
import (
"log"
"github.com/go-rod/rod/lib/utils"
)
// main runs the repository's lint/format steps: eslint and prettier over the
// JS/HTML sources, then the Go linter.
func main() {
	run := func(banner, name string, args ...string) {
		log.Println(banner)
		utils.Exec(name, args...)
	}
	run("npx eslint --ext .js,.html .", "npx", "eslint", "--ext", ".js,.html", ".")
	run("npx prettier --loglevel error --write .", "npx", "prettier", "--loglevel", "error", "--write", ".")
	run("godev lint", "godev", "lint")
}
|
package dispatcher
import (
"fmt"
"reflect"
"sync"
)
// BadListenerError is raised when AddListener is called with an invalid listener function.
type BadListenerError string

// Error implements the error interface, describing the rejected listener.
func (err BadListenerError) Error() string {
	return "Bad listener func: " + string(err)
}
// New returns a new dispatcher
func New() *Dispatcher {
	d := &Dispatcher{}
	d.listeners = make(map[reflect.Type][]interface{})
	d.interfaces = make([]reflect.Type, 0)
	return d
}
// A Dispatcher sends events to the listeners that listen to the
// events of a certain type.
type Dispatcher struct {
	// lock guards listeners and interfaces; Dispatch takes a read lock,
	// AddListener a write lock.
	lock sync.RWMutex
	// listeners maps an event (or interface) type to its listener funcs.
	listeners map[reflect.Type][]interface{}
	// interfaces lists the registered interface types, so Dispatch can match
	// concrete event values against each of them.
	interfaces []reflect.Type
}
// AddListener registers a listener function that will be called when a matching
// event is dispatched. The type of the function's first (and only) argument
// declares the event type (or interface) to listen for.
func (d *Dispatcher) AddListener(listeners ...interface{}) error {
	// Validate every listener up front so a bad argument leaves the
	// dispatcher's state completely untouched.
	for _, l := range listeners {
		t := reflect.TypeOf(l)
		if t.Kind() != reflect.Func || t.NumIn() != 1 {
			return BadListenerError("listener must be a function that takes exactly one argument")
		}
	}
	d.lock.Lock()
	defer d.lock.Unlock()
	for _, l := range listeners {
		// The sole input parameter identifies the event type to listen for.
		eventType := reflect.TypeOf(l).In(0)
		d.listeners[eventType] = append(d.listeners[eventType], l)
		// Interface types are tracked separately so Dispatch can check
		// non-interface event values against every registered interface.
		if eventType.Kind() == reflect.Interface {
			d.interfaces = append(d.interfaces, eventType)
		}
	}
	return nil
}
// Dispatch sends an event to all registered listeners that were declared
// to accept values of the event's type, or interfaces that the value implements.
func (d *Dispatcher) Dispatch(ev interface{}) {
	d.lock.RLock()
	defer d.lock.RUnlock()
	args := []reflect.Value{reflect.ValueOf(ev)}
	evType := reflect.TypeOf(ev)
	// Listeners registered for the concrete type fire first.
	d.callListeners(evType, args)
	// Then any listeners registered for interfaces this value implements.
	for _, iface := range d.interfaces {
		if evType.Implements(iface) {
			d.callListeners(iface, args)
		}
	}
}
// callListeners invokes every listener registered under type t with vals.
// Callers must hold d.lock (read or write).
func (d *Dispatcher) callListeners(t reflect.Type, vals []reflect.Value) {
	registered := d.listeners[t]
	for _, listener := range registered {
		reflect.ValueOf(listener).Call(vals)
	}
}
|
package commands
import (
"context"
"github.com/loft-sh/devspace/pkg/devspace/context/values"
"mvdan.cc/sh/v3/interp"
)
// IsDependency exits with status 0 when the context marks this run as a
// dependency; any arguments, or a non-dependency context, yield status 1.
func IsDependency(ctx context.Context, args []string) error {
	if len(args) != 0 {
		return interp.NewExitStatus(1)
	}
	if isDep, ok := values.IsDependencyFrom(ctx); ok && isDep {
		return interp.NewExitStatus(0)
	}
	return interp.NewExitStatus(1)
}
|
package main
import "github.com/m7shapan/my-http/cmd"
func main() {
	// All CLI behavior lives in the cmd package; main only starts it.
	var c cmd.CMD
	c.Start()
}
|
package main
import (
"encoding/json"
"log"
"github.com/bmob/bmob-go-sdk"
)
var (
	// appConfig holds the Bmob REST credentials (application key and REST
	// API key, in that positional order) used by every request in this file.
	appConfig = bmob.RestConfig{"b18cda25d056ac3a6e22f6a304cb37b8",
		"f60c9cd10b04fa2a5fd6f914c64c6528"}
)
// BmobPushOrder serializes order to JSON and POSTs it to the Bmob "Orders"
// REST collection, logging the response header (or the error). It returns
// any error from serialization or the request.
func BmobPushOrder(order *Order) error {
	bytes, err := json.Marshal(order)
	if err != nil {
		// Previously the marshal error was silently discarded, which would
		// send an empty body; surface it instead.
		log.Println(err)
		return err
	}
	header, err := bmob.DoRestReq(appConfig,
		bmob.RestRequest{
			bmob.BaseReq{
				"POST",
				bmob.ApiRestURL("Orders") + "/",
				""},
			"application/json",
			bytes},
		nil)
	if err == nil {
		log.Println(header)
	} else {
		log.Println(err)
	}
	log.Println("****************************************")
	return err
}
|
package main
import (
"io/ioutil"
"os"
)
// main reads the file named "file" and prints its contents.
func main() {
	f, err := os.Open("file")
	// Check the error before deferring Close (the original deferred first,
	// which its own comment flagged): on failure there is no valid handle
	// to close.
	if err != nil {
		return
	}
	defer f.Close()
	b, err := ioutil.ReadAll(f)
	if err != nil {
		return
	}
	println(string(b))
}
|
package main
import "testing"
// TestMorseCode checks morseCode against known encodings, covering letters,
// digits, and the word separator (double space).
func TestMorseCode(t *testing.T) {
	cases := map[string]string{
		".- ...- ..--- .-- .... .. . -.-. -..- ....- .....": "AV2WHIECX 45",
		"-... .... ...--": "BH3",
		"-----":           "0",
	}
	for input, want := range cases {
		got := morseCode(input)
		if got != want {
			t.Errorf("failed: morseCode %s is %s, got %s",
				input, want, got)
		}
	}
}
const morse = "ETIANMSURWDKGOHVF L PJBXCYZQ 54 3 2 16 7 8 90"
func morseCode(q string) string {
var (
m int = 1
r []byte
)
for _, i := range q {
switch {
case i == '.':
m <<= 1
case i == '-':
m = (m << 1) + 1
case m == 1:
r = append(r, ' ')
default:
if m < 64 {
r = append(r, morse[m-2])
}
m = 1
}
}
if m > 1 && m < 64 {
r = append(r, morse[m-2])
}
return string(r)
}
|
package reflectUtil
import (
//"fmt"
"moqikaka.com/Test/src/Model"
//"reflect"
"fmt"
"testing"
)
// TestReflect exercises ReflectUtil.CreateInstance: it builds instances of
// Model.HaremInfo from the "p_harem_info" source and prints each PlayerID.
// The commented-out snippets below are kept as reference experiments with
// reflect.Value method/field access.
func TestReflect(context *testing.T) {
	// Invoking methods via reflection (kept for reference):
	//tempFunc := func(i int) int {
	//	return i
	//}
	//reflectResult := reflect.ValueOf(tempFunc)
	//fmt.Println("fv is reflect.Func ?", value.Kind() == reflect.Func)
	//params := make([]reflect.Value, 1)
	//params[0] = reflect.ValueOf(20)
	//result := reflectResult.Call(params)
	//fmt.Println(result[0])
	//newReflect := NewReflectUtil("张", 20)
	//value := reflect.ValueOf(newReflect)
	//params := make([]reflect.Value, 1)
	//params[0] = reflect.ValueOf(50)
	//fmt.Println(value.Method(2).Call(nil))
	//fmt.Println(value.Method(0).Call(nil)[0])
	//fmt.Println(value.Method(1).Call(params))
	//if value.Kind() == reflect.Ptr {
	//	value = value.Elem()
	//}
	//
	//value.FieldByName("Age").Set(reflect.ValueOf(2000))
	//fmt.Println()
	//typeOfT := value.Type()
	//for i := 0; i < value.NumField(); i++ {
	//	f := value.Field(i)
	//	fmt.Printf("%d: %s %s = %v\n", i, typeOfT.Field(i).Name, f.Type(), f.Interface())
	//}
	//fmt.Println(value.Method(2).Call(nil)[0])
	//result := value.MethodByName("GetAge")
	//fmt.Println(result.Call(nil))
	//fmt.Println(value.Method(1).Call(nil)[0])
	newReflect := NewReflectUtil()
	tmepList, err := newReflect.CreateInstance(Model.NewHaremInfo(), "p_harem_info")
	if err != nil {
		// NOTE(review): the error is only printed; the test does not fail on
		// it — confirm whether that is intentional.
		fmt.Println(err)
	}
	for _, value := range tmepList {
		haremInfo := value.(Model.HaremInfo)
		fmt.Println(haremInfo.PlayerID)
	}
}
|
package motion
import (
"fmt"
"net/http"
"os/exec"
"github.com/andreacioni/motionctrl/config"
"github.com/andreacioni/motionctrl/version"
"github.com/kpango/glg"
"github.com/parnurzeal/gorequest"
)
var (
	// motionConfigFile remembers the configuration file path passed to Init,
	// for later use when (re)starting motion.
	motionConfigFile string
)
// Init verifies the motion installation, loads the given configuration file,
// and, when autostart is set, starts motion (optionally with detection
// enabled). It returns an error if any step fails.
func Init(configFile string, autostart bool, detection bool) error {
	if err := checkInstall(); err != nil {
		return fmt.Errorf("Motion not found: %v", err)
	}
	if started, err := checkStarted(); err == nil {
		if started {
			// Bug fix: glg.Warn does not interpret format verbs, so the %s
			// was printed literally; Warnf performs the formatting.
			glg.Warnf("Motion started before %s", version.Name)
		}
	} else {
		return fmt.Errorf("Unable to check if motion is running: %v", err)
	}
	if err := loadConfig(configFile); err != nil {
		return fmt.Errorf("Failed to load motion configuration: %v", err)
	}
	motionConfigFile = configFile
	if autostart {
		glg.Infof("Starting motion")
		if err := startMotion(detection); err != nil {
			return fmt.Errorf("Unable to start motion: %v", err)
		}
	}
	return nil
}
// GetStreamBaseURL returns the base URL of motion's video stream endpoint,
// built from the configured address and the stream port read from motion's
// configuration.
func GetStreamBaseURL() string {
	return fmt.Sprintf("http://%s:%s", config.BaseAddress, readOnlyConfig[ConfigStreamPort])
}
// GetBaseURL returns the base URL of motion's web control endpoint, built
// from the configured address and the web-control port read from motion's
// configuration.
func GetBaseURL() string {
	return fmt.Sprintf("http://%s:%s", config.BaseAddress, readOnlyConfig[ConfigWebControlPort])
}
// checkInstall checks whether the motion binary is available by running
// "motion -h"; it returns an error if motion cannot be invoked.
func checkInstall() error {
	err := exec.Command("motion", "-h").Run()
	//TODO unfortunatelly motion doesn't return 0 when invoked with the "-h" parameter
	// so "exit status 1" is treated as success here.
	if err != nil && err.Error() != "exit status 1" {
		return err
	}
	return nil
}
// webControlGet issues a GET against motion's web-control endpoint for
// camera 0 at the given path and, on HTTP 200, passes the response body to
// callback, returning its result.
func webControlGet(path string, callback func(string) (interface{}, error)) (interface{}, error) {
	resp, body, errs := gorequest.New().Get(GetBaseURL() + "/0" + path).End()
	// Bug fix: gorequest returns a []error; compare its length rather than
	// the slice against nil, so an empty-but-non-nil slice is not treated
	// as a failure.
	if len(errs) != 0 {
		return nil, errs[0] //TODO errs[0] not the best
	}
	glg.Debugf("Response body: %s", body)
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("request failed with code: %d", resp.StatusCode)
	}
	return callback(body)
}
|
package controllers
import (
"fmt"
"github.com/revel/revel"
"html/template"
"io/ioutil"
"log"
)
// App is the revel controller serving the model-visualization pages.
type App struct {
	*revel.Controller
}
// Index renders the visualization page for the model named in the route,
// embedding the model's topology JSON as an inline JS variable.
func (c App) Index() revel.Result {
	model := c.Params.Route.Get("model")
	log.Printf("model=%s", model)

	title := model
	moreStyles := []string{"css/style.css"}
	moreScripts := []string{"js/three.js", "js/viz.js"}

	buf, err := ioutil.ReadFile(fmt.Sprintf("/tmp/nmir/%s_vt.json", model))
	if err != nil {
		// A read failure is logged; rendering proceeds with an empty topology.
		log.Println(err)
	}
	model_js := template.JS("var topo = " + string(buf) + ";")
	return c.Render(title, moreStyles, moreScripts, model_js)
}
|
package graph
// Graph is an adjacency-list representation, mapping each vertex to the
// vertices it has edges to.
type Graph map[int][]int

// NewGraph returns an empty, ready-to-use Graph.
func NewGraph() Graph {
	return Graph{}
}
|
package main
import (
"sync"
"time"
)
// Marker type paired with _TgapFilter_ReceCnt (naming convention in this
// file: a *X struct{} marker accompanies each data type).
type _TgapFilter_ReceCntX struct{}

// _TgapFilter_ReceCnt appears to be a per-key receive counter with the last
// unhandled datagram and raw bytes — TODO confirm against the senders.
type _TgapFilter_ReceCnt struct {
	gfrCnt int
	gfrUnr _TudpNodeDataRece
	gfrUnb []byte
}

type _TgapFilter_ReceStX struct{}

// _TgapFilter_ReceSt holds three generations of per-key receive counters
// (current, last, and the one before — presumably rotated each gap interval;
// verify in the filter loop).
type _TgapFilter_ReceSt struct { // _TudpNodeDataReceX
	now_ map[string]_TgapFilter_ReceCnt
	last map[string]_TgapFilter_ReceCnt
	las2 map[string]_TgapFilter_ReceCnt
}

type _TgapFilterX struct{}

// _TgapFilter bundles the filter's mutex-guarded state: the gap duration,
// loop-out/in channels for datagrams and raw bytes, a delay channel, the
// callback hooks invoked by the filter, and the rotating receive state.
type _TgapFilter struct {
	gfMux sync.Mutex
	gfGap time.Duration // default _T10s
	gfCHudpNodeDataReceLO *chan _TudpNodeDataRece // _TudpNodeDataReceX
	gfCHudpNodeDataReceI chan _TudpNodeDataRece
	gfCHbyteLO *chan []byte
	gfCHbyteI chan []byte
	gfCHdelay chan byte
	gfCBinit func(*_TgapFilter) // _FgapFilter__1200101x__init_default
	gfCBdelay func(*_TgapFilter)
	gfCBudpNodeDataRece func(*_TgapFilter)
	gfCBbyte func(*_TgapFilter)
	gfR _TgapFilter_ReceSt
}
|
package meerkat
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/ahmdrz/goinsta"
"gopkg.in/yaml.v2"
)
// exists reports whether the file or directory at path exists.
//
// Bug fix: the previous implementation returned os.IsExist(err), which is
// false when os.Stat succeeds (err == nil), so existing paths were always
// reported as absent. A nil Stat error is the correct existence test.
func exists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
// Meerkat watches a set of Instagram accounts and reports activity and
// profile changes to the configured outputs. The exported fields are
// populated from the YAML configuration (see parseArgs).
type Meerkat struct {
	// Interval is the polling period in seconds between watcher rounds.
	Interval int
	// SleepTime is the pause in seconds between per-user profile fetches.
	SleepTime int
	Username string
	Password string
	// TargetUsers are the Instagram usernames to watch.
	TargetUsers []string
	// OutputType selects output sinks; it is substring-matched against
	// "telegram" and "logfile".
	OutputType string
	TelegramToken string
	// TelegramUser is the chat ID that notifications are sent to.
	TelegramUser int
	instagram *goinsta.Instagram
	logger *log.Logger
	// lastTimeStamp is the newest story timestamp seen so far, used to skip
	// already-reported activity.
	lastTimeStamp int
	// targetUsers caches the last-seen profile snapshot, keyed by user ID.
	targetUsers map[int64]User
	// login records whether Instagram login succeeded (guards Logout).
	login bool
	// loggerFile is the open log file when -output was given, else nil.
	loggerFile *os.File
}
// User is a snapshot of the watched profile counters that Run compares
// between polling rounds to detect changes.
type User struct {
	Username string
	Bio string
	Followers int
	Following int
	Posts int
	Tags int
}
// parseArgs handles the CLI surface of meerkat:
//   - "meerkat init [file]" writes the config template and exits the process;
//   - -output selects a log file (default: stdout);
//   - -config selects the YAML configuration file (default: meerkat.yaml);
// and finally unmarshals the YAML directly into m's exported fields.
func (m *Meerkat) parseArgs() error {
	if len(os.Args) > 1 {
		// meerkat init
		// meerkat init config.yaml
		if os.Args[1] == "init" {
			configFile := "meerkat.yaml"
			if len(os.Args) > 2 {
				configFile = os.Args[2]
			}
			file, err := os.OpenFile(configFile, os.O_CREATE|os.O_WRONLY, 0666)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			file.WriteString(configTemplate)
			file.Close()
			fmt.Println(configFile, "generated.")
			// "init" is terminal: it never returns to the caller.
			os.Exit(0)
		}
	}
	configFile := "meerkat.yaml"
	outputPtr := flag.String("output", "", "Log output file.")
	configPtr := flag.String("config", "", "Configuration file (YAML format)")
	flag.Parse()
	if *outputPtr == "" {
		m.logger = log.New(os.Stdout, "[meerkat] ", log.Ldate|log.Ltime)
	} else {
		file, err := os.OpenFile(*outputPtr, os.O_CREATE|os.O_WRONLY, 0666)
		if err != nil {
			return fmt.Errorf("output file [%s] does not exists", *outputPtr)
		}
		// Keep the handle so Logout can close it later.
		m.loggerFile = file
		m.logger = log.New(file, "[meerkat] ", log.Ldate|log.Ltime)
	}
	if *configPtr != "" {
		configFile = *configPtr
	}
	file, err := os.Open(configFile)
	if err != nil {
		return fmt.Errorf("config file [%s] does not exists", configFile)
	}
	defer file.Close()
	bytes, err := ioutil.ReadAll(file)
	if err != nil {
		return err
	}
	err = yaml.Unmarshal(bytes, m)
	if err != nil {
		return err
	}
	return nil
}
// Run logs in to Instagram, takes an initial snapshot of every target user's
// profile counters, then polls in a loop: it reports new following activity
// and any profile changes (bio, followers, following, posts, tags) to the
// configured outputs. It stops after three consecutive failures (returning
// the last error) or when a value arrives on done.
func (m *Meerkat) Run(done chan bool) error {
	m.logger.Println("Logging in to the Instagram")
	m.instagram = goinsta.New(m.Username, m.Password)
	err := m.instagram.Login()
	if err != nil {
		return fmt.Errorf("Instagram error , %s", err.Error())
	}
	m.login = true
	m.logger.Println("Successfully logged in")
	select {
	case <-done:
		return fmt.Errorf("Signal on meerkat !")
	default:
		// Take the baseline snapshot of every watched profile.
		for _, username := range m.TargetUsers {
			m.logger.Printf("Getting %s information ", username)
			user, err := m.instagram.GetUserByUsername(username)
			if err != nil {
				return err
			}
			m.targetUsers[user.User.ID] = User{
				Username:  username,
				Followers: user.User.FollowerCount,
				Following: user.User.FollowingCount,
				Bio:       user.User.Biography,
				Posts:     user.User.MediaCount,
				Tags:      user.User.UserTagsCount,
			}
			m.logger.Printf("User %s-%d information has been retrived successfully.", username, user.User.ID)
		}
	}
	m.logger.Println("Starting watcher ...")
	var failure int = 0
	var exitErr error
	// TODO : make select statement better !
	tick := time.After(time.Duration(m.Interval) * time.Second)
	for failure != 3 {
		select {
		case <-done:
			return fmt.Errorf("Signal on meerkat !")
		case <-tick:
			tick = time.After(time.Duration(m.Interval) * time.Second)
			m.logger.Println("Sending request to get following activities")
			resp, err := m.instagram.GetFollowingRecentActivity()
			if err != nil {
				m.logger.Println("Error", err)
				failure++
				exitErr = err
				continue
			}
			// to find last time stamp
			maxTimeStamp := int(0)
			for _, story := range resp.Stories {
				unixTimeStamp := story.Args.Timestamp
				if unixTimeStamp <= m.lastTimeStamp {
					continue
				}
				for _, link := range story.Args.Links {
					if link.Type == "user" {
						userID, _ := strconv.ParseInt(link.ID, 10, 64)
						if user, ok := m.targetUsers[userID]; ok {
							unixTime := time.Unix(int64(unixTimeStamp), 0)
							message := fmt.Sprintf("[%s] [%s] %s\n", user.Username, unixTime.Format("15:04:05"), story.Args.Text)
							// TODO: parse to array of string and search over it.
							if strings.Contains(m.OutputType, "telegram") {
								m.sendToTelegram(m.TelegramUser, message)
							}
							if strings.Contains(m.OutputType, "logfile") {
								m.logger.Println(message)
							}
						}
					}
				}
				if unixTimeStamp > maxTimeStamp {
					maxTimeStamp = unixTimeStamp
				}
			}
			if maxTimeStamp != 0 {
				m.lastTimeStamp = maxTimeStamp
			}
			failure = 0
			for _, username := range m.TargetUsers {
				m.logger.Printf("Getting %s information ", username)
				user, err := m.instagram.GetUserByUsername(username)
				if err != nil {
					m.logger.Println("Error", err)
					failure++
					exitErr = err
					continue
				}
				tmpUser := m.targetUsers[user.User.ID]
				currentTime := time.Now().Format("15:04:05")
				message := fmt.Sprintf("[%s] [%s] : \n", username, currentTime)
				hasMessage := false
				if user.User.Biography != tmpUser.Bio {
					message += fmt.Sprintf("User %s biography changed to %s\n", username, user.User.Biography)
					hasMessage = true
					tmpUser.Bio = user.User.Biography
				}
				if user.User.FollowerCount != tmpUser.Followers {
					message += fmt.Sprintf("User %s followers changed from %d to %d\n", username, tmpUser.Followers, user.User.FollowerCount)
					hasMessage = true
					tmpUser.Followers = user.User.FollowerCount
				}
				if user.User.FollowingCount != tmpUser.Following {
					message += fmt.Sprintf("User %s following changed from %d to %d\n", username, tmpUser.Following, user.User.FollowingCount)
					hasMessage = true
					tmpUser.Following = user.User.FollowingCount
				}
				// Bug fix: this branch previously re-checked FollowingCount
				// against tmpUser.Following (a copy-paste of the block above),
				// so post-count changes were reported at the wrong time and
				// tmpUser.Posts was updated spuriously.
				if user.User.MediaCount != tmpUser.Posts {
					message += fmt.Sprintf("User %s posts changed from %d to %d\n", username, tmpUser.Posts, user.User.MediaCount)
					hasMessage = true
					tmpUser.Posts = user.User.MediaCount
				}
				if user.User.UserTagsCount != tmpUser.Tags {
					message += fmt.Sprintf("User %s tags changed from %d to %d\n", username, tmpUser.Tags, user.User.UserTagsCount)
					hasMessage = true
					tmpUser.Tags = user.User.UserTagsCount
				}
				if hasMessage {
					m.targetUsers[user.User.ID] = tmpUser
					// TODO: parse to array of string and search over it.
					if strings.Contains(m.OutputType, "telegram") {
						m.sendToTelegram(m.TelegramUser, message)
					}
					if strings.Contains(m.OutputType, "logfile") {
						m.logger.Println(message)
					}
				}
				m.logger.Printf("User %s information has been updated successfully.", username)
				time.Sleep(time.Duration(m.SleepTime) * time.Second)
			}
		}
	}
	if failure == 3 {
		return exitErr
	}
	return nil
}
// Logout logs out of Instagram (when a login succeeded) and closes the log
// file handle if one was opened. Bug fix: the previous version returned
// immediately after the Instagram logout, so the log file was never closed
// when logged in; both cleanup steps now always run, and the first error
// encountered is returned.
func (m *Meerkat) Logout() error {
	var firstErr error
	if m.login {
		firstErr = m.instagram.Logout()
	}
	if m.loggerFile != nil {
		if err := m.loggerFile.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
// New builds a Meerkat from the command line and YAML configuration,
// validating the required settings before returning it.
func New() (*Meerkat, error) {
	m := &Meerkat{
		targetUsers: make(map[int64]User),
		loggerFile:  nil,
	}
	if err := m.parseArgs(); err != nil {
		return nil, err
	}
	if len(m.TargetUsers) == 0 {
		return nil, fmt.Errorf("There is no targetusers in yaml config file")
	}
	if m.OutputType == "" {
		return nil, fmt.Errorf("Fill outputtype with ['logfile', 'telegram']")
	}
	// Short intervals only warn; they are not fatal.
	if m.Interval < 10 {
		log.Println("Interval is low, try more than 10 seconds.")
	}
	if m.SleepTime < 10 {
		log.Println("SleepTime is low, try more than 10 seconds.")
	}
	return m, nil
}
// sendToTelegram delivers message to the Telegram chat `to` via the bot API,
// returning an error if the request fails or the API reports failure.
// TODO(review): message is interpolated into the query string without URL
// escaping; spaces/& in messages will corrupt the request (needs net/url).
func (m *Meerkat) sendToTelegram(to int, message string) error {
	url := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage?chat_id=%d&text=%s", m.TelegramToken, to, message)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	// Defer the close before reading (it was previously deferred after
	// ReadAll) so the body is released even if the read fails.
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	var output struct {
		Ok          bool   `json:"ok"`
		Description string `json:"description"`
	}
	// Bug fix: the decode error was previously ignored, so an unparsable
	// response produced an arbitrary result.
	if err := json.Unmarshal(bytes, &output); err != nil {
		return err
	}
	if !output.Ok {
		return fmt.Errorf("Telegram bot %s", output.Description)
	}
	return nil
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analysis
import (
"fmt"
"strings"
"github.com/google/gapid/gapil/semantic"
)
// Value interface compliance checks: fail the build if *EnumValue stops
// implementing Value or SetRelational.
var (
	_ = Value(&EnumValue{})
	_ = SetRelational(&EnumValue{})
)
// Labels is a map of value to name.
type Labels map[uint64]string

// Merge adds all the labels from o into l, overwriting entries that share a
// value.
func (l Labels) Merge(o Labels) {
	for value, name := range o {
		l[value] = name
	}
}
// EnumValue is an implementation of Value that represents all the possible
// values of an enumerator.
type EnumValue struct {
	// Ty is the semantic enum type this value belongs to.
	Ty *semantic.Enum
	// Numbers holds the set of possible numeric values.
	Numbers *UintValue
	// Labels maps numeric values to their enumerator names, for display.
	Labels Labels
}
// Print returns a textual representation of the value.
// The results parameter is not used by this implementation.
func (v *EnumValue) Print(results *Results) string {
	return v.String()
}
// String renders the set of possible values as "[a, b, ...]", using the
// enumerator label where one is known and a hex literal otherwise. Long
// runs (10 values or more) are abbreviated as "first, ..., last".
func (v *EnumValue) String() string {
	// NOTE(review): bias presumably maps the stored unsigned number back to
	// the enum's native numeric space for display — confirm with uintBias.
	bias := uintBias(v.Ty)
	parts := []string{}
	add := func(i uint64) {
		s, ok := v.Labels[i]
		if !ok {
			s = fmt.Sprintf("%#x", bias(i))
		}
		parts = append(parts, s)
	}
	for _, r := range v.Numbers.Ranges {
		if r.End-r.Start < 10 {
			// Short ranges are listed exhaustively.
			for i := r.Start; i != r.End; i++ {
				add(i)
			}
		} else {
			// Long ranges are elided to their endpoints.
			add(r.Start)
			parts = append(parts, "...")
			add(r.End - 1)
		}
	}
	return fmt.Sprintf("[%v]", strings.Join(parts, ", "))
}
// Type returns the semantic type of the integer value represented by v.
func (v *EnumValue) Type() semantic.Type {
	return v.Ty
}
// GreaterThan returns the possibility of v being greater than o.
// o must be of type *EnumValue.
func (v *EnumValue) GreaterThan(o Value) Possibility {
	other := o.(*EnumValue)
	return v.Numbers.GreaterThan(other.Numbers)
}
// GreaterEqual returns the possibility of v being greater or equal to o.
// o must be of type *EnumValue.
func (v *EnumValue) GreaterEqual(o Value) Possibility {
	other := o.(*EnumValue)
	return v.Numbers.GreaterEqual(other.Numbers)
}
// LessThan returns the possibility of v being less than o.
// o must be of type *EnumValue.
func (v *EnumValue) LessThan(o Value) Possibility {
	other := o.(*EnumValue)
	return v.Numbers.LessThan(other.Numbers)
}
// LessEqual returns the possibility of v being less than or equal to o.
// o must be of type *EnumValue.
func (v *EnumValue) LessEqual(o Value) Possibility {
	other := o.(*EnumValue)
	return v.Numbers.LessEqual(other.Numbers)
}
// SetGreaterThan returns a new value that represents the range of possible
// values in v that are greater than the lowest in o.
// o must be of type *EnumValue.
func (v *EnumValue) SetGreaterThan(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.SetGreaterThan(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// SetGreaterEqual returns a new value that represents the range of possible
// values in v that are greater than or equal to the lowest in o.
// o must be of type *EnumValue.
func (v *EnumValue) SetGreaterEqual(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.SetGreaterEqual(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// SetLessThan returns a new value that represents the range of possible
// values in v that are less than to the highest in o.
// o must be of type *EnumValue.
func (v *EnumValue) SetLessThan(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.SetLessThan(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// SetLessEqual returns a new value that represents the range of possible
// values in v that are less than or equal to the highest in o.
// o must be of type *EnumValue.
func (v *EnumValue) SetLessEqual(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.SetLessEqual(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// Equivalent returns true iff v and o are equivalent.
// Unlike Equals() which returns the possibility of two values being equal,
// Equivalent() returns true iff the set of possible values are exactly
// equal.
// o must be of type *EnumValue.
func (v *EnumValue) Equivalent(o Value) bool {
	if v == o {
		return true
	}
	other := o.(*EnumValue)
	if !v.Numbers.Equivalent(other.Numbers) {
		return false
	}
	if len(v.Labels) != len(other.Labels) {
		return false
	}
	// Same size, so a one-directional comparison suffices.
	for num, name := range v.Labels {
		if other.Labels[num] != name {
			return false
		}
	}
	return true
}
// Equals returns the possibility of v being equal to o.
// o must be of type *EnumValue.
func (v *EnumValue) Equals(o Value) Possibility {
	if v == o && v.Valid() {
		return True
	}
	other := o.(*EnumValue)
	return v.Numbers.Equals(other.Numbers)
}
// Valid returns true if there is any possibility of this value equaling
// any other. Validity is fully delegated to the numeric set.
func (v *EnumValue) Valid() bool {
	return v.Numbers.Valid()
}
// Union (∪) returns the values that are found in v or o.
// o must be of type *EnumValue.
func (v *EnumValue) Union(o Value) Value {
	// Identical operands: the union is the value itself.
	if v == o {
		return v
	}
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.Union(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// Intersect (∩) returns the values that are found in both v and o.
// o must be of type *EnumValue.
func (v *EnumValue) Intersect(o Value) Value {
	// Identical operands: the intersection is the value itself.
	if v == o {
		return v
	}
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.Intersect(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// Difference (\) returns the values that are found in v but not found in o.
// o must be of type *EnumValue.
func (v *EnumValue) Difference(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.Difference(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// Clone returns a copy of v with a unique pointer.
func (v *EnumValue) Clone() Value {
	labels := make(Labels, len(v.Labels))
	labels.Merge(v.Labels)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.Clone().(*UintValue),
		Labels:  labels,
	}
}
// joinLabels returns a fresh Labels map containing the labels of both v and
// o; on collision o's label wins.
func (v *EnumValue) joinLabels(o *EnumValue) Labels {
	merged := make(Labels, len(v.Labels)+len(o.Labels))
	for num, name := range v.Labels {
		merged[num] = name
	}
	for num, name := range o.Labels {
		merged[num] = name
	}
	return merged
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//go:build linux
// +build linux
package libaudit
import (
"os"
"syscall"
"testing"
"github.com/stretchr/testify/assert"
)
// Validate NetlinkClient implements NetlinkSendReceiver (compile-time check).
var _ NetlinkSendReceiver = &NetlinkClient{}
// TestNewNetlinkClient verifies netlink audit client construction: the read
// buffer is sized to one memory page, and the kernel assigns the process PID
// to the first socket and a distinct, non-zero PID to the second.
// NOTE(review): requires Linux with NETLINK_AUDIT support; will fail in
// restricted/containerized environments without that capability.
func TestNewNetlinkClient(t *testing.T) {
	c, err := NewNetlinkClient(syscall.NETLINK_AUDIT, 0, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	assert.Len(t, c.readBuf, os.Getpagesize())
	// First PID assigned by the kernel will be our actual PID.
	assert.EqualValues(t, os.Getpid(), c.pid)
	c2, err := NewNetlinkClient(syscall.NETLINK_AUDIT, 0, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer c2.Close()
	// Second PID assigned by kernel will be random.
	assert.NotEqual(t, 0, c2.pid)
	assert.NotEqual(t, c.pid, c2.pid)
}
|
package db
import (
	"context"
	"sync"
	"time"

	"github.com/LILILIhuahuahua/ustc_tencent_game/db/databaseGrpc"
)
var (
	playerService databaseGrpc.PlayerServiceClient
	// playerServiceMu guards the lazy initialization of playerService:
	// the original check-then-assign on a package-level variable is a
	// data race when called from multiple goroutines.
	playerServiceMu sync.Mutex
)

// getService lazily creates and caches the shared PlayerServiceClient.
// It is safe for concurrent use; a failed initialization is not cached,
// so the next call retries GetPlayerService.
func getService() (databaseGrpc.PlayerServiceClient, error) {
	playerServiceMu.Lock()
	defer playerServiceMu.Unlock()
	if playerService == nil {
		service, err := GetPlayerService()
		if err != nil {
			return nil, err
		}
		playerService = service
	}
	return playerService, nil
}
// PlayerUpdateHighestScoreByPlayerId persists a player's new highest score
// through the player gRPC service, bounded by a one-second request timeout.
func PlayerUpdateHighestScoreByPlayerId(playerId, newScore int32) error {
	service, err := getService()
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	req := &databaseGrpc.PlayerUpdateHighestScoreByPlayerIdRequest{
		PlayerId:     playerId,
		HighestScore: newScore,
	}
	_, err = service.PlayerUpdateHighestScoreByPlayerId(ctx, req)
	return err
}
|
package ravendb
import (
"reflect"
"time"
)
// Note: Java's IDocumentQueryBase is DocumentQuery
// Note: Java's IDocumentQueryBaseSingle is DocumentQuery
// Note: Java's IDocumentQuery is DocumentQuery
// Note: Java's IFilterDocumentQueryBase is DocumentQuery
// DocumentQuery describes a query
type DocumentQuery struct {
*abstractDocumentQuery
}
// DocumentQueryOptions describes options for creating a query
type DocumentQueryOptions struct {
// CollectionName and Type are mutually exclusive
// if Collection is empty string we'll derive name of the collection
// from Type
CollectionName string
Type reflect.Type
// name of the index used for search query
// if set, CollectionName and Type should not be set
IndexName string
IsMapReduce bool
conventions *DocumentConventions
// rawQuery is mutually exclusive with IndexName and CollectionName/Type
rawQuery string
session *InMemoryDocumentSessionOperations
isGroupBy bool
declareToken *declareToken
loadTokens []*loadToken
fromAlias string
}
// newDocumentQuery creates a *DocumentQuery from opts.
// A failure in processQueryParameters is not returned directly: the abstract
// query is still constructed and the error is stored in its err field, so it
// surfaces on the first fluent call that checks q.err.
func newDocumentQuery(opts *DocumentQueryOptions) *DocumentQuery {
	var err error
	opts.IndexName, opts.CollectionName, err = processQueryParameters(opts.Type, opts.IndexName, opts.CollectionName, opts.conventions)
	aq := newAbstractDocumentQuery(opts)
	if err != nil {
		aq.err = err
	}
	return &DocumentQuery{
		abstractDocumentQuery: aq,
	}
}
// SelectFields limits the returned values to one or more fields of the queried type.
// When no explicit field names are given, all exported fields of
// projectionType are selected; an error is recorded if it has none.
func (q *DocumentQuery) SelectFields(projectionType reflect.Type, fieldsIn ...string) *DocumentQuery {
	if q.err != nil {
		return q
	}
	fields := fieldsIn
	if len(fields) == 0 {
		fields = FieldsFor(projectionType)
		if len(fields) == 0 {
			q.err = newIllegalArgumentError("type %T has no exported fields to select", projectionType)
			return q
		}
	}
	res, err := q.createDocumentQueryInternal(projectionType, &QueryData{
		Fields:      fields,
		Projections: fields,
	})
	if err != nil {
		q.err = err
		return q
	}
	return res
}
// SelectFieldsWithQueryData limits the returned values to one or more fields of the queried type.
// queryData.Fields and queryData.Projections must have the same length;
// otherwise an error is recorded on the query and returned lazily.
func (q *DocumentQuery) SelectFieldsWithQueryData(projectionType reflect.Type, queryData *QueryData) *DocumentQuery {
	// TODO: tests
	// TODO: better name?
	if q.err != nil {
		return q
	}
	if len(queryData.Fields) != len(queryData.Projections) {
		q.err = newIllegalArgumentError("fields and projections should be of the same size. Have %d and %d elements respectively", len(queryData.Fields), len(queryData.Projections))
		return q
	}
	// TODO: check that fields exist on projectionType
	res, err := q.createDocumentQueryInternal(projectionType, queryData)
	if err != nil {
		q.err = err
		return q
	}
	return res
}
// Distinct marks query as distinct
func (q *DocumentQuery) Distinct() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.distinct()
return q
}
// OrderByScore orders results of the query by score
func (q *DocumentQuery) OrderByScore() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByScore()
return q
}
// OrderByScoreDescending orders results of the query by score
// in descending order
func (q *DocumentQuery) OrderByScoreDescending() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByScoreDescending()
return q
}
//TBD 4.1 IDocumentQuery<T> explainScores() {
// WaitForNonStaleResults waits for non-stale results for a given waitTimeout.
// Timeout of 0 means default timeout.
func (q *DocumentQuery) WaitForNonStaleResults(waitTimeout time.Duration) *DocumentQuery {
if q.err != nil {
return q
}
q.waitForNonStaleResults(waitTimeout)
return q
}
func (q *DocumentQuery) AddParameter(name string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.addParameter(name, value)
return q
}
func (q *DocumentQuery) AddOrder(fieldName string, descending bool) *DocumentQuery {
if q.err != nil {
return q
}
return q.AddOrderWithOrdering(fieldName, descending, OrderingTypeString)
}
func (q *DocumentQuery) AddOrderWithOrdering(fieldName string, descending bool, ordering OrderingType) *DocumentQuery {
if q.err != nil {
return q
}
if descending {
return q.OrderByDescendingWithOrdering(fieldName, ordering)
}
return q.OrderByWithOrdering(fieldName, ordering)
}
//TBD expr IDocumentQuery<T> AddOrder<TValue>(Expression<Func<T, TValue>> propertySelector, bool descending, OrderingType ordering)
/*
IDocumentQuery<T> AddAfterQueryExecutedListener(Consumer<QueryResult> action) {
addAfterQueryExecutedListener(action);
return this;
}
IDocumentQuery<T> RemoveAfterQueryExecutedListener(Consumer<QueryResult> action) {
removeAfterQueryExecutedListener(action);
return this;
}
IDocumentQuery<T> addAfterStreamExecutedListener(Consumer<ObjectNode> action) {
addAfterStreamExecutedListener(action);
return this;
}
IDocumentQuery<T> removeAfterStreamExecutedListener(Consumer<ObjectNode> action) {
removeAfterStreamExecutedListener(action);
return this;
}
*/
// OpenSubclause opens a query sub-clause
func (q *DocumentQuery) OpenSubclause() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.openSubclause()
return q
}
// CloseSubclause closes a query sub-clause
func (q *DocumentQuery) CloseSubclause() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.closeSubclause()
return q
}
func (q *DocumentQuery) Search(fieldName string, searchTerms string) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.search(fieldName, searchTerms)
return q
}
func (q *DocumentQuery) SearchWithOperator(fieldName string, searchTerms string, operator SearchOperator) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.searchWithOperator(fieldName, searchTerms, operator)
return q
}
//TBD expr IDocumentQuery<T> Search<TValue>(Expression<Func<T, TValue>> propertySelector, string searchTerms, SearchOperator @operator)
func (q *DocumentQuery) Intersect() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.intersect()
return q
}
func (q *DocumentQuery) ContainsAny(fieldName string, values []interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.containsAny(fieldName, values)
return q
}
//TBD expr IDocumentQuery<T> ContainsAny<TValue>(Expression<Func<T, TValue>> propertySelector, IEnumerable<TValue> values)
func (q *DocumentQuery) ContainsAll(fieldName string, values []interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.containsAll(fieldName, values)
return q
}
//TBD expr IDocumentQuery<T> ContainsAll<TValue>(Expression<Func<T, TValue>> propertySelector, IEnumerable<TValue> values)
func (q *DocumentQuery) Statistics(stats **QueryStatistics) *DocumentQuery {
q.statistics(stats)
return q
}
func (q *DocumentQuery) UsingDefaultOperator(queryOperator QueryOperator) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.usingDefaultOperator(queryOperator)
return q
}
func (q *DocumentQuery) NoTracking() *DocumentQuery {
q.noTracking()
return q
}
func (q *DocumentQuery) NoCaching() *DocumentQuery {
q.noCaching()
return q
}
//TBD 4.1 IDocumentQuery<T> showTimings()
func (q *DocumentQuery) Include(path string) *DocumentQuery {
q.include(path)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.Include(Expression<Func<T, object>> path)
func (q *DocumentQuery) Not() *DocumentQuery {
q.negateNext()
return q
}
func (q *DocumentQuery) Take(count int) *DocumentQuery {
q.take(count)
return q
}
func (q *DocumentQuery) Skip(count int) *DocumentQuery {
q.skip(count)
return q
}
func (q *DocumentQuery) Where(fieldName string, op string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.where(fieldName, op, value)
return q
}
func (q *DocumentQuery) WhereLucene(fieldName string, whereClause string) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereLucene(fieldName, whereClause)
return q
}
func (q *DocumentQuery) WhereEquals(fieldName string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereEquals(fieldName, value)
return q
}
// Exact marks previous Where statement (e.g. WhereEquals or WhereLucene) as exact
func (q *DocumentQuery) Exact() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.markLastTokenExact()
return q
}
func (q *DocumentQuery) WhereEqualsWithMethodCall(fieldName string, method MethodCall) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereEqualsWithMethodCall(fieldName, method)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.WhereEquals<TValue>(Expression<Func<T, TValue>> propertySelector, TValue value, bool exact)
//TBD expr IDocumentQuery<T> IFilterDocumentQueryBase<T, IDocumentQuery<T>>.WhereEquals<TValue>(Expression<Func<T, TValue>> propertySelector, MethodCall value, bool exact)
func (q *DocumentQuery) WhereEqualsWithParams(whereParams *whereParams) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereEqualsWithParams(whereParams)
return q
}
func (q *DocumentQuery) WhereNotEquals(fieldName string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereNotEquals(fieldName, value)
return q
}
func (q *DocumentQuery) WhereNotEqualsWithMethod(fieldName string, method MethodCall) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereNotEqualsWithMethod(fieldName, method)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.WhereNotEquals<TValue>(Expression<Func<T, TValue>> propertySelector, TValue value, bool exact)
//TBD expr IDocumentQuery<T> IFilterDocumentQueryBase<T, IDocumentQuery<T>>.WhereNotEquals<TValue>(Expression<Func<T, TValue>> propertySelector, MethodCall value, bool exact)
func (q *DocumentQuery) WhereNotEqualsWithParams(whereParams *whereParams) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereNotEqualsWithParams(whereParams)
return q
}
func (q *DocumentQuery) WhereIn(fieldName string, values []interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereIn(fieldName, values)
return q
}
//TBD expr IDocumentQuery<T> WhereIn<TValue>(Expression<Func<T, TValue>> propertySelector, IEnumerable<TValue> values, bool exact = false)
func (q *DocumentQuery) WhereStartsWith(fieldName string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereStartsWith(fieldName, value)
return q
}
func (q *DocumentQuery) WhereEndsWith(fieldName string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereEndsWith(fieldName, value)
return q
}
//TBD expr IDocumentQuery<T> WhereEndsWith<TValue>(Expression<Func<T, TValue>> propertySelector, TValue value)
func (q *DocumentQuery) WhereBetween(fieldName string, start interface{}, end interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereBetween(fieldName, start, end)
return q
}
//TBD expr IDocumentQuery<T> WhereBetween<TValue>(Expression<Func<T, TValue>> propertySelector, TValue start, TValue end, bool exact = false)
func (q *DocumentQuery) WhereGreaterThan(fieldName string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereGreaterThan(fieldName, value)
return q
}
func (q *DocumentQuery) WhereGreaterThanOrEqual(fieldName string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereGreaterThanOrEqual(fieldName, value)
return q
}
//TBD expr IDocumentQuery<T> WhereGreaterThan<TValue>(Expression<Func<T, TValue>> propertySelector, TValue value, bool exact = false)
//TBD expr IDocumentQuery<T> WhereGreaterThanOrEqual<TValue>(Expression<Func<T, TValue>> propertySelector, TValue value, bool exact = false)
func (q *DocumentQuery) WhereLessThan(fieldName string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereLessThan(fieldName, value)
return q
}
//TBD expr IDocumentQuery<T> WhereLessThanOrEqual<TValue>(Expression<Func<T, TValue>> propertySelector, TValue value, bool exact = false)
func (q *DocumentQuery) WhereLessThanOrEqual(fieldName string, value interface{}) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereLessThanOrEqual(fieldName, value)
return q
}
//TBD expr IDocumentQuery<T> WhereLessThanOrEqual<TValue>(Expression<Func<T, TValue>> propertySelector, TValue value, bool exact = false)
//TBD expr IDocumentQuery<T> WhereExists<TValue>(Expression<Func<T, TValue>> propertySelector)
func (q *DocumentQuery) WhereExists(fieldName string) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereExists(fieldName)
return q
}
//TBD expr IDocumentQuery<T> IFilterDocumentQueryBase<T, IDocumentQuery<T>>.WhereRegex<TValue>(Expression<Func<T, TValue>> propertySelector, string pattern)
func (q *DocumentQuery) WhereRegex(fieldName string, pattern string) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.whereRegex(fieldName, pattern)
return q
}
func (q *DocumentQuery) AndAlso() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.andAlso()
return q
}
func (q *DocumentQuery) OrElse() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orElse()
return q
}
func (q *DocumentQuery) Boost(boost float64) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.boost(boost)
return q
}
func (q *DocumentQuery) Fuzzy(fuzzy float64) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.fuzzy(fuzzy)
return q
}
func (q *DocumentQuery) Proximity(proximity int) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.proximity(proximity)
return q
}
func (q *DocumentQuery) RandomOrdering() *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.randomOrdering()
return q
}
func (q *DocumentQuery) RandomOrderingWithSeed(seed string) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.randomOrderingWithSeed(seed)
return q
}
//TBD 4.1 IDocumentQuery<T> customSortUsing(string typeName, bool descending)
// GroupBy makes a query grouped by fields
func (q *DocumentQuery) GroupBy(fieldName string, fieldNames ...string) *GroupByDocumentQuery {
res := newGroupByDocumentQuery(q)
if q.err == nil {
q.err = q.groupBy(fieldName, fieldNames...)
}
res.err = q.err
return res
}
// GroupByFieldWithMethod makes a query grouped by fields and also allows specifying method
// of grouping for each field
func (q *DocumentQuery) GroupByFieldWithMethod(field *GroupBy, fields ...*GroupBy) *GroupByDocumentQuery {
res := newGroupByDocumentQuery(q)
if q.err == nil {
q.err = q.groupByFieldWithMethod(field, fields...)
}
res.err = q.err
return res
}
// OrderBy orders query results by a field
func (q *DocumentQuery) OrderBy(field string) *DocumentQuery {
return q.OrderByWithOrdering(field, OrderingTypeString)
}
// OrderByWithOrdering orders query results by ordering
func (q *DocumentQuery) OrderByWithOrdering(field string, ordering OrderingType) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByWithOrdering(field, ordering)
return q
}
//TBD expr IDocumentQuery<T> OrderBy<TValue>(params Expression<Func<T, TValue>>[] propertySelectors)
// OrderByDescending orders query by a field in descending order
func (q *DocumentQuery) OrderByDescending(field string) *DocumentQuery {
return q.OrderByDescendingWithOrdering(field, OrderingTypeString)
}
// OrderByDescendingWithOrdering orders query by ordering in descending order
func (q *DocumentQuery) OrderByDescendingWithOrdering(field string, ordering OrderingType) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByDescendingWithOrdering(field, ordering)
return q
}
//TBD expr IDocumentQuery<T> OrderByDescending<TValue>(params Expression<Func<T, TValue>>[] propertySelectors)
// AddBeforeQueryExecutedListener adds a listener that will be called before query
// is executed
func (q *DocumentQuery) AddBeforeQueryExecutedListener(action func(*IndexQuery)) int {
return q.addBeforeQueryExecutedListener(action)
}
// RemoveBeforeQueryExecutedListener removes a listener registered with AddBeforeQueryExecutedListener
func (q *DocumentQuery) RemoveBeforeQueryExecutedListener(idx int) *DocumentQuery {
q.removeBeforeQueryExecutedListener(idx)
return q
}
// Note: compared to Java, had to move it down to abstractDocumentQuery
// createDocumentQueryInternal builds a new DocumentQuery typed as resultClass
// that carries over all accumulated state of q (tokens, paging, callbacks),
// optionally rewriting the fields to fetch from queryData.
func (q *abstractDocumentQuery) createDocumentQueryInternal(resultClass reflect.Type, queryData *QueryData) (*DocumentQuery, error) {
	var newFieldsToFetch *fieldsToFetchToken
	if queryData != nil && len(queryData.Fields) > 0 {
		fields := queryData.Fields
		identityProperty := q.conventions.GetIdentityProperty(resultClass)
		if identityProperty != "" {
			// make a copy, just in case, because we might modify it
			fields = append([]string{}, fields...)
			// Map the type's identity property to the server-side
			// document-id field name used by the indexing engine.
			for idx, p := range fields {
				if p == identityProperty {
					fields[idx] = IndexingFieldNameDocumentID
				}
			}
		}
		sourceAliasReference := getSourceAliasIfExists(resultClass, queryData, fields)
		newFieldsToFetch = createFieldsToFetchToken(fields, queryData.Projections, queryData.isCustomFunction, sourceAliasReference)
	}
	// Mutates q before cloning so the token also lands in the new query below.
	if newFieldsToFetch != nil {
		q.updateFieldsToFetchToken(newFieldsToFetch)
	}
	// Carry projection-related tokens over from queryData, if provided.
	var declareToken *declareToken
	var loadTokens []*loadToken
	var fromAlias string
	if queryData != nil {
		declareToken = queryData.declareToken
		loadTokens = queryData.loadTokens
		fromAlias = queryData.fromAlias
	}
	opts := &DocumentQueryOptions{
		Type:           resultClass,
		session:        q.theSession,
		IndexName:      q.indexName,
		CollectionName: q.collectionName,
		isGroupBy:      q.isGroupBy,
		declareToken:   declareToken,
		loadTokens:     loadTokens,
		fromAlias:      fromAlias,
	}
	query := newDocumentQuery(opts)
	if query.err != nil {
		return nil, query.err
	}
	// Shallow-copy the accumulated query state onto the new query; slices
	// and callbacks are shared, not cloned (see TODO below).
	query.queryRaw = q.queryRaw
	query.pageSize = q.pageSize
	query.selectTokens = q.selectTokens
	query.fieldsToFetchToken = q.fieldsToFetchToken
	query.whereTokens = q.whereTokens
	query.orderByTokens = q.orderByTokens
	query.groupByTokens = q.groupByTokens
	query.queryParameters = q.queryParameters
	query.start = q.start
	query.timeout = q.timeout
	query.queryStats = q.queryStats
	query.theWaitForNonStaleResults = q.theWaitForNonStaleResults
	query.negate = q.negate
	//noinspection unchecked
	query.includes = stringArrayCopy(q.includes)
	// TODO: should this be deep copy so that adding/removing in one
	// doesn't affect the other?
	query.beforeQueryExecutedCallback = q.beforeQueryExecutedCallback
	query.afterQueryExecutedCallback = q.afterQueryExecutedCallback
	query.afterStreamExecutedCallback = q.afterStreamExecutedCallback
	query.disableEntitiesTracking = q.disableEntitiesTracking
	query.disableCaching = q.disableCaching
	//TBD 4.1 ShowQueryTimings = ShowQueryTimings,
	//TBD 4.1 query.shouldExplainScores = shouldExplainScores;
	query.isIntersect = q.isIntersect
	query.defaultOperator = q.defaultOperator
	return query, nil
}
// AggregateByFacet aggregates the query by a facet
func (q *DocumentQuery) AggregateByFacet(facet FacetBase) *AggregationDocumentQuery {
res := newAggregationDocumentQuery(q)
if q.err != nil {
return res
}
res.err = q.aggregateBy(facet)
return res
}
// AggregateByFacets aggregates the query by facets
func (q *DocumentQuery) AggregateByFacets(facets ...*Facet) *AggregationDocumentQuery {
res := newAggregationDocumentQuery(q)
if q.err != nil {
return res
}
for _, facet := range facets {
if res.err = q.aggregateBy(facet); res.err != nil {
return res
}
}
return res
}
// AggregateUsing aggregates the query by facet setup
func (q *DocumentQuery) AggregateUsing(facetSetupDocumentID string) *AggregationDocumentQuery {
res := newAggregationDocumentQuery(q)
if q.err != nil {
return res
}
res.err = q.aggregateUsing(facetSetupDocumentID)
return res
}
//TBD 4.1 IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.Highlight(string fieldName, int fragmentLength, int fragmentCount, string fragmentsField)
//TBD 4.1 IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.Highlight(string fieldName, int fragmentLength, int fragmentCount, out FieldHighlightings highlightings)
//TBD 4.1 IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.Highlight(string fieldName,string fieldKeyName, int fragmentLength,int fragmentCount,out FieldHighlightings highlightings)
//TBD 4.1 IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.Highlight<TValue>(Expression<Func<T, TValue>> propertySelector, int fragmentLength, int fragmentCount, Expression<Func<T, IEnumerable>> fragmentsPropertySelector)
//TBD 4.1 IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.Highlight<TValue>(Expression<Func<T, TValue>> propertySelector, int fragmentLength, int fragmentCount, out FieldHighlightings fieldHighlightings)
//TBD 4.1 IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.Highlight<TValue>(Expression<Func<T, TValue>> propertySelector, Expression<Func<T, TValue>> keyPropertySelector, int fragmentLength, int fragmentCount, out FieldHighlightings fieldHighlightings)
//TBD 4.1 IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.SetHighlighterTags(string preTag, string postTag)
//TBD 4.1 IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.SetHighlighterTags(string[] preTags, string[] postTags)
//TBD expr IDocumentQuery<T> Spatial(Expression<Func<T, object>> path, Func<SpatialCriteriaFactory, SpatialCriteria> clause)
func (q *DocumentQuery) Spatial3(fieldName string, clause func(*SpatialCriteriaFactory) SpatialCriteria) *DocumentQuery {
if q.err != nil {
return q
}
criteria := clause(spatialCriteriaFactoryInstance)
q.err = q.spatial3(fieldName, criteria)
return q
}
func (q *DocumentQuery) Spatial2(field DynamicSpatialField, clause func(*SpatialCriteriaFactory) SpatialCriteria) *DocumentQuery {
if q.err != nil {
return q
}
criteria := clause(spatialCriteriaFactoryInstance)
q.err = q.spatial2(field, criteria)
return q
}
//TBD expr IDocumentQuery<T> Spatial(Func<SpatialDynamicFieldFactory<T>, DynamicSpatialField> field, Func<SpatialCriteriaFactory, SpatialCriteria> clause)
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.WithinRadiusOf<TValue>(Expression<Func<T, TValue>> propertySelector, float64 radius, float64 latitude, float64 longitude, SpatialUnits? radiusUnits, float64 distanceErrorPct)
func (q *DocumentQuery) WithinRadiusOf(fieldName string, radius float64, latitude float64, longitude float64) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.withinRadiusOf(fieldName, radius, latitude, longitude, "", IndexingSpatialDefaultDistnaceErrorPct)
return q
}
func (q *DocumentQuery) WithinRadiusOfWithUnits(fieldName string, radius float64, latitude float64, longitude float64, radiusUnits SpatialUnits) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.withinRadiusOf(fieldName, radius, latitude, longitude, radiusUnits, IndexingSpatialDefaultDistnaceErrorPct)
return q
}
func (q *DocumentQuery) WithinRadiusOfWithUnitsAndError(fieldName string, radius float64, latitude float64, longitude float64, radiusUnits SpatialUnits, distanceErrorPct float64) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.withinRadiusOf(fieldName, radius, latitude, longitude, radiusUnits, distanceErrorPct)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.RelatesToShape<TValue>(Expression<Func<T, TValue>> propertySelector, string shapeWkt, SpatialRelation relation, float64 distanceErrorPct)
func (q *DocumentQuery) RelatesToShape(fieldName string, shapeWkt string, relation SpatialRelation) *DocumentQuery {
return q.RelatesToShapeWithError(fieldName, shapeWkt, relation, IndexingSpatialDefaultDistnaceErrorPct)
}
func (q *DocumentQuery) RelatesToShapeWithError(fieldName string, shapeWkt string, relation SpatialRelation, distanceErrorPct float64) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.spatial(fieldName, shapeWkt, relation, distanceErrorPct)
return q
}
// OrderByDistanceLatLongDynamic orders a given field by lat / long
func (q *DocumentQuery) OrderByDistanceLatLongDynamic(field DynamicSpatialField, latitude float64, longitude float64) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByDistanceLatLongDynamic(field, latitude, longitude)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.OrderByDistance(Func<DynamicSpatialFieldFactory<T>, DynamicSpatialField> field, float64 latitude, float64 longitude)
func (q *DocumentQuery) OrderByDistanceWktDynamic(field DynamicSpatialField, shapeWkt string) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByDistanceWktDynamic(field, shapeWkt)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.OrderByDistance(Func<DynamicSpatialFieldFactory<T>, DynamicSpatialField> field, string shapeWkt)
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.OrderByDistance<TValue>(Expression<Func<T, TValue>> propertySelector, float64 latitude, float64 longitude)
func (q *DocumentQuery) OrderByDistanceLatLong(fieldName string, latitude float64, longitude float64) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByDistanceLatLong(fieldName, latitude, longitude)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.OrderByDistance<TValue>(Expression<Func<T, TValue>> propertySelector, string shapeWkt)
func (q *DocumentQuery) OrderByDistanceWkt(fieldName string, shapeWkt string) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByDistance3(fieldName, shapeWkt)
return q
}
func (q *DocumentQuery) OrderByDistanceDescendingLatLongDynamic(field DynamicSpatialField, latitude float64, longitude float64) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByDistanceDescendingLatLongDynamic(field, latitude, longitude)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.OrderByDistanceDescending(Func<DynamicSpatialFieldFactory<T>, DynamicSpatialField> field, float64 latitude, float64 longitude)
func (q *DocumentQuery) OrderByDistanceDescendingWktDynamic(field DynamicSpatialField, shapeWkt string) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByDistanceDescendingWktDynamic(field, shapeWkt)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.OrderByDistanceDescending(Func<DynamicSpatialFieldFactory<T>, DynamicSpatialField> field, string shapeWkt)
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.OrderByDistanceDescending<TValue>(Expression<Func<T, TValue>> propertySelector, float64 latitude, float64 longitude)
func (q *DocumentQuery) OrderByDistanceDescendingLatLong(fieldName string, latitude float64, longitude float64) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByDistanceDescendingLatLong(fieldName, latitude, longitude)
return q
}
//TBD expr IDocumentQuery<T> IDocumentQueryBase<T, IDocumentQuery<T>>.OrderByDistanceDescending<TValue>(Expression<Func<T, TValue>> propertySelector, string shapeWkt)
func (q *DocumentQuery) OrderByDistanceDescendingWkt(fieldName string, shapeWkt string) *DocumentQuery {
if q.err != nil {
return q
}
q.err = q.orderByDistanceDescendingWkt(fieldName, shapeWkt)
return q
}
// MoreLikeThis adds a moreLikeThis clause configured from the given options.
// The underlying moreLikeThis scope is opened here and closed via defer so
// cleanup runs on every exit path.
func (q *DocumentQuery) MoreLikeThis(moreLikeThis MoreLikeThisBase) *DocumentQuery {
	if q.err != nil {
		return q
	}
	mlt, err := q.moreLikeThis()
	if err != nil {
		q.err = err
		return q
	}
	defer mlt.Close()
	mlt.withOptions(moreLikeThis.GetOptions())
	// Only the document-based variant carries an inline JSON document.
	if mltud, ok := moreLikeThis.(*MoreLikeThisUsingDocument); ok {
		mlt.withDocument(mltud.documentJSON)
	}
	return q
}
// MoreLikeThisWithBuilder adds a moreLikeThis clause configured through the
// given builder callback.
// Fixes vs. the original: Close is deferred (the original called it inline,
// so a panic in the ForDocumentQuery callback would leak the open scope —
// MoreLikeThis already defers its Close), and GetMoreLikeThis is called once
// instead of twice.
func (q *DocumentQuery) MoreLikeThisWithBuilder(builder func(IMoreLikeThisBuilderForDocumentQuery)) *DocumentQuery {
	if q.err != nil {
		return q
	}
	f := NewMoreLikeThisBuilder()
	builder(f)
	moreLikeThis, err := q.moreLikeThis()
	if err != nil {
		q.err = err
		return q
	}
	// Ensure the scope is closed on every exit path, mirroring MoreLikeThis.
	defer moreLikeThis.Close()
	mlt := f.GetMoreLikeThis()
	moreLikeThis.withOptions(mlt.GetOptions())
	switch v := mlt.(type) {
	case *MoreLikeThisUsingDocument:
		moreLikeThis.withDocument(v.documentJSON)
	case *MoreLikeThisUsingDocumentForDocumentQuery:
		v.GetForDocumentQuery()(q)
	}
	return q
}
// SuggestUsing creates a suggestion query from the given suggestion
// definition. Any pre-existing or newly produced error is propagated onto
// the returned SuggestionDocumentQuery.
func (q *DocumentQuery) SuggestUsing(suggestion SuggestionBase) *SuggestionDocumentQuery {
	res := newSuggestionDocumentQuery(q)
	if q.err == nil {
		q.err = q.suggestUsing(suggestion)
	}
	res.err = q.err
	return res
}
|
package limiter
import (
"sync"
"time"
)
const (
	default_counter_limit_num     = 10
	default_counter_interval_nano = 10
)

// CounterLimit 计数限流: a fixed-window counter rate limiter that admits at
// most limitNum requests inside each intervalNano-long window.
type CounterLimit struct {
	counterNum   int64 // requests counted since the current window started (the window-opening request is not counted)
	limitNum     int64 // maximum requests allowed per window
	intervalNano int64 // window length in nanoseconds
	lastNano     int64 // unix timestamp (nanoseconds) of the current window start
	lock         sync.RWMutex
}

// NewCounterLimit builds a CounterLimit from the params map.
// Fix vs. the original: a missing key or a non-int64 value no longer panics
// on the type assertion — it falls back to the package default, as does a
// non-positive value.
func NewCounterLimit(params map[interface{}]interface{}) *CounterLimit {
	limitNum, ok := params["limit_num"].(int64)
	if !ok || limitNum <= 0 {
		limitNum = default_counter_limit_num
	}
	intervalNano, ok := params["interval_nano"].(int64)
	if !ok || intervalNano <= 0 {
		intervalNano = default_counter_interval_nano
	}
	return &CounterLimit{
		counterNum:   0,
		limitNum:     limitNum,
		intervalNano: intervalNano,
		lastNano:     0,
	}
}

// Allow reports whether the current request fits inside the window limit.
// It is safe for concurrent use.
func (c *CounterLimit) Allow() bool {
	c.lock.Lock()
	defer c.lock.Unlock()
	now := time.Now().UnixNano()
	// A new window has started: reset the counter and admit this request.
	if now-c.lastNano > c.intervalNano {
		c.counterNum = 0
		c.lastNano = now
		return true
	}
	// Count this request and admit while under the limit (the reset request
	// above plus counterNum < limitNum admissions = limitNum per window).
	c.counterNum = c.counterNum + 1
	return c.counterNum < c.limitNum
}
|
package builder
import (
"database/sql"
"fmt"
"github.com/astaxie/beego/orm"
"strings"
"github.com/chenwj93/utils"
)
// Update builds an SQL UPDATE statement: target table, SET-clause columns,
// their values, and an embedded WHERE-clause builder.
type Update struct {
	table string        // target table name
	cols  []string      // column names for the SET clause
	args  []interface{} // values matching cols, in order
	Where               // embedded WHERE-clause builder
}

// NewUpdate returns an empty Update builder.
func NewUpdate() *Update {
	return &Update{}
}

// Tb sets the target table. Only the first assignment wins; later calls on a
// non-empty table are ignored.
func (o *Update) Tb(tb string) *Update {
	if o.table == utils.EMPTY_STRING {
		o.table = tb
	}
	return o
}

// SetCols records the SET-clause columns. Ignored if columns are already set.
func (o *Update) SetCols(cols ...string) *Update {
	if len(o.cols) == 0 {
		o.cols = cols
	}
	return o
}

// SetArgs appends the values for the SET clause.
// NOTE(review): args are silently dropped when cols are unset or the counts
// differ — the caller gets no error; confirm this is intentional.
func (o *Update) SetArgs(args ...interface{}) *Update {
	if len(o.cols) != 0 && len(args) == len(o.cols) {
		o.args = append(o.args, args...)
	}
	return o
}
// GenStat renders the full UPDATE statement, or the empty string when the
// builder is incomplete (no table, no columns, or no args).
func (o *Update) GenStat() string {
	if len(o.cols) == 0 || len(o.args) == 0 || o.table == utils.EMPTY_STRING {
		return utils.EMPTY_STRING
	}
	var stat strings.Builder
	stat.WriteString(fmt.Sprintf("update %s set ", o.table))
	// Joining with " = ?, " leaves the last column without a placeholder;
	// the following " = ? " write completes it.
	cols := strings.Join(o.cols, " = ?, ")
	stat.WriteString(cols)
	stat.WriteString(" = ? ")
	stat.WriteString(o.GetWhere().ToString())
	return stat.String()
}

// GenArgs returns the SET-clause values followed by the WHERE-clause values,
// matching the placeholder order produced by GenStat.
func (o *Update) GenArgs() (param []interface{}) {
	param = append(param, o.args...)
	param = append(param, o.GetParamWhere()...)
	return
}

// Exec runs the generated statement through the given beego ORM handle.
func (o *Update) Exec(or orm.Ormer) (sql.Result, error) {
	return or.Raw(o.GenStat(), o.GenArgs()...).Exec()
}
|
package cregexp
import "regexp"
const langRus = "rus"
const langEng = "eng"
// phoneRe is compiled once at package init instead of on every call
// (per-call regexp.MustCompile in a validator is a hot-path allocation).
var phoneRe = regexp.MustCompile(`^(\s*)?(\+)?([- _():=+]?\d[- _():=+]?){10,14}(\s*)?$`)

// CheckPhone checks the correctness of the telephone number.
// Pattern: ^(\s*)?(\+)?([- _():=+]?\d[- _():=+]?){10,14}(\s*)?$
// Examples: +7(903)888-88-88, +79161234567, 8(999)99-999-99, +380(67)777-7-777
func CheckPhone(someString string) bool {
	return phoneRe.MatchString(someString)
}
// card16SpaceRe is compiled once at package init instead of on every call.
var card16SpaceRe = regexp.MustCompile(`(\d{4}\s([-]|)\d{4}\s([-]|)\d{4}\s([-]|)\d{4})`)

// Card16Space checks a credit card: 16 numbers, with spaces.
// Pattern: (\d{4}\s([-]|)\d{4}\s([-]|)\d{4}\s([-]|)\d{4})
func Card16Space(someString string) bool {
	return card16SpaceRe.MatchString(someString)
}
// card16Re is compiled once at package init instead of on every call.
var card16Re = regexp.MustCompile(`(\d{4}([-]|)\d{4}([-]|)\d{4}([-]|)\d{4})`)

// Card16NoSpace reports whether someString contains a 16-digit credit-card
// number in four groups, optionally dash-separated, without spaces.
// Pattern: (\d{4}([-]|)\d{4}([-]|)\d{4}([-]|)\d{4})
func Card16NoSpace(someString string) bool {
	return card16Re.MatchString(someString)
}
// domainRe is compiled once at package init instead of on every call.
var domainRe = regexp.MustCompile(`^([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6}$`)

// CheckDomain reports whether someString is a well-formed domain name
// (labels of up to 63 chars ending in a 2-6 letter TLD).
// Pattern: ^([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6}$
func CheckDomain(someString string) bool {
	return domainRe.MatchString(someString)
}
// emailRe is compiled once at package init instead of on every call.
// Note: the pattern only accepts lowercase letters in the local part.
var emailRe = regexp.MustCompile(`^([a-z0-9_-]+\.)*[a-z0-9_-]+@[a-z0-9_-]+(\.[a-z0-9_-]+)*\.[a-z]{2,6}$`)

// CheckEmail reports whether someString is a well-formed email address.
// Pattern: ^([a-z0-9_-]+\.)*[a-z0-9_-]+@[a-z0-9_-]+(\.[a-z0-9_-]+)*\.[a-z]{2,6}$
func CheckEmail(someString string) bool {
	return emailRe.MatchString(someString)
}
// numRe is compiled once at package init instead of on every call.
var numRe = regexp.MustCompile(`^[0-9]+$`)

// OnlyNumbers reports whether someString is non-empty and consists
// exclusively of ASCII digits.
// Pattern: ^[0-9]+$
func OnlyNumbers(someString string) bool {
	return numRe.MatchString(someString)
}
// engRe is compiled once at package init instead of on every call.
var engRe = regexp.MustCompile(`^[a-zA-Z]+$`)

// OnlyEngCharacters reports whether someString is non-empty and consists
// exclusively of Latin letters.
// Pattern: ^[a-zA-Z]+$
func OnlyEngCharacters(someString string) bool {
	return engRe.MatchString(someString)
}
// rusRe is compiled once at package init instead of on every call.
var rusRe = regexp.MustCompile(`^[а-яА-ЯёЁ]+$`)

// OnlyRusCharacters reports whether someString is non-empty and consists
// exclusively of Cyrillic letters (including ё/Ё).
// Pattern: ^[а-яА-ЯёЁ]+$
func OnlyRusCharacters(someString string) bool {
	return rusRe.MatchString(someString)
}
|
package rotateimage
import "testing"
// urlString is a sample remote JPEG used to exercise the rotation code.
const urlString = "http://farm1.static.flickr.com/122/263784734_c262172550.jpg"
// TestRotate smoke-tests rotateImageBy90 against the sample image.
// NOTE(review): no assertion is made on the result and the test fetches
// over the network — consider a local fixture and an output check.
func TestRotate(t *testing.T) {
rotateImageBy90(urlString)
}
|
package controllers
import (
"github.com/aws/aws-lambda-go/events"
jsoniter "github.com/json-iterator/go"
"goscrum/server/models"
"goscrum/server/services"
"goscrum/server/util"
"net/http"
"time"
)
// GitlabController exposes GitLab-backed data (issues, commits, users,
// projects, user events) as AWS API Gateway Lambda handlers.
type GitlabController struct {
gitlab *services.GitlabService
}
// NewGitlabController wires a GitlabService into a new controller.
func NewGitlabController(gitlab *services.GitlabService) *GitlabController {
return &GitlabController{gitlab: gitlab}
}
// Issues decodes a RequestIssues payload from the request body, fetches
// the matching GitLab issues and returns them JSON-encoded.
func (g *GitlabController) Issues(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	var request models.RequestIssues
	if err := json.Unmarshal([]byte(req.Body), &request); err != nil {
		return util.ResponseError(http.StatusBadRequest, err.Error())
	}
	issues, err := g.gitlab.Issues(request)
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	body, err := json.Marshal(issues)
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	return util.Success(string(body))
}
// Commits returns the last 24 hours of commits for the project identified
// by the "projectId" path parameter, JSON-encoded.
func (g *GitlabController) Commits(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	projectID, err := util.GetStringKey(req.PathParameters, "projectId")
	if err != nil {
		return util.ServerError(err)
	}
	request := models.RequestCommits{Since: time.Now().AddDate(0, 0, -1)}
	commits, err := g.gitlab.Commits(request, projectID)
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	body, err := json.Marshal(commits)
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	return util.Success(string(body))
}
// Users looks up GitLab users matching the "search" query parameter and
// returns them JSON-encoded.
func (g *GitlabController) Users(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	search, err := util.GetStringKey(req.QueryStringParameters, "search")
	if err != nil {
		return util.ServerError(err)
	}
	users, err := g.gitlab.Users(search)
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	body, err := json.Marshal(users)
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	return util.Success(string(body))
}
// Projects looks up GitLab projects matching the "search" query parameter
// and returns them JSON-encoded.
func (g *GitlabController) Projects(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	search, err := util.GetStringKey(req.QueryStringParameters, "search")
	if err != nil {
		return util.ServerError(err)
	}
	// Local renamed from "users": this handler deals in projects.
	projects, err := g.gitlab.Projects(search)
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	body, err := json.Marshal(projects)
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	return util.Success(string(body))
}
// UserEvents returns the last 24 hours of contributions for the user in
// the "userId" path parameter, filtered by the "action" query parameter.
func (g *GitlabController) UserEvents(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	action, err := util.GetStringKey(req.QueryStringParameters, "action")
	if err != nil {
		return util.ServerError(err)
	}
	userID, err := util.GetStringKey(req.PathParameters, "userId")
	if err != nil {
		return util.ServerError(err)
	}
	events_, err := g.gitlab.UserContributions(userID, action, time.Now().AddDate(0, 0, -1))
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	body, err := json.Marshal(events_)
	if err != nil {
		return util.ResponseError(http.StatusInternalServerError, err.Error())
	}
	return util.Success(string(body))
}
|
package arrays
import (
"fmt"
)
// sudoku2 reports whether the given 9x9 grid (digit strings, "." for an
// empty cell) is a valid Sudoku position: no filled value repeats within
// its row, its column, or its 3x3 box. Empty cells are ignored.
//
// Removed dead code from the box pass: a `val = grid[a][b]` reassignment
// that only ever ran on the final box cell (no effect) and a trailing
// no-op `continue` guard.
func sudoku2(grid [][]string) bool {
	// Row and column check: every filled cell must be unique in its row
	// and in its column.
	for i := range grid {
		for j := range grid[i] {
			if grid[i][j] == "." {
				continue
			}
			for k := 0; k < len(grid); k++ {
				if k != j && grid[i][k] == grid[i][j] {
					return false
				}
				if k != i && grid[k][j] == grid[i][j] {
					return false
				}
			}
		}
	}
	// Box check: every filled cell must be unique within its 3x3 box.
	for r := 0; r < len(grid); r += 3 {
		for c := 0; c < len(grid); c += 3 {
			for i := r; i < r+3; i++ {
				for j := c; j < c+3; j++ {
					if grid[i][j] == "." {
						continue
					}
					val := grid[i][j]
					// NOTE(review): debug print kept to preserve observable
					// behavior (and the file's fmt import); consider removing.
					fmt.Println(val)
					for a := r; a < r+3; a++ {
						for b := c; b < c+3; b++ {
							if a == i && b == j {
								continue
							}
							if val == grid[a][b] {
								return false
							}
						}
					}
				}
			}
		}
	}
	return true
}
|
package main
import (
"flag"
"fmt"
"go/ast"
"go/format"
"go/importer"
"go/parser"
"go/token"
"go/types"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
)
// exprgenSuffix names generated files; such files are excluded when
// re-parsing a package so generation is idempotent.
const exprgenSuffix = "_exprgen.go"

// main generates Fetch helpers for every directory named on the command
// line, exiting with status 1 on usage error and 2 on generation failure.
func main() {
	flag.Parse()
	dirs := flag.Args()
	if len(dirs) == 0 {
		flag.Usage()
		os.Exit(1)
	}
	for _, dir := range dirs {
		err := generate(dir)
		if err == nil {
			continue
		}
		fmt.Fprintf(os.Stderr, "generate '%s' error: %s", dir, err.Error())
		os.Exit(2)
	}
}
// generate type-checks every non-test package found in the directory
// filename and writes the generated Fetch helpers for each to
// <pkg>_exprgen.go inside it. Previously generated files are excluded
// from parsing so the tool can be re-run safely.
func generate(filename string) error {
	info, err := os.Stat(filename)
	if err != nil {
		return fmt.Errorf("stat err: %w", err)
	}
	if !info.IsDir() {
		return fmt.Errorf("filename must be dir")
	}
	fset := token.NewFileSet()
	skipGenerated := func(fi os.FileInfo) bool {
		return !strings.HasSuffix(fi.Name(), exprgenSuffix)
	}
	packages, err := parser.ParseDir(fset, filename, skipGenerated, parser.ParseComments)
	if err != nil {
		return fmt.Errorf("parse dir error: %w", err)
	}
	checker := types.Config{
		Importer: importer.ForCompiler(fset, "source", nil),
	}
	for name, pkg := range packages {
		if strings.HasSuffix(name, "_test") {
			continue
		}
		files := make([]*ast.File, 0, len(pkg.Files))
		for _, f := range pkg.Files {
			files = append(files, f)
		}
		checked, err := checker.Check(name, fset, files, nil)
		if err != nil {
			return fmt.Errorf("types check error: %w", err)
		}
		src, err := fileData(name, checked)
		if err != nil {
			return err
		}
		if err := ioutil.WriteFile(filepath.Join(filename, name+exprgenSuffix), src, 0644); err != nil {
			return err
		}
	}
	return nil
}
// fileData renders the generated "_exprgen.go" source for package pkgName:
// a toInt helper plus a `Fetch(i interface{}) interface{}` method for every
// non-alias named type whose underlying type is a string, slice, array,
// map or struct. The assembled text is passed through format.Source, so
// the result is gofmt-formatted (or an error if the generated code is
// syntactically invalid).
//
// Bug fix: when two promoted struct fields collide, the "-" ambiguity
// marker was immediately overwritten, so duplicates silently resolved to
// whichever accessor was collected last; the marker now sticks and the
// ambiguous field is skipped.
func fileData(pkgName string, pkg *types.Package) ([]byte, error) {
	var data string
	// echo appends one formatted line; echoRaw appends a pre-built chunk.
	echo := func(s string, xs ...interface{}) {
		data += fmt.Sprintf(s, xs...) + "\n"
	}
	echoRaw := func(s string) {
		data += fmt.Sprint(s) + "\n"
	}
	echo(`// Code generated by exprgen. DO NOT EDIT.`)
	echo(``)
	echo(`package ` + pkgName)
	echo(``)
	// Placeholder replaced with the accumulated import block at the end.
	echo(`--imports`)
	echo(``)
	echoRaw(`func toInt(a interface{}) int {
	switch x := a.(type) {
	case float32:
		return int(x)
	case float64:
		return int(x)
	case int:
		return x
	case int8:
		return int(x)
	case int16:
		return int(x)
	case int32:
		return int(x)
	case int64:
		return int(x)
	case uint:
		return int(x)
	case uint8:
		return int(x)
	case uint16:
		return int(x)
	case uint32:
		return int(x)
	case uint64:
		return int(x)
	default:
		panic(fmt.Sprintf("invalid operation: int(%T)", x))
	}
}`)
	echo(``)
	imports := make(map[string]string)
	imports["fmt"] = "fmt"
	scope := pkg.Scope()
	for _, objectName := range scope.Names() {
		obj := scope.Lookup(objectName)
		// we should generate methods only for non-alias types
		tn, ok := obj.(*types.TypeName)
		if !ok || tn.IsAlias() {
			continue
		}
		namedType, ok := obj.Type().(*types.Named)
		if !ok {
			continue
		}
		// Reuse an existing method's receiver name so the generated Fetch
		// blends in; default to "v".
		recvName := "v"
		for i := 0; i < namedType.NumMethods(); i++ {
			method := namedType.Method(i)
			signature := method.Type().(*types.Signature)
			recv := signature.Recv()
			if recv != nil && recv.Name() != "" {
				recvName = recv.Name()
				break
			}
		}
		switch t := namedType.Underlying().(type) {
		case *types.Basic:
			if t.Kind() != types.String {
				break
			}
			echo("func (%s %s) Fetch(i interface{}) interface{} {", recvName, objectName)
			echo("return %s[toInt(i)]", recvName)
			echo("}")
		case *types.Slice, *types.Array:
			echo("func (%s %s) Fetch(i interface{}) interface{} {", recvName, objectName)
			echo("return %s[toInt(i)]", recvName)
			echo("}")
		case *types.Map:
			echo("func (%s %s) Fetch(i interface{}) interface{} {", recvName, objectName)
			key := t.Key()
			numericCases := []string{
				"int",
				"int8",
				"int16",
				"int32",
				"int64",
				"uint",
				"uint8",
				"uint16",
				"uint32",
				"uint64",
				"uintptr",
				"float32",
				"float64",
			}
			switch k := key.(type) {
			case *types.Named:
				objKey := k.Obj()
				keyName := objKey.Name()
				if objKey.Pkg().Path() != pkg.Path() {
					// Foreign key type: import its package, de-duplicating
					// import names by appending "1" until unique.
					path := objKey.Pkg().Path()
					name := objKey.Pkg().Name()
					for imports[name] != "" && path != imports[name] {
						name = name + "1"
					}
					imports[name] = path
					keyName = name + "." + keyName
				}
				echo(`switch _x_i := i.(type) {`)
				echo("case %s:", keyName)
				echo("return %s[_x_i]", recvName)
				if basicKey, ok := k.Underlying().(*types.Basic); ok {
					if basicKey.Info()&types.IsNumeric != 0 {
						for _, c := range numericCases {
							echo("case %s:", c)
							echo("return %s[%s(_x_i)]", recvName, keyName)
						}
					}
					if basicKey.Info()&types.IsString != 0 {
						echo(`case string:`)
						echo("return %s[%s(_x_i)]", recvName, keyName)
						echo("default:")
						imports["fmt"] = "fmt"
						echo("return %s[%s(fmt.Sprint(i))]", recvName, keyName)
					}
				}
				echo(`}`)
			case *types.Basic:
				keyName := k.String()
				echo(`switch _x_i := i.(type) {`)
				echo("case %s:", keyName)
				echo("return %s[_x_i]", recvName)
				if k.Info()&types.IsNumeric != 0 {
					for _, c := range numericCases {
						if c == keyName {
							continue
						}
						echo("case %s:", c)
						echo("return %s[%s(_x_i)]", recvName, keyName)
					}
				}
				if k.Info()&types.IsString != 0 {
					echo("default:")
					imports["fmt"] = "fmt"
					echo("return %s[%s(fmt.Sprint(i))]", recvName, keyName)
				}
				echo(`}`)
			}
			echo("return nil")
			echo(`}`)
		case *types.Struct:
			echo("func (%s %s) Fetch(i interface{}) interface{} {", recvName, objectName)
			fields := make(map[string]string)
			collectStruct(recvName, t, func(c string, r string) {
				if _, ok := fields[c]; ok {
					// Ambiguous promoted field: keep the marker so the
					// field is skipped below (was overwritten before).
					fields[c] = "-"
					return
				}
				fields[c] = r
			})
			keys := make([]string, 0, len(fields))
			for c, r := range fields {
				if r == "-" {
					continue
				}
				keys = append(keys, c)
			}
			sort.Strings(keys)
			imports["fmt"] = "fmt"
			echo(`var string_i string`)
			echo(`if s, ok := i.(string); ok {`)
			echo(`string_i = s`)
			echo(`} else {`)
			echo(`string_i = fmt.Sprint(i)`)
			echo(`}`)
			echo(`switch string_i {`)
			for _, key := range keys {
				echo("case \"%s\":", key)
				echo("return %s", fields[key])
			}
			echo(`}`)
			echo(`return nil`)
			echo(`}`)
		}
	}
	importsString := "import (\n"
	for k, v := range imports {
		importsString += k + "\"" + v + "\"\n"
	}
	importsString += ")"
	data = strings.Replace(data, "--imports", importsString, 1)
	return format.Source([]byte(data))
}
// collectStruct invokes collect(fieldName, accessorExpr) for every
// exported field reachable from recv, recursing through embedded structs.
// Promoted fields shadowed at an outer level (listed in skippedNames) are
// not re-collected from the embedded struct.
func collectStruct(recv string, t *types.Struct, collect func(string, string), skippedNames ...string) {
	outerNames := make([]string, 0, t.NumFields())
	for i := 0; i < t.NumFields(); i++ {
		outerNames = append(outerNames, t.Field(i).Name())
	}
	for i := 0; i < t.NumFields(); i++ {
		field := t.Field(i)
		name := field.Name()
		if !field.Exported() || contains(skippedNames, name) {
			continue
		}
		accessor := recv + "." + name
		collect(name, accessor)
		if !field.Embedded() {
			continue
		}
		// Peel pointers and named types until the underlying type is reached.
		tt := field.Type()
		for dereference(tt) != underlying(tt) {
			tt = underlying(dereference(tt))
		}
		if st, ok := tt.(*types.Struct); ok {
			collectStruct(accessor, st, collect, outerNames...)
		}
	}
}
func dereference(t types.Type) types.Type {
if p, ok := t.(*types.Pointer); ok {
return dereference(p.Elem())
}
return t
}
func underlying(t types.Type) types.Type {
if t != t.Underlying() {
return underlying(t.Underlying())
}
return t
}
// contains reports whether s occurs in arr.
func contains(arr []string, s string) bool {
	for i := range arr {
		if arr[i] == s {
			return true
		}
	}
	return false
}
|
package player
import (
"fmt"
"testing"
)
// TestPlayerName verifies SetName/GetName and that the player's string
// conversion (fmt %v) renders the name.
func TestPlayerName(t *testing.T) {
playerName := "Test Name"
p := NewPlayer("Name")
p.SetName(playerName)
if p.GetName() != playerName {
t.Errorf("Error setting player name")
}
if fmt.Sprintf("%v", p) != playerName {
t.Errorf("Player name should be in string conversion")
}
}
// TestPlayerScore verifies SetScore, GetScore and IncrementScore.
func TestPlayerScore(t *testing.T) {
	p := NewPlayer("Name")
	p.SetScore(8)
	if p.GetScore() != 8 {
		t.Errorf("Error in setting Player Score")
	}
	p.IncrementScore()
	if p.GetScore() != 9 {
		// Message previously misspelled "Player Sore".
		t.Errorf("Error incrementing Player Score")
	}
}
// TestPlayerDefenceSize checks that SetDefenceSize controls the length of
// the slice returned by Defend.
func TestPlayerDefenceSize(t *testing.T) {
p := NewPlayer("Name")
p.SetDefenceSize(8)
if len(p.Defend()) != 8 {
t.Errorf("Error in defence array creation size")
}
}
// TestPlayerRegisteration checks that a player sends itself on the
// championship channel. (Function name kept as-is for compatibility with
// existing test filters, despite the misspelling.)
func TestPlayerRegisteration(t *testing.T) {
	p := NewPlayer("Name")
	// Unbuffered channel; make(chan Player, 0) was equivalent but non-idiomatic.
	playerChannel := make(chan Player)
	go p.RegisterChampionship(playerChannel)
	rec := <-playerChannel
	if rec != p {
		// Message previously misspelled "Registeration".
		t.Errorf("Registration not working correctly")
	}
}
|
package main
import (
"fmt"
"os"
"gocv.io/x/gocv"
)
// main opens the camera named by argv[1], runs KNN background subtraction
// on each frame, masks the original frame with the foreground mask, and
// displays the result until ESC (27) is pressed.
func main() {
if len(os.Args) < 2 {
fmt.Println("How to run:\n\tcapwindow [camera ID]")
return
}
// parse args
deviceID := os.Args[1]
webcam, err := gocv.OpenVideoCapture(deviceID)
if err != nil {
fmt.Printf("Error opening video capture device: %v\n", deviceID)
return
}
defer webcam.Close()
window := gocv.NewWindow("Capture Window")
defer window.Close()
// raw: camera frame; mod: foreground mask; fin: masked output.
raw := gocv.NewMat()
defer raw.Close()
mod := gocv.NewMat()
defer mod.Close()
fin := gocv.NewMat()
defer fin.Close()
// Alternative subtractors tried during development, kept for reference:
// sub := gocv.NewBackgroundSubtractorMOG2()
// sub := gocv.NewBackgroundSubtractorMOG2WithParams(100, 3, false)
// sub := gocv.NewBackgroundSubtractorKNN()
sub := gocv.NewBackgroundSubtractorKNNWithParams(400, 10, false)
defer sub.Close()
fmt.Printf("Start reading device: %v\n", deviceID)
for {
if ok := webcam.Read(&raw); !ok {
fmt.Printf("Device closed: %v\n", deviceID)
return
}
if raw.Empty() {
continue
}
// Compute the foreground mask, expand it to 3 channels, then keep
// only the masked pixels of the original frame.
sub.Apply(raw, &mod)
// mod.CopyTo(&fin)
// gocv.Subtract(mod, raw, &fin)
// raw.CopyToWithMask(&fin, mod)
// mod.CopyToWithMask(&fin, raw)
gocv.CvtColor(mod, &mod, gocv.ColorGrayToBGR)
gocv.BitwiseAnd(raw, mod, &fin)
// gocv.BitwiseNot(mod, &fin)
// gocv.BitwiseAndWithMask(raw, mod, &fin)
window.IMShow(fin)
if window.WaitKey(1) == 27 {
break
}
}
}
|
// Copyright 2018 NetApp, Inc. All Rights Reserved.
package persistentstore
import (
"fmt"
log "github.com/sirupsen/logrus"
)
// EtcdDataMigrator copies keys from one etcd (SourceClient) to another
// (DestClient).
type EtcdDataMigrator struct {
SourceClient EtcdClient
DestClient EtcdClient
}
// NewEtcdDataMigrator builds a migrator over the given source and
// destination etcd clients.
func NewEtcdDataMigrator(SourceClient, DestClient EtcdClient) *EtcdDataMigrator {
return &EtcdDataMigrator{
SourceClient: SourceClient,
DestClient: DestClient,
}
}
// Start copies every key under keyPrefix from the source etcd to the
// destination etcd, optionally deleting each key from the source after a
// successful copy. A missing prefix is treated as "nothing to migrate".
func (m *EtcdDataMigrator) Start(keyPrefix string, deleteSrc bool) error {
	keys, err := m.SourceClient.ReadKeys(keyPrefix)
	if err != nil {
		if MatchKeyNotFoundErr(err) {
			log.Infof("No key with prefix %v to migrate.", keyPrefix)
			return nil
		}
		return fmt.Errorf("reading keys from the source client failed: %v", err)
	}
	for _, key := range keys {
		if err := m.migrateKey(key, deleteSrc); err != nil {
			return err
		}
	}
	return nil
}

// migrateKey copies a single key/value pair to the destination and, when
// requested, removes it from the source afterwards.
func (m *EtcdDataMigrator) migrateKey(key string, deleteSrc bool) error {
	val, err := m.SourceClient.Read(key)
	if err != nil {
		return fmt.Errorf("reading key %v by the source client failed: %v",
			key, err)
	}
	log.WithFields(log.Fields{
		"key": key,
	}).Debug("Read key from the source.")
	if err := m.DestClient.Set(key, val); err != nil {
		return fmt.Errorf("setting key %v by the destination client failed: %v",
			key, err)
	}
	log.WithFields(log.Fields{
		"key": key,
	}).Debug("Wrote key to the destination.")
	if !deleteSrc {
		return nil
	}
	if err := m.SourceClient.Delete(key); err != nil {
		return fmt.Errorf("deleting key %v by the source client failed: %v",
			key, err)
	}
	log.WithFields(log.Fields{
		"key": key,
	}).Debug("Deleted key from the source.")
	return nil
}
// Stop closes both etcd clients. Both clients are always stopped —
// previously a source-side failure returned early and left the
// destination client open. The source error is reported first.
func (m *EtcdDataMigrator) Stop() error {
	srcErr := m.SourceClient.Stop()
	dstErr := m.DestClient.Stop()
	if srcErr != nil {
		return fmt.Errorf("closing the source etcd client failed: %v", srcErr)
	}
	if dstErr != nil {
		return fmt.Errorf("closing the destination etcd client failed: %v",
			dstErr)
	}
	return nil
}
|
// Copyright (C) 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capture
import (
"bufio"
"context"
"fmt"
"io"
"github.com/google/gapid/core/app/analytics"
"github.com/google/gapid/core/app/status"
"github.com/google/gapid/core/data/id"
"github.com/google/gapid/core/data/pack"
"github.com/google/gapid/core/data/protoconv"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/math/interval"
"github.com/google/gapid/core/memory/arena"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/database"
"github.com/google/gapid/gapis/memory"
"github.com/google/gapid/gapis/messages"
"github.com/google/gapid/gapis/replay/value"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
"github.com/pkg/errors"
)
const (
// CurrentCaptureVersion is incremented on breaking changes to the capture format.
// NB: Also update equally named field in spy_base.cpp
CurrentCaptureVersion int32 = 3
)
// ErrUnsupportedVersion is returned when a capture's format version does
// not match CurrentCaptureVersion.
type ErrUnsupportedVersion struct{ Version int32 }
// Error implements the error interface.
func (e ErrUnsupportedVersion) Error() string {
return fmt.Sprintf("Unsupported capture format version: %+v", e.Version)
}
// GraphicsCapture is the in-memory representation of a loaded graphics
// trace: its header, command stream, the APIs it uses, observed memory
// ranges, optional mid-execution state and trace messages, all allocated
// from Arena.
type GraphicsCapture struct {
name string
Header *Header
Commands []api.Cmd
APIs []api.API
Observed interval.U64RangeList
InitialState *InitialState
Arena arena.Arena
Messages []*TraceMessage
}
// Name returns the capture's name.
func (g *GraphicsCapture) Name() string {
return g.name
}
// Path returns the path of this capture in the database (delegates to New).
func (g *GraphicsCapture) Path(ctx context.Context) (*path.Capture, error) {
return New(ctx, g)
}
// InitialState holds a capture's mid-execution starting point: serialized
// memory observations plus one serialized state per API.
type InitialState struct {
Memory []api.CmdObservation
APIs map[api.API]api.State
}
// init registers protoconv converters between InitialState and its
// GlobalState proto. NOTE(review): both directions return empty values —
// presumably the real content is (de)serialized elsewhere; confirm before
// relying on these converters.
func init() {
protoconv.Register(
func(ctx context.Context, in *InitialState) (*GlobalState, error) {
return &GlobalState{}, nil
},
func(ctx context.Context, in *GlobalState) (*InitialState, error) {
return &InitialState{APIs: map[api.API]api.State{}}, nil
},
)
}
// NewGraphicsCapture returns a new graphics capture with the given name,
// header, optional initial state and commands, using the arena a for
// allocations. The header is copied and stamped with CurrentCaptureVersion.
func NewGraphicsCapture(ctx context.Context, a arena.Arena, name string, header *Header, initialState *InitialState, cmds []api.Cmd) (*GraphicsCapture, error) {
	b := newBuilder(a)
	if initialState != nil {
		for _, s := range initialState.APIs {
			b.addInitialState(ctx, s)
		}
		for _, m := range initialState.Memory {
			b.addInitialMemory(ctx, m)
		}
	}
	for _, c := range cmds {
		b.addCmd(ctx, c)
	}
	hdr := *header
	hdr.Version = CurrentCaptureVersion
	return b.build(name, &hdr), nil
}
// NewState returns a new, default-initialized State object built for the
// capture held by the context.
func NewState(ctx context.Context) (*api.GlobalState, error) {
	gc, err := ResolveGraphics(ctx)
	if err != nil {
		return nil, err
	}
	return gc.NewState(ctx), nil
}
// NewUninitializedState returns a new, uninitialized GlobalState built for the
// capture c. The returned state does not contain the capture's mid-execution
// state.
func (c *GraphicsCapture) NewUninitializedState(ctx context.Context) *api.GlobalState {
// Allocate only from ranges the trace never observed, keeping the low
// region below FirstValidAddress reserved.
freeList := memory.InvertMemoryRanges(c.Observed)
interval.Remove(&freeList, interval.U64Span{Start: 0, End: value.FirstValidAddress})
s := api.NewStateWithAllocator(
memory.NewBasicAllocator(freeList),
c.Header.ABI.MemoryLayout,
)
return s
}
// NewState returns a new, initialized GlobalState object built for the capture
// c. If the capture contains a mid-execution state, then this will be copied
// into the returned state.
func (c *GraphicsCapture) NewState(ctx context.Context) *api.GlobalState {
out := c.NewUninitializedState(ctx)
if c.InitialState != nil {
ctx = status.Start(ctx, "CloneState")
defer status.Finish(ctx)
// Rebuild all the writes into the memory pools.
for _, m := range c.InitialState.Memory {
// Get's error is ignored: a missing pool is simply created below.
pool, _ := out.Memory.Get(memory.PoolID(m.Pool))
if pool == nil {
pool = out.Memory.NewAt(memory.PoolID(m.Pool))
}
pool.Write(m.Range.Base, memory.Resource(m.ID, m.Range.Size))
}
// Clone serialized state, and initialize it for use.
for k, v := range c.InitialState.APIs {
s := v.Clone(out.Arena)
s.SetupInitialState(ctx)
out.APIs[k.ID()] = s
}
}
return out
}
// CloneInitialState clones this capture's initial state and returns it,
// or nil when there is none. Memory observations are shared; API states
// are cloned into arena a.
func (c *GraphicsCapture) CloneInitialState(a arena.Arena) *InitialState {
	if c.InitialState == nil {
		return nil
	}
	is := &InitialState{
		Memory: c.InitialState.Memory,
		APIs:   make(map[api.API]api.State, len(c.InitialState.APIs)),
	}
	// Loop variable renamed from "api", which shadowed the imported api
	// package inside this loop.
	for k, s := range c.InitialState.APIs {
		is.APIs[k] = s.Clone(a)
	}
	return is
}
// Service returns the service.Capture description for this capture.
func (c *GraphicsCapture) Service(ctx context.Context, p *path.Capture) *service.Capture {
apis := make([]*path.API, len(c.APIs))
for i, a := range c.APIs {
apis[i] = &path.API{ID: path.NewID(id.ID(a.ID()))}
}
// Observed memory ranges can be numerous; honor the path's opt-out flag.
var observations []*service.MemoryRange
if !p.ExcludeMemoryRanges {
observations = make([]*service.MemoryRange, len(c.Observed))
for i, o := range c.Observed {
observations[i] = &service.MemoryRange{Base: o.First, Size: o.Count}
}
}
return &service.Capture{
Type: service.TraceType_Graphics,
Name: c.name,
Device: c.Header.Device,
ABI: c.Header.ABI,
NumCommands: uint64(len(c.Commands)),
APIs: apis,
Observations: observations,
}
}
// Export encodes the given capture and associated resources
// and writes it to the supplied io.Writer in the .gfxtrace format.
func (c *GraphicsCapture) Export(ctx context.Context, w io.Writer) error {
writer, err := pack.NewWriter(w)
if err != nil {
return err
}
e := newEncoder(c, writer)
// The encoder implements the ID Remapper interface,
// which protoconv functions need to handle resources.
ctx = id.PutRemapper(ctx, e)
return e.encode(ctx)
}
// isGFXTraceFormat reports whether the reader starts with the pack-file
// magic marker of a .gfxtrace.
func isGFXTraceFormat(in *bufio.Reader) bool {
return pack.CheckMagic(in)
}
// deserializeGFXTrace decodes a .gfxtrace record into a GraphicsCapture,
// translating both pack-level and capture-level version mismatches into
// service.ErrUnsupportedVersion with an appropriate user-facing reason.
func deserializeGFXTrace(ctx context.Context, r *Record, in io.Reader) (out *GraphicsCapture, err error) {
// Report deserialization timing plus input size / command count.
stopTiming := analytics.SendTiming("capture", "deserialize")
defer func() {
size := len(r.Data)
count := 0
if out != nil {
count = len(out.Commands)
}
stopTiming(analytics.Size(size), analytics.Count(count))
}()
a := arena.New()
// Bind the arena used to for all allocations for this capture.
ctx = arena.Put(ctx, a)
d := newDecoder(a)
// The decoder implements the ID Remapper interface,
// which protoconv functions need to handle resources.
ctx = id.PutRemapper(ctx, d)
if err := pack.Read(ctx, in, d, false); err != nil {
// Distinguish container-format version errors from capture-format
// version errors; anything else is returned as-is.
switch err := errors.Cause(err).(type) {
case pack.ErrUnsupportedVersion:
log.E(ctx, "%v", err)
switch {
case err.Version.Major > pack.MaxMajorVersion:
return nil, &service.ErrUnsupportedVersion{
Reason: messages.ErrFileTooNew(),
SuggestUpdate: true,
}
case err.Version.Major < pack.MinMajorVersion:
return nil, &service.ErrUnsupportedVersion{
Reason: messages.ErrFileTooOld(),
}
default:
return nil, &service.ErrUnsupportedVersion{
Reason: messages.ErrFileCannotBeRead(),
}
}
case ErrUnsupportedVersion:
switch {
case err.Version > CurrentCaptureVersion:
return nil, &service.ErrUnsupportedVersion{
Reason: messages.ErrFileTooNew(),
SuggestUpdate: true,
}
case err.Version < CurrentCaptureVersion:
return nil, &service.ErrUnsupportedVersion{
Reason: messages.ErrFileTooOld(),
}
default:
return nil, &service.ErrUnsupportedVersion{
Reason: messages.ErrFileCannotBeRead(),
}
}
}
return nil, err
}
d.flush(ctx)
if d.header == nil {
return nil, log.Err(ctx, nil, "Capture was missing header chunk")
}
return d.builder.build(r.Name, d.header), nil
}
// builder incrementally accumulates the parts of a GraphicsCapture while
// commands and initial state are added, and assembles them in build().
type builder struct {
apis []api.API
seenAPIs map[api.ID]struct{}
observed interval.U64RangeList
cmds []api.Cmd
resIDs []id.ID
initialState *InitialState
arena arena.Arena
messages []*TraceMessage
}
// newBuilder returns an empty builder allocating from a. resIDs starts
// with a zero ID so real resources occupy indices >= 1 (addRes treats
// index 0 as "no explicit index").
func newBuilder(a arena.Arena) *builder {
return &builder{
apis: []api.API{},
seenAPIs: map[api.ID]struct{}{},
observed: interval.U64RangeList{},
cmds: []api.Cmd{},
resIDs: []id.ID{id.ID{}},
arena: a,
initialState: &InitialState{APIs: map[api.API]api.State{}},
}
}
// addCmd appends cmd to the capture, registering its API and any read or
// write memory observations, and returns the command's index.
func (b *builder) addCmd(ctx context.Context, cmd api.Cmd) api.CmdID {
	b.addAPI(ctx, cmd.API())
	if observations := cmd.Extras().Observations(); observations != nil {
		for i := range observations.Reads {
			b.addObservation(ctx, &observations.Reads[i])
		}
		for i := range observations.Writes {
			b.addObservation(ctx, &observations.Writes[i])
		}
	}
	// Renamed from "id", which shadowed the imported id package.
	cmdID := api.CmdID(len(b.cmds))
	b.cmds = append(b.cmds, cmd)
	return cmdID
}
// addMessage records a copy of the trace message's timestamp and text.
func (b *builder) addMessage(ctx context.Context, t *TraceMessage) {
b.messages = append(b.messages, &TraceMessage{Timestamp: t.Timestamp, Message: t.Message})
}
// addAPI registers api exactly once (deduplicated via seenAPIs); nil APIs
// are ignored.
func (b *builder) addAPI(ctx context.Context, api api.API) {
if api != nil {
apiID := api.ID()
if _, found := b.seenAPIs[apiID]; !found {
b.seenAPIs[apiID] = struct{}{}
b.apis = append(b.apis, api)
}
}
}
// addObservation merges the observation's address range into the set of
// observed memory.
func (b *builder) addObservation(ctx context.Context, o *api.CmdObservation) {
interval.Merge(&b.observed, o.Range.Span(), true)
}
// addRes stores data in the database and appends its ID to the resource
// table. If the Resource carried the optional Index field (non-zero), it
// must match the slot being assigned. A mismatch is now reported as an
// error instead of a panic (don't panic in library code), and is detected
// before the database or the table is mutated.
func (b *builder) addRes(ctx context.Context, expectedIndex int64, data []byte) error {
	arrayIndex := int64(len(b.resIDs))
	if expectedIndex != 0 && arrayIndex != expectedIndex {
		return fmt.Errorf("Resource has array index %v but we expected %v", arrayIndex, expectedIndex)
	}
	dID, err := database.Store(ctx, data)
	if err != nil {
		return err
	}
	b.resIDs = append(b.resIDs, dID)
	return nil
}
// addInitialState registers the serialized initial state for one API and
// records the API itself. Each API may contribute at most one state.
func (b *builder) addInitialState(ctx context.Context, state api.State) error {
	if _, ok := b.initialState.APIs[state.API()]; ok {
		// Error string lowercased per Go convention.
		return fmt.Errorf("more than one set of initial state for API %v", state.API())
	}
	b.initialState.APIs[state.API()] = state
	b.addAPI(ctx, state.API())
	return nil
}
// addInitialMemory records a mid-execution memory observation and merges
// its range into the observed set.
func (b *builder) addInitialMemory(ctx context.Context, mem api.CmdObservation) error {
b.initialState.Memory = append(b.initialState.Memory, mem)
b.addObservation(ctx, &mem)
return nil
}
// build assembles the accumulated parts into a GraphicsCapture and emits
// a per-API analytics event.
func (b *builder) build(name string, header *Header) *GraphicsCapture {
for _, api := range b.apis {
analytics.SendEvent("capture", "uses-api", api.Name())
}
// TODO: Mark the arena as read-only.
return &GraphicsCapture{
name: name,
Header: header,
Commands: b.cmds,
Observed: b.observed,
APIs: b.apis,
InitialState: b.initialState,
Arena: b.arena,
Messages: b.messages,
}
}
|
package day13
import (
"testing"
"github.com/wistler/aoc-2020/internal/io"
)
// TestSampleData checks part1 against the worked example from the puzzle
// statement (earliest bus for timestamp 939).
func TestSampleData(t *testing.T) {
input := []string{
"939",
"7,13,x,x,59,x,31,19",
}
got := part1(input)
want := 295
if got != want {
t.Fatalf("Part 1: Got: %v, but wanted: %v", got, want)
}
}
// TestPart2 checks part2 against all published example schedules.
func TestPart2(t *testing.T) {
testCases := []struct {
input string
output int
}{
{input: "17,x,13,19", output: 3_417},
{input: "67,7,59,61", output: 754_018},
{input: "67,x,7,59,61", output: 779_210},
{input: "67,7,x,59,61", output: 1_261_476},
{input: "7,13,x,x,59,x,31,19", output: 1_068_781},
{input: "1789,37,47,1889", output: 1_202_161_486},
}
for _, tC := range testCases {
got := part2(tC.input, false)
if got != tC.output {
t.Fatalf("Part 2: Got: %v, but wanted: %v", got, tC.output)
}
}
}
// TestWithRealData runs both parts on the real puzzle input as a smoke
// test; the results are not asserted here.
func TestWithRealData(t *testing.T) {
input := io.ReadInputFile("./input.txt")
part1(input)
part2(input[1], true)
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package network
import (
"context"
"chromiumos/tast/errors"
"chromiumos/tast/local/bundles/cros/network/shillscript"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// init registers the ShillInitLogoutScript test with the tast framework.
func init() {
testing.AddTest(&testing.Test{
Func: ShillInitLogoutScript,
LacrosStatus: testing.LacrosVariantUnneeded,
Desc: "Test that shill init logout script perform as expected",
Contacts: []string{"hugobenichi@google.com", "cros-networking@google.com"},
SoftwareDeps: []string{"chrome"},
Attr: []string{"group:network"},
})
}
// ShillInitLogoutScript runs testLogout through the shillscript harness
// and fails the test on any error.
func ShillInitLogoutScript(ctx context.Context, s *testing.State) {
if err := shillscript.RunTest(ctx, testLogout, false); err != nil {
s.Fatal("Failed running testLogout: ", err)
}
}
// testLogout tests the logout process: it starts shill, logs in with
// Chrome, emulates logout by restarting the ui job, and verifies that
// shill pops all user profiles and removes the user-profile directory.
func testLogout(ctx context.Context, env *shillscript.TestEnv) error {
	if err := upstart.StartJob(ctx, "shill"); err != nil {
		return errors.Wrap(err, "failed starting shill")
	}
	cr, err := chrome.New(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to start Chrome")
	}
	defer cr.Close(ctx)
	timeoutCtx, cancel := context.WithTimeout(ctx, shillscript.DbusMonitorTimeout)
	defer cancel()
	stop, err := shillscript.DbusEventMonitor(timeoutCtx)
	if err != nil {
		return err
	}
	// TODO (b:159063029) Add a logout function in chrome.go and use it here
	// instead of restarting the ui.
	// Emulate logout.
	if err := upstart.RestartJob(ctx, "ui"); err != nil {
		stop()
		return errors.Wrap(err, "Chrome failed to log out")
	}
	calledMethods, err := stop()
	if err != nil {
		return err
	}
	expectedCalls := []string{shillscript.PopAllUserProfiles}
	if err := shillscript.AssureMethodCalls(ctx, expectedCalls, calledMethods); err != nil {
		return err
	}
	if err := shillscript.AssureNotExists(shillscript.ShillUserProfilesDir); err != nil {
		return err
	}
	profiles, err := shillscript.GetProfileList(ctx)
	if err != nil {
		return err
	}
	if len(profiles) > 1 {
		// Previously errors.Wrapf(err, ...) was used here with a provably
		// nil err — misleading at best, and under pkg/errors-style Wrap
		// semantics it returns nil, silently passing this check.
		return errors.Errorf("found unexpected number of profiles in the profile stack: got %d, want 1", len(profiles))
	}
	return nil
}
|
package updating
// Service provides updating operations for artists and artworks.
// (The comment previously said "adding operations" — a copy/paste from
// the adding package.)
type Service interface {
	SetArtistName(id int64, name string) (int64, error)
	SetArtworkTitle(id int64, title string) (int64, error)
	SetArtworkArtist(id int64, artistID int64) (int64, error)
}

// Repository provides access to the storage backing the updates.
type Repository interface {
	SetArtistName(id int64, name string) (int64, error)
	SetArtworkTitle(id int64, title string) (int64, error)
	SetArtworkArtist(id int64, artistID int64) (int64, error)
}

// service implements Service by delegating to a Repository.
type service struct {
	r Repository
}

// NewService creates an updating service with the necessary dependencies.
func NewService(r Repository) Service {
	return &service{r}
}

// SetArtistName updates an artist's name. Validation can be added here.
func (s *service) SetArtistName(id int64, name string) (int64, error) {
	return s.r.SetArtistName(id, name)
}

// SetArtworkTitle updates an artwork's title. Validation can be added here.
func (s *service) SetArtworkTitle(id int64, title string) (int64, error) {
	return s.r.SetArtworkTitle(id, title)
}

// SetArtworkArtist reassigns an artwork to another artist. Validation can
// be added here.
func (s *service) SetArtworkArtist(id int64, artistID int64) (int64, error) {
	return s.r.SetArtworkArtist(id, artistID)
}
|
package supervisor_test
import (
"bytes"
"encoding/json"
"fmt"
"github.com/gorilla/securecookie"
"github.com/gorilla/sessions"
"github.com/markbates/goth"
"github.com/markbates/goth/gothic"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/ghttp"
// sql drivers
_ "github.com/mattn/go-sqlite3"
"github.com/starkandwayne/shield/db"
)
// Database creates an in-memory SQLite database, runs Setup, applies the
// given SQL statements in order, and returns the live connection. On any
// failure the connection is torn down and the error returned.
func Database(sqls ...string) (*db.DB, error) {
	conn := &db.DB{Driver: "sqlite3", DSN: ":memory:"}
	if err := conn.Connect(); err != nil {
		return nil, err
	}
	if err := conn.Setup(); err != nil {
		conn.Disconnect()
		return nil, err
	}
	for _, stmt := range sqls {
		if err := conn.Exec(stmt); err != nil {
			conn.Disconnect()
			return nil, err
		}
	}
	return conn, nil
}
// JSONValidated sends a deliberately malformed JSON body ("}") to the
// handler and asserts that it responds 400 with the expected JSON error.
func JSONValidated(h http.Handler, method string, uri string) {
	req, _ := http.NewRequest(method, uri, strings.NewReader("}"))
	res := httptest.NewRecorder()
	h.ServeHTTP(res, req)
	Ω(res.Code).Should(Equal(400),
		fmt.Sprintf("%s %s should elicit HTTP 400 (Bad Request) response...", method, uri))
	Ω(res.Body.String()).Should(
		MatchJSON(`{"error":"bad JSON payload: invalid character '}' looking for beginning of value"}`),
		fmt.Sprintf("%s %s should have a JSON error in the Response Body...", method, uri))
}
// NotImplemented issues the request against h and asserts an HTTP 501
// response with an empty body.
func NotImplemented(h http.Handler, method string, uri string, body io.Reader) {
	req, _ := http.NewRequest(method, uri, body)
	res := httptest.NewRecorder()
	h.ServeHTTP(res, req)
	Ω(res.Code).Should(Equal(501),
		fmt.Sprintf("%s %s should elicit HTTP 501 (Not Implemented) response...", method, uri))
	Ω(res.Body.String()).Should(Equal(""),
		fmt.Sprintf("%s %s should have no HTTP Response Body...", method, uri))
}
// NotFound issues the request against h and asserts an empty-bodied 404.
func NotFound(h http.Handler, method string, uri string, body io.Reader) {
	request, _ := http.NewRequest(method, uri, body)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, request)
	Ω(rec.Code).Should(Equal(404))
	Ω(rec.Body.String()).Should(Equal(""))
}
// GET performs an HTTP GET against h and returns the recorded response.
func GET(h http.Handler, uri string) *httptest.ResponseRecorder {
	request, _ := http.NewRequest("GET", uri, nil)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, request)
	return rec
}
// WithJSON asserts that s parses as JSON and returns it unchanged,
// letting callers validate request fixtures inline.
func WithJSON(s string) string {
	var data interface{}
	Ω(json.Unmarshal([]byte(s), &data)).Should(Succeed(),
		fmt.Sprintf("this is not JSON:\n%s\n", s))
	return s
}
// POST submits body to h as an HTTP POST and returns the recording.
func POST(h http.Handler, uri string, body string) *httptest.ResponseRecorder {
	request, _ := http.NewRequest("POST", uri, strings.NewReader(body))
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, request)
	return rec
}

// PUT submits body to h as an HTTP PUT and returns the recording.
func PUT(h http.Handler, uri string, body string) *httptest.ResponseRecorder {
	request, _ := http.NewRequest("PUT", uri, strings.NewReader(body))
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, request)
	return rec
}

// DELETE issues an HTTP DELETE with no body and returns the recording.
func DELETE(h http.Handler, uri string) *httptest.ResponseRecorder {
	request, _ := http.NewRequest("DELETE", uri, nil)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, request)
	return rec
}
// FakeSessionStore is a scriptable sessions.Store for handler tests.
type FakeSessionStore struct {
	Error       bool              // fail Get/New with a generic error
	SaveError   bool              // fail Save with a generic error
	Saved       int               // number of successful Save calls
	CookieError bool              // fail the next Get with a securecookie.MultiError (one-shot)
	Session     *sessions.Session // the session handed out and last saved
}

// Get returns the cached session, creating one on first use. Error forces
// a failure; CookieError forces a single securecookie failure and then
// clears itself so the following call succeeds.
func (fs *FakeSessionStore) Get(r *http.Request, name string) (*sessions.Session, error) {
	if fs.Error {
		return nil, fmt.Errorf("Error getting session")
	}
	if fs.CookieError {
		fs.CookieError = false
		return nil, securecookie.MultiError{}
	}
	if fs.Session == nil {
		s, err := fs.New(r, name)
		if err != nil {
			return nil, err
		}
		fs.Session = s
	}
	return fs.Session, nil
}

// New builds a pre-populated fake session with a user and group membership.
// NOTE(review): r is unused and the session is bound to gothic.Store rather
// than fs — presumably intentional for gothic interop; confirm.
func (fs *FakeSessionStore) New(r *http.Request, name string) (*sessions.Session, error) {
	if fs.Error {
		return nil, fmt.Errorf("Error creating session")
	}
	s := sessions.NewSession(gothic.Store, name)
	s.Values[gothic.SessionName] = `{"session":"exists"}`
	s.Values["User"] = "fakeUser"
	s.Values["Membership"] = map[string]interface{}{"Groups": []interface{}{"fakeGroup1", "fakeGroup2"}}
	s.Options = &sessions.Options{MaxAge: 60}
	return s, nil
}

// Save stores s (or fails when SaveError is set) and counts the call.
func (fs *FakeSessionStore) Save(r *http.Request, w http.ResponseWriter, s *sessions.Session) error {
	if fs.SaveError {
		return fmt.Errorf("Error saving session")
	}
	fs.Session = s
	fs.Saved++
	return nil
}
// FakeResponder is an in-memory http.ResponseWriter for assertions.
type FakeResponder struct {
	Status  int
	Headers http.Header
	Body    *bytes.Buffer
}

// NewFakeResponder returns a FakeResponder ready for use.
func NewFakeResponder() *FakeResponder {
	return &FakeResponder{
		Headers: http.Header{},
		Body:    &bytes.Buffer{},
	}
}

// Header implements http.ResponseWriter.
func (fr *FakeResponder) Header() http.Header { return fr.Headers }

// WriteHeader implements http.ResponseWriter.
func (fr *FakeResponder) WriteHeader(code int) { fr.Status = code }

// Write implements http.ResponseWriter, defaulting the status to 200 on
// first write, mirroring net/http's implicit-200 behavior.
func (fr *FakeResponder) Write(data []byte) (int, error) {
	if fr.Status == 0 {
		fr.Status = 200
	}
	return fr.Body.Write(data)
}

// ReadBody drains and returns the captured response body.
func (fr *FakeResponder) ReadBody() (string, error) {
	data, err := io.ReadAll(fr.Body)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// FakeResponderHandler returns a handler that writes a fixed body.
func FakeResponderHandler() http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("Processed request"))
	}
	return http.HandlerFunc(fn)
}
// FakeVerifier is a stub authorization verifier for handler tests.
type FakeVerifier struct {
	Allow           bool // decision returned by every Verify call
	MembershipError bool // when true, Membership fails with a mock error
}

// Verify ignores its inputs and returns the canned Allow decision.
func (fv *FakeVerifier) Verify(u string, m map[string]interface{}) bool {
	return fv.Allow
}

// Membership returns an empty membership map, or a mock error when
// MembershipError is set.
func (fv *FakeVerifier) Membership(u goth.User, c *http.Client) (map[string]interface{}, error) {
	if fv.MembershipError {
		return nil, fmt.Errorf("Mock error")
	}
	return map[string]interface{}{}, nil
}
// FakeProxy rewrites requests to target the embedded test backend.
type FakeProxy struct {
	Backend      *ghttp.Server
	ResponseCode int // NOTE(review): not consulted by RoundTrip below — confirm it is used elsewhere
}

// RoundTrip redirects the request at the fake backend over plain HTTP
// and performs it with a fresh transport.
func (fp *FakeProxy) RoundTrip(r *http.Request) (*http.Response, error) {
	r.URL.Host = fp.Backend.Addr()
	r.URL.Scheme = "http"
	return (&http.Transport{}).RoundTrip(r)
}
|
// Attempted the following name for package:
// - authenticator: this sounds more like a verb
// - authentication: too long
// - userlogin: is too specific, since user can also register
// - loginUser: breaks the convention, since package name is preferable a noun.
// - authz and authn is better.
package authnsvc
import (
"context"
"github.com/pkg/errors"
"github.com/alextanhongpin/go-microservice/pkg/govalidator"
"github.com/alextanhongpin/passwd"
)
type (
	// LoginRequest carries login credentials; the username must be a
	// valid e-mail address and the password at least 8 characters.
	LoginRequest struct {
		Username string `json:"username" validate:"required,email"`
		Password string `json:"password" validate:"required,min=8"`
	}
	// LoginResponse wraps the authenticated user.
	LoginResponse struct {
		User User `json:"user"`
	}
	// loginRepository is deliberately unexported — consumers supply
	// their own implementation.
	loginRepository interface {
		WithEmail(email string) (User, error)
	}
	// LoginUseCase authenticates users against the repository.
	LoginUseCase struct {
		users loginRepository
	}
)
// NewLoginUseCase constructs a login use case backed by the given repository.
func NewLoginUseCase(users loginRepository) *LoginUseCase {
	uc := new(LoginUseCase)
	uc.users = users
	return uc
}
// Login authenticates the user described by req.
//
// It validates the payload, looks the user up by e-mail (the username
// field carries the e-mail address) and verifies the supplied password
// against the stored hash. On success the matched user is returned.
func (l *LoginUseCase) Login(ctx context.Context, req LoginRequest) (*LoginResponse, error) {
	if err := govalidator.Validate.Struct(req); err != nil {
		return nil, errors.Wrap(err, "validate login request failed")
	}
	user, err := l.users.WithEmail(req.Username)
	if err != nil {
		return nil, errors.Wrap(err, "get user failed")
	}
	if err := passwd.Verify(req.Password, user.HashedPassword); err != nil {
		return nil, errors.Wrap(err, "verify password failed")
	}
	// BUG FIX: the success path previously returned
	// errors.Wrap(err, "verify password failed") on the outer err, which
	// is necessarily nil here — the wrap evaluated to nil but was
	// misleading. Return an explicit nil.
	return &LoginResponse{user}, nil
}
|
package brewerydb
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
)
// defaultBaseUrl is the BreweryDB v2 REST endpoint used unless overridden.
const defaultBaseUrl string = "http://api.brewerydb.com/v2"

// breweryDBClient talks to the BreweryDB HTTP API.
type breweryDBClient struct {
	apiKey      string // API key appended to every request
	baseUrl     string // endpoint root, defaults to defaultBaseUrl
	VerboseMode bool   // when true, log prints diagnostics
}
// SearchResponse carries BreweryDB's common paging metadata.
type SearchResponse struct {
	CurrentPage   int
	NumberOfPages int
	TotalResults  int
}

// BeerSearchResponse is the payload of a beer search; results arrive
// under the API's "Data" key.
type BeerSearchResponse struct {
	SearchResponse
	Beers []Beer `json:"Data"`
}

// Beer is a single search hit with its style, availability and brewers.
type Beer struct {
	Name           string
	ABV            string
	IBU            string
	Style          Style
	Available      Available
	Breweries      []Brewery
	SocialAccounts []SocialAccount
}

// Style names the beer style.
type Style struct {
	Name string
}

// Available names the beer's availability window.
type Available struct {
	Name string
}

// Brewery describes a producing brewery and its locations.
type Brewery struct {
	Name      string
	Website   string
	Locations []Location
}

// Location is a brewery site; IsPrimary is the API's "Y"/"N" flag.
type Location struct {
	Locality  string
	Region    string
	IsPrimary string
}

// SocialAccount holds a social-media link for a beer.
type SocialAccount struct {
	Link string
}
// NewClient returns a BreweryDB client using the default API endpoint
// with verbose logging disabled.
func NewClient(apiKey string) (c *breweryDBClient) {
	c = &breweryDBClient{
		apiKey:      apiKey,
		baseUrl:     defaultBaseUrl,
		VerboseMode: false,
	}
	return c
}
// SearchBeers queries the BreweryDB search endpoint for beers matching q.
//
// pg is zero-based; BreweryDB pages are one-based, so the value is
// shifted by one before being sent. Errors are printed and an empty
// response returned, preserving the original best-effort contract.
func (c *breweryDBClient) SearchBeers(q string, pg int) (resp BeerSearchResponse) {
	// Build the query string. Premium-only options are requested
	// unconditionally; the API ignores them for non-premium keys.
	v := url.Values{}
	v.Set("type", "beer")
	v.Add("withBreweries", "Y")
	v.Add("withSocialAccounts", "Y")
	v.Add("q", q)
	// BreweryDB's page param is one-based (and treats 0 as 1).
	v.Add("p", strconv.Itoa(pg+1))
	v.Add("key", c.apiKey)
	// BUG FIX: this local was previously named `url`, shadowing the
	// imported net/url package within the rest of the function.
	endpoint := c.baseUrl + "/search?" + v.Encode()
	// Perform the request and deserialize the response.
	data, err := c.performGetRequest(endpoint)
	if err != nil {
		fmt.Printf("err: %v", err)
		return
	}
	if err = json.Unmarshal(data, &resp); err != nil {
		fmt.Printf("json err: %v\n", err)
		return
	}
	// Report our search results.
	c.log("fetched pg %d (%d results spanning %d pages)\n", resp.CurrentPage, resp.TotalResults, resp.NumberOfPages)
	return
}
// performGetRequest fetches url with a plain HTTP GET and returns the
// raw response body. Errors are both printed and returned.
func (c *breweryDBClient) performGetRequest(url string) (buf []byte, err error) {
	c.log("fetching: %v\n", url)
	res, err := http.Get(url)
	if err != nil {
		fmt.Printf("http err: %v\n", err)
		return
	}
	if res == nil {
		err = fmt.Errorf("err, response is nil")
		return
	}
	// FIX: register the Close as soon as the body is known to exist,
	// before reading it (previously the defer was placed after ReadAll).
	defer res.Body.Close()
	buf, err = ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Printf("ioutil err: %v\n", err)
	}
	return
}
// log prints via fmt.Printf, but only when VerboseMode is enabled.
func (c *breweryDBClient) log(format string, a ...interface{}) {
	if !c.VerboseMode {
		return
	}
	fmt.Printf(format, a...)
}
|
package main
import (
"fmt"
"unicode"
)
// main prints, for every rune in a sample string, the result of the
// basic unicode classification predicates.
func main() {
	str := "123vfa`1545`1XZVP; [ 215;1"
	for _, r := range str {
		// BUG FIX: the printed labels were misspelled (IsLzetter,
		// IsLozwer, IsNuzmber, IsSpzace, IsSyzmbol) and did not match
		// the unicode functions actually being called.
		fmt.Printf("%s, IsDigit %v\n", string(r), unicode.IsDigit(r))
		fmt.Printf("%s, IsLetter %v\n", string(r), unicode.IsLetter(r))
		fmt.Printf("%s, IsLower %v\n", string(r), unicode.IsLower(r))
		fmt.Printf("%s, IsNumber %v\n", string(r), unicode.IsNumber(r))
		fmt.Printf("%s, IsSpace %v\n", string(r), unicode.IsSpace(r))
		fmt.Printf("%s, IsSymbol %v\n", string(r), unicode.IsSymbol(r))
	}
}
|
package deleting_test
import (
"testing"
"time"
"github.com/elhamza90/lifelog/internal/domain"
"github.com/elhamza90/lifelog/internal/store"
)
// TestDeleteExpense checks deleter.Expense for an existing and a
// non-existing expense ID.
func TestDeleteExpense(t *testing.T) {
	// Seed the fake repository with a single expense (ID 9).
	repo.Expenses = map[domain.ExpenseID]domain.Expense{
		9: {
			ID:         9,
			Label:      "Exp",
			Value:      10,
			Unit:       "Dh",
			ActivityID: 0,
			Tags:       []domain.Tag{},
		},
	}
	tests := map[string]struct {
		ID          domain.ExpenseID
		expectedErr error
	}{
		"Existing Expense":     {ID: 9, expectedErr: nil},
		"Non-Existing Expense": {ID: 988998, expectedErr: store.ErrExpenseNotFound},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			err := deleter.Expense(test.ID)
			if err != test.expectedErr {
				// BUG FIX: the failure message previously printed the
				// expected and returned errors in swapped positions
				// (and used ';' where ':' was intended).
				t.Fatalf("Expecting Error: %v\nReturned Error: %v", test.expectedErr, err)
			}
		})
	}
}
// TestDeleteActivityExpenses checks deleter.ActivityExpenses against
// missing, zero and existing activity IDs.
func TestDeleteActivityExpenses(t *testing.T) {
	now := time.Now()
	const testActID domain.ActivityID = 1
	// Seed the fake repository with a single activity from yesterday.
	repo.Activities = map[domain.ActivityID]domain.Activity{
		testActID: {ID: testActID, Label: "Test Act", Time: now.AddDate(0, 0, -1), Duration: time.Duration(time.Hour)},
	}
	// Subtests Definition
	tests := map[string]struct {
		actID       domain.ActivityID
		expectedErr error
	}{
		"Non-Existing Activity": {
			actID:       domain.ActivityID(98988),
			expectedErr: store.ErrActivityNotFound,
		},
		"Zero Activity ID": {
			actID:       domain.ActivityID(0),
			expectedErr: store.ErrActivityNotFound,
		},
		"Existing ID": {
			actID:       testActID,
			expectedErr: nil,
		},
	}
	// Subtests Execution
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			err := deleter.ActivityExpenses(test.actID)
			if err != test.expectedErr {
				t.Fatalf("\nExpected err: %v\nReturned err: %v", test.expectedErr, err)
			}
		})
	}
}
|
package main
// datatype demonstrates basic Go value types, byte/uint8 equivalence,
// string <-> []byte conversion, and raw vs interpreted string literals.
func datatype() {
	var flag bool = true
	var count int = 0
	var cplx complex64 = 0
	/*
		byte == unit : type mismatched error
		byte == uint8 : used for byte-level data
		Go performs no implicit type conversions; they must be explicit.
		Explicit conversion syntax: type(value)
	*/
	var b byte = 0
	var u uint8 = 0
	println(flag)
	println(count)
	println(cplx)
	println(b == u)
	s := "ABC"
	raw := []byte(s)
	s2 := string(raw)
	println(raw, s2)
	// Raw (backtick) literals keep embedded newlines, like JS template
	// literals; interpreted literals use escapes and concatenation.
	rawLit := `이거슨
자바스크립트의 백틱과 동일하게
동작한다.`
	interpLit := "\n" + "기존 문자열 연산처럼 사용할 수 있다."
	println(rawLit)
	println(interpLit)
}
|
package node
import (
"bufio"
"bytes"
"io"
"io/ioutil"
)
// readFile reads the named file and invokes handler once per line (the
// line is passed without its trailing newline). It returns the first
// error from handler, any read error, or nil.
//
// BUG FIX: the previous implementation used bufio.Reader.ReadLine and
// discarded its isPrefix result, silently splitting lines longer than
// the reader's internal buffer. A Scanner whose buffer is sized to the
// whole file handles lines of any length.
func readFile(file string, handler func(string) error) error {
	contents, err := ioutil.ReadFile(file)
	if err != nil {
		return err
	}
	scanner := bufio.NewScanner(bytes.NewReader(contents))
	// Allow any single line up to the full file size.
	scanner.Buffer(make([]byte, 0, 64*1024), len(contents)+1)
	for scanner.Scan() {
		if err := handler(scanner.Text()); err != nil {
			return err
		}
	}
	return scanner.Err()
}
|
package main
import (
"fmt"
"github.com/vlasove/algs/deque"
)
// isSaller reports whether name denotes a seller; by this demo's
// convention a seller's name ends with the rune 'm'.
// ROBUSTNESS FIX: an empty name previously panicked (index -1); it now
// simply reports false.
func isSaller(name string) bool {
	runes := []rune(name)
	if len(runes) == 0 {
		return false
	}
	return runes[len(runes)-1] == 'm'
}
// checkIn reports whether name occurs in names.
func checkIn(name string, names []string) bool {
	for i := range names {
		if names[i] == name {
			return true
		}
	}
	return false
}
// search runs a breadth-first search over graph starting from name's
// direct contacts and returns true when a seller (per isSaller) is found.
//
// NOTE(review): PushBack receives a []string while PopLeft is treated as
// yielding one name — presumably the deque package flattens pushed
// slices; confirm against github.com/vlasove/algs/deque.
func search(name string, graph map[string][]string) bool {
	searchDeque := deque.New()
	searchDeque.PushBack(graph[name])
	searched := []string{} // names already inspected, to avoid revisits
	for !searchDeque.IsEmpty() {
		person := searchDeque.PopLeft()
		if !checkIn(person, searched) {
			if isSaller(person) {
				fmt.Println("Seller was found: ", person)
				return true
			} else {
				// Not a seller: enqueue their contacts and mark visited.
				searchDeque.PushBack(graph[person])
				searched = append(searched, person)
			}
		}
	}
	return false
}
// main builds a small friendship graph and breadth-first searches it
// for a seller starting from "you".
func main() {
	graph := map[string][]string{
		"you":    {"alice", "bob", "claire"},
		"bob":    {"anuj", "peggy"},
		"alice":  {"peggy"},
		"claire": {"thom", "johny"},
		"anuj":   {},
		"peggy":  {},
		"thom":   {},
		"johny":  {},
	}
	search("you", graph)
}
|
package charger
import (
"fmt"
"net/http"
"strconv"
"time"
"github.com/evcc-io/evcc/api"
"github.com/evcc-io/evcc/charger/daheimladen"
"github.com/evcc-io/evcc/provider"
"github.com/evcc-io/evcc/util"
"github.com/evcc-io/evcc/util/request"
"golang.org/x/oauth2"
)
// DaheimLaden charger implementation
type DaheimLaden struct {
	*request.Helper
	stationID     string        // charging-station identifier used in API paths
	connectorID   int32         // connector addressed by remote start (fixed to 1)
	idTag         string        // OCPP idTag presented when starting a transaction
	token         string        // bearer token for the DaheimLaden cloud API
	transactionID int32         // last in-progress transaction id (set when stopping)
	statusCache   provider.Cacheable[daheimladen.GetLatestStatus]             // cached station status
	meterCache    provider.Cacheable[daheimladen.GetLatestMeterValueResponse] // cached meter values
	cache         time.Duration // lifetime for both caches
}
// init registers this charger type under the "daheimladen" key so it can
// be instantiated from configuration.
func init() {
	registry.Add("daheimladen", NewDaheimLadenFromConfig)
}
// NewDaheimLadenFromConfig creates a DaheimLaden charger from generic config
func NewDaheimLadenFromConfig(other map[string]interface{}) (api.Charger, error) {
	// Decode the free-form config map; Cache defaults to one second.
	cc := struct {
		Token     string
		StationID string
		Cache     time.Duration
	}{
		Cache: time.Second,
	}
	if err := util.DecodeOther(other, &cc); err != nil {
		return nil, err
	}
	return NewDaheimLaden(cc.Token, cc.StationID, cc.Cache)
}
// NewDaheimLaden creates DaheimLaden charger
func NewDaheimLaden(token, stationID string, cache time.Duration) (*DaheimLaden, error) {
	c := &DaheimLaden{
		Helper:      request.NewHelper(util.NewLogger("daheim")),
		stationID:   stationID,
		connectorID: 1, // always address connector 1
		idTag:       daheimladen.EVCC_IDTAG,
		token:       token,
		cache:       cache,
	}
	// Authenticate every request with the static bearer token, chaining
	// whatever transport the helper already installed.
	c.Client.Transport = &oauth2.Transport{
		Source: oauth2.StaticTokenSource(&oauth2.Token{
			AccessToken: token,
			TokenType:   "Bearer",
		}),
		Base: c.Client.Transport,
	}
	// Station status and meter values are fetched through resettable
	// caches so repeated reads within `cache` reuse the last response.
	c.statusCache = provider.ResettableCached(func() (daheimladen.GetLatestStatus, error) {
		var res daheimladen.GetLatestStatus
		err := c.GetJSON(fmt.Sprintf("%s/cs/%s/status", daheimladen.BASE_URL, c.stationID), &res)
		return res, err
	}, c.cache)
	c.meterCache = provider.ResettableCached(func() (daheimladen.GetLatestMeterValueResponse, error) {
		var res daheimladen.GetLatestMeterValueResponse
		err := c.GetJSON(fmt.Sprintf("%s/cs/%s/metervalue", daheimladen.BASE_URL, c.stationID), &res)
		return res, err
	}, c.cache)
	return c, nil
}
// reset invalidates both response caches so the next read re-queries the
// station after a state-changing command.
func (c *DaheimLaden) reset() {
	c.statusCache.Reset()
	c.meterCache.Reset()
}
// Status implements the api.Charger interface.
// It maps the station's OCPP charge-point status onto the generic
// A/B/C/F charge states.
func (c *DaheimLaden) Status() (api.ChargeStatus, error) {
	s, err := c.statusCache.Get()
	if err != nil {
		return api.StatusNone, err
	}
	switch daheimladen.ChargePointStatus(s.Status) {
	case daheimladen.AVAILABLE:
		return api.StatusA, nil
	case daheimladen.PREPARING:
		return api.StatusB, nil
	case daheimladen.CHARGING, daheimladen.FINISHING:
		return api.StatusC, nil
	case daheimladen.FAULTED:
		return api.StatusF, nil
	default:
		return api.StatusNone, fmt.Errorf("invalid status: %s", s.Status)
	}
}
// Enabled implements the api.Charger interface
// The charger counts as enabled while the station reports CHARGING.
func (c *DaheimLaden) Enabled() (bool, error) {
	res, err := c.statusCache.Get()
	return res.Status == string(daheimladen.CHARGING), err
}
// Enable implements the api.Charger interface
//
// enable=true issues an OCPP remote-start for the configured connector;
// enable=false looks up the in-progress transaction and remote-stops it.
// Both paths invalidate the caches via the deferred reset.
func (c *DaheimLaden) Enable(enable bool) error {
	defer c.reset()
	if enable {
		data := daheimladen.RemoteStartRequest{
			ConnectorID: c.connectorID,
			IdTag:       c.idTag,
		}
		uri := fmt.Sprintf("%s/cs/%s/remotestart", daheimladen.BASE_URL, c.stationID)
		req, err := http.NewRequest(http.MethodPost, uri, request.MarshalJSON(data))
		if err != nil {
			return err
		}
		var res daheimladen.RemoteStartResponse
		if err = c.DoJSON(req, &res); err == nil && res.Status != string(daheimladen.REMOTE_START_ACCEPTED) {
			err = fmt.Errorf("charging station refused to start transaction")
		}
		return err
	}
	// Disable: find the transaction currently running so it can be stopped.
	var res daheimladen.GetLatestInProgressTransactionResponse
	uri := fmt.Sprintf("%s/cs/%s/get_latest_inprogress_transaction", daheimladen.BASE_URL, c.stationID)
	if err := c.GetJSON(uri, &res); err != nil {
		return err
	}
	c.transactionID = res.TransactionID
	data := daheimladen.RemoteStopRequest{
		TransactionID: c.transactionID,
	}
	uri = fmt.Sprintf("%s/cs/%s/remotestop", daheimladen.BASE_URL, c.stationID)
	req, err := http.NewRequest(http.MethodPost, uri, request.MarshalJSON(data))
	if err != nil {
		return err
	}
	// NOTE(review): the stop response is decoded into RemoteStartResponse;
	// presumably the daheimladen package reuses that shape — confirm no
	// dedicated RemoteStopResponse type applies here.
	var remoteStopRes daheimladen.RemoteStartResponse
	if err = c.DoJSON(req, &remoteStopRes); err == nil && remoteStopRes.Status != string(daheimladen.REMOTE_STOP_ACCEPTED) {
		err = fmt.Errorf("charging station refused to stop transaction")
	}
	return err
}
// MaxCurrent implements the api.Charger interface
// It pushes the charge-rate configuration key with the ampere value and
// invalidates the caches via the deferred reset.
func (c *DaheimLaden) MaxCurrent(current int64) error {
	defer c.reset()
	data := daheimladen.ChangeConfigurationRequest{
		Key:   string(daheimladen.CHARGE_RATE),
		Value: strconv.FormatInt(current, 10),
	}
	uri := fmt.Sprintf("%s/cs/%s/change_config", daheimladen.BASE_URL, c.stationID)
	req, err := http.NewRequest(http.MethodPost, uri, request.MarshalJSON(data))
	if err != nil {
		return err
	}
	var res daheimladen.ChangeConfigurationResponse
	if err = c.DoJSON(req, &res); err == nil && res.Status != string(daheimladen.CHANGE_CONFIG_ACCEPTED) {
		err = fmt.Errorf("charging station refused to change max current")
	}
	return err
}
var _ api.Meter = (*DaheimLaden)(nil)
// CurrentPower implements the api.Meter interface
func (c *DaheimLaden) CurrentPower() (float64, error) {
	res, err := c.meterCache.Get()
	// Scaled by 1e3 — the station presumably reports kW; confirm units.
	return float64(res.ActivePowerImport * 1e3), err
}
var _ api.MeterEnergy = (*DaheimLaden)(nil)
// TotalEnergy implements the api.MeterMeterEnergy interface
func (c *DaheimLaden) TotalEnergy() (float64, error) {
res, err := c.meterCache.Get()
return float64(res.EnergyActiveImportRegister), err
}
var _ api.PhaseCurrents = (*DaheimLaden)(nil)
// Currents implements the api.PhaseCurrents interface
func (c *DaheimLaden) Currents() (float64, float64, float64, error) {
res, err := c.meterCache.Get()
return float64(res.CurrentImportPhaseL1), float64(res.CurrentImportPhaseL2), float64(res.CurrentImportPhaseL3), err
}
|
package ogg
/********************************************************************
* *
* THIS FILE IS PART OF THE Ogg CONTAINER SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2010 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function: code raw packets into framed OggSquish stream and
decode Ogg streams back into raw packets
last mod: $Id: framing.c 18052 2011-08-04 17:57:02Z giles $
note: The CRC code is directly derived from public domain code by
Ross Williams (ross@guest.adelaide.edu.au). See docs/framing.html
for details.
********************************************************************/
import (
"bytes"
"fmt"
"log"
"os"
"reflect"
"testing"
)
// Package-level fixtures shared by the framing round-trip tests.
var (
	streamStateEnc, streamStateDecr StreamState // encoder / decoder stream states
	syncState                       SyncState   // decoder sync state
	sequence                        int64       // expected next packet number (maintained by checkpacket)
	lastno                          int32       // previous packet index seen by checkpacket
)
// checkpacket verifies a decoded packet's length, granule position and
// sequence number, and that its body bytes follow the j+no test pattern.
// no is the scripted packet index; gaps in no model dropped packets.
//
// BUG FIX: the first two format strings used C's %ld, which is not a
// valid Go fmt verb and rendered as "%!l(...)"; %d is correct here.
func checkpacket(op *Packet, length int, no int, pos int64) {
	if len(op.Packet) != length {
		fmt.Fprintf(os.Stderr, "incorrect packet length (%d != %d)!\n", len(op.Packet), length)
		os.Exit(1)
	}
	if op.GranulePos != pos {
		fmt.Fprintf(os.Stderr, "incorrect packet granpos (%d != %d)!\n", op.GranulePos, pos)
		os.Exit(1)
	}
	// packet number just follows sequence/gap; adjust the input number for that
	if no == 0 {
		sequence = 0
	} else {
		sequence++
		if no > int(lastno+1) {
			sequence++
		}
	}
	lastno = int32(no)
	if op.PacketNo != sequence {
		fmt.Fprintf(os.Stderr, "incorrect packet sequence %d != %d\n",
			op.PacketNo, sequence)
		os.Exit(1)
	}
	// Test data
	for j := range op.Packet {
		if op.Packet[j] != (byte(j+no) & 0xff) {
			fmt.Fprintf(os.Stderr, "body data mismatch (1) at pos %d: %x!=%x!\n\n",
				j, op.Packet[j], (j+no)&0xff)
			os.Exit(1)
		}
	}
}
// check_page compares a produced page's body and header against the
// expected bytes, dumping the header and exiting on any mismatch.
//
// ROBUSTNESS FIX: the header-length check previously ran only after the
// byte-by-byte comparisons, so a too-long page.Header could index past
// the expected header slice and panic. The length is now validated first.
func check_page(data []byte, header []byte, page *Page) {
	// header[26] holds the segment count; 27 is the fixed header size.
	if len(page.Header) != int(header[26])+27 {
		fmt.Fprintf(os.Stderr, "header length incorrect! (%d!=%d)\n",
			len(page.Header), int(header[26])+27)
		os.Exit(1)
	}
	// Test data
	for j := range page.Body {
		if page.Body[j] != data[j] {
			fmt.Fprintf(os.Stderr, "body data mismatch (2) at pos %d: %x!=%x!\n\n",
				j, data[j], page.Body[j])
			os.Exit(1)
		}
	}
	// Test header; on mismatch dump the whole header for diagnosis.
	for j := range page.Header {
		if page.Header[j] != header[j] {
			fmt.Fprintf(os.Stderr, "header content mismatch at pos %d:\n", j)
			for j = 0; j < int(header[26])+27; j++ {
				fmt.Fprintf(os.Stderr, " (%d)%02x:%02x", j, header[j], page.Header[j])
			}
			fmt.Fprintf(os.Stderr, "\n")
			os.Exit(1)
		}
	}
}
// print_header dumps an Ogg page header to stderr in human-readable form.
//
// BUG FIX: the granulepos/serialno/pageno fields were assembled by
// shifting byte-typed values (<<24/<<16/<<8), which truncates to zero in
// Go; each byte is now widened to int before shifting. The invalid
// C-style %ld verb is replaced by %d as well.
func print_header(page *Page) {
	fmt.Fprintf(os.Stderr, "\nHEADER:\n")
	fmt.Fprintf(os.Stderr, " capture: %c %c %c %c version: %d flags: %x\n",
		page.Header[0], page.Header[1], page.Header[2], page.Header[3],
		int(page.Header[4]), int(page.Header[5]))
	fmt.Fprintf(os.Stderr, " granulepos: %d serialno: %d pageno: %d\n",
		int(page.Header[9])<<24|int(page.Header[8])<<16|
			int(page.Header[7])<<8|int(page.Header[6]),
		int(page.Header[17])<<24|int(page.Header[16])<<16|
			int(page.Header[15])<<8|int(page.Header[14]),
		int(page.Header[21])<<24|int(page.Header[20])<<16|
			int(page.Header[19])<<8|int(page.Header[18]))
	fmt.Fprintf(os.Stderr, " checksum: %02x:%02x:%02x:%02x\n segments: %d (",
		int(page.Header[22]), int(page.Header[23]),
		int(page.Header[24]), int(page.Header[25]),
		int(page.Header[26]))
	for j := 27; j < len(page.Header); j++ {
		fmt.Fprintf(os.Stderr, "%d ", page.Header[j])
	}
	fmt.Fprintf(os.Stderr, ")\n\n")
}
// copy_page detaches page's header and body from their backing storage
// so the caller may retain them after the stream reuses its buffers.
func copy_page(page *Page) {
	page.Header = append([]byte(nil), page.Header...)
	page.Body = append([]byte(nil), page.Body...)
}
// free_page drops the page's buffers so they can be collected.
func free_page(page *Page) {
	page.Header = nil
	page.Body = nil
}

// Error reports a generic test failure and aborts the process.
func Error() {
	fmt.Fprintf(os.Stderr, "error!\n")
	os.Exit(1)
}
// 17 only
var head1_0 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 0, 0, 0, 0,
0x15, 0xed, 0xec, 0x91,
1, 17}
// 17, 254, 255, 256, 500, 510, 600 byte, pad
var head1_1 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 0, 0, 0, 0,
0x59, 0x10, 0x6c, 0x2c,
1, 17}
var head2_1 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x04,
0x07, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 1, 0, 0, 0,
0x89, 0x33, 0x85, 0xce,
13, 254, 255, 0, 255, 1, 255, 245, 255, 255, 0,
255, 255, 90}
// nil packets; beginning,middle,end
var head1_2 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 0, 0, 0, 0,
0xff, 0x7b, 0x23, 0x17,
1, 0}
var head2_2 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x04,
0x07, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 1, 0, 0, 0,
0x5c, 0x3f, 0x66, 0xcb,
17, 17, 254, 255, 0, 0, 255, 1, 0, 255, 245, 255, 255, 0,
255, 255, 90, 0}
// large initial packet
var head1_3 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 0, 0, 0, 0,
0x01, 0x27, 0x31, 0xaa,
18, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 10}
var head2_3 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x04,
0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 1, 0, 0, 0,
0x7f, 0x4e, 0x8a, 0xd2,
4, 255, 4, 255, 0}
// continuing packet test
var head1_4 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 0, 0, 0, 0,
0xff, 0x7b, 0x23, 0x17,
1, 0}
var head2_4 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0x01, 0x02, 0x03, 0x04, 1, 0, 0, 0,
0xf8, 0x3c, 0x19, 0x79,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255}
var head3_4 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x05,
0x07, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 2, 0, 0, 0,
0x38, 0xe6, 0xb6, 0x28,
6, 255, 220, 255, 4, 255, 0}
// spill expansion test
var head1_4b = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 0, 0, 0, 0,
0xff, 0x7b, 0x23, 0x17,
1, 0}
var head2_4b = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x00,
0x07, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 1, 0, 0, 0,
0xce, 0x8f, 0x17, 0x1a,
23, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 10, 255, 4, 255, 0, 0}
var head3_4b = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x04,
0x07, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 2, 0, 0, 0,
0x9b, 0xb2, 0x50, 0xa1,
1, 0}
// page with the 255 segment limit
var head1_5 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 0, 0, 0, 0,
0xff, 0x7b, 0x23, 0x17,
1, 0}
var head2_5 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x00,
0x07, 0xfc, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 1, 0, 0, 0,
0xed, 0x2a, 0x2e, 0xa7,
255,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10}
var head3_5 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x04,
0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 2, 0, 0, 0,
0x6c, 0x3b, 0x82, 0x3d,
1, 50}
// packet that overspans over an entire page
var head1_6 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 0, 0, 0, 0,
0xff, 0x7b, 0x23, 0x17,
1, 0}
var head2_6 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x00,
0x07, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x04, 1, 0, 0, 0,
0x68, 0x22, 0x7c, 0x3d,
255, 100,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255}
// Expected Ogg page images (header + lacing table) for the scripted
// framing tests. Layout of each image: capture pattern "OggS", stream
// structure version (0), header-type flag (0x01 continued packet,
// 0x02 BOS, 0x04 EOS), 64-bit granule position (LE), serial number
// 0x04030201 (LE, matches the Init calls in TestFraming), page sequence
// number (LE), CRC32 checksum, segment count, then the lacing values.
var head3_6 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x01,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0x01, 0x02, 0x03, 0x04, 2, 0, 0, 0,
	0xf4, 0x87, 0xba, 0xf3,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255}

// head4_6: final page of the very-large-packet case (flag 0x05 =
// continued + EOS), 5 lacing values.
var head4_6 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x05,
	0x07, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x03, 0x04, 3, 0, 0, 0,
	0xf7, 0x2f, 0x6c, 0x60,
	5, 254, 255, 4, 255, 0}

// packet that overspans over an entire page
var head1_7 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x02,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x03, 0x04, 0, 0, 0, 0,
	0xff, 0x7b, 0x23, 0x17,
	1, 0}
var head2_7 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x00,
	0x07, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x03, 0x04, 1, 0, 0, 0,
	0x68, 0x22, 0x7c, 0x3d,
	255, 100,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255}
var head3_7 = []byte{0x4f, 0x67, 0x67, 0x53, 0, 0x05,
	0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x03, 0x04, 2, 0, 0, 0,
	0xd4, 0xe0, 0x60, 0xe5,
	1, 0}
// test_pack round-trips a scripted sequence of packets through the
// encode side (streamStateEnc -> pages) and back through sync/decode
// (syncState -> streamStateDecr -> packets), verifying every produced
// page against the expected images in headers and every decoded packet
// against the generated test data.
//
// pl holds the packet sizes, terminated by -1; headers holds the
// expected page images, terminated by nil. byteskip, pageskip and
// packetskip let a test start feeding the decoder mid-stream: the first
// byteskip bytes are withheld from the sync layer, and the expected
// page/packet bookkeeping starts pageskip pages / packetskip packets in.
func test_pack(pl []int32, headers [][]byte, byteskip, pageskip, packetskip int) {
	var (
		data          = make([]byte, 1024*1024) // for scripted test cases only
		inptr         int                       // bytes of test data generated so far
		outptr        int                       // bytes verified on the encode side
		deptr         int                       // bytes verified on the decode (page) side
		depacket      int32                     // bytes verified on the decode (packet) side
		granule_pos   int64 = 7
		pageno        int // pages produced by the encoder
		packets       int
		pageout       = pageskip // pages recovered by the decoder
		eosflag       bool
		bosflag       bool
		byteskipcount int
		err           error
	)
	streamStateEnc.Reset()
	streamStateDecr.Reset()
	syncState.Reset()
	// Account for the packets the decoder is expected to skip.
	for packets = 0; packets < packetskip; packets++ {
		depacket += pl[packets]
	}
	// Count the packets up to the -1 terminator.
	for packets = 0; ; packets++ {
		if pl[packets] == -1 {
			break
		}
	}
	for i := 0; i < packets; i++ {
		var op Packet // construct a test packet
		length := int(pl[i])
		op.Packet = data[inptr : inptr+length]
		if pl[i+1] < 0 {
			op.EOS = true
		} else {
			op.EOS = false
		}
		op.GranulePos = granule_pos
		granule_pos += 1024
		// Deterministic fill so decoded bytes can be compared later.
		for j := range op.Packet {
			op.Packet[j] = byte(i) + byte(j)
			inptr++
		}
		// submit the test packet
		if err = streamStateEnc.PacketIn(&op); err != nil {
			fmt.Println("streamStateEnc PacketIn returned not null")
		}
		// retrieve any finished pages
		var page Page
		for streamStateEnc.PageOut(&page) == true {
			// We have a page. Check it carefully
			fmt.Fprintf(os.Stderr, "%d, ", pageno)
			if headers[pageno] == nil {
				fmt.Fprintf(os.Stderr, "coded too many pages!\n")
			}
			check_page(data[outptr:], headers[pageno], &page)
			outptr += len(page.Body)
			pageno++
			// Pages the decoder will never see still advance the
			// decode-side byte counter.
			if pageskip != 0 {
				bosflag = true
				pageskip--
				deptr += len(page.Body)
			}
			// have a complete page; submit it to sync/decode
			var pageDec Page
			var packetDec, packetDec2 Packet
			buf := syncState.Buffer(len(page.Header) + len(page.Body))
			if buf == nil {
				log.Fatal("buffer is nil\n")
			}
			// Feed header then body to the sync layer, withholding the
			// first byteskip bytes of the whole stream.
			var offset int
			byteskipcount += len(page.Header)
			if byteskipcount > int(byteskip) {
				copy(buf, page.Header[0:byteskipcount-byteskip])
				offset += byteskipcount - byteskip
				byteskipcount = byteskip
			}
			byteskipcount += len(page.Body)
			if byteskipcount > byteskip {
				copy(buf[offset:], page.Body[0:byteskipcount-byteskip])
				offset += (byteskipcount - byteskip)
				byteskipcount = byteskip
			}
			if syncState.Wrote(offset) == -1 {
				log.Fatal("func Wrote returned -1")
			}
			for {
				ret := syncState.PageOut(&pageDec)
				if ret == 0 {
					break
				}
				if ret < 0 {
					// Sync layer is (re)synchronizing; keep pulling.
					continue
				}
				// got a page. Happy happy. Verify that it's good.
				fmt.Fprintf(os.Stderr, "(%d)", pageout)
				check_page(data[deptr:], headers[pageout], &pageDec)
				deptr += len(pageDec.Body)
				pageout++
				// submit it to deconstitution
				if err = streamStateDecr.PageIn(&pageDec); err != nil {
					log.Fatal(err)
				}
				// packets out?
				for streamStateDecr.PacketPeek(&packetDec2) > 0 {
					streamStateDecr.PacketPeek(nil)
					streamStateDecr.PacketOut(&packetDec) // just catching them all
					// verify peek and out match
					if reflect.DeepEqual(packetDec, packetDec2) == false {
						fmt.Fprintf(os.Stderr, "packetout != packetpeek! pos=%d\n",
							depacket)
						os.Exit(1)
					}
					// verify the packet!
					// check data
					if bytes.Equal(data[depacket:depacket+int32(len(packetDec.Packet))], packetDec.Packet) == false {
						fmt.Fprintf(os.Stderr, "packet data mismatch in decode! pos=%d\n",
							depacket)
						os.Exit(1)
					}
					// check bos flag: exactly the first decoded packet carries it
					if bosflag == false && packetDec.BOS == false {
						fmt.Fprintf(os.Stderr, "BOS flag not set on packet!\n")
						os.Exit(1)
					}
					if bosflag && packetDec.BOS != false {
						fmt.Fprintf(os.Stderr, "BOS flag incorrectly set on packet!\n")
						os.Exit(1)
					}
					bosflag = true
					depacket += int32(len(packetDec.Packet))
					// check eos flag: at most one packet may carry it
					if eosflag {
						fmt.Fprintf(os.Stderr, "Multiple decoded packets with eos flag!\n")
						os.Exit(1)
					}
					if packetDec.EOS != false {
						eosflag = true
					}
					// check granulepos flag
					if packetDec.GranulePos != -1 {
						fmt.Fprintf(os.Stderr, " granule:%d ", packetDec.GranulePos)
					}
				}
			}
		}
	}
	// Final bookkeeping: everything generated must have been seen again
	// on both the encode and decode sides.
	if headers[pageno] != nil {
		fmt.Fprintf(os.Stderr, "did not write last page!\n")
	}
	if headers[pageout] != nil {
		fmt.Fprintf(os.Stderr, "did not decode last page!\n")
	}
	if inptr != outptr {
		fmt.Fprintf(os.Stderr, "encoded page data incomplete!\n")
	}
	if inptr != deptr {
		fmt.Fprintf(os.Stderr, "decoded page data incomplete!\n")
	}
	if inptr != int(depacket) {
		fmt.Fprintf(os.Stderr, "decoded packet data incomplete!\n")
	}
	if eosflag == false {
		fmt.Fprintf(os.Stderr, "Never got a packet with eos set!\n")
	}
	fmt.Fprintf(os.Stderr, "ok.\n")
}
//
// Main test routine
//
// TestFraming exercises each code path in the Ogg framing code against
// the precomputed head*_* page fixtures, then runs a series of
// sync-layer tests on hand-built pages: page loss with and without
// rollback, partial input, and recapture after garbage.
func TestFraming(t *testing.T) {
	streamStateEnc.Init(0x04030201)
	streamStateDecr.Init(0x04030201)
	// Exercise each code path in the framing code. Also verify that
	// the checksums are working.
	{
		// 17 only
		var packets = []int32{17, -1}
		var headret = [][]byte{head1_0, nil}
		fmt.Fprintf(os.Stderr, "testing single page encoding... ")
		test_pack(packets, headret, 0, 0, 0)
	}
	{
		// 17, 254, 255, 256, 500, 510, 600 byte, pad
		var packets = []int32{17, 254, 255, 256, 500, 510, 600, -1}
		var headret = [][]byte{head1_1, head2_1, nil}
		fmt.Fprintf(os.Stderr, "testing basic page encoding... ")
		test_pack(packets, headret, 0, 0, 0)
	}
	{
		// nil packets; beginning,middle,end
		var packets = []int32{0, 17, 254, 255, 0, 256, 0, 500, 510, 600, 0, -1}
		var headret = [][]byte{head1_2, head2_2, nil}
		fmt.Fprintf(os.Stderr, "testing basic nil packets... ")
		test_pack(packets, headret, 0, 0, 0)
	}
	{
		// large initial packet
		var packets = []int32{4345, 259, 255, -1}
		var headret = [][]byte{head1_3, head2_3, nil}
		fmt.Fprintf(os.Stderr, "testing initial-packet lacing > 4k... ")
		test_pack(packets, headret, 0, 0, 0)
	}
	{
		// continuing packet test; with page spill expansion, we have to
		// overflow the lacing table.
		var packets = []int32{0, 65500, 259, 255, -1}
		var headret = [][]byte{head1_4, head2_4, head3_4, nil}
		fmt.Fprintf(os.Stderr, "testing single packet page span... ")
		test_pack(packets, headret, 0, 0, 0)
	}
	{
		// spill expand packet test
		var packets = []int32{0, 4345, 259, 255, 0, 0, -1}
		var headret = [][]byte{head1_4b, head2_4b, head3_4b, nil}
		fmt.Fprintf(os.Stderr, "testing page spill expansion... ")
		test_pack(packets, headret, 0, 0, 0)
	}
	// page with the 255 segment limit
	{
		var packets = []int32{0,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 10,
			10, 10, 10, 10, 10, 10, 10, 50, -1}
		var headret = [][]byte{head1_5, head2_5, head3_5, nil}
		fmt.Fprintf(os.Stderr, "testing max packet segments... ")
		test_pack(packets, headret, 0, 0, 0)
	}
	{
		// packet that overspans over an entire page
		var packets = []int32{0, 100, 130049, 259, 255, -1}
		var headret = [][]byte{head1_6, head2_6, head3_6, head4_6, nil}
		fmt.Fprintf(os.Stderr, "testing very large packets... ")
		test_pack(packets, headret, 0, 0, 0)
	}
	{
		// test for the libogg 1.1.1 resync in large continuation bug
		// found by Josh Coalson)
		var packets = []int32{0, 100, 130049, 259, 255, -1}
		var headret = [][]byte{head1_6, head2_6, head3_6, head4_6, nil}
		fmt.Fprintf(os.Stderr, "testing continuation resync in very large packets... ")
		test_pack(packets, headret, 100, 2, 3)
	}
	{
		// term only page. why not?
		var packets = []int32{0, 100, 64770, -1}
		var headret = [][]byte{head1_7, head2_7, head3_7, nil}
		fmt.Fprintf(os.Stderr, "testing zero data page (1 nil packet)... ")
		test_pack(packets, headret, 0, 0, 0)
	}
	{
		// build a bunch of pages for testing
		data := make([]byte, 1024*1024)
		pl := []int32{0, 1, 1, 98, 4079, 1, 1, 2954, 2057, 76, 34, 912, 0, 234, 1000, 1000, 1000, 300, -1}
		var inptr, i int32
		var page [5]Page
		streamStateEnc.Reset()
		// NOTE(review): inptr is never advanced, so every packet is
		// built over data[0:len]; this relies on PacketIn copying the
		// packet bytes at submit time — confirm.
		for i = 0; pl[i] != -1; i++ {
			var op Packet
			length := pl[i]
			op.Packet = data[inptr : inptr+length]
			if pl[i+1] < 0 {
				op.EOS = true
			} else {
				op.EOS = false
			}
			op.GranulePos = int64((i + 1) * 1000)
			for j := range op.Packet {
				op.Packet[j] = byte(i) + byte(j)
			}
			streamStateEnc.PacketIn(&op)
		}
		data = nil
		// retrieve finished pages
		for i = 0; i < 5; i++ {
			if streamStateEnc.PageOut(&page[i]) == false {
				fmt.Fprintf(os.Stderr, "Too few pages output building sync tests!\n")
				os.Exit(1)
			}
			// Detach the page from the encoder's internal buffers.
			copy_page(&page[i])
		}
		// Test lost pages on pagein/packetout: no rollback
		{
			var temp Page
			var test Packet
			fmt.Fprintf(os.Stderr, "Testing loss of pages... ")
			syncState.Reset()
			streamStateDecr.Reset()
			for i = 0; i < 5; i++ {
				copy(syncState.Buffer(len(page[i].Header)), page[i].Header)
				syncState.Wrote(len(page[i].Header))
				copy(syncState.Buffer(len(page[i].Body)), page[i].Body)
				syncState.Wrote(len(page[i].Body))
			}
			syncState.PageOut(&temp)
			streamStateDecr.PageIn(&temp)
			syncState.PageOut(&temp)
			streamStateDecr.PageIn(&temp)
			syncState.PageOut(&temp)
			// skip
			syncState.PageOut(&temp)
			streamStateDecr.PageIn(&temp)
			// do we get the expected results/packets?
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 0, 0, 0)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 1, 1, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 1, 2, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 98, 3, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 4079, 4, 5000)
			// The dropped page must surface as a hole (-1), not a packet.
			if streamStateDecr.PacketOut(&test) != -1 {
				fmt.Fprintf(os.Stderr, "Error: loss of page did not return error\n")
				os.Exit(1)
			}
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 76, 9, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 34, 10, -1)
			fmt.Fprintf(os.Stderr, "ok.\n")
		}
		// Test lost pages on pagein/packetout: rollback with continuation
		{
			var temp Page
			var test Packet
			fmt.Fprintf(os.Stderr, "Testing loss of pages (rollback required)... ")
			syncState.Reset()
			streamStateDecr.Reset()
			for i = 0; i < 5; i++ {
				copy(syncState.Buffer(len(page[i].Header)), page[i].Header)
				syncState.Wrote(len(page[i].Header))
				copy(syncState.Buffer(len(page[i].Body)), page[i].Body)
				syncState.Wrote(len(page[i].Body))
			}
			syncState.PageOut(&temp)
			streamStateDecr.PageIn(&temp)
			syncState.PageOut(&temp)
			streamStateDecr.PageIn(&temp)
			syncState.PageOut(&temp)
			streamStateDecr.PageIn(&temp)
			syncState.PageOut(&temp)
			// skip
			syncState.PageOut(&temp)
			streamStateDecr.PageIn(&temp)
			// do we get the expected results/packets?
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 0, 0, 0)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 1, 1, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 1, 2, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 98, 3, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 4079, 4, 5000)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 1, 5, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 1, 6, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 2954, 7, -1)
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 2057, 8, 9000)
			if streamStateDecr.PacketOut(&test) != -1 {
				fmt.Fprintf(os.Stderr, "Error: loss of page did not return error\n")
				os.Exit(1)
			}
			if streamStateDecr.PacketOut(&test) != 1 {
				Error()
			}
			checkpacket(&test, 300, 17, 18000)
			fmt.Fprintf(os.Stderr, "ok.\n")
		}
		// the rest only test sync
		{
			var pageDec Page
			// Test fractional page inputs: incomplete capture
			fmt.Fprintf(os.Stderr, "Testing sync on partial inputs... ")
			syncState.Reset()
			copy(syncState.Buffer(len(page[1].Header)), page[1].Header[0:3])
			syncState.Wrote(3)
			if syncState.PageOut(&pageDec) > 0 {
				Error()
			}
			// Test fractional page inputs: incomplete fixed header
			copy(syncState.Buffer(len(page[1].Header)), page[1].Header[3:3+20])
			syncState.Wrote(20)
			if syncState.PageOut(&pageDec) > 0 {
				Error()
			}
			// Test fractional page inputs: incomplete header
			copy(syncState.Buffer(len(page[1].Header)), page[1].Header[23:23+5])
			syncState.Wrote(5)
			if syncState.PageOut(&pageDec) > 0 {
				Error()
			}
			// Test fractional page inputs: incomplete body
			copy(syncState.Buffer(len(page[1].Header)), page[1].Header[28:])
			syncState.Wrote(len(page[1].Header) - 28)
			if syncState.PageOut(&pageDec) > 0 {
				Error()
			}
			copy(syncState.Buffer(len(page[1].Body)), page[1].Body[0:1000])
			syncState.Wrote(1000)
			if syncState.PageOut(&pageDec) > 0 {
				Error()
			}
			// Final fragment completes the page; it must now come out.
			copy(syncState.Buffer(len(page[1].Body)), page[1].Body[1000:])
			syncState.Wrote(len(page[1].Body) - 1000)
			if syncState.PageOut(&pageDec) <= 0 {
				Error()
			}
			fmt.Fprintf(os.Stderr, "ok.\n")
		}
		// Test fractional page inputs: page + incomplete capture
		{
			var pageDec Page
			fmt.Fprintf(os.Stderr, "Testing sync on 1+partial inputs... ")
			syncState.Reset()
			copy(syncState.Buffer(len(page[1].Header)), page[1].Header)
			syncState.Wrote(len(page[1].Header))
			copy(syncState.Buffer(len(page[1].Body)), page[1].Body)
			syncState.Wrote(len(page[1].Body))
			copy(syncState.Buffer(len(page[1].Header)), page[1].Header[0:20])
			syncState.Wrote(20)
			if syncState.PageOut(&pageDec) <= 0 {
				Error()
			}
			if syncState.PageOut(&pageDec) > 0 {
				Error()
			}
			copy(syncState.Buffer(len(page[1].Header)), page[1].Header[20:])
			syncState.Wrote(len(page[1].Header) - 20)
			copy(syncState.Buffer(len(page[1].Body)), page[1].Body)
			syncState.Wrote(len(page[1].Body))
			if syncState.PageOut(&pageDec) <= 0 {
				Error()
			}
			fmt.Fprintf(os.Stderr, "ok.\n")
		}
		// Test recapture: garbage + page
		{
			var pageDec Page
			fmt.Fprintf(os.Stderr, "Testing search for capture... ")
			syncState.Reset()
			// 'garbage'
			copy(syncState.Buffer(len(page[1].Body)), page[1].Body)
			syncState.Wrote(len(page[1].Body))
			copy(syncState.Buffer(len(page[1].Header)), page[1].Header)
			syncState.Wrote(len(page[1].Header))
			copy(syncState.Buffer(len(page[1].Body)), page[1].Body)
			syncState.Wrote(len(page[1].Body))
			copy(syncState.Buffer(len(page[2].Header)), page[2].Header[0:20])
			syncState.Wrote(20)
			if syncState.PageOut(&pageDec) > 0 {
				Error()
			}
			if syncState.PageOut(&pageDec) <= 0 {
				Error()
			}
			if syncState.PageOut(&pageDec) > 0 {
				Error()
			}
			copy(syncState.Buffer(len(page[2].Header)), page[2].Header[20:])
			syncState.Wrote(len(page[2].Header) - 20)
			copy(syncState.Buffer(len(page[2].Body)), page[2].Body)
			syncState.Wrote(len(page[2].Body))
			if syncState.PageOut(&pageDec) <= 0 {
				Error()
			}
			fmt.Fprintf(os.Stderr, "ok.\n")
		}
		// Test recapture: page + garbage + page
		{
			var pageDec Page
			fmt.Fprintf(os.Stderr, "Testing recapture... ")
			syncState.Reset()
			copy(syncState.Buffer(len(page[1].Header)), page[1].Header)
			syncState.Wrote(len(page[1].Header))
			copy(syncState.Buffer(len(page[1].Body)), page[1].Body)
			syncState.Wrote(len(page[1].Body))
			copy(syncState.Buffer(len(page[2].Header)), page[2].Header)
			syncState.Wrote(len(page[2].Header))
			copy(syncState.Buffer(len(page[2].Header)), page[2].Header)
			syncState.Wrote(len(page[2].Header))
			if syncState.PageOut(&pageDec) <= 0 {
				Error()
			}
			// Truncated body followed by a fresh page: the truncated
			// page is discarded, the next full page is recovered.
			copy(syncState.Buffer(len(page[2].Body)), page[2].Body[0:len(page[2].Body)-5])
			syncState.Wrote(len(page[2].Body) - 5)
			copy(syncState.Buffer(len(page[3].Header)), page[3].Header)
			syncState.Wrote(len(page[3].Header))
			copy(syncState.Buffer(len(page[3].Body)), page[3].Body)
			syncState.Wrote(len(page[3].Body))
			if syncState.PageOut(&pageDec) > 0 {
				Error()
			}
			if syncState.PageOut(&pageDec) <= 0 {
				Error()
			}
			fmt.Fprintf(os.Stderr, "ok.\n")
		}
		// Free page data that was previously copied
		for i = 0; i < 5; i++ {
			free_page(&page[i])
		}
	}
}
|
package main
import (
"antalk-go/internal/auth"
"antalk-go/internal/auth/config"
"flag"
"fmt"
"log"
)
// Command-line flags selecting the configuration to load.
var (
	configName = flag.String("config_name", "auth", "config name")
	configType = flag.String("config_type", "toml", "config type")
	configPath = flag.String("config_path", ".", "config path")
)
// init prints a fixed marker to stdout on startup.
// NOTE(review): this looks like leftover debug output — consider removing.
func init() {
	fmt.Println("test")
}
// main parses the flags, loads the configuration described by them,
// starts the auth service, and closes it on exit.
func main() {
	flag.Parse()
	// Build the config descriptor from the flag values.
	c := &config.Config{
		Name: *configName,
		Type: *configType,
		Path: *configPath,
	}
	// NOTE(review): any result of Init is ignored here — confirm Init
	// cannot fail or reports failure some other way.
	c.Init()
	s, err := auth.New(c)
	if err != nil {
		log.Fatal("auth.New error, ", err)
	}
	defer s.Close()
}
// Package ratsnest provides simple validation for arbitrary, possibly deeply-nested maps in Go.
package ratsnest
|
package minisentinel
import "github.com/alicebob/miniredis/v2/server"
// commandsPing registers the PING command handler on the sentinel's server.
func commandsPing(s *Sentinel) {
	s.srv.Register("PING", s.cmdPing)
}
// cmdPing handles the PING command: it rejects any arguments, enforces
// authentication, and replies with the inline string PONG.
// NOTE(review): arity is checked before auth here — confirm this
// ordering matches the behavior minisentinel wants to emulate.
func (s *Sentinel) cmdPing(c *server.Peer, cmd string, args []string) {
	if len(args) != 0 {
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !s.handleAuth(c) {
		return
	}
	c.WriteInline("PONG")
}
|
package game
import (
"bytes"
"encoding/json"
"github.com/dchest/uniuri"
"github.com/gorilla/websocket"
. "github.com/qaisjp/studenthackv-go-gameserver/structs"
"log"
"time"
)
// CharacterID identifies the role a player has been assigned in a game.
type CharacterID int

// Character roles. UnassignedCharacter is the zero value a new player
// holds until onIdentify assigns a real role.
const (
	UnassignedCharacter CharacterID = iota
	MonsterCharacter
	KingCharacter
	ServantCharacter
)
// PlayerID is a randomly generated identifier for a connected player.
type PlayerID string

// Player is one connected websocket client together with its in-game state.
type Player struct {
	ID        PlayerID
	game      *Game           // The game they belong to
	conn      *websocket.Conn // The websocket connection
	send      chan []byte     // Buffer channel of outbound messages
	Character CharacterID
	Position  Position
	Dead      bool
}
// Framing bytes: queued outbound messages are joined with newline, and
// newlines in inbound messages are collapsed to spaces.
var (
	newline = []byte{'\n'}
	space   = []byte{' '}
)

// NOTE(review): writeWait, pongWait and maxMessageSize are currently
// only referenced from commented-out deadline code in the pumps.
const (
	// Time allowed to write a message to the peer.
	writeWait = 500 * time.Millisecond
	// Time allowed to read the next pong message from the peer.
	pongWait = 1 * time.Second
	// Send pings to peer with this period. Must be less than pongWait.
	pingPeriod = (pongWait * 9) / 10
	// Maximum message size allowed from peer.
	maxMessageSize = 512
)
// NewPlayer wraps conn in a Player, registers it with the game, starts
// the write pump on its own goroutine, and then runs the read pump on
// the calling goroutine — so it blocks until the connection drops.
func NewPlayer(g *Game, conn *websocket.Conn) *Player {
	client := &Player{
		ID:   PlayerID(string(uniuri.NewLen(uniuri.UUIDLen))),
		game: g,
		conn: conn,
		send: make(chan []byte, 256), // buffered so senders rarely block
	}
	// Spawn at a random free position on the map.
	client.Position.X, client.Position.Z = g.Map.GetRandomSpace()
	log.Printf("New player(%s) connected...\n", client.ID)
	g.register <- client
	go client.writePump()
	client.readPump() // blocks until the websocket closes
	return client
}
// readPump pumps messages from the websocket connection to the game.
//
// The application runs readPump in a per-connection goroutine. The application
// ensures that there is at most one reader on a connection by executing all
// reads from this goroutine.
//
// Each inbound frame is parsed as a MessageIn, tagged with the player,
// and forwarded on the game's broadcast channel. Invalid JSON is logged
// and skipped. On any read error the player is unregistered and the
// connection closed.
func (p *Player) readPump() {
	defer func() {
		p.game.unregister <- p
		p.conn.Close()
		log.Println("ReadPump ended")
	}()
	// NOTE(review): read limits and pong-based deadlines are disabled;
	// a silent peer keeps the connection open indefinitely.
	// p.conn.SetReadLimit(maxMessageSize)
	// p.conn.SetReadDeadline(time.Now().Add(pongWait))
	// p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
	for {
		_, message, err := p.conn.ReadMessage()
		if err != nil {
			// if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {
			log.Printf("error: %v", err)
			// }
			break
		}
		// Normalize whitespace before parsing.
		message = bytes.TrimSpace(bytes.Replace(message, newline, space, -1))
		m := MessageIn{}
		err = json.Unmarshal(message, &m)
		if err != nil {
			log.Println(err)
			log.Printf("Invalid JSON received from %s: %s\n", p.ID, message)
			continue
		}
		m.Player = p
		p.game.broadcast <- m
	}
}
// writePump pumps messages from the game to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
//
// Messages queued on p.send are drained in batches (newline-separated)
// into a single websocket frame; a periodic ping keeps the connection
// alive. The pump exits when the send channel closes or a write fails.
func (p *Player) writePump() {
	ticker := time.NewTicker(pingPeriod)
	defer func() {
		ticker.Stop()
		p.conn.Close()
	}()
	for {
		select {
		case message, ok := <-p.send:
			// p.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if !ok {
				// The hub closed the channel.
				p.conn.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}
			w, err := p.conn.NextWriter(websocket.TextMessage)
			if err != nil {
				return
			}
			w.Write(message)
			// Add queued chat messages to the current websocket message.
			n := len(p.send)
			for i := 0; i < n; i++ {
				w.Write(newline)
				w.Write(<-p.send)
			}
			if err := w.Close(); err != nil {
				return
			}
		case <-ticker.C:
			// p.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if err := p.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
				return
			}
		}
	}
}
// Send serializes m to JSON and queues it on the player's outbound
// channel; writePump delivers it over the websocket.
// Panics if m cannot be marshalled.
func (p *Player) Send(m MessageOut) {
	payload, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	p.send <- payload
}
// SendMap sends the game's current map to this player as a "map" message.
func (p *Player) SendMap() {
	p.Send(MessageOut{
		Type:    "map",
		Payload: p.game.Map,
	})
}
// onIdentify assigns the connecting player a character based on the
// client-reported name ("oculus" becomes the monster; the first
// "player" becomes the king, later ones become servants), replies with
// a "registered" message carrying the full player list, and notifies
// every other player with a "joined" message.
//
// Panics on an unrecognized name.
//
// Fixes vs the original: the player-list loop variable shadowed the
// receiver p (error-prone, though the original happened to work), and
// name was redundantly converted via string(name) even though it is
// already a string.
func (p *Player) onIdentify(name string) {
	switch name {
	case "oculus":
		p.game.Monster = p
		p.Character = MonsterCharacter
	case "player":
		// First human player takes the throne; the rest serve.
		if p.game.King == nil {
			p.Character = KingCharacter
			p.game.King = p
		} else {
			p.Character = ServantCharacter
			p.game.Servants = append(p.game.Servants, p)
		}
	default:
		panic("Invalid name " + name)
	}
	// Snapshot the current player set (including p itself).
	players := make([]*Player, 0, len(p.game.players))
	for member := range p.game.players {
		players = append(players, member)
	}
	p.Send(MessageOut{
		Type: "registered",
		Payload: struct {
			You     *Player
			Players []*Player
		}{
			You:     p,
			Players: players,
		},
	})
	// Tell everyone else that p joined.
	for member := range p.game.players {
		if p.ID != member.ID {
			member.Send(MessageOut{
				Type:    "joined",
				Payload: p,
			})
		}
	}
}
// onLeave broadcasts a "quit" message (payload: the leaver's ID) to
// every remaining player in the game.
func (p *Player) onLeave() {
	log.Printf("Player(%s) left the server", p.ID)
	for member := range p.game.players {
		if member.ID == p.ID {
			continue
		}
		member.Send(MessageOut{
			Type:    "quit",
			Payload: p.ID,
		})
	}
}
|
package props
import (
	"errors"
	"sort"
	"strconv"
	"strings"
)
// Source defines a configuration data source.
// A source must provide a Find method that reads a configuration item
// from that source.
type Source interface {
	// Find looks up the configuration item named key; the second return
	// value reports whether it was found.
	Find(key string) (string, bool)
}
// Props defines the set of operations on configuration data.
// It supports adding data sources, looking up items, reading typed
// values, and expanding ${key} variables embedded in strings.
type Props interface {
	// Add registers the data source s with the given priority.
	Add(priority uint8, s Source) Source
	// Find searches the registered sources for the item named key.
	// On success it returns the value and true; otherwise the second
	// return value is false. When several sources define key, the one
	// with the highest priority wins.
	// String, Int64, Uint64 and Bool are thin wrappers around Find.
	Find(key string) (string, bool)
	// String returns the item as a string, or def when it is absent.
	String(key string, def string) string
	// Int64 returns the item parsed as int64, or def on absence or parse failure.
	Int64(key string, def int64) int64
	// Uint64 returns the item parsed as uint64, or def on absence or parse failure.
	Uint64(key string, def uint64) uint64
	// Bool returns the item parsed as bool, or def on absence or parse failure.
	// "1", "t", "T", "true", "TRUE", "True" map to true;
	// "0", "f", "F", "false", "FALSE", "False" map to false;
	// any other value counts as a parse failure.
	Bool(key string, def bool) bool
	// Expand replaces every ${key} variable in s with the value of the
	// configuration item key and returns the result.
	// A ${key} whose item does not exist is kept verbatim;
	// values that themselves contain ${...} are expanded recursively;
	// circular references yield an error.
	Expand(s string) (string, error)
}
// NewProps creates a new, empty Props object.
func NewProps() Props {
	p := new(implProps)
	p.items = make([]*implSourceItem, 0, 5)
	return p
}
// implSourceItem is one node of the source list: a source, its
// priority, and a link to the previously added source of the same
// priority (newest first).
type implSourceItem struct {
	priority int
	source   Source
	next     *implSourceItem
}
// implProps is the default Props implementation: one item per distinct
// priority, kept sorted by ascending priority, with same-priority
// sources chained via next.
type implProps struct {
	items []*implSourceItem
}
// Add registers s with the given priority and returns s.
// If a source with the same priority already exists, the new item is
// placed at the head of that priority's chain (so newer sources are
// consulted first); otherwise it is appended and the list re-sorted by
// ascending priority.
func (p *implProps) Add(priority uint8, s Source) Source {
	newItem := &implSourceItem{
		priority: int(priority),
		source:   s,
		next:     nil,
	}
	for i := 0; i < len(p.items); i++ {
		if int(priority) == p.items[i].priority {
			// Same priority: chain the old head behind the new item.
			newItem.next = p.items[i]
			p.items[i] = newItem
			return s
		}
	}
	p.items = append(p.items, newItem)
	sort.Slice(p.items, func(i, j int) bool {
		return p.items[i].priority < p.items[j].priority
	})
	return s
}
// Find searches the registered sources for key, from the highest
// priority down. Within one priority level the chain built by Add is
// walked newest-source first.
//
// Fix: the original consulted only the head of each priority's chain,
// so every older source registered with a duplicate priority was
// unreachable even though Add deliberately links them via next.
func (p implProps) Find(key string) (string, bool) {
	for i := len(p.items) - 1; i >= 0; i-- {
		for item := p.items[i]; item != nil; item = item.next {
			if val, ok := item.source.Find(key); ok {
				return val, true
			}
		}
	}
	return "", false
}
// String returns the configuration item named key as a string, or def
// when the item is absent.
func (p implProps) String(key string, def string) string {
	if val, ok := p.Find(key); ok {
		return val
	}
	return def
}
// Int64 returns the configuration item named key parsed as an int64
// (base auto-detected), or def when the item is absent or unparseable.
func (p implProps) Int64(key string, def int64) int64 {
	if val, ok := p.Find(key); ok {
		if i, err := strconv.ParseInt(val, 0, 64); err == nil {
			return i
		}
	}
	return def
}
// Uint64 returns the configuration item named key parsed as a uint64
// (base auto-detected), or def when the item is absent or unparseable.
func (p implProps) Uint64(key string, def uint64) uint64 {
	if val, ok := p.Find(key); ok {
		if u, err := strconv.ParseUint(val, 0, 64); err == nil {
			return u
		}
	}
	return def
}
// Bool returns the configuration item named key parsed as a bool
// (per strconv.ParseBool), or def when the item is absent or unparseable.
func (p implProps) Bool(key string, def bool) bool {
	if val, ok := p.Find(key); ok {
		if b, err := strconv.ParseBool(val); err == nil {
			return b
		}
	}
	return def
}
// Expand replaces every ${key} occurrence in s with the value of the
// configuration item key, implementing the behavior documented on the
// Props interface (the previous version was a stub returning s
// unchanged):
//
//   - a ${key} whose item does not exist is kept verbatim;
//   - values that themselves contain ${...} are expanded recursively;
//   - a circular reference yields an error.
func (p implProps) Expand(s string) (string, error) {
	return p.expand(s, map[string]bool{})
}

// expand performs the substitution; expanding tracks the keys whose
// values are currently being expanded, for cycle detection.
func (p implProps) expand(s string, expanding map[string]bool) (string, error) {
	var b strings.Builder
	for {
		start := strings.Index(s, "${")
		if start < 0 {
			// No more variables; emit the rest.
			b.WriteString(s)
			return b.String(), nil
		}
		end := strings.Index(s[start+2:], "}")
		if end < 0 {
			// Unterminated "${": keep the tail verbatim.
			b.WriteString(s)
			return b.String(), nil
		}
		key := s[start+2 : start+2+end]
		b.WriteString(s[:start])
		if val, ok := p.Find(key); ok {
			if expanding[key] {
				return "", errors.New("props: circular reference on key " + key)
			}
			expanding[key] = true
			expanded, err := p.expand(val, expanding)
			if err != nil {
				return "", err
			}
			delete(expanding, key)
			b.WriteString(expanded)
		} else {
			// Unknown key: keep ${key} as-is.
			b.WriteString(s[start : start+2+end+1])
		}
		s = s[start+2+end+1:]
	}
}
|
package nosql
import (
"context"
"errors"
"fmt"
"net/url"
"strings"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// sectionName is the INI section that holds the database URIs.
const sectionName = "mongodb"

// schemePrefix is prepended to URIs written without a scheme.
const schemePrefix = "mongodb://"

// errorVariable reports a missing INI variable.
var errorVariable = errors.New("variable not found")

// sides is the number of parts in a "username:password" credential pair.
const sides = 2
// IniFile is interface to read INI file variables.
type IniFile interface {
	// Get returns the value of variable name in section; callers in
	// this package treat an empty string as "not set".
	Get(section string, name string) string
}
// Databases represents mongodb database pool.
// NOTE(review): dbs is read and written without locking — not safe for
// concurrent Get/Close; confirm callers serialize access.
type Databases struct {
	ini        IniFile                  // source of connection settings
	dbs        map[string]*Database     // open handles, keyed by pool entry name
	clientOpts []*options.ClientOptions // extra options applied to every client
}
// Open creates an empty mongodb database pool that reads connection
// settings from ini and applies opts to every client it creates.
func Open(ini IniFile, opts ...*options.ClientOptions) *Databases {
	d := Databases{ini: ini, clientOpts: opts}
	d.dbs = make(map[string]*Database)
	return &d
}
// Close disconnects every client in the pool.
//
// Fix: the original returned on the first Disconnect error, leaving the
// remaining clients connected; now every client is disconnected and the
// first error encountered is returned. The map is also emptied so a
// later Get reconnects instead of handing back a stale, disconnected
// client.
func (d *Databases) Close(ctx context.Context) error {
	var firstErr error
	for _, db := range d.dbs {
		if err := db.Client().Disconnect(ctx); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	d.dbs = map[string]*Database{}
	return firstErr
}
// Get returns a database handle, connecting and caching it on first
// use; later calls with the same name return the cached handle.
// NOTE(review): d.dbs is accessed without locking — confirm callers do
// not invoke Get/Close concurrently.
func (d *Databases) Get(ctx context.Context, name string, dbOpts ...*options.DatabaseOptions,
) (*Database, error) {
	if db, ok := d.dbs[name]; ok {
		return db, nil
	}
	// Resolve the connection URI and database name via INI substitution.
	uri, dbname, err := d.getURI(name)
	if err != nil {
		return nil, err
	}
	// Caller-supplied client options are appended after the URI-derived ones.
	client, err := mongo.Connect(ctx,
		append([]*options.ClientOptions{options.Client().ApplyURI(uri)}, d.clientOpts...)...)
	if err != nil {
		return nil, err
	}
	db := NewDatabase(client.Database(dbname, dbOpts...))
	d.dbs[name] = db
	return db, nil
}
// getURI resolves the connection URI and database name for the named
// pool entry using INI-file substitution.
//
// Steps: read the base URI from [mongodb] name (error when absent);
// ensure the mongodb:// scheme; strip the path and use it as the
// database name; inject credentials from the host-named section
// ("user"/"password" entries, or a single "user" of the form
// username:password); append extra "query" parameters; substitute the
// host from "host" (last, since the host keys the section lookups
// above); finally query-unescape the assembled URI.
func (d *Databases) getURI(name string) (string, string, error) {
	uri := d.ini.Get(sectionName, name)
	if uri == "" {
		return "", "", fmt.Errorf("%w ([%s] %s)", errorVariable, sectionName, name)
	}
	if !strings.HasPrefix(uri, schemePrefix) {
		uri = schemePrefix + uri
	}
	u, err := url.Parse(uri)
	if err != nil {
		return "", "", err
	}
	// remove dbname
	dbname := strings.TrimPrefix(u.Path, "/")
	u.Path = "/"
	// user = username:password or separate lines, user = username and password = password
	if d.ini.Get(u.Host, "user") != "" && d.ini.Get(u.Host, "password") != "" {
		u.User = url.UserPassword(d.ini.Get(u.Host, "user"), d.ini.Get(u.Host, "password"))
	} else if users := strings.SplitN(d.ini.Get(u.Host, "user"), ":", sides); len(users) == sides {
		u.User = url.UserPassword(users[0], users[1])
	}
	// query = tag=value
	if query := d.ini.Get(u.Host, "query"); query != "" {
		if u.RawQuery != "" {
			u.RawQuery += "&"
		}
		u.RawQuery += query
	}
	// host substitution must be latest
	if host := d.ini.Get(u.Host, "host"); host != "" {
		u.Host = host
	}
	// converting each "%AB" into hex-decoded byte
	if uri, err = url.QueryUnescape(u.String()); err != nil {
		return "", "", err
	}
	return uri, dbname, nil
}
|
package k8sconv
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/types"
"github.com/tilt-dev/tilt/pkg/apis"
"github.com/tilt-dev/tilt/pkg/model/logstore"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
v1 "k8s.io/api/core/v1"
"github.com/tilt-dev/tilt/internal/k8s"
"github.com/tilt-dev/tilt/pkg/logger"
"github.com/tilt-dev/tilt/pkg/model"
)
// Pod converts a Kubernetes Pod into its v1alpha1 API representation,
// carrying over identity, phase, conditions, container statuses, and
// the pod's first owner reference. ancestorUID is recorded verbatim on
// the result.
func Pod(ctx context.Context, pod *v1.Pod, ancestorUID types.UID) *v1alpha1.Pod {
	podInfo := &v1alpha1.Pod{
		UID:                 string(pod.UID),
		Name:                pod.Name,
		Namespace:           pod.Namespace,
		CreatedAt:           apis.NewTime(pod.CreationTimestamp.Time),
		Phase:               string(pod.Status.Phase),
		Deleting:            pod.DeletionTimestamp != nil && !pod.DeletionTimestamp.IsZero(),
		Conditions:          PodConditions(pod.Status.Conditions),
		InitContainers:      PodContainers(ctx, pod, pod.Status.InitContainerStatuses),
		Containers:          PodContainers(ctx, pod, pod.Status.ContainerStatuses),
		AncestorUID:         string(ancestorUID),
		PodTemplateSpecHash: pod.Labels[k8s.TiltPodTemplateHashLabel],
		Status:              PodStatusToString(*pod),
		Errors:              PodStatusErrorMessages(*pod),
	}
	// Only the first owner reference is preserved.
	if len(pod.OwnerReferences) > 0 {
		owner := pod.OwnerReferences[0]
		podInfo.Owner = &v1alpha1.PodOwner{
			Name:       owner.Name,
			APIVersion: owner.APIVersion,
			Kind:       owner.Kind,
		}
	}
	return podInfo
}
// PodConditions converts Kubernetes pod conditions into their v1alpha1
// equivalents, preserving order.
func PodConditions(conditions []v1.PodCondition) []v1alpha1.PodCondition {
	out := make([]v1alpha1.PodCondition, 0, len(conditions))
	for i := range conditions {
		src := &conditions[i]
		out = append(out, v1alpha1.PodCondition{
			Type:               string(src.Type),
			Status:             string(src.Status),
			LastTransitionTime: apis.NewTime(src.LastTransitionTime.Time),
			Reason:             src.Reason,
			Message:            src.Message,
		})
	}
	return out
}
// Convert a Kubernetes Pod into a list of simpler Container models to store in the engine state.
//
// Statuses that fail conversion (e.g. an unparseable container ID) are
// logged at debug level and skipped, as are containers whose converted
// name is empty.
func PodContainers(ctx context.Context, pod *v1.Pod, containerStatuses []v1.ContainerStatus) []v1alpha1.Container {
	result := make([]v1alpha1.Container, 0, len(containerStatuses))
	for _, cStatus := range containerStatuses {
		c, err := ContainerForStatus(pod, cStatus)
		if err != nil {
			logger.Get(ctx).Debugf("%s", err.Error())
			continue
		}
		if c.Name != "" {
			result = append(result, c)
		}
	}
	return result
}
// Convert a Kubernetes Pod and ContainerStatus into a simpler Container model to store in the engine state.
//
// Returns an error when the container ID cannot be normalized. Exactly
// one of the Waiting/Running/Terminated state fields is populated,
// mirroring the Kubernetes status.
func ContainerForStatus(pod *v1.Pod, cStatus v1.ContainerStatus) (v1alpha1.Container, error) {
	// Look up the spec that matches this status to recover the ports.
	cSpec := k8s.ContainerSpecOf(pod, cStatus)
	ports := make([]int32, len(cSpec.Ports))
	for i, cPort := range cSpec.Ports {
		ports[i] = cPort.ContainerPort
	}
	cID, err := k8s.NormalizeContainerID(cStatus.ContainerID)
	if err != nil {
		return v1alpha1.Container{}, fmt.Errorf("error parsing container ID: %w", err)
	}
	c := v1alpha1.Container{
		Name:     cStatus.Name,
		ID:       string(cID),
		Ready:    cStatus.Ready,
		Image:    cStatus.Image,
		Restarts: cStatus.RestartCount,
		State:    v1alpha1.ContainerState{},
		Ports:    ports,
	}
	if cStatus.State.Waiting != nil {
		c.State.Waiting = &v1alpha1.ContainerStateWaiting{
			Reason: cStatus.State.Waiting.Reason,
		}
	} else if cStatus.State.Running != nil {
		c.State.Running = &v1alpha1.ContainerStateRunning{
			StartedAt: apis.NewTime(cStatus.State.Running.StartedAt.Time),
		}
	} else if cStatus.State.Terminated != nil {
		c.State.Terminated = &v1alpha1.ContainerStateTerminated{
			StartedAt:  apis.NewTime(cStatus.State.Terminated.StartedAt.Time),
			FinishedAt: apis.NewTime(cStatus.State.Terminated.FinishedAt.Time),
			Reason:     cStatus.State.Terminated.Reason,
			ExitCode:   cStatus.State.Terminated.ExitCode,
		}
	}
	return c, nil
}
// ContainerStatusToRuntimeState maps a container's state to a coarse
// runtime status:
//   - terminated: OK on exit code 0, otherwise Error;
//   - waiting: Error for known-fatal reasons (ErrorWaitingReasons),
//     otherwise Pending;
//   - running: OK;
//   - none of the above: Unknown.
//
// (Idiom fix: dropped the else branch after a terminating if.)
func ContainerStatusToRuntimeState(status v1alpha1.Container) v1alpha1.RuntimeStatus {
	state := status.State
	if state.Terminated != nil {
		if state.Terminated.ExitCode == 0 {
			return v1alpha1.RuntimeStatusOK
		}
		return v1alpha1.RuntimeStatusError
	}
	if state.Waiting != nil {
		if ErrorWaitingReasons[state.Waiting.Reason] {
			return v1alpha1.RuntimeStatusError
		}
		return v1alpha1.RuntimeStatusPending
	}
	// TODO(milas): this should really consider status.Ready
	if state.Running != nil {
		return v1alpha1.RuntimeStatusOK
	}
	return v1alpha1.RuntimeStatusUnknown
}
// ErrorWaitingReasons is the set of ContainerStateWaiting reasons that are
// treated as errors rather than transient "still starting" states.
var ErrorWaitingReasons = map[string]bool{
	"CrashLoopBackOff":  true,
	"ErrImagePull":      true,
	"ImagePullBackOff":  true,
	"RunContainerError": true,
	"StartError":        true,
	"Error":             true,
}
// SpanIDForPod creates a span ID for a given pod associated with a manifest.
//
// Generally, a given Pod is only referenced by a single manifest, but there are
// rare occasions where it can be referenced by multiple. If the span ID is not
// unique between them, things will behave erratically.
func SpanIDForPod(mn model.ManifestName, podID k8s.PodID) logstore.SpanID {
	// The manifest name is part of the ID so the same pod under two manifests
	// yields two distinct spans.
	return logstore.SpanID(fmt.Sprintf("pod:%s:%s", mn.String(), podID))
}
// copied from https://github.com/kubernetes/kubernetes/blob/aedeccda9562b9effe026bb02c8d3c539fc7bb77/pkg/kubectl/resource_printer.go#L692-L764
// to match the status column of `kubectl get pods`
//
// NOTE(review): this intentionally mirrors the upstream kubectl logic;
// keep it in sync with the linked source rather than refactoring it.
func PodStatusToString(pod v1.Pod) string {
	reason := string(pod.Status.Phase)
	if pod.Status.Reason != "" {
		reason = pod.Status.Reason
	}

	// First, report the state of the first non-successful init container, if any.
	for i, container := range pod.Status.InitContainerStatuses {
		state := container.State
		switch {
		case state.Terminated != nil && state.Terminated.ExitCode == 0:
			continue
		case state.Terminated != nil:
			// initialization is failed
			if len(state.Terminated.Reason) == 0 {
				if state.Terminated.Signal != 0 {
					reason = fmt.Sprintf("Init:Signal:%d", state.Terminated.Signal)
				} else {
					reason = fmt.Sprintf("Init:ExitCode:%d", state.Terminated.ExitCode)
				}
			} else {
				reason = "Init:" + state.Terminated.Reason
			}
		case state.Waiting != nil && len(state.Waiting.Reason) > 0 && state.Waiting.Reason != "PodInitializing":
			reason = "Init:" + state.Waiting.Reason
		default:
			reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
		}
		break
	}

	if isPodStillInitializing(pod) {
		return reason
	}

	// Otherwise derive the reason from the regular containers; iterating in
	// reverse means the first container's state wins (matches kubectl).
	for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
		container := pod.Status.ContainerStatuses[i]

		state := container.State
		if state.Waiting != nil && state.Waiting.Reason != "" {
			reason = state.Waiting.Reason
		} else if state.Terminated != nil && state.Terminated.Reason != "" {
			reason = state.Terminated.Reason
		} else if state.Terminated != nil && state.Terminated.Reason == "" {
			if state.Terminated.Signal != 0 {
				reason = fmt.Sprintf("Signal:%d", state.Terminated.Signal)
			} else {
				reason = fmt.Sprintf("ExitCode:%d", state.Terminated.ExitCode)
			}
		}
	}

	return reason
}
// PodStatusErrorMessages pulls the interesting error messages out of the pod status.
func PodStatusErrorMessages(pod v1.Pod) []string {
	messages := []string{}

	// While initialization is still in progress, init-container errors matter.
	if isPodStillInitializing(pod) {
		for _, cStatus := range pod.Status.InitContainerStatuses {
			messages = append(messages, containerStatusErrorMessages(cStatus)...)
		}
	}

	// Regular containers are walked in reverse order, as in PodStatusToString.
	statuses := pod.Status.ContainerStatuses
	for i := len(statuses) - 1; i >= 0; i-- {
		messages = append(messages, containerStatusErrorMessages(statuses[i])...)
	}
	return messages
}
// containerStatusErrorMessages extracts human-readable error messages from a
// single container's current and last-termination state.
func containerStatusErrorMessages(container v1.ContainerStatus) []string {
	messages := []string{}
	state := container.State
	switch {
	case state.Waiting != nil:
		// Surface the message from the previous run, if it failed with one.
		if last := container.LastTerminationState.Terminated; last != nil &&
			last.ExitCode != 0 &&
			last.Message != "" {
			messages = append(messages, last.Message)
		}
		// If we're in an error mode, also include the error message.
		// Many error modes put important information in the error message,
		// like when the pod will get rescheduled.
		if ErrorWaitingReasons[state.Waiting.Reason] && state.Waiting.Message != "" {
			messages = append(messages, state.Waiting.Message)
		}
	case state.Terminated != nil:
		if state.Terminated.ExitCode != 0 && state.Terminated.Message != "" {
			messages = append(messages, state.Terminated.Message)
		}
	}
	return messages
}
// isPodStillInitializing reports whether any init container has not yet
// terminated successfully (exit code 0).
func isPodStillInitializing(pod v1.Pod) bool {
	for _, cStatus := range pod.Status.InitContainerStatuses {
		term := cStatus.State.Terminated
		if term == nil || term.ExitCode != 0 {
			return true
		}
	}
	return false
}
|
package main
import (
"bufio"
"context"
"database/sql"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"reflect"
"sync"
"syscall"
"time"
impala "github.com/bippio/go-impala"
)
// main parses flags, connects to an Impala daemon, reads a query from a pipe,
// an argument, or an interactive prompt, and executes it.
func main() {
	var timeout int
	var verbose bool

	opts := impala.DefaultOptions
	flag.StringVar(&opts.Host, "host", "", "impalad hostname")
	flag.StringVar(&opts.Port, "p", "21050", "impala daemon port")
	flag.BoolVar(&opts.UseLDAP, "l", false, "use ldap authentication")
	flag.StringVar(&opts.Username, "username", "", "ldap usename")
	flag.StringVar(&opts.Password, "password", "", "ldap password")
	flag.BoolVar(&opts.UseTLS, "tls", false, "use tls")
	flag.StringVar(&opts.CACertPath, "ca-cert", "", "ca certificate path")
	flag.IntVar(&opts.BatchSize, "batch-size", 1024, "fetch batch size")
	flag.StringVar(&opts.MemoryLimit, "mem-limit", "0", "memory limit")
	flag.IntVar(&timeout, "timeout", 0, "timeout in ms; set 0 to disable timeout")
	flag.BoolVar(&verbose, "v", false, "verbose")
	flag.Parse()

	// Validate flag combinations before opening any connection.
	if opts.UseLDAP {
		if opts.Username == "" {
			log.Fatalf("Please specify username with --username flag")
		}
		if opts.Password == "" {
			log.Fatalf("Please specify password with --password flag")
		}
	}
	if opts.UseTLS {
		if opts.CACertPath == "" {
			log.Fatalf("Please specify ca certificate path with --ca-cert flag")
		}
	}
	if verbose {
		opts.LogOut = os.Stderr
	}

	connector := impala.NewConnector(&opts)
	db := sql.OpenDB(connector)
	defer db.Close()

	appctx, cancel := context.WithCancel(context.Background())

	// Cancel the application context on SIGINT/SIGTERM.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
	go func() {
		// A single-case select is just a receive.
		<-sig
		cancel()
	}()

	if err := db.PingContext(appctx); err != nil {
		log.Fatal(err)
	}

	// Pick up the query text from a pipe, the single CLI argument, or a prompt.
	var q string
	stdinstat, err := os.Stdin.Stat()
	if err != nil {
		log.Fatal(err)
	}
	if stdinstat.Mode()&os.ModeNamedPipe != 0 {
		bytes, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			log.Fatal(err)
		}
		q = string(bytes)
	} else if len(flag.Args()) == 1 {
		q = flag.Arg(0)
	} else {
		fmt.Print("> ")
		reader := bufio.NewReader(os.Stdin)
		line, err := reader.ReadString('\n')
		if err != nil {
			log.Fatal(err)
		}
		q = line
	}

	if timeout != 0 {
		// Multiply as time.Duration (int64) so a large ms value cannot
		// overflow int on 32-bit platforms.
		ctx, release := context.WithTimeout(appctx, time.Duration(timeout)*time.Millisecond)
		defer release()
		appctx = ctx
	}

	query(appctx, db, q)
	//exec(appctx, db, q)
}
// query runs the given SQL against db, streaming rows to stdout as
// tab-separated values with a header line, then prints the row count and
// elapsed time.
func query(ctx context.Context, db *sql.DB, query string) {
	startTime := time.Now()

	rows, err := db.QueryContext(ctx, query)
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: the result set was never closed, leaking the connection on
	// early exit paths.
	defer rows.Close()

	coltypes, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}

	// Allocate one scan destination per column based on the driver's scan type.
	dest := make([]interface{}, len(coltypes))
	for i, coltype := range coltypes {
		dest[i] = reflect.New(coltype.ScanType()).Interface()
	}

	header := new(sync.Once)
	var results int
	for rows.Next() {
		// Call Scan directly; the previous reflection-based invocation of
		// the Scan method was equivalent but needlessly indirect.
		if err := rows.Scan(dest...); err != nil {
			log.Fatal(err)
		}
		// Print the header once, before the first row.
		header.Do(func() {
			for _, coltype := range coltypes {
				fmt.Printf("%s\t", coltype.Name())
			}
			fmt.Print("\n")
		})
		for _, col := range dest {
			fmt.Printf("%v\t", reflect.ValueOf(col).Elem())
		}
		fmt.Print("\n")
		results++
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Fetch %d rows(s) in %.2fs\n", results, time.Since(startTime).Seconds())
}
// exec runs a statement that produces no result set and logs the driver's
// sql.Result.
func exec(ctx context.Context, db *sql.DB, query string) {
	result, err := db.ExecContext(ctx, query)
	if err != nil {
		log.Fatal(err)
	}
	log.Print(result)
	fmt.Print("The operation has no results.\n")
}
|
package main
// Valid
// Tries to resolve all kinds of expressions within print to their base type.
func f() {
	type num int
	type fl float64
	type s string
	type b bool

	var (
		a1 num
		a2 fl
		a3 s
		a4 b
	)
	print(a1, a2, a3, a4)
}
package main
import "log"
// Check logs the error and invokes the exit prompt when e is non-nil;
// a nil error is a no-op.
func Check(e error) {
	if e == nil {
		return
	}
	log.Println(e)
	EnterToExit()
}
|
package macgen
import (
"bytes"
"context"
"encoding/binary"
"errors"
"macaddress_io_grabber/models"
"macaddress_io_grabber/utils/redispool"
"math/big"
"math/rand"
"strings"
"time"
)
var (
ErrInconsistentAdminType = errors.New("macgen: inconsistent administration type")
ErrInconsistentTransType = errors.New("macgen: inconsistent transmission type")
ErrIncorrectRange = errors.New("macgen: range is incorrect")
)
// init seeds the package RNG and starts a background feeder goroutine that
// keeps the gb (random 8-byte chunks) and gi (random uint64s) channels topped
// up. NOTE(review): the goroutine runs for the process lifetime; there is no
// shutdown path — presumably intentional for a package-level entropy source.
func init() {
	r = rand.New(rand.NewSource(time.Now().UnixNano()))
	go func() {
		for {
			i := r.Uint64()
			b := make([]byte, 8)
			binary.LittleEndian.PutUint64(b, i)
			// Each random value is delivered to exactly one of the two
			// channels — whichever consumer is ready first.
			select {
			case gb <- b:
			case gi <- i:
			}
		}
	}()
}
// r is the package RNG, seeded in init.
var r *rand.Rand

// gb buffers random 8-byte chunks produced by the init feeder goroutine.
var gb = make(chan []byte, 128)

// gi buffers random uint64s produced by the init feeder goroutine.
var gi = make(chan uint64, 128)
// generate returns `length` random bytes drawn from the package's background
// entropy feeder.
func generate(length uint) []byte {
	if length == 0 {
		return []byte{}
	}
	// Round the buffer up to whole 8-byte chunks, fill it chunk by chunk,
	// then trim to the requested length.
	buf := make([]byte, (length+7)/8*8)
	for off := 0; off < len(buf); off += 8 {
		copy(buf[off:off+8], <-gb)
	}
	return buf[:length]
}
// result carries one generated MAC (or an error) through a Generator's
// output channel.
type result struct {
	MAC   []byte
	Error error
}

// Generator produces a stream of generated MAC addresses until ctx is done.
type Generator interface {
	Generate(ctx context.Context) <-chan result
}

// GeneratorFactory holds the shared configuration from which the concrete
// prefix/OUI/range generators are built.
type GeneratorFactory struct {
	Length    int  // generated MAC length in bytes
	Upper     bool // emit hex digits in upper case
	Multicast bool // required transmission type bit
	Local     bool // required administration type bit
	Separator byte // byte separator in formatted output
	ChunkSize uint
	Count     uint // number of MACs to generate
}
// ByPrefixGenerator builds a Generator that produces MACs starting with the
// given hex prefix. It rejects prefixes whose administration/transmission
// bits contradict the factory configuration, and precomputes the size of the
// address space remaining after the prefix.
func (gf *GeneratorFactory) ByPrefixGenerator(prefix string) (Generator, error) {
	m, err := models.NewMacNaive(prefix)
	if err != nil {
		return nil, err
	}

	// Check Administration and Transmission Types
	// (only possible once at least the first octet — two hex digits — is known).
	if m.Length() >= 2 {
		if (m.AdministrationType() == models.LAA) != gf.Local {
			return nil, ErrInconsistentAdminType
		}

		if (m.TransmissionType() == models.Multicast) != gf.Multicast {
			return nil, ErrIncorrectRange
		}
	}

	// maxCount = number of distinct MACs the free (non-prefix) digits can take.
	maxCount := big.NewInt(1)
	if gf.Length*2-m.Length() > 0 {
		maxCount.SetString(strings.Repeat("F", gf.Length*2-m.Length()), 16)
		maxCount.Add(maxCount, big.NewInt(1))
		if m.Length() < 2 {
			// First octet incomplete: two of its bits are fixed by the
			// admin/multicast flags, shrinking the space by a factor of 4.
			maxCount.Div(maxCount, big.NewInt(4))
		}
	}

	// g is declared before assignment so the gen closure can capture g.length.
	var g *generatorPrefix
	g = &generatorPrefix{
		prefix:    m.String(),
		length:    0,
		upper:     gf.Upper,
		multicast: gf.Multicast,
		local:     gf.Local,
		separator: gf.Separator,
		chunkSize: gf.ChunkSize,
		count:     gf.Count,
		maxCount:  maxCount,
		gen: func() mac {
			return mac(generate(g.length))
		},
	}
	if gf.Length > 0 {
		g.length = uint(gf.Length)
	}
	return g, nil
}
// ByRandPrefixGenerator builds a prefix Generator seeded with a random
// prefix of 3, 3.5 or 4 octets (6, 7 or 9 formatted characters), with the
// transmission/administration bits forced to match the factory settings.
func (gf *GeneratorFactory) ByRandPrefixGenerator() (Generator, error) {
	m := mac(<-gb)
	m.setTransType(gf.Multicast)
	m.setAdminType(gf.Local)

	pr := m.format(' ', 0, true)

	// Randomly pick one of three prefix lengths.
	switch (<-gi) % 3 {
	case 0:
		pr = pr[:6]
	case 1:
		pr = pr[:7]
	case 2:
		pr = pr[:9]
	}

	return gf.ByPrefixGenerator(string(pr))
}
// ByOUIGenerator builds a Generator that draws registered OUI prefixes from
// the given Redis pool and fills in the remaining bytes randomly.
func (gf *GeneratorFactory) ByOUIGenerator(pool *redispool.Pool) (Generator, error) {
	// g is declared before assignment so the gen closure can capture g.length.
	var g *generatorOUI
	g = &generatorOUI{
		rPool:     pool,
		length:    0,
		upper:     gf.Upper,
		separator: gf.Separator,
		chunkSize: gf.ChunkSize,
		count:     gf.Count,
		gen: func() mac {
			return mac(generate(g.length))
		},
	}
	if gf.Length > 0 {
		g.length = uint(gf.Length)
	}
	return g, nil
}
// ByRangeGenerator builds a Generator producing MACs uniformly distributed
// in the inclusive range [prefix1, prefix2]. Short prefixes are padded
// (low bound with '0', high bound with 'f'), the bounds are tightened to
// exclude addresses whose admin/multicast bits conflict with the factory
// settings, and the shared leading prefix is factored out.
func (gf *GeneratorFactory) ByRangeGenerator(prefix1, prefix2 string) (Generator, error) {
	var commonLen int

	p1, err := fixPrefix(prefix1)
	if err != nil {
		return nil, err
	}

	p2, err := fixPrefix(prefix2)
	if err != nil {
		return nil, err
	}

	// Trim prefixes to generated MAC length
	if len(p1) > gf.Length*2 {
		p1 = p1[:gf.Length*2]
	}
	if len(p2) > gf.Length*2 {
		p2 = p2[:gf.Length*2]
	}

	// Extend Prefixes to generated MAC length:
	// the low bound is zero-padded, the high bound is 'f'-padded.
	b := bytes.Repeat([]byte{'0'}, gf.Length*2)
	copy(b, p1)
	p1 = b

	b = bytes.Repeat([]byte{'f'}, gf.Length*2)
	copy(b, p2)
	p2 = b

	p1, _ = newMAC(string(p1))
	p2, _ = newMAC(string(p2))

	if bytes.Compare(p1, p2) > 0 {
		return nil, ErrIncorrectRange
	}

	// Try to move borders to exclude irrelevant values from range.
	// If a border crosses the other one, the whole range was inconsistent
	// with the requested admin/transmission type.
	if t := fixRangeLeft(p1, gf.Multicast, gf.Local); t != 0 {
		if bytes.Compare(p1, p2) > 0 {
			if t&2 != 0 {
				return nil, ErrInconsistentAdminType
			}
			return nil, ErrInconsistentTransType
		}
	}

	if t := fixRangeRight(p2, gf.Multicast, gf.Local); t != 0 {
		if bytes.Compare(p1, p2) > 0 {
			if t&2 != 0 {
				return nil, ErrInconsistentAdminType
			}
			return nil, ErrInconsistentTransType
		}
	}

	// Find piece which is common for 2 prefixes
	for i := 0; i < len(p1) && i < len(p2); i++ {
		if p1[i] != p2[i] {
			break
		}
		commonLen++
	}

	// maxCount = inclusive size of the numeric range.
	maxCount := new(big.Int).Sub(new(big.Int).SetBytes(p2), new(big.Int).SetBytes(p1))
	maxCount.Add(maxCount, big.NewInt(1))

	// g is declared before assignment so closures can capture it if needed.
	var g *generatorRange
	g = &generatorRange{
		length:    0,
		upper:     gf.Upper,
		multicast: gf.Multicast,
		local:     gf.Local,
		separator: gf.Separator,
		chunkSize: gf.ChunkSize,
		prefix:    prefix1[:commonLen],
		first:     p1,
		last:      p2,
		count:     gf.Count,
		maxCount:  maxCount,
		// gen picks a uniform random big.Int in [m, n).
		gen: func(m *big.Int, n *big.Int) *big.Int {
			l := new(big.Int).Sub(n, m)
			l = new(big.Int).Rand(r, l)
			l = new(big.Int).Add(m, l)
			return l
		},
	}
	if gf.Length > 0 {
		g.length = uint(gf.Length)
	}
	return g, nil
}
// fixPrefix normalizes a user-supplied MAC prefix: it is parsed leniently,
// re-encoded, and truncated back to its original hex-digit length.
func fixPrefix(p string) ([]byte, error) {
	m, err := models.NewMacNaive(p)
	if err != nil {
		return nil, err
	}
	mac, err := newMAC(m.String())
	if err != nil {
		return nil, err
	}
	return mac.encode()[:m.Length()], nil
}
// Moves left border up.
// fixRangeLeft increments the first octet of p until its multicast bit (bit 0)
// equals m and its local-admin bit (bit 1) equals l, zeroing the remaining
// octets when the first octet changed. The return value is the XOR of the old
// and new first octet masked to the two flag bits (0 = no change needed).
// NOTE(review): on overflow ("dead end") the tail is set to 0xFF and the
// caller detects the inverted range — confirm against ByRangeGenerator usage.
func fixRangeLeft(p mac, m, l bool) byte {
	n := p[0]
	t := p[0]
	for (n&1 != 0 != m) || (n&2 != 0 != l) {
		if n == 255 {
			// Dead end
			for i := len(p) - 1; i > 0; i-- {
				p[i] = 255
			}
			return (t | n) & ^(t & n) & 3
		}
		n++
	}
	if t != n {
		// First octet moved: reset the rest of the address to the minimum.
		p[0] = n
		for i := len(p) - 1; i > 0; i-- {
			p[i] = 0
		}
	}
	return (t | n) & ^(t & n) & 3
}
// Moves right border down.
// fixRangeRight is the mirror image of fixRangeLeft: it decrements the first
// octet of p until its multicast bit equals m and its local-admin bit equals
// l, filling the remaining octets with 0xFF when the first octet changed.
// Returns the XOR of old/new first octet masked to the two flag bits.
func fixRangeRight(p mac, m, l bool) byte {
	n := p[0]
	t := p[0]
	for (n&1 != 0 != m) || (n&2 != 0 != l) {
		if n == 0 {
			// Dead end
			for i := len(p) - 1; i > 0; i-- {
				p[i] = 0
			}
			return (t | n) & ^(t & n) & 3
		}
		n--
	}
	if t != n {
		// First octet moved: raise the rest of the address to the maximum.
		p[0] = n
		for i := len(p) - 1; i > 0; i-- {
			p[i] = 255
		}
	}
	return (t | n) & ^(t & n) & 3
}
|
package serve_test
import (
"flag"
"fmt"
"log"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/kirubasankars/serve/driver"
"github.com/kirubasankars/serve/serve"
)
// CommonSiteHandler is a test provider whose root handler echoes the resolved
// namespace/application/module names, letting the tests assert routing.
type CommonSiteHandler struct{}

// Build registers a "/" handler that writes "<namespace> <app> <module>".
func (csh *CommonSiteHandler) Build(module serve.Module) {
	module.Handlers["/"] = func(ctx serve.Context, w http.ResponseWriter, r *http.Request) {
		n := ctx.Namespace
		a := ctx.Application
		m := ctx.Module

		var (
			namespace string
			app       string
			module    string
		)

		// Any of the three may be nil depending on how the request resolved.
		if n != nil {
			namespace = n.Name
		}
		if a != nil {
			app = a.Name
		}
		if m != nil {
			module = m.Name
		}

		//fmt.Println(ctx.Namespace, ctx.Application, app, ctx.Module, ctx.Path)
		fmt.Fprintf(w, "%s %s %s", namespace, app, module)
	}
}
// TestMain parses test flags before running the suite.
func TestMain(m *testing.M) {
	flag.Parse()
	os.Exit(m.Run())
}
// TestServeHttp verifies that a plain path under the root namespace resolves
// to the default "home" module and returns 200 with ". . home".
func TestServeHttp(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/path/to/file", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// In-memory config fixtures keyed by OS-normalized path.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/config.json") {
			b := []byte("{ \"roles\" : { \"admin\" : [\"home:permission\"] } }")
			return &b
		}
		if path == filepath.FromSlash("/serve/modules/home/config.json") {
			b := []byte("{ \"permissions\" : { \"permission\" : [\"admin\",\"url(GET /path/to/file)\"] } }")
			return &b
		}
		return nil
	}

	// stat reports which virtual directories exist.
	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") || path == filepath.FromSlash("/serve/apps/app") || path == filepath.FromSlash("/serve/modules/home") {
			return true
		}
		return false
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.RegisterProvider(".", new(CommonSiteHandler))
	server.ServeHTTP(w, req)

	//fmt.Printf("%d - %s", w.Code, w.Body.String())

	if !(w.Code == 200 && strings.TrimSpace(w.Body.String()) == ". . home") {
		t.Error("return code is not 200")
	}
}
// TestServeHttpModuleRootRedirect verifies that a module path without a
// trailing slash is 301-redirected to the slash-terminated form.
func TestServeHttpModuleRootRedirect(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/module", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// In-memory config fixtures keyed by OS-normalized path.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/config.json") {
			b := []byte("{ \"modules\" : [\"module\"], \"roles\" : { \"admin\" : [\"module:permission\"] } }")
			return &b
		}
		if path == filepath.FromSlash("/serve/modules/module/config.json") {
			b := []byte("{ \"permissions\" : { \"permission\" : [\"url(GET /?)\"] } }")
			return &b
		}
		return nil
	}

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") || path == filepath.FromSlash("/serve/modules/module") {
			return true
		}
		return false
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.ServeHTTP(w, req)

	if !(w.Code == 301 && strings.TrimSpace(w.Body.String()) == "<a href=\"/module/\">Moved Permanently</a>.") {
		fmt.Printf("%d - %s", w.Code, w.Body.String())
		t.Error("return code is not 301")
	}
}
// TestServeHttpAppModuleRootRedirect verifies that an app/module path without
// a trailing slash is 301-redirected to the slash-terminated form.
func TestServeHttpAppModuleRootRedirect(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/app/module", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// In-memory config fixtures keyed by OS-normalized path.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/apps/app/config.json") {
			ba := []byte("{ \"modules\" : [\"module\"], \"roles\" : { \"admin\" : [\"module:permission\"] } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/modules/module/config.json") {
			ba := []byte("{ \"permissions\" : { \"permission\" : [\"url(GET /?)\"] } }")
			return &ba
		}
		return nil
	}

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") || path == filepath.FromSlash("/serve/apps/app") || path == filepath.FromSlash("/serve/modules/module") {
			return true
		}
		return false
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.ServeHTTP(w, req)
	//fmt.Printf("%d - %s", w.Code, w.Body.String())

	// BUG FIX: this previously used `||`, which made the assertion vacuous
	// (any 301 passed regardless of body, any matching body passed regardless
	// of code). Both conditions must hold, as in the sibling redirect test.
	if !(w.Code == 301 && strings.TrimSpace(w.Body.String()) == "<a href=\"/app/module/\">Moved Permanently</a>.") {
		t.Logf("%d - %s", w.Code, w.Body.String())
		t.Error("return code is not 301")
	}
}
// TestServeHttpAppModuleRoot verifies that a deep path under app/module is
// routed to the module handler and returns ". app module".
func TestServeHttpAppModuleRoot(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/app/module/path/2/file", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") || path == filepath.FromSlash("/serve/apps/app") || path == filepath.FromSlash("/serve/modules/module") {
			return true
		}
		return false
	}

	// In-memory config fixtures keyed by OS-normalized path.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/apps/app/config.json") {
			ba := []byte("{ \"modules\" : [\"module\"], \"roles\" : { \"admin\" : [\"module:permission\"] } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/modules/module/config.json") {
			ba := []byte("{ \"permissions\" : { \"permission\" : [\"url(GET /path/2/file)\"] } }")
			return &ba
		}
		return nil
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.RegisterProvider(".", new(CommonSiteHandler))
	server.ServeHTTP(w, req)
	//fmt.Printf("%d - %s", w.Code, w.Body)

	if !(w.Code == 200 && w.Body.String() == ". app module") {
		// FIX: use t.Logf instead of log.Printf so the diagnostic is captured
		// with the test output, consistent with the sibling tests.
		t.Logf("%d - %s", w.Code, w.Body)
		t.Error("return code is not 200")
	}
}
// TestServeHttpModuleRoot verifies that the slash-terminated module root
// resolves to the module handler and returns 200 with ". module".
func TestServeHttpModuleRoot(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/module/", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// In-memory config fixtures keyed by OS-normalized path.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/config.json") {
			ba := []byte("{ \"modules\" : [\"module\"], \"roles\" : { \"admin\" : [\"module:permission\"] } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/modules/module/config.json") {
			ba := []byte("{ \"permissions\" : { \"permission\" : [\"url(GET /)\"] } }")
			return &ba
		}
		return nil
	}

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") || path == filepath.FromSlash("/serve/modules/module") {
			return true
		}
		return false
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.RegisterProvider(".", new(CommonSiteHandler))
	server.ServeHTTP(w, req)
	//fmt.Printf("%d - %s", w.Code, w.Body.String())

	// BUG FIX: previously `||` made this assertion vacuous (a 200 with the
	// wrong body, or the right body with a wrong code, still passed); the
	// failure message also claimed 301 while the check expects 200.
	if !(w.Code == 200 && w.Body.String() == ". module") {
		t.Logf("%d - %s", w.Code, w.Body.String())
		t.Error("return code is not 200")
	}
}
// TestServeHttpApp verifies that the app root resolves to the default "home"
// module and returns 200 with ". app home".
func TestServeHttpApp(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/app/", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// In-memory config fixtures keyed by OS-normalized path.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/apps/app/config.json") {
			ba := []byte("{ \"roles\" : { \"admin\" : [\"home:permission\"] } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/modules/home/config.json") {
			ba := []byte("{ \"permissions\" : { \"permission\" : [\"url(GET /)\"] } }")
			return &ba
		}
		return nil
	}

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") || path == filepath.FromSlash("/serve/apps/app") || path == filepath.FromSlash("/serve/modules/home") {
			return true
		}
		return false
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.RegisterProvider(".", new(CommonSiteHandler))
	server.ServeHTTP(w, req)
	//fmt.Printf("%d - %s", w.Code, w.Body.String())

	if !(w.Code == 200 && w.Body.String() == ". app home") {
		t.Logf("%d - %s", w.Code, w.Body.String())
		// BUG FIX: the failure message said "301" although the check
		// expects a 200 response.
		t.Error("return code is not 200")
	}
}
// TestServeHttpNamespaceAppNamespaceModuleRoot verifies resolution of a
// namespaced app/module root to "namespace app module" with a 200.
func TestServeHttpNamespaceAppNamespaceModuleRoot(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/namespace/app/module/", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve/namespace") || path == filepath.FromSlash("/serve/namespace/apps/app") || path == filepath.FromSlash("/serve/namespace/modules/module") {
			return true
		}
		return false
	}

	// In-memory config fixtures keyed by OS-normalized path.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/namespace/apps/app/config.json") {
			ba := []byte("{ \"modules\" : [\"module\"], \"roles\" : { \"admin\" : [\"module:permission\"] } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/namespace/modules/module/config.json") {
			ba := []byte("{ \"permissions\" : { \"permission\" : [\"url(GET /)\"] } }")
			return &ba
		}
		return nil
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.RegisterProvider(".", new(CommonSiteHandler))
	server.ServeHTTP(w, req)
	//fmt.Printf("%d - %s", w.Code, w.Body.String())

	if w.Code != 200 || w.Body.String() != "namespace app module" {
		t.Logf("%d - %s", w.Code, w.Body.String())
		// BUG FIX: the failure message said "301" although the check
		// expects a 200 response.
		t.Error("return code is not 200")
	}
}
// TestServeHttpNamespaceModuleRoot verifies resolution of a namespaced module
// root (no app) to "namespace . module" with a 200.
func TestServeHttpNamespaceModuleRoot(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/namespace/module/", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// In-memory config fixtures keyed by OS-normalized path.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/namespace/config.json") {
			ba := []byte("{ \"modules\" : [\"module\"], \"roles\" : { \"admin\" : [\"module:admin\"] } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/namespace/modules/module/config.json") {
			ba := []byte("{ \"permissions\" : { \"admin\" : [\"url(GET /?)\"] } }")
			return &ba
		}
		return nil
	}

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve/namespace") || path == filepath.FromSlash("/serve/namespace/modules/module") {
			return true
		}
		return false
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.RegisterProvider(".", new(CommonSiteHandler))
	server.ServeHTTP(w, req)
	//fmt.Printf("%d - %s", w.Code, w.Body.String())

	if w.Code != 200 || w.Body.String() != "namespace . module" {
		t.Logf("%d - %s", w.Code, w.Body.String())
		// BUG FIX: the failure message said "301" although the check
		// expects a 200 response.
		t.Error("return code is not 200")
	}
}
// TestServeHttpNamespcaeAppModuleRootRedirect verifies that a namespaced
// app/module path without a trailing slash is 301-redirected.
// (Name keeps the historical "Namespcae" typo to preserve the test identity.)
func TestServeHttpNamespcaeAppModuleRootRedirect(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/namespace/app/module", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve/namespace") || path == filepath.FromSlash("/serve/namespace/apps/app") || path == filepath.FromSlash("/serve/namespace/modules/module") {
			return true
		}
		return false
	}

	// In-memory config fixtures keyed by OS-normalized path.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/namespace/apps/app/config.json") {
			ba := []byte("{ \"modules\" : [\"module\"], \"roles\" : { \"admin\" : [\"module:admin\"] } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/namespace/modules/module/config.json") {
			ba := []byte("{ \"permissions\" : { \"admin\" : [\"url(GET /?)\"] } }")
			return &ba
		}
		return nil
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.ServeHTTP(w, req)
	//fmt.Printf("%d - %s", w.Code, w.Body.String())

	if !(w.Code == 301 && strings.TrimSpace(w.Body.String()) == "<a href=\"/namespace/app/module/\">Moved Permanently</a>.") {
		// FIX: use t.Logf instead of log.Printf so the diagnostic is captured
		// with the test output, consistent with the sibling tests.
		t.Logf("%d - %s", w.Code, w.Body.String())
		t.Error("return code is not 301")
	}
}
// TestServeHttpOAuthUserPassword exercises the OAuth2 resource-owner password
// grant: a valid user/client pair posted to /_oauth2/token yields a token JSON.
func TestServeHttpOAuthUserPassword(t *testing.T) {
	query := "?grant_type=password&client_id=client_id&client_secret=client_secret&username=admin&password=admin"
	req, err := http.NewRequest("POST", "http://localhost:3000/_oauth2/token"+query, nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// User and client registries served from in-memory fixtures.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/users.json") {
			ba := []byte("{ \"admin\" : { \"id\" : \"admin\", \"password\" : \"admin\", \"namespaces\" : { \"namespace\" : { \"apps\" : { \"app\" : { \"roles\" : [ \"\"] } } } } } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/clients.json") {
			ba := []byte("{ \"client_id\" : { \"id\" : \"client_id\", \"secret\" : \"client_secret\" } }")
			return &ba
		}
		return nil
	}

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") {
			return true
		}
		if path == filepath.FromSlash("/serve/modules/_oauth2") {
			return true
		}
		return false
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.RegisterProvider(".", new(CommonSiteHandler))
	server.ServeHTTP(w, req)

	if !(w.Code == 200 && strings.TrimSpace(w.Body.String()) == "{\"access_token\":\"access_token\",\"issued_at\":\"issued_at\",\"signature\":\"signature\"}") {
		t.Error("return code is not 200")
	}
}
// TestServeHttpOAuthUserAgent exercises the OAuth2 implicit (user-agent)
// flow: a valid authorize request redirects back with the token in the URL.
func TestServeHttpOAuthUserAgent(t *testing.T) {
	query := "?response_code=token&client_id=client_id&redirect_uri=namespace/app&username=admin&password=admin"
	req, err := http.NewRequest("POST", "http://localhost:3000/_oauth2/authorize"+query, nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// User and client registries served from in-memory fixtures.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/users.json") {
			ba := []byte("{ \"admin\" : { \"id\" : \"admin\", \"password\" : \"admin\", \"roles\" : { \"namespace:app\" : [ \"\"] } } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/clients.json") {
			ba := []byte("{ \"client_id\" : { \"id\" : \"client_id\", \"secret\" : \"client_secret\" } }")
			return &ba
		}
		return nil
	}

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") {
			return true
		}
		if path == filepath.FromSlash("/serve/modules/_oauth2") {
			return true
		}
		return false
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.RegisterProvider(".", new(CommonSiteHandler))
	server.ServeHTTP(w, req)

	if !(w.Code == 302 && w.Header().Get("Location") == "/namespace/app?access_token=access_token&issued_at=issuedAt") {
		t.Error("return code is not 302")
	}
}
// TestServeHttpOAuthWebServer exercises the OAuth2 authorization-code
// (web server) flow: a valid authorize request redirects to the code callback.
func TestServeHttpOAuthWebServer(t *testing.T) {
	query := "?response_code=code&client_id=client_id&redirect_uri=namespace/app&username=admin&password=admin"
	// BUG FIX: the request was built as GET and then mutated via
	// `req.Method = "POST"` BEFORE the err check — a nil-pointer panic if
	// NewRequest ever failed. Build the POST request directly and check err
	// before touching req.
	req, err := http.NewRequest("POST", "http://localhost:3000/_oauth2/authorize"+query, nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// User and client registries served from in-memory fixtures.
	getConfig := func(path string) *[]byte {
		if path == filepath.FromSlash("/serve/users.json") {
			ba := []byte("{ \"admin\" : { \"id\" : \"admin\", \"password\" : \"admin\", \"roles\" : { \"namespace:app\" : [ \"\"] } } }")
			return &ba
		}
		if path == filepath.FromSlash("/serve/clients.json") {
			ba := []byte("{ \"client_id\" : { \"id\" : \"client_id\", \"secret\" : \"client_secret\" } }")
			return &ba
		}
		return nil
	}

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") {
			return true
		}
		if path == filepath.FromSlash("/serve/modules/_oauth2") {
			return true
		}
		return false
	}

	d := driver.NewFileSystem(stat, getConfig, nil)
	server := serve.NewServer("3000", "/serve", d)
	server.RegisterProvider(".", new(CommonSiteHandler))
	server.ServeHTTP(w, req)

	if !(w.Code == 302 && w.Header().Get("Location") == "/oauth/code_callback?code=12345678&redirect_uri=namespace/app") {
		t.Error("return code is not 302")
	}
}
// TestServeHttpOAuth2Module verifies that a GET on the authorize endpoint is
// served by the driver's file handler (i.e. the login screen is rendered).
func TestServeHttpOAuth2Module(t *testing.T) {
	req, err := http.NewRequest("GET", "http://localhost:3000/_oauth2/authorize/", nil)
	if err != nil {
		log.Fatal(err)
	}
	w := httptest.NewRecorder()

	// No configs exist in this scenario.
	getConfig := func(path string) *[]byte {
		return nil
	}

	stat := func(path string) bool {
		if path == filepath.FromSlash("/serve") {
			return true
		}
		if path == filepath.FromSlash("/serve/modules/_oauth2") {
			return true
		}
		return false
	}

	// Stub file server that stands in for the login page.
	serveFile := func(ctx *serve.Context, w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("login screen"))
	}

	d := driver.NewFileSystem(stat, getConfig, serveFile)
	server := serve.NewServer("3000", "/serve", d)
	server.ServeHTTP(w, req)

	if !(w.Code == 200 && w.Body.String() == "login screen") {
		t.Error("return code is not 200")
	}
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jib
import (
"context"
"errors"
"fmt"
"io"
"os"
"os/exec"
"runtime"
"strings"
"testing"
"time"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/docker"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/platform"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/util"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
// TestBuildJibGradleToDocker runs table-driven cases for building a Jib
// Gradle artifact into the local Docker daemon, asserting the generated
// gradle command line and the returned image ID (or the failure message).
func TestBuildJibGradleToDocker(t *testing.T) {
	tests := []struct {
		description   string
		artifact      *latest.JibArtifact
		commands      util.Command // expected command invocation(s)
		shouldErr     bool
		expectedError string
	}{
		{
			description: "build",
			artifact:    &latest.JibArtifact{},
			commands: testutil.CmdRun(
				"gradle fake-gradleBuildArgs-for-jibDockerBuild --image=img:tag",
			),
		},
		{
			description: "build with project",
			artifact:    &latest.JibArtifact{Project: "project"},
			commands: testutil.CmdRun(
				"gradle fake-gradleBuildArgs-for-project-for-jibDockerBuild --image=img:tag",
			),
		},
		{
			description: "build with custom base image",
			artifact:    &latest.JibArtifact{BaseImage: "docker://busybox"},
			commands: testutil.CmdRun(
				"gradle fake-gradleBuildArgs-for-jibDockerBuild -Djib.from.image=docker://busybox --image=img:tag",
			),
		},
		{
			description: "fail build",
			artifact:    &latest.JibArtifact{},
			commands: testutil.CmdRunErr(
				"gradle fake-gradleBuildArgs-for-jibDockerBuild --image=img:tag",
				errors.New("BUG"),
			),
			shouldErr:     true,
			expectedError: "gradle build failed",
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// A build.gradle in the working dir marks this as a gradle project.
			t.NewTempDir().Touch("build.gradle").Chdir()
			t.Override(&gradleBuildArgsFunc, getGradleBuildArgsFuncFake(t, MinimumJibGradleVersion))
			t.Override(&util.DefaultExecCommand, test.commands)
			api := (&testutil.FakeAPIClient{}).Add("img:tag", "imageID")
			localDocker := fakeLocalDaemon(api)

			builder := NewArtifactBuilder(localDocker, &mockConfig{}, false, false, nil)
			result, err := builder.Build(context.Background(), io.Discard, &latest.Artifact{
				ArtifactType: latest.ArtifactType{
					JibArtifact: test.artifact,
				},
			}, "img:tag", platform.Matcher{})

			t.CheckError(test.shouldErr, err)
			if test.shouldErr {
				t.CheckErrorContains(test.expectedError, err)
			} else {
				// Local builds resolve to the daemon's image ID.
				t.CheckDeepEqual("imageID", result)
			}
		})
	}
}
// TestBuildJibGradleToRegistry checks that pushing a Jib Gradle artifact to a
// remote registry invokes the expected `gradle ... jib` command line and
// returns the remote digest of the pushed image, and that a gradle failure
// surfaces as a "gradle build failed" error.
func TestBuildJibGradleToRegistry(t *testing.T) {
	tests := []struct {
		description   string
		artifact      *latest.JibArtifact // Jib artifact configuration under test
		commands      util.Command        // exec stub: the exact command expected to run
		shouldErr     bool                // whether Build is expected to fail
		expectedError string              // substring expected in the returned error
	}{
		{
			description: "remote build",
			artifact:    &latest.JibArtifact{},
			commands: testutil.CmdRun(
				"gradle fake-gradleBuildArgs-for-jib --image=img:tag",
			),
		},
		{
			description: "build with project",
			artifact:    &latest.JibArtifact{Project: "project"},
			commands: testutil.CmdRun(
				"gradle fake-gradleBuildArgs-for-project-for-jib --image=img:tag",
			),
		},
		{
			description: "build with custom base image",
			artifact:    &latest.JibArtifact{BaseImage: "docker://busybox"},
			commands: testutil.CmdRun(
				"gradle fake-gradleBuildArgs-for-jib -Djib.from.image=docker://busybox --image=img:tag",
			),
		},
		{
			description: "fail build",
			artifact:    &latest.JibArtifact{},
			commands: testutil.CmdRunErr(
				"gradle fake-gradleBuildArgs-for-jib --image=img:tag",
				errors.New("BUG"),
			),
			shouldErr:     true,
			expectedError: "gradle build failed",
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// Run inside a temp dir that looks like a Gradle project root.
			t.NewTempDir().Touch("build.gradle").Chdir()
			// Stub the generated gradle args and the exec layer.
			t.Override(&gradleBuildArgsFunc, getGradleBuildArgsFuncFake(t, MinimumJibGradleVersion))
			t.Override(&util.DefaultExecCommand, test.commands)
			// Stub the registry lookup: only img:tag resolves, to "digest".
			t.Override(&docker.RemoteDigest, func(identifier string, _ docker.Config, _ []v1.Platform) (string, error) {
				if identifier == "img:tag" {
					return "digest", nil
				}
				return "", errors.New("unknown remote tag")
			})
			localDocker := fakeLocalDaemon(&testutil.FakeAPIClient{})
			// pushImages=true: push to the registry, so Build returns the remote digest.
			builder := NewArtifactBuilder(localDocker, &mockConfig{}, true, false, nil)
			result, err := builder.Build(context.Background(), io.Discard, &latest.Artifact{
				ArtifactType: latest.ArtifactType{
					JibArtifact: test.artifact,
				},
			}, "img:tag", platform.Matcher{})
			t.CheckError(test.shouldErr, err)
			if test.shouldErr {
				t.CheckErrorContains(test.expectedError, err)
			} else {
				t.CheckDeepEqual("digest", result)
			}
		})
	}
}
// TestMinimumGradleVersion pins the minimum required Jib Gradle plugin
// version, guarding against accidental changes to the constant.
func TestMinimumGradleVersion(t *testing.T) {
	const want = "1.4.0"
	testutil.CheckDeepEqual(t, want, MinimumJibGradleVersion)
}
// TestGradleWrapperDefinition pins the executable and wrapper names that
// skaffold uses to locate Gradle.
func TestGradleWrapperDefinition(t *testing.T) {
	const (
		wantExecutable = "gradle"
		wantWrapper    = "gradlew"
	)
	testutil.CheckDeepEqual(t, wantExecutable, GradleCommand.Executable)
	testutil.CheckDeepEqual(t, wantWrapper, GradleCommand.Wrapper)
}
// TestGetDependenciesGradle checks that getDependenciesGradle parses the
// "BEGIN JIB JSON" output of the gradle command into a dependency list, that
// results are cached while the build file is unchanged, and that a gradle
// failure is wrapped with a descriptive error.
//
// Fix: three cases previously shared the duplicate description "success",
// which made subtest names ambiguous (`go test -run` targeting and failure
// reports); each case now has a unique, descriptive name.
func TestGetDependenciesGradle(t *testing.T) {
	tmpDir := testutil.NewTempDir(t)
	tmpDir.Touch("build", "dep1", "dep2")
	build := tmpDir.Path("build")
	dep1 := tmpDir.Path("dep1")
	dep2 := tmpDir.Path("dep2")
	ctx := context.Background()
	tests := []struct {
		description string
		stdout      string    // fake gradle output to be parsed
		modTime     time.Time // mod time applied to the build file before the call
		expected    []string
		err         error
	}{
		{
			description: "failure",
			stdout:      "",
			modTime:     time.Unix(0, 0),
			err:         errors.New("error"),
		},
		{
			description: "success",
			stdout:      fmt.Sprintf("BEGIN JIB JSON\n{\"build\":[\"%s\"],\"inputs\":[\"%s\"],\"ignore\":[]}", build, dep1),
			modTime:     time.Unix(0, 0),
			expected:    []string{"build", "dep1"},
		},
		{
			// Expected output differs from stdout since build file hasn't changed, thus gradle command won't run
			description: "success: unchanged build file uses cached deps",
			stdout:      fmt.Sprintf("BEGIN JIB JSON\n{\"build\":[\"%s\"],\"inputs\":[\"%s\", \"%s\"],\"ignore\":[]}", build, dep1, dep2),
			modTime:     time.Unix(0, 0),
			expected:    []string{"build", "dep1"},
		},
		{
			description: "success: modified build file refreshes deps",
			stdout:      fmt.Sprintf("BEGIN JIB JSON\n{\"build\":[\"%s\"],\"inputs\":[\"%s\", \"%s\"],\"ignore\":[]}", build, dep1, dep2),
			modTime:     time.Unix(10000, 0),
			expected:    []string{"build", "dep1", "dep2"},
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			t.Override(&util.DefaultExecCommand, testutil.CmdRunOutErr(
				strings.Join(getCommandGradle(ctx, tmpDir.Root(), &latest.JibArtifact{Project: "gradle-test"}).Args, " "),
				test.stdout,
				test.err,
			))
			// Change build file mod time
			if err := os.Chtimes(build, test.modTime, test.modTime); err != nil {
				t.Fatal(err)
			}
			ws := tmpDir.Root()
			deps, err := getDependenciesGradle(ctx, ws, &latest.JibArtifact{Project: "gradle-test"})
			if test.err != nil {
				prefix := fmt.Sprintf("could not fetch dependencies for workspace %s: initial Jib dependency refresh failed: failed to get Jib dependencies: ", ws)
				t.CheckErrorAndDeepEqual(true, err, prefix+test.err.Error(), err.Error())
			} else {
				t.CheckDeepEqual(test.expected, deps)
			}
		})
	}
}
// TestGetCommandGradle checks that getCommandGradle builds the gradle command
// used to list Jib files, including the ":project:" task prefix for
// multi-module builds; the presence of wrapper scripts in the workspace must
// not change the requested task arguments.
func TestGetCommandGradle(t *testing.T) {
	ctx := context.Background()
	tests := []struct {
		description      string
		jibArtifact      latest.JibArtifact
		filesInWorkspace []string
		expectedCmd      func(workspace string) exec.Cmd
	}{
		{
			description:      "gradle default",
			jibArtifact:      latest.JibArtifact{},
			filesInWorkspace: []string{},
			expectedCmd: func(workspace string) exec.Cmd {
				return GradleCommand.CreateCommand(ctx, workspace, []string{"_skaffoldFailIfJibOutOfDate", "-Djib.requiredVersion=" + MinimumJibGradleVersion, ":_jibSkaffoldFilesV2", "-q", "--console=plain"})
			},
		},
		{
			description:      "gradle default with project",
			jibArtifact:      latest.JibArtifact{Project: "project"},
			filesInWorkspace: []string{},
			expectedCmd: func(workspace string) exec.Cmd {
				return GradleCommand.CreateCommand(ctx, workspace, []string{"_skaffoldFailIfJibOutOfDate", "-Djib.requiredVersion=" + MinimumJibGradleVersion, ":project:_jibSkaffoldFilesV2", "-q", "--console=plain"})
			},
		},
		{
			description:      "gradle with wrapper",
			jibArtifact:      latest.JibArtifact{},
			filesInWorkspace: []string{"gradlew", "gradlew.cmd"},
			expectedCmd: func(workspace string) exec.Cmd {
				return GradleCommand.CreateCommand(ctx, workspace, []string{"_skaffoldFailIfJibOutOfDate", "-Djib.requiredVersion=" + MinimumJibGradleVersion, ":_jibSkaffoldFilesV2", "-q", "--console=plain"})
			},
		},
		{
			description:      "gradle with wrapper and project",
			jibArtifact:      latest.JibArtifact{Project: "project"},
			filesInWorkspace: []string{"gradlew", "gradlew.cmd"},
			expectedCmd: func(workspace string) exec.Cmd {
				return GradleCommand.CreateCommand(ctx, workspace, []string{"_skaffoldFailIfJibOutOfDate", "-Djib.requiredVersion=" + MinimumJibGradleVersion, ":project:_jibSkaffoldFilesV2", "-q", "--console=plain"})
			},
		},
	}
	for _, tc := range tests {
		testutil.Run(t, tc.description, func(t *testutil.T) {
			dir := t.NewTempDir().Touch(tc.filesInWorkspace...)
			got := getCommandGradle(ctx, dir.Root(), &tc.jibArtifact)
			want := tc.expectedCmd(dir.Root())
			// Compare only the fields that determine what gets executed.
			t.CheckDeepEqual(want.Path, got.Path)
			t.CheckDeepEqual(want.Args, got.Args)
			t.CheckDeepEqual(want.Dir, got.Dir)
		})
	}
}
// TestGetSyncMapCommandGradle checks that getSyncMapCommandGradle builds the
// gradle command for the _jibSkaffoldSyncMap task, with and without a
// multi-module project, using the sync-specific minimum plugin version.
func TestGetSyncMapCommandGradle(t *testing.T) {
	ctx := context.Background()
	tests := []struct {
		description string
		workspace   string
		jibArtifact latest.JibArtifact
		expectedCmd func(workspace string) exec.Cmd
	}{
		{
			description: "single module",
			jibArtifact: latest.JibArtifact{},
			expectedCmd: func(workspace string) exec.Cmd {
				return GradleCommand.CreateCommand(ctx, workspace, []string{"fake-gradleBuildArgs-for-_jibSkaffoldSyncMap-skipTests"})
			},
		},
		{
			description: "multi module",
			jibArtifact: latest.JibArtifact{Project: "project"},
			expectedCmd: func(workspace string) exec.Cmd {
				return GradleCommand.CreateCommand(ctx, workspace, []string{"fake-gradleBuildArgs-for-project-for-_jibSkaffoldSyncMap-skipTests"})
			},
		},
	}
	for _, tc := range tests {
		testutil.Run(t, tc.description, func(t *testutil.T) {
			// The sync map requires a newer Jib plugin than plain builds.
			t.Override(&gradleBuildArgsFunc, getGradleBuildArgsFuncFake(t, MinimumJibGradleVersionForSync))
			got := getSyncMapCommandGradle(ctx, tc.workspace, &tc.jibArtifact)
			want := tc.expectedCmd(tc.workspace)
			// Compare only the fields that determine what gets executed.
			t.CheckDeepEqual(want.Path, got.Path)
			t.CheckDeepEqual(want.Args, got.Args)
			t.CheckDeepEqual(want.Dir, got.Dir)
		})
	}
}
// TestGenerateGradleBuildArgs checks the full argument list produced by
// GenerateGradleBuildArgs: task/project selection, test skipping, insecure
// registries, base-image overrides (including resolution of artifact-
// dependency aliases to local "docker://" or remote references), and
// -Djib.from.platforms flags for cross- and multi-platform builds.
func TestGenerateGradleBuildArgs(t *testing.T) {
	tests := []struct {
		description        string
		in                 latest.JibArtifact
		platforms          platform.Matcher
		expectedMinVersion string // overrides MinimumJibGradleVersion when a newer plugin is required
		deps               []*latest.ArtifactDependency
		image              string
		skipTests          bool
		pushImages         bool
		r                  ArtifactResolver
		insecureRegistries map[string]bool
		out                []string
	}{
		{description: "single module", image: "image", out: []string{"fake-gradleBuildArgs-for-testTask", "--image=image"}},
		{description: "single module without tests", image: "image", skipTests: true, out: []string{"fake-gradleBuildArgs-for-testTask-skipTests", "--image=image"}},
		{description: "multi module", in: latest.JibArtifact{Project: "project"}, image: "image", out: []string{"fake-gradleBuildArgs-for-project-for-testTask", "--image=image"}},
		{description: "multi module without tests", in: latest.JibArtifact{Project: "project"}, image: "image", skipTests: true, out: []string{"fake-gradleBuildArgs-for-project-for-testTask-skipTests", "--image=image"}},
		{description: "multi module without tests with insecure registries", in: latest.JibArtifact{Project: "project"}, image: "registry.tld/image", skipTests: true, insecureRegistries: map[string]bool{"registry.tld": true}, out: []string{"fake-gradleBuildArgs-for-project-for-testTask-skipTests", "-Djib.allowInsecureRegistries=true", "--image=registry.tld/image"}},
		{description: "single module with custom base image", in: latest.JibArtifact{BaseImage: "docker://busybox"}, image: "image", out: []string{"fake-gradleBuildArgs-for-testTask", "-Djib.from.image=docker://busybox", "--image=image"}},
		{description: "multi module with custom base image", in: latest.JibArtifact{Project: "project", BaseImage: "docker://busybox"}, image: "image", out: []string{"fake-gradleBuildArgs-for-project-for-testTask", "-Djib.from.image=docker://busybox", "--image=image"}},
		{description: "host platform", image: "image", platforms: platform.Matcher{Platforms: []v1.Platform{{OS: runtime.GOOS, Architecture: runtime.GOARCH}}}, out: []string{"fake-gradleBuildArgs-for-testTask", fmt.Sprintf("-Djib.from.platforms=%s/%s", runtime.GOOS, runtime.GOARCH), "--image=image"}},
		{description: "cross-platform", image: "image", platforms: platform.Matcher{Platforms: []v1.Platform{{OS: "freebsd", Architecture: "arm"}}}, out: []string{"fake-gradleBuildArgs-for-testTask", "-Djib.from.platforms=freebsd/arm", "--image=image"}, expectedMinVersion: MinimumJibGradleVersionForCrossPlatform},
		{description: "multi-platform", image: "image", platforms: platform.Matcher{Platforms: []v1.Platform{{OS: "linux", Architecture: "amd64"}, {OS: "darwin", Architecture: "arm64"}}}, out: []string{"fake-gradleBuildArgs-for-testTask", "-Djib.from.platforms=linux/amd64,darwin/arm64", "--image=image"}, expectedMinVersion: MinimumJibGradleVersionForCrossPlatform},
		{
			description: "single module with local base image from required artifacts",
			in:          latest.JibArtifact{BaseImage: "alias"},
			image:       "image",
			deps:        []*latest.ArtifactDependency{{ImageName: "img", Alias: "alias"}},
			r:           mockArtifactResolver{m: map[string]string{"img": "img:tag"}},
			out:         []string{"fake-gradleBuildArgs-for-testTask", "-Djib.from.image=docker://img:tag", "--image=image"},
		},
		{
			description: "multi module with local base image from required artifacts",
			in:          latest.JibArtifact{Project: "project", BaseImage: "alias"},
			image:       "image",
			deps:        []*latest.ArtifactDependency{{ImageName: "img", Alias: "alias"}},
			r:           mockArtifactResolver{m: map[string]string{"img": "img:tag"}},
			out:         []string{"fake-gradleBuildArgs-for-project-for-testTask", "-Djib.from.image=docker://img:tag", "--image=image"},
		}, {
			description: "single module with remote base image from required artifacts",
			in:          latest.JibArtifact{BaseImage: "alias"},
			image:       "image",
			pushImages:  true,
			deps:        []*latest.ArtifactDependency{{ImageName: "img", Alias: "alias"}},
			r:           mockArtifactResolver{m: map[string]string{"img": "img:tag"}},
			out:         []string{"fake-gradleBuildArgs-for-testTask", "-Djib.from.image=img:tag", "--image=image"},
		},
		{
			description: "multi module with remote base image from required artifacts",
			in:          latest.JibArtifact{Project: "project", BaseImage: "alias"},
			image:       "image",
			pushImages:  true,
			deps:        []*latest.ArtifactDependency{{ImageName: "img", Alias: "alias"}},
			r:           mockArtifactResolver{m: map[string]string{"img": "img:tag"}},
			out:         []string{"fake-gradleBuildArgs-for-project-for-testTask", "-Djib.from.image=img:tag", "--image=image"},
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// Fake the arg builder, asserting the expected minimum plugin version.
			minVersion := MinimumJibGradleVersion
			if test.expectedMinVersion != "" {
				minVersion = test.expectedMinVersion
			}
			t.Override(&gradleBuildArgsFunc, getGradleBuildArgsFuncFake(t, minVersion))
			command := GenerateGradleBuildArgs("testTask", test.image, &test.in, test.platforms, test.skipTests, test.pushImages, test.deps, test.r, test.insecureRegistries, false)
			t.CheckDeepEqual(test.out, command)
		})
	}
}
// TestGradleArgs checks that gradleArgs prepends the out-of-date guard task
// and required-version property, and prefixes the task with ":module:" for
// multi-module projects.
func TestGradleArgs(t *testing.T) {
	tests := []struct {
		description string
		jibArtifact latest.JibArtifact
		expected    []string
	}{
		{
			description: "single module",
			jibArtifact: latest.JibArtifact{},
			expected:    []string{"_skaffoldFailIfJibOutOfDate", "-Djib.requiredVersion=test-version", ":testTask"},
		},
		{
			description: "multi module",
			jibArtifact: latest.JibArtifact{Project: "module"},
			expected:    []string{"_skaffoldFailIfJibOutOfDate", "-Djib.requiredVersion=test-version", ":module:testTask"},
		},
	}
	for _, tc := range tests {
		got := gradleArgs(&tc.jibArtifact, "testTask", "test-version")
		testutil.CheckDeepEqual(t, tc.expected, got)
	}
}
// TestGradleBuildArgs checks that gradleBuildArgs composes the console flag
// (-Djib.console=plain when colors are on, --console=plain otherwise), the
// faked gradleArgs output, the "-x test" suffix for skipped tests, and any
// user-supplied extra flags.
//
// Fix: the multi-module skip-tests case was mislabeled "single module skip
// tests" (duplicating the single-module case's name) even though its artifact
// sets Project: "module"; the description now matches the case.
func TestGradleBuildArgs(t *testing.T) {
	tests := []struct {
		description string
		jibArtifact latest.JibArtifact
		skipTests   bool
		showColors  bool
		expected    []string
	}{
		{
			description: "single module",
			jibArtifact: latest.JibArtifact{},
			skipTests:   false,
			showColors:  true,
			expected:    []string{"-Djib.console=plain", "fake-gradleArgs-for-testTask"},
		},
		{
			description: "single module skip tests",
			jibArtifact: latest.JibArtifact{},
			skipTests:   true,
			showColors:  true,
			expected:    []string{"-Djib.console=plain", "fake-gradleArgs-for-testTask", "-x", "test"},
		},
		{
			description: "single module plain console",
			jibArtifact: latest.JibArtifact{},
			skipTests:   true,
			showColors:  false,
			expected:    []string{"--console=plain", "fake-gradleArgs-for-testTask", "-x", "test"},
		},
		{
			description: "single module with extra flags",
			jibArtifact: latest.JibArtifact{Flags: []string{"--flag1", "--flag2"}},
			skipTests:   false,
			showColors:  true,
			expected:    []string{"-Djib.console=plain", "fake-gradleArgs-for-testTask", "--flag1", "--flag2"},
		},
		{
			description: "multi module",
			jibArtifact: latest.JibArtifact{Project: "module"},
			skipTests:   false,
			showColors:  true,
			expected:    []string{"-Djib.console=plain", "fake-gradleArgs-for-module-for-testTask"},
		},
		{
			description: "multi module skip tests",
			jibArtifact: latest.JibArtifact{Project: "module"},
			skipTests:   true,
			showColors:  true,
			expected:    []string{"-Djib.console=plain", "fake-gradleArgs-for-module-for-testTask", "-x", "test"},
		},
		{
			description: "multi module with extra flags",
			jibArtifact: latest.JibArtifact{Project: "module", Flags: []string{"--flag1", "--flag2"}},
			skipTests:   false,
			showColors:  true,
			expected:    []string{"-Djib.console=plain", "fake-gradleArgs-for-module-for-testTask", "--flag1", "--flag2"},
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			t.Override(&gradleArgsFunc, getGradleArgsFuncFake(t, "test-version"))
			args := gradleBuildArgs("testTask", &test.jibArtifact, test.skipTests, test.showColors, "test-version")
			t.CheckDeepEqual(test.expected, args)
		})
	}
}
// getGradleArgsFuncFake returns a stand-in for gradleArgsFunc that verifies
// the minimum-version parameter is passed through and encodes the task (and
// project, when set) into a deterministic fake argument.
func getGradleArgsFuncFake(t *testutil.T, expectedMinimumVersion string) func(*latest.JibArtifact, string, string) []string {
	return func(a *latest.JibArtifact, task string, minimumVersion string) []string {
		t.CheckDeepEqual(expectedMinimumVersion, minimumVersion)
		arg := "fake-gradleArgs-for-"
		if a.Project != "" {
			arg += a.Project + "-for-"
		}
		return []string{arg + task}
	}
}
// getGradleBuildArgsFuncFake returns a stand-in for gradleBuildArgsFunc that
// verifies the minimum-version parameter is passed through and encodes the
// task, project, and skip-tests parameters into a deterministic fake argument.
func getGradleBuildArgsFuncFake(t *testutil.T, expectedMinimumVersion string) func(string, *latest.JibArtifact, bool, bool, string) []string {
	return func(task string, a *latest.JibArtifact, skipTests, showColors bool, minimumVersion string) []string {
		t.CheckDeepEqual(expectedMinimumVersion, minimumVersion)
		suffix := ""
		if skipTests {
			suffix = "-skipTests"
		}
		if a.Project != "" {
			return []string{"fake-gradleBuildArgs-for-" + a.Project + "-for-" + task + suffix}
		}
		return []string{"fake-gradleBuildArgs-for-" + task + suffix}
	}
}
// mockConfig is a minimal Config implementation for these tests; embedding
// the Config interface means only the methods the tests exercise need bodies.
type mockConfig struct {
	Config
}

// GetInsecureRegistries reports no insecure registries.
func (c *mockConfig) GetInsecureRegistries() map[string]bool { return nil }
// mockArtifactResolver resolves image names to tags from a fixed map.
type mockArtifactResolver struct {
	m map[string]string
}

// GetImageTag returns the tag registered for imageName and whether an entry
// exists for it.
func (r mockArtifactResolver) GetImageTag(imageName string) (string, bool) {
	if tag, ok := r.m[imageName]; ok {
		return tag, true
	}
	return "", false
}
|
// Patches to normalize the proto types
package proto
import (
"time"
)
// TimeSinceEpoch UTC time in seconds, counted from January 1, 1970.
// To convert a time.Time to TimeSinceEpoch, for example:
//
//	proto.TimeSinceEpoch(time.Now().Unix())
//
// For session cookie, the value should be -1.
type TimeSinceEpoch float64

// Time converts the epoch seconds into a time.Time.
func (t TimeSinceEpoch) Time() time.Time {
	elapsed := time.Duration(float64(t) * float64(time.Second))
	return time.Unix(0, 0).Add(elapsed)
}

// String implements fmt.Stringer via the time.Time representation.
func (t TimeSinceEpoch) String() string {
	return t.Time().String()
}
// MonotonicTime Monotonically increasing time in seconds since an arbitrary point in the past.
type MonotonicTime float64
// Duration interface
func (t MonotonicTime) Duration() time.Duration {
return time.Duration(t * MonotonicTime(time.Second))
}
// String interface
func (t MonotonicTime) String() string {
return t.Duration().String()
}
// Point from the origin (0, 0)
type Point struct {
	X float64 `json:"x"`
	Y float64 `json:"y"`
}

// NewPoint instance
func NewPoint(x, y float64) Point {
	return Point{X: x, Y: y}
}

// Add v to p and returns a new Point
func (p Point) Add(v Point) Point {
	return Point{X: p.X + v.X, Y: p.Y + v.Y}
}

// Minus v from p and returns a new Point
func (p Point) Minus(v Point) Point {
	return Point{X: p.X - v.X, Y: p.Y - v.Y}
}

// Scale p with s and returns a new Point
func (p Point) Scale(s float64) Point {
	return Point{X: p.X * s, Y: p.Y * s}
}
// Len is the number of vertices (each vertex occupies two slots: x then y).
func (q DOMQuad) Len() int {
	return len(q) / 2
}

// Each invokes fn once per vertex, passing the vertex and its index.
func (q DOMQuad) Each(fn func(pt Point, i int)) {
	n := q.Len()
	for i := 0; i < n; i++ {
		fn(Point{X: q[2*i], Y: q[2*i+1]}, i)
	}
}
// Center of the polygon: the arithmetic mean of all its vertices.
func (q DOMQuad) Center() Point {
	var sum Point
	q.Each(func(pt Point, _ int) {
		sum = sum.Add(pt)
	})
	n := float64(q.Len())
	return Point{X: sum.X / n, Y: sum.Y / n}
}
// Area of the polygon, computed with the shoelace formula. The result is
// signed: its sign depends on the winding order of the vertices.
// https://en.wikipedia.org/wiki/Polygon#Area
func (q DOMQuad) Area() float64 {
	area := 0.0
	// l is the index of the last vertex (each vertex occupies two slots).
	l := len(q)/2 - 1
	for i := 0; i < l; i++ {
		// cross term of consecutive vertices: x_i*y_{i+1} - x_{i+1}*y_i
		area += q[i*2]*q[i*2+3] - q[i*2+2]*q[i*2+1]
	}
	// close the polygon: cross term of the last vertex with the first
	area += q[l*2]*q[1] - q[0]*q[l*2+1]
	return area / 2
}
// OnePointInside returns the center of the first quad whose (signed) area is
// at least 1, or nil when no such quad exists.
func (res *DOMGetContentQuadsResult) OnePointInside() *Point {
	for _, quad := range res.Quads {
		if quad.Area() < 1 {
			continue
		}
		center := quad.Center()
		return &center
	}
	return nil
}
// Box returns the smallest leveled rectangle that can cover the whole shape.
func (res *DOMGetContentQuadsResult) Box() (box *DOMRect) {
	shape := Shape(res.Quads)
	return shape.Box()
}
// Shape is a list of DOMQuad
type Shape []DOMQuad

// Box returns the smallest leveled rectangle that can cover the whole shape,
// or nil when the shape is empty.
func (qs Shape) Box() (box *DOMRect) {
	if len(qs) == 0 {
		return nil
	}
	// Seed the bounds with the first vertex of the first quad, then widen
	// them with every vertex of every quad.
	minX, minY := qs[0][0], qs[0][1]
	maxX, maxY := minX, minY
	for _, quad := range qs {
		quad.Each(func(pt Point, _ int) {
			if pt.X < minX {
				minX = pt.X
			}
			if pt.Y < minY {
				minY = pt.Y
			}
			if pt.X > maxX {
				maxX = pt.X
			}
			if pt.Y > maxY {
				maxY = pt.Y
			}
		})
	}
	return &DOMRect{minX, minY, maxX - minX, maxY - minY}
}
// MoveTo updates the touch point coordinates to (x, y).
func (p *InputTouchPoint) MoveTo(x, y float64) {
	p.X, p.Y = x, y
}
// CookiesToParams converts a NetworkCookie list to the equivalent
// NetworkCookieParam list, copying every shared field.
//
// The result is never nil: an empty (or nil) input yields an empty list.
func CookiesToParams(cookies []*NetworkCookie) []*NetworkCookieParam {
	// Pre-size the slice: the output has exactly one entry per cookie, so
	// this avoids repeated growth copies during append.
	list := make([]*NetworkCookieParam, 0, len(cookies))
	for _, c := range cookies {
		list = append(list, &NetworkCookieParam{
			Name:     c.Name,
			Value:    c.Value,
			Domain:   c.Domain,
			Path:     c.Path,
			Secure:   c.Secure,
			HTTPOnly: c.HTTPOnly,
			SameSite: c.SameSite,
			Expires:  c.Expires,
			Priority: c.Priority,
		})
	}
	return list
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.