text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"log"
"net/http"
"time"
)
// main registers the root handler and serves HTTP on port 8080.
// ListenAndServe blocks forever and only returns on failure, which is fatal.
func main() {
	const addr = ":8080"
	http.HandleFunc("/", httpHandler)
	log.Println("Server started on port 8080")
	log.Fatal(http.ListenAndServe(addr, nil))
}
// httpHandler logs entry and exit, simulates five seconds of work, then
// writes "Hello" to the client.
func httpHandler(res http.ResponseWriter, req *http.Request) {
	log.Println("Handler started")
	// Deferred so the exit line is printed even if the handler panics.
	defer log.Println("Handler ended")
	time.Sleep(5 * time.Second)
	fmt.Fprintln(res, "Hello")
}
|
package main
import "fmt"
// main demonstrates Go declaration forms plus slice, array and map literals,
// printing each value as it goes.
func main() {
	var a = 5 // type inferred as int
	fmt.Println(a)
	str := "My String" // short declaration with inference
	fmt.Println(str)
	var b int // zero value first, assigned afterwards
	b = 2
	fmt.Println(b)
	var c, d = 3, 4 // multiple declaration with inference
	fmt.Printf("%d, %d\n", c, d)
	var e int // never assigned: prints the zero value 0
	fmt.Println(e)
	// Slice literal: length can grow via append.
	slice := []int{1, 2, 3}
	fmt.Println(slice)
	// Array literal: the size (3) is part of the type and fixed.
	arr := [3]int{4, 5, 6}
	fmt.Println(arr)
	// Map literal: map[Key]Value.
	myMap := map[string]int{
		"BadRequest": 120,
		"NoAuth":     65,
		"OK":         30,
	}
	fmt.Println(myMap)
}
|
package shared
import (
"testing"
"github.com/containers/libpod/pkg/util"
"github.com/stretchr/testify/assert"
)
// Fixture values shared by all GenerateRunEnvironment tests below.
var (
name = "foo"
imageName = "bar"
)
// TestGenerateRunEnvironment checks that each opts entry is surfaced as an
// upper-cased KEY=value environment variable.
func TestGenerateRunEnvironment(t *testing.T) {
opts := make(map[string]string)
opts["opt1"] = "one"
opts["opt2"] = "two"
opts["opt3"] = "three"
envs := GenerateRunEnvironment(name, imageName, opts)
assert.True(t, util.StringInSlice("OPT1=one", envs))
assert.True(t, util.StringInSlice("OPT2=two", envs))
assert.True(t, util.StringInSlice("OPT3=three", envs))
}
// TestGenerateRunEnvironmentNoOpts checks that an empty opts map produces
// none of the OPTn variables.
func TestGenerateRunEnvironmentNoOpts(t *testing.T) {
opts := make(map[string]string)
envs := GenerateRunEnvironment(name, imageName, opts)
assert.False(t, util.StringInSlice("OPT1=", envs))
assert.False(t, util.StringInSlice("OPT2=", envs))
assert.False(t, util.StringInSlice("OPT3=", envs))
}
// TestGenerateRunEnvironmentSingleOpt checks that only the provided option
// appears; absent options produce no variables at all.
func TestGenerateRunEnvironmentSingleOpt(t *testing.T) {
opts := make(map[string]string)
opts["opt1"] = "one"
envs := GenerateRunEnvironment(name, imageName, opts)
assert.True(t, util.StringInSlice("OPT1=one", envs))
assert.False(t, util.StringInSlice("OPT2=", envs))
assert.False(t, util.StringInSlice("OPT3=", envs))
}
// TestGenerateRunEnvironmentName checks that the container name is exported
// as NAME=<name>.
func TestGenerateRunEnvironmentName(t *testing.T) {
opts := make(map[string]string)
envs := GenerateRunEnvironment(name, imageName, opts)
assert.True(t, util.StringInSlice("NAME=foo", envs))
}
// TestGenerateRunEnvironmentImage checks that the image name is exported as
// IMAGE=<image>.
func TestGenerateRunEnvironmentImage(t *testing.T) {
opts := make(map[string]string)
envs := GenerateRunEnvironment(name, imageName, opts)
assert.True(t, util.StringInSlice("IMAGE=bar", envs))
}
|
package server
import (
"github.com/siddontang/ledisdb/client/go/ledis"
"testing"
)
// TestKV exercises the basic string commands (set, setnx, get, getset,
// exists, del) end to end against a test server connection. The assertions
// are order-dependent: each command observes the state left by the previous
// one.
func TestKV(t *testing.T) {
c := getTestConn()
defer c.Close()
// set replies with the OK status string.
if ok, err := ledis.String(c.Do("set", "a", "1234")); err != nil {
t.Fatal(err)
} else if ok != OK {
t.Fatal(ok)
}
// setnx on an existing key is a no-op and returns 0.
if n, err := ledis.Int(c.Do("setnx", "a", "123")); err != nil {
t.Fatal(err)
} else if n != 0 {
t.Fatal(n)
}
// setnx on a fresh key sets it and returns 1.
if n, err := ledis.Int(c.Do("setnx", "b", "123")); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)
}
// "a" still holds the value from the original set.
if v, err := ledis.String(c.Do("get", "a")); err != nil {
t.Fatal(err)
} else if v != "1234" {
t.Fatal(v)
}
// getset returns the old value while storing the new one.
if v, err := ledis.String(c.Do("getset", "a", "123")); err != nil {
t.Fatal(err)
} else if v != "1234" {
t.Fatal(v)
}
if v, err := ledis.String(c.Do("get", "a")); err != nil {
t.Fatal(err)
} else if v != "123" {
t.Fatal(v)
}
if n, err := ledis.Int(c.Do("exists", "a")); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)
}
if n, err := ledis.Int(c.Do("exists", "empty_key_test")); err != nil {
t.Fatal(err)
} else if n != 0 {
t.Fatal(n)
}
// del both keys, then confirm neither exists anymore.
if _, err := ledis.Int(c.Do("del", "a", "b")); err != nil {
t.Fatal(err)
}
if n, err := ledis.Int(c.Do("exists", "a")); err != nil {
t.Fatal(err)
} else if n != 0 {
t.Fatal(n)
}
if n, err := ledis.Int(c.Do("exists", "b")); err != nil {
t.Fatal(err)
} else if n != 0 {
t.Fatal(n)
}
}
// TestKVM exercises the multi-key commands: mset stores two keys, and mget
// must return one reply slot per requested key, with nil for the missing
// key "c".
func TestKVM(t *testing.T) {
c := getTestConn()
defer c.Close()
if ok, err := ledis.String(c.Do("mset", "a", "1", "b", "2")); err != nil {
t.Fatal(err)
} else if ok != OK {
t.Fatal(ok)
}
if v, err := ledis.MultiBulk(c.Do("mget", "a", "b", "c")); err != nil {
t.Fatal(err)
} else if len(v) != 3 {
t.Fatal(len(v))
} else {
// Present keys come back as bulk ([]byte) replies.
if vv, ok := v[0].([]byte); !ok || string(vv) != "1" {
t.Fatal("not 1")
}
if vv, ok := v[1].([]byte); !ok || string(vv) != "2" {
t.Fatal("not 2")
}
// A key that was never set must yield a nil entry, not an empty string.
if v[2] != nil {
t.Fatal("must nil")
}
}
}
// TestKVIncrDecr walks the counter "n" through incr/decr/incrby/decrby,
// verifying each returned value. The first incr on a missing key starts
// from 0, so the sequence is 1, 2, 1, 11, 1.
func TestKVIncrDecr(t *testing.T) {
c := getTestConn()
defer c.Close()
if n, err := ledis.Int64(c.Do("incr", "n")); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)
}
if n, err := ledis.Int64(c.Do("incr", "n")); err != nil {
t.Fatal(err)
} else if n != 2 {
t.Fatal(n)
}
if n, err := ledis.Int64(c.Do("decr", "n")); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)
}
if n, err := ledis.Int64(c.Do("incrby", "n", 10)); err != nil {
t.Fatal(err)
} else if n != 11 {
t.Fatal(n)
}
if n, err := ledis.Int64(c.Do("decrby", "n", 10)); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)
}
}
// TestKVErrorParams verifies that every KV command rejects calls made with
// the wrong number of arguments (each Do must return a non-nil error).
//
// Bug fix: the second half of this test called t.Fatal with a "%v" format
// string; t.Fatal does not interpret format verbs (a go vet printf error),
// so those messages printed literally. All such calls now use t.Fatalf.
func TestKVErrorParams(t *testing.T) {
	c := getTestConn()
	defer c.Close()
	if _, err := c.Do("get", "a", "b", "c"); err == nil {
		t.Fatalf("invalid err %v", err)
	}
	if _, err := c.Do("set", "a", "b", "c"); err == nil {
		t.Fatalf("invalid err %v", err)
	}
	if _, err := c.Do("getset", "a", "b", "c"); err == nil {
		t.Fatalf("invalid err %v", err)
	}
	if _, err := c.Do("setnx", "a", "b", "c"); err == nil {
		t.Fatalf("invalid err %v", err)
	}
	if _, err := c.Do("exists", "a", "b"); err == nil {
		t.Fatalf("invalid err %v", err)
	}
	if _, err := c.Do("incr", "a", "b"); err == nil {
		t.Fatalf("invalid err %v", err)
	}
	if _, err := c.Do("incrby", "a"); err == nil {
		t.Fatalf("invalid err %v", err)
	}
	if _, err := c.Do("decrby", "a"); err == nil {
		t.Fatalf("invalid err %v", err)
	}
	if _, err := c.Do("del"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
	if _, err := c.Do("mset"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
	if _, err := c.Do("mset", "a", "b", "c"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
	if _, err := c.Do("mget"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
	if _, err := c.Do("expire"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
	if _, err := c.Do("expire", "a", "b"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
	if _, err := c.Do("expireat"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
	if _, err := c.Do("expireat", "a", "b"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
	if _, err := c.Do("ttl"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
	if _, err := c.Do("persist"); err == nil {
		t.Fatalf("invalid err of %v", err)
	}
}
|
package main
import (
"josiah.top/go_lagou/ch22/server"
"log"
"net"
"net/rpc"
)
/*
rpc 远程服务 c/s
客户端(Client)调用客户端存根(Client Stub),同时把参数传给客户端存根;
客户端存根将参数打包编码,并通过系统调用发送到服务端;
客户端本地系统发送信息到服务器;
服务器系统将信息发送到服务端存根(Server Stub);
服务端存根解析信息,也就是解码;
服务端存根调用真正的服务端程序(Sever);
服务端(Server)处理后,通过同样的方式,把结果再返回给客户端(Client)。
*/
// main registers MathService under an explicit name and serves RPC requests
// over raw TCP (gob encoding) on :1234. rpc.Accept blocks forever.
func main() {
	// Bug fix: the error from RegisterName was previously discarded; a
	// duplicate or invalid registration should abort startup, not be ignored.
	if err := rpc.RegisterName("MathService", new(server.MathService)); err != nil {
		log.Fatal("register error:", err)
	}
	//rpc.HandleHTTP()//新增变为通过http协议调用
	l, e := net.Listen("tcp", ":1234")
	if e != nil {
		log.Fatal("listen error:", e)
	}
	rpc.Accept(l)
	//http.Serve(l, nil)//换成http服务
}
|
package main
import (
"fmt"
"strings"
"strconv"
"time"
)
// main runs the strings/strconv demo followed by the time demo.
func main() {
fmt.Println("Strings operation")
stringOperation()
timeOperation()
}
// stringOperation demonstrates common strings/strconv helpers on a sample
// sentence, printing each result.
func stringOperation() {
str := "I'm a confident boy"
// Prefix/suffix checks: strings.HasPrefix | strings.HasSuffix.
fmt.Printf("does str has perfix is %s? %t\n", "Im",strings.HasPrefix(str,"Im"))
fmt.Printf("does str has surfix is %s? %t\n", "boy",strings.HasSuffix(str,"boy"))
// Substring containment: strings.Contains.
fmt.Printf("does str contain \"a\"? %t\n",strings.Contains(str,"a"))
// Position of a substring or rune: strings.Index | strings.LastIndex.
// With multiple matches, Index reports only the first occurrence.
fmt.Printf("\"n\" first show in str at index %d\n",strings.Index(str,"n"))
// With multiple matches, LastIndex reports the last occurrence.
fmt.Printf("\"o\" last show in str at index %d\n",strings.LastIndex(str,"o"))
// IndexRune finds the first byte index of a rune (works for non-ASCII too).
fmt.Printf("'i' first show in str at index %d\n",strings.IndexRune(str,rune('i')))
// Replacement: strings.Replace. The trailing number caps how many
// occurrences are replaced; e.g. for "ab ab ab ab ab" an n of 1 replaces
// only the first "ab".
fmt.Printf("str replace \"confident\" with \"handsome\": %s \n",strings.Replace(str,"confident","handsome",3))
// Occurrence count: strings.Count.
fmt.Printf("\"o\" show times %d\n",strings.Count(str,"o"))
// Repetition: strings.Repeat.
fmt.Printf("str repeat 2 times is %s\n",strings.Repeat(str,2))
// Case conversion: strings.ToLower, strings.ToUpper.
fmt.Printf("str to low is %s\n str to uper is %s\n",strings.ToLower(str),strings.ToUpper(str))
// Trimming: strings.TrimSpace | strings.Trim | strings.TrimLeft | strings.TrimRight.
str1 := " 'abcd efa' "
// Trim leading and trailing whitespace.
fmt.Printf("str1 cut space on begin or end:%s\n",strings.TrimSpace(str1))
// Trim the cutset "a" from both ends (the surrounding spaces stay).
fmt.Printf("str1 cut 'a':%s\n",strings.Trim(str1,"a"))
// Trim the leading cutset characters.
fmt.Printf("str1 cut ' 'a' on begin:%s\n",strings.TrimLeft(str1," 'a"))
// Trim the trailing cutset characters.
fmt.Printf("str1 cut 'a' ' on end:%s\n",strings.TrimRight(str1,"a' "))
// Splitting: strings.Fields | strings.Split.
// Fields splits around whitespace.
fmt.Printf("str field is %v\n",strings.Fields(str))
// Split cuts around every "o".
fmt.Printf("str split by 'o' is %v\n",strings.Split(str,"o"))
// Joining: strings.Join glues a slice together with a separator.
aslice := [] string {"1","1","2","3"}
fmt.Printf("\"1,1,2,3\" join a new str by '`' is:%s\n",strings.Join(aslice,"`"))
// NOTE(review): this line is labelled as "reading" from the string but it
// actually calls strings.Repeat again; the "read char at 7" text looks like
// a leftover from an earlier example — confirm the intent.
fmt.Printf("str read char at 7: %s\n",strings.Repeat(str, 2))
// Numeric conversion via strconv: Atoi (string->int) and Itoa (int->string).
orgin := "666"
var a int
var newstr string
// Error deliberately ignored: "666" is a known-good literal.
a,_ = strconv.Atoi(orgin)
fmt.Printf("the a = %d\n",a)
a = a + 5
newstr = strconv.Itoa(a)
fmt.Printf("the newstr = %s\n",newstr)
}
// timeOperation demonstrates the time package: the current instant, UTC
// conversion, Duration arithmetic, and formatting with predefined and
// custom layouts.
func timeOperation() {
	// Current local time.
	t := time.Now()
	fmt.Println(t)
	fmt.Printf("%02d %02d %4d\n", t.Day(), t.Minute(), t.Year())
	// Convert to UTC.
	t = t.UTC()
	fmt.Println(t)
	// One week from now. Improvement: express the span with Duration
	// constants instead of the original hand-computed nanosecond count
	// (60 * 60 * 24 * 7 * 1e9), which is error-prone and opaque.
	week := 7 * 24 * time.Hour
	weekFromNow := t.Add(week)
	fmt.Println(weekFromNow)
	// Formatting with predefined layouts.
	fmt.Println(t.Format(time.RFC822))
	fmt.Println(t.Format(time.ANSIC))
	// Custom layout written in terms of the Go reference time.
	fmt.Println(t.Format("2006-Jan-02 15:04"))
}
|
package main
import "github.com/gin-gonic/gin"
// GetLoggedInUser gets the logged in user from gin context. It returns nil
// when no "user" entry exists or the stored value is not a non-nil *User.
func GetLoggedInUser(c *gin.Context) *User {
	v, exists := c.Get("user")
	if !exists {
		return nil
	}
	// Anything other than a non-nil *User counts as "not logged in".
	if user, ok := v.(*User); ok && user != nil {
		return user
	}
	return nil
}
// IsLoggedIn returns true if the user is logged in (not null)
// The method is deliberately safe on a nil receiver: calling it on a nil
// *User simply reports "not logged in".
func (user *User) IsLoggedIn() bool {
return user != nil
}
// IsInGroup returns true if a given user is in the given group, by probing
// the GroupUser join table for a matching row.
func (user *User) IsInGroup(groupID int) (bool, error) {
	db := GetDb()
	stmt, err := db.Prepare("SELECT 1 FROM GroupUser WHERE groupID = ? AND userID = ?")
	if err != nil {
		return false, err
	}
	defer stmt.Close()
	rows, err := stmt.Query(groupID, user.ID)
	if err != nil {
		return false, err
	}
	defer rows.Close()
	// A returned row means membership exists.
	found := rows.Next()
	// Bug fix: rows.Next returning false can also mean the iteration itself
	// failed; the original ignored that and silently reported "not a member".
	// Distinguish a query failure from an empty result via rows.Err.
	if err := rows.Err(); err != nil {
		return false, err
	}
	return found, nil
}
|
//Utils is the tools lib
package setting
import (
"fmt"
"io"
"os"
"strings"
"sync"
"time"
"github.com/lunny/log"
)
// Seelogger is a logger instance. It embeds *log.Logger, so all of the
// standard logging methods are available; the overrides below add error
// counting via SendCount.
type Seelogger struct {
*log.Logger
}
// seelogwriter is an io.Writer that appends to a date-named file under
// logs/ (YYYYMMDD.log), rotating when the date changes. Access is guarded
// by the embedded mutex.
type seelogwriter struct {
*sync.Mutex
currentFileName string
fd *os.File
}
var (
// SeeLog is the shared logger. It writes to stderr until init() re-points
// it at the file writer below.
SeeLog *Seelogger = &Seelogger{Logger: log.New(os.Stderr, "", log.Ldefault())}
)
// -----------------------------------------
// Errorf logs a formatted message at error level and bumps the error
// counter. NOTE(review): SendCount(1272, 1) appears to report a metric with
// a hard-coded id — confirm what 1272 identifies.
func (l *Seelogger) Errorf(format string, v ...interface{}) {
l.Output("", log.Lerror, 2, fmt.Sprintf(format, v...))
SendCount(1272, 1)
}
// Error logs its arguments at error level and bumps the error counter.
func (l *Seelogger) Error(v ...interface{}) {
l.Output("", log.Lerror, 2, fmt.Sprintln(v...))
SendCount(1272, 1)
}
// -----------------------------------------
// Fatal logs at fatal level. NOTE(review): unlike the standard library's
// log.Fatal, no explicit os.Exit(1) appears here — confirm whether the
// embedded logger exits on Lfatal output.
func (l *Seelogger) Fatal(v ...interface{}) {
l.Output("", log.Lfatal, 2, fmt.Sprintln(v...))
SendCount(1272, 1)
}
// Fatalf logs a formatted message at fatal level. NOTE(review): same
// caveat as Fatal — no explicit os.Exit(1) appears in this method.
func (l *Seelogger) Fatalf(format string, v ...interface{}) {
l.Output("", log.Lfatal, 2, fmt.Sprintf(format, v...))
SendCount(1272, 1)
}
// -----------------------------------------
// Panic logs at panic level. NOTE(review): no explicit call to panic()
// appears here — confirm whether the embedded logger panics on Lpanic.
func (l *Seelogger) Panic(v ...interface{}) {
l.Output("", log.Lpanic, 2, fmt.Sprintln(v...))
}
// Panicf logs a formatted message at panic level. Same caveat as Panic.
func (l *Seelogger) Panicf(format string, v ...interface{}) {
l.Output("", log.Lpanic, 2, fmt.Sprintf(format, v...))
}
// -----------------------------------------
// Write implements io.Writer. It appends p to the current day's log file
// (logs/YYYYMMDD.log), opening a new file and closing the old one when the
// date rolls over. The whole operation is serialized by the mutex.
func (l *seelogwriter) Write(p []byte) (int, error) {
	// Bug fix: the original registered `defer l.Mutex.Unlock()` BEFORE
	// calling Lock. If anything between the defer and the Lock had failed,
	// the deferred Unlock would fire on an unlocked mutex and panic. The
	// idiomatic (and safe) order is Lock first, then defer Unlock.
	l.Mutex.Lock()
	defer l.Mutex.Unlock()
	timeFormat := time.Now().Format("20060102")
	if l.fd == nil || !strings.EqualFold(timeFormat, l.currentFileName) {
		l.currentFileName = timeFormat
		f, err := os.OpenFile("logs/"+l.currentFileName+".log", os.O_CREATE|os.O_RDWR|os.O_APPEND, 0664)
		if err != nil {
			panic("create log file failed:" + err.Error())
		}
		// Close yesterday's file before switching to today's.
		if l.fd != nil {
			l.fd.Close()
		}
		l.fd = f
	}
	return l.fd.Write(p)
}
// init creates the logs directory and wires SeeLog to the rotating file
// writer; outside production mode, output is mirrored to stdout and the
// level lowered to debug.
func init() {
// Error ignored: MkdirAll is a no-op when ./logs already exists, and a
// real failure will surface when the writer first opens a file.
os.MkdirAll("./logs", os.ModePerm)
f := &seelogwriter{Mutex: new(sync.Mutex)}
if IsProMode {
SeeLog.SetOutput(f)
SeeLog.SetOutputLevel(log.Linfo) // keep as much log detail as possible for now
} else {
w := io.MultiWriter(f, os.Stdout)
SeeLog.SetOutput(w)
SeeLog.SetOutputLevel(log.Ldebug)
}
}
|
package main
import (
"flag"
"io/fs"
"log"
"os"
"path/filepath"
"time"
)
var flagPath string
var flagDuration string
var flagDryRun bool
var flagLogPath string
var duration time.Duration
var logFile *os.File
// init registers and parses all command line flags, validates the required
// path, resolves the duration string, and initializes log output.
// NOTE(review): calling flag.Parse inside init prevents other packages from
// registering flags first — confirm this ordering is intentional.
func init() {
log.Print("initializing")
flag.StringVar(&flagPath, "path", "", "the path of the directory to prune files from")
flag.StringVar(&flagLogPath, "logPath", "", "the path to write filecleaner.log to; if not specified, logs are written to stdout")
flag.BoolVar(&flagDryRun, "dryRun", false, "true to enable dry-run mode which only displays files that will be deleted but does not delete them")
flag.StringVar(&flagDuration, "duration", "5m", "the duration of time to evaluate the last modDate by. Calculated as now()-duration. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as \"300ms\", \"-1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".")
flag.Parse()
// -path is mandatory; everything else has a usable default.
if flagPath == "" {
log.Fatal("missing required param: flagPath")
}
localDuration, err := time.ParseDuration(flagDuration)
if err != nil {
log.Fatal("invalid duration flag", err.Error())
}
log.Println("path =", flagPath)
log.Println("duration =", flagDuration)
log.Println("dryRun =", flagDryRun)
log.Println("logPath =", flagLogPath)
duration = localDuration
log.Println("initializing logs")
initLogs()
}
// initLogs routes the standard logger either to <flagLogPath>/filecleaner.log
// (created/appended) or, when no log path was given, to stdout.
func initLogs() {
	if flagLogPath == "" {
		log.SetOutput(os.Stdout)
		log.Println("writing logs to STDOUT")
		return
	}
	f, err := os.OpenFile(flagLogPath+"/filecleaner.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	// Keep the handle in the package variable so main can close it on exit.
	logFile = f
	log.Println("writing logs to", flagLogPath)
	log.SetOutput(logFile)
}
// findFilesToDelete walks path and collects every non-directory entry whose
// modification time is older than now minus the package-level duration.
// It returns the matching paths, their directory entries, and any walk error.
func findFilesToDelete(path string) (paths []string, infos []fs.DirEntry, err error) {
	log.Println("about to findFilesToDelete")
	// Hoist the cutoff out of the walk so every file is compared against
	// the same instant.
	cutoff := time.Now().Add(-duration)
	err = filepath.WalkDir(path, func(p string, d fs.DirEntry, e error) error {
		if e != nil {
			return e
		}
		if d.IsDir() {
			return nil
		}
		// Bug fix: the original discarded the error from d.Info and could
		// dereference a nil FileInfo (e.g. if a file vanished mid-walk).
		info, infoErr := d.Info()
		if infoErr != nil {
			return infoErr
		}
		if info.ModTime().Before(cutoff) {
			paths = append(paths, p)
			infos = append(infos, d)
		}
		return nil
	})
	log.Println("found", len(paths), "files to delete")
	return
}
// dryRun lists the files that would be removed, without deleting anything.
func dryRun(paths []string, dirEntries []fs.DirEntry) {
	log.Println("dryRun is enabled so not deleting any files. These files would be deleted if dryRun==false")
	for i := range dirEntries {
		// Info error intentionally ignored here: this is display-only output.
		fileInfo, _ := dirEntries[i].Info()
		log.Println(paths[i], "with last modDate of ", fileInfo.ModTime())
	}
}
func deleteFiles(paths []string, dirEntries []fs.DirEntry) {
log.Println("dryRun is not enabled")
for i, path := range paths {
fileInfo, _ := dirEntries[i].Info()
log.Println("removing file ", paths[i], "with last modDate of ", fileInfo.ModTime())
err := os.Remove(path)
if err != nil {
log.Fatal(err.Error())
}
}
}
// main finds files older than the configured duration under flagPath and
// either lists them (dry run) or deletes them.
func main() {
//close the logfile when the program ends. i would love to handle the possible error on closing, but doesnt seem
//to be consensus on how to do that with a defer. i found https://www.joeshaw.org/dont-defer-close-on-writable-files/
//but not gonna overdo this simple program
// Note: logFile stays nil when logging to stdout; (*os.File).Close on a
// nil receiver returns an error rather than panicking, so this is safe.
defer logFile.Close()
log.Println("starting filecleaner")
paths, dirEntries, err := findFilesToDelete(flagPath)
if err != nil {
log.Fatal(err.Error())
}
if len(paths) > 0 {
if flagDryRun {
dryRun(paths, dirEntries)
} else {
deleteFiles(paths, dirEntries)
}
}
log.Println("filecleaner done")
}
|
package commands
import (
"os"
"github.com/BurntSushi/toml"
"github.com/cpuguy83/strongerrors"
"github.com/pkg/errors"
)
// UserConfig represents the user configuration read from a config file
// (TOML; see ReadUserConfig). The nested structs mirror the sections of
// the config file.
type UserConfig struct {
Subscription string
Location string
Profile struct {
// Kubernetes cluster options, including optional custom binaries.
Kubernetes struct {
Version string
NetworkPolicy string
NetworkPlugin string
CustomBinaries struct {
HyperkubeImage string
}
}
// Leader (control-plane) node pool settings.
Leader struct {
Linux struct {
Distro string
SKU string
Count *int
}
}
// Agent node pools; Linux and Windows are configured independently.
// Count is a pointer so "unset" can be distinguished from zero.
Agent struct {
Linux struct {
Distro string
SKU string
Count *int
AvailabilityProfile string
}
Windows struct {
SKU string
Count *int
AvailabilityProfile string
}
}
// Per-OS login credentials: key-based for Linux, password file for Windows.
Auth struct {
Linux struct {
User string
PublicKeyFile string
}
Windows struct {
User string
PasswordFile string
}
}
}
}
// ReadUserConfig reads the TOML user configuration from the provided path.
// If the file does not exist, a strongerrors.NotFound is returned so callers
// can distinguish a missing config from other I/O or decode failures.
func ReadUserConfig(configPath string) (UserConfig, error) {
	var cfg UserConfig
	f, err := os.Open(configPath)
	if err != nil {
		if os.IsNotExist(err) {
			return cfg, strongerrors.NotFound(errors.Wrap(err, "user config file not found"))
		}
		return cfg, errors.Wrap(err, "could not open specified config file path")
	}
	// Bug fix: the original leaked the file handle; close it when done.
	defer f.Close()
	if _, err := toml.DecodeReader(f, &cfg); err != nil {
		return cfg, errors.Wrap(err, "error decoding user config")
	}
	return cfg, nil
}
|
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package relayexporter
import (
"time"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
)
// Exporter constants: supported payload formats and the default upload
// interval (in seconds — see the Interval field comment on Config).
const (
formatProto = "proto"
formatJSON = "json"
defaultInterval = 60
)
// Config defines configuration for relay exporter.
type Config struct {
configmodels.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
// URL is the URL of the relay receiver (e.g.: http://some.url:14268/api/traces).
URL string `mapstructure:"url"`
// Interval defines time in seconds between batch uploading tracedata to relay receiver
// default value is 60 seconds. zero means do not wait.
// NOTE(review): the field type is time.Duration while the comment speaks
// in plain seconds — confirm which unit the config decoder applies.
Interval time.Duration `mapstructure:"interval"`
// Format selects the payload encoding (see formatProto / formatJSON).
Format string `mapstructure:"format"`
// Headers are a set of headers to be added to the HTTP request sending relay
Headers map[string]string `mapstructure:"headers"`
}
|
package rtrserver
import (
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/xormdb"
)
// getMaxSerialNumberDb returns the newest serialNumber recorded in
// lab_rpki_rtr_serial_number (the row with the highest id). When the table
// has no rows, the serial number is initialized to 1.
func getMaxSerialNumberDb() (serialNumber uint32, err error) {
sql := `select serialNumber from lab_rpki_rtr_serial_number order by id desc limit 1`
has, err := xormdb.XormEngine.SQL(sql).Get(&serialNumber)
if err != nil {
belogs.Error("getMaxSerialNumberDb():select serialNumber from lab_rpki_rtr_serial_number order by id desc limit 1 fail:", err)
return serialNumber, err
}
if !has {
// init serialNumber
serialNumber = 1
}
belogs.Debug("getMaxSerialNumberDb():select max(sessionserialNumId) lab_rpki_rtr_serial_number, serialNumber :", serialNumber)
return serialNumber, nil
}
|
/*
* @lc app=leetcode.cn id=141 lang=golang
*
* [141] 环形链表
*/
// @lc code=start
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
package main
import "fmt"
// ListNode is a singly-linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}

// hasCycle reports whether the list starting at head contains a cycle,
// using Floyd's tortoise-and-hare: advance one pointer by one node and the
// other by two; they meet if and only if the list loops.
//
// Bug fixes versus the original:
//   - it compared p1.Val == p2.Val, which falsely reports a cycle for any
//     acyclic list containing duplicate values; pointers must be compared.
//   - it dereferenced head.Next unconditionally, panicking on a nil head.
//   - its `p2 == nil` check was unreachable (the loop condition already
//     guarantees p2.Next.Next's operands are non-nil).
func hasCycle(head *ListNode) bool {
	slow, fast := head, head
	for fast != nil && fast.Next != nil {
		slow = slow.Next
		fast = fast.Next.Next
		if slow == fast {
			return true
		}
	}
	return false
}
// main exercises hasCycle on hand-built lists: a single node, a two-node
// acyclic list, a self-loop, and a two-node cycle.
func main() {
	head := &ListNode{Val: 0}
	second := &ListNode{Val: 1}
	head.Next = nil
	result := hasCycle(head)
	fmt.Printf("one node is %t\n", result)
	head.Next = second
	result = hasCycle(head)
	fmt.Printf("two node is %t\n", result)
	fmt.Println("v2")
	loop := &ListNode{Val: 0}
	loop.Next = loop
	result = hasCycle(loop)
	fmt.Printf("one circle node is %t\n", result)
	fmt.Println("v3")
	first := &ListNode{Val: 0}
	other := &ListNode{Val: 0}
	first.Next = other
	other.Next = first
	result = hasCycle(first)
	fmt.Printf("two circle node is %t\n", result)
}
// @lc code=end
|
package main
import (
"fmt"
"os"
"unicode"
)
// main re-cases every command-line token (including the program name at
// index 0, as os.Args[0:] keeps it) via TrasformaParola and prints them
// separated by spaces.
func main() {
	words := os.Args[0:]
	for i, word := range words {
		fmt.Print(TrasformaParola(word, i), " ")
	}
}
// TrasformaParola returns parola re-cased according to its position in the
// word list: words at even positions get title case (first rune upper, the
// rest lower); words at odd positions alternate lower/upper case starting
// with lower.
func TrasformaParola(parola string, posizione int) (parolaTrasformata string) {
	runes := []rune(parola)
	out := make([]rune, 0, len(runes))
	if posizione%2 == 0 {
		// Title case: only the first rune is upper-cased.
		for idx, r := range runes {
			if idx == 0 {
				out = append(out, unicode.ToUpper(r))
			} else {
				out = append(out, unicode.ToLower(r))
			}
		}
	} else {
		// Alternating case: even rune indexes lower, odd indexes upper.
		for idx, r := range runes {
			if idx%2 == 0 {
				out = append(out, unicode.ToLower(r))
			} else {
				out = append(out, unicode.ToUpper(r))
			}
		}
	}
	parolaTrasformata = string(out)
	return parolaTrasformata
}
|
package crawler
import (
"errors"
"fmt"
"log"
"net/http"
"path/filepath"
"github.com/PuerkitoBio/goquery"
)
// FindForGate fetches url, derives a title from the last path segment, and
// extracts the inner HTML of the ".itemContent" element as text. All failure
// paths log and return a non-nil error.
func FindForGate(url string) (title string, text string, err error) {
	// Request the HTML page.
	res, err := http.Get(url)
	if err != nil {
		log.Println(err)
		return
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		// Bug fix: the original returned a nil error on a non-200 status,
		// so callers could not tell a failed fetch from an empty page.
		err = fmt.Errorf("status code error: %d %s", res.StatusCode, res.Status)
		log.Println(err)
		return
	}
	// Load the HTML document.
	doc, err := goquery.NewDocumentFromReader(res.Body)
	if err != nil {
		log.Println(err)
		return
	}
	_, title = filepath.Split(url)
	fmt.Println("title: ", title)
	text, err = doc.Find(".itemContent").Html()
	if err != nil {
		log.Println(err)
		return
	}
	if len(text) == 0 {
		log.Println("error: empty text. ", url)
		return title, text, errors.New("empty text")
	}
	return title, text, nil
}
// CleanDataForGate normalizes crawled data. It is currently an identity
// pass-through, reserved for future cleaning rules.
func CleanDataForGate(title, text string) (string, string) {
	cleanTitle, cleanText := title, text
	return cleanTitle, cleanText
}
// Gate API.
// Gate crawls the gate.io API page, cleans the result, and saves it as
// markdown under ./gate. NOTE(review): errors from the crawl and save steps
// are logged but this function always returns nil — confirm callers do not
// need the failure propagated.
func Gate() error {
s := "https://gate.io/api2"
title, text, err := FindForGate(s)
if err != nil {
log.Println(err)
}
title, text = CleanDataForGate(title, text)
if err = SaveForMd(title, text, "./gate"); err != nil {
log.Println(err)
}
return nil
}
|
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package android
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"text/scanner"
"github.com/google/blueprint"
"github.com/google/blueprint/proptools"
)
// Module class name constants used to classify build outputs for device
// and host.
var (
DeviceSharedLibrary = "shared_library"
DeviceStaticLibrary = "static_library"
DeviceExecutable = "executable"
HostSharedLibrary = "host_shared_library"
HostStaticLibrary = "host_static_library"
HostExecutable = "host_executable"
)
// BuildParams describes a single build statement: its rule, inputs,
// outputs, dependency metadata and free-form rule arguments.
type BuildParams struct {
Rule blueprint.Rule
Deps blueprint.Deps
Depfile WritablePath
Description string
Output WritablePath
Outputs WritablePaths
ImplicitOutput WritablePath
ImplicitOutputs WritablePaths
Input Path
Inputs Paths
Implicit Path
Implicits Paths
OrderOnly Paths
Default bool
Args map[string]string
}
// ModuleBuildParams is the parameter type used by the deprecated
// ModuleContext.ModuleBuild; it is identical in layout to BuildParams.
type ModuleBuildParams BuildParams
// EarlyModuleContext provides methods that can be called early, as soon as the properties have
// been parsed into the module and before any mutators have run.
type EarlyModuleContext interface {
// Identity of the current module.
Module() Module
ModuleName() string
ModuleDir() string
ModuleType() string
BlueprintsFile() string
ContainsProperty(name string) bool
// Error reporting; Failed reports whether any errors were emitted.
Errorf(pos scanner.Position, fmt string, args ...interface{})
ModuleErrorf(fmt string, args ...interface{})
PropertyErrorf(property, fmt string, args ...interface{})
Failed() bool
AddNinjaFileDeps(deps ...string)
// Partition/variant classification of the module.
DeviceSpecific() bool
SocSpecific() bool
VendorOverlay() bool
ProductSpecific() bool
SystemExtSpecific() bool
Platform() bool
Config() Config
DeviceConfig() DeviceConfig
// Deprecated: use Config()
AConfig() Config
// GlobWithDeps returns a list of files that match the specified pattern but do not match any
// of the patterns in excludes. It also adds efficient dependencies to rerun the primary
// builder whenever a file matching the pattern as added or removed, without rerunning if a
// file that does not match the pattern is added to a searched directory.
GlobWithDeps(pattern string, excludes []string) ([]string, error)
Glob(globPattern string, excludes []string) Paths
GlobFiles(globPattern string, excludes []string) Paths
IsSymlink(path Path) bool
Readlink(path Path) string
}
// BaseModuleContext is the same as blueprint.BaseModuleContext except that Config() returns
// a Config instead of an interface{}, and some methods have been wrapped to use an android.Module
// instead of a blueprint.Module, plus some extra methods that return Android-specific information
// about the current module.
type BaseModuleContext interface {
EarlyModuleContext
// Queries about other (dependency) modules.
OtherModuleName(m blueprint.Module) string
OtherModuleDir(m blueprint.Module) string
OtherModuleErrorf(m blueprint.Module, fmt string, args ...interface{})
OtherModuleDependencyTag(m blueprint.Module) blueprint.DependencyTag
OtherModuleExists(name string) bool
OtherModuleType(m blueprint.Module) string
// Direct-dependency lookup, optionally filtered by dependency tag.
GetDirectDepsWithTag(tag blueprint.DependencyTag) []Module
GetDirectDepWithTag(name string, tag blueprint.DependencyTag) blueprint.Module
GetDirectDep(name string) (blueprint.Module, blueprint.DependencyTag)
// Dependency traversal helpers.
VisitDirectDepsBlueprint(visit func(blueprint.Module))
VisitDirectDeps(visit func(Module))
VisitDirectDepsWithTag(tag blueprint.DependencyTag, visit func(Module))
VisitDirectDepsIf(pred func(Module) bool, visit func(Module))
// Deprecated: use WalkDeps instead to support multiple dependency tags on the same module
VisitDepsDepthFirst(visit func(Module))
// Deprecated: use WalkDeps instead to support multiple dependency tags on the same module
VisitDepsDepthFirstIf(pred func(Module) bool, visit func(Module))
WalkDeps(visit func(Module, Module) bool)
WalkDepsBlueprint(visit func(blueprint.Module, blueprint.Module) bool)
// GetWalkPath is supposed to be called in visit function passed in WalkDeps()
// and returns a top-down dependency path from a start module to current child module.
GetWalkPath() []Module
// GetTagPath is supposed to be called in visit function passed in WalkDeps()
// and returns a top-down dependency tags path from a start module to current child module.
// It has one less entry than GetWalkPath() as it contains the dependency tags that
// exist between each adjacent pair of modules in the GetWalkPath().
// GetTagPath()[i] is the tag between GetWalkPath()[i] and GetWalkPath()[i+1]
GetTagPath() []blueprint.DependencyTag
AddMissingDependencies(missingDeps []string)
// Target/architecture information for the current variant.
Target() Target
TargetPrimary() bool
// The additional arch specific targets (e.g. 32/64 bit) that this module variant is
// responsible for creating.
MultiTargets() []Target
Arch() Arch
Os() OsType
Host() bool
Device() bool
Darwin() bool
Fuchsia() bool
Windows() bool
Debug() bool
PrimaryArch() bool
}
// Deprecated: use EarlyModuleContext instead
// BaseContext is kept as a pure alias of EarlyModuleContext for backwards
// compatibility with existing implementers.
type BaseContext interface {
EarlyModuleContext
}
// ModuleContext is the context passed to GenerateAndroidBuildActions; it
// extends BaseModuleContext with methods for emitting build rules,
// installing outputs and querying module variants.
type ModuleContext interface {
BaseModuleContext
// Deprecated: use ModuleContext.Build instead.
ModuleBuild(pctx PackageContext, params ModuleBuildParams)
// Source-property expansion helpers.
ExpandSources(srcFiles, excludes []string) Paths
ExpandSource(srcFile, prop string) Path
ExpandOptionalSource(srcFile *string, prop string) OptionalPath
// Installation of built artifacts and symlinks.
InstallExecutable(installPath InstallPath, name string, srcPath Path, deps ...Path) InstallPath
InstallFile(installPath InstallPath, name string, srcPath Path, deps ...Path) InstallPath
InstallSymlink(installPath InstallPath, name string, srcPath InstallPath) InstallPath
InstallAbsoluteSymlink(installPath InstallPath, name string, absPath string) InstallPath
CheckbuildFile(srcPath Path)
// Install-location predicates for the current module.
InstallInData() bool
InstallInTestcases() bool
InstallInSanitizerDir() bool
InstallInRamdisk() bool
InstallInRecovery() bool
InstallInRoot() bool
InstallBypassMake() bool
RequiredModuleNames() []string
HostRequiredModuleNames() []string
TargetRequiredModuleNames() []string
ModuleSubDir() string
// Low-level rule/variable emission.
Variable(pctx PackageContext, name, value string)
Rule(pctx PackageContext, name string, params blueprint.RuleParams, argNames ...string) blueprint.Rule
// Similar to blueprint.ModuleContext.Build, but takes Paths instead of []string,
// and performs more verification.
Build(pctx PackageContext, params BuildParams)
// Phony creates a Make-style phony rule, a rule with no commands that can depend on other
// phony rules or real files. Phony can be called on the same name multiple times to add
// additional dependencies.
Phony(phony string, deps ...Path)
// Variant iteration.
PrimaryModule() Module
FinalModule() Module
VisitAllModuleVariants(visit func(Module))
GetMissingDependencies() []string
Namespace() blueprint.Namespace
}
// Module is the Android-specific extension of blueprint.Module that every
// Soong module type must implement.
type Module interface {
blueprint.Module
// GenerateAndroidBuildActions is analogous to Blueprints' GenerateBuildActions,
// but GenerateAndroidBuildActions also has access to Android-specific information.
// For more information, see Module.GenerateBuildActions within Blueprint's module_ctx.go
GenerateAndroidBuildActions(ModuleContext)
DepsMutator(BottomUpMutatorContext)
base() *ModuleBase
// Enable/target state.
Disable()
Enabled() bool
Target() Target
Owner() string
// Install-location predicates (mirrors ModuleContext).
InstallInData() bool
InstallInTestcases() bool
InstallInSanitizerDir() bool
InstallInRamdisk() bool
InstallInRecovery() bool
InstallInRoot() bool
InstallBypassMake() bool
SkipInstall()
IsSkipInstall() bool
ExportedToMake() bool
InitRc() Paths
VintfFragments() Paths
NoticeFile() OptionalPath
// Property registration and test hooks.
AddProperties(props ...interface{})
GetProperties() []interface{}
BuildParamsForTests() []BuildParams
RuleParamsForTests() map[blueprint.Rule]blueprint.RuleParams
VariablesForTests() map[string]string
// String returns a string that includes the module name and variants for printing during debugging.
String() string
// Get the qualified module id for this module.
qualifiedModuleId(ctx BaseModuleContext) qualifiedModuleName
// Get information about the properties that can contain visibility rules.
visibilityProperties() []visibilityProperty
RequiredModuleNames() []string
HostRequiredModuleNames() []string
TargetRequiredModuleNames() []string
}
// qualifiedModuleName is the fully qualified id of a module: the package
// (directory) that defines it plus the module name. A package itself is
// represented with an empty name.
type qualifiedModuleName struct {
	// The package (i.e. directory) in which the module is defined, without trailing /
	pkg string
	// The name of the module, empty string if package.
	name string
}

// String renders the id as //pkg:name, or just //pkg for a package.
func (q qualifiedModuleName) String() string {
	if q.name != "" {
		return "//" + q.pkg + ":" + q.name
	}
	return "//" + q.pkg
}

// isRootPackage reports whether q identifies the root package.
func (q qualifiedModuleName) isRootPackage() bool {
	return q.pkg == "" && q.name == ""
}

// getContainingPackageId returns the id of the package containing this id:
// a module's container is its own package; a package's container is its
// parent directory's package. Panics when called on the root package.
func (q qualifiedModuleName) getContainingPackageId() qualifiedModuleName {
	pkg := q.pkg
	if q.name == "" {
		if pkg == "" {
			panic(fmt.Errorf("Cannot get containing package id of root package"))
		}
		if index := strings.LastIndex(pkg, "/"); index >= 0 {
			pkg = pkg[:index]
		} else {
			pkg = ""
		}
	}
	return newPackageId(pkg)
}

// newPackageId builds the qualified id for a package (no module name).
func newPackageId(pkg string) qualifiedModuleName {
	return qualifiedModuleName{pkg: pkg, name: ""}
}
// nameProperties holds the module's name property, kept in its own struct so
// it can be registered separately from commonProperties.
type nameProperties struct {
	// The name of the module. Must be unique across all modules.
	Name *string
}
// commonProperties contains the properties shared by every Android module,
// embedded via ModuleBase. Fields tagged `blueprint:"mutated"` cannot be set
// in Android.bp files; they are filled in by mutators during the build.
type commonProperties struct {
	// emit build rules for this module
	//
	// Disabling a module should only be done for those modules that cannot be built
	// in the current environment. Modules that can build in the current environment
	// but are not usually required (e.g. superseded by a prebuilt) should not be
	// disabled as that will prevent them from being built by the checkbuild target
	// and so prevent early detection of changes that have broken those modules.
	Enabled *bool `android:"arch_variant"`
	// Controls the visibility of this module to other modules. Allowable values are one or more of
	// these formats:
	//
	//  ["//visibility:public"]: Anyone can use this module.
	//  ["//visibility:private"]: Only rules in the module's package (not its subpackages) can use
	//  this module.
	//  ["//visibility:override"]: Discards any rules inherited from defaults or a creating module.
	//  Can only be used at the beginning of a list of visibility rules.
	//  ["//some/package:__pkg__", "//other/package:__pkg__"]: Only modules in some/package and
	//  other/package (defined in some/package/*.bp and other/package/*.bp) have access to
	//  this module. Note that sub-packages do not have access to the rule; for example,
	//  //some/package/foo:bar or //other/package/testing:bla wouldn't have access. __pkg__
	//  is a special module and must be used verbatim. It represents all of the modules in the
	//  package.
	//  ["//project:__subpackages__", "//other:__subpackages__"]: Only modules in packages project
	//  or other or in one of their sub-packages have access to this module. For example,
	//  //project:rule, //project/library:lib or //other/testing/internal:munge are allowed
	//  to depend on this rule (but not //independent:evil)
	//  ["//project"]: This is shorthand for ["//project:__pkg__"]
	//  [":__subpackages__"]: This is shorthand for ["//project:__subpackages__"] where
	//  //project is the module's package. e.g. using [":__subpackages__"] in
	//  packages/apps/Settings/Android.bp is equivalent to
	//  //packages/apps/Settings:__subpackages__.
	//  ["//visibility:legacy_public"]: The default visibility, behaves as //visibility:public
	//  for now. It is an error if it is used in a module.
	//
	// If a module does not specify the `visibility` property then it uses the
	// `default_visibility` property of the `package` module in the module's package.
	//
	// If the `default_visibility` property is not set for the module's package then
	// it will use the `default_visibility` of its closest ancestor package for which
	// a `default_visibility` property is specified.
	//
	// If no `default_visibility` property can be found then the module uses the
	// global default of `//visibility:legacy_public`.
	//
	// The `visibility` property has no effect on a defaults module although it does
	// apply to any non-defaults module that uses it. To set the visibility of a
	// defaults module, use the `defaults_visibility` property on the defaults module;
	// not to be confused with the `default_visibility` property on the package module.
	//
	// See https://android.googlesource.com/platform/build/soong/+/master/README.md#visibility for
	// more details.
	Visibility []string
	// control whether this module compiles for 32-bit, 64-bit, or both. Possible values
	// are "32" (compile for 32-bit only), "64" (compile for 64-bit only), "both" (compile for both
	// architectures), or "first" (compile for 64-bit on a 64-bit platform, and 32-bit on a 32-bit
	// platform)
	Compile_multilib *string `android:"arch_variant"`
	// Per-OS-class overrides for compile_multilib.
	Target struct {
		Host struct {
			Compile_multilib *string
		}
		Android struct {
			Compile_multilib *string
		}
	}
	// If set to true then the archMutator will create variants for each arch specific target
	// (e.g. 32/64) that the module is required to produce. If set to false then it will only
	// create a variant for the architecture and will list the additional arch specific targets
	// that the variant needs to produce in the CompileMultiTargets property.
	UseTargetVariants bool `blueprint:"mutated"`
	Default_multilib string `blueprint:"mutated"`
	// whether this is a proprietary vendor module, and should be installed into /vendor
	Proprietary *bool
	// vendor who owns this module
	Owner *string
	// whether this module is specific to an SoC (System-On-a-Chip). When set to true,
	// it is installed into /vendor (or /system/vendor if vendor partition does not exist).
	// Use `soc_specific` instead for better meaning.
	Vendor *bool
	// whether this module is specific to an SoC (System-On-a-Chip) and the build is system-only.
	// When set to true, it is installed into $(TARGET_COPY_OUT_PRODUCT)/vendor_overlay/$(PRODUCT_TARGET_VNDK_VERSION)
	Vendor_overlay *bool
	// whether this module is specific to an SoC (System-On-a-Chip). When set to true,
	// it is installed into /vendor (or /system/vendor if vendor partition does not exist).
	Soc_specific *bool
	// whether this module is specific to a device, not only for SoC, but also for off-chip
	// peripherals. When set to true, it is installed into /odm (or /vendor/odm if odm partition
	// does not exist, or /system/vendor/odm if both odm and vendor partitions do not exist).
	// This implies `soc_specific:true`.
	Device_specific *bool
	// whether this module is specific to a software configuration of a product (e.g. country,
	// network operator, etc). When set to true, it is installed into /product (or
	// /system/product if product partition does not exist).
	Product_specific *bool
	// whether this module extends system. When set to true, it is installed into /system_ext
	// (or /system/system_ext if system_ext partition does not exist).
	System_ext_specific *bool
	// Whether this module is installed to recovery partition
	Recovery *bool
	// Whether this module is installed to ramdisk
	Ramdisk *bool
	// Whether this module is built for non-native architectures (also known as native bridge binary)
	Native_bridge_supported *bool `android:"arch_variant"`
	// init.rc files to be installed if this module is installed
	Init_rc []string `android:"path"`
	// VINTF manifest fragments to be installed if this module is installed
	Vintf_fragments []string `android:"path"`
	// names of other modules to install if this module is installed
	Required []string `android:"arch_variant"`
	// names of other modules to install on host if this module is installed
	Host_required []string `android:"arch_variant"`
	// names of other modules to install on target if this module is installed
	Target_required []string `android:"arch_variant"`
	// relative path to a file to include in the list of notices for the device
	Notice *string `android:"path"`
	// Dist configures copying the module's output to $DIST_DIR during `dist` builds.
	Dist struct {
		// copy the output of this module to the $DIST_DIR when `dist` is specified on the
		// command line and any of these targets are also on the command line, or otherwise
		// built
		Targets []string `android:"arch_variant"`
		// The name of the output artifact. This defaults to the basename of the output of
		// the module.
		Dest *string `android:"arch_variant"`
		// The directory within the dist directory to store the artifact. Defaults to the
		// top level directory ("").
		Dir *string `android:"arch_variant"`
		// A suffix to add to the artifact file name (before any extension).
		Suffix *string `android:"arch_variant"`
	} `android:"arch_variant"`
	// The OsType of artifacts that this module variant is responsible for creating.
	//
	// Set by osMutator
	CompileOS OsType `blueprint:"mutated"`
	// The Target of artifacts that this module variant is responsible for creating.
	//
	// Set by archMutator
	CompileTarget Target `blueprint:"mutated"`
	// The additional arch specific targets (e.g. 32/64 bit) that this module variant is
	// responsible for creating.
	//
	// By default this is nil as, where necessary, separate variants are created for the
	// different multilib types supported and that information is encapsulated in the
	// CompileTarget so the module variant simply needs to create artifacts for that.
	//
	// However, if UseTargetVariants is set to false (e.g. by
	// InitAndroidMultiTargetsArchModule) then no separate variants are created for the
	// multilib targets. Instead a single variant is created for the architecture and
	// this contains the multilib specific targets that this variant should create.
	//
	// Set by archMutator
	CompileMultiTargets []Target `blueprint:"mutated"`
	// True if the module variant's CompileTarget is the primary target
	//
	// Set by archMutator
	CompilePrimary bool `blueprint:"mutated"`
	// Set by InitAndroidModule
	HostOrDeviceSupported HostOrDeviceSupported `blueprint:"mutated"`
	ArchSpecific bool `blueprint:"mutated"`
	// If set to true then a CommonOS variant will be created which will have dependencies
	// on all its OsType specific variants. Used by sdk/module_exports to create a snapshot
	// that covers all os and architecture variants.
	//
	// The OsType specific variants can be retrieved by calling
	// GetOsSpecificVariantsOfCommonOSVariant
	//
	// Set at module initialization time by calling InitCommonOSAndroidMultiTargetsArchModule
	CreateCommonOSVariant bool `blueprint:"mutated"`
	// If set to true then this variant is the CommonOS variant that has dependencies on its
	// OsType specific variants.
	//
	// Set by osMutator.
	CommonOSVariant bool `blueprint:"mutated"`
	// Set by the SkipInstall method to suppress installation of this module's outputs.
	SkipInstall bool `blueprint:"mutated"`
	// Read by ExportedToMake.
	NamespaceExportedToMake bool `blueprint:"mutated"`
	// Presumably the missing dependency names recorded when
	// AllowMissingDependencies is enabled — TODO confirm where this is written.
	MissingDeps []string `blueprint:"mutated"`
	// Name and variant strings stored by mutators to enable Module.String()
	DebugName string `blueprint:"mutated"`
	DebugMutators []string `blueprint:"mutated"`
	DebugVariations []string `blueprint:"mutated"`
	// set by ImageMutator
	ImageVariation string `blueprint:"mutated"`
}
// hostAndDeviceProperties are added only to modules whose HostOrDeviceSupported
// value allows both host and device variants (see InitAndroidArchModule).
type hostAndDeviceProperties struct {
	// If set to true, build a variant of the module for the host. Defaults to false.
	Host_supported *bool
	// If set to true, build a variant of the module for the device. Defaults to true.
	Device_supported *bool
}
// Multilib is a module's default multilib setting, controlling which of the
// 32/64-bit variants are built (see commonProperties.Compile_multilib for
// the meaning of "both" and "first").
type Multilib string
const (
	MultilibBoth Multilib = "both"
	MultilibFirst Multilib = "first"
	MultilibCommon Multilib = "common"
	MultilibCommonFirst Multilib = "common_first"
	// MultilibDefault leaves the multilib choice unset.
	MultilibDefault Multilib = ""
)
// HostOrDeviceSupported enumerates which OS classes a module type supports.
// It is set at module initialization time (see InitAndroidArchModule).
type HostOrDeviceSupported int
const (
	// The zero value is invalid so that an unset value is detectable.
	_ HostOrDeviceSupported = iota
	// Host and HostCross are built by default. Device is not supported.
	HostSupported
	// Host is built by default. HostCross and Device are not supported.
	HostSupportedNoCross
	// Device is built by default. Host and HostCross are not supported.
	DeviceSupported
	// Device is built by default. Host and HostCross are supported.
	HostAndDeviceSupported
	// Host, HostCross, and Device are built by default.
	HostAndDeviceDefault
	// Nothing is supported. This is not exposed to the user, but used to mark a
	// host only module as unsupported when the module type is not supported on
	// the host OS. E.g. benchmarks are supported on Linux but not Darwin.
	NeitherHostNorDeviceSupported
)
// moduleKind classifies a module by the partition-related properties set on
// it (see determineModuleKind).
type moduleKind int

const (
	platformModule moduleKind = iota
	deviceSpecificModule
	socSpecificModule
	vendorOverlayModule
	productSpecificModule
	systemExtSpecificModule
)

// String returns a human-readable name for the module kind, panicking on an
// unknown value.
func (k moduleKind) String() string {
	kindNames := map[moduleKind]string{
		platformModule:          "platform",
		deviceSpecificModule:    "device-specific",
		socSpecificModule:       "soc-specific",
		vendorOverlayModule:     "vendor-overlay",
		productSpecificModule:   "product-specific",
		systemExtSpecificModule: "systemext-specific",
	}
	name, ok := kindNames[k]
	if !ok {
		panic(fmt.Errorf("unknown module kind %d", k))
	}
	return name
}
// initAndroidModuleBase stores the concrete module back-pointer on its
// ModuleBase so that ModuleBase methods can reach the full module type.
func initAndroidModuleBase(m Module) {
	m.base().module = m
}
// InitAndroidModule attaches the common Android property structs to the
// module, hooks up product variable properties, and registers the primary
// visibility property. It is intended to be called from the module's factory
// function (see the ModuleBase documentation).
func InitAndroidModule(m Module) {
	initAndroidModuleBase(m)
	base := m.base()
	m.AddProperties(
		&base.nameProperties,
		&base.commonProperties)
	initProductVariableModule(m)
	// Snapshot the registered property structs for later per-arch processing.
	base.generalProperties = m.GetProperties()
	base.customizableProperties = m.GetProperties()
	// The default_visibility property needs to be checked and parsed by the visibility module during
	// its checking and parsing phases so make it the primary visibility property.
	setPrimaryVisibilityProperty(m, "visibility", &base.commonProperties.Visibility)
}
// InitAndroidArchModule is InitAndroidModule plus per-architecture variant
// support: hod selects host/device support and defaultMultilib the default
// 32/64-bit behavior.
func InitAndroidArchModule(m Module, hod HostOrDeviceSupported, defaultMultilib Multilib) {
	InitAndroidModule(m)
	base := m.base()
	base.commonProperties.HostOrDeviceSupported = hod
	base.commonProperties.Default_multilib = string(defaultMultilib)
	base.commonProperties.ArchSpecific = true
	base.commonProperties.UseTargetVariants = true
	// Only module types that may build for both host and device get the
	// host_supported/device_supported properties.
	switch hod {
	case HostAndDeviceSupported, HostAndDeviceDefault:
		m.AddProperties(&base.hostAndDeviceProperties)
	}
	InitArchModule(m)
}
// InitAndroidMultiTargetsArchModule is InitAndroidArchModule, except a single
// variant is created per architecture and the additional multilib targets are
// listed in the CompileMultiTargets property (see UseTargetVariants).
func InitAndroidMultiTargetsArchModule(m Module, hod HostOrDeviceSupported, defaultMultilib Multilib) {
	InitAndroidArchModule(m, hod, defaultMultilib)
	m.base().commonProperties.UseTargetVariants = false
}
// As InitAndroidMultiTargetsArchModule except it creates an additional CommonOS variant that
// has dependencies on all the OsType specific variants.
func InitCommonOSAndroidMultiTargetsArchModule(m Module, hod HostOrDeviceSupported, defaultMultilib Multilib) {
	InitAndroidArchModule(m, hod, defaultMultilib)
	m.base().commonProperties.UseTargetVariants = false
	m.base().commonProperties.CreateCommonOSVariant = true
}
// A ModuleBase object contains the properties that are common to all Android
// modules. It should be included as an anonymous field in every module
// struct definition. InitAndroidModule should then be called from the module's
// factory function, and the return values from InitAndroidModule should be
// returned from the factory function.
//
// The ModuleBase type is responsible for implementing the GenerateBuildActions
// method to support the blueprint.Module interface. This method will then call
// the module's GenerateAndroidBuildActions method once for each build variant
// that is to be built. GenerateAndroidBuildActions is passed a ModuleContext
// rather than the usual blueprint.ModuleContext.
// ModuleContext exposes extra functionality specific to the Android build
// system including details about the particular build variant that is to be
// generated.
//
// For example:
//
// import (
//     "android/soong/android"
// )
//
// type myModule struct {
//     android.ModuleBase
//     properties struct {
//         MyProperty string
//     }
// }
//
// func NewMyModule() android.Module {
//     m := &myModule{}
//     m.AddProperties(&m.properties)
//     android.InitAndroidModule(m)
//     return m
// }
//
// func (m *myModule) GenerateAndroidBuildActions(ctx android.ModuleContext) {
//     // Get the CPU architecture for the current build variant.
//     variantArch := ctx.Arch()
//
//     // ...
// }
type ModuleBase struct {
	// Putting the curiously recurring thing pointing to the thing that contains
	// the thing pattern to good use.
	// TODO: remove this
	module Module
	// Property structs registered by InitAndroidModule and friends.
	nameProperties nameProperties
	commonProperties commonProperties
	variableProperties interface{}
	hostAndDeviceProperties hostAndDeviceProperties
	// Snapshots of registered property structs taken in InitAndroidModule;
	// archProperties presumably holds per-arch copies — TODO confirm against
	// the arch mutator.
	generalProperties []interface{}
	archProperties [][]interface{}
	customizableProperties []interface{}
	// Information about all the properties on the module that contains visibility rules that need
	// checking.
	visibilityPropertyInfo []visibilityProperty
	// The primary visibility property, may be nil, that controls access to the module.
	primaryVisibilityProperty visibilityProperty
	// Whether address sanitizer is disabled for this module (see NoAddressSanitizer).
	noAddressSanitizer bool
	// Outputs collected from the module context in GenerateBuildActions.
	installFiles Paths
	checkbuildFiles Paths
	// The module's notice file, resolved in GenerateBuildActions.
	noticeFile OptionalPath
	// Phony rules registered during GenerateAndroidBuildActions.
	// NOTE(review): this map is written in GenerateBuildActions but no
	// initialization is visible in this file — confirm it is allocated before
	// use, since writing to a nil map panics.
	phonies map[string]Paths
	// Used by buildTargetSingleton to create checkbuild and per-directory build targets
	// Only set on the final variant of each module
	installTarget WritablePath
	checkbuildTarget WritablePath
	blueprintDir string
	hooks hooks
	// All property structs registered via AddProperties.
	registerProps []interface{}
	// For tests
	buildParams []BuildParams
	ruleParams map[blueprint.Rule]blueprint.RuleParams
	variables map[string]string
	// Resolved init.rc and VINTF fragment paths, set in GenerateBuildActions.
	initRcPaths Paths
	vintfFragmentsPaths Paths
	// Optional callback deciding whether 32-bit variants are preferred (see Prefer32).
	prefer32 func(ctx BaseModuleContext, base *ModuleBase, class OsClass) bool
}
// DepsMutator is a default no-op implementation of Module.DepsMutator.
func (m *ModuleBase) DepsMutator(BottomUpMutatorContext) {}
// AddProperties registers property structs to be parsed from Android.bp files.
func (m *ModuleBase) AddProperties(props ...interface{}) {
	m.registerProps = append(m.registerProps, props...)
}
// GetProperties returns all registered property structs.
func (m *ModuleBase) GetProperties() []interface{} {
	return m.registerProps
}
// BuildParamsForTests returns the build parameters recorded for tests.
func (m *ModuleBase) BuildParamsForTests() []BuildParams {
	return m.buildParams
}
// RuleParamsForTests returns the rule parameters recorded for tests.
func (m *ModuleBase) RuleParamsForTests() map[blueprint.Rule]blueprint.RuleParams {
	return m.ruleParams
}
// VariablesForTests returns the variables recorded for tests.
func (m *ModuleBase) VariablesForTests() map[string]string {
	return m.variables
}
// Prefer32 sets the callback that decides whether the 32-bit variant is
// preferred for this module.
func (m *ModuleBase) Prefer32(prefer32 func(ctx BaseModuleContext, base *ModuleBase, class OsClass) bool) {
	m.prefer32 = prefer32
}
// Name returns the name of the module. It may be overridden by individual module types, for
// example prebuilts will prepend prebuilt_ to the name.
func (m *ModuleBase) Name() string {
	return String(m.nameProperties.Name)
}
// String returns a string that includes the module name and variants for printing during debugging,
// in the form name{mutator1:variation1,mutator2:variation2}.
func (m *ModuleBase) String() string {
	variations := make([]string, 0, len(m.commonProperties.DebugMutators))
	for i, mutator := range m.commonProperties.DebugMutators {
		variations = append(variations, mutator+":"+m.commonProperties.DebugVariations[i])
	}
	return m.commonProperties.DebugName + "{" + strings.Join(variations, ",") + "}"
}
// BaseModuleName returns the name of the module as specified in the blueprints file.
func (m *ModuleBase) BaseModuleName() string {
	return String(m.nameProperties.Name)
}
// base returns the embedded ModuleBase itself, implementing the Module
// interface's base() accessor.
func (m *ModuleBase) base() *ModuleBase {
	return m
}
// qualifiedModuleId returns the qualified id (package directory + name) for this module.
func (m *ModuleBase) qualifiedModuleId(ctx BaseModuleContext) qualifiedModuleName {
	return qualifiedModuleName{pkg: ctx.ModuleDir(), name: ctx.ModuleName()}
}
// visibilityProperties returns information about the properties that can
// contain visibility rules.
func (m *ModuleBase) visibilityProperties() []visibilityProperty {
	return m.visibilityPropertyInfo
}
// Target returns the Target this variant is being built for.
func (m *ModuleBase) Target() Target {
	return m.commonProperties.CompileTarget
}
// TargetPrimary reports whether this variant's CompileTarget is the primary target.
func (m *ModuleBase) TargetPrimary() bool {
	return m.commonProperties.CompilePrimary
}
// MultiTargets returns the additional arch specific targets (e.g. 32/64-bit)
// this variant is responsible for creating (see CompileMultiTargets).
func (m *ModuleBase) MultiTargets() []Target {
	return m.commonProperties.CompileMultiTargets
}
// Os returns the OsType of the variant's compile target.
func (m *ModuleBase) Os() OsType {
	return m.Target().Os
}
// Host reports whether this variant builds for a host OS class, including host cross-compiles.
func (m *ModuleBase) Host() bool {
	return m.Os().Class == Host || m.Os().Class == HostCross
}
// Device reports whether this variant builds for a device OS class.
func (m *ModuleBase) Device() bool {
	return m.Os().Class == Device
}
// Arch returns the architecture of the variant's compile target.
func (m *ModuleBase) Arch() Arch {
	return m.Target().Arch
}
// ArchSpecific reports whether the module creates per-architecture variants.
func (m *ModuleBase) ArchSpecific() bool {
	return m.commonProperties.ArchSpecific
}
// True if the current variant is a CommonOS variant, false otherwise.
func (m *ModuleBase) IsCommonOSVariant() bool {
	return m.commonProperties.CommonOSVariant
}
// OsClassSupported returns the OsClasses this module may be built for, based
// on the HostOrDeviceSupported value set at initialization time and, for
// host-and-device module types, the host_supported/device_supported
// properties.
func (m *ModuleBase) OsClassSupported() []OsClass {
	switch m.commonProperties.HostOrDeviceSupported {
	case HostSupported:
		return []OsClass{Host, HostCross}
	case HostSupportedNoCross:
		return []OsClass{Host}
	case DeviceSupported:
		return []OsClass{Device}
	case HostAndDeviceSupported, HostAndDeviceDefault:
		var supported []OsClass
		// Host is included when host_supported: true, or by default for
		// HostAndDeviceDefault modules that leave the property unset.
		if Bool(m.hostAndDeviceProperties.Host_supported) ||
			(m.commonProperties.HostOrDeviceSupported == HostAndDeviceDefault &&
				m.hostAndDeviceProperties.Host_supported == nil) {
			supported = append(supported, Host, HostCross)
		}
		// Device is supported unless device_supported: false is set.
		if m.hostAndDeviceProperties.Device_supported == nil ||
			*m.hostAndDeviceProperties.Device_supported {
			supported = append(supported, Device)
		}
		return supported
	default:
		// Includes NeitherHostNorDeviceSupported and the invalid zero value.
		return nil
	}
}
// DeviceSupported reports whether the module can build for device: it is
// device-only, or supports both and device_supported is not set to false.
// Note: && binds tighter than ||, so the property check applies only to the
// HostAndDeviceSupported case. NOTE(review): HostAndDeviceDefault is not
// considered here, unlike in OsClassSupported — confirm that is intentional.
func (m *ModuleBase) DeviceSupported() bool {
	return m.commonProperties.HostOrDeviceSupported == DeviceSupported ||
		m.commonProperties.HostOrDeviceSupported == HostAndDeviceSupported &&
			(m.hostAndDeviceProperties.Device_supported == nil ||
				*m.hostAndDeviceProperties.Device_supported)
}
// HostSupported reports whether the module can build for host: it is
// host-only, or supports both and host_supported is explicitly true.
func (m *ModuleBase) HostSupported() bool {
	return m.commonProperties.HostOrDeviceSupported == HostSupported ||
		m.commonProperties.HostOrDeviceSupported == HostAndDeviceSupported &&
			(m.hostAndDeviceProperties.Host_supported != nil &&
				*m.hostAndDeviceProperties.Host_supported)
}
// Platform reports whether the module belongs to the platform (system)
// image, i.e. is not specific to any other partition.
func (m *ModuleBase) Platform() bool {
	return !m.DeviceSpecific() && !m.SocSpecific() && !m.ProductSpecific() && !m.SystemExtSpecific() && !m.VendorOverlay()
}
// DeviceSpecific reports whether the device_specific property is set.
func (m *ModuleBase) DeviceSpecific() bool {
	return Bool(m.commonProperties.Device_specific)
}
// SocSpecific reports whether the module is SoC specific; any of the vendor,
// proprietary or soc_specific properties implies it.
func (m *ModuleBase) SocSpecific() bool {
	return Bool(m.commonProperties.Vendor) || Bool(m.commonProperties.Proprietary) || Bool(m.commonProperties.Soc_specific)
}
// ProductSpecific reports whether the product_specific property is set.
func (m *ModuleBase) ProductSpecific() bool {
	return Bool(m.commonProperties.Product_specific)
}
// VendorOverlay reports whether the vendor_overlay property is set.
func (m *ModuleBase) VendorOverlay() bool {
	return Bool(m.commonProperties.Vendor_overlay)
}
// SystemExtSpecific reports whether the system_ext_specific property is set.
func (m *ModuleBase) SystemExtSpecific() bool {
	return Bool(m.commonProperties.System_ext_specific)
}
// RequiresStableAPIs returns true if the module will be installed to a partition that may
// be updated separately from the system image.
func (m *ModuleBase) RequiresStableAPIs(ctx BaseModuleContext) bool {
	return m.SocSpecific() || m.DeviceSpecific() ||
		(m.ProductSpecific() && ctx.Config().EnforceProductPartitionInterface())
}
// PartitionTag returns the name of the partition this module is installed
// into, taking into account that partitions that do not exist on this device
// configuration fold back into the system partition.
func (m *ModuleBase) PartitionTag(config DeviceConfig) string {
	switch {
	case m.SocSpecific():
		// A SoC-specific module could be on the vendor partition at
		// "vendor" or the system partition at "system/vendor".
		if config.VendorPath() == "vendor" {
			return "vendor"
		}
	case m.DeviceSpecific():
		// A device-specific module could be on the odm partition at
		// "odm", the vendor partition at "vendor/odm", or the system
		// partition at "system/vendor/odm".
		if config.OdmPath() == "odm" {
			return "odm"
		}
		if strings.HasPrefix(config.OdmPath(), "vendor/") {
			return "vendor"
		}
	case m.ProductSpecific():
		// A product-specific module could be on the product partition
		// at "product" or the system partition at "system/product".
		if config.ProductPath() == "product" {
			return "product"
		}
	case m.SystemExtSpecific():
		// A system_ext-specific module could be on the system_ext
		// partition at "system_ext" or the system partition at
		// "system/system_ext".
		if config.SystemExtPath() == "system_ext" {
			return "system_ext"
		}
	}
	return "system"
}
// Enabled reports whether the module is enabled. When the enabled property
// is unset, the module defaults to enabled unless its OS is disabled by default.
func (m *ModuleBase) Enabled() bool {
	if m.commonProperties.Enabled == nil {
		return !m.Os().DefaultDisabled
	}
	return *m.commonProperties.Enabled
}
// Disable forces the module to be disabled, overriding the enabled property.
func (m *ModuleBase) Disable() {
	m.commonProperties.Enabled = proptools.BoolPtr(false)
}
// SkipInstall marks the module's outputs as not to be installed.
func (m *ModuleBase) SkipInstall() {
	m.commonProperties.SkipInstall = true
}
// IsSkipInstall reports whether SkipInstall was called on this module.
func (m *ModuleBase) IsSkipInstall() bool {
	// Return the bool directly; comparing a bool to true is redundant.
	return m.commonProperties.SkipInstall
}
// ExportedToMake reports whether the module's namespace is exported to Make.
func (m *ModuleBase) ExportedToMake() bool {
	return m.commonProperties.NamespaceExportedToMake
}
// computeInstallDeps collects the installed files of all transitive
// dependencies that install files (depth-first).
func (m *ModuleBase) computeInstallDeps(
	ctx blueprint.ModuleContext) Paths {
	result := Paths{}
	// TODO(ccross): we need to use WalkDeps and have some way to know which dependencies require installation
	ctx.VisitDepsDepthFirstIf(isFileInstaller,
		func(m blueprint.Module) {
			fileInstaller := m.(fileInstaller)
			files := fileInstaller.filesToInstall()
			result = append(result, files...)
		})
	return result
}
// filesToInstall implements the fileInstaller interface for ModuleBase.
func (m *ModuleBase) filesToInstall() Paths {
	return m.installFiles
}
// NoAddressSanitizer reports whether address sanitizer is disabled for this module.
func (m *ModuleBase) NoAddressSanitizer() bool {
	return m.noAddressSanitizer
}
// InstallInData reports whether the module installs under /data; false by default.
func (m *ModuleBase) InstallInData() bool {
	return false
}
// InstallInTestcases reports whether the module installs into testcases; false by default.
func (m *ModuleBase) InstallInTestcases() bool {
	return false
}
// InstallInSanitizerDir reports whether the module installs into the sanitizer directory; false by default.
func (m *ModuleBase) InstallInSanitizerDir() bool {
	return false
}
// InstallInRamdisk reports whether the module installs into the ramdisk, per the ramdisk property.
func (m *ModuleBase) InstallInRamdisk() bool {
	return Bool(m.commonProperties.Ramdisk)
}
// InstallInRecovery reports whether the module installs into recovery, per the recovery property.
func (m *ModuleBase) InstallInRecovery() bool {
	return Bool(m.commonProperties.Recovery)
}
// InstallInRoot reports whether the module installs into the root of its partition; false by default.
func (m *ModuleBase) InstallInRoot() bool {
	return false
}
// InstallBypassMake reports whether installation bypasses Make; false by default.
func (m *ModuleBase) InstallBypassMake() bool {
	return false
}
// Owner returns the vendor who owns this module, per the owner property.
func (m *ModuleBase) Owner() string {
	return String(m.commonProperties.Owner)
}
// NoticeFile returns the module's notice file, if one was resolved.
func (m *ModuleBase) NoticeFile() OptionalPath {
	return m.noticeFile
}
// setImageVariation records the image variant chosen by the ImageMutator.
func (m *ModuleBase) setImageVariation(variant string) {
	m.commonProperties.ImageVariation = variant
}
// ImageVariation returns the blueprint variation for the image mutator.
func (m *ModuleBase) ImageVariation() blueprint.Variation {
	return blueprint.Variation{
		Mutator: "image",
		Variation: m.base().commonProperties.ImageVariation,
	}
}
// getVariationByMutatorName returns the variation this module was assigned
// by the named mutator, or "" if that mutator did not apply.
func (m *ModuleBase) getVariationByMutatorName(mutator string) string {
	for i, v := range m.commonProperties.DebugMutators {
		if v == mutator {
			return m.commonProperties.DebugVariations[i]
		}
	}
	return ""
}
// InRamdisk reports whether this variant is the ramdisk image variant.
func (m *ModuleBase) InRamdisk() bool {
	return m.base().commonProperties.ImageVariation == RamdiskVariation
}
// InRecovery reports whether this variant is the recovery image variant.
func (m *ModuleBase) InRecovery() bool {
	return m.base().commonProperties.ImageVariation == RecoveryVariation
}
// RequiredModuleNames returns the names of modules to install alongside this module.
func (m *ModuleBase) RequiredModuleNames() []string {
	return m.base().commonProperties.Required
}
// HostRequiredModuleNames returns the names of host modules to install alongside this module.
func (m *ModuleBase) HostRequiredModuleNames() []string {
	return m.base().commonProperties.Host_required
}
// TargetRequiredModuleNames returns the names of target modules to install alongside this module.
func (m *ModuleBase) TargetRequiredModuleNames() []string {
	return m.base().commonProperties.Target_required
}
// InitRc returns a copy of the module's resolved init.rc paths, so callers
// cannot mutate the module's slice.
func (m *ModuleBase) InitRc() Paths {
	return append(Paths{}, m.initRcPaths...)
}
// VintfFragments returns a copy of the module's resolved VINTF manifest
// fragment paths.
func (m *ModuleBase) VintfFragments() Paths {
	return append(Paths{}, m.vintfFragmentsPaths...)
}
// generateModuleTarget creates the per-module phony targets
// (<name>-install, <name>-checkbuild and the <name> umbrella target),
// aggregating the installed and checkbuild files of all of this module's
// variants. Called only on the final variant of each module.
func (m *ModuleBase) generateModuleTarget(ctx ModuleContext) {
	allInstalledFiles := Paths{}
	allCheckbuildFiles := Paths{}
	// Collect outputs from every variant of this module.
	ctx.VisitAllModuleVariants(func(module Module) {
		a := module.base()
		allInstalledFiles = append(allInstalledFiles, a.installFiles...)
		allCheckbuildFiles = append(allCheckbuildFiles, a.checkbuildFiles...)
	})
	var deps Paths
	// Phony names are prefixed with the module's namespace id when non-empty.
	namespacePrefix := ctx.Namespace().(*Namespace).id
	if namespacePrefix != "" {
		namespacePrefix = namespacePrefix + "-"
	}
	if len(allInstalledFiles) > 0 {
		name := namespacePrefix + ctx.ModuleName() + "-install"
		ctx.Phony(name, allInstalledFiles...)
		m.installTarget = PathForPhony(ctx, name)
		deps = append(deps, m.installTarget)
	}
	if len(allCheckbuildFiles) > 0 {
		name := namespacePrefix + ctx.ModuleName() + "-checkbuild"
		ctx.Phony(name, allCheckbuildFiles...)
		m.checkbuildTarget = PathForPhony(ctx, name)
		deps = append(deps, m.checkbuildTarget)
	}
	if len(deps) > 0 {
		suffix := ""
		// Suffix the umbrella phony when running inside a Make build —
		// presumably to avoid colliding with the Make target of the same
		// name; confirm.
		if ctx.Config().EmbeddedInMake() {
			suffix = "-soong"
		}
		ctx.Phony(namespacePrefix+ctx.ModuleName()+suffix, deps...)
		m.blueprintDir = ctx.ModuleDir()
	}
}
// determineModuleKind validates the partition-related properties (vendor,
// proprietary, soc_specific, vendor_overlay, device_specific,
// product_specific, system_ext_specific) and maps them to a single
// moduleKind, reporting property errors for conflicting combinations.
func determineModuleKind(m *ModuleBase, ctx blueprint.EarlyModuleContext) moduleKind {
	var socSpecific = Bool(m.commonProperties.Vendor) || Bool(m.commonProperties.Proprietary) || Bool(m.commonProperties.Soc_specific)
	var vendorOverlay = Bool(m.commonProperties.Vendor_overlay)
	var deviceSpecific = Bool(m.commonProperties.Device_specific)
	var productSpecific = Bool(m.commonProperties.Product_specific)
	var systemExtSpecific = Bool(m.commonProperties.System_ext_specific)
	msg := "conflicting value set here"
	if socSpecific && deviceSpecific {
		// Report the error on device_specific, then point at each property
		// that contributed to the SoC-specific classification.
		ctx.PropertyErrorf("device_specific", "a module cannot be specific to SoC and device at the same time.")
		if Bool(m.commonProperties.Vendor) {
			ctx.PropertyErrorf("vendor", msg)
		}
		if Bool(m.commonProperties.Proprietary) {
			ctx.PropertyErrorf("proprietary", msg)
		}
		if Bool(m.commonProperties.Soc_specific) {
			ctx.PropertyErrorf("soc_specific", msg)
		}
		if Bool(m.commonProperties.Vendor_overlay) {
			ctx.PropertyErrorf("vendor_overlay", msg)
		}
	}
	if productSpecific && systemExtSpecific {
		ctx.PropertyErrorf("product_specific", "a module cannot be specific to product and system_ext at the same time.")
		ctx.PropertyErrorf("system_ext_specific", msg)
	}
	if (socSpecific || deviceSpecific) && (productSpecific || systemExtSpecific) {
		if productSpecific {
			ctx.PropertyErrorf("product_specific", "a module cannot be specific to SoC or device and product at the same time.")
		} else {
			ctx.PropertyErrorf("system_ext_specific", "a module cannot be specific to SoC or device and system_ext at the same time.")
		}
		if deviceSpecific {
			ctx.PropertyErrorf("device_specific", msg)
		} else {
			// deviceSpecific is false, so socSpecific must be true; point at
			// its contributing properties.
			if Bool(m.commonProperties.Vendor) {
				ctx.PropertyErrorf("vendor", msg)
			}
			if Bool(m.commonProperties.Proprietary) {
				ctx.PropertyErrorf("proprietary", msg)
			}
			if Bool(m.commonProperties.Soc_specific) {
				ctx.PropertyErrorf("soc_specific", msg)
			}
			if Bool(m.commonProperties.Vendor_overlay) {
				ctx.PropertyErrorf("vendor_overlay", msg)
			}
		}
	}
	// Precedence when several flags are set: product > system_ext > device >
	// vendor overlay > SoC > platform.
	if productSpecific {
		return productSpecificModule
	} else if systemExtSpecific {
		return systemExtSpecificModule
	} else if deviceSpecific {
		return deviceSpecificModule
	} else if vendorOverlay {
		return vendorOverlayModule
	} else if socSpecific {
		return socSpecificModule
	} else {
		return platformModule
	}
}
// earlyModuleContextFactory wraps a blueprint context in the
// earlyModuleContext shared by all Android context types, classifying the
// module's partition kind and caching the config.
func (m *ModuleBase) earlyModuleContextFactory(ctx blueprint.EarlyModuleContext) earlyModuleContext {
	return earlyModuleContext{
		EarlyModuleContext: ctx,
		kind: determineModuleKind(m, ctx),
		config: ctx.Config().(Config),
	}
}
// baseModuleContextFactory creates a baseModuleContext populated with this
// variant's compile target information from the mutated common properties.
func (m *ModuleBase) baseModuleContextFactory(ctx blueprint.BaseModuleContext) baseModuleContext {
	return baseModuleContext{
		bp: ctx,
		earlyModuleContext: m.earlyModuleContextFactory(ctx),
		os: m.commonProperties.CompileOS,
		target: m.commonProperties.CompileTarget,
		targetPrimary: m.commonProperties.CompilePrimary,
		multiTargets: m.commonProperties.CompileMultiTargets,
	}
}
func (m *ModuleBase) GenerateBuildActions(blueprintCtx blueprint.ModuleContext) {
ctx := &moduleContext{
module: m.module,
bp: blueprintCtx,
baseModuleContext: m.baseModuleContextFactory(blueprintCtx),
installDeps: m.computeInstallDeps(blueprintCtx),
installFiles: m.installFiles,
variables: make(map[string]string),
}
// Temporarily continue to call blueprintCtx.GetMissingDependencies() to maintain the previous behavior of never
// reporting missing dependency errors in Blueprint when AllowMissingDependencies == true.
// TODO: This will be removed once defaults modules handle missing dependency errors
blueprintCtx.GetMissingDependencies()
// For the final GenerateAndroidBuildActions pass, require that all visited dependencies Soong modules and
// are enabled. Unless the module is a CommonOS variant which may have dependencies on disabled variants
// (because the dependencies are added before the modules are disabled). The
// GetOsSpecificVariantsOfCommonOSVariant(...) method will ensure that the disabled variants are
// ignored.
ctx.baseModuleContext.strictVisitDeps = !m.IsCommonOSVariant()
if ctx.config.captureBuild {
ctx.ruleParams = make(map[blueprint.Rule]blueprint.RuleParams)
}
desc := "//" + ctx.ModuleDir() + ":" + ctx.ModuleName() + " "
var suffix []string
if ctx.Os().Class != Device && ctx.Os().Class != Generic {
suffix = append(suffix, ctx.Os().String())
}
if !ctx.PrimaryArch() {
suffix = append(suffix, ctx.Arch().ArchType.String())
}
if apex, ok := m.module.(ApexModule); ok && !apex.IsForPlatform() {
suffix = append(suffix, apex.ApexName())
}
ctx.Variable(pctx, "moduleDesc", desc)
s := ""
if len(suffix) > 0 {
s = " [" + strings.Join(suffix, " ") + "]"
}
ctx.Variable(pctx, "moduleDescSuffix", s)
// Some common property checks for properties that will be used later in androidmk.go
if m.commonProperties.Dist.Dest != nil {
_, err := validateSafePath(*m.commonProperties.Dist.Dest)
if err != nil {
ctx.PropertyErrorf("dist.dest", "%s", err.Error())
}
}
if m.commonProperties.Dist.Dir != nil {
_, err := validateSafePath(*m.commonProperties.Dist.Dir)
if err != nil {
ctx.PropertyErrorf("dist.dir", "%s", err.Error())
}
}
if m.commonProperties.Dist.Suffix != nil {
if strings.Contains(*m.commonProperties.Dist.Suffix, "/") {
ctx.PropertyErrorf("dist.suffix", "Suffix may not contain a '/' character.")
}
}
if m.Enabled() {
// ensure all direct android.Module deps are enabled
ctx.VisitDirectDepsBlueprint(func(bm blueprint.Module) {
if _, ok := bm.(Module); ok {
ctx.validateAndroidModule(bm, ctx.baseModuleContext.strictVisitDeps)
}
})
notice := proptools.StringDefault(m.commonProperties.Notice, "NOTICE")
if module := SrcIsModule(notice); module != "" {
m.noticeFile = ctx.ExpandOptionalSource(¬ice, "notice")
} else {
noticePath := filepath.Join(ctx.ModuleDir(), notice)
m.noticeFile = ExistentPathForSource(ctx, noticePath)
}
m.module.GenerateAndroidBuildActions(ctx)
if ctx.Failed() {
return
}
m.installFiles = append(m.installFiles, ctx.installFiles...)
m.checkbuildFiles = append(m.checkbuildFiles, ctx.checkbuildFiles...)
m.initRcPaths = PathsForModuleSrc(ctx, m.commonProperties.Init_rc)
m.vintfFragmentsPaths = PathsForModuleSrc(ctx, m.commonProperties.Vintf_fragments)
for k, v := range ctx.phonies {
m.phonies[k] = append(m.phonies[k], v...)
}
} else if ctx.Config().AllowMissingDependencies() {
// If the module is not enabled it will not create any build rules, nothing will call
// ctx.GetMissingDependencies(), and blueprint will consider the missing dependencies to be unhandled
// and report them as an error even when AllowMissingDependencies = true. Call
// ctx.GetMissingDependencies() here to tell blueprint not to handle them.
ctx.GetMissingDependencies()
}
if m == ctx.FinalModule().(Module).base() {
m.generateModuleTarget(ctx)
if ctx.Failed() {
return
}
}
m.buildParams = ctx.buildParams
m.ruleParams = ctx.ruleParams
m.variables = ctx.variables
}
// earlyModuleContext wraps blueprint.EarlyModuleContext and adds the
// Android-specific module kind and build configuration.
type earlyModuleContext struct {
	blueprint.EarlyModuleContext

	kind   moduleKind // platform/device/soc/product/system_ext classification (see the *Specific methods below)
	config Config
}

// Glob globs globPattern relative to the module directory, minus excludes,
// and returns the matching paths. A glob error is reported via ModuleErrorf.
func (e *earlyModuleContext) Glob(globPattern string, excludes []string) Paths {
	ret, err := e.GlobWithDeps(globPattern, excludes)
	if err != nil {
		e.ModuleErrorf("glob: %s", err.Error())
	}
	// NOTE(review): third argument presumably controls whether directories are
	// included in the result — confirm against pathsForModuleSrcFromFullPath.
	return pathsForModuleSrcFromFullPath(e, ret, true)
}

// GlobFiles is like Glob but passes false to pathsForModuleSrcFromFullPath
// (presumably restricting the result to plain files — TODO confirm).
func (e *earlyModuleContext) GlobFiles(globPattern string, excludes []string) Paths {
	ret, err := e.GlobWithDeps(globPattern, excludes)
	if err != nil {
		e.ModuleErrorf("glob: %s", err.Error())
	}
	return pathsForModuleSrcFromFullPath(e, ret, false)
}
// IsSymlink reports whether the file at path is a symbolic link. A failed
// Lstat is reported via ModuleErrorf and treated as "not a symlink".
// Receiver renamed b -> e for consistency with the other earlyModuleContext
// methods.
func (e *earlyModuleContext) IsSymlink(path Path) bool {
	fileInfo, err := e.config.fs.Lstat(path.String())
	if err != nil {
		e.ModuleErrorf("os.Lstat(%q) failed: %s", path.String(), err)
		// ModuleErrorf records the error but does not stop execution;
		// fileInfo is nil here, so dereferencing it below would panic.
		return false
	}
	return fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink
}
// Readlink returns the target of the symbolic link at path. A failed
// Readlink is reported via ModuleErrorf and the returned string is empty.
// Receiver renamed b -> e for consistency with the other earlyModuleContext
// methods.
func (e *earlyModuleContext) Readlink(path Path) string {
	dest, err := e.config.fs.Readlink(path.String())
	if err != nil {
		e.ModuleErrorf("os.Readlink(%q) failed: %s", path.String(), err)
	}
	return dest
}
// Module returns the current module as the Android Module interface, or nil
// if the underlying blueprint module does not implement it.
func (e *earlyModuleContext) Module() Module {
	module, _ := e.EarlyModuleContext.Module().(Module)
	return module
}

// Config returns the Android build configuration.
func (e *earlyModuleContext) Config() Config {
	return e.EarlyModuleContext.Config().(Config)
}

// AConfig returns the Android build configuration (legacy accessor).
func (e *earlyModuleContext) AConfig() Config {
	return e.config
}

// DeviceConfig returns the device-specific slice of the configuration.
func (e *earlyModuleContext) DeviceConfig() DeviceConfig {
	return DeviceConfig{e.config.deviceConfig}
}

// Platform reports whether the module is a platform module, i.e. not
// specific to soc, device, product, or system_ext.
func (e *earlyModuleContext) Platform() bool {
	return e.kind == platformModule
}

// DeviceSpecific reports whether the module is device-specific.
func (e *earlyModuleContext) DeviceSpecific() bool {
	return e.kind == deviceSpecificModule
}

// SocSpecific reports whether the module is soc-specific (vendor).
func (e *earlyModuleContext) SocSpecific() bool {
	return e.kind == socSpecificModule
}

// ProductSpecific reports whether the module is product-specific.
func (e *earlyModuleContext) ProductSpecific() bool {
	return e.kind == productSpecificModule
}

// SystemExtSpecific reports whether the module is system_ext-specific.
func (e *earlyModuleContext) SystemExtSpecific() bool {
	return e.kind == systemExtSpecificModule
}

// VendorOverlay reports whether the module is a vendor overlay module.
func (e *earlyModuleContext) VendorOverlay() bool {
	return e.kind == vendorOverlayModule
}
// baseModuleContext carries the Android-specific per-variant state (os,
// target, dependency-walk bookkeeping) on top of blueprint's
// BaseModuleContext.
type baseModuleContext struct {
	bp blueprint.BaseModuleContext
	earlyModuleContext
	os            OsType
	target        Target
	multiTargets  []Target
	targetPrimary bool
	debug         bool

	walkPath []Module                  // ancestor chain maintained during WalkDeps
	tagPath  []blueprint.DependencyTag // dependency tags along walkPath

	strictVisitDeps bool // If true, enforce that all dependencies are enabled
}

// The following methods delegate directly to the wrapped
// blueprint.BaseModuleContext.

func (b *baseModuleContext) OtherModuleName(m blueprint.Module) string {
	return b.bp.OtherModuleName(m)
}
func (b *baseModuleContext) OtherModuleDir(m blueprint.Module) string { return b.bp.OtherModuleDir(m) }
func (b *baseModuleContext) OtherModuleErrorf(m blueprint.Module, fmt string, args ...interface{}) {
	b.bp.OtherModuleErrorf(m, fmt, args...)
}
func (b *baseModuleContext) OtherModuleDependencyTag(m blueprint.Module) blueprint.DependencyTag {
	return b.bp.OtherModuleDependencyTag(m)
}
func (b *baseModuleContext) OtherModuleExists(name string) bool { return b.bp.OtherModuleExists(name) }
func (b *baseModuleContext) OtherModuleType(m blueprint.Module) string {
	return b.bp.OtherModuleType(m)
}

func (b *baseModuleContext) GetDirectDepWithTag(name string, tag blueprint.DependencyTag) blueprint.Module {
	return b.bp.GetDirectDepWithTag(name, tag)
}

// moduleContext implements the context passed to
// GenerateAndroidBuildActions, accumulating install/checkbuild outputs and
// (for tests) the generated build rules.
type moduleContext struct {
	bp blueprint.ModuleContext
	baseModuleContext
	installDeps     Paths
	installFiles    Paths
	checkbuildFiles Paths
	module          Module
	phonies         map[string]Paths

	// For tests
	buildParams []BuildParams
	ruleParams  map[blueprint.Rule]blueprint.RuleParams
	variables   map[string]string
}
// ninjaError converts params into a build statement that uses ErrorRule to
// report err at build time, preserving the original description and all
// declared outputs so downstream rules still see them.
func (m *moduleContext) ninjaError(params BuildParams, err error) (PackageContext, BuildParams) {
	errorParams := BuildParams{
		Rule:            ErrorRule,
		Description:     params.Description,
		Output:          params.Output,
		Outputs:         params.Outputs,
		ImplicitOutput:  params.ImplicitOutput,
		ImplicitOutputs: params.ImplicitOutputs,
		Args:            map[string]string{"error": err.Error()},
	}
	return pctx, errorParams
}
// ModuleBuild is a wrapper around Build that takes ModuleBuildParams.
func (m *moduleContext) ModuleBuild(pctx PackageContext, params ModuleBuildParams) {
	m.Build(pctx, BuildParams(params))
}

// convertBuildParams lowers Android BuildParams into blueprint.BuildParams:
// Paths become strings, the singular Output/Input/Implicit/ImplicitOutput
// fields are folded into their plural counterparts, and every path-derived
// string is ninja-escaped.
func convertBuildParams(params BuildParams) blueprint.BuildParams {
	bparams := blueprint.BuildParams{
		Rule:            params.Rule,
		Description:     params.Description,
		Deps:            params.Deps,
		Outputs:         params.Outputs.Strings(),
		ImplicitOutputs: params.ImplicitOutputs.Strings(),
		Inputs:          params.Inputs.Strings(),
		Implicits:       params.Implicits.Strings(),
		OrderOnly:       params.OrderOnly.Strings(),
		Args:            params.Args,
		Optional:        !params.Default, // Default means "built by default", i.e. not optional
	}

	// Fold the optional singular fields into the plural lists.
	if params.Depfile != nil {
		bparams.Depfile = params.Depfile.String()
	}
	if params.Output != nil {
		bparams.Outputs = append(bparams.Outputs, params.Output.String())
	}
	if params.ImplicitOutput != nil {
		bparams.ImplicitOutputs = append(bparams.ImplicitOutputs, params.ImplicitOutput.String())
	}
	if params.Input != nil {
		bparams.Inputs = append(bparams.Inputs, params.Input.String())
	}
	if params.Implicit != nil {
		bparams.Implicits = append(bparams.Implicits, params.Implicit.String())
	}

	// Escape ninja metacharacters in everything that ends up in the ninja file.
	bparams.Outputs = proptools.NinjaEscapeList(bparams.Outputs)
	bparams.ImplicitOutputs = proptools.NinjaEscapeList(bparams.ImplicitOutputs)
	bparams.Inputs = proptools.NinjaEscapeList(bparams.Inputs)
	bparams.Implicits = proptools.NinjaEscapeList(bparams.Implicits)
	bparams.OrderOnly = proptools.NinjaEscapeList(bparams.OrderOnly)
	bparams.Depfile = proptools.NinjaEscapeList([]string{bparams.Depfile})[0]

	return bparams
}
// Variable creates a ninja variable, capturing it for tests when
// captureBuild is enabled.
func (m *moduleContext) Variable(pctx PackageContext, name, value string) {
	if m.config.captureBuild {
		m.variables[name] = value
	}

	m.bp.Variable(pctx.PackageContext, name, value)
}

// Rule creates a ninja rule, adjusting its pool for remote builds and
// capturing its params for tests when captureBuild is enabled.
func (m *moduleContext) Rule(pctx PackageContext, name string, params blueprint.RuleParams,
	argNames ...string) blueprint.Rule {

	if m.config.UseRemoteBuild() {
		if params.Pool == nil {
			// When USE_GOMA=true or USE_RBE=true are set and the rule is not supported by goma/RBE, restrict
			// jobs to the local parallelism value
			params.Pool = localPool
		} else if params.Pool == remotePool {
			// remotePool is a fake pool used to identify rule that are supported for remoting. If the rule's
			// pool is the remotePool, replace with nil so that ninja runs it at NINJA_REMOTE_NUM_JOBS
			// parallelism.
			params.Pool = nil
		}
	}

	rule := m.bp.Rule(pctx.PackageContext, name, params, argNames...)

	if m.config.captureBuild {
		m.ruleParams[rule] = params
	}

	return rule
}

// Build creates a ninja build statement for the module. If the module has
// missing dependencies the statement is replaced with an ErrorRule that
// reports them at build time.
func (m *moduleContext) Build(pctx PackageContext, params BuildParams) {
	if params.Description != "" {
		// Wrap the description with the module description variables set up
		// earlier so ninja output identifies the module.
		params.Description = "${moduleDesc}" + params.Description + "${moduleDescSuffix}"
	}

	if missingDeps := m.GetMissingDependencies(); len(missingDeps) > 0 {
		pctx, params = m.ninjaError(params, fmt.Errorf("module %s missing dependencies: %s\n",
			m.ModuleName(), strings.Join(missingDeps, ", ")))
	}

	if m.config.captureBuild {
		m.buildParams = append(m.buildParams, params)
	}

	m.bp.Build(pctx.PackageContext, convertBuildParams(params))
}

// Phony creates a phony target named name that depends on deps.
func (m *moduleContext) Phony(name string, deps ...Path) {
	addPhony(m.config, name, deps...)
}
// GetMissingDependencies returns the de-duplicated list of dependencies
// that could not be satisfied, combining those recorded on the module's
// properties with those reported by blueprint.
func (m *moduleContext) GetMissingDependencies() []string {
	fromProperties := m.Module().base().commonProperties.MissingDeps
	fromBlueprint := m.bp.GetMissingDependencies()

	var combined []string
	combined = append(combined, fromProperties...)
	combined = append(combined, fromBlueprint...)
	return FirstUniqueStrings(combined)
}
// AddMissingDependencies records deps as missing dependencies on the
// module's properties, keeping the list free of duplicates. A nil slice is
// a no-op.
func (b *baseModuleContext) AddMissingDependencies(deps []string) {
	if deps == nil {
		return
	}
	missing := &b.Module().base().commonProperties.MissingDeps
	*missing = FirstUniqueStrings(append(*missing, deps...))
}
// validateAndroidModule checks that module is a valid Android Module. In
// non-strict mode only the type assertion is performed (nil for non-Android
// modules). In strict mode a non-Android or disabled module is reported as
// an error (or recorded as a missing dependency when
// AllowMissingDependencies is set) and nil is returned.
func (b *baseModuleContext) validateAndroidModule(module blueprint.Module, strict bool) Module {
	aModule, _ := module.(Module)

	if !strict {
		return aModule
	}

	if aModule == nil {
		b.ModuleErrorf("module %q not an android module", b.OtherModuleName(module))
		return nil
	}

	if !aModule.Enabled() {
		if b.Config().AllowMissingDependencies() {
			b.AddMissingDependencies([]string{b.OtherModuleName(aModule)})
		} else {
			b.ModuleErrorf("depends on disabled module %q", b.OtherModuleName(aModule))
		}
		return nil
	}

	return aModule
}

// getDirectDepInternal returns the unique direct dependency whose base
// module name is name and whose tag matches tag (any tag when tag is nil).
// Returns (nil, nil) when there is no match; panics when the name is
// ambiguous among the direct dependencies.
func (b *baseModuleContext) getDirectDepInternal(name string, tag blueprint.DependencyTag) (blueprint.Module, blueprint.DependencyTag) {
	type dep struct {
		mod blueprint.Module
		tag blueprint.DependencyTag
	}
	var deps []dep
	b.VisitDirectDepsBlueprint(func(module blueprint.Module) {
		if aModule, _ := module.(Module); aModule != nil && aModule.base().BaseModuleName() == name {
			returnedTag := b.bp.OtherModuleDependencyTag(aModule)
			if tag == nil || returnedTag == tag {
				deps = append(deps, dep{aModule, returnedTag})
			}
		}
	})
	if len(deps) == 1 {
		return deps[0].mod, deps[0].tag
	} else if len(deps) >= 2 {
		panic(fmt.Errorf("Multiple dependencies having same BaseModuleName() %q found from %q",
			name, b.ModuleName()))
	} else {
		return nil, nil
	}
}
// GetDirectDepsWithTag returns all direct Android Module dependencies that
// were added with the given tag.
func (b *baseModuleContext) GetDirectDepsWithTag(tag blueprint.DependencyTag) []Module {
	var deps []Module
	b.VisitDirectDepsBlueprint(func(module blueprint.Module) {
		if aModule, _ := module.(Module); aModule != nil {
			if b.bp.OtherModuleDependencyTag(aModule) == tag {
				deps = append(deps, aModule)
			}
		}
	})
	return deps
}

// GetDirectDepWithTag returns the direct dependency with the given name and
// tag, or nil if there is none. Panics on an ambiguous name.
func (m *moduleContext) GetDirectDepWithTag(name string, tag blueprint.DependencyTag) blueprint.Module {
	module, _ := m.getDirectDepInternal(name, tag)
	return module
}

// GetDirectDep returns the direct dependency with the given name along with
// its tag, regardless of the tag it was added with.
func (b *baseModuleContext) GetDirectDep(name string) (blueprint.Module, blueprint.DependencyTag) {
	return b.getDirectDepInternal(name, nil)
}
// VisitDirectDepsBlueprint visits every direct dependency without the
// Android Module validation applied by VisitDirectDeps.
func (b *baseModuleContext) VisitDirectDepsBlueprint(visit func(blueprint.Module)) {
	b.bp.VisitDirectDeps(visit)
}

// VisitDirectDeps visits each direct dependency that is a valid (and, in
// strict mode, enabled) Android Module.
func (b *baseModuleContext) VisitDirectDeps(visit func(Module)) {
	b.bp.VisitDirectDeps(func(module blueprint.Module) {
		if aModule := b.validateAndroidModule(module, b.strictVisitDeps); aModule != nil {
			visit(aModule)
		}
	})
}

// VisitDirectDepsWithTag visits each valid direct dependency that was added
// with the given tag.
func (b *baseModuleContext) VisitDirectDepsWithTag(tag blueprint.DependencyTag, visit func(Module)) {
	b.bp.VisitDirectDeps(func(module blueprint.Module) {
		if aModule := b.validateAndroidModule(module, b.strictVisitDeps); aModule != nil {
			if b.bp.OtherModuleDependencyTag(aModule) == tag {
				visit(aModule)
			}
		}
	})
}

// VisitDirectDepsIf visits each valid direct dependency for which pred
// returns true.
func (b *baseModuleContext) VisitDirectDepsIf(pred func(Module) bool, visit func(Module)) {
	b.bp.VisitDirectDepsIf(
		// pred
		func(module blueprint.Module) bool {
			if aModule := b.validateAndroidModule(module, b.strictVisitDeps); aModule != nil {
				return pred(aModule)
			} else {
				return false
			}
		},
		// visit
		func(module blueprint.Module) {
			visit(module.(Module))
		})
}

// VisitDepsDepthFirst visits every valid transitive dependency depth-first.
func (b *baseModuleContext) VisitDepsDepthFirst(visit func(Module)) {
	b.bp.VisitDepsDepthFirst(func(module blueprint.Module) {
		if aModule := b.validateAndroidModule(module, b.strictVisitDeps); aModule != nil {
			visit(aModule)
		}
	})
}

// VisitDepsDepthFirstIf visits every valid transitive dependency for which
// pred returns true, depth-first.
func (b *baseModuleContext) VisitDepsDepthFirstIf(pred func(Module) bool, visit func(Module)) {
	b.bp.VisitDepsDepthFirstIf(
		// pred
		func(module blueprint.Module) bool {
			if aModule := b.validateAndroidModule(module, b.strictVisitDeps); aModule != nil {
				return pred(aModule)
			} else {
				return false
			}
		},
		// visit
		func(module blueprint.Module) {
			visit(module.(Module))
		})
}
// WalkDepsBlueprint delegates to blueprint's WalkDeps without Android
// Module filtering or path tracking.
func (b *baseModuleContext) WalkDepsBlueprint(visit func(blueprint.Module, blueprint.Module) bool) {
	b.bp.WalkDeps(visit)
}

// WalkDeps performs a depth-first walk over Android Module dependencies,
// maintaining walkPath/tagPath so GetWalkPath/GetTagPath can report the
// chain from the root module to the module currently being visited.
// Subtrees rooted at non-Android modules are skipped (visit returns false).
func (b *baseModuleContext) WalkDeps(visit func(Module, Module) bool) {
	b.walkPath = []Module{b.Module()}
	b.tagPath = []blueprint.DependencyTag{}
	b.bp.WalkDeps(func(child, parent blueprint.Module) bool {
		childAndroidModule, _ := child.(Module)
		parentAndroidModule, _ := parent.(Module)
		if childAndroidModule != nil && parentAndroidModule != nil {
			// record walkPath before visit
			// Pop entries until parent is on top: the walk may have
			// backtracked since the previous callback.
			for b.walkPath[len(b.walkPath)-1] != parentAndroidModule {
				b.walkPath = b.walkPath[0 : len(b.walkPath)-1]
				b.tagPath = b.tagPath[0 : len(b.tagPath)-1]
			}
			b.walkPath = append(b.walkPath, childAndroidModule)
			b.tagPath = append(b.tagPath, b.OtherModuleDependencyTag(childAndroidModule))
			return visit(childAndroidModule, parentAndroidModule)
		} else {
			return false
		}
	})
}

// GetWalkPath returns the module chain from the walk root to the module
// currently being visited by WalkDeps.
func (b *baseModuleContext) GetWalkPath() []Module {
	return b.walkPath
}

// GetTagPath returns the dependency tags corresponding to GetWalkPath.
func (b *baseModuleContext) GetTagPath() []blueprint.DependencyTag {
	return b.tagPath
}
// VisitAllModuleVariants visits every variant of the current module.
func (m *moduleContext) VisitAllModuleVariants(visit func(Module)) {
	m.bp.VisitAllModuleVariants(func(module blueprint.Module) {
		visit(module.(Module))
	})
}

// PrimaryModule returns the first variant of the current module.
func (m *moduleContext) PrimaryModule() Module {
	return m.bp.PrimaryModule().(Module)
}

// FinalModule returns the last variant of the current module.
func (m *moduleContext) FinalModule() Module {
	return m.bp.FinalModule().(Module)
}

// ModuleSubDir returns the variant subdirectory name for the module.
func (m *moduleContext) ModuleSubDir() string {
	return m.bp.ModuleSubDir()
}

// Target returns the target the current variant is built for.
func (b *baseModuleContext) Target() Target {
	return b.target
}

// TargetPrimary reports whether this variant is for the primary target.
func (b *baseModuleContext) TargetPrimary() bool {
	return b.targetPrimary
}

// MultiTargets returns the additional targets of a multi-target variant.
func (b *baseModuleContext) MultiTargets() []Target {
	return b.multiTargets
}

// Arch returns the architecture of the current target.
func (b *baseModuleContext) Arch() Arch {
	return b.target.Arch
}

// Os returns the OS of the current variant.
func (b *baseModuleContext) Os() OsType {
	return b.os
}

// Host reports whether the variant builds for the build host (including
// host-cross).
func (b *baseModuleContext) Host() bool {
	return b.os.Class == Host || b.os.Class == HostCross
}

// Device reports whether the variant builds for a device.
func (b *baseModuleContext) Device() bool {
	return b.os.Class == Device
}

// Darwin reports whether the variant targets Darwin.
func (b *baseModuleContext) Darwin() bool {
	return b.os == Darwin
}

// Fuchsia reports whether the variant targets Fuchsia.
func (b *baseModuleContext) Fuchsia() bool {
	return b.os == Fuchsia
}

// Windows reports whether the variant targets Windows.
func (b *baseModuleContext) Windows() bool {
	return b.os == Windows
}

// Debug reports whether debug mode is enabled for this context.
func (b *baseModuleContext) Debug() bool {
	return b.debug
}

// PrimaryArch reports whether this variant's arch type matches the first
// configured target for its OS (trivially true when the OS has at most one
// target).
func (b *baseModuleContext) PrimaryArch() bool {
	if len(b.config.Targets[b.target.Os]) <= 1 {
		return true
	}
	return b.target.Arch.ArchType == b.config.Targets[b.target.Os][0].Arch.ArchType
}
// Makes this module a platform module, i.e. not specific to soc, device,
// product, or system_ext.
func (m *ModuleBase) MakeAsPlatform() {
	m.commonProperties.Vendor = boolPtr(false)
	m.commonProperties.Proprietary = boolPtr(false)
	m.commonProperties.Soc_specific = boolPtr(false)
	m.commonProperties.Product_specific = boolPtr(false)
	m.commonProperties.System_ext_specific = boolPtr(false)
	m.commonProperties.Vendor_overlay = boolPtr(false)
}

// EnableNativeBridgeSupportByDefault sets native_bridge_supported to true
// for the module.
func (m *ModuleBase) EnableNativeBridgeSupportByDefault() {
	m.commonProperties.Native_bridge_supported = boolPtr(true)
}

// MakeAsSystemExt marks this module as system_ext-specific, clearing every
// other partition-specific property.
func (m *ModuleBase) MakeAsSystemExt() {
	m.commonProperties.Vendor = boolPtr(false)
	m.commonProperties.Proprietary = boolPtr(false)
	m.commonProperties.Soc_specific = boolPtr(false)
	m.commonProperties.Product_specific = boolPtr(false)
	m.commonProperties.System_ext_specific = boolPtr(true)
	m.commonProperties.Vendor_overlay = boolPtr(false)
}

// IsNativeBridgeSupported returns true if "native_bridge_supported" is explicitly set as "true"
func (m *ModuleBase) IsNativeBridgeSupported() bool {
	return proptools.Bool(m.commonProperties.Native_bridge_supported)
}
// The following InstallIn*/InstallBypassMake methods delegate to the module
// so module types can redirect where their files are installed.

func (m *moduleContext) InstallInData() bool {
	return m.module.InstallInData()
}

func (m *moduleContext) InstallInTestcases() bool {
	return m.module.InstallInTestcases()
}

func (m *moduleContext) InstallInSanitizerDir() bool {
	return m.module.InstallInSanitizerDir()
}

func (m *moduleContext) InstallInRamdisk() bool {
	return m.module.InstallInRamdisk()
}

func (m *moduleContext) InstallInRecovery() bool {
	return m.module.InstallInRecovery()
}

func (m *moduleContext) InstallInRoot() bool {
	return m.module.InstallInRoot()
}

func (m *moduleContext) InstallBypassMake() bool {
	return m.module.InstallBypassMake()
}
// skipInstall reports whether the install rule for fullInstallPath should
// be suppressed: when the module opted out via SkipInstall, when its
// namespace is not exported to Make, or, on device, when Make handles
// installation or the mega-device build excludes the path.
func (m *moduleContext) skipInstall(fullInstallPath InstallPath) bool {
	if m.module.base().commonProperties.SkipInstall {
		return true
	}

	// We'll need a solution for choosing which of modules with the same name in different
	// namespaces to install. For now, reuse the list of namespaces exported to Make as the
	// list of namespaces to install in a Soong-only build.
	if !m.module.base().commonProperties.NamespaceExportedToMake {
		return true
	}

	if m.Device() {
		if m.Config().EmbeddedInMake() && !m.InstallBypassMake() {
			return true
		}

		if m.Config().SkipMegaDeviceInstall(fullInstallPath.String()) {
			return true
		}
	}

	return false
}
// InstallFile installs srcPath as installPath/name using the plain copy
// rule and returns the installed path.
func (m *moduleContext) InstallFile(installPath InstallPath, name string, srcPath Path,
	deps ...Path) InstallPath {
	return m.installFile(installPath, name, srcPath, Cp, deps)
}

// InstallExecutable installs srcPath as installPath/name using the
// executable copy rule and returns the installed path.
func (m *moduleContext) InstallExecutable(installPath InstallPath, name string, srcPath Path,
	deps ...Path) InstallPath {
	return m.installFile(installPath, name, srcPath, CpExecutable, deps)
}

// installFile creates the install rule for srcPath at installPath/name
// (unless skipInstall applies), records the result in installFiles, and
// always records srcPath as a checkbuild file.
func (m *moduleContext) installFile(installPath InstallPath, name string, srcPath Path,
	rule blueprint.Rule, deps []Path) InstallPath {

	fullInstallPath := installPath.Join(m, name)
	m.module.base().hooks.runInstallHooks(m, fullInstallPath, false)

	if !m.skipInstall(fullInstallPath) {
		deps = append(deps, m.installDeps...)

		var implicitDeps, orderOnlyDeps Paths

		if m.Host() {
			// Installed host modules might be used during the build, depend directly on their
			// dependencies so their timestamp is updated whenever their dependency is updated
			implicitDeps = deps
		} else {
			orderOnlyDeps = deps
		}

		m.Build(pctx, BuildParams{
			Rule:        rule,
			Description: "install " + fullInstallPath.Base(),
			Output:      fullInstallPath,
			Input:       srcPath,
			Implicits:   implicitDeps,
			OrderOnly:   orderOnlyDeps,
			Default:     !m.Config().EmbeddedInMake(),
		})

		m.installFiles = append(m.installFiles, fullInstallPath)
	}
	m.checkbuildFiles = append(m.checkbuildFiles, srcPath)
	return fullInstallPath
}
// InstallSymlink creates a relative symlink at installPath/name pointing at
// the previously installed srcPath, and returns the symlink's path.
func (m *moduleContext) InstallSymlink(installPath InstallPath, name string, srcPath InstallPath) InstallPath {
	fullInstallPath := installPath.Join(m, name)
	m.module.base().hooks.runInstallHooks(m, fullInstallPath, true)

	if !m.skipInstall(fullInstallPath) {
		// Compute the target relative to the symlink's directory so the link
		// stays valid wherever the install tree is relocated.
		relPath, err := filepath.Rel(path.Dir(fullInstallPath.String()), srcPath.String())
		if err != nil {
			panic(fmt.Sprintf("Unable to generate symlink between %q and %q: %s", fullInstallPath.Base(), srcPath.Base(), err))
		}
		m.Build(pctx, BuildParams{
			Rule:        Symlink,
			Description: "install symlink " + fullInstallPath.Base(),
			Output:      fullInstallPath,
			Input:       srcPath,
			Default:     !m.Config().EmbeddedInMake(),
			Args: map[string]string{
				"fromPath": relPath,
			},
		})

		m.installFiles = append(m.installFiles, fullInstallPath)
		m.checkbuildFiles = append(m.checkbuildFiles, srcPath)
	}
	return fullInstallPath
}

// installPath/name -> absPath where absPath might be a path that is available only at runtime
// (e.g. /apex/...)
func (m *moduleContext) InstallAbsoluteSymlink(installPath InstallPath, name string, absPath string) InstallPath {
	fullInstallPath := installPath.Join(m, name)
	m.module.base().hooks.runInstallHooks(m, fullInstallPath, true)

	if !m.skipInstall(fullInstallPath) {
		m.Build(pctx, BuildParams{
			Rule:        Symlink,
			Description: "install symlink " + fullInstallPath.Base() + " -> " + absPath,
			Output:      fullInstallPath,
			Default:     !m.Config().EmbeddedInMake(),
			Args: map[string]string{
				"fromPath": absPath,
			},
		})

		m.installFiles = append(m.installFiles, fullInstallPath)
	}
	return fullInstallPath
}
// CheckbuildFile adds srcPath to the files built by the checkbuild target
// even when it is not installed.
func (m *moduleContext) CheckbuildFile(srcPath Path) {
	m.checkbuildFiles = append(m.checkbuildFiles, srcPath)
}

// fileInstaller is implemented by modules that install files.
type fileInstaller interface {
	filesToInstall() Paths
}

// isFileInstaller reports whether m implements fileInstaller.
func isFileInstaller(m blueprint.Module) bool {
	_, ok := m.(fileInstaller)
	return ok
}

// isAndroidModule reports whether m implements the Android Module interface.
func isAndroidModule(m blueprint.Module) bool {
	_, ok := m.(Module)
	return ok
}
// findStringInSlice returns the index of the first occurrence of str in
// slice, or -1 if str is not present.
func findStringInSlice(str string, slice []string) int {
	for idx := range slice {
		if slice[idx] == str {
			return idx
		}
	}
	return -1
}
// SrcIsModule decodes module references in the format ":name" into the module name, or empty string if the input
// was not a module reference.
func SrcIsModule(s string) (module string) {
	if s == "" || s[0] != ':' {
		return ""
	}
	if name := s[1:]; name != "" {
		return name
	}
	return ""
}
// SrcIsModuleWithTag decodes module references in the format ":name{.tag}" into the module name and tag, ":name"
// into the module name and an empty string for the tag, or empty strings if the input was not a module reference.
// (Comment previously mis-named the function "SrcIsModule", breaking the
// godoc association.)
func SrcIsModuleWithTag(s string) (module, tag string) {
	if len(s) > 1 && s[0] == ':' {
		module = s[1:]
		if tagStart := strings.IndexByte(module, '{'); tagStart > 0 {
			// Only treat it as a tagged reference when the brace is terminated.
			if module[len(module)-1] == '}' {
				tag = module[tagStart+1 : len(module)-1]
				module = module[:tagStart]
				return module, tag
			}
		}
		return module, ""
	}
	return "", ""
}
// sourceOrOutputDependencyTag is the dependency tag used for ":module" and
// ":module{.tag}" references in path properties.
type sourceOrOutputDependencyTag struct {
	blueprint.BaseDependencyTag
	tag string // the output tag from ":module{.tag}", or "" for ":module"
}

// sourceOrOutputDepTag returns the dependency tag for the given output tag.
func sourceOrOutputDepTag(tag string) blueprint.DependencyTag {
	return sourceOrOutputDependencyTag{tag: tag}
}

// SourceDepTag is the tag used for plain ":module" references.
var SourceDepTag = sourceOrOutputDepTag("")
// Adds necessary dependencies to satisfy filegroup or generated sources modules listed in srcFiles
// using ":module" syntax, if any.
//
// Deprecated: tag the property with `android:"path"` instead.
func ExtractSourcesDeps(ctx BottomUpMutatorContext, srcFiles []string) {
	seen := make(map[string]bool)
	for _, src := range srcFiles {
		module, tag := SrcIsModuleWithTag(src)
		if module == "" {
			// Not a ":module" reference; nothing to add.
			continue
		}
		if seen[src] {
			ctx.ModuleErrorf("found source dependency duplicate: %q!", src)
			continue
		}
		seen[src] = true
		ctx.AddDependency(ctx.Module(), sourceOrOutputDepTag(tag), module)
	}
}
// Adds necessary dependencies to satisfy filegroup or generated sources modules specified in s
// using ":module" syntax, if any.
//
// Deprecated: tag the property with `android:"path"` instead.
func ExtractSourceDeps(ctx BottomUpMutatorContext, s *string) {
	if s == nil {
		return
	}
	module, tag := SrcIsModuleWithTag(*s)
	if module == "" {
		return
	}
	ctx.AddDependency(ctx.Module(), sourceOrOutputDepTag(tag), module)
}
// A module that implements SourceFileProducer can be referenced from any property that is tagged with `android:"path"`
// using the ":module" syntax and provides a list of paths to be used as if they were listed in the property.
type SourceFileProducer interface {
	Srcs() Paths
}

// A module that implements OutputFileProducer can be referenced from any property that is tagged with `android:"path"`
// using the ":module" syntax or ":module{.tag}" syntax and provides a list of output files to be used as if they were
// listed in the property.
type OutputFileProducer interface {
	OutputFiles(tag string) (Paths, error)
}

// OutputFilesForModule returns the paths from an OutputFileProducer with the given tag. On error, including if the
// module produced zero paths, it reports errors to the ctx and returns nil.
func OutputFilesForModule(ctx PathContext, module blueprint.Module, tag string) Paths {
	paths, err := outputFilesForModule(ctx, module, tag)
	if err != nil {
		reportPathError(ctx, err)
		return nil
	}
	return paths
}

// OutputFileForModule returns the path from an OutputFileProducer with the given tag. On error, including if the
// module produced zero or multiple paths, it reports errors to the ctx and returns nil.
func OutputFileForModule(ctx PathContext, module blueprint.Module, tag string) Path {
	paths, err := outputFilesForModule(ctx, module, tag)
	if err != nil {
		reportPathError(ctx, err)
		return nil
	}
	if len(paths) > 1 {
		reportPathErrorf(ctx, "got multiple output files from module %q, expected exactly one",
			pathContextName(ctx, module))
		return nil
	}
	// outputFilesForModule guarantees at least one path on success.
	return paths[0]
}
// outputFilesForModule returns the output files of module for the given
// tag, or an error when the module is not an OutputFileProducer, the
// producer fails, or it yields no paths.
func outputFilesForModule(ctx PathContext, module blueprint.Module, tag string) (Paths, error) {
	outputFileProducer, ok := module.(OutputFileProducer)
	if !ok {
		return nil, fmt.Errorf("module %q is not an OutputFileProducer", pathContextName(ctx, module))
	}

	paths, err := outputFileProducer.OutputFiles(tag)
	if err != nil {
		return nil, fmt.Errorf("failed to get output file from module %q: %s",
			pathContextName(ctx, module), err.Error())
	}
	if len(paths) == 0 {
		return nil, fmt.Errorf("failed to get output files from module %q", pathContextName(ctx, module))
	}
	return paths, nil
}
// HostToolProvider is implemented by modules that produce a tool that runs
// on the build host.
type HostToolProvider interface {
	HostToolPath() OptionalPath
}

// Returns a list of paths expanded from globs and modules referenced using ":module" syntax. The property must
// be tagged with `android:"path" to support automatic source module dependency resolution.
//
// Deprecated: use PathsForModuleSrc or PathsForModuleSrcExcludes instead.
func (m *moduleContext) ExpandSources(srcFiles, excludes []string) Paths {
	return PathsForModuleSrcExcludes(m, srcFiles, excludes)
}

// Returns a single path expanded from globs and modules referenced using ":module" syntax. The property must
// be tagged with `android:"path" to support automatic source module dependency resolution.
//
// Deprecated: use PathForModuleSrc instead.
func (m *moduleContext) ExpandSource(srcFile, prop string) Path {
	return PathForModuleSrc(m, srcFile)
}

// Returns an optional single path expanded from globs and modules referenced using ":module" syntax if
// the srcFile is non-nil. The property must be tagged with `android:"path" to support automatic source module
// dependency resolution.
func (m *moduleContext) ExpandOptionalSource(srcFile *string, prop string) OptionalPath {
	if srcFile != nil {
		return OptionalPathForPath(PathForModuleSrc(m, *srcFile))
	}
	return OptionalPath{}
}

// RequiredModuleNames returns the names of modules this module requires
// (presumably the "required" property — confirm against Module).
func (m *moduleContext) RequiredModuleNames() []string {
	return m.module.RequiredModuleNames()
}

// HostRequiredModuleNames returns the names of host modules this module
// requires.
func (m *moduleContext) HostRequiredModuleNames() []string {
	return m.module.HostRequiredModuleNames()
}

// TargetRequiredModuleNames returns the names of target modules this module
// requires.
func (m *moduleContext) TargetRequiredModuleNames() []string {
	return m.module.TargetRequiredModuleNames()
}
// init registers the singleton that generates checkbuild and MODULES-IN-*
// phony targets.
func init() {
	RegisterSingletonType("buildtarget", BuildTargetSingleton)
}

// BuildTargetSingleton returns the singleton that generates the build
// target phony rules.
func BuildTargetSingleton() Singleton {
	return &buildTargetSingleton{}
}
// parentDir returns the cleaned parent directory of dir ("." when dir has
// no parent). filepath.Dir is defined as Clean applied to the directory
// component of Split, so it replaces the previous two-step version exactly.
func parentDir(dir string) string {
	return filepath.Dir(dir)
}
// buildTargetSingleton generates the checkbuild and MODULES-IN-<dir> phony
// targets.
type buildTargetSingleton struct{}

func (c *buildTargetSingleton) GenerateBuildActions(ctx SingletonContext) {
	var checkbuildDeps Paths

	// mmTarget converts a source directory into its MODULES-IN-* target name.
	mmTarget := func(dir string) string {
		return "MODULES-IN-" + strings.Replace(filepath.Clean(dir), "/", "-", -1)
	}

	modulesInDir := make(map[string]Paths)

	// Collect every module's checkbuild/install targets grouped by the
	// directory containing its build file.
	ctx.VisitAllModules(func(module Module) {
		blueprintDir := module.base().blueprintDir
		installTarget := module.base().installTarget
		checkbuildTarget := module.base().checkbuildTarget

		if checkbuildTarget != nil {
			checkbuildDeps = append(checkbuildDeps, checkbuildTarget)
			modulesInDir[blueprintDir] = append(modulesInDir[blueprintDir], checkbuildTarget)
		}

		if installTarget != nil {
			modulesInDir[blueprintDir] = append(modulesInDir[blueprintDir], installTarget)
		}
	})

	suffix := ""
	if ctx.Config().EmbeddedInMake() {
		suffix = "-soong"
	}

	// Create a top-level checkbuild target that depends on all modules
	ctx.Phony("checkbuild"+suffix, checkbuildDeps...)

	// Make will generate the MODULES-IN-* targets
	if ctx.Config().EmbeddedInMake() {
		return
	}

	// Ensure ancestor directories are in modulesInDir
	dirs := SortedStringKeys(modulesInDir)
	for _, dir := range dirs {
		dir := parentDir(dir)
		for dir != "." && dir != "/" {
			if _, exists := modulesInDir[dir]; exists {
				break
			}
			modulesInDir[dir] = nil
			dir = parentDir(dir)
		}
	}

	// Make directories build their direct subdirectories
	for _, dir := range dirs {
		p := parentDir(dir)
		if p != "." && p != "/" {
			modulesInDir[p] = append(modulesInDir[p], PathForPhony(ctx, mmTarget(dir)))
		}
	}

	// Create a MODULES-IN-<directory> target that depends on all modules in a directory, and
	// depends on the MODULES-IN-* targets of all of its subdirectories that contain Android.bp
	// files.
	for _, dir := range dirs {
		ctx.Phony(mmTarget(dir), modulesInDir[dir]...)
	}

	// Create (host|host-cross|target)-<OS> phony rules to build a reduced checkbuild.
	osDeps := map[OsType]Paths{}
	ctx.VisitAllModules(func(module Module) {
		if module.Enabled() {
			os := module.Target().Os
			osDeps[os] = append(osDeps[os], module.base().checkbuildFiles...)
		}
	})

	osClass := make(map[string]Paths)
	for os, deps := range osDeps {
		var className string

		switch os.Class {
		case Host:
			className = "host"
		case HostCross:
			className = "host-cross"
		case Device:
			className = "target"
		default:
			// Other OS classes get no per-OS phony rule.
			continue
		}

		name := className + "-" + os.Name
		osClass[className] = append(osClass[className], PathForPhony(ctx, name))

		ctx.Phony(name, deps...)
	}

	// Wrap those into host|host-cross|target phony rules
	for _, class := range SortedStringKeys(osClass) {
		ctx.Phony(class, osClass[class]...)
	}
}
// Collect information for opening IDE project files in java/jdeps.go.
type IDEInfo interface {
	IDEInfo(ideInfo *IdeInfo)
	BaseModuleName() string
}

// Extract the base module name from the Import name.
// Often the Import name has a prefix "prebuilt_".
// Remove the prefix explicitly if needed
// until we find a better solution to get the Import name.
type IDECustomizedModuleName interface {
	IDECustomizedModuleName() string
}

// IdeInfo is the JSON-serializable payload describing a module for IDE
// project generation.
type IdeInfo struct {
	Deps              []string `json:"dependencies,omitempty"`
	Srcs              []string `json:"srcs,omitempty"`
	Aidl_include_dirs []string `json:"aidl_include_dirs,omitempty"`
	Jarjar_rules      []string `json:"jarjar_rules,omitempty"`
	Jars              []string `json:"jars,omitempty"`
	Classes           []string `json:"class,omitempty"`
	Installed_paths   []string `json:"installed,omitempty"`
	SrcJars           []string `json:"srcjars,omitempty"`
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
"github.com/aws/aws-sdk-go/service/cognitoidentityprovider/cognitoidentityprovideriface"
)
// mockVerify stubs the Cognito identity provider API for tests; ConfirmSignUp
// returns Response for the accepted code and an expired-code error otherwise.
type mockVerify struct {
	cognitoidentityprovideriface.CognitoIdentityProviderAPI
	Response cognitoidentityprovider.ConfirmSignUpOutput
}

// ConfirmSignUp returns the canned Response when the confirmation code is
// "183609" and an ExpiredCodeException AWS error for any other code.
func (d mockVerify) ConfirmSignUp(in *cognitoidentityprovider.ConfirmSignUpInput) (*cognitoidentityprovider.ConfirmSignUpOutput, error) {
	if *in.ConfirmationCode != "183609" {
		return nil, awserr.New(
			cognitoidentityprovider.ErrCodeExpiredCodeException,
			"code has expired",
			errors.New("Code has expired"),
		)
	}
	return &d.Response, nil
}
// TestHandleRequest exercises deps.HandleRequest against the mocked Cognito
// client: once with the accepted confirmation code from the fixture, and
// once with a code the mock treats as expired. Fixture-loading errors now
// fail the test immediately instead of being printed and ignored.
func TestHandleRequest(t *testing.T) {
	t.Run("Successfully verify granted code", func(t *testing.T) {
		// load test data
		jsonFile, err := os.Open("./testdata/verify-payload.json")
		if err != nil {
			t.Fatal(fmt.Sprintf("opening test payload: %s", err))
		}
		defer jsonFile.Close()

		var confirmInput ConfirmInput
		byteJSON, err := ioutil.ReadAll(jsonFile)
		if err != nil {
			t.Fatal(fmt.Sprintf("reading test payload: %s", err))
		}
		if err := json.Unmarshal(byteJSON, &confirmInput); err != nil {
			t.Fatal(fmt.Sprintf("unmarshalling test payload: %s", err))
		}

		// create mock output
		m := mockVerify{
			Response: cognitoidentityprovider.ConfirmSignUpOutput{},
		}
		// create dependency object
		d := deps{
			cognito: m,
		}
		// execute test of function
		_, err = d.HandleRequest(nil, confirmInput)
		if err != nil {
			t.Error("verifying confirm code failed")
		}
	})
	t.Run("Generated code expired", func(t *testing.T) {
		// load test data
		jsonFile, err := os.Open("./testdata/verify-payload.json")
		if err != nil {
			t.Fatal(fmt.Sprintf("opening test payload: %s", err))
		}
		defer jsonFile.Close()

		var confirmInput ConfirmInput
		byteJSON, err := ioutil.ReadAll(jsonFile)
		if err != nil {
			t.Fatal(fmt.Sprintf("reading test payload: %s", err))
		}
		if err := json.Unmarshal(byteJSON, &confirmInput); err != nil {
			t.Fatal(fmt.Sprintf("unmarshalling test payload: %s", err))
		}
		// any code other than "183609" triggers the mock's expired-code error
		confirmInput.Token = "12345"

		// create mock output
		m := mockVerify{
			Response: cognitoidentityprovider.ConfirmSignUpOutput{},
		}
		// create dependency object
		d := deps{
			cognito: m,
		}
		// execute test of function
		result, err := d.HandleRequest(nil, confirmInput)
		if result.Message != "The code has expired" || err == nil {
			t.Error("failed to catch and handle expired code exception")
		}
	})
}
|
package main
import (
"fmt"
"sync"
)
// wg tracks the in-flight worker goroutines so the closer goroutine knows
// when it is safe to close the output channel.
var wg sync.WaitGroup

func main() {
	//testAnonymousStructs()

	// i := 1
	// defer logNum(i) // deferred function call: logNum(1)
	// fmt.Println("First main statement")
	// i++
	// defer logNum(i) // deferred function call: logNum(2)
	// defer logNum(i * i) // deferred function call: logNum(4)
	// fmt.Printf("Num in main %d\n", i)

	// for i := 0; i < 2; i++ {
	// defer func() {
	// fmt.Printf("%d\n", i)
	// }()
	// }

	chIn := make(chan int)
	chOut := make(chan int)

	// Start two workers that read from chIn and write results to chOut.
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go sqrtWorker(chIn, chOut)
	}

	// Producer: feed two values, then close chIn so the workers' range
	// loops terminate.
	go func() {
		chIn <- 2
		chIn <- 4
		close(chIn)
	}()

	// Closer: once every worker has called wg.Done(), close chOut so the
	// range loop below can finish.
	go func() {
		wg.Wait()
		close(chOut)
	}()

	// NOTE(review): sqrtWorker actually sends i*i (the square), so this
	// prints squares despite the "sqrt" naming.
	for sqrt := range chOut {
		fmt.Printf("Got sqrt: %d\n", sqrt)
	}
}
// sqrtWorker consumes ints from chIn and sends i*i on chOut until chIn is
// closed, then signals completion on the package WaitGroup.
// NOTE: despite the name, this computes the square, not the square root;
// renaming would require touching the caller, so it is only noted here.
func sqrtWorker(chIn chan int, chOut chan int) {
	// Defer the Done so the WaitGroup is decremented even if a send on
	// chOut panics (e.g. if chOut were closed prematurely); otherwise
	// wg.Wait() in main would block forever.
	defer wg.Done()

	fmt.Printf("sqrtWorker started\n")
	for i := range chIn {
		chOut <- i * i
	}
	fmt.Printf("sqrtWorker finished\n")
}
// logNum prints the value prefixed with "Num " on its own line.
func logNum(i int) {
	fmt.Println("Num", i)
}
|
package main
import (
"fmt"
"os"
)
// some prints "Hello Go" and then, via defer, "Hello World" as the function
// returns — demonstrating that deferred calls run after the body completes.
func some() {
	defer fmt.Println("Hello World") // deferred: runs after the function body completes
	fmt.Println("Hello Go")
}
// deferはファイルをopen, closeするときなどに便利下記のように使う
// filehandler demonstrates the open/defer-close file pattern: it reads
// up to 100 bytes from ./main.go and prints them as a string.
func filehandler() {
	file, err := os.Open("./main.go")
	if err != nil {
		// Original ignored this error and went on to read from a nil
		// *os.File; report and bail out instead.
		fmt.Println(err)
		return
	}
	defer file.Close()

	data := make([]byte, 100)
	n, err := file.Read(data)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Print only the bytes actually read, not the whole zero-padded
	// buffer.
	fmt.Println(string(data[:n]))
}
// main demonstrates that deferred calls run in LIFO order: 3, 2, 1 are
// printed after "success" (and after filehandler returns), in reverse
// order of their defer statements.
func main() {
	fmt.Println("run")
	// Each iteration defers a print; arguments are evaluated at defer
	// time, and the stacked calls fire in reverse: 3, 2, 1.
	for _, n := range []int{1, 2, 3} {
		defer fmt.Println(n)
	}
	fmt.Println("success")
	filehandler()
}
|
package cmd
import (
"fmt"
"github.com/andytom/tmpltr/template"
"github.com/spf13/cobra"
)
// init registers the install subcommand on the root command and wires
// up its --force/-f flag (stored in the package-level force var).
func init() {
	RootCmd.AddCommand(installCmd)
	installCmd.Flags().BoolVarP(&force, "force", "f", false, "Force the installation of a template")
}
// force is set by the --force/-f flag and allows overwriting an
// existing template with the same name.
var force bool

// installCmd implements "install NAME DIR": it loads the template
// found in DIR and stores it under NAME, replacing an existing
// template only when --force is given.
var installCmd = &cobra.Command{
	Use:   "install NAME DIR",
	Short: "Install a template from a directory",
	Long: `Install a template from a directory
This will install the template found in DIR with the name NAME
If there is already a template with that name you will need to force the
installation which will overwrite the existing template.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) != 2 {
			return fmt.Errorf("need a template name and a directory")
		}
		templateKey := args[0]
		srcDir := args[1]

		s, err := template.OpenStore(cfgDir)
		if err != nil {
			return fmt.Errorf("unable to access the template directory %q: %w", cfgDir, err)
		}

		t, err := template.New(srcDir)
		if err != nil {
			return fmt.Errorf("unable to install template: %w", err)
		}

		// A nil error from Get means a template with this name already
		// exists; only overwrite it when --force was given.
		_, err = s.Get(templateKey)
		if err == nil {
			if force {
				fmt.Printf("Replacing %q with template from %q\n", templateKey, srcDir)
				return s.Replace(templateKey, &t)
			}
			return fmt.Errorf("there is already a template with the name %q", templateKey)
		}

		fmt.Printf("Installing template from %q as %q\n", srcDir, templateKey)
		return s.Create(templateKey, &t)
	},
}
|
package local
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/ubclaunchpad/inertia/cfg"
)
// TestInitializeInertiaProjetFail verifies that initializing a project
// with all-empty arguments returns an error.
//
// NOTE(review): "Projet" in the test name looks like a typo for
// "Project".
func TestInitializeInertiaProjetFail(t *testing.T) {
	err := InitializeInertiaProject("", "", "")
	assert.NotNil(t, err)
}
// TestGetConfigFail verifies that reading the project configuration
// fails when no config file exists on disk.
//
// NOTE(review): assumes no config file is present when this test runs;
// ordering relative to tests that create one matters — confirm.
func TestGetConfigFail(t *testing.T) {
	_, _, err := GetProjectConfigFromDisk()
	assert.NotNil(t, err)
}
// TestConfigCreateAndWriteAndRead exercises the full config lifecycle:
// create the config file, reject duplicate creation, add remotes,
// write to disk, read it back, resolve clients by name, and finally
// remove the file.
//
// NOTE(review): this test leaves state on disk until the final
// os.Remove; a failure mid-test can leak the config file into other
// tests.
func TestConfigCreateAndWriteAndRead(t *testing.T) {
	err := createConfigFile("test", "dockerfile", "")
	assert.Nil(t, err)
	// Creating the same config a second time must fail.
	err = createConfigFile("test", "dockerfile", "")
	assert.NotNil(t, err)
	// Get config and add remotes
	config, configPath, err := GetProjectConfigFromDisk()
	assert.Nil(t, err)
	config.AddRemote(&cfg.RemoteVPS{
		Name: "test",
		IP: "1234",
		User: "bobheadxi",
		PEM: "/some/pem/file",
		SSHPort: "22",
		Daemon: &cfg.DaemonConfig{
			Port: "8080",
		},
	})
	config.AddRemote(&cfg.RemoteVPS{
		Name: "test2",
		IP: "12343",
		User: "bobheadxi234",
		PEM: "/some/pem/file234",
		SSHPort: "222",
		Daemon: &cfg.DaemonConfig{
			Port: "80801",
		},
	})
	// Test config creation
	err = config.Write(configPath)
	assert.Nil(t, err)
	// Test config read
	readConfig, _, err := GetProjectConfigFromDisk()
	assert.Nil(t, err)
	assert.Equal(t, config.Remotes[0], readConfig.Remotes[0])
	assert.Equal(t, config.Remotes[1], readConfig.Remotes[1])
	// Test client read
	client, err := GetClient("test2")
	assert.Nil(t, err)
	assert.Equal(t, "test2", client.Name)
	assert.Equal(t, "12343:80801", client.GetIPAndPort())
	// Looking up an unknown remote must fail.
	_, err = GetClient("asdf")
	assert.NotNil(t, err)
	// Test config remove
	err = os.Remove(configPath)
	assert.Nil(t, err)
}
|
/*
Task:
Given an integer input, figure out whether or not it is a Cyclops Number.
What is a Cyclops number, you may ask? Well, it's a number whose binary representation only has one 0 in the center!
Test Cases:
Input | Output | Binary | Explanation
--------------------------------------
0 | truthy | 0 | only one zero at "center"
1 | falsy | 1 | contains no zeroes
5 | truthy | 101 | only one zero at center
9 | falsy | 1001 | contains two zeroes (even though both are at the center)
10 | falsy | 1010 | contains two zeroes
27 | truthy | 11011 | only one zero at center
85 | falsy | 1010101 | contains three zeroes
101 | falsy | 1100101 | contains three zeroes
111 | falsy | 1101111 | only one zero, not at center
119 | truthy | 1110111 | only one zero at center
Input:
An integer or equivalent types. (int, long, decimal, etc.)
Assume that if evaluating the input results in an integer overflow or other undesirable problems, then that input doesn't have to be evaluated.
Output:
Truthy or falsy.
Truthy/falsy output must meet the used language's specifications for truthy/falsy. (e.g. C has 0 as false, non-zero as true)
Challenge Rules:
Input that is less than 0 is assumed to be falsy and thus does not have to be evaluated.
If the length of the binary representation of the number is even, then the number cannot be a Cyclops number.
General Rules:
This is code-golf, so the shortest answer in bytes wins!
Default loopholes are forbidden.
Standard rules apply for your answer with default I/O rules.
This is my first Programming Puzzles & Code Golf challenge, so any feedback on how I should improve would be much appreciated!
*/
package main
import "fmt"
// main exercises cyclops against the challenge's published test cases
// and panics (via assert) on any mismatch.
func main() {
	cases := []struct {
		n    uint64
		want bool
	}{
		{0, true}, {1, false}, {5, true}, {9, false}, {10, false},
		{27, true}, {85, false}, {101, false}, {111, false}, {119, true},
	}
	for _, tc := range cases {
		assert(cyclops(tc.n) == tc.want)
	}
}
// assert panics when its argument is false; it serves as the minimal
// test harness for this file.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// cyclops reports whether n is a "cyclops number": its binary
// representation contains exactly one 0, located at the exact center.
// An even-length binary representation has no center digit, so such
// values can never be cyclops numbers.
func cyclops(n uint64) bool {
	p := fmt.Sprintf("%b", n)
	// A center digit only exists for odd lengths. The original code
	// compared positions only and so wrongly accepted n=2 ("10",
	// zero at index len/2=1); reject even lengths explicitly per the
	// challenge rules.
	if len(p)%2 == 0 {
		return false
	}
	mid := len(p) / 2
	for i := 0; i < len(p); i++ {
		// Exactly the middle digit must be '0'; every other digit '1'.
		if (p[i] == '0') != (i == mid) {
			return false
		}
	}
	return true
}
|
package memphis
import (
"context"
"fmt"
"github.com/memphisdev/memphis.go"
"github.com/pkg/errors"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/util"
"github.com/batchcorp/plumber/validate"
)
// Write publishes each message in messages to the configured Memphis
// station. Per-message produce failures are reported on errorCh and do
// not abort the batch; only setup failures (invalid options, producer
// creation) are returned as an error.
//
// NOTE(review): ctx is currently unused — cancellation is not
// propagated to the produce calls.
func (m *Memphis) Write(ctx context.Context, writeOpts *opts.WriteOptions, errorCh chan<- *records.ErrorRecord, messages ...*records.WriteRecord) error {
	if err := validateWriteOptions(writeOpts); err != nil {
		return errors.Wrap(err, "unable to verify write options")
	}
	args := writeOpts.GetMemphis().Args
	producer, err := m.client.CreateProducer(args.Station, args.ProducerName)
	if err != nil {
		return errors.Wrap(err, "unable to create Memphis producer")
	}
	// NOTE(review): this closes the backend's shared client (not just
	// the producer) when Write returns — confirm Write is meant to be
	// a one-shot operation per connection.
	defer m.client.Close()
	headers := genHeaders(args.Headers)
	// Build the per-produce options once; they apply to every message.
	po := make([]memphis.ProduceOpt, 0)
	po = append(po, memphis.MsgHeaders(headers))
	if args.MessageId != "" {
		po = append(po, memphis.MsgId(args.MessageId))
	}
	for _, msg := range messages {
		// A failed produce is surfaced on errorCh; the loop continues
		// with the remaining messages.
		if err := producer.Produce([]byte(msg.Input), po...); err != nil {
			util.WriteError(m.log, errorCh, fmt.Errorf("unable to write message to station '%s': %s", args.Station, err))
		}
	}
	return nil
}
// validateWriteOptions checks that the write options carry everything
// the Memphis backend needs: non-nil options, a Memphis section with
// args, and a non-empty target station name.
//
// The parameter was renamed from opts to writeOpts: the original name
// shadowed the imported opts package inside the function body.
func validateWriteOptions(writeOpts *opts.WriteOptions) error {
	switch {
	case writeOpts == nil:
		return validate.ErrEmptyWriteOpts
	case writeOpts.Memphis == nil:
		return validate.ErrEmptyBackendGroup
	case writeOpts.Memphis.Args == nil:
		return validate.ErrEmptyBackendArgs
	case writeOpts.Memphis.Args.Station == "":
		return ErrEmptyStation
	}
	return nil
}
|
// Copyright 2021 Clivern. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package definition
import (
"fmt"
"strings"
"testing"
"github.com/franela/goblin"
)
// TestUnitPrometheus verifies that the generated Prometheus
// docker-compose definition contains the expected image/tag, port,
// config-file volume mount, and restart policy, and renders without
// error.
func TestUnitPrometheus(t *testing.T) {
	g := goblin.Goblin(t)
	g.Describe("#TestPrometheus", func() {
		g.It("It should satisfy all provided test cases", func() {
			prometheus := GetPrometheusConfig("prometheus", "", "/etc/peanut/storage/da2ce8ac-d33f-4dd9-a345-d76f2a4336be.yml")
			result, err := prometheus.ToString()
			// The rendered output must reference the pinned image:tag.
			g.Assert(strings.Contains(result, fmt.Sprintf("image: %s", fmt.Sprintf("%s:%s", PrometheusDockerImage, PrometheusDockerImageVersion)))).Equal(true)
			g.Assert(strings.Contains(result, fmt.Sprintf(`- "%s"`, PrometheusPort))).Equal(true)
			// The custom config file must be mounted over the default
			// prometheus.yml inside the container.
			g.Assert(strings.Contains(
				result,
				"- /etc/peanut/storage/da2ce8ac-d33f-4dd9-a345-d76f2a4336be.yml:/etc/prometheus/prometheus.yml",
			)).Equal(true)
			g.Assert(strings.Contains(result, fmt.Sprintf("restart: %s", PrometheusRestartPolicy))).Equal(true)
			// Rendering itself must not have failed.
			g.Assert(err).Equal(nil)
		})
	})
}
|
package main
import (
"fmt"
"reflect"
)
// main demonstrates the relationship between arrays and slices: a
// slice is a view (length + pointer) into a backing array, so
// mutations through the array or any slice are visible everywhere.
func main() {
	a1 := [3]int{1, 2, 3} // array
	s1 := []int{1, 2, 3} // slice
	fmt.Println(a1, s1)
	fmt.Println(reflect.TypeOf(a1), reflect.TypeOf(s1))
	a2 := [5]int{1, 2, 3, 4, 5}
	// A slice is not an array! A slice describes a piece of an array.
	s2 := a2[1:3]
	fmt.Println(a2, s2)
	s3 := a2[:2] // new slice, but it points to the same backing array
	fmt.Println(a2, s3)
	// You can picture a slice as: a length plus a pointer to an element of an array.
	s4 := s2[:1]
	fmt.Println(s2, s4)
	// Because a slice points into a piece of the array, changing the array
	// is reflected in every slice over that region.
	a2[1] = 9
	fmt.Println(s2, s4, a2)
	// Likewise, assigning through a slice actually writes to the array and
	// is visible through every other slice referencing that region.
	s2[0] = 7
	fmt.Println(s2[0], s2, s4, a2)
}
|
// Package baremetal provides a cluster-destroyer for bare metal clusters.
package baremetal
|
/*
This blog post about generating random CSS color codes in JavaScript has multiple solutions for generating a random color in JavaScript. The shortest I can find is this:
'#'+(Math.random()*0xffffff).toString(16).slice(-6)
If you are not familiar with CSS color codes, read the documentation here.
Can we do better? What about other languages?
*/
package main
import (
"fmt"
"math/rand"
"time"
)
// main seeds the global PRNG with the current time and prints ten
// random CSS color codes, one per line.
func main() {
	rand.Seed(time.Now().UnixNano())
	const count = 10
	for n := 0; n < count; n++ {
		fmt.Println(randcss())
	}
}
// randcss returns a random CSS hex color code such as "#0AF3C2":
// a '#' followed by six uppercase, zero-padded hex digits.
func randcss() string {
	v := rand.Intn(0x1000000) // uniform over 0x000000..0xFFFFFF
	return fmt.Sprintf("#%06X", v)
}
|
package postgres
import (
"github.com/frk/gosql/internal/analysis"
"github.com/frk/gosql/internal/postgres/oid"
)
// compkey identifies a Postgres type in the compatibility tables: the
// type's OID together with a flag indicating whether the entry applies
// to types declared with a type modifier of 1 (e.g. bit(1), char(1)).
type compkey struct {
	oid oid.OID
	typmod1 bool
}

// compentry names the helper functions used to convert between a Go
// literal type and a Postgres type (e.g. "BitFromBool" as valuer,
// "BitArrayToBoolSlice" as scanner). Empty strings mean no helper is
// needed for that direction.
type compentry struct {
	valuer string
	scanner string
}

// comptable maps Go literal types to compatible Postgres types and
// vice versa; the two maps index the same pairs from either side.
type comptable struct {
	literal2oid map[analysis.LiteralType]map[compkey]compentry
	oid2literal map[compkey]map[analysis.LiteralType]compentry
}

// compatibility is the package-wide type-compatibility table,
// initialized once at package load (literal2oid is derived by
// inverting oid2literal).
var compatibility = comptable{
	literal2oid: init_literal2oid(),
	oid2literal: init_oid2literal(),
}
// getTypeInfoOIDs returns the OIDs of the Postgres types compatible
// with the given Go type. OIDs whose conversion needs no explicit
// valuer/scanner are prepended so natively-compatible types come
// first. Returns nil when the type's generic literal has no known
// compatible Postgres types.
//
// NOTE: iteration over the inner map is unordered, so the relative
// order of OIDs within each group is not deterministic.
func (c comptable) getTypeInfoOIDs(typ analysis.TypeInfo) []oid.OID {
	lit := typ.GenericLiteral()
	// Use the receiver's own table; the original consulted the
	// package-level compatibility var and silently ignored c.
	keys, ok := c.literal2oid[lit]
	if !ok {
		return nil
	}
	var oids []oid.OID
	for key, ce := range keys {
		if ce.valuer == "" && ce.scanner == "" {
			// Native conversions go to the front.
			oids = append([]oid.OID{key.oid}, oids...)
		} else {
			oids = append(oids, key.oid)
		}
	}
	return oids
}
// init_literal2oid builds the literal-to-OID table by inverting the
// OID-to-literal table: every (oid, literal) pair is re-indexed with
// the literal type as the primary key.
func init_literal2oid() map[analysis.LiteralType]map[compkey]compentry {
	inverted := make(map[analysis.LiteralType]map[compkey]compentry)
	for key, litmap := range init_oid2literal() {
		for lit, comp := range litmap {
			inner := inverted[lit]
			if inner == nil {
				inner = make(map[compkey]compentry)
				inverted[lit] = inner
			}
			inner[key] = comp
		}
	}
	return inverted
}
func init_oid2literal() map[compkey]map[analysis.LiteralType]compentry {
return map[compkey]map[analysis.LiteralType]compentry{
{oid: oid.Bit, typmod1: true}: {
analysis.LiteralBool: {valuer: "BitFromBool"},
analysis.LiteralUint: {},
analysis.LiteralUint8: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.BitArr, typmod1: true}: {
analysis.LiteralBoolSlice: {valuer: "BitArrayFromBoolSlice", scanner: "BitArrayToBoolSlice"},
analysis.LiteralUintSlice: {valuer: "BitArrayFromUintSlice", scanner: "BitArrayToUintSlice"},
analysis.LiteralUint8Slice: {valuer: "BitArrayFromUint8Slice", scanner: "BitArrayToUint8Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.BPChar, typmod1: true}: {
analysis.LiteralByte: {valuer: "BPCharFromByte", scanner: "BPCharToByte"},
analysis.LiteralRune: {valuer: "BPCharFromRune", scanner: "BPCharToRune"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.BPCharArr, typmod1: true}: {
analysis.LiteralRuneSlice: {valuer: "BPCharArrayFromRuneSlice", scanner: "BPCharArrayToRuneSlice"},
analysis.LiteralStringSlice: {valuer: "BPCharArrayFromStringSlice", scanner: "BPCharArrayToStringSlice"},
analysis.LiteralString: {valuer: "BPCharArrayFromString", scanner: "BPCharArrayToString"},
analysis.LiteralByteSlice: {valuer: "BPCharArrayFromByteSlice", scanner: "BPCharArrayToByteSlice"},
},
{oid: oid.Char, typmod1: true}: {
analysis.LiteralByte: {valuer: "CharFromByte", scanner: "CharToByte"},
analysis.LiteralRune: {valuer: "CharFromRune", scanner: "CharToRune"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.CharArr, typmod1: true}: {
analysis.LiteralRuneSlice: {valuer: "CharArrayFromRuneSlice", scanner: "CharArrayToRuneSlice"},
analysis.LiteralStringSlice: {valuer: "CharArrayFromStringSlice", scanner: "CharArrayToStringSlice"},
analysis.LiteralString: {valuer: "CharArrayFromString", scanner: "CharArrayToString"},
analysis.LiteralByteSlice: {valuer: "CharArrayFromByteSlice", scanner: "CharArrayToByteSlice"},
},
////////////////////////////////////////////////////////////////
{oid: oid.Bool}: {
analysis.LiteralBool: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.BoolArr}: {
analysis.LiteralBoolSlice: {valuer: "BoolArrayFromBoolSlice", scanner: "BoolArrayToBoolSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Box}: {
analysis.LiteralFloat64Array2Array2: {valuer: "BoxFromFloat64Array2Array2", scanner: "BoxToFloat64Array2Array2"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.BoxArr}: {
analysis.LiteralFloat64Array2Array2Slice: {valuer: "BoxArrayFromFloat64Array2Array2Slice", scanner: "BoxArrayToFloat64Array2Array2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Bytea}: {
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.ByteaArr}: {
analysis.LiteralStringSlice: {valuer: "ByteaArrayFromStringSlice", scanner: "ByteaArrayToStringSlice"},
analysis.LiteralByteSliceSlice: {valuer: "ByteaArrayFromByteSliceSlice", scanner: "ByteaArrayToByteSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.CIDR}: {
analysis.LiteralIPNet: {valuer: "CIDRFromIPNet", scanner: "CIDRToIPNet"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.CIDRArr}: {
analysis.LiteralIPNetSlice: {valuer: "CIDRArrayFromIPNetSlice", scanner: "CIDRArrayToIPNetSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Circle}: {
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.CircleArr}: {
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Date}: {
analysis.LiteralTime: {scanner: "DateToTime"},
analysis.LiteralString: {scanner: "DateToString"},
analysis.LiteralByteSlice: {scanner: "DateToByteSlice"},
},
{oid: oid.DateArr}: {
analysis.LiteralTimeSlice: {valuer: "DateArrayFromTimeSlice", scanner: "DateArrayToTimeSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.DateRange}: {
analysis.LiteralTimeArray2: {valuer: "DateRangeFromTimeArray2", scanner: "DateRangeToTimeArray2"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.DateRangeArr}: {
analysis.LiteralTimeArray2Slice: {valuer: "DateRangeArrayFromTimeArray2Slice", scanner: "DateRangeArrayToTimeArray2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Float4}: {
analysis.LiteralFloat32: {},
analysis.LiteralFloat64: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Float4Arr}: {
analysis.LiteralFloat32Slice: {valuer: "Float4ArrayFromFloat32Slice", scanner: "Float4ArrayToFloat32Slice"},
analysis.LiteralFloat64Slice: {valuer: "Float4ArrayFromFloat64Slice", scanner: "Float4ArrayToFloat64Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Float8}: {
analysis.LiteralFloat32: {},
analysis.LiteralFloat64: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Float8Arr}: {
analysis.LiteralFloat32Slice: {valuer: "Float8ArrayFromFloat32Slice", scanner: "Float8ArrayToFloat32Slice"},
analysis.LiteralFloat64Slice: {valuer: "Float8ArrayFromFloat64Slice", scanner: "Float8ArrayToFloat64Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
/*
{oid: oid.HStore}: {
analysis.LiteralStringMap: {valuer: "HStoreFromStringMap", scanner: "HStoreToStringMap"},
analysis.LiteralStringPtrMap: {valuer: "HStoreFromStringPtrMap", scanner: "HStoreToStringPtrMap"},
analysis.LiteralNullStringMap: {valuer: "HStoreFromNullStringMap", scanner: "HStoreToNullStringMap"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.HStoreArr}: {
analysis.LiteralStringMapSlice: {valuer: "HStoreArrayFromStringMapSlice", scanner: "HStoreArrayToStringMapSlice"},
analysis.LiteralStringPtrMapSlice: {valuer: "HStoreArrayFromStringPtrMapSlice", scanner: "HStoreArrayToStringPtrMapSlice"},
analysis.LiteralNullStringMapSlice: {valuer: "HStoreArrayFromNullStringMapSlice", scanner: "HStoreArrayToNullStringMapSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
*/
{oid: oid.Inet}: {
analysis.LiteralIP: {valuer: "InetFromIP", scanner: "InetToIP"},
analysis.LiteralIPNet: {valuer: "InetFromIPNet", scanner: "InetToIPNet"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.InetArr}: {
analysis.LiteralIPSlice: {valuer: "InetArrayFromIPSlice", scanner: "InetArrayFromIPSlice"},
analysis.LiteralIPNetSlice: {valuer: "InetArrayFromIPNetSlice", scanner: "InetArrayFromIPNetSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int2}: {
analysis.LiteralInt: {},
analysis.LiteralInt8: {},
analysis.LiteralInt16: {},
analysis.LiteralInt32: {},
analysis.LiteralInt64: {},
analysis.LiteralUint: {},
analysis.LiteralUint8: {},
analysis.LiteralUint16: {},
analysis.LiteralUint32: {},
analysis.LiteralUint64: {},
analysis.LiteralFloat32: {},
analysis.LiteralFloat64: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int2Arr}: {
analysis.LiteralIntSlice: {valuer: "Int2ArrayFromIntSlice", scanner: "Int2ArrayToIntSlice"},
analysis.LiteralInt8Slice: {valuer: "Int2ArrayFromInt8Slice", scanner: "Int2ArrayToInt8Slice"},
analysis.LiteralInt16Slice: {valuer: "Int2ArrayFromInt16Slice", scanner: "Int2ArrayToInt16Slice"},
analysis.LiteralInt32Slice: {valuer: "Int2ArrayFromInt32Slice", scanner: "Int2ArrayToInt32Slice"},
analysis.LiteralInt64Slice: {valuer: "Int2ArrayFromInt64Slice", scanner: "Int2ArrayToInt64Slice"},
analysis.LiteralUintSlice: {valuer: "Int2ArrayFromUintSlice", scanner: "Int2ArrayToUintSlice"},
analysis.LiteralUint8Slice: {valuer: "Int2ArrayFromUint8Slice", scanner: "Int2ArrayToUint8Slice"},
analysis.LiteralUint16Slice: {valuer: "Int2ArrayFromUint16Slice", scanner: "Int2ArrayToUint16Slice"},
analysis.LiteralUint32Slice: {valuer: "Int2ArrayFromUint32Slice", scanner: "Int2ArrayToUint32Slice"},
analysis.LiteralUint64Slice: {valuer: "Int2ArrayFromUint64Slice", scanner: "Int2ArrayToUint64Slice"},
analysis.LiteralFloat32Slice: {valuer: "Int2ArrayFromFloat32Slice", scanner: "Int2ArrayToFloat32Slice"},
analysis.LiteralFloat64Slice: {valuer: "Int2ArrayFromFloat64Slice", scanner: "Int2ArrayToFloat64Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int2Vector}: {
analysis.LiteralIntSlice: {valuer: "Int2VectorFromIntSlice", scanner: "Int2VectorToIntSlice"},
analysis.LiteralInt8Slice: {valuer: "Int2VectorFromInt8Slice", scanner: "Int2VectorToInt8Slice"},
analysis.LiteralInt16Slice: {valuer: "Int2VectorFromInt16Slice", scanner: "Int2VectorToInt16Slice"},
analysis.LiteralInt32Slice: {valuer: "Int2VectorFromInt32Slice", scanner: "Int2VectorToInt32Slice"},
analysis.LiteralInt64Slice: {valuer: "Int2VectorFromInt64Slice", scanner: "Int2VectorToInt64Slice"},
analysis.LiteralUintSlice: {valuer: "Int2VectorFromUintSlice", scanner: "Int2VectorToUintSlice"},
analysis.LiteralUint8Slice: {valuer: "Int2VectorFromUint8Slice", scanner: "Int2VectorToUint8Slice"},
analysis.LiteralUint16Slice: {valuer: "Int2VectorFromUint16Slice", scanner: "Int2VectorToUint16Slice"},
analysis.LiteralUint32Slice: {valuer: "Int2VectorFromUint32Slice", scanner: "Int2VectorToUint32Slice"},
analysis.LiteralUint64Slice: {valuer: "Int2VectorFromUint64Slice", scanner: "Int2VectorToUint64Slice"},
analysis.LiteralFloat32Slice: {valuer: "Int2VectorFromFloat32Slice", scanner: "Int2VectorToFloat32Slice"},
analysis.LiteralFloat64Slice: {valuer: "Int2VectorFromFloat64Slice", scanner: "Int2VectorToFloat64Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int2VectorArr}: {
analysis.LiteralIntSliceSlice: {valuer: "Int2VectorArrayFromIntSliceSlice", scanner: "Int2VectorArrayToIntSliceSlice"},
analysis.LiteralInt8SliceSlice: {valuer: "Int2VectorArrayFromInt8SliceSlice", scanner: "Int2VectorArrayToInt8SliceSlice"},
analysis.LiteralInt16SliceSlice: {valuer: "Int2VectorArrayFromInt16SliceSlice", scanner: "Int2VectorArrayToInt16SliceSlice"},
analysis.LiteralInt32SliceSlice: {valuer: "Int2VectorArrayFromInt32SliceSlice", scanner: "Int2VectorArrayToInt32SliceSlice"},
analysis.LiteralInt64SliceSlice: {valuer: "Int2VectorArrayFromInt64SliceSlice", scanner: "Int2VectorArrayToInt64SliceSlice"},
analysis.LiteralUintSliceSlice: {valuer: "Int2VectorArrayFromUintSliceSlice", scanner: "Int2VectorArrayToUintSliceSlice"},
analysis.LiteralUint8SliceSlice: {valuer: "Int2VectorArrayFromUint8SliceSlice", scanner: "Int2VectorArrayToUint8SliceSlice"},
analysis.LiteralUint16SliceSlice: {valuer: "Int2VectorArrayFromUint16SliceSlice", scanner: "Int2VectorArrayToUint16SliceSlice"},
analysis.LiteralUint32SliceSlice: {valuer: "Int2VectorArrayFromUint32SliceSlice", scanner: "Int2VectorArrayToUint32SliceSlice"},
analysis.LiteralUint64SliceSlice: {valuer: "Int2VectorArrayFromUint64SliceSlice", scanner: "Int2VectorArrayToUint64SliceSlice"},
analysis.LiteralFloat32SliceSlice: {valuer: "Int2VectorArrayFromFloat32SliceSlice", scanner: "Int2VectorArrayToFloat32SliceSlice"},
analysis.LiteralFloat64SliceSlice: {valuer: "Int2VectorArrayFromFloat64SliceSlice", scanner: "Int2VectorArrayToFloat64SliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int4}: {
analysis.LiteralInt: {},
analysis.LiteralInt8: {},
analysis.LiteralInt16: {},
analysis.LiteralInt32: {},
analysis.LiteralInt64: {},
analysis.LiteralUint: {},
analysis.LiteralUint8: {},
analysis.LiteralUint16: {},
analysis.LiteralUint32: {},
analysis.LiteralUint64: {},
analysis.LiteralFloat32: {},
analysis.LiteralFloat64: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int4Arr}: {
analysis.LiteralIntSlice: {valuer: "Int4ArrayFromIntSlice", scanner: "Int4ArrayToIntSlice"},
analysis.LiteralInt8Slice: {valuer: "Int4ArrayFromInt8Slice", scanner: "Int4ArrayToInt8Slice"},
analysis.LiteralInt16Slice: {valuer: "Int4ArrayFromInt16Slice", scanner: "Int4ArrayToInt16Slice"},
analysis.LiteralInt32Slice: {valuer: "Int4ArrayFromInt32Slice", scanner: "Int4ArrayToInt32Slice"},
analysis.LiteralInt64Slice: {valuer: "Int4ArrayFromInt64Slice", scanner: "Int4ArrayToInt64Slice"},
analysis.LiteralUintSlice: {valuer: "Int4ArrayFromUintSlice", scanner: "Int4ArrayToUintSlice"},
analysis.LiteralUint8Slice: {valuer: "Int4ArrayFromUint8Slice", scanner: "Int4ArrayToUint8Slice"},
analysis.LiteralUint16Slice: {valuer: "Int4ArrayFromUint16Slice", scanner: "Int4ArrayToUint16Slice"},
analysis.LiteralUint32Slice: {valuer: "Int4ArrayFromUint32Slice", scanner: "Int4ArrayToUint32Slice"},
analysis.LiteralUint64Slice: {valuer: "Int4ArrayFromUint64Slice", scanner: "Int4ArrayToUint64Slice"},
analysis.LiteralFloat32Slice: {valuer: "Int4ArrayFromFloat32Slice", scanner: "Int4ArrayToFloat32Slice"},
analysis.LiteralFloat64Slice: {valuer: "Int4ArrayFromFloat64Slice", scanner: "Int4ArrayToFloat64Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int4Range}: {
analysis.LiteralIntArray2: {valuer: "Int4RangeFromIntArray2", scanner: "Int4RangeToIntArray2"},
analysis.LiteralInt8Array2: {valuer: "Int4RangeFromInt8Array2", scanner: "Int4RangeToInt8Array2"},
analysis.LiteralInt16Array2: {valuer: "Int4RangeFromInt16Array2", scanner: "Int4RangeToInt16Array2"},
analysis.LiteralInt32Array2: {valuer: "Int4RangeFromInt32Array2", scanner: "Int4RangeToInt32Array2"},
analysis.LiteralInt64Array2: {valuer: "Int4RangeFromInt64Array2", scanner: "Int4RangeToInt64Array2"},
analysis.LiteralUintArray2: {valuer: "Int4RangeFromUintArray2", scanner: "Int4RangeToUintArray2"},
analysis.LiteralUint8Array2: {valuer: "Int4RangeFromUint8Array2", scanner: "Int4RangeToUint8Array2"},
analysis.LiteralUint16Array2: {valuer: "Int4RangeFromUint16Array2", scanner: "Int4RangeToUint16Array2"},
analysis.LiteralUint32Array2: {valuer: "Int4RangeFromUint32Array2", scanner: "Int4RangeToUint32Array2"},
analysis.LiteralUint64Array2: {valuer: "Int4RangeFromUint64Array2", scanner: "Int4RangeToUint64Array2"},
analysis.LiteralFloat32Array2: {valuer: "Int4RangeFromFloat32Array2", scanner: "Int4RangeToFloat32Array2"},
analysis.LiteralFloat64Array2: {valuer: "Int4RangeFromFloat64Array2", scanner: "Int4RangeToFloat64Array2"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int4RangeArr}: {
analysis.LiteralIntArray2Slice: {valuer: "Int4RangeArrayFromIntArray2Slice", scanner: "Int4RangeArrayToIntArray2Slice"},
analysis.LiteralInt8Array2Slice: {valuer: "Int4RangeArrayFromInt8Array2Slice", scanner: "Int4RangeArrayToInt8Array2Slice"},
analysis.LiteralInt16Array2Slice: {valuer: "Int4RangeArrayFromInt16Array2Slice", scanner: "Int4RangeArrayToInt16Array2Slice"},
analysis.LiteralInt32Array2Slice: {valuer: "Int4RangeArrayFromInt32Array2Slice", scanner: "Int4RangeArrayToInt32Array2Slice"},
analysis.LiteralInt64Array2Slice: {valuer: "Int4RangeArrayFromInt64Array2Slice", scanner: "Int4RangeArrayToInt64Array2Slice"},
analysis.LiteralUintArray2Slice: {valuer: "Int4RangeArrayFromUintArray2Slice", scanner: "Int4RangeArrayToUintArray2Slice"},
analysis.LiteralUint8Array2Slice: {valuer: "Int4RangeArrayFromUint8Array2Slice", scanner: "Int4RangeArrayToUint8Array2Slice"},
analysis.LiteralUint16Array2Slice: {valuer: "Int4RangeArrayFromUint16Array2Slice", scanner: "Int4RangeArrayToUint16Array2Slice"},
analysis.LiteralUint32Array2Slice: {valuer: "Int4RangeArrayFromUint32Array2Slice", scanner: "Int4RangeArrayToUint32Array2Slice"},
analysis.LiteralUint64Array2Slice: {valuer: "Int4RangeArrayFromUint64Array2Slice", scanner: "Int4RangeArrayToUint64Array2Slice"},
analysis.LiteralFloat32Array2Slice: {valuer: "Int4RangeArrayFromFloat32Array2Slice", scanner: "Int4RangeArrayToFloat32Array2Slice"},
analysis.LiteralFloat64Array2Slice: {valuer: "Int4RangeArrayFromFloat64Array2Slice", scanner: "Int4RangeArrayToFloat64Array2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int8}: {
analysis.LiteralInt: {},
analysis.LiteralInt8: {},
analysis.LiteralInt16: {},
analysis.LiteralInt32: {},
analysis.LiteralInt64: {},
analysis.LiteralUint: {},
analysis.LiteralUint8: {},
analysis.LiteralUint16: {},
analysis.LiteralUint32: {},
analysis.LiteralUint64: {},
analysis.LiteralFloat32: {},
analysis.LiteralFloat64: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int8Arr}: {
analysis.LiteralIntSlice: {valuer: "Int8ArrayFromIntSlice", scanner: "Int8ArrayToIntSlice"},
analysis.LiteralInt8Slice: {valuer: "Int8ArrayFromInt8Slice", scanner: "Int8ArrayToInt8Slice"},
analysis.LiteralInt16Slice: {valuer: "Int8ArrayFromInt16Slice", scanner: "Int8ArrayToInt16Slice"},
analysis.LiteralInt32Slice: {valuer: "Int8ArrayFromInt32Slice", scanner: "Int8ArrayToInt32Slice"},
analysis.LiteralInt64Slice: {valuer: "Int8ArrayFromInt64Slice", scanner: "Int8ArrayToInt64Slice"},
analysis.LiteralUintSlice: {valuer: "Int8ArrayFromUintSlice", scanner: "Int8ArrayToUintSlice"},
analysis.LiteralUint8Slice: {valuer: "Int8ArrayFromUint8Slice", scanner: "Int8ArrayToUint8Slice"},
analysis.LiteralUint16Slice: {valuer: "Int8ArrayFromUint16Slice", scanner: "Int8ArrayToUint16Slice"},
analysis.LiteralUint32Slice: {valuer: "Int8ArrayFromUint32Slice", scanner: "Int8ArrayToUint32Slice"},
analysis.LiteralUint64Slice: {valuer: "Int8ArrayFromUint64Slice", scanner: "Int8ArrayToUint64Slice"},
analysis.LiteralFloat32Slice: {valuer: "Int8ArrayFromFloat32Slice", scanner: "Int8ArrayToFloat32Slice"},
analysis.LiteralFloat64Slice: {valuer: "Int8ArrayFromFloat64Slice", scanner: "Int8ArrayToFloat64Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int8Range}: {
analysis.LiteralIntArray2: {valuer: "Int8RangeFromIntArray2", scanner: "Int8RangeToIntArray2"},
analysis.LiteralInt8Array2: {valuer: "Int8RangeFromInt8Array2", scanner: "Int8RangeToInt8Array2"},
analysis.LiteralInt16Array2: {valuer: "Int8RangeFromInt16Array2", scanner: "Int8RangeToInt16Array2"},
analysis.LiteralInt32Array2: {valuer: "Int8RangeFromInt32Array2", scanner: "Int8RangeToInt32Array2"},
analysis.LiteralInt64Array2: {valuer: "Int8RangeFromInt64Array2", scanner: "Int8RangeToInt64Array2"},
analysis.LiteralUintArray2: {valuer: "Int8RangeFromUintArray2", scanner: "Int8RangeToUintArray2"},
analysis.LiteralUint8Array2: {valuer: "Int8RangeFromUint8Array2", scanner: "Int8RangeToUint8Array2"},
analysis.LiteralUint16Array2: {valuer: "Int8RangeFromUint16Array2", scanner: "Int8RangeToUint16Array2"},
analysis.LiteralUint32Array2: {valuer: "Int8RangeFromUint32Array2", scanner: "Int8RangeToUint32Array2"},
analysis.LiteralUint64Array2: {valuer: "Int8RangeFromUint64Array2", scanner: "Int8RangeToUint64Array2"},
analysis.LiteralFloat32Array2: {valuer: "Int8RangeFromFloat32Array2", scanner: "Int8RangeToFloat32Array2"},
analysis.LiteralFloat64Array2: {valuer: "Int8RangeFromFloat64Array2", scanner: "Int8RangeToFloat64Array2"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Int8RangeArr}: {
analysis.LiteralIntArray2Slice: {valuer: "Int8RangeArrayFromIntArray2Slice", scanner: "Int8RangeArrayToIntArray2Slice"},
analysis.LiteralInt8Array2Slice: {valuer: "Int8RangeArrayFromInt8Array2Slice", scanner: "Int8RangeArrayToInt8Array2Slice"},
analysis.LiteralInt16Array2Slice: {valuer: "Int8RangeArrayFromInt16Array2Slice", scanner: "Int8RangeArrayToInt16Array2Slice"},
analysis.LiteralInt32Array2Slice: {valuer: "Int8RangeArrayFromInt32Array2Slice", scanner: "Int8RangeArrayToInt32Array2Slice"},
analysis.LiteralInt64Array2Slice: {valuer: "Int8RangeArrayFromInt64Array2Slice", scanner: "Int8RangeArrayToInt64Array2Slice"},
analysis.LiteralUintArray2Slice: {valuer: "Int8RangeArrayFromUintArray2Slice", scanner: "Int8RangeArrayToUintArray2Slice"},
analysis.LiteralUint8Array2Slice: {valuer: "Int8RangeArrayFromUint8Array2Slice", scanner: "Int8RangeArrayToUint8Array2Slice"},
analysis.LiteralUint16Array2Slice: {valuer: "Int8RangeArrayFromUint16Array2Slice", scanner: "Int8RangeArrayToUint16Array2Slice"},
analysis.LiteralUint32Array2Slice: {valuer: "Int8RangeArrayFromUint32Array2Slice", scanner: "Int8RangeArrayToUint32Array2Slice"},
analysis.LiteralUint64Array2Slice: {valuer: "Int8RangeArrayFromUint64Array2Slice", scanner: "Int8RangeArrayToUint64Array2Slice"},
analysis.LiteralFloat32Array2Slice: {valuer: "Int8RangeArrayFromFloat32Array2Slice", scanner: "Int8RangeArrayToFloat32Array2Slice"},
analysis.LiteralFloat64Array2Slice: {valuer: "Int8RangeArrayFromFloat64Array2Slice", scanner: "Int8RangeArrayToFloat64Array2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Interval}: {
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.IntervalArr}: {
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.JSON}: {
analysis.LiteralEmptyInterface: {valuer: "JSON", scanner: "JSON"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.JSONArr}: {
analysis.LiteralByteSliceSlice: {valuer: "JSONArrayFromByteSliceSlice", scanner: "JSONArrayToByteSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.JSONB}: {
analysis.LiteralEmptyInterface: {valuer: "JSON", scanner: "JSON"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.JSONBArr}: {
analysis.LiteralByteSliceSlice: {valuer: "JSONArrayFromByteSliceSlice", scanner: "JSONArrayToByteSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Line}: {
analysis.LiteralFloat64Array3: {valuer: "LineFromFloat64Array3", scanner: "LineToFloat64Array3"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.LineArr}: {
analysis.LiteralFloat64Array3Slice: {valuer: "LineArrayFromFloat64Array3Slice", scanner: "LineArrayToFloat64Array3Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.LSeg}: {
analysis.LiteralFloat64Array2Array2: {valuer: "LsegFromFloat64Array2Array2", scanner: "LsegToFloat64Array2Array2"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.LSegArr}: {
analysis.LiteralFloat64Array2Array2Slice: {valuer: "LsegArrayFromFloat64Array2Array2Slice", scanner: "LsegArrayToFloat64Array2Array2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.MACAddr}: {
analysis.LiteralHardwareAddr: {valuer: "MACAddrFromHardwareAddr", scanner: "MACAddrToHardwareAddr"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.MACAddrArr}: {
analysis.LiteralHardwareAddrSlice: {valuer: "MACAddrArrayFromHardwareAddrSlice", scanner: "MACAddrArrayToHardwareAddrSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.MACAddr8}: {
analysis.LiteralHardwareAddr: {valuer: "MACAddr8FromHardwareAddr", scanner: "MACAddr8ToHardwareAddr"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.MACAddr8Arr}: {
analysis.LiteralHardwareAddrSlice: {valuer: "MACAddr8ArrayFromHardwareAddrSlice", scanner: "MACAddr8ArrayToHardwareAddrSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Money}: {
analysis.LiteralInt64: {valuer: "MoneyFromInt64", scanner: "MoneyToInt64"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.MoneyArr}: {
analysis.LiteralInt64Slice: {valuer: "MoneyArrayFromInt64Slice", scanner: "MoneyArrayToInt64Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Numeric}: {
analysis.LiteralInt: {},
analysis.LiteralInt8: {},
analysis.LiteralInt16: {},
analysis.LiteralInt32: {},
analysis.LiteralInt64: {},
analysis.LiteralUint: {},
analysis.LiteralUint8: {},
analysis.LiteralUint16: {},
analysis.LiteralUint32: {},
analysis.LiteralUint64: {},
analysis.LiteralFloat32: {},
analysis.LiteralFloat64: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.NumericArr}: {
analysis.LiteralIntSlice: {valuer: "NumericArrayFromIntSlice", scanner: "NumericArrayToIntSlice"},
analysis.LiteralInt8Slice: {valuer: "NumericArrayFromInt8Slice", scanner: "NumericArrayToInt8Slice"},
analysis.LiteralInt16Slice: {valuer: "NumericArrayFromInt16Slice", scanner: "NumericArrayToInt16Slice"},
analysis.LiteralInt32Slice: {valuer: "NumericArrayFromInt32Slice", scanner: "NumericArrayToInt32Slice"},
analysis.LiteralInt64Slice: {valuer: "NumericArrayFromInt64Slice", scanner: "NumericArrayToInt64Slice"},
analysis.LiteralUintSlice: {valuer: "NumericArrayFromUintSlice", scanner: "NumericArrayToUintSlice"},
analysis.LiteralUint8Slice: {valuer: "NumericArrayFromUint8Slice", scanner: "NumericArrayToUint8Slice"},
analysis.LiteralUint16Slice: {valuer: "NumericArrayFromUint16Slice", scanner: "NumericArrayToUint16Slice"},
analysis.LiteralUint32Slice: {valuer: "NumericArrayFromUint32Slice", scanner: "NumericArrayToUint32Slice"},
analysis.LiteralUint64Slice: {valuer: "NumericArrayFromUint64Slice", scanner: "NumericArrayToUint64Slice"},
analysis.LiteralFloat32Slice: {valuer: "NumericArrayFromFloat32Slice", scanner: "NumericArrayToFloat32Slice"},
analysis.LiteralFloat64Slice: {valuer: "NumericArrayFromFloat64Slice", scanner: "NumericArrayToFloat64Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.NumRange}: {
analysis.LiteralIntArray2: {valuer: "NumRangeFromIntArray2", scanner: "NumRangeToIntArray2"},
analysis.LiteralInt8Array2: {valuer: "NumRangeFromInt8Array2", scanner: "NumRangeToInt8Array2"},
analysis.LiteralInt16Array2: {valuer: "NumRangeFromInt16Array2", scanner: "NumRangeToInt16Array2"},
analysis.LiteralInt32Array2: {valuer: "NumRangeFromInt32Array2", scanner: "NumRangeToInt32Array2"},
analysis.LiteralInt64Array2: {valuer: "NumRangeFromInt64Array2", scanner: "NumRangeToInt64Array2"},
analysis.LiteralUintArray2: {valuer: "NumRangeFromUintArray2", scanner: "NumRangeToUintArray2"},
analysis.LiteralUint8Array2: {valuer: "NumRangeFromUint8Array2", scanner: "NumRangeToUint8Array2"},
analysis.LiteralUint16Array2: {valuer: "NumRangeFromUint16Array2", scanner: "NumRangeToUint16Array2"},
analysis.LiteralUint32Array2: {valuer: "NumRangeFromUint32Array2", scanner: "NumRangeToUint32Array2"},
analysis.LiteralUint64Array2: {valuer: "NumRangeFromUint64Array2", scanner: "NumRangeToUint64Array2"},
analysis.LiteralFloat32Array2: {valuer: "NumRangeFromFloat32Array2", scanner: "NumRangeToFloat32Array2"},
analysis.LiteralFloat64Array2: {valuer: "NumRangeFromFloat64Array2", scanner: "NumRangeToFloat64Array2"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.NumRangeArr}: {
analysis.LiteralIntArray2Slice: {valuer: "NumRangeArrayFromIntArray2Slice", scanner: "NumRangeArrayToIntArray2Slice"},
analysis.LiteralInt8Array2Slice: {valuer: "NumRangeArrayFromInt8Array2Slice", scanner: "NumRangeArrayToInt8Array2Slice"},
analysis.LiteralInt16Array2Slice: {valuer: "NumRangeArrayFromInt16Array2Slice", scanner: "NumRangeArrayToInt16Array2Slice"},
analysis.LiteralInt32Array2Slice: {valuer: "NumRangeArrayFromInt32Array2Slice", scanner: "NumRangeArrayToInt32Array2Slice"},
analysis.LiteralInt64Array2Slice: {valuer: "NumRangeArrayFromInt64Array2Slice", scanner: "NumRangeArrayToInt64Array2Slice"},
analysis.LiteralUintArray2Slice: {valuer: "NumRangeArrayFromUintArray2Slice", scanner: "NumRangeArrayToUintArray2Slice"},
analysis.LiteralUint8Array2Slice: {valuer: "NumRangeArrayFromUint8Array2Slice", scanner: "NumRangeArrayToUint8Array2Slice"},
analysis.LiteralUint16Array2Slice: {valuer: "NumRangeArrayFromUint16Array2Slice", scanner: "NumRangeArrayToUint16Array2Slice"},
analysis.LiteralUint32Array2Slice: {valuer: "NumRangeArrayFromUint32Array2Slice", scanner: "NumRangeArrayToUint32Array2Slice"},
analysis.LiteralUint64Array2Slice: {valuer: "NumRangeArrayFromUint64Array2Slice", scanner: "NumRangeArrayToUint64Array2Slice"},
analysis.LiteralFloat32Array2Slice: {valuer: "NumRangeArrayFromFloat32Array2Slice", scanner: "NumRangeArrayToFloat32Array2Slice"},
analysis.LiteralFloat64Array2Slice: {valuer: "NumRangeArrayFromFloat64Array2Slice", scanner: "NumRangeArrayToFloat64Array2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Path}: {
analysis.LiteralFloat64Array2Slice: {valuer: "PathFromFloat64Array2Slice", scanner: "PathToFloat64Array2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.PathArr}: {
analysis.LiteralFloat64Array2SliceSlice: {valuer: "PathArrayFromFloat64Array2SliceSlice", scanner: "PathArrayToFloat64Array2SliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Point}: {
analysis.LiteralFloat64Array2: {valuer: "PointFromFloat64Array2", scanner: "PointToFloat64Array2"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.PointArr}: {
analysis.LiteralFloat64Array2Slice: {valuer: "PointArrayFromFloat64Array2Slice", scanner: "PointArrayToFloat64Array2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Polygon}: {
analysis.LiteralFloat64Array2Slice: {valuer: "PolygonFromFloat64Array2Slice", scanner: "PolygonToFloat64Array2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.PolygonArr}: {
analysis.LiteralFloat64Array2SliceSlice: {valuer: "PolygonArrayFromFloat64Array2SliceSlice", scanner: "PolygonArrayToFloat64Array2SliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Text}: {
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TextArr}: {
analysis.LiteralStringSlice: {valuer: "TextArrayFromStringSlice", scanner: "TextArrayToStringSlice"},
analysis.LiteralByteSliceSlice: {valuer: "TextArrayFromByteSliceSlice", scanner: "TextArrayToByteSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Time}: {
analysis.LiteralTime: {},
analysis.LiteralString: {scanner: "TimeToString"},
analysis.LiteralByteSlice: {scanner: "TimeToByteSlice"},
},
{oid: oid.TimeArr}: {
analysis.LiteralTimeSlice: {valuer: "TimeArrayFromTimeSlice", scanner: "TimeArrayToTimeSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Timestamp}: {
analysis.LiteralTime: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TimestampArr}: {
analysis.LiteralTimeSlice: {valuer: "TimestampArrayFromTimeSlice", scanner: "TimestampArrayToTimeSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Timestamptz}: {
analysis.LiteralTime: {},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TimestamptzArr}: {
analysis.LiteralTimeSlice: {valuer: "TimestamptzArrayFromTimeSlice", scanner: "TimestamptzArrayToTimeSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.Timetz}: {
analysis.LiteralTime: {},
analysis.LiteralString: {scanner: "TimetzToString"},
analysis.LiteralByteSlice: {scanner: "TimetzToByteSlice"},
},
{oid: oid.TimetzArr}: {
analysis.LiteralTimeSlice: {valuer: "TimetzArrayFromTimeSlice", scanner: "TimetzArrayToTimeSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TSQuery}: {
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TSQueryArr}: {
analysis.LiteralStringSlice: {valuer: "TSQueryArrayFromStringSlice", scanner: "TSQueryArrayToStringSlice"},
analysis.LiteralByteSliceSlice: {valuer: "TSQueryArrayFromByteSliceSlice", scanner: "TSQueryArrayToByteSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TsRange}: {
analysis.LiteralTimeArray2: {valuer: "TsRangeFromTimeArray2", scanner: "TsRangeToTimeArray2"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TsRangeArr}: {
analysis.LiteralTimeArray2Slice: {valuer: "TsRangeArrayFromTimeArray2Slice", scanner: "TsRangeArrayToTimeArray2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TsTzRange}: {
analysis.LiteralTimeArray2: {valuer: "TstzRangeFromTimeArray2", scanner: "TstzRangeToTimeArray2"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TsTzRangeArr}: {
analysis.LiteralTimeArray2Slice: {valuer: "TstzRangeArrayFromTimeArray2Slice", scanner: "TstzRangeArrayToTimeArray2Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TSVector}: {
analysis.LiteralStringSlice: {valuer: "TSVectorFromStringSlice", scanner: "TSVectorToStringSlice"},
analysis.LiteralByteSliceSlice: {valuer: "TSVectorFromByteSliceSlice", scanner: "TSVectorToByteSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.TSVectorArr}: {
analysis.LiteralStringSliceSlice: {valuer: "TSVectorArrayFromStringSliceSlice", scanner: "TSVectorArrayToStringSliceSlice"},
analysis.LiteralByteSliceSliceSlice: {valuer: "TSVectorArrayFromByteSliceSliceSlice", scanner: "TSVectorArrayToByteSliceSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.UUID}: {
analysis.LiteralByteArray16: {valuer: "UUIDFromByteArray16", scanner: "UUIDToByteArray16"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.UUIDArr}: {
analysis.LiteralByteArray16Slice: {valuer: "UUIDArrayFromByteArray16Slice", scanner: "UUIDArrayToByteArray16Slice"},
analysis.LiteralStringSlice: {valuer: "UUIDArrayFromStringSlice", scanner: "UUIDArrayToStringSlice"},
analysis.LiteralByteSliceSlice: {valuer: "UUIDArrayFromByteSliceSlice", scanner: "UUIDArrayToByteSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.VarBit}: {
analysis.LiteralInt64: {valuer: "VarBitFromInt64", scanner: "VarBitToInt64"},
analysis.LiteralBoolSlice: {valuer: "VarBitFromBoolSlice", scanner: "VarBitToBoolSlice"},
analysis.LiteralUint8Slice: {valuer: "VarBitFromUint8Slice", scanner: "VarBitToUint8Slice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.VarBitArr}: {
analysis.LiteralInt64Slice: {valuer: "VarBitArrayFromInt64Slice", scanner: "VarBitArrayToInt64Slice"},
analysis.LiteralBoolSliceSlice: {valuer: "VarBitArrayFromBoolSliceSlice", scanner: "VarBitArrayToBoolSliceSlice"},
analysis.LiteralUint8SliceSlice: {valuer: "VarBitArrayFromUint8SliceSlice", scanner: "VarBitArrayToUint8SliceSlice"},
analysis.LiteralStringSlice: {valuer: "VarBitArrayFromStringSlice", scanner: "VarBitArrayToStringSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.VarChar}: {
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.VarCharArr}: {
analysis.LiteralStringSlice: {valuer: "VarCharArrayFromStringSlice", scanner: "VarCharArrayToStringSlice"},
analysis.LiteralByteSliceSlice: {valuer: "VarCharArrayFromByteSliceSlice", scanner: "VarCharArrayToByteSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.XML}: {
analysis.LiteralEmptyInterface: {valuer: "XML", scanner: "XML"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
{oid: oid.XMLArr}: {
analysis.LiteralByteSliceSlice: {valuer: "XMLArrayFromByteSliceSlice", scanner: "XMLArrayToByteSliceSlice"},
analysis.LiteralString: {},
analysis.LiteralByteSlice: {},
},
}
}
|
package mysqladapter
import "github.com/AlejandroWaiz/novels-box/internal/domain/structs"
// GetAllNovelsInDB returns every row of the `allnovels` table as a slice
// of structs.Novel. It returns a nil slice and the first error hit while
// querying, scanning, or iterating.
func (db *MySqlAdapter) GetAllNovelsInDB() (novels []structs.Novel, err error) {
	q := "SELECT * FROM `allnovels`"
	rows, err := db.dbconn.Query(q)
	if err != nil {
		return nil, err
	}
	// Bug fix: the result set was never closed, leaking the underlying
	// connection on every early return.
	defer rows.Close()
	for rows.Next() {
		var novel structs.Novel
		if err = rows.Scan(&novel.ID, &novel.Title, &novel.Author, &novel.Genres); err != nil {
			return nil, err
		}
		novels = append(novels, novel)
	}
	// Bug fix: iteration errors were silently dropped by the original loop.
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return novels, nil
}
|
/*
* Copyright © 2019-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adabas
import (
"github.com/SoftwareAG/adabas-go-api/adatypes"
)
// Cursoring supports cursor instances that read a record list in chunks
// defined by a search or descriptor. A cursor is created by one of the
// *Cursoring methods on ReadRequest and consumed with HasNextRecord plus
// NextRecord (record values) or NextData (struct data).
type Cursoring struct {
	// FieldLength in streaming mode of a field the field length of the field
	FieldLength uint32
	// offset counts records already consumed from the current chunk;
	// reset to 0 whenever a new chunk is fetched
	offset uint32
	// search expression re-used by queryFunction to fetch the next chunk
	search string
	// descriptors used for ordering/histogram reads; empty when unused
	descriptors string
	// empty is true when the initial read returned no records at all
	empty bool
	// result holds the currently buffered chunk of the response
	result *Response
	// request is the originating ReadRequest used to fetch further chunks
	request *ReadRequest
	// adabasRequest carries low-level call state, used to detect EOF
	adabasRequest *adatypes.Request
	// err stores the last error observed while cursoring (see Error())
	err error
}
// ReadLogicalWithCursoring starts a cursored logical search of records in
// Adabas. The returned cursor reads records chunk-wise using Multifetch
// calls; the chunk size is taken from `Limit` (defaulted to 10 and capped
// at 20).
func (request *ReadRequest) ReadLogicalWithCursoring(search string) (cursor *Cursoring, err error) {
	c := &Cursoring{}
	request.cursoring = c
	if request.Limit == 0 {
		request.Limit = 10
	}
	mf := uint32(request.Limit)
	if mf > 20 {
		mf = 20
	}
	request.Multifetch = mf
	result, rerr := request.ReadLogicalWith(search)
	if rerr != nil {
		return nil, rerr
	}
	c.result = result
	c.search = search
	c.request = request
	request.queryFunction = request.readLogicalWith
	c.empty = result.NrRecords() == 0
	return c, nil
}
// ReadLogicalByCursoring starts a cursored descriptor read of records in
// Adabas. The returned cursor reads records chunk-wise using Multifetch
// calls; the chunk size is taken from `Limit` (defaulted to 10 and capped
// at 20).
func (request *ReadRequest) ReadLogicalByCursoring(descriptor string) (cursor *Cursoring, err error) {
	c := &Cursoring{}
	request.cursoring = c
	if request.Limit == 0 {
		request.Limit = 10
	}
	mf := uint32(request.Limit)
	if mf > 20 {
		mf = 20
	}
	request.Multifetch = mf
	result, rerr := request.ReadLogicalBy(descriptor)
	if rerr != nil {
		return nil, rerr
	}
	c.result = result
	c.search = ""
	c.request = request
	request.queryFunction = request.readLogicalBy
	c.empty = result.NrRecords() == 0
	return c, nil
}
// SearchAndOrderWithCursoring starts a cursored search of records in
// Adabas ordered by a descriptor. The returned cursor reads records
// chunk-wise using Multifetch calls; the chunk size is taken from `Limit`
// (defaulted to 10 and capped at 20).
func (request *ReadRequest) SearchAndOrderWithCursoring(search, descriptors string) (cursor *Cursoring, err error) {
	c := &Cursoring{}
	request.cursoring = c
	if request.Limit == 0 {
		request.Limit = 10
	}
	mf := uint32(request.Limit)
	if mf > 20 {
		mf = 20
	}
	request.Multifetch = mf
	result, rerr := request.SearchAndOrder(search, descriptors)
	if rerr != nil {
		return nil, rerr
	}
	c.result = result
	c.search = search
	c.descriptors = descriptors
	c.request = request
	request.queryFunction = request.SearchAndOrder
	c.empty = result.NrRecords() == 0
	return c, nil
}
// HistogramByCursoring provides the descriptor read of a field using
// cursoring. The returned cursor reads records chunk-wise using
// Multifetch calls; the chunk size is taken from `Limit` (defaulted to 10
// and capped at 20).
func (request *ReadRequest) HistogramByCursoring(descriptor string) (cursor *Cursoring, err error) {
	c := &Cursoring{}
	request.cursoring = c
	if request.Limit == 0 {
		request.Limit = 10
	}
	mf := uint32(request.Limit)
	if mf > 20 {
		mf = 20
	}
	request.Multifetch = mf
	result, rerr := request.HistogramBy(descriptor)
	if rerr != nil {
		return nil, rerr
	}
	c.result = result
	c.search = ""
	c.descriptors = descriptor
	c.request = request
	request.queryFunction = request.histogramBy
	c.empty = result.NrRecords() == 0
	return c, nil
}
// HistogramWithCursoring provides the searched read of a descriptor of a
// field, reading further data only on request through a cursor. The
// cursor reads records chunk-wise using Multifetch calls; the chunk size
// is taken from `Limit` (defaulted to 10 and capped at 20).
func (request *ReadRequest) HistogramWithCursoring(search string) (cursor *Cursoring, err error) {
	c := &Cursoring{}
	request.cursoring = c
	if request.Limit == 0 {
		request.Limit = 10
	}
	mf := uint32(request.Limit)
	if mf > 20 {
		mf = 20
	}
	request.Multifetch = mf
	result, rerr := request.HistogramWith(search)
	if rerr != nil {
		return nil, rerr
	}
	c.result = result
	c.search = search
	c.request = request
	request.queryFunction = request.histogramWith
	c.empty = result.NrRecords() == 0
	return c, nil
}
// ReadPhysicalWithCursoring starts a cursored physical-sequence read of
// records in Adabas. The returned cursor reads records chunk-wise using
// Multifetch calls; the chunk size is taken from `Limit` (defaulted to 20
// and capped at 20).
func (request *ReadRequest) ReadPhysicalWithCursoring() (cursor *Cursoring, err error) {
	c := &Cursoring{}
	request.cursoring = c
	// Default the read chunk to 20 if not defined.
	if request.Limit == 0 {
		request.Limit = 20
	}
	mf := uint32(request.Limit)
	if mf > 20 {
		mf = 20
	}
	request.Multifetch = mf
	result, rerr := request.ReadPhysicalSequence()
	if rerr != nil {
		return nil, rerr
	}
	c.result = result
	c.search = ""
	c.request = request
	request.queryFunction = request.readPhysical
	c.empty = result.NrRecords() == 0
	return c, nil
}
// HasNextRecord reports whether a next record or stream entry is
// available, returning `true` if it is.
// This method will call Adabas if no entry is buffered and reads new
// entries using Multifetch or partial LOB.
// If an error occurs during processing the function returns false and
// the caller needs to check with the cursor's Error() method.
func (cursor *Cursoring) HasNextRecord() (hasNext bool) {
	// A nil cursor or an initially empty result set never has a next record.
	if cursor == nil || cursor.empty {
		return false
	}
	adatypes.Central.Log.Debugf("Check next record: %v offset=%d values=%d data=%d", hasNext, cursor.offset+1, len(cursor.result.Values), len(cursor.result.Data))
	// offset counts records already consumed from the current chunk; once
	// it reaches the chunk size, the next chunk must be fetched.
	if cursor.offset+1 > uint32(cursor.result.NrRecords()) {
		// If the last low-level call did not finish with a normal response
		// and no stream cursor is active, treat it as end-of-data.
		if cursor.adabasRequest == nil || (cursor.adabasRequest.Response != AdaNormal && cursor.adabasRequest.Option.StreamCursor == 0) {
			if cursor.adabasRequest != nil {
				adatypes.Central.Log.Debugf("Error adabas request empty of not normal response, may be EOF %#v", cursor.adabasRequest.Response)
			} else {
				adatypes.Central.Log.Debugf("Error adabas request empty %#v", cursor.adabasRequest)
			}
			return false
		}
		// Fetch the next chunk using the query function stored at cursor
		// creation time (read logical/physical, histogram, ...).
		cursor.result, cursor.err = cursor.request.queryFunction(cursor.search, cursor.descriptors)
		if cursor.err != nil || cursor.result == nil {
			adatypes.Central.Log.Debugf("Error query function %v %#v", cursor.err, cursor.result)
			return false
		}
		adatypes.Central.Log.Debugf("Nr Records cursored %d", cursor.result.NrRecords())
		// Restart consumption at the beginning of the fresh chunk.
		hasNext = cursor.result.NrRecords() > 0
		cursor.offset = 0
	} else {
		hasNext = true
	}
	adatypes.Central.Log.Debugf("Has next record: %v", hasNext)
	return
}
// NextRecord advances the cursor to the next record. If the current
// chunk still contains a record, no Adabas call is sent; otherwise the
// next chunk is read into memory. `HasNextRecord()` may be called
// beforehand.
// It returns (nil, nil) at end of data, generic error 141 if the cursor
// was created on an empty result, and generic error 139 if the response
// carries struct data instead of record values (use NextData).
func (cursor *Cursoring) NextRecord() (record *Record, err error) {
	if cursor.empty {
		return nil, adatypes.NewGenericError(141)
	}
	if cursor.err != nil {
		// Bug fix: log the stored cursor error; the named return value
		// `err` is always nil at this point, so the original logged nothing
		// useful.
		adatypes.Central.Log.Debugf("Error next record: %v", cursor.err)
		return nil, cursor.err
	}
	adatypes.Central.Log.Debugf("Get next record offset=%d/%d", cursor.offset, len(cursor.result.Values))
	// Chunk exhausted: try to fetch the next one before advancing.
	if cursor.offset+1 > uint32(cursor.result.NrRecords()) {
		if !cursor.HasNextRecord() {
			return nil, nil
		}
	}
	cursor.offset++
	adatypes.Central.Log.Debugf("ISN=%d ISN quantity=%d", cursor.result.Values[cursor.offset-1].Isn,
		cursor.result.Values[cursor.offset-1].Quantity)
	// Record cursoring cannot be mixed with struct-data results.
	if len(cursor.result.Data) > 0 {
		return nil, adatypes.NewGenericError(139)
	}
	return cursor.result.Values[cursor.offset-1], nil
}
// NextData advances the cursor to the next struct representation of the
// data record. If the current chunk still contains an entry, no call is
// sent; otherwise the next chunk is read into memory. `HasNextRecord()`
// may be called beforehand.
// It returns (nil, nil) at end of data, generic error 141 if the cursor
// was created on an empty result, and generic error 139 if the response
// carries record values instead of struct data (use NextRecord).
func (cursor *Cursoring) NextData() (record interface{}, err error) {
	if cursor.empty {
		return nil, adatypes.NewGenericError(141)
	}
	if cursor.err != nil {
		// Bug fix: log the stored cursor error; the named return value
		// `err` is always nil at this point, so the original logged nothing
		// useful.
		adatypes.Central.Log.Debugf("Error next data record: %v", cursor.err)
		return nil, cursor.err
	}
	adatypes.Central.Log.Debugf("Get next data record offset=%d/%d", cursor.offset, len(cursor.result.Values))
	// Chunk exhausted: try to fetch the next one before advancing.
	if cursor.offset+1 > uint32(cursor.result.NrRecords()) {
		if !cursor.HasNextRecord() {
			return nil, nil
		}
	}
	cursor.offset++
	// Struct-data cursoring cannot be mixed with record value results.
	if len(cursor.result.Values) > 0 {
		return nil, adatypes.NewGenericError(139)
	}
	return cursor.result.Data[cursor.offset-1], nil
}
// Error provides the current error state of the cursor. A nil cursor
// reports no error.
func (cursor *Cursoring) Error() (err error) {
	if cursor != nil {
		err = cursor.err
	}
	return
}
|
package common
import "time"
func GetNow() time.Time {
return time.Now()
}
func GetTomorrow() time.Time {
return time.Now().Add(time.Hour * 24)
}
func BeginingOfDay() time.Time {
today := time.Now()
year, month, day := today.Date()
return time.Date(year, month, day, 0, 0, 0, 0, today.Location())
}
func EndOfDay(t time.Time) time.Time {
today := time.Now()
year, month, day := today.Date()
return time.Date(year, month, day, 23, 59, 59, 0, today.Location())
}
|
package repository
import (
"context"
"fmt"
"strings"
"github.com/TodoApp2021/gorestreact/pkg/models"
"github.com/jackc/pgx/v4/pgxpool"
)
// TodoListPostgres implements todo-list persistence backed by a
// PostgreSQL connection pool (pgx).
type TodoListPostgres struct {
	// pool is the shared pgx connection pool; a connection is acquired
	// per method call and released on return
	pool *pgxpool.Pool
}
// NewTodoListPostgres builds a TodoListPostgres repository on top of the
// given pgx connection pool.
func NewTodoListPostgres(pool *pgxpool.Pool) *TodoListPostgres {
	repo := new(TodoListPostgres)
	repo.pool = pool
	return repo
}
// Create inserts a new todo list for the given user inside a single
// transaction: one row in the lists table plus one row in the
// user-to-list link table. It returns the id of the new list.
func (t *TodoListPostgres) Create(userId int, list models.TodoList) (int, error) {
	ctx := context.Background()
	conn, err := t.pool.Acquire(ctx)
	if err != nil {
		return 0, err
	}
	defer conn.Release()
	tx, err := conn.Begin(ctx)
	if err != nil {
		return 0, err
	}
	// Roll back on any early return. Rollback after a successful Commit is
	// a harmless no-op in pgx, so a single deferred call replaces the
	// duplicated (and error-swallowing) rollback branches of the original.
	defer tx.Rollback(ctx)
	var id int
	createListQuery := fmt.Sprintf("INSERT INTO %s (title, description) VALUES ($1, $2) RETURNING id", todoListsTable)
	row := tx.QueryRow(ctx, createListQuery, list.Title, list.Description)
	if err := row.Scan(&id); err != nil {
		return 0, err
	}
	createUsersListQuery := fmt.Sprintf("INSERT INTO %s (user_id, list_id) VALUES ($1, $2)", usersListsTable)
	if _, err := tx.Exec(ctx, createUsersListQuery, userId, id); err != nil {
		return 0, err
	}
	return id, tx.Commit(ctx)
}
// GetAll returns the todo lists linked to userId, optionally restricted
// by the string-typed limit/offset values, together with the total count
// of the user's lists (computed without limit/offset).
func (t *TodoListPostgres) GetAll(userId int, limit, offset string) ([]models.TodoList, int, error) {
	ctx := context.Background()
	conn, err := t.pool.Acquire(ctx)
	if err != nil {
		return nil, 0, err
	}
	defer conn.Release()
	// Build the optional LIMIT/OFFSET clause; $1 is always the user id.
	setValues := make([]string, 0)
	args := make([]interface{}, 0)
	args = append(args, userId)
	argId := 2
	if limit != "" {
		setValues = append(setValues, fmt.Sprintf("LIMIT $%d", argId))
		args = append(args, limit)
		argId++
	}
	if offset != "" {
		setValues = append(setValues, fmt.Sprintf("OFFSET $%d", argId))
		args = append(args, offset)
	}
	setQuery := strings.Join(setValues, " ")
	lists := make([]models.TodoList, 0)
	count := 0
	// limit offset
	query := fmt.Sprintf("SELECT tl.id, tl.title, tl.description FROM %s tl INNER JOIN %s ul on tl.id = ul.list_id WHERE ul.user_id = $1 %s",
		todoListsTable, usersListsTable, setQuery)
	queryCount := fmt.Sprintf(
		"SELECT COUNT(*) FROM (SELECT tl.id, tl.title, tl.description FROM %s tl INNER JOIN %s ul on tl.id = ul.list_id WHERE ul.user_id = $1 ORDER BY tl.id) as t",
		todoListsTable, usersListsTable)
	row := conn.QueryRow(ctx, queryCount, userId)
	if err := row.Scan(&count); err != nil {
		return nil, 0, err
	}
	rows, err := conn.Query(ctx, query, args...)
	if err != nil {
		return nil, 0, err
	}
	defer rows.Close()
	for rows.Next() {
		list := models.TodoList{}
		if err = rows.Scan(&list.Id, &list.Title, &list.Description); err != nil {
			return nil, 0, err
		}
		lists = append(lists, list)
	}
	// Bug fix: surface errors that occurred during row iteration; the
	// original loop silently ignored them.
	if err = rows.Err(); err != nil {
		return nil, 0, err
	}
	return lists, count, nil
}
// GetById returns the single todo list with id listId that is linked to
// userId. The zero-value list and an error are returned when the lookup
// fails or no such list exists.
func (t *TodoListPostgres) GetById(userId, listId int) (models.TodoList, error) {
	ctx := context.Background()
	var list models.TodoList
	conn, err := t.pool.Acquire(ctx)
	if err != nil {
		return list, err
	}
	defer conn.Release()
	query := fmt.Sprintf(`SELECT tl.id, tl.title, tl.description FROM %s tl
	INNER JOIN %s ul on tl.id = ul.list_id WHERE ul.user_id = $1 AND ul.list_id = $2`,
		todoListsTable, usersListsTable)
	err = conn.QueryRow(ctx, query, userId, listId).Scan(&list.Id, &list.Title, &list.Description)
	return list, err
}
// Delete removes the todo list listId when it is linked to userId; the
// link table row is matched via the USING clause.
func (t *TodoListPostgres) Delete(userId, listId int) error {
	ctx := context.Background()
	conn, err := t.pool.Acquire(ctx)
	if err != nil {
		return err
	}
	defer conn.Release()
	stmt := fmt.Sprintf("DELETE FROM %s tl USING %s ul WHERE tl.id = ul.list_id AND ul.user_id=$1 AND ul.list_id=$2",
		todoListsTable, usersListsTable)
	_, err = conn.Exec(ctx, stmt, userId, listId)
	return err
}
// Update modifies the title and/or description of list listId owned by
// userId. Fields left nil in input are not touched. A call with no
// fields set is treated as a no-op and returns nil.
func (t *TodoListPostgres) Update(userId, listId int, input models.UpdateListInput) error {
	ctx := context.Background()
	conn, err := t.pool.Acquire(ctx)
	if err != nil {
		return err
	}
	defer conn.Release()
	setValues := make([]string, 0)
	args := make([]interface{}, 0)
	argId := 1
	if input.Title != nil {
		setValues = append(setValues, fmt.Sprintf("title=$%d", argId))
		args = append(args, *input.Title)
		argId++
	}
	if input.Description != nil {
		setValues = append(setValues, fmt.Sprintf("description=$%d", argId))
		args = append(args, *input.Description)
		argId++
	}
	// Bug fix: with no fields set the generated statement was the invalid
	// "UPDATE ... SET  FROM ..."; short-circuit instead of sending it.
	if len(setValues) == 0 {
		return nil
	}
	setQuery := strings.Join(setValues, ", ")
	query := fmt.Sprintf("UPDATE %s tl SET %s FROM %s ul WHERE tl.id = ul.list_id AND ul.list_id=$%d AND ul.user_id=$%d",
		todoListsTable, setQuery, usersListsTable, argId, argId+1)
	args = append(args, listId, userId)
	_, err = conn.Exec(ctx, query, args...)
	return err
}
|
package main
import (
"flag"
"fmt"
"github.com/randall77/hprof/read"
)
// main reads a Go heap dump (args: dumpfile [executable]) via the read
// package, marks every object reachable from the roots, and prints the
// whole graph as a Graphviz "dot" digraph on stdout. Heap objects appear
// as v<id> nodes, roots as diamonds, stack frames as rectangles.
func main() {
	flag.Parse()
	args := flag.Args()
	var d *read.Dump
	if len(args) == 2 {
		// dump plus executable: the binary supplies symbol/type information
		d = read.Read(args[0], args[1])
	} else {
		d = read.Read(args[0], "")
	}
	// eliminate unreachable objects
	// TODO: have reader do this?
	// BFS mark phase: seed the queue with every object directly referenced
	// from a root, then propagate through heap edges below.
	reachable := make([]bool, d.NumObjects())
	var q []read.ObjId
	// roots: stack frame edges
	for _, f := range d.Frames {
		for _, e := range f.Edges {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}
	// roots: data and bss segments
	for _, x := range []*read.Data{d.Data, d.Bss} {
		for _, e := range x.Edges {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}
	// roots: miscellaneous runtime roots
	for _, r := range d.Otherroots {
		for _, e := range r.Edges {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}
	// roots: objects queued for finalization
	for _, f := range d.QFinal {
		for _, e := range f.Edges {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}
	// roots: goroutine context objects
	for _, g := range d.Goroutines {
		if g.Ctxt != read.ObjNil {
			if !reachable[g.Ctxt] {
				reachable[g.Ctxt] = true
				q = append(q, g.Ctxt)
			}
		}
	}
	// propagate reachability through object-to-object edges (BFS)
	for len(q) > 0 {
		x := q[0]
		q = q[1:]
		for _, e := range d.Edges(x) {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}
	fmt.Printf("digraph {\n")
	// print object graph
	for i := 0; i < d.NumObjects(); i++ {
		x := read.ObjId(i)
		// NOTE(review): unreachable objects are only filled gray here; they
		// still receive a label and edges below — confirm that is intended
		// given the "eliminate unreachable objects" comment above.
		if !reachable[x] {
			fmt.Printf(" v%d [style=filled fillcolor=gray];\n", x)
		}
		fmt.Printf(" v%d [label=\"%s\\n%d\"];\n", x, d.Ft(x).Name, d.Size(x))
		for _, e := range d.Edges(x) {
			// annotate edges with the source field name (or offset) and the
			// target offset when non-zero
			var taillabel, headlabel string
			if e.FieldName != "" {
				taillabel = fmt.Sprintf(" [taillabel=\"%s\"]", e.FieldName)
			} else if e.FromOffset != 0 {
				taillabel = fmt.Sprintf(" [taillabel=\"%d\"]", e.FromOffset)
			}
			if e.ToOffset != 0 {
				headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
			}
			fmt.Printf(" v%d -> v%d%s%s;\n", x, e.To, taillabel, headlabel)
		}
	}
	// goroutines and stacks
	for _, t := range d.Goroutines {
		fmt.Printf(" \"goroutines\" [shape=diamond];\n")
		fmt.Printf(" \"goroutines\" -> f%x_0;\n", t.Bos.Addr)
	}
	// stack frames
	for _, f := range d.Frames {
		fmt.Printf(" f%x_%d [label=\"%s\\n%d\" shape=rectangle];\n", f.Addr, f.Depth, f.Name, len(f.Data))
		if f.Parent != nil {
			// link each frame to its caller frame
			fmt.Printf(" f%x_%d -> f%x_%d;\n", f.Addr, f.Depth, f.Parent.Addr, f.Parent.Depth)
		}
		for _, e := range f.Edges {
			if e.To != read.ObjNil {
				var taillabel, headlabel string
				if e.FieldName != "" {
					taillabel = fmt.Sprintf(" [taillabel=\"%s\"]", e.FieldName)
				} else if e.FromOffset != 0 {
					taillabel = fmt.Sprintf(" [taillabel=\"%d\"]", e.FromOffset)
				}
				if e.ToOffset != 0 {
					headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
				}
				fmt.Printf(" f%x_%d -> v%d%s%s;\n", f.Addr, f.Depth, e.To, taillabel, headlabel)
			}
		}
	}
	// data/bss roots, one named diamond per referencing field
	for _, x := range []*read.Data{d.Data, d.Bss} {
		for _, e := range x.Edges {
			if e.To != read.ObjNil {
				var headlabel string
				if e.ToOffset != 0 {
					headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
				}
				fmt.Printf(" \"%s\" [shape=diamond];\n", e.FieldName)
				fmt.Printf(" \"%s\" -> v%d%s;\n", e.FieldName, e.To, headlabel)
			}
		}
	}
	// other runtime roots, labelled with their description
	for _, r := range d.Otherroots {
		for _, e := range r.Edges {
			var headlabel string
			if e.ToOffset != 0 {
				headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
			}
			fmt.Printf(" \"%s\" [shape=diamond];\n", r.Description)
			fmt.Printf(" \"%s\" -> v%d%s;\n", r.Description, e.To, headlabel)
		}
	}
	// queued-finalizer roots
	for _, f := range d.QFinal {
		for _, e := range f.Edges {
			var headlabel string
			if e.ToOffset != 0 {
				headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
			}
			fmt.Printf(" \"queued finalizers\" [shape=diamond];\n")
			fmt.Printf(" \"queued finalizers\" -> v%d%s;\n", e.To, headlabel)
		}
	}
	fmt.Printf("}\n")
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//789. Escape The Ghosts
//You are playing a simplified Pacman game. You start at the point (0, 0), and your destination is (target[0], target[1]). There are several ghosts on the map, the i-th ghost starts at (ghosts[i][0], ghosts[i][1]).
//Each turn, you and all ghosts simultaneously *may* move in one of 4 cardinal directions: north, east, west, or south, going from the previous point to a new point 1 unit of distance away.
//You escape if and only if you can reach the target before any ghost reaches you (for any given moves the ghosts may take.) If you reach any square (including the target) at the same time as a ghost, it doesn't count as an escape.
//Return True if and only if it is possible to escape.
//Example 1:
//Input:
//ghosts = [[1, 0], [0, 3]]
//target = [0, 1]
//Output: true
//Explanation:
//You can directly reach the destination (0, 1) at time 1, while the ghosts located at (1, 0) or (0, 3) have no way to catch up with you.
//Example 2:
//Input:
//ghosts = [[1, 0]]
//target = [2, 0]
//Output: false
//Explanation:
//You need to reach the destination (2, 0), but the ghost at (1, 0) lies between you and the destination.
//Example 3:
//Input:
//ghosts = [[2, 0]]
//target = [1, 0]
//Output: false
//Explanation:
//The ghost can reach the target at the same time as you.
//Note:
//All points have coordinates with absolute value <= 10000.
//The number of ghosts will not exceed 100.
//func escapeGhosts(ghosts [][]int, target []int) bool {
//}
// Time Is Money |
package leetcode
// rotateString reports whether B can be obtained by rotating A some
// number of positions to the left (LeetCode 796, "Rotate String").
func rotateString(A string, B string) bool {
	if len(A) != len(B) {
		return false
	}
	n := len(A)
	if n == 0 {
		return true
	}
	// Try every left-rotation offset; compare character by character
	// using modular indexing instead of slicing.
	for shift := 0; shift < n; shift++ {
		matched := true
		for j := 0; j < n; j++ {
			if A[(shift+j)%n] != B[j] {
				matched = false
				break
			}
		}
		if matched {
			return true
		}
	}
	return false
}
|
package models
import (
"errors"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"log"
"strconv"
"strings"
)
// UPS describes an uninterruptible power supply unit stored in the
// MONGO_COL_UP_NAME collection.
type UPS struct {
	ID string // inventory id of the form "IT-UP-NNNN" (see GetNextID)
	Manufacturer string
	Model string
	Description string
	InWork bool
	Barcode string
	CurrentLocation string // current placement; the value "Списать" marks write-off (filtered by GetAll)
	HistoryLocations []string // past placements (appended by commented-out code in Update)
	RemovalMark bool
	ChangesHistory []string // free-form audit trail of edits
}
// Get looks up a single UPS document by its ID field.
// It returns the zero UPS together with a non-nil error when the
// collection is unavailable or no document matches (mgo.ErrNotFound).
func (UPS) Get(session *mgo.Session, id string) (ups UPS, err error) {
	collection, err := getCollection(session, MONGO_COL_UP_NAME)
	if err != nil {
		return ups, err
	}
	err = collection.Find(bson.M{"id": id}).One(&ups)
	if err != nil {
		return ups, err
	}
	return ups, nil
}
// GetAll returns every UPS that is not marked for write-off
// (CurrentLocation != "Списать"), sorted by id.
func (UPS) GetAll(session *mgo.Session) (listUPS []UPS, err error) {
	collection, err := getCollection(session, MONGO_COL_UP_NAME)
	if err != nil {
		// Previously this error was ignored and the nil collection was
		// dereferenced by Find below, which would panic.
		return listUPS, err
	}
	err = collection.Find(bson.M{"currentlocation": bson.M{"$ne": "Списать"}}).Sort("id").All(&listUPS)
	if err != nil {
		return listUPS, err
	}
	return listUPS, nil
}
// Save inserts ups as a new document. It refuses duplicates: when a
// document with the same ID already exists, an error is returned.
//
// The original implementation returned nil on the duplicate case and
// its "UPS already in database" error was unreachable dead code after
// an if/else in which both branches return.
func (ups UPS) Save(session *mgo.Session) (err error) {
	collection, err := getCollection(session, MONGO_COL_UP_NAME)
	if err != nil {
		return err
	}
	_, err = ups.Get(session, ups.ID)
	switch err {
	case mgo.ErrNotFound:
		// No document with this ID yet - safe to insert.
		return collection.Insert(ups)
	case nil:
		// Lookup succeeded, so the ID is already taken.
		return errors.New("UPS already in database")
	default:
		// Lookup failed for some other reason (connection error etc.).
		return err
	}
}
// Update replaces the stored document whose id equals ups.ID with the
// supplied value. The commented-out fragments below are leftovers of a
// planned audit-trail feature (recording who/when/where changed the
// unit) - kept for reference.
func (ups UPS) Update(session *mgo.Session) (err error) {
	collection, err := getCollection(session, MONGO_COL_UP_NAME)
	if err != nil {
		return err
	}
	//upsFind.ChangesHistory = append(upsFind.ChangesHistory, fmt.Sprintf("Изменено из: %v в: %v. Пользователь: %v. Дата: %v", upsFind, ups, r.Header.Get("X-Real-Ip"), time.Now().String()))
	// ups.HistoryLocations = append(upsFind.HistoryLocations, "Перемещено из "+upsFind.CurrentLocation+" в "+ups.CurrentLocation+"\n"+r.Header.Get("X-Real-Ip")+"\n"+time.Now().String())
	// if ups.CurrentLocation == "Списать" {
	// ups.RemovalMark = true
	// }
	su := bson.M{"id": ups.ID}
	err = collection.Update(su, ups)
	if err != nil {
		return err
	}
	return nil
}
// GetNextID computes the next free inventory id by taking the highest
// existing id (descending sort on "id") and incrementing its numeric
// suffix. Ids have the form "IT-UP-NNNN"; an empty collection yields
// "IT-UP-0001". On any lookup or parse failure the zero string is
// returned after logging.
func (UPS) GetNextID(session *mgo.Session) (str string) {
	collection, err := getCollection(session, MONGO_COL_UP_NAME)
	if err != nil {
		return str
	}
	ups := UPS{}
	err = collection.Find(bson.M{}).Sort("-id").One(&ups)
	if err != nil {
		if err == mgo.ErrNotFound { // was err.Error() == "not found"; compare the sentinel instead
			// Empty collection: seed with the zero id so the increment
			// below produces "IT-UP-0001".
			ups.ID = "IT-UP-0000"
		} else {
			// Any other lookup error used to leave ups.ID empty, making
			// the Split(...)[2] index expression below panic; bail out.
			log.Println(err.Error())
			return str
		}
	}
	num, err := strconv.ParseInt(strings.Split(ups.ID, "-")[2], 10, 64)
	if err != nil {
		// Was log.Fatalln, which would kill the whole process from a
		// data-access helper; log and return the zero value instead.
		log.Println(err.Error())
		return str
	}
	num += 1
	strN := strconv.Itoa(int(num))
	make4digits(&strN)
	str = strings.Join(strings.Split(ups.ID, "-")[:2], "-") + "-" + strN
	return str
}
|
package frame
import (
"fmt"
"sync"
"github.com/google/uuid"
)
// PositionPolicy selects how a frame is anchored vertically on the screen.
type PositionPolicy int

const (
	FloatFree    PositionPolicy = iota // unconstrained: may move anywhere, even off the screen
	FloatForward                       // like FloatFree, but stops at the bottom edge instead of scrolling off (gains real estate)
	FloatTop                           // pinned to the top of the screen
	FloatBottom                        // pinned to the bottom of the screen
)

// String implements fmt.Stringer, returning the constant's name, or
// "PositionPolicy=N?" for values outside the known set.
func (p PositionPolicy) String() string {
	names := [...]string{
		FloatFree:    "FloatFree",
		FloatForward: "FloatForward",
		FloatTop:     "FloatTop",
		FloatBottom:  "FloatBottom",
	}
	if p >= 0 && int(p) < len(names) {
		return names[p]
	}
	return fmt.Sprintf("PositionPolicy=%d?", p)
}
// ScreenEventHandler consumes low-level screen events.
type ScreenEventHandler interface {
	onEvent(*ScreenEvent)
}

// Policy encapsulates a frame's positioning behavior; one implementation
// exists per PositionPolicy value (see the float*Policy types below).
type Policy interface {
	// reactive actions
	// onClose()
	onResize(adjustment int)
	// onUpdate()
	onTrail()
	// proactive actions
	onInit()
	allowedMotion(rows int) int
	allowTrail() bool
}

// Config carries the construction options for a Frame.
type Config struct {
	Lines int
	startRow int
	HasHeader bool
	HasFooter bool
	TrailOnRemove bool
	PositionPolicy PositionPolicy
	ManualDraw bool
}

// ScreenEvent is a pending write of value at a given terminal row.
type ScreenEvent struct {
	value []byte
	row int
}

// Line is a single drawable row owned by a Frame.
type Line struct {
	id uuid.UUID
	buffer []byte // current contents of the row
	row int // absolute terminal row this line renders to
	lock *sync.Mutex
	closeSignal *sync.WaitGroup
	closed bool
	stale bool
}
// Frame owns a contiguous region of terminal rows: an optional header,
// the active lines, and an optional footer, positioned per its Policy.
// Note Frame contains a *sync.Mutex and must not be copied.
type Frame struct {
	config Config
	lock *sync.Mutex
	header *Line
	activeLines []*Line
	clearRows []int
	trailRows []string
	rowAdvancements int
	footer *Line
	policy Policy
	autoDraw bool
	topRow int
	closeSignal *sync.WaitGroup
	closed bool
	stale bool
}

// floatTopPolicy implements Policy for FloatTop.
type floatTopPolicy struct {
	Frame *Frame
}

// floatBottomPolicy implements Policy for FloatBottom.
type floatBottomPolicy struct {
	Frame *Frame
}

// floatFreePolicy implements Policy for FloatFree.
type floatFreePolicy struct {
	Frame *Frame
}

// floatForwardPolicy implements Policy for FloatForward.
type floatForwardPolicy struct {
	Frame *Frame
}
|
package kdtree
import (
"github.com/sephriot/kdtree/point2"
"testing"
)
// testTree builds the fixed seven-point fixture used by most tests:
// root (0,0) with (1,0), (-1,0), (0,2), (0,-2), (-1,-1), (-1,1)
// inserted in this exact order so the resulting tree shape is
// deterministic.
func testTree() *KDTree {
	tree := New()
	tree.Add(point2.Point2{})
	tree.Add(point2.Point2{X: 1})
	tree.Add(point2.Point2{X: -1})
	tree.Add(point2.Point2{Y: 2})
	tree.Add(point2.Point2{Y: -2})
	tree.Add(point2.Point2{X: -1, Y: -1})
	tree.Add(point2.Point2{X: -1, Y: 1})
	return tree
}

// TestKDTree_Add pins the exact node placement produced by the fixture's
// insertion order (x is compared at even depths, y at odd depths).
func TestKDTree_Add(t *testing.T) {
	tree := testTree()
	if !equals(tree.root, point2.Point2{}) {
		t.Fail()
	}
	if !equals(tree.root.Left, point2.Point2{X:-1}) {
		t.Fail()
	}
	if !equals(tree.root.Right, point2.Point2{X:1}) {
		t.Fail()
	}
	if !equals(tree.root.Right.Right, point2.Point2{Y:2}) {
		t.Fail()
	}
	if !equals(tree.root.Right.Left, point2.Point2{Y:-2}) {
		t.Fail()
	}
	if !equals(tree.root.Left.Left, point2.Point2{X:-1, Y:-1}) {
		t.Fail()
	}
	if !equals(tree.root.Left.Right, point2.Point2{X:-1, Y:1}) {
		t.Fail()
	}
}
// TestKDTree_String is a smoke test: String must not panic before or
// after a removal. The output itself is not asserted.
func TestKDTree_String(t *testing.T) {
	tree := testTree()
	_ = tree.String()
	tree.Remove(point2.Point2{X: -1, Y: 1})
	_ = tree.String()
}

// TestNode_Min checks per-dimension comparison: for n1=(0,1), n2=(1,0)
// the minimum in dimension 0 (x) is n1 and in dimension 1 (y) is n2.
func TestNode_Min(t *testing.T) {
	n1 := &node{point2.Point2{Y: 1}, nil, nil}
	n2 := &node{point2.Point2{X: 1}, nil, nil}
	if n1.Min(n2, 0) == n2 {
		t.Log("Incorrect value for dimension 0")
		t.Fail()
	}
	if n1.Min(n2, 1) == n1 {
		t.Log("Incorrect value for dimension 1")
		t.Fail()
	}
}

// TestNode_FindMin checks the subtree minimum per dimension on the
// fixture: min x is -1, min y is -2.
func TestNode_FindMin(t *testing.T) {
	tree := testTree()
	if tree.root.FindMin(0, 0).Dimension(0) != -1 {
		t.Log("Incorrect value for dimension 0")
		t.Fail()
	}
	if tree.root.FindMin(1, 0).Dimension(1) != -2 {
		t.Log("Incorrect value for dimension 1")
		t.Fail()
	}
}

// TestNode_Find verifies Find's (node, parent, next-split-dimension)
// triple for each fixture point, plus a miss case returning nil.
func TestNode_Find(t *testing.T) {
	tree := testTree()
	ret, parent, dim := tree.root.Find(point2.Point2{}, 0, nil)
	if ret != tree.root || parent != nil || dim != 0 {
		t.Log("Incorrect value for (0,0)")
		t.Fail()
	}
	ret, parent, dim = tree.root.Find(point2.Point2{X: -1}, 0, nil)
	if ret != tree.root.Left || parent != tree.root || dim != 1 {
		t.Log("Incorrect value for (-1,0)")
		t.Fail()
	}
	ret, parent, dim = tree.root.Find(point2.Point2{X: 1}, 0, nil)
	if ret != tree.root.Right || parent != tree.root || dim != 1 {
		t.Log("Incorrect value for (1,0)")
		t.Fail()
	}
	ret, parent, dim = tree.root.Find(point2.Point2{Y: 2}, 0, nil)
	if ret != tree.root.Right.Right || parent != tree.root.Right || dim != 0 {
		t.Log("Incorrect value for (0,2)")
		t.Log("Expected", tree.root.Right.Right, tree.root.Right, 0)
		t.Log("Received", ret, parent, dim)
		t.Fail()
	}
	ret, parent, dim = tree.root.Find(point2.Point2{Y: -2}, 0, nil)
	if ret != tree.root.Right.Left || parent != tree.root.Right || dim != 0 {
		t.Log("Incorrect value for (0,-2)")
		t.Log("Expected", tree.root.Right.Left, tree.root.Right, 0)
		t.Log("Received", ret, parent, dim)
		t.Fail()
	}
	// Searching from a non-root start node with an explicit parent.
	ret, parent, dim = tree.root.Right.Right.Find(point2.Point2{Y: 2}, 0, tree.root.Right)
	if ret != tree.root.Right.Right || parent != tree.root.Right || dim != 0 {
		t.Log("Incorrect value for (0,-2)")
		t.Log("Expected", tree.root.Right.Right, tree.root.Right, 0)
		t.Log("Received", ret, parent, dim)
		t.Fail()
	}
	// A point not present in the tree must return nil.
	ret, _, _ = tree.root.Find(point2.Point2{X: 10, Y: 10}, 0, nil)
	if ret != nil {
		t.Log("Incorrect value for (10,10)")
		t.Fail()
	}
}
// TestKDTree_RemoveMoving simulates tracked points drifting +0.1 in x
// over two remove/re-add rounds (the first Remove calls hit an empty
// tree) and pins the resulting tree shape.
func TestKDTree_RemoveMoving(t *testing.T) {
	tree := &KDTree{}
	p1 := point2.Point2{X:-1}
	p2 := point2.Point2{X:-1.1}
	p3 := point2.Point2{X:-1.2}
	tree.Remove(p1)
	p1.X += 0.1
	tree.Add(p1)
	tree.Remove(p2)
	p2.X += 0.1
	tree.Add(p2)
	tree.Remove(p3)
	p3.X += 0.1
	tree.Add(p3)
	tree.Remove(p1)
	p1.X += 0.1
	tree.Add(p1)
	tree.Remove(p2)
	p2.X += 0.1
	tree.Add(p2)
	tree.Remove(p3)
	p3.X += 0.1
	tree.Add(p3)
	if !equals(tree.root, p2) || !equals(tree.root.Left, p3) || !equals(tree.root.Right, p1) {
		t.Fail()
	}
}

// TestKDTree_RemoveRoot: removing the root of a two-node tree must
// promote the remaining node, leaving a non-nil root.
func TestKDTree_RemoveRoot(t *testing.T) {
	tree := KDTree{}
	tree.Add(point2.Point2{})
	tree.Add(point2.Point2{X:-1})
	tree.Remove(point2.Point2{})
	if tree.root == nil {
		t.Fail()
	}
}

// TestKDTree_Remove exercises removal of leaves, internal nodes and the
// root across several fresh fixtures, checking which nodes get promoted.
func TestKDTree_Remove(t *testing.T) {
	tree := New()
	tree.Add(point2.Point2{})
	tree.Add(point2.Point2{X: 1})
	tree.Add(point2.Point2{X: -1})
	tree.Remove(point2.Point2{X: 1})
	if tree.root.Right != nil {
		t.Fail()
	}
	tree.Remove(point2.Point2{X: -1})
	if tree.root.Left != nil {
		t.Fail()
	}
	tree.Remove(point2.Point2{})
	if tree.root != nil {
		t.Fail()
	}
	tree = testTree()
	tree.Remove(point2.Point2{})
	if !equals(tree.root, point2.Point2{Y: 2}) {
		t.Fail()
	}
	tree = testTree()
	tree.Remove(point2.Point2{X: 1})
	if !equals(tree.root.Right, point2.Point2{Y: 2}) {
		t.Fail()
	}
	tree = testTree()
	tree.Remove(point2.Point2{X: 1})
	if !equals(tree.root.Right, point2.Point2{Y: 2}) {
		t.Fail()
	}
	tree.Remove(point2.Point2{})
	if !equals(tree.root, point2.Point2{Y: -2}) {
		t.Fail()
	}
	tree = testTree()
	tree.Remove(point2.Point2{X: -1})
	if !equals(tree.root.Left, point2.Point2{X:-1, Y:1}) {
		t.Fail()
	}
	if tree.root.Left.Right != nil {
		t.Fail()
	}
	tree.Remove(point2.Point2{X: -1, Y: -1})
	if tree.root.Left.Left != nil {
		t.Fail()
	}
	// Removing an already-removed point must be a no-op.
	tree.Remove(point2.Point2{X: -1})
	if tree.root.Left.Left != nil {
		t.Fail()
	}
	tree = testTree()
	tree.Remove(point2.Point2{X:1})
	tree.Remove(point2.Point2{Y: 2})
	tree.Remove(point2.Point2{Y: -2})
	tree.Remove(point2.Point2{X:-1})
	if tree.root.Right != nil {
		t.Fail()
	}
	if !equals(tree.root.Left, point2.Point2{X:-1,Y:1}) {
		t.Fail()
	}
}

// TestKDTree_RadiusSearchNil: searching an empty tree returns nil.
func TestKDTree_RadiusSearchNil(t *testing.T) {
	tree := New()
	points := tree.RadiusSearch(point2.Point2{X:-2}, 2.1)
	if points != nil {
		t.Fail()
	}
}

// TestKDTree_RadiusSearch checks results (in order) and the boundary
// behavior: a point at distance exactly equal to the radius is excluded
// (compare the 2.0 vs 2.0+epsilon cases below).
func TestKDTree_RadiusSearch(t *testing.T) {
	tree := testTree()
	expected := []Point{point2.Point2{}, point2.Point2{X:-1}, point2.Point2{X: -1, Y: -1}, point2.Point2{X: -1, Y: 1}}
	points := tree.RadiusSearch(point2.Point2{X:-2}, 2.1)
	for i := range points {
		if !equals(points[i], expected[i]) {
			t.Fail()
		}
	}
	expected = []Point{point2.Point2{X:1}}
	points = tree.RadiusSearch(point2.Point2{X:2}, 2.0)
	for i := range points {
		if !equals(points[i], expected[i]) {
			t.Fail()
		}
	}
	expected = []Point{point2.Point2{},point2.Point2{X:1}}
	points = tree.RadiusSearch(point2.Point2{X:2}, 2.0 + 0.00000000000001)
	for i := range points {
		if !equals(points[i], expected[i]) {
			t.Fail()
		}
	}
}
package realm_test
import (
"testing"
"github.com/10gen/realm-cli/internal/cli/user"
"github.com/10gen/realm-cli/internal/cloud/realm"
u "github.com/10gen/realm-cli/internal/utils/test"
"github.com/10gen/realm-cli/internal/utils/test/assert"
"github.com/10gen/realm-cli/internal/utils/test/mock"
)
// TestRealmAuthenticate is an integration test (skipped unless a local
// Realm server is running) covering both failed and successful login.
func TestRealmAuthenticate(t *testing.T) {
	u.SkipUnlessRealmServerRunning(t)
	client := realm.NewClient(u.RealmServerURL())
	t.Run("Should fail with invalid credentials", func(t *testing.T) {
		_, err := client.Authenticate("username", "apiKey")
		assert.Equal(t,
			realm.ServerError{Message: "failed to authenticate with MongoDB Cloud API: You are not authorized for this resource."},
			err,
		)
	})
	t.Run("Should return session details with valid credentials", func(t *testing.T) {
		session, err := client.Authenticate(u.CloudUsername(), u.CloudAPIKey())
		assert.Nil(t, err)
		assert.NotEqual(t, "", session.AccessToken, "access token must not be blank")
		assert.NotEqual(t, "", session.RefreshToken, "refresh token must not be blank")
	})
}

// TestRealmAuthProfile verifies AuthProfile both without a session
// (must yield ErrInvalidSession) and with a freshly authenticated one.
func TestRealmAuthProfile(t *testing.T) {
	u.SkipUnlessRealmServerRunning(t)
	t.Run("Should fail without an auth client", func(t *testing.T) {
		client := realm.NewClient(u.RealmServerURL())
		_, err := client.AuthProfile()
		assert.Equal(t, realm.ErrInvalidSession{}, err)
	})
	t.Run("With an active session should return session details with valid credentials", func(t *testing.T) {
		client := newAuthClient(t)
		profile, err := client.AuthProfile()
		assert.Nil(t, err)
		assert.NotEqualf(t, 0, len(profile.Roles), "expected profile to have role(s)")
		assert.Equal(t, []string{u.CloudGroupID()}, profile.AllGroupIDs())
	})
}
// TestRealmAuthRefresh covers the token-refresh paths: non-session
// errors must pass through unchanged, invalid credentials yield
// ErrInvalidSession, and an expired access token is transparently
// refreshed (or the session cleared when the refresh token is also
// expired). Integration test; skipped without a local Realm server.
func TestRealmAuthRefresh(t *testing.T) {
	u.SkipUnlessRealmServerRunning(t)
	t.Run("Does not refresh auth if request does not return invalid session code", func(t *testing.T) {
		client := realm.NewClient(u.RealmServerURL())
		session, err := client.Authenticate(u.CloudUsername(), u.CloudAPIKey())
		assert.Equal(t, nil, err)
		// invalidate the session's access token
		session.AccessToken = session.RefreshToken
		profile := mock.NewProfileWithSession(t, session)
		client = realm.NewAuthClient(profile.RealmBaseURL(), profile)
		_, err = client.AuthProfile()
		serverError, ok := err.(realm.ServerError)
		assert.True(t, ok, "expected %T to be server error", err)
		assert.Equal(t, realm.ServerError{Message: "invalid session: valid Issuer required"}, serverError)
	})
	t.Run("Should return the invalid session error when credentials are invalid", func(t *testing.T) {
		client := realm.NewClient(u.RealmServerURL())
		session, err := client.Authenticate(u.CloudUsername(), u.CloudAPIKey())
		assert.Equal(t, nil, err)
		// invalidate the session's tokens
		session.RefreshToken = session.AccessToken
		session.AccessToken = ""
		profile := mock.NewProfileWithSession(t, session)
		client = realm.NewAuthClient(profile.RealmBaseURL(), profile)
		_, err = client.AuthProfile()
		assert.Equal(t, realm.ErrInvalidSession{}, err)
	})
	t.Run("with an expired access token", func(t *testing.T) {
		u.SkipUnlessExpiredAccessTokenPresent(t)
		t.Run("should use the refresh token to generate a new access token", func(t *testing.T) {
			client := realm.NewClient(u.RealmServerURL())
			session, err := client.Authenticate(u.CloudUsername(), u.CloudAPIKey())
			assert.Nil(t, err)
			profile, teardown := mock.NewProfileFromTmpDir(t, "auth_refresh_test")
			defer teardown()
			profile.SetRealmBaseURL(u.RealmServerURL())
			// set the access token value to an expired token
			profile.SetSession(user.Session{u.ExpiredAccessToken(), session.RefreshToken})
			client = realm.NewAuthClient(profile.RealmBaseURL(), profile)
			_, err = client.AuthProfile()
			assert.Nil(t, err)
			updatedSession := profile.Session()
			t.Log("and update the access token")
			assert.NotEqual(t, u.ExpiredAccessToken(), updatedSession.AccessToken, "access token was not updated")
			assert.Equalf(t, session.RefreshToken, updatedSession.RefreshToken, "refresh token was incorrectly updated")
		})
		t.Run("should error if both the access and refresh tokens are expired", func(t *testing.T) {
			profile, teardown := mock.NewProfileFromTmpDir(t, "auth_refresh_test")
			defer teardown()
			profile.SetRealmBaseURL(u.RealmServerURL())
			// set the access and refresh token values to expired tokens
			profile.SetSession(user.Session{u.ExpiredAccessToken(), u.ExpiredAccessToken()})
			client := realm.NewAuthClient(profile.RealmBaseURL(), profile)
			_, err := client.AuthProfile()
			assert.Equal(t, realm.ErrInvalidSession{}, err)
			session := profile.Session()
			t.Log("and clear the session tokens")
			assert.Equalf(t, "", session.AccessToken, "access token was not cleared")
			assert.Equalf(t, "", session.RefreshToken, "refresh token was not cleared")
		})
	})
}

// newAuthClient authenticates against the local Realm server and wraps
// the resulting session in an auth-aware client for other tests.
func newAuthClient(t *testing.T) realm.Client {
	t.Helper()
	client := realm.NewClient(u.RealmServerURL())
	session, err := client.Authenticate(u.CloudUsername(), u.CloudAPIKey())
	assert.Nil(t, err)
	profile := mock.NewProfileWithSession(t, session)
	return realm.NewAuthClient(profile.RealmBaseURL(), profile)
}
|
package main
import (
"awesomeProject/calculator/calculatorpb"
"context"
"fmt"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"io"
"log"
)
// main dials the local gRPC calculator server (insecure, port 50051)
// and runs the error-handling demo. The commented-out calls are the
// other demos in this example.
func main() {
	fmt.Println("Calculator Client")
	cc, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("Failed to connect: %v", err)
	}
	defer cc.Close()
	c := calculatorpb.NewCalculatorServiceClient(cc)
	//fmt.Printf("Created client: %f", c)
	//doUnary(c)
	//doServerStreaming(c)
	doErrorUnary(c)
}

// doErrorUnary demonstrates SquareRoot with one valid argument and one
// that makes the server return a gRPC status error.
func doErrorUnary(c calculatorpb.CalculatorServiceClient) {
	fmt.Printf("Starting to do a SquareRoot unary RPC..\n")
	// correct call
	doErrorCall(c, 10)
	// error call
	doErrorCall(c, -1)
}
// doErrorCall performs a single SquareRoot RPC and prints either the
// result or a description of the gRPC error status.
func doErrorCall(c calculatorpb.CalculatorServiceClient, n int32) {
	resp, err := c.SquareRoot(context.Background(), &calculatorpb.SquareRootRequest{
		Number: n,
	})
	if err != nil {
		s, ok := status.FromError(err)
		if ok {
			// actual error from gRPC (user error)
			fmt.Printf("Error message from server: %v\n", s.Message())
			if s.Code() == codes.InvalidArgument {
				fmt.Println("We probably sent a negative number")
			}
			// Return on ANY gRPC error: the original only returned for
			// InvalidArgument and fell through to print a bogus result
			// from the zero-valued response for other status codes.
			return
		}
		log.Fatalf("Big error calling SquareRoot: %v", err)
		return
	}
	fmt.Printf("Result of square root of %v: %v\n", n, resp.GetNumberRoot())
}
// doUnary demonstrates a simple unary Sum RPC.
func doUnary(c calculatorpb.CalculatorServiceClient) {
	fmt.Printf("Starting to do a Sum unary RPC..\n")
	req := &calculatorpb.SumRequest{
		FirstNumber: 1,
		SecondNumber: 2,
	}
	resp, err := c.Sum(context.Background(), req)
	if err != nil {
		log.Fatalf("error while calling Calculator RPC: %v", err)
	}
	log.Printf("Response from Calculator: %v\n", resp.SumResult)
}

// doServerStreaming demonstrates a server-streaming RPC: it reads prime
// factors from the stream until io.EOF signals the end.
func doServerStreaming(c calculatorpb.CalculatorServiceClient) {
	fmt.Printf("Starting to do a PrimeDecomposition Server Streaming RPC..\n")
	req := &calculatorpb.PrimeNumberDecompositionRequest{
		Number: 1239039284099999000,
	}
	stream, err := c.PrimeNumberDecomposition(context.Background(), req)
	if err != nil {
		log.Fatalf("error while calling PrimeDecomposition RPC: %v", err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			// the server has finished streaming
			break
		}
		if err != nil {
			log.Fatalf("Something happened: %v", err)
		}
		fmt.Println(resp.GetPrimeFactor())
	}
}
package task8
import (
"io"
)
// Rot13Reader wraps an io.Reader and applies the ROT13 substitution
// cipher to every ASCII letter it reads; all other bytes pass through
// unchanged.
type Rot13Reader struct {
	R io.Reader
}

// Read fills p from the underlying reader and rotates the letters that
// were actually read. Only p[:n] is transformed: the original version
// iterated over ALL of p, clobbering any caller data beyond the n bytes
// returned by the underlying Read.
func (r Rot13Reader) Read(p []byte) (n int, err error) {
	n, err = r.R.Read(p)
	for i := 0; i < n; i++ {
		switch {
		case p[i] >= 'A' && p[i] < 'N':
			fallthrough
		case p[i] >= 'a' && p[i] < 'n':
			p[i] += 13
		case p[i] > 'M' && p[i] <= 'Z':
			fallthrough
		case p[i] > 'm' && p[i] <= 'z':
			p[i] -= 13
		}
	}
	return
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-03-26 13:31
# @File : main.go
# @Description :
# @Attention :
*/
package main
import (
"examples/blockchain/config"
"fmt"
"os"
"os/signal"
"syscall"
)
// main loads the YAML configuration and then blocks until the process
// receives a termination signal.
func main() {
	yamlPath := "/Users/joker/go/src/examples/blockchain/raftorder_example/application-dev.yaml"
	if e := config.Config(yamlPath); nil != e {
		panic(e)
	}
	c := make(chan os.Signal, 1)
	// SIGKILL removed from the subscription: per the os/signal docs it
	// can never be caught or ignored, so listing it was a no-op.
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
	si := <-c
	if si != nil {
		fmt.Println("接收到结束信号")
		os.Exit(0)
	}
	fmt.Println("程序结束运行")
}
|
package usermodel
//User is represent the struct of the user in user collection
//User represents a document in the user collection.
type User struct {
	UID string `json:"uid"` //User id
	Nick string `json:"nickname"` //Nickname which will be displayed (for example in posts)
	Name string `json:"name"` //Actual name
	Surname string `json:"surname"` //Actual surname
	Avatar string `json:"avatar"` //Path in Firebase storage to the avatar; display uses autogenerated thumbnails
	Email string `json:"email"` //Email of the user
	IsSetted bool `json:"issetted"` //Whether the user finished profile setup (name, avatar and nick specified)
}
|
package main
import (
"fmt"
"sort"
"github.com/shirou/gopsutil/process"
"github.com/shirou/gopsutil/cpu"
)
// Process is a flattened snapshot of the per-process info gathered from
// gopsutil that this tool aggregates.
type Process struct {
	pid int32
	ppid int32 // parent pid
	name string
	cmdline string
	status string
	createTime int64
	cpuPercent float64
	memPercent float32
	numThreads int32
}
// Parser folds the process tree upward: a process whose parent is init
// (pid 1), the idle/kernel pid 0 or kthreadd (pid 2) becomes a
// top-level entry in resProcesses; any other process adds its CPU and
// memory usage to an entry already present in resProcesses.
//
// NOTE(review): when the parent is not yet in resProcesses the function
// recurses with ppid-1, i.e. it scans candidate pids downward instead of
// following the real parent chain via allProcesses[ppid].ppid. This can
// attribute usage to an unrelated process whose pid happens to be in
// resProcesses - confirm this is intentional (the caller does feed pids
// in ascending order).
func Parser(pid int32, ppid int32, allProcesses map[int32]*Process, resProcesses map[int32]*Process) {
	if ppid == 1 || ppid == 0 || ppid == 2 {
		resProcesses[pid] = allProcesses[pid]
	} else if ppid < 0 {
		// ran out of candidate parents; drop this process
		return
	} else {
		pProc, hasKey := resProcesses[ppid]
		if hasKey {
			proc := allProcesses[pid]
			pProc.cpuPercent += proc.cpuPercent
			pProc.memPercent += proc.memPercent
		} else {
			Parser(pid, ppid-1, allProcesses, resProcesses)
		}
	}
	return
}
// main snapshots all processes, then aggregates their CPU/memory usage
// into top-level entries via Parser (processing pids in ascending order)
// and prints the result.
func main() {
	// NOTE(review): the local variable `cpu` shadows the imported cpu
	// package from here on; harmless today but fragile.
	cpu, err := cpu.Counts(true)
	if err != nil {
		fmt.Println("get cpu number error: ", err)
	}
	fmt.Printf("cpu: %v\n", cpu)
	procs, err := process.Processes()
	if err != nil {
		fmt.Println("get process error: ", err)
	}
	// Collect a snapshot of every process keyed by pid. Per-field errors
	// are deliberately ignored (processes may exit mid-scan).
	var allProcesses = map[int32]*Process{}
	for _, proc := range procs {
		var process = new(Process)
		process.pid = proc.Pid
		process.ppid, _ = proc.Ppid()
		process.name, _ = proc.Name()
		process.cmdline, _ = proc.Cmdline()
		process.status, _ = proc.Status()
		process.createTime, _ = proc.CreateTime()
		process.cpuPercent, _ = proc.CPUPercent()
		process.memPercent, _ = proc.MemoryPercent()
		process.numThreads, _ = proc.NumThreads()
		allProcesses[process.pid] = process
		fmt.Printf("pid: %v, ppid: %v, name: %v, cmd: %v\n", proc.Pid, process.ppid, process.name, process.cmdline)
	}
	resProcesses := map[int32]*Process{}
	// Sort pids so parents are (usually) processed before children -
	// Parser relies on this ordering.
	var keys sort.IntSlice
	for k := range allProcesses {
		keys = append(keys, int(k))
	}
	keys.Sort()
	for _, key := range keys {
		pid := int32(key)
		proc := allProcesses[pid]
		ppid := proc.ppid
		fmt.Println(pid, ppid)
		Parser(pid, ppid, allProcesses, resProcesses)
	}
	for key, value := range resProcesses {
		fmt.Printf("%v: %+v\n", key, value)
	}
}
|
package main
import (
"fmt"
"net"
"strings"
"time"
"github.com/hashicorp/yamux"
"github.com/payfazz/go-errors"
"github.com/payfazz/go-errors/errhandler"
"github.com/payfazz/stdlog"
)
// runServer listens on addr (formatted "network:address") and tunnels
// every accepted connection as a yamux stream multiplexed over the
// process's stdio. Accepting stops once the stdio reader side is done.
func runServer(addr string) {
	leftAddr := strings.SplitN(addr, ":", 2)
	if len(leftAddr) != 2 {
		showUsage()
	}
	leftListener, err := net.Listen(leftAddr[0], leftAddr[1])
	errhandler.Check(errors.Wrap(err))
	stdlog.Err.Print(fmt.Sprintf("Server listening on %v\n", leftListener.Addr()))
	rightMuxedIO := newStdioWrapper()
	// Close the listener as soon as stdin is exhausted so the Accept
	// loop below can terminate.
	go func() {
		<-rightMuxedIO.readerDoneCh
		leftListener.Close()
	}()
	rightMuxed, err := yamux.Client(rightMuxedIO, nil)
	errhandler.Check(err)
	defer rightMuxed.Close()
	for {
		left, err := leftListener.Accept()
		if err != nil {
			if rightMuxedIO.readerDone() {
				// normal shutdown: the stdio side is finished
				break
			}
			// transient accept failure: log and retry with a short backoff
			errors.PrintTo(stdlog.Err, errors.Wrap(err))
			time.Sleep(100 * time.Millisecond)
			continue
		}
		// One goroutine per connection: open a muxed stream and copy
		// bytes in both directions until either side closes.
		go func() {
			defer errhandler.With(func(err error) {
				errors.PrintTo(stdlog.Err, errors.Wrap(err))
			})
			defer left.Close()
			right, err := rightMuxed.OpenStream()
			errhandler.Check(err)
			defer right.Close()
			biCopy(left, right, rightMuxedIO.readerDoneCh)
		}()
	}
}
|
// Copyright 2017 Vlad Didenko. All rights reserved.
// See the included LICENSE.md file for licensing information
package slops // import "go.didenko.com/slops"
import (
"reflect"
"testing"
)
// uniqueUseCase pairs an input slice with the deduplicated slice that
// Unique is expected to produce.
type uniqueUseCase struct {
	in []string
	expect []string
}

// uniqueTestScript covers nil/empty input, single elements, and
// duplicates at the start, middle and end. Note a nil input is expected
// to yield an empty (non-nil) slice.
var uniqueTestScript = []uniqueUseCase{
	{nil, []string{}},
	{[]string{}, []string{}},
	{[]string{""}, []string{""}},
	{[]string{"-"}, []string{"-"}},
	{[]string{"-", "-"}, []string{"-"}},
	{[]string{"a", "b", "c"}, []string{"a", "b", "c"}},
	{[]string{"a", "a", "b", "c"}, []string{"a", "b", "c"}},
	{[]string{"a", "b", "b", "c"}, []string{"a", "b", "c"}},
	{[]string{"a", "b", "c", "c"}, []string{"a", "b", "c"}},
}

// TestUnique runs every scripted case through Unique and compares the
// result with reflect.DeepEqual.
func TestUnique(t *testing.T) {
	for uci, uc := range uniqueTestScript {
		result := Unique(uc.in)
		if !reflect.DeepEqual(uc.expect, result) {
			t.Error("At index", uci, "result", result, "does not match expected", uc.expect)
		}
	}
}
|
package domain
import "oneday-infrastructure/tools"
// TenantRepo is the persistence port required by TenantService.
type TenantRepo interface {
	InsertTenant(tenant Tenant)
	FindByName(tenantName string) (tenant Tenant, exist bool)
	GetByCode(tenantCode string) (tenant Tenant)
	InsertUser(user User)
}

// TenantService exposes tenant use-cases on top of an embedded TenantRepo.
type TenantService struct {
	TenantRepo
}

// InitTenantService wires a repository implementation into a TenantService.
func InitTenantService(tenantRepo TenantRepo) TenantService {
	return TenantService{tenantRepo}
}

// GenUniqueCode produces a fresh unique tenant code.
type GenUniqueCode func() string
// TODO move to admin service
func (service TenantService) Add(cmd *AddTenantCmd, genUniqueCode GenUniqueCode) (TenantCO, AddTenantSuccess) {
if tenant, exist := service.FindByName(cmd.TenantName); !exist {
if cmd.TenantCode == "" {
tenant.TenantCode = genUniqueCode()
}
service.InsertTenant(ToTenant(cmd))
return ToTenantCO(tenant), AddSuccess
}
return TenantCO{}, TenantNameExist
}
// AddUser creates a user under the tenant identified by cmd.TenantCode,
// encrypting the password with the encrypter chosen by cmd.EncryptWay.
func (service TenantService) AddUser(cmd *AddUserCmd) {
	tenant := service.GetByCode(cmd.TenantCode)
	user := tenant.generateUser(
		cmd.Username,
		tools.ChooseEncrypter(cmd.EncryptWay)(cmd.Password),
		cmd.Mobile)
	service.InsertUser(user)
}
|
package gherkin
import (
re "regexp"
"io"
"reflect"
"strconv"
)
// stepdef binds a step-matching regular expression to its handler
// function.
type stepdef struct {
	r *re.Regexp
	f interface{}
}

// call invokes the handler s.f via reflection. Argument layout:
// in[0] is the *World, in[1] is the scenario context, and in[2:] are
// the regex capture groups converted to the handler's parameter types.
// Panics on arity mismatch, an unsupported parameter kind, or a capture
// that fails to parse.
func (s stepdef) call(w *World) {
	t := reflect.TypeOf(s.f)
	in := make([]reflect.Value, t.NumIn())
	in[0] = reflect.ValueOf(w)
	// regexParams[0] is the whole match, so the handler needs exactly
	// one parameter per capture group plus the two leading arguments.
	if len(in) != len(w.regexParams) + 1 {
		panic("Function type mismatch")
	}
	in[1] = reflect.ValueOf(w.ctx)
	for i := 2; i < len(in); i++ {
		var val interface{}
		var err error
		itp := w.regexParams[i - 1] // capture group feeding parameter i
		switch t.In(i).Kind() {
		case reflect.Bool:
			val, err = strconv.ParseBool(itp)
		case reflect.Int8:
			val, err = strconv.ParseInt(itp, 10, 8)
			val = int8(val.(int64))
		case reflect.Int16:
			val, err = strconv.ParseInt(itp, 10, 16)
			val = int16(val.(int64))
		case reflect.Int32:
			val, err = strconv.ParseInt(itp, 10, 32)
			val = int32(val.(int64))
		case reflect.Int:
			val, err = strconv.ParseInt(itp, 10, 64)
			val = int(val.(int64))
		case reflect.Int64:
			val, err = strconv.ParseInt(itp, 10, 64)
			val = val.(int64)
		case reflect.Float32:
			val, err = strconv.ParseFloat(itp, 32)
			val = float32(val.(float64))
		case reflect.Float64:
			val, err = strconv.ParseFloat(itp, 64)
			val = val.(float64)
		case reflect.String:
			val = itp
		default:
			panic("Function type not supported")
		}
		if err != nil {
			panic(err)
		}
		in[i] = reflect.ValueOf(val)
	}
	r := reflect.ValueOf(s.f)
	r.Call(in)
}
// createstepdef builds a stepdef from a regex pattern and a handler.
// The pattern is compiled with MustCompile: the original silently
// discarded the Compile error, leaving a nil regexp that only crashed
// later inside execute's MatchString. Step patterns are effectively
// program constants, so panicking at definition time is the regexp
// package's documented convention.
func createstepdef(p string, f interface{}) stepdef {
	return stepdef{re.MustCompile(p), f}
}
// execute runs this step definition against line when its regexp
// matches. It returns true when the line matched, whether or not a
// handler was attached; any handler error is recorded on the step.
func (s stepdef) execute(line *step, output io.Writer, ctx interface{}) bool {
	if s.r.MatchString(line.String()) {
		if s.f != nil {
			substrs := s.r.FindStringSubmatch(line.String())
			w := &World{
				regexParams:substrs,
				MultiStep:line.mldata,
				output: output,
				ctx: ctx}
			// propagate the handler's error flag back to the step even
			// if call panics/unwinds
			defer func() { line.hasErrors = w.gotAnError }()
			s.call(w)
		}
		return true
	}
	return false
}

// String returns the source pattern of the step's regexp.
func (s stepdef) String() string {
	return s.r.String()
}
|
package main
import (
"net/http"
"html/template"
"errors"
"log"
)
// check if the user is currently logged in
// isLoggedIn reports whether the request carries a session cookie that
// maps to a known session. On a missing cookie or unknown session it
// redirects the client to /login and returns false.
//
// The original version never assigned the named result, so it returned
// false even for authenticated users.
func isLoggedIn(writer http.ResponseWriter, request *http.Request) (authenticated bool) {
	cookie, err := request.Cookie("chitchat_cookie")
	if err == http.ErrNoCookie {
		http.Redirect(writer, request, "/login", 302)
		return false
	}
	check(err, "Failed to get cookie")
	if _, ok := sessions[cookie.Value]; !ok {
		http.Redirect(writer, request, "/login", 302)
		return false
	}
	return true
}
// Get the user
func getUser(email string) (user User, err error) {
if u, ok := users[email]; ok {
user = u
} else {
err = errors.New("User not found")
}
return
}
// get the template
func getTemplate(name string)(t *template.Template) {
t = template.New(name)
t = template.Must(t.ParseGlob("templates/*.html"))
return
}
// checks errors
func check(err error, msg string) {
if err != nil {
log.Println(msg, err)
}
}
|
package component
// Position component.
type Position struct {
X, Y float64
}
// NewPosition position constructor.
func NewPosition(x, y float64) *Position {
return &Position{x, y}
}
// Name component implementation.
func (c *Position) Name() string {
return "position"
}
|
package gox
import (
"encoding/json"
"io/ioutil"
"os"
)
// KeyValueStoreFile is a string-to-string store persisted as a single
// JSON object in a file on disk.
type KeyValueStoreFile struct {
	Values map[string]string
	Path string
}

// NewDiskCache opens (creating if necessary) the store backed by
// filename and loads its current contents.
func NewDiskCache(filename string) (*KeyValueStoreFile, error) {
	store := &KeyValueStoreFile{Path: filename}
	if err := store.Load(); err != nil {
		return nil, err
	}
	return store, nil
}

// Get returns the value stored under key, or the empty string when the
// key is absent.
func (s *KeyValueStoreFile) Get(key string) string {
	// a missing key yields the zero value "", matching the old explicit check
	return s.Values[key]
}

// Save stores value under key and immediately persists the whole map.
func (s *KeyValueStoreFile) Save(key string, value string) error {
	s.Values[key] = value
	return s.SaveToDisk()
}

// SaveToDisk serializes Values as JSON and writes it to Path (mode 0644).
func (s *KeyValueStoreFile) SaveToDisk() error {
	data, err := json.Marshal(s.Values)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(s.Path, data, 0644)
}

// Load reads Path into Values, creating an empty file first when none
// exists. A missing or empty file yields an empty (non-nil) map.
func (s *KeyValueStoreFile) Load() error {
	loaded := make(map[string]string, 0)
	if _, err := os.Stat(s.Path); os.IsNotExist(err) {
		f, err := os.Create(s.Path)
		if err != nil {
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
	}
	raw, err := ioutil.ReadFile(s.Path)
	if err != nil {
		return err
	}
	if len(raw) > 0 {
		if err := json.Unmarshal(raw, &loaded); err != nil {
			return err
		}
	}
	s.Values = loaded
	return nil
}
|
package operator
import (
"testing"
)
// TestForStatement demonstrates range over a slice of indices: only the
// element at index 3 is bitwise-OR-ed with its index (14 | 3 == 15).
func TestForStatement(t *testing.T) {
	number := []int{11, 12, 13, 14, 15, 16}
	for i := range number {
		if i == 3 {
			number[i] |= i
		}
		t.Log(i)
	}
	t.Log(number)
}
// The range expression is evaluated exactly once, when the for statement
// starts, no matter how many iterations follow; its result is copied, so
// the loop iterates over a copy of the range expression's value, not the
// original.
func TestForStatement1(t *testing.T) {
	number := [...]int{1, 2, 3, 4, 5, 6}
	maxIndex := len(number) -1
	t.Log(number)
	// arrays: range iterates over a copy taken once at loop start, so
	// the writes below never feed into later iterations' e values
	for i,e := range number{
		if i == maxIndex{
			number[0] +=e
		}else{
			number[i+1] +=e
		}
	}
	t.Log(number)
	number1 := []int{1, 2, 3, 4, 5, 6}
	maxIndex1 := len(number1) -1
	t.Log(number1)
	// slices are reference types: the loop observes in-place updates,
	// so each iteration's e already includes the previous additions
	for i,e := range number1{
		if i == maxIndex1{
			number1[0] +=e
		}else{
			number1[i+1] +=e
		}
	}
	t.Log(number1)
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codec
import (
"testing"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/benchdaily"
"github.com/pingcap/tidb/util/chunk"
)
var valueCnt = 100
// composeEncodedData builds a codec-encoded byte payload containing
// the integers 0..size-1 as datums; used as benchmark input.
func composeEncodedData(size int) []byte {
	datums := make([]types.Datum, 0, size)
	for v := 0; v < size; v++ {
		datums = append(datums, types.NewDatum(v))
	}
	encoded, _ := EncodeValue(nil, nil, datums...)
	return encoded
}
// BenchmarkDecodeWithSize measures Decode when the caller supplies the
// exact value count, allowing the result slice to be pre-sized.
func BenchmarkDecodeWithSize(b *testing.B) {
	b.StopTimer()
	payload := composeEncodedData(valueCnt)
	b.StartTimer()
	for n := 0; n < b.N; n++ {
		if _, err := Decode(payload, valueCnt); err != nil {
			b.Fatal(err)
		}
	}
}
// BenchmarkDecodeWithOutSize measures Decode with a deliberately wrong
// size hint (1), forcing the result slice to grow during decoding.
func BenchmarkDecodeWithOutSize(b *testing.B) {
	b.StopTimer()
	payload := composeEncodedData(valueCnt)
	b.StartTimer()
	for n := 0; n < b.N; n++ {
		if _, err := Decode(payload, 1); err != nil {
			b.Fatal(err)
		}
	}
}
// BenchmarkEncodeIntWithSize measures EncodeInt when the destination
// buffer is pre-allocated with enough capacity for one int64.
func BenchmarkEncodeIntWithSize(b *testing.B) {
	for n := 0; n < b.N; n++ {
		buf := make([]byte, 0, 8)
		EncodeInt(buf, 10)
	}
}
// BenchmarkEncodeIntWithOutSize measures EncodeInt with a nil
// destination, so each call allocates its own buffer.
func BenchmarkEncodeIntWithOutSize(b *testing.B) {
	for n := 0; n < b.N; n++ {
		EncodeInt(nil, 10)
	}
}
// BenchmarkDecodeDecimal measures DecodeDecimal against a single
// pre-encoded decimal value.
func BenchmarkDecodeDecimal(b *testing.B) {
	dec := new(types.MyDecimal)
	if err := dec.FromFloat64(1211.1211113); err != nil {
		b.Fatal(err)
	}
	precision, frac := dec.PrecisionAndFrac()
	encoded, _ := EncodeDecimal([]byte{}, dec, precision, frac)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		if _, _, _, _, err := DecodeDecimal(encoded); err != nil {
			b.Fatal(err)
		}
	}
}
// BenchmarkDecodeOneToChunk measures decoding a single encoded bytes
// value straight into a chunk column.
// NOTE(review): the payload is a string datum while the target field
// type is TypeLonglong — presumably intentional for this benchmark,
// but worth confirming.
func BenchmarkDecodeOneToChunk(b *testing.B) {
	datum := new(types.Datum)
	*datum = types.NewStringDatum("a")
	encoded := append([]byte(nil), bytesFlag)
	encoded = EncodeBytes(encoded, datum.GetBytes())
	fieldType := types.NewFieldType(mysql.TypeLonglong)
	b.ResetTimer()
	decoder := NewDecoder(chunk.New([]*types.FieldType{fieldType}, 32, 32), nil)
	for n := 0; n < b.N; n++ {
		if _, err := decoder.DecodeOne(encoded, 0, fieldType); err != nil {
			b.Fatal(err)
		}
	}
}
// TestBenchDaily registers this file's benchmarks with the daily
// benchmark runner so their results are tracked over time.
func TestBenchDaily(t *testing.T) {
	benchdaily.Run(
		BenchmarkDecodeWithSize,
		BenchmarkDecodeWithOutSize,
		BenchmarkEncodeIntWithSize,
		BenchmarkEncodeIntWithOutSize,
		BenchmarkDecodeDecimal,
		BenchmarkDecodeOneToChunk,
	)
}
|
package limitutil
import (
"balansir/internal/configutil"
"sync"
"time"
"golang.org/x/time/rate"
)
// visitor holds the rate limiter and last-activity timestamp for a
// single client IP.
type visitor struct {
	limiter *rate.Limiter
	lastSeen time.Time
}
// Limiter is a per-IP rate limiter registry guarded by an RWMutex.
type Limiter struct {
	mux sync.RWMutex
	list map[string]*visitor
}
// limiter is the process-wide singleton, initialized once via `once`.
var limiter *Limiter
var once sync.Once
// GetLimiter lazily creates and returns the process-wide singleton
// Limiter; sync.Once guarantees exactly one initialization.
func GetLimiter() *Limiter {
	once.Do(func() {
		limiter = &Limiter{list: map[string]*visitor{}}
	})
	return limiter
}
// GetVisitor returns the rate limiter for ip, creating one from the
// configured rate and bucket size on first sight. For known visitors
// the lastSeen timestamp is refreshed so cleanup keeps them alive.
func (v *Limiter) GetVisitor(ip string, configuration *configutil.Configuration) *rate.Limiter {
	v.mux.Lock()
	defer v.mux.Unlock()
	existing, ok := v.list[ip]
	if ok {
		existing.lastSeen = time.Now()
		return existing.limiter
	}
	fresh := rate.NewLimiter(rate.Limit(configuration.RatePerSecond), configuration.RateBucket)
	v.list[ip] = &visitor{
		limiter:  fresh,
		lastSeen: time.Now(),
	}
	return fresh
}
// CleanOldVisitors loops forever, evicting visitors idle for more than
// one second. It is intended to run in its own goroutine; it never
// returns.
func (v *Limiter) CleanOldVisitors() {
	ticker := time.NewTicker(time.Second)
	for range ticker.C {
		v.mux.Lock()
		for ip, entry := range v.list {
			if time.Since(entry.lastSeen) > time.Second {
				delete(v.list, ip)
			}
		}
		v.mux.Unlock()
	}
}
|
package main
import (
"bufio"
"fmt"
"math/rand"
"os"
"time"
)
// World is a one-dimensional cellular world: Cells holds each cell's
// value (0 means empty) and Width is the number of cells.
type World struct {
	Cells []int
	Width int
}

// NeweWorld builds a world of the given width. Each cell is populated
// with probability density, receiving a random value in [-9, 9]
// (zero included, which leaves the cell effectively empty).
func NeweWorld(width int, density float64) *World {
	cells := make([]int, width)
	// Idiomatic index-only range: the unused value variable of the
	// original `for i, _ := range` is dropped (flagged by gofmt/vet).
	for i := range cells {
		if rand.Float64() < density {
			cells[i] = rand.Intn(19) - 9
		}
	}
	return &World{
		Cells: cells,
		Width: width,
	}
}
// Reproduce advances the world one generation. Every non-zero cell
// moves by its own value; destinations outside [0, Width) are lost
// (the commented-out mod call would instead wrap around). Cells whose
// destination receives arrivals are updated by the collision rule in
// the second loop; cells receiving nothing keep their old value.
func (w *World) Reproduce() {
	newcells := make([]int, w.Width) // sum of values arriving at each index
	cc := make([]int, w.Width)       // number of arrivals at each index
	for i, old := range w.Cells {
		if old != 0 {
			// i = mod(i+old, w.Width)
			i = i + old // destination index; no wrap-around
			if 0 <= i && i < w.Width {
				cc[i] += 1
				newcells[i] += old
			}
		}
	}
	for i, old := range w.Cells {
		if cc[i] > 0 {
			// Collision rule: with a single arrival this is just the
			// arriving value; each extra arrival subtracts one copy of
			// the previous occupant — NOTE(review): confirm intent.
			w.Cells[i] = newcells[i] - old*(cc[i]-1)
		}
	}
}
// mod returns the non-negative remainder of d modulo m (for positive
// m), unlike Go's % operator which can yield negative results.
func mod(d, m int) int {
	shifted := d%m + m
	return shifted % m
}
// main seeds the RNG, builds a 30-cell world at density 0.2, and steps
// it for up to 30000 generations. Each generation blocks on a line of
// stdin (press Enter to advance) and prints the current cells.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	rand.Seed(time.Now().UnixNano())
	world := NeweWorld(30, 0.2)
	for step := 0; step < 30000; step++ {
		scanner.Scan()
		for _, cell := range world.Cells {
			fmt.Printf("%3d", cell)
		}
		world.Reproduce()
	}
}
|
package _230_Kth_Smallest_Element_in_a_BST
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// kthSmallest returns the value of the k-th smallest node in a BST by
// iterative in-order traversal with an explicit stack, or -1 when the
// tree has fewer than k nodes.
func kthSmallest(root *TreeNode, k int) int {
	stack := []*TreeNode{}
	cur := root
	seen := 0
	for cur != nil || len(stack) != 0 {
		// Push the entire left spine of the current subtree.
		for ; cur != nil; cur = cur.Left {
			stack = append(stack, cur)
		}
		// Pop the in-order successor.
		top := len(stack) - 1
		cur = stack[top]
		stack = stack[:top]
		seen++
		if seen == k {
			return cur.Val
		}
		cur = cur.Right
	}
	return -1
}
|
package service
import (
"time"
"github.com/pkg/errors"
"github.com/shharn/blog/logger"
"github.com/shharn/blog/model"
"github.com/shharn/blog/repository"
"github.com/shharn/blog/session"
)
// Sentinel errors returned by the authentication service.
// NOTE(review): Go convention would name these ErrInvalidEmailOrPassword
// etc.; kept as-is since the exported names are part of the package API.
var (
	InvalidEmailOrPasswordError = errors.New("Invalid email or password")
	SessionCreationFailureError = errors.New("Fail to create a session")
	InvalidTokenError = errors.New("Invalid token")
)
// AuthenticationService defines credential checking, token lifecycle
// management, and OAuth-based sign-in for the blog.
type AuthenticationService interface {
	// ValidateEmailAndPassword reports whether the credential pair is valid.
	ValidateEmailAndPassword(string, string) bool
	// CreateToken builds a session for the email via the transform func
	// and encodes it into a token string.
	CreateToken(string, session.AdminSessionTransformFunc) (string, error)
	// ValidateToken checks a token and returns the authentication it carries.
	ValidateToken(string) (model.Authentication, error)
	// StoreToken registers a token and schedules its expiry after the duration.
	StoreToken(string, time.Duration)
	// RevokeToken removes a token from session storage.
	RevokeToken(string)
	// AuthorizeWithOAuthProvider returns the provider's consent-page URL.
	AuthorizeWithOAuthProvider(string) string
	// GetBlogTokenFromAuthCode exchanges an OAuth auth code for a blog token.
	GetBlogTokenFromAuthCode(string, string) (string, error)
}
// BlogAuthenticationService implements AuthenticationService on top of
// an authentication repository, a session store, and a token codec.
type BlogAuthenticationService struct {
	repo repository.AuthenticationRepository
	sessionStorage session.SessionStorage
	tokenMaker session.TokenMaker
}
// ValidateEmailAndPassword checks the credential pair against the
// repository. Repository errors are logged and treated as invalid.
func (s *BlogAuthenticationService) ValidateEmailAndPassword(email, password string) bool {
	ctx := s.repo.Context()
	defer ctx.(repository.Disposable).Dispose()
	ok, err := s.repo.Authenticate(ctx, email, password)
	if err != nil {
		logger.Error(err)
		return false
	}
	return ok
}
// CreateToken builds a session for email via fn and encodes it into a
// signed token string.
func (s *BlogAuthenticationService) CreateToken(email string, fn session.AdminSessionTransformFunc) (string, error) {
	sess, err := fn(email)
	if err != nil {
		return "", err
	}
	return s.tokenMaker.Encode(sess)
}
// StoreToken registers the token in session storage and schedules its
// removal after d, so the token self-expires.
func (s *BlogAuthenticationService) StoreToken(token string, d time.Duration) {
	s.sessionStorage.Put(token)
	// time.AfterFunc runs the removal in its own goroutine after d,
	// equivalent to the sleep-in-goroutine pattern.
	time.AfterFunc(d, func() {
		s.sessionStorage.Remove(token)
	})
}
// ValidateToken verifies that the token is both present in session
// storage and decodable. A stored but undecodable token is evicted so
// it cannot be retried.
func (s *BlogAuthenticationService) ValidateToken(token string) (model.Authentication, error) {
	if !s.sessionStorage.Has(token) {
		return model.InvalidAuthentication, InvalidTokenError
	}
	rawSession, err := s.tokenMaker.Decode(token)
	if err != nil {
		s.sessionStorage.Remove(token)
		return model.InvalidAuthentication, err
	}
	sess := rawSession.(*session.Session)
	return model.Authentication{
		IsValid:  true,
		Platform: sess.Platform,
		Admin:    sess.Admin,
	}, nil
}
// RevokeToken removes the token from session storage, invalidating it
// for all subsequent ValidateToken calls.
func (s *BlogAuthenticationService) RevokeToken(token string) {
	s.sessionStorage.Remove(token)
}
// AuthorizeWithOAuthProvider returns the OAuth consent-page URL for
// the given platform, or "" when the platform has no known provider.
func (s *BlogAuthenticationService) AuthorizeWithOAuthProvider(platform string) string {
	logger.WithFields(logger.Tuples{
		"platform": platform,
	})("trace", "OAuth ID received")
	svc := oauthServiceFactory(platform)
	if svc == nil {
		return ""
	}
	return svc.GetAuthCodeURL()
}
// GetBlogTokenFromAuthCode exchanges an OAuth authorization code for
// the provider's token, fetches the user's profile, and encodes it
// into a blog session token.
func (s *BlogAuthenticationService) GetBlogTokenFromAuthCode(authCode, platform string) (string, error) {
	svc := oauthServiceFactory(platform)
	// Guard against unknown platforms: AuthorizeWithOAuthProvider
	// already nil-checks the factory result, so this path must too —
	// otherwise svc.Exchange below panics on a nil service.
	if svc == nil {
		return "", errors.New("unsupported OAuth platform: " + platform)
	}
	tok, err := svc.Exchange(authCode)
	if err != nil {
		return "", err
	}
	profile, err := svc.GetProfile(tok)
	if err != nil {
		return "", err
	}
	session := profile.ToSession()
	blogToken, err := s.tokenMaker.Encode(session)
	if err != nil {
		return "", err
	}
	return blogToken, nil
}
// NewAuthenticationService wires a BlogAuthenticationService from its
// repository, session storage, and token maker dependencies.
func NewAuthenticationService(r repository.AuthenticationRepository, s session.SessionStorage, tm session.TokenMaker) AuthenticationService {
	return &BlogAuthenticationService{
		repo:           r,
		sessionStorage: s,
		tokenMaker:     tm,
	}
}
|
package main
import (
"errors"
"time"
sj "github.com/bitly/go-simplejson"
"github.com/donnie4w/go-logger/logger"
"job"
)
// Workshop coordinates a message pipeline: a worker goroutine pulls
// messages from a source provider, fans them out to processor
// goroutines via msgChan, and processors hand items to collector
// goroutines via itemChans. Control/report channels connect each layer
// to the controlling goroutine (see Run).
type Workshop struct {
	sub string // business name; used in log lines
	ctx *sj.Json
	sleepWhenNeed bool // slack off (sleep) when the machine is unhealthy
	ctrlChan chan int // [0] unis -> channel -> control
	reportChan chan int // [0] control -> channel -> unis
	processorNum int
	collectorNum int
	workCtrlChan chan int // [1] control -> channel -> work
	workReportChan chan int // [0] work -> channel -> control
	msgChan chan string // [n] work -> channel -> processor
	itemChans [](chan job.Item) // [n] processor -> channel -> collector
	PRCtrlChans [](chan int) // [1] control -> channel(try) -> processor || work -> channel(wait) -> processor
	PRReportChan chan int // [0] processor -> channel -> control
	CLCtrlChans [](chan int) // [1] control -> channel(try) -> processor || work -> channel(wait) -> processor
	CLReportChan chan int // [0] collector -> channel -> control
	provider SrcProvider
	// memLimit uint64
	processedLine int
	ticker_ *time.Ticker
}
// Init wires the workshop: stores config and control channels, creates
// the source provider, the tick timer, and all internal channels, and
// starts the processor/collector goroutines via initChannel. Order
// matters: report channels must exist before initChannel launches the
// goroutines that write to them.
func (p *Workshop) Init(sub string, ctx *sj.Json, ctrlChan chan int, reportChan chan int) (err error) {
	p.sub = sub
	// Pass sub down in the config for processors and collectors to use.
	ctx.Set("runtime", map[string]interface{}{
		"sub": sub,
	})
	p.ctx = ctx
	p.ctrlChan = ctrlChan
	p.reportChan = reportChan
	p.processedLine = 0
	if err = p.initSrcProvider(); err != nil {
		logger.WarnSubf(p.sub, "Workshop.Init err: %v", err)
		return err
	}
	// p.memLimit = ctx.Get("main").Get("mem_limit").MustUint64()
	tickSec := ctx.Get("main").Get("tick_sec").MustInt(5) // default tick period: 5s
	p.ticker_ = time.NewTicker(time.Duration(tickSec) * time.Second)
	p.sleepWhenNeed = ctx.Get("main").Get("sleep_when_need").MustBool(false)
	p.workCtrlChan = make(chan int, 1)
	p.workReportChan = make(chan int)
	p.PRReportChan = make(chan int)
	p.CLReportChan = make(chan int)
	if err = p.initChannel(); err != nil {
		return err
	}
	json_str, _ := p.ctx.MarshalJSON()
	logger.InfoSubf(p.sub, "Workshop.Init success, ctx: %s", string(json_str))
	return nil
}
// initChannel creates the message/item/control channels, launches the
// processor and collector goroutines, and waits for each to report
// RET_INIT_SUCCESS. If any goroutine fails to initialize, exit
// commands are sent to the already-started ones and an error is
// returned.
func (p *Workshop) initChannel() error {
	// create channels
	p.processorNum = p.ctx.Get("main").Get("processor_num").MustInt()
	p.collectorNum = p.ctx.Get("main").Get("collector_num").MustInt()
	msgChanSize := p.ctx.Get("main").Get("msg_chan_size").MustInt()
	itemChanSize := p.ctx.Get("main").Get("item_chan_size").MustInt()
	p.msgChan = make(chan string, msgChanSize)
	p.itemChans = make([](chan job.Item), p.collectorNum)
	for i := 0; i < p.collectorNum; i++ {
		p.itemChans[i] = make(chan job.Item, itemChanSize)
	}
	p.PRCtrlChans = make([](chan int), p.processorNum)
	for i := 0; i < p.processorNum; i++ {
		p.PRCtrlChans[i] = make(chan int, 1)
	}
	p.CLCtrlChans = make([](chan int), p.collectorNum)
	for i := 0; i < p.collectorNum; i++ {
		p.CLCtrlChans[i] = make(chan int, 1)
	}
	// start processor goroutines
	processorSuccessNum := 0
	for i := 0; i < p.processorNum; i++ {
		go p.ProcessRoutine(i, p.PRCtrlChans[i])
	}
	// Each ProcessRoutine reports exactly once on PRReportChan.
	for i := 0; i < p.processorNum; i++ {
		isSuccess := <-p.PRReportChan
		if isSuccess == RET_INIT_SUCCESS {
			processorSuccessNum++
		}
	}
	if processorSuccessNum < p.processorNum {
		logger.WarnSubf(p.sub, "Workshop.initChannel failed processor success %d/%d",
			processorSuccessNum, p.processorNum)
		p.sendCtrlInfo(p.PRCtrlChans, CMD_EXIT)
		return errors.New("some processor init fail")
	}
	logger.InfoSubf(p.sub, "Workshop.initChannel start %d processor, all success", processorSuccessNum)
	// start collector goroutines
	collectorSuccessNum := 0
	for i := 0; i < p.collectorNum; i++ {
		go p.CollectRoutine(i, p.CLCtrlChans[i], p.itemChans[i])
	}
	// Each CollectRoutine reports exactly once on CLReportChan.
	for i := 0; i < p.collectorNum; i++ {
		isSuccess := <-p.CLReportChan
		if isSuccess == RET_INIT_SUCCESS {
			collectorSuccessNum++
		}
	}
	if collectorSuccessNum < p.collectorNum {
		logger.InfoSubf(p.sub, "Workshop.initChannel failed collector success %d/%d",
			collectorSuccessNum, p.collectorNum)
		p.sendCtrlInfo(p.PRCtrlChans, CMD_EXIT)
		p.sendCtrlInfo(p.CLCtrlChans, CMD_EXIT)
		return errors.New("some colletor init fail")
	}
	logger.InfoSubf(p.sub, "Workshop.initChannel start %d colletors", collectorSuccessNum)
	return nil
}
// ProcessRoutine is one processor goroutine. It instantiates every
// enabled processor from the "processor" config section, reports init
// success/failure on PRReportChan, then loops: commands from
// ctrlChannel (CMD_EXIT / CMD_TICK) are handled, and each message from
// msgChan is given to every processor. After an exit command it drains
// msgChan before destroying the processors and reporting exit.
func (p *Workshop) ProcessRoutine(id int, ctrlChannel chan int) {
	processorFlags := p.ctx.Get("processor").MustMap() // eg: UrlProcessor => true
	logger.InfoSubf(p.sub, "Workshop.ProcessRoutine begin id: %d, processor: %v", id, processorFlags)
	processorMap := make(map[string]job.Processor)
	for name, flag := range processorFlags {
		enable, ok := flag.(bool)
		if !ok {
			logger.ErrorSubf(p.sub, "Workshop.ProcessRoutine processor name: %s, setting invalid flag: %v", name, flag)
			p.PRReportChan <- RET_INIT_FAIL
			return
		}
		if !enable {
			continue
		}
		processorMap[name] = job.NewProcessor(name)
		if processorMap[name] == nil {
			logger.ErrorSubf(p.sub, "Workshop.ProcessRoutine processor %s create failed", name)
			p.PRReportChan <- RET_INIT_FAIL
			return
		}
		if err := processorMap[name].Init(p.ctx, id, p.itemChans); err != nil {
			logger.ErrorSubf(p.sub, "Workshop.ProcessRoutine processor %s init failed, err: %v", name, err)
			delete(processorMap, name)
			p.PRReportChan <- RET_INIT_FAIL
			return
		}
	}
	if len(processorMap) <= 0 {
		logger.ErrorSubf(p.sub, "Workshop.ProcessorRoutine no valid processor")
		p.PRReportChan <- RET_INIT_FAIL
		return
	}
	p.PRReportChan <- RET_INIT_SUCCESS
	logger.InfoSubf(p.sub, "Workshop.ProcessRoutine success id: %d, count: %d", id, len(processorMap))
	exitFlag := false
LOOP:
	for {
		// exit condition: an exit command was seen and msgChan is drained
		if exitFlag && len(p.msgChan) <= 0 {
			logger.InfoSubf(p.sub, "Workshop.ProcessRoutine %d th ProcessRoutine is closing", id)
			break LOOP
		}
		select {
		case cmd := <-ctrlChannel:
			if cmd == CMD_EXIT {
				logger.InfoSubf(p.sub, "Workshop.ProcessRoutine %d th ProcessorRoutine receive exit cmd", id)
				exitFlag = true
			} else if cmd == CMD_TICK {
				for _, proc := range processorMap {
					proc.Tick()
				}
			} else {
				logger.ErrorSubf(p.sub, "Workshop.ProcessRoutine %d th ProcessRoutine receive a un-expected cmd: %v",
					id, cmd)
			}
		case msg := <-p.msgChan:
			for name, proc := range processorMap {
				if err := proc.Process(msg); err != nil {
					logger.ErrorSubf(p.sub, "Workshop.ProcessRoutine %d th ProcessRoutine, %s process fail, err: %v",
						id, name, err)
				}
			}
		case <-time.After(time.Second * 1):
			// logger.Debug(id, "th ProcessRoutine nothing to do")
		} // select
	} // for
	for _, proc := range processorMap {
		proc.Destory()
	}
	p.PRReportChan <- RET_EXIT_SUCCESS
}
// CollectRoutine is one collector goroutine. It instantiates a
// collector per category from the "collector" config section, reports
// init success/failure on CLReportChan, then loops: commands from
// ctrlChannel (CMD_EXIT / CMD_TICK) are handled, and each item from
// channel is routed to the collector matching its Category. After an
// exit command it drains the channel, destroys the collectors, and
// reports exit.
func (p *Workshop) CollectRoutine(id int, ctrlChannel chan int, channel chan job.Item) {
	collectorInfos := p.ctx.Get("collector").MustMap()
	logger.InfoSubf(p.sub, "Workshop.CollectRoutine begin id: %d, collector: %v", id, collectorInfos)
	collectorMap := make(map[string]job.Collector)
	for category, name := range collectorInfos {
		collectorName, ok := name.(string)
		if !ok {
			logger.ErrorSubf(p.sub, "Workshop.ProcessRoutine collector setting invalid: %s => %s", category, name)
			p.CLReportChan <- RET_INIT_FAIL
			return
		}
		collectorMap[category] = job.NewCollector(collectorName)
		if collectorMap[category] == nil {
			logger.ErrorSubf(p.sub, "Workshop.ProcessRoutine collector %s create failed", collectorName)
			p.CLReportChan <- RET_INIT_FAIL
			return
		}
		if err := collectorMap[category].Init(p.ctx, id); err != nil {
			logger.ErrorSubf(p.sub, "Workshop.ProcessRoutine collector %s init failed, err: %v", collectorName, err)
			delete(collectorMap, category)
			p.CLReportChan <- RET_INIT_FAIL
			return
		}
	}
	if len(collectorMap) <= 0 {
		logger.ErrorSubf(p.sub, "Workshop.CollectRoutine no valid collector")
		p.CLReportChan <- RET_INIT_FAIL
		return
	}
	p.CLReportChan <- RET_INIT_SUCCESS
	logger.InfoSubf(p.sub, "Workshop.CollectRoutine success id: %d, count: %d", id, len(collectorMap))
	exitFlag := false
LOOP:
	for {
		// exit condition: an exit command was seen and the item channel is drained
		if exitFlag && len(channel) <= 0 {
			logger.InfoSubf(p.sub, "Workshop.CollectRoutine id: %d is closing", id)
			break LOOP
		}
		select {
		case cmd := <-ctrlChannel:
			if cmd == CMD_EXIT {
				logger.InfoSubf(p.sub, "Workshop.CollectRoutine id: %d receive exit cmd", id)
				exitFlag = true
			} else if cmd == CMD_TICK {
				for _, collector := range collectorMap {
					collector.Tick()
				}
			} else {
				logger.ErrorSubf(p.sub, "Workshop.CollectRoutine id: %d receive a un-expected cmd: %v",
					id, cmd)
			}
		case processed_item := <-channel:
			category := processed_item.Category
			if collector, ok := collectorMap[category]; ok {
				if err := collector.Collect(processed_item); err != nil {
					logger.ErrorSubf(p.sub, "Workshop.CollectRoutine id: %d %s collect fail, err: %v",
						id, category, err)
				}
			} else {
				logger.ErrorSubf(p.sub, "Workshop.CollectRoutine id: %d data %s has no proper collector: %v",
					id, category, collectorMap)
			}
		case <-time.After(time.Second * 1):
			// logger.Debug(id, "th CollectRoutine nothing to do")
		} // select
	} // for
	for _, collector := range collectorMap {
		collector.Destory()
	}
	p.CLReportChan <- RET_EXIT_SUCCESS
}
// worker is the blocking work goroutine: it pulls message batches from
// the source provider and pushes them onto msgChan. On CMD_EXIT it
// shuts down processors first, then collectors, retry-broadcasting the
// exit command until every goroutine has reported back, and finally
// reports RET_EXIT_SUCCESS on workReportChan.
func (p *Workshop) worker() {
	// work goroutine
	logger.InfoSubf(p.sub, "WorkShop.worker begin")
	exitedProcesser := 0
	exitedCollector := 0
	for {
		select {
		// Fetch commands from the control goroutine via workCtrlChan;
		// when there is no command, just do the work.
		case cmd := <-p.workCtrlChan:
			logger.InfoSubf(p.sub, "WorkShop.worker get a cmd: %v", cmd)
			if cmd == CMD_EXIT {
				logger.InfoSubf(p.sub, "Workshop.worker quit, no msg will push to msgChan, stock will go on")
				// Tell the processors to shut down.
				// This step may block; all blocking work is delegated to the worker.
				// before:
				//   sending exit signals sequentially and blocking was too slow
				// after:
				//   keep try-broadcasting so every goroutine gets the exit
				//   signal as early as possible, speeding up graceful shutdown
				func() {
					p.trySendCtrlInfo(p.PRCtrlChans, cmd)
					for {
						select {
						case <-p.PRReportChan:
							exitedProcesser++
							logger.InfoSubf(p.sub, "WorkShop.worker receive a exit from processer, total: %d",
								exitedProcesser)
							if exitedProcesser >= p.processorNum {
								return
							}
						case <-time.After(time.Second * 5):
							logger.InfoSubf(p.sub, "WorkShop.worker after some time from close processer, total: %d",
								exitedProcesser)
							p.trySendCtrlInfo(p.PRCtrlChans, cmd)
						}
					}
				}()
				func() {
					p.trySendCtrlInfo(p.CLCtrlChans, cmd)
					for {
						select {
						case <-p.CLReportChan:
							exitedCollector++
							logger.InfoSubf(p.sub, "WorkShop.worker receive a exit from collector, total: %d",
								exitedCollector)
							if exitedCollector >= p.collectorNum {
								p.workReportChan <- RET_EXIT_SUCCESS
								return
							}
						case <-time.After(time.Second * 5):
							logger.InfoSubf(p.sub, "WorkShop.worker after some time from close collector, total: %d",
								exitedCollector)
							p.trySendCtrlInfo(p.CLCtrlChans, cmd)
						}
					}
				}()
				return
			} else {
				logger.WarnSubf(p.sub, "Workshop.worker bad cmd type: %v", cmd)
			}
		default:
			// how many msgs it returns each time depend on max_messsage_fetch_size in etc/qbus-client.conf
			messages, err := p.provider.GetNextMsg()
			if err != nil {
				logger.ErrorSubf(p.sub, "WorkShop.worker GetNextMsg error: %v", err)
				// wait a moment before fetching messages again
				time.Sleep(1 * time.Second)
				continue
			}
			if messages == nil || len(messages) <= 0 {
				// No messages to process means we are idle, so sleep a bit.
				logger.InfoSubf(p.sub, "WorkShop.worker receive nothing from srcProvider")
				time.Sleep(5 * time.Second)
				continue
			}
			logger.InfoSubf(p.sub, "WorkShop.worker receive %d msgs from srcProvider", len(messages))
			for _, v := range messages {
				if len(v) > 0 {
					p.msgChan <- string(v)
				}
			}
			logger.InfoSubf(p.sub, "WorkShop.worker send %d msgs successful", len(messages))
			p.processedLine += len(messages)
			p.provider.Ack()
		}
	}
}
// Run is the non-blocking control loop. It launches the worker
// goroutine, relays CMD_EXIT from ctrlChan to the worker, forwards
// periodic ticks to processors and collectors while still working, and
// exits once the worker reports a successful shutdown.
func (p *Workshop) Run() {
	// Two goroutines: a control one that must never block, and a
	// worker one that is allowed to block.
	go p.worker()
	isWorking := true
	// The control goroutine is the main one and holds the right to exit.
LOOP:
	for {
		select {
		case cmd := <-p.ctrlChan:
			if cmd == CMD_EXIT {
				if !isWorking {
					logger.InfoSubf(p.sub, "Workshop.Run control goroutine is going home, ignore")
					continue
				}
				logger.InfoSubf(p.sub, "Workshop.Run control goroutine tell work goroutine to go home")
				isWorking = false
				// workCtrlChan has capacity 1, so this send normally won't block.
				// On this command the worker tells the processors to close,
				// which may block; all processors must be closed before the
				// collectors are closed.
				p.workCtrlChan <- CMD_EXIT
				// From now on, stop ticking processors/collectors so the
				// worker's exit sends aren't contended; the ticker itself is
				// kept, we just don't forward ticks downstream.
				// p.ticker_.Stop()
			} else {
				logger.WarnSubf(p.sub, "Workshop.Run bad cmd type: %v", cmd)
			}
		case <-p.workReportChan:
			logger.InfoSubf(p.sub, "Workshop.Run worker report exit successful, so exit")
			break LOOP
		case <-p.ticker_.C:
			logger.InfoSubf(p.sub, "Workshop.Run tick begin")
			// must not block
			if isWorking {
				p.trySendCtrlInfo(p.PRCtrlChans, CMD_TICK)
				p.trySendCtrlInfo(p.CLCtrlChans, CMD_TICK)
			}
			p.Tick()
		}
	}
	// all destory
	p.provider.Destory()
	p.reportChan <- RET_EXIT_SUCCESS
}
// lastProcessedLine remembers, per sub, the processedLine value at the
// previous tick so Tick can report a per-tick delta.
var lastProcessedLine = make(map[string]int, 0)
// Tick logs throughput (messages this tick and in total) and channel
// occupancy. Called from the control loop on every ticker fire.
func (p *Workshop) Tick() error {
	logger.InfoSubf(p.sub, "Workshop.Tick receive %d(current tick), %d(totally) msgs",
		p.processedLine-lastProcessedLine[p.sub], p.processedLine)
	lastProcessedLine[p.sub] = p.processedLine
	p.ChannelStat()
	// p.GC()
	return nil
}
// ChannelStat logs the current occupancy of every channel the workshop
// owns, serving as a coarse backpressure indicator.
func (p *Workshop) ChannelStat() {
	logger.InfoSubf(p.sub, "Workshop.ChannelStat len(ctrlChan): %d", len(p.ctrlChan))
	logger.InfoSubf(p.sub, "Workshop.ChannelStat len(workCtrlChan): %d", len(p.workCtrlChan))
	logger.InfoSubf(p.sub, "Workshop.ChannelStat len(msgChan): %d", len(p.msgChan))
	for idx, ch := range p.itemChans {
		logger.InfoSubf(p.sub, "Workshop.ChannelStat index: %d, len(itemChan): %d", idx, len(ch))
	}
}
// func (p *Workshop) GC() {
// runtime.ReadMemStats(&(p.ms))
// alloc := p.ms.Alloc / 1024 / 1024
// logger.Info("memAlloc:", alloc, "M heapAlloc:", p.ms.HeapAlloc/1024/1024, "M, stackAlloc:", p.ms.StackInuse/1024/1024, "M")
// if alloc >= p.memLimit {
// debug.FreeOSMemory()
// runtime.ReadMemStats(&(p.ms))
// alloc = p.ms.Alloc / 1024 / 1024
// logger.Info("after GC memAlloc:", alloc, "M heapAlloc:", p.ms.HeapAlloc/1024/1024, "M, stackAlloc:", p.ms.StackInuse/1024/1024, "M")
// }
// }
// initSrcProvider reads the src_provider config section, instantiates
// the provider named by src_type, and initializes it with its own
// detail sub-section.
func (p *Workshop) initSrcProvider() error {
	cfg := p.ctx.Get("src_provider")
	if cfg == nil {
		return errors.New("no src_provider section")
	}
	srcType := cfg.Get("src_type").MustString()
	if p.provider = NewSrcProvider(srcType); p.provider == nil {
		return errors.New("provider create failed")
	}
	detail := cfg.Get(srcType)
	if detail == nil {
		return errors.New("no src_provider detail section " + srcType)
	}
	if err := p.provider.Init(detail); err != nil {
		return err
	}
	logger.InfoSubf(p.sub, "Workshop.initSrcProvider success, srcType: %s", srcType)
	return nil
}
// sendCtrlInfo broadcasts cmd to every channel with blocking sends:
// delivery is guaranteed, but the call may stall until receivers are
// ready.
func (p *Workshop) sendCtrlInfo(channels [](chan int), cmd int) {
	logger.InfoSubf(p.sub, "Workshop.sendCtrlInfo begin")
	for _, ch := range channels {
		ch <- cmd
	}
	logger.InfoSubf(p.sub, "Workshop.sendCtrlInfo end")
}
// trySendCtrlInfo broadcasts cmd without blocking: a full channel is
// skipped (and logged), so delivery is best-effort only.
func (p *Workshop) trySendCtrlInfo(channels [](chan int), cmd int) {
	logger.InfoSubf(p.sub, "Workshop.trySendCtrlInfo begin")
	for idx, ch := range channels {
		select {
		case ch <- cmd:
		default:
			logger.InfoSubf(p.sub, "Workshop.trySendCtrlInfo %d th channel full", idx)
		}
	}
	logger.InfoSubf(p.sub, "Workshop.trySendCtrlInfo end")
}
|
package main
import (
"fmt"
"os"
"bufio"
"strings"
"strconv"
)
// filePath is the puzzle input; length is the number of firewall columns.
const filePath = "input.txt"
const length = 85
// main finds the smallest starting delay that lets the packet cross
// the firewall without being caught, and prints it.
func main() {
	firewall := parseInput()
	delay := 0
	// Keep incrementing the delay while a run with it gets caught.
	for attemptRun(firewall, delay) {
		delay++
	}
	fmt.Println(delay)
}
// attemptRun reports whether a packet entering the firewall after
// `delay` picoseconds is caught by any scanner. A scanner of depth d
// returns to position 0 every 2*(d-1) picoseconds, so the packet is
// caught at column c iff (delay+c) % (2*(d-1)) == 0.
func attemptRun(fw [][]int, delay int) bool {
	for col := 0; col < len(fw); col++ {
		depth := len(fw[col])
		// skip columns with no scanner
		if depth == 0 {
			continue
		}
		// A depth-1 scanner never moves, so the packet is always caught.
		// (The original computed i % 0 here, which panics with a
		// division by zero.)
		if depth == 1 {
			return true
		}
		if (delay+col)%(2*(depth-1)) == 0 {
			return true
		}
	}
	return false
}
func parseInput() ([][]int){
firewall := make([][]int,length)
file,_ := os.Open(filePath)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := strings.Split(scanner.Text(), ": ")
index,_ := strconv.Atoi(line[0])
depth,_ := strconv.Atoi(line[1])
firewall[index] = make([]int, depth)
firewall[index][0] = 1 //init security scanner
}
return firewall
} |
package dtx
import (
"fmt"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/pkg/errors"
)
// TestRunInTransactionSucceed verifies the happy path: a transaction
// that puts one item and returns nil ends up committed, and the item
// is persisted and unlocked afterwards.
func TestRunInTransactionSucceed(t *testing.T) {
	if err := setup(); err != nil {
		t.Fatalf("%v", err)
	}
	key := newKey(integHashTableName)
	item := make(map[string]*dynamodb.AttributeValue)
	for k, v := range key {
		item[k] = v
	}
	item["attr_s"] = &dynamodb.AttributeValue{S: aws.String("s")}
	input := &dynamodb.PutItemInput{
		TableName: aws.String(integHashTableName),
		Item: item,
	}
	txID := ""
	err := manager.RunInTransaction(func(tx *Transaction) error {
		txID = tx.ID()
		tx.PutItem(input)
		return nil
	})
	if err != nil {
		t.Fatalf("%+v", err)
	}
	txState, _, err := manager.TransactionInfo(txID)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if txState != TransactionItemStateCommitted {
		t.Fatalf("%s is not committed", txState)
	}
	// Item must exist and be lock-free once the transaction commits.
	if err := assertItemNotLocked(assertItemNotLockedArg{tableName: integHashTableName, key: key, expected: item, shouldExist: true}); err != nil {
		t.Fatalf("%+v", err)
	}
}
// TestRunInTransactionFailByUser verifies that when the user callback
// returns an error, the transaction is rolled back and the item put
// inside it never materializes.
func TestRunInTransactionFailByUser(t *testing.T) {
	if err := setup(); err != nil {
		t.Fatalf("%v", err)
	}
	key := newKey(integHashTableName)
	item := make(map[string]*dynamodb.AttributeValue)
	for k, v := range key {
		item[k] = v
	}
	item["attr_s"] = &dynamodb.AttributeValue{S: aws.String("s")}
	input := &dynamodb.PutItemInput{
		TableName: aws.String(integHashTableName),
		Item: item,
	}
	txID := ""
	err := manager.RunInTransaction(func(tx *Transaction) error {
		txID = tx.ID()
		tx.PutItem(input)
		return fmt.Errorf("fail by user")
	})
	if err == nil {
		t.Fatalf("should fail")
	}
	if !strings.Contains(err.Error(), "fail by user") {
		t.Fatalf("wrong err %v", err)
	}
	txState, _, err := manager.TransactionInfo(txID)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if txState != TransactionItemStateRolledBack {
		// Fixed message: the assertion checks the rolled-back state, but
		// the original said "is not committed" (copy-paste from the
		// success test).
		t.Fatalf("%s is not rolled back", txState)
	}
	if err := assertItemNotLocked(assertItemNotLockedArg{tableName: integHashTableName, key: key, shouldExist: false}); err != nil {
		t.Fatalf("%v", err)
	}
}
// TestRunInTransactionFail verifies that a DynamoDB-level failure
// (injected via a put filter that rejects writes to the hash table)
// surfaces to the caller and leaves no item behind.
func TestRunInTransactionFail(t *testing.T) {
	if err := setup(); err != nil {
		t.Fatalf("%v", err)
	}
	key := newKey(integHashTableName)
	item := make(map[string]*dynamodb.AttributeValue)
	for k, v := range key {
		item[k] = v
	}
	item["attr_s"] = &dynamodb.AttributeValue{S: aws.String("s")}
	input := &dynamodb.PutItemInput{
		TableName: aws.String(integHashTableName),
		Item: item,
	}
	// Client wrapper that fails every PutItem aimed at the hash table.
	t1Client := &failingAmazonDynamodbClient{DynamoDB: dynamodb.New(sess)}
	t1Client.putFilter = func(input *dynamodb.PutItemInput) (bool, *dynamodb.PutItemOutput, error) {
		if *input.TableName != integHashTableName {
			return false, nil, nil
		}
		return true, nil, fmt.Errorf("failed your request")
	}
	ttlDuration := 24 * time.Hour
	t1Manager := NewTransactionManager(t1Client, integLockTableName, integImagesTableName, ttlDuration)
	err := t1Manager.RunInTransaction(func(tx *Transaction) error {
		tx.PutItem(input)
		return nil
	})
	if err == nil {
		t.Fatalf("should fail")
	}
	if !strings.Contains(err.Error(), "failed your request") {
		t.Fatalf("wrong err %v", err)
	}
	if err := assertItemNotLocked(assertItemNotLockedArg{tableName: integHashTableName, key: key, shouldExist: false}); err != nil {
		t.Fatalf("%v", err)
	}
}
// TestInsertNotExists exercises two transactions racing to insert the
// same key: t1 reads (key absent), pauses; t2 reads, writes "t2" and
// commits; t1 then resumes its write and must fail with
// TransactionRolledBackError, leaving t2's item in place.
func TestInsertNotExists(t *testing.T) {
	if err := setup(); err != nil {
		t.Fatalf("%v", err)
	}
	keyID := randString(8)
	newGetInput := func() *dynamodb.GetItemInput {
		key := make(map[string]*dynamodb.AttributeValue)
		key[idAttribute] = &dynamodb.AttributeValue{S: aws.String(keyID)}
		input := &dynamodb.GetItemInput{
			TableName: aws.String(integHashTableName),
			Key: key,
		}
		return input
	}
	newPutInput := func(attrVal string) *dynamodb.PutItemInput {
		key := make(map[string]*dynamodb.AttributeValue)
		key[idAttribute] = &dynamodb.AttributeValue{S: aws.String(keyID)}
		item := make(map[string]*dynamodb.AttributeValue)
		for k, v := range key {
			item[k] = v
		}
		item["attr_s"] = &dynamodb.AttributeValue{S: aws.String(attrVal)}
		input := &dynamodb.PutItemInput{
			TableName: aws.String(integHashTableName),
			Item: item,
		}
		return input
	}
	// Channels choreograph the interleaving: t1GetOK signals t1 finished
	// its read; t1Resume releases t1 to attempt its write.
	t1GetOK := make(chan struct{})
	t1Resume := make(chan struct{})
	t1Err := make(chan error)
	go func() {
		t1Err <- manager.RunInTransaction(func(t1 *Transaction) error {
			getResult, err := t1.GetItem(newGetInput())
			if err != nil {
				return err
			}
			if getResult.Item != nil {
				return fmt.Errorf("item should not exists %+v", getResult)
			}
			t1GetOK <- struct{}{}
			<-t1Resume
			t1.PutItem(newPutInput("t1"))
			return nil
		})
	}()
	<-t1GetOK
	err := manager.RunInTransaction(func(t2 *Transaction) error {
		getResult, err := t2.GetItem(newGetInput())
		if err != nil {
			return err
		}
		if getResult.Item != nil {
			return fmt.Errorf("item should not exists %+v", getResult)
		}
		t2.PutItem(newPutInput("t2"))
		return nil
	})
	t1Resume <- struct{}{}
	if err != nil {
		t.Fatalf("%v", err)
	}
	err = <-t1Err
	if err == nil {
		t.Fatalf("t1 should fail")
	}
	if _, ok := errors.Cause(err).(*TransactionRolledBackError); !ok {
		t.Fatalf("wrong error %v", err)
	}
	// t2's write wins: the stored item must match newPutInput("t2").
	expKey := newGetInput().Key
	expItem := newPutInput("t2").Item
	if err := assertItemNotLocked(assertItemNotLockedArg{tableName: integHashTableName, key: expKey, expected: expItem, shouldExist: true}); err != nil {
		t.Fatalf("%v", err)
	}
}
// TestRollbackOrCommit simulates a crash mid-transaction (panic after
// a locking read) and verifies the transaction is left pending with
// the item locked, then that RollbackOrCommit rolls it back and
// restores the unlocked original item.
func TestRollbackOrCommit(t *testing.T) {
	if err := setup(); err != nil {
		t.Fatalf("%+v", err)
	}
	getInput := &dynamodb.GetItemInput{
		TableName: aws.String(integHashTableName),
		Key: key0,
	}
	// The panic escapes RunInTransaction, so the deferred recover keeps
	// the test alive; txID is captured via the named return before the
	// panic unwinds.
	txID := func() (txID string) {
		defer func() {
			recover()
		}()
		manager.RunInTransaction(func(tx *Transaction) error {
			txID = tx.ID()
			tx.GetItem(getInput)
			panic("machine failure")
		})
		return ""
	}()
	state, _, err := manager.TransactionInfo(txID)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if state != TransactionItemStatePending {
		t.Fatalf("state %s not pending", state)
	}
	if err := assertItemLocked(assertItemLockedArg{tableName: integHashTableName, key: key0, owner: txID, isTransient: false, isApplied: false}); err != nil {
		t.Fatalf("%v", err)
	}
	rolledback, err := manager.RollbackOrCommit(txID)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if !rolledback {
		t.Fatalf("not rolled back")
	}
	if err := assertItemNotLocked(assertItemNotLockedArg{tableName: integHashTableName, key: key0, expected: item0, shouldExist: true}); err != nil {
		t.Fatalf("%v", err)
	}
}
// TestRollbackOrCommitCommitted verifies that RollbackOrCommit on an
// already-committed transaction commits (reports rolledback == false)
// and the updated item remains present and unlocked.
func TestRollbackOrCommitCommitted(t *testing.T) {
	if err := setup(); err != nil {
		t.Fatalf("%+v", err)
	}
	key1 := newKey(integHashTableName)
	updateInput := &dynamodb.UpdateItemInput{
		TableName: aws.String(integHashTableName),
		Key: key1,
	}
	txID := ""
	err := manager.RunInTransaction(func(tx *Transaction) error {
		txID = tx.ID()
		if _, err := tx.UpdateItem(updateInput); err != nil {
			return errors.Wrap(err, "UpdateItem")
		}
		return nil
	})
	if err != nil {
		t.Fatalf("%+v", err)
	}
	rolledback, err := manager.RollbackOrCommit(txID)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if rolledback {
		t.Fatalf("rolled back")
	}
	if err := assertItemNotLocked(assertItemNotLockedArg{tableName: integHashTableName, key: key1, expected: key1, shouldExist: true}); err != nil {
		t.Fatalf("%v", err)
	}
}
// TestQuery verifies read-committed query semantics: committed items
// (item1, item3) are visible; an item written by an uncommitted
// transaction (item2, then item2a) is invisible until that transaction
// commits. Results are checked in ScanIndexForward=false (descending
// range key) order.
func TestQuery(t *testing.T) {
	if err := setup(); err != nil {
		t.Fatalf("%+v", err)
	}
	t1, err := manager.newTransaction()
	if err != nil {
		t.Fatalf("%+v", err)
	}
	item1 := make(map[string]*dynamodb.AttributeValue)
	item1[idAttribute] = &dynamodb.AttributeValue{S: aws.String("a")}
	item1[rangeAttribute] = &dynamodb.AttributeValue{N: aws.String("1")}
	putInput1 := &dynamodb.PutItemInput{
		TableName: aws.String(integRangeTableName),
		Item: item1,
	}
	if _, err := t1.PutItem(putInput1); err != nil {
		t.Fatalf("%+v", err)
	}
	item3 := make(map[string]*dynamodb.AttributeValue)
	item3[idAttribute] = &dynamodb.AttributeValue{S: aws.String("a")}
	item3[rangeAttribute] = &dynamodb.AttributeValue{N: aws.String("3")}
	putInput3 := &dynamodb.PutItemInput{
		TableName: aws.String(integRangeTableName),
		Item: item3,
	}
	if _, err := t1.PutItem(putInput3); err != nil {
		t.Fatalf("%+v", err)
	}
	// t1 commits item1 and item3; they become query-visible.
	if err := t1.commit(); err != nil {
		t.Fatalf("%+v", err)
	}
	t2, err := manager.newTransaction()
	if err != nil {
		t.Fatalf("%+v", err)
	}
	item2 := make(map[string]*dynamodb.AttributeValue)
	item2[idAttribute] = &dynamodb.AttributeValue{S: aws.String("a")}
	item2[rangeAttribute] = &dynamodb.AttributeValue{N: aws.String("2")}
	item2["some_attr"] = &dynamodb.AttributeValue{S: aws.String("wef")}
	putInput2 := &dynamodb.PutItemInput{
		TableName: aws.String(integRangeTableName),
		Item: item2,
	}
	if _, err := t2.PutItem(putInput2); err != nil {
		t.Fatalf("%+v", err)
	}
	eav := make(map[string]*dynamodb.AttributeValue)
	eav[":id"] = &dynamodb.AttributeValue{S: aws.String("a")}
	queryInput := &dynamodb.QueryInput{
		TableName: aws.String(integRangeTableName),
		ExpressionAttributeValues: eav,
		KeyConditionExpression: aws.String(fmt.Sprintf("%s = :id", idAttribute)),
		ScanIndexForward: aws.Bool(false),
	}
	// t2 is still open: item2 must not appear yet.
	queryOutput1, err := manager.Query(queryInput)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if err := sliceOfAttributeValueMapEqual(queryOutput1.Items, []map[string]*dynamodb.AttributeValue{item3, item1}); err != nil {
		t.Fatalf("%+v", err)
	}
	if err := t2.commit(); err != nil {
		t.Fatalf("%+v", err)
	}
	// After t2 commits, item2 joins the result set.
	queryOutput2, err := manager.Query(queryInput)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if err := sliceOfAttributeValueMapEqual(queryOutput2.Items, []map[string]*dynamodb.AttributeValue{item3, item2, item1}); err != nil {
		t.Fatalf("%+v", err)
	}
	t2a, err := manager.newTransaction()
	if err != nil {
		t.Fatalf("%+v", err)
	}
	// item2a overwrites item2's row but stays invisible until t2a commits.
	item2a := make(map[string]*dynamodb.AttributeValue)
	item2a[idAttribute] = item2[idAttribute]
	item2a[rangeAttribute] = item2[rangeAttribute]
	item2a["some_attr"] = &dynamodb.AttributeValue{S: aws.String("abcd")}
	putInput2a := &dynamodb.PutItemInput{
		TableName: aws.String(integRangeTableName),
		Item: item2a,
	}
	if _, err := t2a.PutItem(putInput2a); err != nil {
		t.Fatalf("%+v", err)
	}
	queryOutput2AUnCommit, err := manager.Query(queryInput)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if err := sliceOfAttributeValueMapEqual(queryOutput2AUnCommit.Items, []map[string]*dynamodb.AttributeValue{item3, item2, item1}); err != nil {
		t.Fatalf("%+v", err)
	}
	if err := sliceOfAttributeValueMapEqual(queryOutput2AUnCommit.Items, []map[string]*dynamodb.AttributeValue{item3, item2a, item1}); err == nil {
		t.Fatalf("should not return item2a %+v", queryOutput2AUnCommit.Items)
	}
	if err := t2a.commit(); err != nil {
		t.Fatalf("%+v", err)
	}
	queryOutput2a, err := manager.Query(queryInput)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if err := sliceOfAttributeValueMapEqual(queryOutput2a.Items, []map[string]*dynamodb.AttributeValue{item3, item2a, item1}); err != nil {
		t.Fatalf("%+v", err)
	}
}
// sliceOfAttributeValueMapEqual reports whether two slices of DynamoDB
// attribute-value maps are element-wise equal (order matters). It returns
// nil when equal, otherwise an error describing the first difference.
func sliceOfAttributeValueMapEqual(a, b []map[string]*dynamodb.AttributeValue) error {
	if len(a) != len(b) {
		return fmt.Errorf("different lengths %d %d", len(a), len(b))
	}
	for i, ae := range a {
		// Wrapf replaces the Wrap(err, fmt.Sprintf(...)) pattern; same
		// message, one fewer allocation and clearer intent.
		if err := attributeValueMapEqual(ae, b[i]); err != nil {
			return errors.Wrapf(err, "at %d", i)
		}
	}
	return nil
}
|
// Package sqlfly helps make writing SQL fast and fluent.
package sqlfly
|
package model
// TODOListID uniquely identifies a TODOList.
type TODOListID string

// ListItemID uniquely identifies a ListItem.
type ListItemID string

// TODOList is a user-owned list of items.
type TODOList struct {
	ID    TODOListID
	Owner UserID
	Items []*ListItem
}

// ListItem is a single entry in a TODOList.
type ListItem struct {
	ID   ListItemID
	Text string
}

// UserID uniquely identifies a User.
type UserID string

// User is a user account with a Yandex avatar reference.
type User struct {
	ID             UserID
	Name           string
	YandexAvatarID string
}
// AccessMode encodes a user's level of access to a TODO list.
type AccessMode string

// Grantable reports whether this mode may be granted to another user;
// only read and read-write access can be delegated.
func (m AccessMode) Grantable() bool {
	switch m {
	case AccessModeRead, AccessModeReadWrite:
		return true
	}
	return false
}

// CanRead reports whether the mode permits reading the list.
// Any mode that can write can also read.
func (m AccessMode) CanRead() bool {
	return m == AccessModeRead || m.CanWrite()
}

// CanWrite reports whether the mode permits modifying the list.
// Any mode that can invite can also write.
func (m AccessMode) CanWrite() bool {
	return m == AccessModeReadWrite || m.CanInvite()
}

// CanInvite reports whether the mode permits inviting other users
// (owner only).
func (m AccessMode) CanInvite() bool {
	return m == AccessModeOwner
}
// Access mode values, from least to most privileged.
const (
	AccessModeRead      AccessMode = "R"  // read-only access
	AccessModeReadWrite AccessMode = "RW" // read and write access
	AccessModeOwner     AccessMode = "O"  // full control, including invites
)
// ACLEntry records a user's access mode on a TODO list, plus invitation
// metadata (who invited the user and whether the invite was accepted).
type ACLEntry struct {
	User     UserID
	Mode     AccessMode
	ListID   TODOListID
	Alias    string // presumably the user's display alias for the list — TODO confirm
	Inviter  UserID
	Accepted bool
}
|
package test
import (
"common/tbhandler"
"crypto/md5"
"encoding/hex"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"time"
_ "github.com/go-sql-driver/mysql" //驱动包
"github.com/kdada/tinygo"
"github.com/narrowizard/tinysql"
)
// Result wraps a reflect.Value holding a slice plus a comparison function
// so arbitrary slices can be sorted through sort.Interface (see the
// Len/Less/Swap methods below).
type Result struct {
	Info  reflect.Value // the slice being sorted
	less  func(x, y reflect.Value) bool
	Count int
}

// UserOrderForPrint groups all triplicate-form ("三联单") print tables
// belonging to one user.
type UserOrderForPrint struct {
	User  int // user id
	Table []TableForPrint
}

// TableForPrint is a single triplicate-form page for a user; a new order
// starts a new page.
type TableForPrint struct {
	Uid           int       // user id
	UserName      string    // user name
	UserAddress   string    // delivery address
	OrderNumber   string    // order number
	OutOfWareTime string    // warehouse-out time, formatted like '2006年1月2日'
	Phone         string    // contact phone
	Products      []Product // products listed on this page of the order
	Remark        string    // order remark; kept in its own column since it may be long
	Count         float64   // total amount; float may show extra decimals — string might be better
	WareAddress   string    // warehouse address
	Telephone     string    // contact phone
	Boss          string    // general manager
	NowPage       int       // current page number
	TotalPage     int       // total pages for this order, e.g. 2/3
}

// BaseService bundles a tinysql DB handle with an accumulated query
// condition and its bound values.
type BaseService struct {
	DB        *tinysql.DB
	condition string
	values    []interface{}
}

// Product is one product line on a print table.
type Product struct {
	Number     int     // sequence number
	Name       string  // name: item name + item spec
	Size       string  // spec as recorded by the warehouse
	OrderCount float64 // ordered quantity; not needed on triplicate forms, needed when printing orders
	Count      float64 // left empty; filled in at packing time
	Price      int     // online sale price
	TotalPrice int     // amount = Count * Price
	// Remark string // remark from the user's order
}
// Len returns the number of elements in the wrapped slice (sort.Interface).
func (r Result) Len() int {
	return r.Info.Len()
}

// Less compares elements i and j with the configured less func (sort.Interface).
func (r Result) Less(i, j int) bool {
	return r.less(r.Info.Index(i), r.Info.Index(j))
}

// Swap exchanges elements i and j in place (sort.Interface). Element i is
// copied out via Interface() first, because the Set on slot i below would
// otherwise clobber the value before it can be written into slot j.
func (r Result) Swap(i, j int) {
	var a = r.Info.Index(i).Interface()
	var b = r.Info.Index(j)
	r.Info.Index(i).Set(b)
	r.Info.Index(j).Set(reflect.ValueOf(a))
}
// Test2016 is a scratch entry point kept for experiments. The only live
// statement generates model code for the "depot" database via tbhandler;
// the commented blocks are retained experiments (reflect-based sorting via
// Result, batch inserts via BaseService, DB registration, etc.).
func Test2016() {
	// m := []UserInfo{{CreateTime: "2016-06-28 12:11:11", UserId: 1, Use: Use{Id: 1, Name: "zz"}},
	// {CreateTime: "2016-06-28 12:11:11", UserId: 13, Use: Use{Id: 3, Name: "z222z"}}}
	// base:= new(BaseService)
	// base.DB=tinysql.Open()
	// new(BaseService).InsertModels(m)
	// mm := Result{Info: reflect.ValueOf(m), less: func(x, y reflect.Value) bool {
	// return x.FieldByName("UserId").Int() < y.FieldByName("UserId").Int()
	// }, Count: 11}
	// sort.Sort(mm)
	// fmt.Println(mm.Info)
	tbhandler.CreateModelAll("depot")
	// tinysql.RegisterDB("test", "mysql", "jiudeng:jiudeng2016@tcp(10.0.0.13:3306)/test?charset=utf8mb4&loc=Asia%2fShanghai", 10)
	// var ser = new(BaseService)
	// ser.DB = tinysql.Open("test")
	// m := UserInfo{CreateTime: "12:11:11", UserId: 13, Use: Use{Id: 1, Name: "zz"}}
	// var ms []interface{}
	// ms = append(ms, m, m)
	// tinygo.Debug(ser.InsertModels(ms))
}
// InsertModels inserts one model or a collection of models.
// A plain struct is written directly through the query builder; any other
// kind (slice/array, possibly of interface elements) is delegated to
// insertModels. Returns true when at least one row was written.
func (this *BaseService) InsertModels(models interface{}) bool {
	var value = reflect.ValueOf(models)
	if value.Kind() != reflect.Struct {
		return this.insertModels(value)
	}
	return this.DB.NewBuilder().InsertModel(&models) > 0
}
// insertModels batch-inserts the elements of a slice/array value using a
// single multi-row INSERT statement. Returns false on empty input, on an
// unnameable table, or on an execution error.
func (this *BaseService) insertModels(models reflect.Value) bool {
	if models.Len() < 1 {
		return false
	}
	// Unwrap interface elements (e.g. when called with []interface{}).
	var data = models.Index(0)
	if data.Kind() == reflect.Interface {
		data = data.Elem()
	}
	// The table name is derived from the struct type name of the first element.
	var tableName = data.Type().Name()
	var columns = GetColumns(models.Index(0))
	var values = GetValues(models)
	// One "?" placeholder per column ...
	var holders = make([]string, len(columns))
	for i := 0; i < len(columns); i++ {
		holders[i] = "?"
	}
	// ... and one "(?,...,?)" group per row; values is the flattened
	// row-major argument list matching these placeholders.
	var vs = make([]string, models.Len())
	for i := 0; i < models.Len(); i++ {
		vs[i] = "(" + strings.Join(holders, ",") + ")"
	}
	tinygo.Debug("table:", tableName)
	tinygo.Debug("cols:", columns)
	tinygo.Debug("value:", values)
	if tableName == "" {
		return false
	}
	var sql = `insert into ` + SnakeName(tableName) + "(" + strings.Join(columns, ",") + ")values " + strings.Join(vs, ",")
	tinygo.Debug(sql)
	// return false
	var _, err = this.DB.Exec(sql, values...)
	if err != nil {
		fmt.Println(err)
		return false
	}
	return true
}
// GetColumns returns the snake_case column names for a struct value,
// recursing into anonymous (embedded) fields. Interface values are
// unwrapped first; for a slice, the columns of its first element are used.
func GetColumns(t reflect.Value) []string {
	var res []string
	if t.Kind() == reflect.Interface {
		t = t.Elem()
	}
	if t.Kind() == reflect.Slice {
		// Fix: the original indexed element 0 unconditionally, which
		// panics on an empty slice.
		if t.Len() == 0 {
			return res
		}
		res = append(res, GetColumns(t.Index(0))...)
	}
	if t.Kind() == reflect.Struct {
		var n = t.NumField()
		for i := 0; i < n; i++ {
			// Embedded structs contribute their own columns, flattened in.
			if t.Type().Field(i).Anonymous {
				res = append(res, GetColumns(t.Field(i))...)
				continue
			}
			res = append(res, SnakeName(t.Type().Field(i).Name))
		}
	}
	return res
}
// GetValues 获取要插入的值
func GetValues(t reflect.Value) []interface{} {
var vals []interface{}
if t.Kind() == reflect.Interface {
t = t.Elem()
}
if t.Kind() == reflect.Slice {
for i := 0; i < t.Len(); i++ {
vals = append(vals, GetValues(t.Index(i))...)
}
}
if t.Kind() == reflect.Struct {
var n = t.NumField()
for i := 0; i < n; i++ {
if t.Type().Field(i).Anonymous {
vals = append(vals, GetValues(t.Field(i))...)
} else {
vals = append(vals, t.Field(i).Interface())
}
}
}
return vals
}
// CamelName converts a snake_case name to camelCase: an underscore followed
// by a letter is dropped and the letter upper-cased; leading and trailing
// underscores are discarded. E.g. "user_id" -> "userId", "_foo_" -> "foo".
func CamelName(base string) string {
	var r = make([]rune, 0, len(base))
	var b = []rune(base)
	for i := 0; i < len(b); i++ {
		if b[i] != '_' {
			r = append(r, b[i])
			continue
		}
		// Underscores at either end contribute nothing.
		if i == 0 || i == len(b)-1 {
			continue
		}
		next := b[i+1]
		switch {
		case next >= 'a' && next <= 'z':
			// Fix: the original subtracted 32 from ANY letter following the
			// underscore, turning an already-uppercase successor into a
			// non-letter garbage character. Only lowercase letters need
			// upper-casing.
			r = append(r, next-32)
			i++
		case next >= 'A' && next <= 'Z':
			r = append(r, next)
			i++
		}
		// A non-letter successor (e.g. a second '_') simply drops this '_'.
	}
	return string(r)
}
// SnakeName converts a CamelCase name to snake_case: every upper-case ASCII
// letter is lower-cased, and each one except a leading letter is preceded
// by an underscore.
func SnakeName(base string) string {
	out := make([]byte, 0, len(base)*2)
	for i := 0; i < len(base); i++ {
		c := base[i]
		if c >= 'A' && c <= 'Z' {
			if i > 0 {
				out = append(out, '_')
			}
			c += 'a' - 'A'
		}
		out = append(out, c)
	}
	return string(out)
}
// S is a small two-field scratch struct used by commented-out JSON
// experiments in testFunc.
type S struct {
	A int
	B int
}
// Replace rebuilds s by mapping every rune through f, which receives the
// rune index and the rune and returns its replacement text (possibly empty
// or multi-character).
func Replace(s string, f func(i int, r rune) string) string {
	// strings.Builder keeps this linear; the original concatenated with
	// `+=` in a loop, which is quadratic in the output length.
	var b strings.Builder
	for k, v := range []rune(s) {
		b.WriteString(f(k, v))
	}
	return b.String()
}
// To converts a CamelCase identifier to snake_case: an underscore is
// inserted before every non-leading upper-case ASCII letter, then the whole
// string is lower-cased. (The rune-mapping helper the original delegated to
// is inlined here.)
func To(s string) string {
	var b strings.Builder
	for i, r := range []rune(s) {
		if i != 0 && r >= 'A' && r <= 'Z' {
			b.WriteByte('_')
		}
		b.WriteRune(r)
	}
	return strings.ToLower(b.String())
}
// upperASCIIRe matches one upper-case ASCII letter. It is compiled once at
// package init; the original called regexp.MustCompile on every To2 call,
// which is needlessly expensive on hot paths.
var upperASCIIRe = regexp.MustCompile(`[A-Z]`)

// To2 converts a CamelCase identifier to snake_case via regexp: each
// upper-case letter becomes "_" + its lower-case form, then any leading
// underscore is trimmed.
func To2(s string) string {
	return strings.TrimLeft(upperASCIIRe.ReplaceAllStringFunc(s, func(v string) string {
		return "_" + strings.ToLower(v)
	}), "_")
}
// ta is an unused scratch time value retained from the commented-out
// time-parsing experiments inside testFunc.
var ta time.Time
// testFunc prints a 26-element sequence in groups of ten followed by the
// trailing remainder. The large commented block below is retained scratch
// experiments (bit ops, runtime.Caller, time parsing, JSON, big.Float, and
// Tofixed checks).
func testFunc() {
	var (
		a      []int
		length = 26
	)
	for i := 0; i < length; i++ {
		a = append(a, i)
	}
	for i := 0; i < length; i++ {
		if (i+1)%10 == 0 && i > 0 {
			// Emit each completed group of ten.
			fmt.Println(a[i-9 : i+1])
		}
		// Fix: the original tested `i == length`, which can never be true
		// inside a loop bounded by `i < length`, so the trailing partial
		// group was never printed. Print it at the last index, unless that
		// index just completed a full group of ten.
		if i == length-1 && (i+1)%10 != 0 {
			fmt.Println(a[i-i%10:])
		}
	}
	// var a int = 2
	// fmt.Println(^a)
	// a := "TotalCountSp"
	// fmt.Println(To(a))
	// _, file, line, _ := runtime.Caller(2)
	// _, fileName := filepath.Split(file)
	// fmt.Println(fileName, file, line)
	// var ta, _ = time.Parse("2006-01-02", "2016-06-08")
	// teqeqb, err := time.ParseInLocation("", "", time.Local)
	// _, _, _ = ta, teqeqb, err
	// fmt.Println(ta)
	// b := []rune(a)
	// for i := 0; i < len(b); {
	// if b[i] >= 'A' && b[i] <= 'Z' && i > 0 {
	// b = append(b[:i], '_', b[i]+32)
	// b = append(b, b[i+2:]...)
	// fmt.Println(string(b))
	// }
	// i++
	// }
	// fmt.Print(string(b))
	// strings.Split()
	// a := []s{{A: 1, B: 2}, {A: 0, B: 1}}
	// e := json.Unmarshal([]byte(`[{"A":2,"B":3},{"A":4,"B":5}]`), &a)
	// fmt.Println(a, e)
	// var a, b float64
	// var af = big.NewFloat(a)
	// var bf = big.NewFloat(b)
	// var aa = big.NewFloat(0.0)
	// a, b = 20.5, 0.99
	// fmt.Println(a*b, "\n", aa.Mul(af, bf), "\n", 0.99*20.5)
	// fmt.Println(float32(a*b), 20.5*0.99)
	// fmt.Println(Tofixed(a * b))
	// fmt.Println(Tofixed(1.333))
	// createImage()
	// imageChange()
	// var sum float64
	// sum = 0.1
	// for i := 1; i < 1000; i++ {
	// sum = sum + float64(0.1)
	// }
	// fmt.Println(sum)
	// price := float64(990) / 1.0 / 1000
	// fmt.Println(Tofixed(price))
	// fmt.Println(Tofixed(float64(0.99) * 20.5))
	// fmt.Println(Tofixed(0.99 * 20.5))
	// fmt.Println(strconv.FormatFloat(20.5*price, 'f', 2, 64))
}
// Tofixed formats a float32/float64 value as a string with two decimal
// places, rounding half-up on the third decimal digit for positive inputs.
// A nil input yields ""; any other non-float type yields "0.00".
func Tofixed(n interface{}) string {
	if n == nil {
		return ""
	}
	var num int64
	// Scale to thousandths so the +5 / ÷10 below rounds the third decimal.
	switch n.(type) {
	case float32:
		num = int64(n.(float32) * 1000)
	case float64:
		// NOTE(review): the float64 is deliberately narrowed to float32
		// first — apparently to snap values like 20.294999... (0.99*20.5)
		// up to 20.295 before truncation, matching the commented checks in
		// testFunc. Confirm that intent before "fixing" the narrowing.
		num = int64(float32(n.(float64)) * 1000)
	}
	num += 5 // half-up rounding of the third decimal (positive values)
	num = num / 10
	// num now holds the value in hundredths; stitch in the decimal point.
	var s = fmt.Sprint(num)
	switch len(s) {
	case 0:
		return "0.00"
	case 1:
		return "0.0" + s[:1]
	case 2:
		return "0." + s[:2]
	}
	// fmt.Println(s)
	// NOTE(review): negative inputs round in the wrong direction with the
	// +5 adjustment above — confirm whether negatives ever reach here.
	return s[:len(s)-2] + "." + s[len(s)-2:]
}
// ToFixed rounds a numeric value to n decimal places. Plain ints are
// returned unchanged (as float64); unsupported types yield 0.0.
func ToFixed(number interface{}, n int) float64 {
	switch v := number.(type) {
	case int:
		return float64(v)
	case float32:
		return toFixed(float64(v), n)
	case float64:
		return toFixed(v, n)
	}
	return 0.0
}
// toFixed rounds number to n decimal places by formatting it and parsing
// the result back; a parse failure yields 0.0.
func toFixed(number float64, n int) float64 {
	formatted := strconv.FormatFloat(number, 'f', n, 64)
	rounded, err := strconv.ParseFloat(formatted, 64)
	if err != nil {
		return 0.0
	}
	return rounded
}
//func Add2() func(b int) int {
// return func(b int) int {
// return b + 2
// }
//}
// Adder demonstrates a closure: it returns a function that adds the
// captured value a to its argument.
func Adder(a int) func(b int) int {
	return func(b int) int {
		return b + a
	}
}
// func_name prints a hard-coded product description; the \u escapes decode
// to the Chinese text "久等牌苏北大米 规格:普通".
func func_name() {
	fmt.Println("\u4e45\u7b49\u724c\u82cf\u5317\u5927\u7c73 \u89c4\u683c:\u666e\u901a")
}
// D is a demo type whose pointer receiver satisfies interface I below.
type D struct {
	T int
	S string
}

// DailyCount is a daily tally row for a product.
type DailyCount struct {
	Id         int
	Product    int       // product id (product table id)
	ItemId     int       // id in the core product_info table
	Number     float64   // quantity
	ItemName   string    // item name
	Uint       string    // unit (field name looks like a typo for "Unit")
	Remark     string    // remark / description
	CreateTime time.Time // creation time
}

// I embeds I2 and adds a setter; *D implements it.
type I interface {
	I2
	Set() bool
}

// I2 is the getter half of I.
type I2 interface {
	Get() int
}
// Get implements I2 for *D; it always returns 1.
func (this *D) Get() int {
	return 1
}

// Set implements I for *D; it always reports success.
func (this *D) Set() bool {
	return true
}
// Te demonstrates a deferred closure mutating a named result: the body
// returns 3, then the defer prints 3 and increments the named result, so
// the caller observes 4.
func Te() (a int) {
	defer func() {
		fmt.Println(a)
		a++
	}()
	return 3
}
func CreatePassword(originalPassword, salt string) string {
var m = md5.New()
m.Write([]byte(originalPassword + salt))
return hex.EncodeToString(m.Sum(nil))
}
// UserOrderInfoQuery is one row of the database query joining user, order
// and product information for printing.
type UserOrderInfoQuery struct {
	UserId      int    // user id
	UserName    string // user name
	UserAddress string // user address
	Phone       string // contact phone
	Remark      string // order remark
	ProductName string // product name
	Summary     string // spec
	Price       int    // actual purchase price
	Size        string // warehouse spec
	OrderId     int    // order id, also used as the form number
}
// C builds 17 fake order-query rows (same order id, varying product names)
// for exercising the pagination logic in Test; it prints and returns them.
// The commented lines are retained variants with extra orders.
func C() []UserOrderInfoQuery {
	var res []UserOrderInfoQuery
	var o = UserOrderInfoQuery{10201, "张三", "地址:XX市", "13323332333", "备注", "产品名:", "规格:100g", 100, "斤", 10033333}
	for i := 0; i < 17; i++ {
		o.ProductName = "产品名:+" + strconv.Itoa(i)
		res = append(res, o)
	}
	// o.OrderId = 222222
	// res = append(res, o)
	// o.UserId = 10001
	// for i := 0; i < 13; i++ {
	// o.ProductName = "产品名:" + strconv.Itoa(i)
	// res = append(res, o)
	// }
	// o.OrderId = 333333
	// res = append(res, o)
	fmt.Println(res)
	return res
}
// R flattens a slice of *TableForPrint into a slice of values (shallow
// copies, in order). A nil/empty input yields a nil slice.
func R(r []*TableForPrint) []TableForPrint {
	var out []TableForPrint
	for i := range r {
		out = append(out, *r[i])
	}
	return out
}
// Test builds triplicate print tables from the query rows produced by C():
// each new order starts a fresh table, a continuation table is opened after
// every 10 rows of the same order, and finally each table's TotalPage is
// back-filled from the per-order page map.
func Test() {
	var model = C()
	var userTableInfo []*TableForPrint // all print tables across users
	var ordid = 0
	var page = make(map[string]int) // order number -> final total page count
	var userTable *TableForPrint    // the table currently being filled
	for k, v := range model {
		// fmt.Println(ordid)
		if ordid != v.OrderId {
			// First row of a new order: start a fresh table and copy over
			// the user/order header fields.
			ordid = v.OrderId
			userTable = new(TableForPrint)
			userTable.Uid, userTable.UserName, userTable.UserAddress, userTable.Phone = v.UserId, v.UserName, v.UserAddress, v.Phone
			userTable.NowPage, userTable.TotalPage, userTable.OrderNumber = 1, 1, strconv.Itoa(v.OrderId)
			userTable.Products = make([]Product, 10, 10) // product rows per page
			userTable.Products[0].Price, userTable.Products[0].Name = v.Price, v.ProductName
			userTableInfo = append(userTableInfo, userTable)
			// fmt.Println(*userTableInfo[0])
		} else if k%10 == 0 && v.OrderId == ordid {
			// Same order reached a multiple of 10 rows: open a continuation
			// page cloned from the current one.
			// NOTE(review): k is the GLOBAL row index, not a per-order
			// counter — this pagination is only correct while each order
			// starts at a multiple of 10; confirm intent.
			var tmp = *userTable
			tmp.NowPage = userTable.NowPage + 1
			tmp.TotalPage = userTable.TotalPage + 1
			page[tmp.OrderNumber] = tmp.TotalPage
			tmp.Products = make([]Product, 10, 10)
			// First product row of the continuation page.
			tmp.Products[0].Name = v.ProductName
			// NOTE(review): the new(TableForPrint) below is a dead store —
			// userTable is immediately repointed at &tmp.
			userTable = new(TableForPrint)
			userTable = &tmp
			userTableInfo = append(userTableInfo, userTable)
		} else {
			// Subsequent row on the current page.
			var index = k % 10
			userTable.Products[index].Name = v.ProductName
		}
	}
	// Back-fill every page's TotalPage with the final per-order count.
	var counts = len(userTableInfo)
	for i := 0; i < counts; i++ {
		userTableInfo[i].TotalPage = page[userTableInfo[i].OrderNumber]
	}
	fmt.Println(R(userTableInfo))
}
|
package run
import floc "gopkg.in/workanator/go-floc.v1"
// getCounter reads the shared counter (stored in the floc state as *int)
// under the state's read lock and returns its current value.
func getCounter(state floc.State) int {
	data, locker := state.DataWithReadLocker()
	counter := data.(*int)
	locker.Lock()
	defer locker.Unlock()
	return *counter
}

// updateCounter adds value (asserted to int) to the shared counter under
// the state's write lock. flow and key are unused but required to match
// the floc update-function shape.
func updateCounter(flow floc.Flow, state floc.State, key string, value interface{}) {
	data, locker := state.DataWithWriteLocker()
	counter := data.(*int)
	locker.Lock()
	defer locker.Unlock()
	*counter += value.(int)
}

// jobIncrement is a floc job that bumps the shared counter by one.
func jobIncrement(flow floc.Flow, state floc.State, update floc.Update) {
	update(flow, state, "", 1)
}

// predCounterEquals returns a predicate that is true when the shared
// counter holds exactly n.
func predCounterEquals(n int) floc.Predicate {
	return func(state floc.State) bool {
		return getCounter(state) == n
	}
}
|
package user
import (
"github.com/globalsign/mgo/bson"
)
// UnSubUser removes a subscription: uid stops following unSubUid. It first
// drops unSubUid from uid's attention list, then drops uid from unSubUid's
// fans list; if the first step fails its error is returned and the second
// step is skipped.
func (userModel *UserModel) UnSubUser(uid bson.ObjectId, unSubUid bson.ObjectId) (err error) {
	c := userModel.GetC()
	// Release the mgo session obtained via GetC when done.
	defer c.Database.Session.Close()
	err = userModel.removeAttention(uid, unSubUid)
	if err != nil {
		return
	}
	return userModel.removeFans(unSubUid, uid)
}
package main
import "fmt"
// reverse flips the elements of s in place.
func reverse(s []int) {
	n := len(s)
	for i := 0; i < n/2; i++ {
		s[i], s[n-1-i] = s[n-1-i], s[i]
	}
}
// rotate left-rotates s by i positions in place using the classic
// three-reversal trick.
// Robustness fix: i is reduced modulo len(s), so i >= len(s) no longer
// panics on the s[:i] slice expression (matching the guard that the
// sibling rotation function already has), and an empty slice is a no-op.
func rotate(s []int, i int) {
	if len(s) == 0 {
		return
	}
	i %= len(s)
	reverse(s[:i])
	reverse(s[i:])
	reverse(s)
}
// rotation left-rotates s by i positions in a single pass: every element of
// a scratch copy is written straight to its destination index. i may exceed
// len(s); it is reduced modulo the length first.
func rotation(s []int, i int) {
	n := len(s)
	// Safety reduction (the original implementation lacked it, but it is
	// cheap, so keep it).
	if i > n {
		i = i % n
	}
	src := make([]int, n)
	copy(src, s)
	for from := 0; from < n; from++ {
		to := from - i
		if to < 0 {
			to += n
		}
		s[to] = src[from]
	}
}
// main demonstrates both rotation helpers on a six-element array: rotate
// shifts left by 2, then rotation shifts the result left by 13 (≡ 1 mod 6).
func main() {
	input := [...]int{0, 1, 2, 3, 4, 5}
	rotate(input[:], 2)
	fmt.Println(input)
	rotation(input[:], 13)
	fmt.Println(input)
}
|
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"os"
"path/filepath"
"github.com/33cn/chain33/types"
_ "github.com/GM-Publicchain/gm/plugin/crypto/init"
"github.com/GM-Publicchain/gm/plugin/dapp/cert/authority/tools/cryptogen/generator"
ca "github.com/GM-Publicchain/gm/plugin/dapp/cert/authority/tools/cryptogen/generator/impl"
"github.com/BurntSushi/toml"
"github.com/spf13/cobra"
)
const (
	// CANAME is the default CA name.
	CANAME = "ca"
	// CONFIGFILENAME is the default configuration file name.
	CONFIGFILENAME = "chain33.cryptogen.toml"
	// OUTPUTDIR is the default output directory for certificate files.
	OUTPUTDIR = "./authdir/crypto"
	// ORGNAME is the default organization name.
	ORGNAME = "Chain33"
)

// Config is the certificate-generator tool configuration: the user names
// to issue certificates for and the signature algorithm to use.
type Config struct {
	Name     []string
	SignType string
}

var (
	// cmd is the root cobra command; generate performs the actual work.
	cmd = &cobra.Command{
		Use:   "cryptogen [-f configfile] [-o output directory]",
		Short: "chain33 crypto tool for generating key and certificate",
		Run:   generate,
	}
	// cfg holds the configuration decoded from the TOML file.
	cfg Config
)
// initCfg decodes the TOML configuration at path into the package-level cfg
// and returns it; on a decode failure the error is printed and the process
// exits.
// Fix: exit with a non-zero status on failure — the original called
// os.Exit(0), which made configuration errors indistinguishable from
// success to callers and shell scripts.
func initCfg(path string) *Config {
	if _, err := toml.DecodeFile(path, &cfg); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	return &cfg
}
// main registers the command-line flags on the root command and executes
// it, exiting non-zero when cobra reports an error.
func main() {
	cmd.Flags().StringP("configfile", "f", CONFIGFILENAME, "config file for users")
	cmd.Flags().StringP("outputdir", "o", OUTPUTDIR, "output diraction for key and certificate")
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
// generate is the cobra Run handler: it loads the TOML config named by the
// flags, then generates keys and certificates for every configured user.
func generate(cmd *cobra.Command, args []string) {
	configfile, _ := cmd.Flags().GetString("configfile")
	outputdir, _ := cmd.Flags().GetString("outputdir")
	initCfg(configfile)
	fmt.Println(cfg.Name)
	generateUsers(outputdir, ORGNAME)
}
// generateUsers wipes baseDir, creates a CA under <baseDir>/cacerts using
// the configured signature algorithm, then issues per-user certificates
// via generateNodes.
func generateUsers(baseDir string, orgName string) {
	fmt.Printf("generateUsers\n")
	fmt.Println(baseDir)
	// Start from a clean output directory.
	err := os.RemoveAll(baseDir)
	if err != nil {
		fmt.Printf("Clean directory %s error", baseDir)
		os.Exit(1)
	}
	caDir := filepath.Join(baseDir, "cacerts")
	signType := types.GetSignType("cert", cfg.SignType)
	if signType == types.Invalid {
		// NOTE(review): this branch returns (exit code 0) while the other
		// failure paths call os.Exit(1) — confirm whether that is intended.
		fmt.Printf("Invalid sign type:%s", cfg.SignType)
		return
	}
	signCA, err := ca.NewCA(caDir, CANAME, signType)
	if err != nil {
		fmt.Printf("Error generating signCA:%s", err.Error())
		os.Exit(1)
	}
	generateNodes(baseDir, signCA, orgName)
}
// generateNodes issues a local user certificate for every configured name,
// writing into <baseDir>/<name> with certificate name "<name>@<orgName>".
// The process exits non-zero on the first failure.
// Fix: the failure message now includes the underlying error and a newline;
// the original discarded err entirely, making failures undiagnosable.
func generateNodes(baseDir string, signCA generator.CAGenerator, orgName string) {
	for _, name := range cfg.Name {
		userDir := filepath.Join(baseDir, name)
		fileName := fmt.Sprintf("%s@%s", name, orgName)
		err := signCA.GenerateLocalUser(userDir, fileName)
		if err != nil {
			fmt.Printf("Error generating local user:%s\n", err.Error())
			os.Exit(1)
		}
	}
}
|
package main
import (
"fmt"
"net"
"github.com/zc409/gostudy/day8/solve_stickybag/protocol"
)
// main connects to the local sticky-packet demo server and sends the same
// length-framed message ten times.
func main() {
	con, err := net.Dial("tcp", "127.0.0.1:30000")
	if err != nil {
		fmt.Printf("connect to server wrong,err:%v\n", err)
		return
	}
	// Fix: defer the close immediately after a successful dial. The
	// original deferred it at the very end of main, so the early return on
	// an encode error leaked the connection.
	defer con.Close()
	mes := "this is test!"
	// Frame the message with the custom protocol package (length prefix).
	mess, err := protocol.Encode(mes)
	if err != nil {
		fmt.Printf("encode wrong,err:%v\n", err)
		return
	}
	for i := 0; i < 10; i++ {
		// Fix: surface write errors instead of silently discarding them.
		if _, err := con.Write(mess); err != nil {
			fmt.Printf("write wrong,err:%v\n", err)
			return
		}
	}
}
|
package main
import "fmt"
// main evaluates a fixed expression to demonstrate operator precedence:
// (((6+2)%3)*4 - 2) / 3 = ((8%3)*4 - 2) / 3 = (8-2)/3 = 2.
func main() {
	var value = (((6+2)%3)*4 - 2) / 3
	// value is 2, so the comparison prints true.
	var isEqual = (value == 2)
	fmt.Printf("result %d (%t) \n", value, isEqual)
}
|
/*
Left in sandbox for at least 3 days.
I want to verify if this inequality is true:
for n >= 4, if a_1, a_2, a_3, …, a_n ∈ R⁺ ∪ {0} and \sum_{i=1}^{n} a_i = 1, then a_1 a_2 + a_2 a_3 + a_3 a_4 + ⋯ + a_{n-1} a_n + a_n a_1 <= 1/4.
Challenge
Write a piece of program which takes an integer n as input. It does the following:
Generate a random array a which consists of n non-negative reals. The sum of all elements should be 1.
By saying random, I mean that every array satisfying the requirements in step 2 should have a non-zero probability of occurrence. It doesn't need to be uniform. See this related post.
Calculate a[0]a[1]+a[1]a[2]+a[2]a[3]+...+a[n-2]a[n-1]+a[n-1]a[0].
Output the sum and the array a.
For I/O forms see this post.
Rules
(Sorry for the late edit...) All numbers should be accurate to at least 10^{-4}.
Standard loopholes should be forbidden.
Example
The following code is an ungolfed Python code for this challenge, using library numpy. (For discussion about using libraries, see This Link.)
import numpy as np
def inequality(n):
if n < 4:
raise Exception
a = np.random.rand(n)
sum_a = 0
for i in range(n):
sum_a += a[i]
for i in range(n):
a[i] /= sum_a
sum_prod = 0
for i in range(n):
sum_prod += a[i % n] * a[(i + 1) % n]
print(a)
return sum_prod, a
Tip
You could assume that input n is a positive integer greater than 3.
Your score is the bytes in your code. The one with the least score wins.
*/
package main
import (
"math"
"math/rand"
"time"
)
// main stress-tests the conjectured inequality: for each size n from 4 up
// to 9999, it draws ten random probability vectors and asserts that the
// cyclic adjacent-product sum stays below 1/4 and that the vector sums to
// 1 within floating-point tolerance.
func main() {
	rand.Seed(time.Now().UnixNano())
	for n := 4; n < 10000; n++ {
		for i := 0; i < 10; i++ {
			p, a := inequality(n)
			assert(p < 0.25)
			assert(math.Abs(1-sum(a)) < 1e-8)
		}
	}
}
// assert panics when x is false; minimal test harness for main above.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// inequality draws a random probability vector of length n and returns the
// cyclic adjacent-product sum a[0]a[1] + a[1]a[2] + ... + a[n-1]a[0]
// together with the vector itself.
func inequality(n int) (float64, []float64) {
	a := random(n)
	total := 0.0
	for i, v := range a {
		total += v * a[(i+1)%n]
	}
	return total, a
}
func random(n int) []float64 {
r := make([]float64, n)
t := 0.0
for i := 0; i < n; i++ {
r[i] = rand.Float64()
t += r[i]
}
for i := range r {
r[i] /= t
}
return r
}
// sum returns the arithmetic sum of a (0 for an empty or nil slice).
func sum(a []float64) float64 {
	total := 0.0
	for _, v := range a {
		total += v
	}
	return total
}
|
package main
import (
"fmt"
"os"
)
// main prints each command-line argument with its index.
func main() {
	// Fetch the command-line arguments.
	args := os.Args
	for i, item := range args {
		fmt.Printf("args[%d] = %s\n", i, item)
	}
	// Build first so it becomes an executable:
	//   go build command_args.go
	// then run it with arguments:
	//   command_args.exe 10 20
	// which prints:
	//   args[0] = command_args.exe
	//   args[1] = 10
	//   args[2] = 20
	// Note: the executable name itself is passed as args[0].
}
|
package fmm
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
)
// Version is a 4-part (major.minor.patch.build) version; parts omitted from
// the input string default to zero.
type Version [4]uint16

// VersionCmpRes encodes the relation produced by Version.Cmp, with Eq, Gt
// and Lt as bit flags (GtEq = Gt|Eq, LtEq = Lt|Eq).
type VersionCmpRes uint8

const (
	VersionAny VersionCmpRes = iota // 000
	VersionEq                       // 001
	VersionGt                       // 010
	VersionGtEq                     // 011
	VersionLt                       // 100
	VersionLtEq                     // 101
)

// NewVersion parses a dotted version string with 2 to 4 numeric parts;
// surrounding whitespace is ignored and missing parts are zero.
// Fix: parts are parsed with bitSize 16, so values over 65535 now return a
// range error instead of being silently truncated by the uint16 conversion
// (the original parsed with bitSize 0, i.e. full uint width).
func NewVersion(input string) (*Version, error) {
	parts := strings.Split(strings.TrimSpace(input), ".")
	if len(parts) < 2 || len(parts) > 4 {
		return nil, errors.New("version string must have between 2 and 4 parts")
	}
	var ver Version
	for i, part := range parts {
		n, err := strconv.ParseUint(part, 10, 16)
		if err != nil {
			return nil, err
		}
		ver[i] = uint16(n)
	}
	return &ver, nil
}
// Cmp compares v against other part by part, returning VersionGt or
// VersionLt at the first differing part and VersionEq when all four parts
// match. A nil other compares as equal.
func (v *Version) Cmp(other *Version) VersionCmpRes {
	if other == nil {
		return VersionEq
	}
	for i := 0; i < len(v); i++ {
		switch {
		case v[i] > other[i]:
			return VersionGt
		case v[i] < other[i]:
			return VersionLt
		}
	}
	return VersionEq
}
// ToString renders the version as "a.b.c", or "a.b.c.d" when includeBuild
// is true.
func (v *Version) ToString(includeBuild bool) string {
	if includeBuild {
		return fmt.Sprintf("%d.%d.%d.%d", v[0], v[1], v[2], v[3])
	}
	return fmt.Sprintf("%d.%d.%d", v[0], v[1], v[2])
}
// MarshalJSON encodes the version as a JSON string ("a.b.c"; the build part
// is omitted). strconv.Quote adds the surrounding quotes; the dotted-digit
// content never needs escaping, so the output matches plain concatenation.
func (v *Version) MarshalJSON() ([]byte, error) {
	return []byte(strconv.Quote(v.ToString(false))), nil
}
// UnmarshalJSON decodes a JSON version string via NewVersion and copies the
// parsed parts into v.
func (v *Version) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	parsed, err := NewVersion(s)
	if err != nil {
		return err
	}
	copy(v[:], parsed[:])
	return nil
}
|
package main
import (
"fmt"
"reflect"
)
// type save like interface
type Doctor struct {
number int
actorName string
companions []string
}
type Animal struct {
Name string
Origin string
}
type Bird struct {
Animal
SpeedKPH float32
Canfly bool
}
type BirdDescription struct {
Name string `requiredmax:"100"`
Origin string
}
// getCompanions returns the doctor's companion list. Note this returns the
// underlying slice itself, not a copy.
func (d Doctor) getCompanions() []string { // methods of struct
	return d.companions
}
// convertCompanions re-keys the companions slice into a map from slice
// index to companion name.
func (d Doctor) convertCompanions() map[int]string {
	out := map[int]string{}
	for idx, name := range d.companions {
		out[idx] = name
	}
	return out
}
// main builds a sample Doctor and prints the struct, one unexported field,
// and both companion views; the bee/description demos are left commented.
func main() {
	aDoctor := Doctor{
		number:    3,
		actorName: "Jon Pertwee",
		companions: []string{
			"Liz Shaw",
			"Jo Grant",
			"Sarah Jane Smith",
		},
	}
	fmt.Println(aDoctor)
	fmt.Println(aDoctor.number)
	fmt.Println(aDoctor.getCompanions())
	fmt.Println(aDoctor.convertCompanions())
	// bee()
	// description()
}
// bee demonstrates embedding: the fields promoted from Animal are assigned
// directly on a Bird value.
func bee() {
	b := Bird{}
	b.Name = "Bee"
	b.Origin = "Australia"
	b.SpeedKPH = 48
	b.Canfly = false
	fmt.Println(b)
}

// description reads the struct tag of a field via reflection.
// NOTE(review): it inspects Animal, whose Name field carries no tag — the
// tagged type is BirdDescription — so the printed tag is empty. Confirm
// which type was intended.
func description() {
	t := reflect.TypeOf(Animal{})
	field, _ := t.FieldByName("Name")
	fmt.Println(field.Tag)
}
|
package pathfileops
import (
"errors"
"fmt"
"strings"
)
// FileOpsCollection - A collection of files and file operations which are designed
// to perform specific actions on disk files.
//
// The zero value is usable: every method lazily initializes the internal
// slice on first use.
type FileOpsCollection struct {
	fileOps []FileOps
}
// AddByFileOps - Adds a FileOps object to the existing collection
// based on the 'FileOps' Input parameter. A deep copy of 'fileOp' is
// stored, so later mutation of the caller's value cannot affect the
// collection. An uninitialized 'fileOp' is rejected with an error.
func (fOpsCol *FileOpsCollection) AddByFileOps(fileOp FileOps) error {
	ePrefix := "FileOpsCollection.AddByFileOps() "
	// Lazily initialize the backing slice (file-wide convention).
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}
	if !fileOp.IsInitialized() {
		return errors.New(ePrefix +
			"ERROR: Input parameter 'fileOp' is NOT initialized!\n")
	}
	fOpsCol.fileOps = append(fOpsCol.fileOps, fileOp.CopyOut())
	return nil
}
// AddByFileMgrs - Adds another FileOps object to the collection based source
// and destination input parameters of type 'FileMgr'.
//
func (fOpsCol *FileOpsCollection) AddByFileMgrs(
	sourceFileMgr,
	destinationFileMgr FileMgr) error {
	ePrefix := "FileOpsCollection.AddByFileMgrs() "
	// Lazily initialize the backing slice (file-wide convention).
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}
	newFileOps, err := FileOps{}.NewByFileMgrs(sourceFileMgr, destinationFileMgr)
	if err != nil {
		return fmt.Errorf(ePrefix+
			"Error returned by FileOps{}.NewByFileMgrs(sourceFileMgr, destinationFileMgr). "+
			"Error='%v' ", err.Error())
	}
	fOpsCol.fileOps = append(fOpsCol.fileOps, newFileOps)
	return nil
}
// AddByDirMgrFileName - Creates and Adds another FileOps object to the
// collection based on input parameters consisting of a pair of DirMgr
// and file name extension strings for source and destination.
//
func (fOpsCol *FileOpsCollection) AddByDirMgrFileName(
	sourceDirMgr DirMgr,
	sourceFileNameExt string,
	destinationDirMgr DirMgr,
	destinationFileNameExt string) error {
	ePrefix := "FileOpsCollection.AddByDirMgrFileName() "
	// Lazily initialize the backing slice (file-wide convention).
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}
	newFileOps, err := FileOps{}.NewByDirMgrFileName(
		sourceDirMgr,
		sourceFileNameExt,
		destinationDirMgr,
		destinationFileNameExt)
	if err != nil {
		return fmt.Errorf(ePrefix+
			"Error returned by FileOps{}.NewByDirMgrFileName(...). "+
			"Error='%v' ", err.Error())
	}
	fOpsCol.fileOps = append(fOpsCol.fileOps, newFileOps)
	return nil
}
// AddByDirStrsAndFileNameExtStrs - Creates and adds another File Operations
// object to the collection based on two pairs of directory name and file name
// extension strings for both source and destination respectively.
//
func (fOpsCol *FileOpsCollection) AddByDirStrsAndFileNameExtStrs(
	sourceDirStr,
	sourceFileNameExtStr,
	destinationDirStr,
	destinationFileNameExtStr string) error {
	ePrefix := "FileOpsCollection.AddByDirStrsAndFileNameExtStrs() "
	// Lazily initialize the backing slice (file-wide convention).
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}
	newFileOps, err :=
		FileOps{}.NewByDirStrsAndFileNameExtStrs(
			sourceDirStr,
			sourceFileNameExtStr,
			destinationDirStr,
			destinationFileNameExtStr)
	if err != nil {
		return fmt.Errorf(ePrefix+
			"Error returned by FileOps{}.NewByDirStrsAndFileNameExtStrs(...) "+
			"Error='%v' ", err.Error())
	}
	fOpsCol.fileOps = append(fOpsCol.fileOps, newFileOps)
	return nil
}
// AddByPathFileNameExtStrs - Creates and adds another File Operations
// object to the collection based on two input strings which contain the
// full path name, file name and file extension for the source and
// destination respectively.
//
func (fOpsCol *FileOpsCollection) AddByPathFileNameExtStrs(
	sourcePathFileNameExt,
	destinationPathFileNameExt string) error {
	ePrefix := "FileOpsCollection.AddByPathFileNameExtStrs() "
	// Lazily initialize the backing slice (file-wide convention).
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}
	newFileOps, err :=
		FileOps{}.NewByPathFileNameExtStrs(
			sourcePathFileNameExt, destinationPathFileNameExt)
	if err != nil {
		return fmt.Errorf(ePrefix+
			"Error returned by FileOps{}.NewByPathFileNameExtStrs(...) "+
			"sourcePathFileNameExt='%v' destinationPathFileNameExt='%v' Error='%v' ",
			sourcePathFileNameExt, destinationPathFileNameExt, err.Error())
	}
	fOpsCol.fileOps = append(fOpsCol.fileOps, newFileOps)
	return nil
}
// CopyOut - Returns a FileOpsCollection which is an exact duplicate of the
// current FileOpsCollection. The copy operation is a 'deep copy'. An empty
// collection produces an error.
func (fOpsCol *FileOpsCollection) CopyOut() (FileOpsCollection, error) {
	ePrefix := "FileOpsCollection.CopyOut() "
	// Lazily initialize the backing slice (file-wide convention).
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}
	fOpsCol2 := FileOpsCollection{}
	fOpsCol2.fileOps = make([]FileOps, 0, 100)
	arrayLen := len(fOpsCol.fileOps)
	if arrayLen == 0 {
		return FileOpsCollection{},
			errors.New(ePrefix +
				"Error: This File Operations Collection ('FileOpsCollection') is EMPTY! ")
	}
	for i := 0; i < arrayLen; i++ {
		// AddByFileOps deep-copies its argument again, so elements are
		// fully detached from the source collection.
		err := fOpsCol2.AddByFileOps(fOpsCol.fileOps[i].CopyOut())
		if err != nil {
			return FileOpsCollection{},
				fmt.Errorf(ePrefix +
					"Error returned by fOpsCol2.AddByFileOps(fOp)\n" +
					"Index='%v'\nError='%v'\n",
					i, err.Error())
		}
	}
	return fOpsCol2, nil
}
// DeleteAtIndex - Deletes a member File Operations element
// from the collection at the index specified by input
// parameter, 'idx'.
//
// If successful, at the completion of this method, the File
// Operations Collection array will have a length which is one
// less than the starting array length.
//
func (fOpsCol *FileOpsCollection) DeleteAtIndex(idx int) error {
	ePrefix := "FileOpsCollection.DeleteAtIndex() "
	if idx < 0 {
		return fmt.Errorf(ePrefix+
			"Error: Input Parameter 'idx' is less than zero. "+
			"Index Out-Of-Range! idx='%v'", idx)
	}
	// Lazily initialize the backing slice (file-wide convention).
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 50)
	}
	arrayLen := len(fOpsCol.fileOps)
	if arrayLen == 0 {
		return errors.New(ePrefix +
			"Error: The File Operations Collection, 'FileOpsCollection', is EMPTY!")
	}
	if idx >= arrayLen {
		return fmt.Errorf(ePrefix+
			"Error: Input Parameter 'idx' is greater than the "+
			"length of the collection index. Index Out-Of-Range! "+
			"idx='%v' Array Length='%v' ", idx, arrayLen)
	}
	if arrayLen == 1 {
		// Deleting the only element empties the collection.
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	} else if idx == 0 {
		// arrayLen > 1 and requested idx = 0
		fOpsCol.fileOps = fOpsCol.fileOps[1:]
	} else if idx == arrayLen-1 {
		// arrayLen > 1 and requested idx = last element index
		fOpsCol.fileOps = fOpsCol.fileOps[0 : arrayLen-1]
	} else {
		// arrayLen > 1 and idx is in between
		// first and last elements
		fOpsCol.fileOps =
			append(fOpsCol.fileOps[0:idx], fOpsCol.fileOps[idx+1:]...)
	}
	return nil
}
// Equal - Compares the input parameter FileOpsCollection to the current
// FileOpsCollection instance. If they are equal, this method returns
// true. A nil 'fOpsCol2' is reported as not equal.
//
// NOTE(review): following the file-wide lazy-init convention, this method
// mutates BOTH collections (nil slices become empty slices) even though it
// is logically a read-only comparison.
func (fOpsCol *FileOpsCollection) Equal(fOpsCol2 *FileOpsCollection) bool {
	if fOpsCol2 == nil {
		return false
	}
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 50)
	}
	if fOpsCol2.fileOps == nil {
		fOpsCol2.fileOps = make([]FileOps, 0, 50)
	}
	if len(fOpsCol.fileOps) != len(fOpsCol2.fileOps) {
		return false
	}
	// Element-wise comparison of source, destination and operation code.
	for i := 0; i < len(fOpsCol.fileOps); i++ {
		if !fOpsCol.fileOps[i].source.Equal(&fOpsCol2.fileOps[i].source) {
			return false
		}
		if !fOpsCol.fileOps[i].destination.Equal(&fOpsCol2.fileOps[i].destination) {
			return false
		}
		if int(fOpsCol.fileOps[i].opToExecute) != int(fOpsCol2.fileOps[i].opToExecute) {
			return false
		}
	}
	return true
}
// ExecuteFileOperations - Executes a file operation on
// each member of the File Operations Collection. Any
// errors are collected and returned in a single aggregated
// error message; execution continues past individual failures.
//
// The type of file operation performed is specified by
// input parameter, 'fileOp'. 'fileOp' is of type
// 'FileOperationCode'.
//
func (fOpsCol *FileOpsCollection) ExecuteFileOperations(
	fileOp FileOperationCode) error {
	ePrefix := "FileOpsCollection.ExecuteFileOperation() "
	// Lazily initialize the backing slice (file-wide convention).
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 50)
	}
	arrayLen := len(fOpsCol.fileOps)
	if arrayLen == 0 {
		return errors.New(ePrefix +
			"Error: This File Operations Collection ('FileOpsCollection') is EMPTY! ")
	}
	// Accumulate all failures into one message.
	var b strings.Builder
	_, err := fmt.Fprintf(&b, "%s Errors Returned by ExecuteFileOperations()", ePrefix)
	if err != nil {
		return fmt.Errorf(ePrefix+
			"Error returned by initial fmt.Fprint(). %v\n", err.Error())
	}
	errNo := 0
	for i := 0; i < arrayLen; i++ {
		err = fOpsCol.fileOps[i].ExecuteFileOperation(fileOp)
		if err != nil {
			// Number each failure and keep executing the remaining elements.
			errNo++
			_, err2 := fmt.Fprintf(&b, "%d. %v ", errNo, err.Error())
			if err2 != nil {
				return fmt.Errorf(ePrefix+
					"Error returned by fmt.Fprint(). %s\n", err2.Error())
			}
		}
	}
	if errNo > 0 {
		return errors.New(b.String())
	}
	return nil
}
// GetFileOpsAtIndex - If successful, this method returns a pointer to
// the FileOps instance at the array index specified. The 'Peek' and 'Pop'
// methods below return FileOps objects using a 'deep' copy and therefore
// offer better protection against data corruption.
//
func (fOpsCol *FileOpsCollection) GetFileOpsAtIndex(idx int) (*FileOps, error) {
	ePrefix := "FileOpsCollection.GetFileOpsAtIndex() "
	// Returned (with an error) when the collection or index is unusable.
	emptyFileOps := FileOps{}
	// Lazily initialize the backing slice (file-wide convention).
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 50)
	}
	arrayLen := len(fOpsCol.fileOps)
	if arrayLen == 0 {
		return &emptyFileOps,
			fmt.Errorf(ePrefix +
				"Error: This File Operations Collection ('FileOpsCollection') is EMPTY!")
	}
	if idx < 0 || idx >= arrayLen {
		return &emptyFileOps,
			fmt.Errorf(ePrefix+
				"Error: The input parameter, 'idx', is OUT OF RANGE! idx='%v'. \n"+
				"The minimum index is '0'. "+
				"The maximum index is '%v'. ", idx, arrayLen-1)
	}
	return &fOpsCol.fileOps[idx], nil
}
// GetNumOfFileOps - Returns the number of File Operations objects
// in the collection. Effectively, this is the array length of
// internal field FileOpsCollection.fileOps.
//
func (fOpsCol *FileOpsCollection) GetNumOfFileOps() int {
	// Lazy-init keeps later appends safe; len(nil) would also report zero.
	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 50)
	}
	return len(fOpsCol.fileOps)
}
// InsertFileOpsAtIndex - Inserts a new File Operations type ('FileOps') into
// the collection at array 'index'. The new File Operations instance is passed
// as input parameter 'fOps'.
//
// If input parameter 'index' is less than zero, an error will be returned. If
// 'index' exceeds the value of the last index in the collection, 'fOps' will be
// added to the end of the collection at the next legal index.
//
func (fOpsCol *FileOpsCollection) InsertFileOpsAtIndex(fOps FileOps, index int) error {

	// Fix: the prefix previously read "FileMgrCollection...", a
	// copy/paste remnant from another type's method.
	ePrefix := "FileOpsCollection.InsertFileOpsAtIndex() "

	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}

	if index < 0 {
		return fmt.Errorf(ePrefix+
			"Error: Input parameter 'index' is LESS THAN ZERO! "+
			"index='%v' ", index)
	}

	lenFOps := len(fOpsCol.fileOps)

	if index >= lenFOps {
		// Index beyond the last element: append at the end.
		fOpsCol.fileOps = append(fOpsCol.fileOps, fOps.CopyOut())
		return nil
	}

	if index == 0 {
		// Prepend the new element.
		newFileOps := make([]FileOps, 0, 100)
		newFileOps = append(newFileOps, fOps.CopyOut())
		fOpsCol.fileOps = append(newFileOps, fOpsCol.fileOps...)
		return nil
	}

	// Copy the tail before truncating so the insertion does not
	// overwrite elements sharing the same backing array.
	tail := make([]FileOps, 0, 100)
	tail = append(tail, fOpsCol.fileOps[index:]...)

	// Fix: the original used a single-argument append(x[:index]), a
	// no-op that 'go vet' flags; plain re-slicing is the intent.
	fOpsCol.fileOps = fOpsCol.fileOps[:index]
	fOpsCol.fileOps = append(fOpsCol.fileOps, fOps.CopyOut())
	fOpsCol.fileOps = append(fOpsCol.fileOps, tail...)

	return nil
}
// New - Creates and returns a new, properly initialized
// instance of 'FileOpsCollection'.
func (fOpsCol FileOpsCollection) New() FileOpsCollection {

	// Note: the receiver is a value copy, so initializing its fileOps
	// field (as the previous version did) was dead code with no effect
	// outside this method. Only the returned instance needs setup.
	newFileOpsCol := FileOpsCollection{}
	newFileOpsCol.fileOps = make([]FileOps, 0, 100)

	return newFileOpsCol
}
// NewFromFileMgrCollection - Creates and returns a new
// File Operations Collection ('FileOpsCollection')
// generated from an existing File Manger Collection
// ('FileMgrCollection') and a target base directory.
//
// The source files for the new File Operations Collection
// are taken from the input parameter 'fMgrCol', the
// incoming File Manager Collection.
//
// The destination files for the new File Operations Collection
// are created from the source file names. The destination file
// directories are created by substituting the target base
// directory ('targetBaseDir') for the source base directory
// ('sourceBaseDir') in the source directory tree.
//
// This substitution is helpful when copying one directory tree
// to another directory tree.
//
func (fOpsCol FileOpsCollection) NewFromFileMgrCollection(
	fMgrCol *FileMgrCollection,
	sourceBaseDir,
	targetBaseDir *DirMgr) (FileOpsCollection, error) {

	ePrefix := "FileOpsCollection.NewFromFileMgrCollection() "

	// Validate all pointer parameters before any use.
	if fMgrCol == nil {
		return FileOpsCollection{},
			errors.New(ePrefix +
				"ERROR: Input parameter 'fMgrCol' is nil!")
	}

	if sourceBaseDir == nil {
		return FileOpsCollection{},
			errors.New(ePrefix +
				"ERROR: Input parameter 'sourceBaseDir' is a 'nil' pointer!\n")
	}

	if targetBaseDir == nil {
		return FileOpsCollection{},
			errors.New(ePrefix +
				"ERROR: Input parameter 'targetBaseDir' is a 'nil' pointer!\n")
	}

	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}

	if fMgrCol.fileMgrs == nil {
		fMgrCol.fileMgrs = make([]FileMgr, 0, 100)
	}

	if len(fMgrCol.fileMgrs) == 0 {
		return FileOpsCollection{},
			errors.New(ePrefix +
				"ERROR: The File Manager Collection input parameter 'fMgrCol' is EMPTY!\n")
	}

	// The source base directory is lower-cased for a case-insensitive
	// match against each file path below.
	srcBaseDir := strings.ToLower(sourceBaseDir.GetAbsolutePath())

	targBaseDir := targetBaseDir.GetAbsolutePath()

	srcBaseDirLen := len(srcBaseDir)

	arrayLen := fMgrCol.GetNumOfFileMgrs()

	// Pre-size the destination collection with a little headroom.
	newFileOpsCol := FileOpsCollection{}.New()
	newFileOpsCol.fileOps = make([]FileOps, 0, arrayLen+10)

	for i := 0; i < arrayLen; i++ {

		srcFMgr, err := fMgrCol.PeekFileMgrAtIndex(i)

		if err != nil {
			return FileOpsCollection{},
				fmt.Errorf(ePrefix+
					"Error returned by fMgrCol.PeekFileMgrAtIndex(i). "+
					"i='%v' Error='%v' ", i, err.Error())
		}

		srcPathFileName := srcFMgr.GetAbsolutePathFileName()

		idx := strings.Index(strings.ToLower(srcPathFileName), srcBaseDir)

		if idx < 0 {
			return FileOpsCollection{},
				fmt.Errorf(ePrefix+
					"Error: Could not locate source base directory in source file path! "+
					"Source Base Directory:='%v' Source Path File Name='%v'",
					srcBaseDir, srcPathFileName)
		}

		// targetDir + pathFile[lenSrcBaseDir:]
		// NOTE(review): the slice below assumes the base directory is a
		// prefix of the path (idx == 0). If the match occurred at idx > 0
		// the substitution would keep unintended leading characters —
		// confirm the intended behavior.
		targetPathFileName := targBaseDir + srcPathFileName[srcBaseDirLen:]

		destFMgr, err := FileMgr{}.NewFromPathFileNameExtStr(targetPathFileName)

		if err != nil {
			return FileOpsCollection{},
				fmt.Errorf(ePrefix+
					"Error returned by FileMgr{}.NewFromPathFileNameExtStr(targetPathFileName). "+
					"targetPathFileName='%v' Error='%v' ", targetPathFileName, err.Error())
		}

		// Pair the source and destination managers as one file operation.
		err = newFileOpsCol.AddByFileMgrs(srcFMgr, destFMgr)

		if err != nil {
			return FileOpsCollection{},
				fmt.Errorf(ePrefix+
					"Error returned by newFileOpsCol.AddByFileMgrs(srcFMgr, destFMgr). "+
					"srcFMgr='%v' destFMgr='%v' Error='%v' ",
					srcFMgr.GetAbsolutePathFileName(), destFMgr.GetAbsolutePathFileName(),
					err.Error())
		}
	}

	return newFileOpsCol, nil
}
// PeekFileOpsAtIndex - Returns a deep copy of the File Operations
// ('FileOps') object located at array index 'idx' in the File
// Operations Collection ('FileOpsCollection'). This is a 'Peek'
// method: the original element is NOT removed, and the collection
// length is unchanged when this method completes.
//
func (fOpsCol *FileOpsCollection) PeekFileOpsAtIndex(idx int) (FileOps, error) {

	ePrefix := "FileOpsCollection.PeekFileOpsAtIndex() "

	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 50)
	}

	arrayLen := len(fOpsCol.fileOps)

	// Validate collection state and index; checks preserve the
	// original precedence (empty, negative, too large).
	switch {
	case arrayLen == 0:
		return FileOps{},
			errors.New(ePrefix +
				"Error: The File Operations Collection, 'FileOpsCollection' is EMPTY!")

	case idx < 0:
		return FileOps{}, fmt.Errorf(ePrefix+
			"Error: Input Parameter 'idx' is less than zero. "+
			"Index Out-Of-Range! idx='%v'", idx)

	case idx >= arrayLen:
		return FileOps{},
			fmt.Errorf(ePrefix+
				"Error: Input Parameter 'idx' is greater than the "+
				"length of the collection array. Index Out-Of-Range! "+
				"idx='%v' Array Length='%v' ",
				idx, arrayLen)
	}

	return fOpsCol.fileOps[idx].CopyOut(), nil
}
// PeekFirstFileOps - Returns a deep copy of the first File
// Operations ('FileOps') object in the File Operations Collection
// ('FileOpsCollection') without removing it. The collection array
// length is unchanged when this method completes.
//
func (fOpsCol *FileOpsCollection) PeekFirstFileOps() (FileOps, error) {

	ePrefix := "FileOpsCollection.PeekFirstFileOps() "

	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 50)
	}

	// Guard clause: nothing to peek in an empty collection.
	if count := len(fOpsCol.fileOps); count == 0 {
		return FileOps{},
			errors.New(ePrefix +
				"Error: The File Operations Collection ('FileOpsCollection') is EMPTY!")
	}

	return fOpsCol.fileOps[0].CopyOut(), nil
}
// PeekLastFileOps - Returns a deep copy of the last File
// Operations ('FileOps') object in the File Operations
// Collection ('FileOpsCollection') without removing it.
// The collection array length is unchanged when this
// method completes.
//
func (fOpsCol *FileOpsCollection) PeekLastFileOps() (FileOps, error) {

	ePrefix := "FileOpsCollection.PeekLastFileOps()"

	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 50)
	}

	// lastIdx < 0 means the collection holds no elements.
	lastIdx := len(fOpsCol.fileOps) - 1

	if lastIdx < 0 {
		return FileOps{},
			errors.New(ePrefix +
				"Error: The File Operations Collection, 'FileOpsCollection' is EMPTY!")
	}

	return fOpsCol.fileOps[lastIdx].CopyOut(), nil
}
// PopFileOpsAtIndex - Returns a copy of the File Operations (FileOps)
// object located at index, 'idx', in the File Operations Collection
// ('FileOpsCollection') array. As a 'Pop' method, the original File
// Operations ('FileOps') object is deleted from the File Operations
// Collection ('FileOpsCollection') array, so the collection shrinks
// by one element on success.
//
func (fOpsCol *FileOpsCollection) PopFileOpsAtIndex(idx int) (FileOps, error) {

	ePrefix := "FileOpsCollection.PopFileOpsAtIndex() "

	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}

	// Negative indexes are rejected before the emptiness check,
	// preserving the original error precedence.
	if idx < 0 {
		return FileOps{}, fmt.Errorf(
			ePrefix+
				"Error: Input Parameter is less than zero. "+
				"Index Out-Of-Range! idx='%v'", idx)
	}

	arrayLen := len(fOpsCol.fileOps)

	if arrayLen == 0 {
		return FileOps{},
			errors.New(ePrefix +
				"Error: The File Operations Collection, 'FileOpsCollection', is EMPTY!")
	}

	if idx >= arrayLen {
		return FileOps{},
			fmt.Errorf(ePrefix+
				"Error: Input Parameter is greater than the "+
				"length of the collection index. Index Out-Of-Range! "+
				"idx='%v' Array Length='%v' ", idx, arrayLen)
	}

	// Delegate the boundary positions to the dedicated Pop methods.
	switch idx {
	case 0:
		return fOpsCol.PopFirstFileOps()
	case arrayLen - 1:
		return fOpsCol.PopLastFileOps()
	}

	// Interior element: deep-copy it, then splice it out of the slice.
	popped := fOpsCol.fileOps[idx].CopyOut()

	fOpsCol.fileOps =
		append(fOpsCol.fileOps[:idx], fOpsCol.fileOps[idx+1:]...)

	return popped, nil
}
// PopFirstFileOps - Returns a deep copy of the first File Operations
// ('FileOps') object in the File Operations Collection array. As a
// 'Pop' method, the original File Operations ('FileOps') object is
// deleted from the File Operations Collection ('FileOpsCollection')
// array, so the collection shrinks by one element on success.
//
func (fOpsCol *FileOpsCollection) PopFirstFileOps() (FileOps, error) {

	// Fix: the prefix previously read "DirMgrCollection.PopFirstDirMgr() ",
	// a copy/paste remnant that mislabeled every error emitted here.
	ePrefix := "FileOpsCollection.PopFirstFileOps() "

	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 50)
	}

	if len(fOpsCol.fileOps) == 0 {
		return FileOps{},
			errors.New(ePrefix +
				"Error: The File Operations Collection is EMPTY!")
	}

	// Deep-copy the first element, then drop it from the slice head.
	fileOps := fOpsCol.fileOps[0].CopyOut()

	fOpsCol.fileOps = fOpsCol.fileOps[1:]

	return fileOps, nil
}
// PopLastFileOps - Returns a deep copy of the last File Operations
// ('FileOps') object in the File Operations Collection array. As a
// 'Pop' method, the original File Operations ('FileOps') object is
// deleted from the File Operations Collection ('FileOpsCollection')
// array, so the collection shrinks by one element on success.
//
func (fOpsCol *FileOpsCollection) PopLastFileOps() (FileOps, error) {

	ePrefix := "FileOpsCollection.PopLastFileOps() "

	if fOpsCol.fileOps == nil {
		fOpsCol.fileOps = make([]FileOps, 0, 100)
	}

	// lastIdx < 0 means the collection holds no elements.
	lastIdx := len(fOpsCol.fileOps) - 1

	if lastIdx < 0 {
		return FileOps{},
			errors.New(ePrefix +
				"Error: The File Operations Collection, 'FileOpsCollection', is EMPTY!")
	}

	// Deep-copy the final element, then truncate it off the slice.
	popped := fOpsCol.fileOps[lastIdx].CopyOut()
	fOpsCol.fileOps = fOpsCol.fileOps[:lastIdx]

	return popped, nil
}
|
package list
import (
"fmt"
"testing"
"github.com/influxdata/influxdb/pkg/testing/assert"
)
// Shared fixtures for the circular-list tests below.
var testNormalArray = []interface{}{1, 2, 3, 4, 5, 6, 7, 8} // typical multi-element input
var testEmptyArray = []interface{}{}                        // zero elements
var testSingleValueArray = []interface{}{0}                 // exactly one element
// insertNodes builds a CircularList containing the values of arr in
// insertion order.
func insertNodes(arr []interface{}) *CircularList {
	list := &CircularList{}
	for _, value := range arr {
		list.AddNode(value)
	}
	return list
}
// TestCircularList_AddNode verifies that nodes added to the list are
// iterated back in insertion order.
func TestCircularList_AddNode(t *testing.T) {
	circularList := insertNodes(testNormalArray)

	values, err := circularList.IterateNode()
	if err != nil {
		// Fail the test instead of panicking so 'go test' reports it cleanly.
		t.Fatalf("IterateNode returned an unexpected error: %v", err)
	}

	for _, v := range values {
		fmt.Printf("%d->", v)
	}
	fmt.Println()

	successArray := []interface{}{1, 2, 3, 4, 5, 6, 7, 8}
	assert.Equal(t, values, successArray)
}
// TestCircularList_AddNode_Empty verifies that iterating a list built
// from no elements yields an error.
func TestCircularList_AddNode_Empty(t *testing.T) {
	circularList := &CircularList{}
	for _, value := range testEmptyArray {
		circularList.AddNode(value)
	}

	// Fail via the testing API rather than panicking.
	if _, err := circularList.IterateNode(); err == nil {
		t.Fatal("expected an error when iterating an empty list, got nil")
	}
}
// TestCircularList_Remove verifies that Remove on a populated list
// returns the expected element (8 for the 1..8 fixture).
func TestCircularList_Remove(t *testing.T) {
	// 1,2,3,4,5,6,7,8
	circularList := insertNodes(testNormalArray)

	remove, err := circularList.Remove()
	if err != nil {
		// Fail the test instead of panicking so 'go test' reports it cleanly.
		t.Fatalf("Remove returned an unexpected error: %v", err)
	}

	fmt.Println("remove value:", remove)
	assert.Equal(t, remove, 8)
}
// TestCircularList_Remove_emptyList verifies that Remove on an empty
// list reports an error.
func TestCircularList_Remove_emptyList(t *testing.T) {
	circularList := &CircularList{}
	if _, err := circularList.Remove(); err == nil {
		// Fail via the testing API rather than panicking.
		t.Fatal("expected an error when removing from an empty list, got nil")
	}
}
// Test inserting 1..8 and then removing the head element by index.
func TestCircularList_RemoveByIndex(t *testing.T) {
	// 1,2,3,4,5,6,7,8
	circularList := insertNodes(testNormalArray)

	removeValue, err := circularList.RemoveByIndex(1)
	if err != nil {
		// Fail the test instead of panicking so 'go test' reports it cleanly.
		t.Fatalf("RemoveByIndex returned an unexpected error: %v", err)
	}
	fmt.Println("remove value:", removeValue)

	values, err := circularList.IterateNode()
	if err != nil {
		t.Fatalf("IterateNode returned an unexpected error: %v", err)
	}

	assert.Equal(t, removeValue, 1)
	assert.Equal(t, values, []interface{}{2, 3, 4, 5, 6, 7, 8})
}
// Test inserting a single value and then removing by an index beyond it.
// The expected behavior is that value 1 is removed normally and that a
// subsequent iteration fails with "NullPointerException".
func TestCircularList_RemoveByIndex_OneValue(t *testing.T) {
	circularList := insertNodes([]interface{}{1})

	removeValue, err := circularList.RemoveByIndex(2)
	if err != nil {
		// Fail the test instead of panicking so 'go test' reports it cleanly.
		t.Fatalf("RemoveByIndex returned an unexpected error: %v", err)
	}
	fmt.Println("remove value:", removeValue)

	values, err := circularList.IterateNode()
	if err != nil {
		assert.Equal(t, err.Error(), "NullPointerException")
	}

	for _, value := range values {
		fmt.Println(value)
	}
}
// TestCircularList_IterateNode is an empty placeholder.
// TODO: add iteration-specific assertions (order, wrap-around).
func TestCircularList_IterateNode(t *testing.T) {
}
|
package coordinator
import "sync"
// BackItem is a queue element pairing an arbitrary payload value with
// an associated switch value string.
type BackItem struct {
	value        interface{} // payload carried by the queue element
	switch_value string      // NOTE(review): Go style prefers switchValue; renaming may touch other files in this package.
}
// BackQueue is a mutex-guarded queue of BackItem values. The embedded
// RWMutex is expected to protect 'items' and 'elemsCount'; locking
// discipline is enforced by this type's methods (not visible here).
type BackQueue struct {
	sync.RWMutex
	items      []*BackItem // queued elements
	elemsCount int         // presumably a cached element count — verify against the methods that maintain it
}
// newBackItem constructs a BackItem holding the given payload and
// switch value.
func newBackItem(value interface{}, switch_value string) *BackItem {
	item := &BackItem{}
	item.value = value
	item.switch_value = switch_value
	return item
}
|
package dao
import "fmt"
// Mysqltest prints a fixed marker string; apparently a placeholder
// used to verify the dao package is wired up.
func Mysqltest() {
	fmt.Println("ggg")
}
|
package main
import (
"flag"
"log"
"os"
"github.com/thefirstofthe300/ekg/dns"
"github.com/thefirstofthe300/ekg/fmt"
"github.com/thefirstofthe300/ekg/processes"
"github.com/thefirstofthe300/ekg/route"
)
// main parses command-line flags, optionally redirects logging and
// metric output to files (rotating any existing file to a ".0"
// backup), then gathers the requested diagnostics (processes, DNS,
// routes) and formats them to the selected output.
func main() {
	outptr := os.Stdout

	help := flag.Bool("help", false, "Display this help dialog and exit.")
	procs := flag.Bool("processes", false, "Pretty prints the currently running processes")
	dnsdump := flag.Bool("dns", false, "Dumps the state of DNS")
	routes := flag.Bool("routes", false, "Dumps the current routing table")
	logfile := flag.String("log-file", "", "Log file to use")
	outfile := flag.String("out-file", "", "Output file for the gathered metrics")

	flag.Parse()

	if *logfile != "" {
		if _, err := os.Stat(*logfile); err == nil {
			// Rotate the previous log. The rename error is deliberately
			// ignored: a failed rotation should not block logging.
			os.Rename(*logfile, *logfile+".0")
		}

		logptr, err := os.OpenFile(*logfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
		if err != nil {
			log.Fatalf("unable to open log file for writing: %s", err)
		}
		defer logptr.Close()

		log.SetOutput(logptr)
	}

	if *outfile != "" {
		if _, err := os.Stat(*outfile); err == nil {
			os.Rename(*outfile, *outfile+".0")
		}

		var err error
		outptr, err = os.OpenFile(*outfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
		if err != nil {
			// Fix: this message previously said "log file" (copy/paste).
			log.Fatalf("unable to open output file for writing: %s", err)
		}
		defer outptr.Close()
	}

	toFmt := fmt.Config{
		Processes: nil,
		DNS:       nil,
	}

	if *help {
		flag.PrintDefaults()
		os.Exit(1)
	}

	log.Printf("I started and help doesn't exist.")

	if *procs {
		p, err := processes.New()
		if err != nil {
			log.Fatalf("could not get processes: %s", err)
		}
		toFmt.Processes = p
	}

	if *dnsdump {
		// Passing a blank string because we are not in the testing environment
		resolvconf, err := dns.NewResolvConf("")
		if err != nil {
			log.Fatalf("unable to generate resolvConf: %s", err)
		}

		dnsInfo, err := dns.NewConfig(resolvconf, false)
		if err != nil {
			log.Fatalf("unable to generate DNS config: %s", err)
		}
		toFmt.DNS = dnsInfo
	}

	if *routes {
		routing, err := route.NewTable("/proc/net/route")
		if err != nil {
			log.Fatalf("unable to generate routing table: %s", err)
		}
		toFmt.Routes = routing
	}

	// 'fmt' here is the project's ekg/fmt package, not the stdlib.
	fmt.Printf(outptr, &toFmt)
}
|
package session
import (
"context"
"errors"
)
// sessionKey is the package-private context key under which a Session
// is stored. An anonymous struct value is used so the key cannot
// collide with keys defined by other packages.
var sessionKey = struct {
	value string
}{"sessionKey"}
// Session carries an authenticated user's identity through a request
// context.
type Session struct {
	ID    int    `json:"id"`    // user ID; 0 is treated as invalid by Valid
	Email string `json:"email"` // user email address
}
// Set returns a child context carrying this session under the
// package-private sessionKey; retrieve it with Get.
func (s *Session) Set(ctx context.Context) context.Context {
	return context.WithValue(ctx, sessionKey, s)
}
// HasPermission reports whether the session grants 'permission'.
// NOTE(review): this currently always returns true regardless of the
// argument — confirm whether a real permission check is still pending.
func (s *Session) HasPermission(permission string) bool {
	return true
}
// Valid reports whether the session is usable. It returns an error
// when the receiver is nil or when the session carries an invalid
// (zero) user ID; otherwise it returns nil.
func (s *Session) Valid() error {
	if s == nil {
		return errors.New("session is not valid")
	}
	// Idiom fix: dropped the redundant 'else' after a terminating 'if'.
	if s.ID == 0 {
		return errors.New("session with invalid user id")
	}
	return nil
}
// GetUserId extracts the authenticated user's ID from the session
// stored in ctx. It returns 0 and an error when no session is present.
func GetUserId(ctx context.Context) (int, error) {
	s, err := Get(ctx)
	if err != nil {
		return 0, err
	}
	return s.ID, nil
}
// Get retrieves the *Session previously stored in ctx via Set. An
// error is returned when the context carries no session value.
func Get(ctx context.Context) (*Session, error) {
	s, ok := ctx.Value(sessionKey).(*Session)
	if !ok {
		return nil, errors.New("could not get session from context")
	}
	return s, nil
}
|
package enter
import (
"strings"
"time"
"github.com/devspace-cloud/devspace/cmd"
"github.com/devspace-cloud/devspace/cmd/flags"
"github.com/devspace-cloud/devspace/e2e/utils"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//1. enter --container
//2. enter --pod
//3. enter --label-selector
//4. enter --pick
// runDefault runs the 'enter' command four ways (by container name,
// pod name, label selector, and interactive pick) against the first
// pod in the namespace, and verifies that the echoed test string is
// the prefix of the captured output each time.
func runDefault(f *utils.BaseCustomFactory, logger log.Logger) error {
	logger.Info("Run sub test 'default' of test 'enter'")
	logger.StartWait("Run test...")
	defer logger.StopWait()

	pods, err := f.Client.KubeClient().CoreV1().Pods(f.Namespace).List(metav1.ListOptions{})
	if err != nil {
		return errors.Errorf("Unable to list the pods: %v", err)
	}

	// Fix: guard against an empty pod list before indexing Items[0],
	// which would otherwise panic with an index-out-of-range error.
	if len(pods.Items) == 0 {
		return errors.Errorf("No pods found in namespace %v", f.Namespace)
	}

	podName := pods.Items[0].Name

	enterConfigs := []*cmd.EnterCmd{
		{
			GlobalFlags: &flags.GlobalFlags{
				Namespace: f.Namespace,
				NoWarn:    true,
				Silent:    true,
			},
			Wait:      true,
			Container: "container-0",
		},
		{
			GlobalFlags: &flags.GlobalFlags{
				Namespace: f.Namespace,
				NoWarn:    true,
				Silent:    true,
			},
			Wait: true,
			Pod:  podName,
		},
		{
			GlobalFlags: &flags.GlobalFlags{
				Namespace: f.Namespace,
				NoWarn:    true,
				Silent:    true,
			},
			Wait:          true,
			LabelSelector: "app=test",
		},
		{
			GlobalFlags: &flags.GlobalFlags{
				Namespace: f.Namespace,
				NoWarn:    true,
				Silent:    true,
			},
			Wait: true,
			Pick: true,
		},
	}

	for _, c := range enterConfigs {
		// Capture stdout produced by the enter command.
		done := utils.Capture()

		output := "My Test Data"

		err = c.Run(f, nil, []string{"echo", output})
		if err != nil {
			return err
		}

		time.Sleep(time.Second * 5)

		capturedOutput, err := done()
		if err != nil {
			return err
		}

		if !strings.HasPrefix(capturedOutput, output) {
			return errors.Errorf("capturedOutput '%s' is different than output '%s' for the enter cmd", capturedOutput, output)
		}
	}

	return nil
}
|
package api
import (
"net/http"
"github.com/Zenika/marcel/api/auth"
"github.com/Zenika/marcel/api/commons"
)
// validateHandler answers 403 Forbidden when the request carries no
// valid authentication; otherwise it writes nothing (implicit 200).
func validateHandler(w http.ResponseWriter, r *http.Request) {
	// Local renamed to 'a' so it no longer shadows the auth package.
	a := auth.GetAuth(r)
	if a == nil {
		commons.WriteResponse(w, http.StatusForbidden, "")
	}
}
// validateAdminHandler answers 403 Forbidden unless the request holds
// the "admin" permission; otherwise it writes nothing (implicit 200).
func validateAdminHandler(w http.ResponseWriter, r *http.Request) {
	// Guard-clause form of the original negated condition.
	if auth.CheckPermissions(r, nil, "admin") {
		return
	}
	commons.WriteResponse(w, http.StatusForbidden, "")
}
|
package content
import (
"errors"
)
// Errors
var (
	// NOTE(review): the two messages below look as though they may be
	// swapped relative to their names — verify against callers before
	// changing either string.
	ErrDatastoreDoneContentID = errors.New("Datastore done at PageContent")
	ErrDatastoreDoneContent   = errors.New("Can't find any content for given content " +
		"id.")
)
// en-us, en-au, en-ca, tr...
// Language holds a set of localized string values keyed by name. The
// ID field is excluded from datastore persistence.
type Language struct {
	ID     string            `datastore:"-"`
	Values map[string]string `json:"values"`
}

// Languages is a collection of Language records.
type Languages []Language
// Page identifies a named page whose contents are stored separately.
type Page struct {
	Name string `json:"name"`
}

// Pages is a collection of Page records.
type Pages []Page
// Add isInvalidate and set it true for all pages when language changed.
// And add this control at the top of shouldFatch function.
// Group contents by page in Store as root.
// CONSIDERING TO ADD LANG CODE TO THE CONTENT
// Content holds a set of content values keyed by name. The ID field
// is excluded from datastore persistence.
type Content struct {
	ID     string            `datastore:"-"`
	Values map[string]string `json:"values"`
}

// Contents is a collection of Content records.
type Contents []Content
// PageID is Parent Key
// PageContent links a page (as the datastore parent key) to a content
// entity key.
// NOTE(review): *datastore.Key is referenced but no datastore package
// appears in this file's visible import block — confirm the import.
type PageContent struct {
	ContentKey *datastore.Key
}
|
package main
import (
"fmt"
"log"
"net/http"
"github.com/aiden0z/go-jwt-middleware"
"github.com/dgrijalva/jwt-go"
"github.com/facebookgo/inject"
"github.com/gorilla/mux"
"github.com/jessevdk/go-flags"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
"github.com/hirondelle-app/api/api"
"github.com/hirondelle-app/api/common"
"github.com/hirondelle-app/api/tweets"
"github.com/hirondelle-app/api/users"
)
// init parses the application configuration from command-line flags
// into common.Config before main runs; it panics on invalid flags.
func init() {
	_, err := flags.Parse(&common.Config)
	if err != nil {
		panic(err)
	}
}
// main wires up the database, JWT middleware, handlers and managers
// via dependency injection, registers the public and admin HTTP
// routes, and serves on the configured port.
func main() {
	db := initDatabase()
	jwtMiddleware := initJwtMiddleware()

	tweetsHandlers := &api.TweetsHandlers{}
	authMiddleware := &api.AuthMiddleware{}
	tweetsManager := &tweets.Manager{}
	usersManager := &users.Manager{}

	// Populate injects the shared dependencies into each object's
	// tagged fields; a failure here is unrecoverable.
	if err := inject.Populate(db, jwtMiddleware, tweetsHandlers, authMiddleware, tweetsManager, usersManager); err != nil {
		panic(err)
	}

	router := mux.NewRouter()

	// public routes
	router.Handle("/tweets", http.HandlerFunc(tweetsHandlers.GetTweetsEndpoint)).Methods("GET")
	router.HandleFunc("/keywords", tweetsHandlers.GetAllKeywordsEndpoint).Methods("GET")
	router.HandleFunc("/keywords/{keywordID}/tweets", tweetsHandlers.GetTweetsByKeywordEndpoint).Methods("GET")

	// admin routes (wrapped in the auth middleware)
	router.Handle("/keywords", authMiddleware.Use(http.HandlerFunc(tweetsHandlers.PostKeywordEndpoint))).Methods("POST")
	router.Handle("/keywords/{keywordID}", authMiddleware.Use(http.HandlerFunc(tweetsHandlers.DeleteKeywordEndpoint))).Methods("DELETE")
	router.Handle("/tweets/{tweetID}", authMiddleware.Use(http.HandlerFunc(tweetsHandlers.DeleteTweetEndpoint))).Methods("DELETE")

	fmt.Printf("Starting server on port %v\n", common.Config.ServerPort)
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", common.Config.ServerPort), router))
}
// initDatabase opens the Postgres connection described by the global
// configuration and auto-migrates the schema. It panics when the
// connection cannot be established.
func initDatabase() *gorm.DB {
	db, err := gorm.Open("postgres",
		fmt.Sprintf("host=%s user=%s dbname=%s password=%s sslmode=disable",
			common.Config.Database.Address,
			common.Config.Database.Username,
			common.Config.Database.Name,
			common.Config.Database.Password))

	if err != nil {
		// Fix: include the underlying error instead of discarding it,
		// so the panic explains *why* the connection failed.
		panic(fmt.Sprintf("failed to connect database: %v", err))
	}

	db.AutoMigrate(&tweets.Tweet{}, &tweets.Keyword{}, &users.User{})

	return db
}
// initJwtMiddleware builds the JWT validation middleware using the
// configured Auth0 secret and the HS256 signing method.
func initJwtMiddleware() *jwtmiddleware.JWTMiddleware {
	return jwtmiddleware.New(jwtmiddleware.Options{
		// Every token is validated against the single shared secret.
		ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
			return []byte(common.Config.Auth0Secret), nil
		},
		SigningMethod: jwt.SigningMethodHS256,
	})
}
|
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package main
import (
"context"
"flag"
"fmt"
"log"
"time"
pb "github.com/tenorbear/grpc-go-loadtest/helloworld"
"google.golang.org/grpc"
)
// ArgumentError is an error type representing an invalid
// command-line argument value.
type ArgumentError string

// Error implements the error interface, prefixing the offending
// argument with a fixed description.
func (a ArgumentError) Error() string {
	msg := fmt.Sprintf("Argument error: %s", string(a))
	return msg
}
// Command-line flags selecting the RPC variant, server address and
// greeting name.
var mode = flag.String("mode", "normal", "normal|latency|error, to switch the type of message.")
var address = flag.String("address", "localhost:50051", "Server to connect to.")
var name = flag.String("name", "world", "Your name.")
// main connects to the configured gRPC server, issues the SayHello
// variant selected by -mode with a one-second deadline, and logs the
// reply (or exits fatally on error).
func main() {
	flag.Parse()

	// Set up a connection to the server.
	conn, err := grpc.Dial(*address, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewGreeterClient(conn)

	// Contact the server and print out its response.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var r *pb.HelloReply
	switch *mode {
	case "normal":
		r, err = c.SayHello(ctx, &pb.HelloRequest{Name: *name})
	case "latency":
		r, err = c.SayHelloWithLatency(ctx, &pb.HelloRequest{Name: *name})
	case "error":
		r, err = c.SayHelloWithError(ctx, &pb.HelloRequest{Name: *name})
	default:
		// Unknown mode: surface the bad flag value as an ArgumentError.
		err = ArgumentError(*mode)
	}
	if err != nil {
		log.Fatalf("Error in making RPC: %s.", err)
	}
	log.Printf("Greeting: %s", r.Message)
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codec
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestFastSlowFastReverse checks that the unaligned fast path
// (fastReverseBytes) produces the same result as the portable
// implementation (reverseBytes).
func TestFastSlowFastReverse(t *testing.T) {
	if !supportsUnaligned {
		return
	}

	b := []byte{1, 2, 3, 4, 5, 6, 7, 8, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247}

	// Fix: the original assigned r1 := b and r2 := b, so both names
	// aliased the same backing array and the final comparison was
	// vacuously true (and b was reversed twice, back to its original
	// value). Reverse independent copies and compare those instead.
	r1 := append([]byte(nil), b...)
	r2 := append([]byte(nil), b...)

	fastReverseBytes(r1)
	reverseBytes(r2)

	require.Equal(t, r1, r2)
}
// TestBytesCodec round-trips a table of byte slices through the
// ascending (EncodeBytes/DecodeBytes) and descending
// (EncodeBytesDesc/DecodeBytesDesc) codecs, then verifies that a set
// of malformed encodings is rejected.
func TestBytesCodec(t *testing.T) {
	inputs := []struct {
		enc  []byte // raw input bytes
		dec  []byte // expected encoded form
		desc bool   // true => use the descending-order codec
	}{
		{[]byte{}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 247}, false},
		{[]byte{}, []byte{255, 255, 255, 255, 255, 255, 255, 255, 8}, true},
		{[]byte{0}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 248}, false},
		{[]byte{0}, []byte{255, 255, 255, 255, 255, 255, 255, 255, 7}, true},
		{[]byte{1, 2, 3}, []byte{1, 2, 3, 0, 0, 0, 0, 0, 250}, false},
		{[]byte{1, 2, 3}, []byte{254, 253, 252, 255, 255, 255, 255, 255, 5}, true},
		{[]byte{1, 2, 3, 0}, []byte{1, 2, 3, 0, 0, 0, 0, 0, 251}, false},
		{[]byte{1, 2, 3, 0}, []byte{254, 253, 252, 255, 255, 255, 255, 255, 4}, true},
		{[]byte{1, 2, 3, 4, 5, 6, 7}, []byte{1, 2, 3, 4, 5, 6, 7, 0, 254}, false},
		{[]byte{1, 2, 3, 4, 5, 6, 7}, []byte{254, 253, 252, 251, 250, 249, 248, 255, 1}, true},
		{[]byte{0, 0, 0, 0, 0, 0, 0, 0}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247}, false},
		{[]byte{0, 0, 0, 0, 0, 0, 0, 0}, []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 255, 255, 255, 255, 255, 255, 255, 8}, true},
		{[]byte{1, 2, 3, 4, 5, 6, 7, 8}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247}, false},
		{[]byte{1, 2, 3, 4, 5, 6, 7, 8}, []byte{254, 253, 252, 251, 250, 249, 248, 247, 0, 255, 255, 255, 255, 255, 255, 255, 255, 8}, true},
		{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 255, 9, 0, 0, 0, 0, 0, 0, 0, 248}, false},
		{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, []byte{254, 253, 252, 251, 250, 249, 248, 247, 0, 246, 255, 255, 255, 255, 255, 255, 255, 7}, true},
	}

	for _, input := range inputs {
		// The encoded length must match the predicted length.
		require.Len(t, input.dec, EncodedBytesLength(len(input.enc)))

		if input.desc {
			b := EncodeBytesDesc(nil, input.enc)
			require.Equal(t, input.dec, b)
			_, d, err := DecodeBytesDesc(b, nil)
			require.NoError(t, err)
			require.Equal(t, input.enc, d)
		} else {
			b := EncodeBytes(nil, input.enc)
			require.Equal(t, input.dec, b)
			_, d, err := DecodeBytes(b, nil)
			require.NoError(t, err)
			require.Equal(t, input.enc, d)
		}
	}

	// Test error decode.
	errInputs := [][]byte{
		{1, 2, 3, 4},
		{0, 0, 0, 0, 0, 0, 0, 247},
		{0, 0, 0, 0, 0, 0, 0, 0, 246},
		{0, 0, 0, 0, 0, 0, 0, 1, 247},
		{1, 2, 3, 4, 5, 6, 7, 8, 0},
		{1, 2, 3, 4, 5, 6, 7, 8, 255, 1},
		{1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 2, 3, 4, 5, 6, 7, 8},
		{1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 2, 3, 4, 5, 6, 7, 8, 255},
		{1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 2, 3, 4, 5, 6, 7, 8, 0},
	}

	for _, input := range errInputs {
		_, _, err := DecodeBytes(input, nil)
		require.Error(t, err)
	}
}
// TestBytesCodecExt exercises EncodeBytesExt in both modes: the
// pass-through form (ext=true) must return the raw bytes, while the
// encoding form (ext=false) must match the standard encoded layout.
func TestBytesCodecExt(t *testing.T) {
	cases := []struct {
		enc []byte
		dec []byte
	}{
		{[]byte{}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 247}},
		{[]byte{1, 2, 3}, []byte{1, 2, 3, 0, 0, 0, 0, 0, 250}},
		{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 255, 9, 0, 0, 0, 0, 0, 0, 0, 248}},
	}

	// assertEqual compares element-wise so that `[]byte{}` and
	// `[]byte(nil)` are treated as equal.
	assertEqual := func(expected, actual []byte) {
		require.Equal(t, len(expected), len(actual))
		for i := range expected {
			require.Equal(t, expected[i], actual[i])
		}
	}

	for _, tc := range cases {
		assertEqual(tc.enc, EncodeBytesExt(nil, tc.enc, true))
		assertEqual(tc.dec, EncodeBytesExt(nil, tc.enc, false))
	}
}
|
/*
I am interested in seeing programs which don't ask for any input, print a googol copies of some nonempty string, no less, no more, and then stop. A googol is defined as 10100, i.e., 1 followed by a hundred 0's in decimal.
Example output:
111111111111111111111111111111111111111111111111111111111111111111111111...
or
Hello world
Hello world
Hello world
Hello world
Hello world
Hello world
...
The string can also be entirely composed of white space or special symbols. The only exception to identical copies of a fixed string is if your language decorates the output in some way that can not be prevented,
but could be trivially undone in a wrapper script, like prepending a line number to each line. The wrapper script in such cases need not be provided.
You can assume your computer will never run out of time, but other than that, your program must have a reasonable demand of resources.
Also, you must respect any restrictions that the programming language of your choice poses, for example, you can not exceed a maximum value allowed for its integer types, and at no point more than 4 GB of memory must be needed.
In other words, the program should in principle be testable by running it on your computer.
But because of the extent of this number you will be expected to prove that the number of copies of the string it outputs is exactly 10^100 and that the program stops afterwards.
Stopping can be exiting or halting or even terminating due to an error, but if so, the error must not produce any output that could not easily be separated from the program's output.
This is code-golf, so the solution with the fewest bytes wins.
Example solution (C, ungolfed, 3768 bytes)
#include <stdio.h>
int main() {
int a00, a01, a02, a03, ..., a99;
for(a00 = 0; a00 < 10; a00++)
for(a01 = 0; a01 < 10; a01++)
for(a02 = 0; a02 < 10; a02++)
for(a03 = 0; a03 < 10; a03++)
...
for(a99 = 0; a99 < 10; a99++)
puts("1");
return 0;
}
*/
package main
import (
"fmt"
"math/big"
)
// main prints "Hello world" a googol (10^100) times; see the comment
// block above for the original code-golf challenge statement.
func main() {
	googol("Hello world")
}
// googol prints s exactly 10^100 times. math/big is used because the
// count vastly exceeds any fixed-size integer type; the loop counts a
// big.Int from 0 up to 10^100.
func googol(s string) {
	var (
		one     = big.NewInt(1)
		hundred = big.NewInt(100)
	)
	n := big.NewInt(0)
	m := big.NewInt(10)
	// m = 10^100, the target count (a googol).
	m.Exp(m, hundred, nil)
	// Print once per increment until the counter reaches the target.
	for n.Cmp(m) != 0 {
		fmt.Println(s)
		n.Add(n, one)
	}
}
|
package main
import (
"fmt"
"time"
)
// ch1 is a buffered channel (capacity 3) carrying batches of three
// integers from task1 to task2.
var ch1 chan int = make(chan int, 3)
// task1 produces three consecutive integers on ch1 once per second,
// forever. The buffer size of ch1 matches the batch size, so each
// batch of three sends can complete without a receiver in between.
func task1() {
	var x int = 0
	fmt.Println("call task1")
	for {
		time.Sleep(1000 * time.Millisecond)
		fmt.Printf("task1 send message:%d\r\n", x)
		ch1 <- (x)
		ch1 <- (x + 1)
		ch1 <- (x + 2)
		x += 3
	}
}
// task2 consumes integers from ch1 three at a time, forever, printing
// each value as it is received.
func task2() {
	var x int
	for {
		x = <-ch1
		fmt.Printf("task2 received msg:%d\r\n", x)
		x = <-ch1
		fmt.Printf("task2 received msg:%d\r\n", x)
		x = <-ch1
		fmt.Printf("task2 received msg:%d\r\n\r\n", x)
	}
}
// main starts the producer and consumer goroutines, then blocks
// forever so they can run.
// NOTE(review): a bare `select {}` would park this goroutine without
// the once-per-second wakeups of the sleep loop.
func main() {
	fmt.Println("hello go!!!世界的第一个golong程序")
	go task1()
	go task2()
	for {
		time.Sleep(1000 * time.Millisecond)
	}
}
|
package file_producer_controller
import (
"github.com/gin-gonic/gin"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
"github.com/yjagdale/siem-data-producer/models/file_producer_model"
"github.com/yjagdale/siem-data-producer/models/logs_model"
"github.com/yjagdale/siem-data-producer/services/file_producer_service"
"github.com/yjagdale/siem-data-producer/utils/response"
"io"
"net/http"
"os"
"time"
)
// ProduceFile handles a synchronous file-production request. The JSON
// body is bound into a FileProducer entity; an empty or invalid body
// yields 400. Files larger than the size threshold are published in a
// background goroutine with an immediate 202 response, while smaller
// files are published inline and the service response is returned.
func ProduceFile(c *gin.Context) {
	var fileEntity file_producer_model.FileProducer

	err := c.ShouldBindJSON(&fileEntity)
	if err == io.EOF {
		// An EOF from binding means the request carried no body at all.
		log.Errorln(err)
		resp := response.NewBadRequest(gin.H{"Message": "Empty Body? May be!"})
		c.JSON(resp.Status, resp)
		return
	}
	if err != nil {
		log.Errorln(err)
		resp := response.NewBadRequest(gin.H{"Message": "Invalid Body"})
		c.JSON(resp.Status, resp)
		return
	}

	log.Infoln("Producing logs on destination", fileEntity.DestinationIP, "over port", fileEntity.DestinationPort, "From file", fileEntity.Path)

	stats, err := os.Stat(fileEntity.Path)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"Error": err.Error()})
		return
	}

	// Large files are produced in the background so the request does
	// not block. The byte threshold is the original magic constant.
	if stats.Size() > 1594682 {
		go file_producer_service.PublishFile(fileEntity)
		c.JSON(http.StatusAccepted, gin.H{"Message": "Large file provided. Execution will be done in background"})
		return
	}

	resp := file_producer_service.PublishFile(fileEntity)
	c.JSON(resp.Status, resp.Message)
}
// ProduceFileAsync starts a continuous background publisher for the file
// described in the JSON body: the file is re-published every 5 seconds
// until the execution's flag in logs_model.ContinuesExecution is cleared.
// Responds immediately with 202 Accepted.
//
// NOTE(review): logs_model.ContinuesExecution is written by this handler
// (possibly from many concurrent requests) and read by the goroutine below
// without any synchronisation — this is a data race under `go test -race`.
// Fixing it properly needs a mutex or sync.Map owned by logs_model; flagged
// here rather than changed, since the map is shared package state.
func ProduceFileAsync(c *gin.Context) {
	// Lazily initialise the shared execution-state map on first use.
	if logs_model.ContinuesExecution == nil {
		logs_model.ContinuesExecution = make(map[string]bool)
		logs_model.ContinuesExecution["status"] = true
	}
	var fileEntity file_producer_model.FileProducer
	err := c.ShouldBindJSON(&fileEntity)
	if err == io.EOF {
		// An empty request body surfaces as io.EOF from the JSON binder.
		log.Errorln(err)
		resp := response.NewBadRequest(gin.H{"Message": "Empty Body? May be!"})
		c.JSON(resp.Status, resp)
		return
	}
	if err != nil {
		log.Errorln(err)
		resp := response.NewBadRequest(gin.H{"Message": "Invalid Body"})
		c.JSON(resp.Status, resp)
		return
	}
	log.Infoln("Producing logs on destination", fileEntity.DestinationIP, "over port", fileEntity.DestinationPort, "From file", fileEntity.Path)

	// Each async execution gets a unique ID so it can be stopped later by
	// clearing its entry in the map.
	executionId := uuid.New().String()
	logs_model.ContinuesExecution[executionId] = true
	go func() {
		for logs_model.ContinuesExecution[executionId] {
			file_producer_service.PublishFile(fileEntity)
			time.Sleep(5 * time.Second)
		}
	}()
	c.JSON(http.StatusAccepted, gin.H{"Message": "Execution started"})
}
|
package web
import (
"fmt"
"io/ioutil"
"net/http"
"database/sql"
"github.com/gorilla/mux"
)
// HTTPRouter is a structure used for all incoming and outgoing HTTP
// requests/calls in the services
type HTTPRouter struct {
	router *mux.Router // underlying gorilla/mux router that dispatches requests
	pathPrefix string // URL prefix stripped before routing (see StartListening)
}
// NewHTTPRouter constructs a new HTTPRouter structure
func NewHTTPRouter(router *mux.Router, pathPrefix string) *HTTPRouter {
	return &HTTPRouter{
		router:     router,
		pathPrefix: pathPrefix,
	}
}
// HandleRoute adds a new route to the HTTPRouter. The registered handler
// receives the request body as a string plus a filtered view of the query
// string: every name in reqParams must be present (otherwise the request is
// rejected with 400 and MissingParamErr), while names in optParams are
// passed through only when present.
func (r *HTTPRouter) HandleRoute(methods []string, path string,
	reqParams []string, optParams []string,
	handler func(w http.ResponseWriter,
		queryParams map[string][]string,
		body string, db *sql.DB),
	db *sql.DB) {
	wrapped := func(w http.ResponseWriter, req *http.Request) {
		var payload []byte
		if req.Body != nil {
			data, err := ioutil.ReadAll(req.Body)
			if err != nil {
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
			payload = data
		}
		query := req.URL.Query()
		params := make(map[string][]string)
		// Required parameters: missing or empty ones abort the request.
		for _, name := range reqParams {
			vals, present := query[name]
			if !present || len(vals) == 0 {
				http.Error(w, fmt.Sprintf(MissingParamErr, name), http.StatusBadRequest)
				return
			}
			params[name] = vals
		}
		// Optional parameters: forwarded only when actually supplied.
		for _, name := range optParams {
			if vals, present := query[name]; present && len(vals) > 0 {
				params[name] = vals
			}
		}
		handler(w, params, string(payload), db)
	}
	r.router.HandleFunc(path, wrapped).Methods(methods...)
}
// StartListening starts redirecting all requests through the HTTPRouter
func (r *HTTPRouter) StartListening() {
	stripped := http.StripPrefix(r.pathPrefix, r.router)
	http.Handle("/", stripped)
}
|
package main
import (
"bytes"
"net/http"
"net/http/httptest"
"testing"
zsweb "github.com/zerostick/zerostick/daemon/web"
//_ "github.com/zerostick/zerostick/daemon"
)
// TestPushoverGetEmptyWeb verifies that fetching the pushover provider
// config before anything has been stored yields an all-empty config and 200.
func TestPushoverGetEmptyWeb(t *testing.T) {
	req, err := http.NewRequest("GET", "/notifications/provider/pushover", nil)
	if err != nil {
		t.Fatal(err)
	}
	recorder := httptest.NewRecorder()
	http.HandlerFunc(zsweb.NotificationPushoverConfig).ServeHTTP(recorder, req)
	if recorder.Code != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			recorder.Code, http.StatusOK)
	}
	const want = `{"user_key":"","app_key":"","enabled":false}`
	if got := recorder.Body.String(); got != want {
		t.Errorf("handler returned unexpected body: got %v want %v",
			got, want)
	}
}
// TestPushoverConfigSet stores a pushover configuration via POST and checks
// that the handler echoes the stored config back with 200.
func TestPushoverConfigSet(t *testing.T) {
	payload := []byte(`{"user_key":"testuserkey","app_key":"testappkey","enabled":true}`)
	req, err := http.NewRequest("POST", "/notifications/provider/pushover", bytes.NewBuffer(payload))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	recorder := httptest.NewRecorder()
	http.HandlerFunc(zsweb.NotificationPushoverConfigSet).ServeHTTP(recorder, req)
	if recorder.Code != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			recorder.Code, http.StatusOK)
	}
	const want = `{"user_key":"testuserkey","app_key":"testappkey","enabled":true}`
	if got := recorder.Body.String(); got != want {
		t.Errorf("handler returned unexpected body: got %v want %v",
			got, want)
	}
}
// TestPushoverGetWeb fetches the pushover provider config over HTTP and
// expects the values stored by TestPushoverConfigSet.
//
// NOTE(review): this test depends on TestPushoverConfigSet having run first
// (shared state across tests in source order) — fragile coupling; confirm
// it is intentional before reordering or running tests in isolation.
func TestPushoverGetWeb(t *testing.T) {
	req, err := http.NewRequest("GET", "/notifications/provider/pushover", nil)
	if err != nil {
		t.Fatal(err)
	}
	rr := httptest.NewRecorder()
	handler := http.HandlerFunc(zsweb.NotificationPushoverConfig)
	handler.ServeHTTP(rr, req)
	if status := rr.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			status, http.StatusOK)
	}
	// Check the response body is what we expect.
	expected := `{"user_key":"testuserkey","app_key":"testappkey","enabled":true}`
	if rr.Body.String() != expected {
		t.Errorf("handler returned unexpected body: got %v want %v",
			rr.Body.String(), expected)
	}
}
// TestPushoverDelete removes the stored pushover configuration and expects
// an empty JSON object with 200.
func TestPushoverDelete(t *testing.T) {
	req, err := http.NewRequest("DELETE", "/notifications/provider/pushover", nil)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	recorder := httptest.NewRecorder()
	http.HandlerFunc(zsweb.NotificationPushoverConfigDelete).ServeHTTP(recorder, req)
	if recorder.Code != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			recorder.Code, http.StatusOK)
	}
	const want = `{}`
	if got := recorder.Body.String(); got != want {
		t.Errorf("handler returned unexpected body: got %v want %v",
			got, want)
	}
}
|
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.
// collect-statse implements continuous statistical aggregation of cluster event metrics.
package main
import (
"log"
"opentsp.org/cmd/collect-statse/aggregator"
"opentsp.org/cmd/collect-statse/config"
"opentsp.org/cmd/collect-statse/forwarder"
_ "opentsp.org/internal/pprof"
)
// main wires up the collect-statse process: configuration is loaded first,
// then the forwarder, aggregator, config-reload and expvar loops run as
// goroutines for the lifetime of the process.
func main() {
	// tsdbChan and expvarLoop are defined elsewhere in this package.
	config.Load(tsdbChan)
	go forwarderService()
	go aggregatorService()
	go config.Reload()
	go expvarLoop()
	// Block the main goroutine forever; the services above do all the work.
	select {}
}
// forwarderService runs the forwarder with the loaded configuration;
// any serve error is fatal for the whole process.
func forwarderService() {
	if err := forwarder.ListenAndServe(&config.Loaded.Forwarder); err != nil {
		log.Fatal(err)
	}
}
// aggregatorService runs the aggregator with the loaded configuration;
// any serve error is fatal for the whole process.
func aggregatorService() {
	if err := aggregator.ListenAndServe(&config.Loaded.Aggregator); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"fmt"
)
// main prints the full multiplication tables for 0 through 12 inclusive.
func main() {
	for left := 0; left <= 12; left++ {
		for right := 0; right <= 12; right++ {
			fmt.Printf("%d x %d = %d\n", left, right, left*right)
		}
	}
}
|
package main
import (
"fmt"
"os"
"github.com/Cloud-Foundations/Dominator/imagebuilder/client"
"github.com/Cloud-Foundations/Dominator/lib/json"
"github.com/Cloud-Foundations/Dominator/lib/log"
proto "github.com/Cloud-Foundations/Dominator/proto/imaginator"
)
// getDependenciesSubcommand is the CLI entry point for the get-dependencies
// subcommand; the args slice is unused by this subcommand.
func getDependenciesSubcommand(args []string, logger log.DebugLogger) error {
	err := getDependencies(logger)
	if err == nil {
		return nil
	}
	return fmt.Errorf("error getting dependencies: %s", err)
}
// getDependencies fetches the image build-dependency data from the
// Imaginator server and pretty-prints it as JSON on stdout.
func getDependencies(logger log.Logger) error {
	srpcClient := getImaginatorClient()
	req := proto.GetDependenciesRequest{}
	result, err := client.GetDependencies(srpcClient, req)
	if err != nil {
		return err
	}
	// NOTE(review): any result of WriteWithIndent is discarded, matching the
	// original behaviour — confirm whether a write error should propagate.
	json.WriteWithIndent(os.Stdout, " ", result)
	return nil
}
|
package config
// Backend service base URLs used throughout the application.
const (
	apiAddr  = "http://backend-api:8080/api"
	execAddr = "http://backend-exec:8090/exec"
)

// GetAPIAddr returns the base URL of the backend API service.
func GetAPIAddr() string { return apiAddr }

// GetExecAddr returns the base URL of the backend exec service.
func GetExecAddr() string { return execAddr }
|
package models
import (
"encoding/json"
"fmt"
)
// Endpoint is one scanned server endpoint: its address, SSL grade, and the
// whois-derived country and owner. Presumably produced from an SSL-scan
// API response (original comment said "model SslResponse") — confirm.
type Endpoint struct {
	Address string `json:"ipAddress"`
	Grade string `json:"grade"`
	Country string `json:"country"`
	Owner string `json:"owner"`
}
// Endpoints is the container for the list of scanned endpoints, matching
// the "endpoints" array of the upstream scan response.
type Endpoints struct {
	Endpoints []Endpoint `json:"endpoints"`
}
// WhoisResponse models the subset of a whois lookup used here: the country
// and the organisation name ("OrgName") of the IP owner.
type WhoisResponse struct {
	Country string `json:"Country"`
	Owner string `json:"OrgName"`
}
// ScrapingResponse holds page data gathered by scraping — presumably the
// site's icon and title (no JSON tags; populated in code elsewhere).
type ScrapingResponse struct {
	Icon string
	Title string
}
// BadRequest is the JSON payload returned to clients for a bad request.
type BadRequest struct{
	Response string `json:"response"`
}
// Response is the API response for a domain lookup: the scanned servers,
// whether the server set changed, current and previous SSL grades, the
// scraped logo and title, and a liveness flag.
type Response struct {
	Servers []Endpoint `json:"servers"`
	ServerChanged bool `json:"server_changed"`
	SslGrade string `json:"ssl_grade"`
	PreviousSslGrade string `json:"previous_ssl_grade"`
	Logo string `json:"logo"`
	Title string `json:"title"`
	IsDown bool `json:"is_down"`
}

// Scan implements the sql.Scanner interface so a Response can be read
// directly from a database column holding its JSON encoding. It accepts
// both []byte ([]uint8) and string, the two textual forms SQL drivers
// commonly hand to Scan; any other type is rejected with an error.
func (response *Response) Scan(src interface{}) error {
	switch v := src.(type) {
	case []byte:
		// []uint8 and []byte are identical; no conversion needed.
		return json.Unmarshal(v, response)
	case string:
		return json.Unmarshal([]byte(v), response)
	default:
		return fmt.Errorf("metas field must be a []uint8 or string, got %T instead", src)
	}
}
|
package strategy
import "math/rand"
// randomLevel draws a random skip-list level: starting at 1, the level is
// incremented while a coin flip succeeds with probability p, capped at
// maxLevel (p and maxLevel are defined elsewhere in this package).
func randomLevel() int {
	lvl := 1
	// Note: rand.Float32 is evaluated before the cap check, exactly as in
	// the original, so the RNG stream consumption is unchanged.
	for rand.Float32() < p && lvl < maxLevel {
		lvl++
	}
	return lvl
}
|
package kbucket
import (
"container/list"
"sort"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
)
// A helper struct to sort peers by their distance to the local node
type peerDistance struct {
	p peer.ID // the peer being ranked
	distance ID // XOR distance of p from the sort target
}
// peerSorterArr implements sort.Interface to sort peers by xor distance
type peerSorterArr []*peerDistance

func (p peerSorterArr) Len() int { return len(p) }
func (p peerSorterArr) Swap(a, b int) { p[a], p[b] = p[b], p[a] }

// Less orders entries by the ID.less comparison on their XOR distances,
// i.e. closest-to-target first after sorting.
func (p peerSorterArr) Less(a, b int) bool {
	return p[a].distance.less(p[b].distance)
}
// copyPeersFromList appends every peer in peerList to peerArr, tagging each
// with its XOR distance from target, and returns the grown slice. When the
// existing capacity cannot hold the new entries, the slice is reallocated
// up front to avoid repeated growth.
func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) peerSorterArr {
	if cap(peerArr) < len(peerArr)+peerList.Len() {
		// BUG FIX: the new slice must be created with length len(peerArr),
		// not 0 — copy() transfers only min(len(dst), len(src)) elements,
		// so the original code silently dropped every existing entry.
		newArr := make(peerSorterArr, len(peerArr), len(peerArr)+peerList.Len())
		copy(newArr, peerArr)
		peerArr = newArr
	}
	for e := peerList.Front(); e != nil; e = e.Next() {
		// The list is expected to hold peer.ID values; anything else panics.
		p := e.Value.(peer.ID)
		pID := ConvertPeerID(p)
		pd := peerDistance{
			p: p,
			distance: xor(target, pID),
		}
		peerArr = append(peerArr, &pd)
	}
	return peerArr
}
// SortClosestPeers returns the given peers ordered from closest to
// furthest from the target ID by XOR distance.
func SortClosestPeers(peers []peer.ID, target ID) []peer.ID {
	byDistance := make(peerSorterArr, 0, len(peers))
	for _, id := range peers {
		byDistance = append(byDistance, &peerDistance{
			p: id,
			distance: xor(target, ConvertPeerID(id)),
		})
	}
	sort.Sort(byDistance)
	sorted := make([]peer.ID, len(byDistance))
	for i, pd := range byDistance {
		sorted[i] = pd.p
	}
	return sorted
}
|
/*
* AppManager API
*
* HTTP REST API to connect to the AppManager
*
* API version: 1.0
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package appManagerApiClient
// DeploymentSpec is the desired-state section of a Deployment in the
// AppManager REST API (Swagger-generated — prefer regenerating over hand
// edits). The savepoint/job terminology suggests it manages Apache Flink
// deployments; confirm against the AppManager API documentation.
type DeploymentSpec struct {
	// Desired state of the deployment; exact enum values are not visible here.
	State string `json:"state"`
	// Required by the API (no omitempty), unlike the optional strategies below.
	UpgradeStrategy *DeploymentUpgradeStrategy `json:"upgradeStrategy"`
	RestoreStrategy *DeploymentRestoreStrategy `json:"restoreStrategy,omitempty"`
	StartFromSavepoint *DeploymentStartFromSavepoint `json:"startFromSavepoint,omitempty"`
	DeploymentTargetId string `json:"deploymentTargetId"`
	// omitempty drops zero values, so 0 presumably means "server default".
	MaxSavepointCreationAttempts int32 `json:"maxSavepointCreationAttempts,omitempty"`
	MaxJobCreationAttempts int32 `json:"maxJobCreationAttempts,omitempty"`
	Template *DeploymentTemplate `json:"template"`
}
|
// This file was generated for SObject EventBusSubscriber, API Version v43.0 at 2018-07-30 03:47:55.497737848 -0400 EDT m=+41.841914304
package sobjects
import (
"fmt"
"strings"
)
// EventBusSubscriber mirrors the Salesforce EventBusSubscriber SObject,
// generated for API v43.0 (see the file header); field semantics follow
// the Salesforce object reference — confirm there before relying on them.
type EventBusSubscriber struct {
	BaseSObject
	ExternalId string `force:",omitempty"`
	Id string `force:",omitempty"`
	LastError string `force:",omitempty"`
	Name string `force:",omitempty"`
	Position int `force:",omitempty"`
	Retries int `force:",omitempty"`
	Status string `force:",omitempty"`
	Tip int `force:",omitempty"`
	Topic string `force:",omitempty"`
	Type string `force:",omitempty"`
}
// ApiName returns the Salesforce API name for this SObject type.
func (t *EventBusSubscriber) ApiName() string {
	return "EventBusSubscriber"
}
// String renders the subscriber as a multi-line, human-readable summary:
// a header line followed by one tab-indented line per field.
func (t *EventBusSubscriber) String() string {
	fields := []struct {
		label string
		value interface{}
	}{
		{"ExternalId", t.ExternalId},
		{"Id", t.Id},
		{"LastError", t.LastError},
		{"Name", t.Name},
		{"Position", t.Position},
		{"Retries", t.Retries},
		{"Status", t.Status},
		{"Tip", t.Tip},
		{"Topic", t.Topic},
		{"Type", t.Type},
	}
	var out strings.Builder
	out.WriteString(fmt.Sprintf("EventBusSubscriber #%s - %s\n", t.Id, t.Name))
	for _, f := range fields {
		out.WriteString(fmt.Sprintf("\t%s: %v\n", f.label, f.value))
	}
	return out.String()
}
// EventBusSubscriberQueryResponse is the SOQL query envelope whose Records
// hold EventBusSubscriber rows.
type EventBusSubscriberQueryResponse struct {
	BaseQuery
	Records []EventBusSubscriber `json:"Records" force:"records"`
}
|
package utils
import (
"fmt"
"runtime"
)
// FileLine returns "[file:line] <errors>", where file:line identify the
// caller of FileLine and the variadic errors are rendered with %v (so they
// appear as a slice, e.g. "[err1 err2]"). Returns "unknown" when caller
// information is unavailable.
func FileLine(errors ...error) string {
	_, file, line, ok := runtime.Caller(1)
	if !ok {
		return "unknown"
	}
	// Fixed format string: the original used "% v" (fmt's space flag),
	// which printed no separator between the location and the error list.
	return fmt.Sprintf("[%s:%d] %v", file, line, errors)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.