text stringlengths 11 4.05M |
|---|
package main
import (
"errors"
"fmt"
"math"
)
// main demonstrates basic Go syntax: variable declarations, loops, arrays,
// slices, maps, anonymous functions, and the recursion helpers defined below.
func main() {
	// Naming / declaration forms.
	var a = "Hello"
	b := "Gail"
	var c string
	c = "!"
	fmt.Println(a, b, c)
	// Variables are usually camelCase; keep them distinct from function names.
	roundArea := RoundArea(1.0)
	fmt.Print(roundArea)
	// A plain for loop.
	sum := 0
	for i := 0; i < 10; i++ {
		sum += 1
	}
	fmt.Print(sum)
	// Plain Go arrays. The number of elements in {} must not exceed the size in [].
	arr1 := [3]int{1, 2, 3}
	arrr1 := [...]int{1, 2, 5}
	fmt.Print(arr1, arrr1)
	// Similar to PHP's foreach loop.
	for _, v := range arr1 {
		fmt.Println(v)
	}
	arr11 := [2][2]int{{0, 0}, {1, 1}}
	fmt.Print(arr11)
	// A map is Go's associative collection, like a keyed PHP array or a JS object.
	map1 := map[string]int{"a": 1, "b": 2}
	// range over a map or array, like PHP's foreach.
	for k, v := range map1 {
		fmt.Printf("the key is %v and the value is %v \n", k, v)
	}
	val, ok := map1["a"]
	// Comma-ok lookup: ok is true and val holds the value when the key "a"
	// exists; otherwise ok is false and val is the value type's zero (0 for int).
	fmt.Print(ok, val)
	// Delete a key/value pair from the map; only the key is needed.
	delete(map1, "a")
	// Functions can also be written like this — an anonymous function
	// (as in PHP) assigned to a variable.
	getSquareRoot := func(x float64) float64 {
		return math.Sqrt(x)
	}
	fmt.Println(getSquareRoot(9))
	// Array length is fixed, but slice length is not.
	slice := []int{1, 2, 3} // create and initialize; length can be omitted
	//var slice1 = make([]int, 2) // create only
	// make([]T, length, capacity): T element type, length initial length, capacity capacity.
	arr_slice := [5]int{1, 2, 3, 4, 5} // array
	s := arr_slice[1:4]                // slice (either bound may be omitted)
	fmt.Println(slice, s)
	// len() gives a slice's length, cap() its capacity.
	// Here cap is 4: four positions from index 1 to the end of the array.
	fmt.Printf("len=%d cap=%d slice=%v\n", len(s), cap(s), s)
	// An uninitialized slice defaults to nil with length 0.
	var i int
	for i = 0; i < 10; i++ {
		fmt.Printf("%d\t", fibonacci(i))
	}
	fmt.Println(factorial(5))
}
// RoundArea returns the area of a circle with radius r.
//
// Naming convention reminder:
//  1. use camel case,
//  2. start with a lower-case letter when the function is package-private,
//  3. start with an upper-case letter to export it outside the package.
func RoundArea(r float32) float32 {
	const Pi = 3.14 // constant approximation of π
	return Pi * r * r
}
// fibonacci returns the n-th Fibonacci number via plain recursion
// (fibonacci(0) == 0, fibonacci(1) == 1).
func fibonacci(n int) int {
	if n >= 2 {
		return fibonacci(n-1) + fibonacci(n-2)
	}
	return n
}
// fib returns a Fibonacci generator: each call to the returned closure
// yields the next number of the sequence (1, 1, 2, 3, 5, ...).
func fib() func() int {
	prev, cur := 0, 1
	return func() int {
		prev, cur = cur, prev+cur
		return prev
	}
}
// factorial returns n! for n >= 0; any n <= 0 yields 1.
func factorial(n int) int {
	result := 1
	for i := n; i > 0; i-- {
		result *= i
	}
	return result
}
// Sqrt returns the square root of f.
// It returns a non-nil error when f is negative; on success err is nil.
func Sqrt(f float64) (float64, error) {
	if f < 0 {
		return 0, errors.New("math: square root of negative number")
	}
	// Bug fix: the success path previously returned errors.New(""), a
	// non-nil error, making every successful call look like a failure
	// to any caller that checks err != nil.
	return math.Sqrt(f), nil
}
|
package window
import (
"github.com/galaco/lambda-client/engine"
"github.com/galaco/tinygametools"
"github.com/go-gl/glfw/v3.2/glfw"
)
// Manager is responsible for managing this games window. Understand
// that there is a distinction between the window and the renderer.
// This manager provides a window that a rendering context can be
// obtained from, and device input handling.
type Manager struct {
	engine.Manager
	// window is the wrapped tinygametools window this manager drives.
	window *tinygametools.Window
	// Name identifies this manager instance.
	Name string
}
// Register will create a new Window.
// NOTE(review): currently a no-op — the window is supplied via
// NewWindowManager rather than created here.
func (manager *Manager) Register() {
}
// Update simply calls the input manager that uses this window.
// NOTE(review): currently a no-op; dt (delta time in seconds) is unused.
func (manager *Manager) Update(dt float64) {
}
// Unregister will end input listening and kill any window.
// glfw.Terminate tears down the whole GLFW library state, so no GLFW
// calls may be made after this manager is unregistered.
func (manager *Manager) Unregister() {
	glfw.Terminate()
}
// PostUpdate is called at the end of an update loop.
// In this case it simply SwapBuffers the window, (to display updated window
// contents). Assumes window is non-nil — panics otherwise.
func (manager *Manager) PostUpdate() {
	manager.window.Handle().SwapBuffers()
}
// NewWindowManager returns a Manager that wraps the provided window.
// The caller retains ownership of win; Unregister terminates GLFW.
func NewWindowManager(win *tinygametools.Window) *Manager {
	return &Manager{
		window: win,
	}
}
|
package conv
import "net/mail"
// ToEmailAddress parses s as an RFC 5322 address and returns its canonical
// string form, or a non-nil error when s is not a valid address.
func ToEmailAddress(s string) (string, error) {
	parsed, err := mail.ParseAddress(s)
	if err != nil {
		return "", err
	}
	return parsed.String(), nil
}
// IsEmailAddress reports whether s parses as a valid email address.
func IsEmailAddress(s string) bool {
	result, _ := ToEmailAddress(s)
	return len(result) > 0
}
|
package contexts
import (
"github.com/daiguadaidai/haechi/config"
)
// HttpContext bundles the configuration an HTTP handler needs:
// the server start configuration and its associated rule configuration.
type HttpContext struct {
	// ServerConfig is the server's start-up configuration.
	ServerConfig *config.StartConfig
	// RuleConfig is the rule configuration taken from ServerConfig.
	RuleConfig *config.RuleConfig
}
// NewHttpContext builds an HttpContext from the given start configuration,
// exposing its rule configuration alongside it for convenience.
func NewHttpContext(sc *config.StartConfig) *HttpContext {
	ctx := &HttpContext{
		ServerConfig: sc,
		RuleConfig:   sc.RuleConfig,
	}
	return ctx
}
|
package service
import (
"fmt"
"snippetBox-microservice/news/api/controller"
"snippetBox-microservice/news/pkg/domain"
"snippetBox-microservice/news/pkg/validator"
"time"
)
// news is the concrete news service; it delegates persistence to repo.
type news struct {
	repo NewsRepositoryInterface
}
// NewsRepositoryInterface is the persistence contract the news service
// depends on: insert a news item, fetch one by id, and list the latest.
type NewsRepositoryInterface interface {
	Insert(title, content string, expires time.Time) (int, error)
	GetById(id int) (*domain.News, error)
	Latest() ([]*domain.News, error)
}
// News wires a repository into a news service that satisfies the
// controller.NewsServiceInterface contract.
func News(NewsRepository NewsRepositoryInterface) controller.NewsServiceInterface {
	service := &news{repo: NewsRepository}
	return service
}
// Save validates the incoming news item (title limited to 100 characters)
// and persists it through the repository, returning the new row id.
// On validation failure it returns -1 and a generic validation error.
func (s *news) Save(news *domain.News) (int, error) {
	v := validator.New()
	v.MaxLength(news.Title, 100)
	if !v.Valid() {
		return -1, fmt.Errorf("news validation error")
	}
	return s.repo.Insert(news.Title, news.Content, news.Expires)
}
// FindById returns the news item with the given id, delegating to the repository.
func (s *news) FindById(id int) (*domain.News, error) {
	return s.repo.GetById(id)
}
// Latest returns the most recent news items, delegating to the repository.
func (s *news) Latest() ([]*domain.News, error) {
	return s.repo.Latest()
}
|
package tool
import (
"fmt"
"testing"
"time"
)
// TestRandom is a smoke test / demo: it prints the package's random-string
// generation constants and one sample from each random helper. It makes no
// assertions — it exists so a human can eyeball the output.
func TestRandom(t *testing.T) {
	// Dump the letter-index bit-twiddling constants used by the generators.
	fmt.Printf("letterBytes:[%v] \n", letterBytes)
	fmt.Printf("letterIdxBits [%v] [%b] \n", letterIdxBits, letterIdxBits)
	fmt.Printf("letterIdxMask [%v] [%b] \n", letterIdxMask, letterIdxMask)
	fmt.Printf("letterIdxMax [%v] [%b] \n", letterIdxMax, letterIdxMax)
	fmt.Printf("-------------------------------------- \n")
	// One sample from each random helper with small, fixed bounds.
	size := 5
	randomArr := GenRandomByteArray(5)
	randomStr := GenRandomString(5)
	randomTime := RandomTimeDuration(time.Second, 80*time.Second)
	randomInt32 := RandomInt32(0, 80)
	randomInt64 := RandomInt64(0, 80)
	randomInt := RandomInt(0, 80)
	fmt.Printf("size[%v],randomArr[%v],randomStr[%v],randomTime[%v] \n"+
		"randomInt[%v],randomInt32[%v],randomInt64[%v]", size, randomArr, randomStr,
		randomTime, randomInt, randomInt32, randomInt64)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package ui
import (
"context"
"fmt"
"strings"
"time"
"github.com/godbus/dbus/v5"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/dbusutil"
"chromiumos/tast/testing"
)
const (
	// These should come from metrics_proto/execution_context.proto. However,
	// because that file is hosted in google3 and manually exported to the
	// Chromium repo, getting a copy of the proto file into the ChromiumOS repo
	// is extremely complicated. Since these numbers can't be changed (see comment at
	// https://source.chromium.org/chromium/chromium/src/+/main:content/public/common/process_type.h),
	// we can save significant work by just copying them here.

	// Chrome process types.
	browserProcess        = 1
	rendererProcess       = 2
	gpuProcess            = 3
	utilityProcess        = 4
	networkServiceProcess = 10

	// Chrome thread types.
	mainThread       = 1
	ioThread         = 6
	compositorThread = 11
)
// init registers the StackSampledMetrics test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: StackSampledMetrics,
		// TODO(b/214117401): We need to add a similar
		// 'GetSuccessfullyCollectedCounts' dbus method to Lacros before we can add
		// a Lacros test. The current dbus service is ash-only.
		LacrosStatus: testing.LacrosVariantNeeded,
		Desc:         "Check that stack-sampled metrics work",
		Contacts: []string{
			"iby@chromium.org",
			"cros-telemetry@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome", "stack_sampled_metrics"},
		// Ash-only for now; the fixture logs in with stack-sampled metrics enabled.
		Params: []testing.Param{{
			Name:    "ash",
			Fixture: "chromeLoggedInWithStackSampledMetrics",
			Val:     browser.TypeAsh,
		}},
	})
}
// StackSampledMetrics verifies that Chrome's stack-sampling profiler is
// collecting samples: it opens a browser window, then polls a Chrome dbus
// service until every expected (process type, thread type) pair reports a
// positive collected-sample count.
func StackSampledMetrics(ctx context.Context, s *testing.State) {
	const (
		dbusName       = "org.chromium.ProfilerStatusService"
		dbusPath       = "/org/chromium/ProfilerStatusService"
		statusFunction = "GetSuccessfullyCollectedCounts"
	)
	// Reserve a few seconds for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(cleanupCtx, 10*time.Second)
	defer cancel()
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	// Set up the browser, open a window.
	const url = chrome.NewTabURL
	conn, _, closeBrowser, err := browserfixt.SetUpWithURL(ctx, cr, s.Param().(browser.Type), url)
	if err != nil {
		s.Fatal("Failed to open the browser: ", err)
	}
	defer closeBrowser(cleanupCtx)
	defer conn.Close()
	// Connect to the profiler status service on the system bus.
	_, obj, err := dbusutil.Connect(ctx, dbusName, dbus.ObjectPath(dbusPath))
	if err != nil {
		s.Fatalf("Failed to connect to %s: %v", dbusName, err)
	}
	type processThread struct {
		processType int
		threadType  int
	}
	// We always expect to see at least the following process + threads being profiled.
	// List should generally match chrome/common/profiler/thread_profiler_browsertest.cc
	expectedResults := []processThread{
		{browserProcess, mainThread},
		{browserProcess, ioThread},
		{rendererProcess, mainThread},
		{rendererProcess, ioThread},
		{rendererProcess, compositorThread},
		{gpuProcess, mainThread},
		{gpuProcess, ioThread},
		{gpuProcess, compositorThread},
		{networkServiceProcess, ioThread},
	}
	testing.ContextLog(ctx, "Waiting for all processes + threads to be profiled")
	// Poll (with the framework's default interval/timeout) until every
	// expected pair has a positive sample count.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		// result mirrors the (process, thread, count) tuples the dbus
		// method returns.
		type result struct {
			ProcessType int
			ThreadType  int
			Count       int
		}
		var results []result
		if err := obj.CallWithContext(ctx, dbusName+"."+statusFunction, dbus.FlagNoAutoStart).Store(&results); err != nil {
			return errors.Wrap(err, "failed to get profiler status")
		}
		// Collect the expected pairs that have not yet reported samples.
		// (NOTE(review): "Expections" is a typo for "Expectations" — left
		// as-is since identifiers are out of scope here.)
		var missedExpections []processThread
		for _, expectation := range expectedResults {
			found := false
			for _, result := range results {
				if expectation.processType == result.ProcessType && expectation.threadType == result.ThreadType && result.Count > 0 {
					found = true
					break
				}
			}
			if !found {
				missedExpections = append(missedExpections, expectation)
			}
		}
		if len(missedExpections) > 0 {
			var missedExpectionsStr []string
			for _, missedExpection := range missedExpections {
				missedExpectionsStr = append(missedExpectionsStr, fmt.Sprintf("%+v", missedExpection))
			}
			return errors.New("not all process + threads profiled: " + strings.Join(missedExpectionsStr, ", "))
		}
		return nil
	}, nil); err != nil {
		s.Error("Chrome did not profile expected process+threads: ", err)
	}
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package domain
import (
"net"
"time"
"github.com/miekg/dns"
"github.com/bitmark-inc/bitmarkd/announce/receptor"
"github.com/bitmark-inc/bitmarkd/background"
"github.com/bitmark-inc/bitmarkd/util"
"github.com/bitmark-inc/logger"
)
// startup node connection information is provided through DNS TXT records.
// The format:
// txt-record=a.b.c,"bitmark=v3 a=127.0.0.1;[::1] c=22136 r=22130 f=xxx p=xxx
// This package is to get DNS TXT record and parse it
const (
timeInterval = 1 * time.Hour // time interval for re-fetching nodes domain
configFile = "/etc/resolv.conf"
)
// domain periodically looks up a DNS TXT record describing peer nodes and
// feeds the parsed entries into the receptor set.
type domain struct {
	log        *logger.L
	domainName string            // DNS name whose TXT records describe peers
	receptors  receptor.Receptor // destination for discovered peers
	lookuper   Lookuper          // performs the TXT lookups
}
// Run - background processing interface.
// It re-fetches the domain's TXT records on a timer (interval derived from
// the zone's SOA TTL, capped at timeInterval) until shutdown is signalled.
func (d *domain) Run(_ interface{}, shutdown <-chan struct{}) {
	timer := time.After(interval(d.domainName, d.log))
loop:
	for {
		select {
		case <-timer:
			// Re-arm the timer first, then refresh the peer list.
			timer = time.After(interval(d.domainName, d.log))
			txts, err := d.lookuper.Lookup(d.domainName)
			if nil != err {
				// Lookup failures are transient: retry on the next tick.
				continue loop
			}
			addTXTs(txts, d.log, d.receptors)
		case <-shutdown:
			break loop
		}
	}
}
// get interval time for lookup node domain txt record
func interval(domain string, log *logger.L) time.Duration {
t := timeInterval
var servers []string // dns name server
// reading default configuration file
conf, err := dns.ClientConfigFromFile(configFile)
if nil != err {
log.Warnf("reading %s error: %s", configFile, err)
goto done
}
if 0 == len(conf.Servers) {
log.Warnf("cannot get dns name server")
goto done
}
servers = conf.Servers
// limit the nameservers to lookup
// https://www.freebsd.org/cgi/man.cgi?resolv.conf
if len(servers) > 3 {
servers = servers[:3]
}
loop:
for _, server := range servers {
s := net.JoinHostPort(server, conf.Port)
c := dns.Client{}
msg := dns.Msg{}
msg.SetQuestion(domain+".", dns.TypeSOA) // fixed for type SOA
r, _, err := c.Exchange(&msg, s)
if nil != err {
log.Debugf("exchange with dns server %q error: %s", s, err)
continue loop
}
if 0 == len(r.Ns) && 0 == len(r.Answer) && 0 == len(r.Extra) {
log.Debugf("no resource record found by dns server %q", s)
continue loop
}
sections := [][]dns.RR{r.Answer, r.Ns, r.Extra}
for _, section := range sections {
ttl := ttl(section)
if 0 < ttl {
log.Infof("got TTL record from server %q value %d", s, ttl)
ttlSec := time.Duration(ttl) * time.Second
if timeInterval > ttlSec {
t = ttlSec
break loop
}
}
}
}
done:
log.Infof("time to re-fetching node domain: %v", t)
return t
}
// get TTL record from a resource record
func ttl(rrs []dns.RR) uint32 {
if 0 == len(rrs) {
return 0
}
for _, rr := range rrs {
if soa, ok := rr.(*dns.SOA); ok {
return soa.Hdr.Ttl
} else {
return rr.Header().Ttl
}
}
return 0
}
// New - return background processing interface.
// It performs an initial TXT lookup (failing fast if the domain cannot be
// resolved) and seeds the receptor set before returning the background task.
// f is the raw TXT lookup function handed to the Lookuper.
func New(log *logger.L, domainName string, receptors receptor.Receptor, f func(string) ([]string, error)) (background.Process, error) {
	log.Info("initialising…")
	d := &domain{
		log:        log,
		domainName: domainName,
		receptors:  receptors,
		lookuper:   NewLookuper(log, f),
	}
	// Initial fetch: a failure here aborts construction.
	txts, err := d.lookuper.Lookup(d.domainName)
	if nil != err {
		return nil, err
	}
	addTXTs(txts, log, receptors)
	return d, nil
}
// addTXTs registers each parsed TXT record with the receptor set.
// A record contributes its IPv4 and/or IPv6 listener addresses (packed into
// one byte string); records with neither address are logged and skipped.
func addTXTs(txts []DnsTXT, log *logger.L, receptors receptor.Receptor) {
	for i, t := range txts {
		var listeners []byte
		if nil != t.IPv4 {
			c1 := util.ConnectionFromIPandPort(t.IPv4, t.ConnectPort)
			listeners = append(listeners, c1.Pack()...)
		}
		if nil != t.IPv6 {
			c2 := util.ConnectionFromIPandPort(t.IPv6, t.ConnectPort)
			listeners = append(listeners, c2.Pack()...)
		}
		if nil == t.IPv4 && nil == t.IPv6 {
			log.Debugf("result[%d]: ignoring invalid record", i)
		} else {
			log.Infof("result[%d]: adding: %x", i, listeners)
			// Timestamp the entry with the current time.
			receptors.Add(t.PublicKey, listeners, uint64(time.Now().Unix()))
		}
	}
}
|
package handlers
import (
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"github.com/sagaraglawe/miniProject/inits"
"github.com/sagaraglawe/miniProject/migrations"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"sync"
)
// AdminShow returns, for the user named in the "name" query parameter, the
// raw JSON (Declare field) of every matching product row — unredacted,
// for admin use.
func AdminShow(c *gin.Context) {
	//getting the parameter value user with the key name in the post request
	user := c.Query("name")
	ShowData := []migrations.Product{}
	//Querying all rows belonging to this username
	inits.Db.Where("username=?", user).Find(&ShowData)
	JsonMessage := []json.RawMessage{}
	for i := 0; i < len(ShowData); i++ {
		JsonMessage = append(JsonMessage, ShowData[i].Declare)
	}
	c.JSON(http.StatusOK, JsonMessage)
	return
}
// UserShow returns the product rows for the user named in the "name" query
// parameter, with sensitive fields (phone_no, password) masked out.
func UserShow(c *gin.Context) {
	user := c.Query("name")
	ShowData := []migrations.Product{}
	//Query all rows belonging to this username
	inits.Db.Where("username=?", user).Find(&ShowData)
	var JsonMessage []json.RawMessage
	for i := 0; i < len(ShowData); i++ {
		// Decode the stored raw JSON so individual fields can be masked.
		var pp map[string]interface{}
		err := json.Unmarshal(ShowData[i].Declare, &pp)
		if err != nil {
			log.Panic(err)
		}
		//securing the data which is not to be displayed to the user
		for k, _ := range pp {
			if k == "phone_no" {
				pp[k] = "********"
			}
			if k == "password" {
				pp[k] = "**********"
			}
		}
		// Marshal of a map[string]interface{} we just unmarshalled cannot
		// fail in practice; the error is deliberately dropped.
		tpt, _ := json.Marshal(pp)
		JsonMessage = append(JsonMessage, tpt)
	}
	c.JSON(http.StatusOK, JsonMessage)
	return
}
// StoreData loads the JSON file named by the "path" query parameter and
// inserts each record into the products table, preserving each record's
// raw JSON in the row's Declare field.
func StoreData(c *gin.Context) {
	path := c.Query("path")
	var prod []migrations.Product
	// Read the whole JSON file into memory.
	// Bug fix: the read error used to be silently discarded, which led to
	// unmarshalling a nil buffer.
	file, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Decode into the typed product slice.
	// Bug fix: this error used to be overwritten (unchecked) by the next
	// Unmarshal call.
	if err := json.Unmarshal(file, &prod); err != nil {
		fmt.Println(err)
		return
	}
	// Decode a second time into generic maps to capture unstructured fields.
	var pp []map[string]interface{}
	if err := json.Unmarshal(file, &pp); err != nil {
		fmt.Println(err)
		return
	}
	// Migration to create the table in the database if needed.
	inits.Db.AutoMigrate(&migrations.Product{})
	// Fill the database table.
	for i := 0; i < len(pp); i++ {
		// Marshal the full record back into raw JSON for the Declare field.
		// (Renamed from `byte`, which shadowed the builtin type.)
		raw, err := json.Marshal(pp[i])
		if err != nil {
			fmt.Println(err)
			continue
		}
		prod[i].Declare = raw
		// Send the entry to the database.
		inits.Db.Create(prod[i])
	}
}
//Handling the HTML file
// UploadFile serves the upload form page (index.html).
func UploadFile(c *gin.Context) {
	//it is for calling the html file to be loaded
	c.HTML(200, "index.html", nil)
}
// TakeFile receives one uploaded JSON file (form field "myFile"), saves it
// under JsonFile/, parses it and inserts each record into the products table.
func TakeFile(c *gin.Context) {
	//this is for receiving the uploaded content in the name file
	//the "myFile" comes from the index.html where we used this attribute to represent the name of the file
	file, err := c.FormFile("myFile")
	//if error happens while uploading then panic
	if err != nil {
		fmt.Println(err)
		log.Panic(err)
	}
	//it is to store the file thus get into the destination directory required and set by us
	err = c.SaveUploadedFile(file, "JsonFile/"+file.Filename)
	if err != nil {
		log.Fatal(err)
	}
	path := "JsonFile/" + file.Filename
	var prod []migrations.Product
	//reading the Json file into the file
	// NOTE(review): the read error is discarded here — a missing file
	// would surface as an Unmarshal error below.
	file1, _ := ioutil.ReadFile(path)
	//converting the Json file into the slice of bytes
	// NOTE(review): this err is overwritten by the next Unmarshal before
	// being checked; only the second decode's error is reported.
	err = json.Unmarshal([]byte(file1), &prod)
	//Handling Unstructured data
	var pp []map[string]interface{}
	err = json.Unmarshal([]byte(file1), &pp)
	//if error happens then call panic
	if err != nil {
		fmt.Println(err)
	}
	//migration to create the table in the Database
	inits.Db.AutoMigrate(&migrations.Product{})
	//filling the database table
	for i := 0; i < len(pp); i++ {
		//this is getting the entire field and setting that to the declare field
		// NOTE(review): `byte` shadows the builtin type name.
		byte, _ := json.Marshal(pp[i])
		//setting the prod[i] declare field
		prod[i].Declare = byte
		//now sending the entry to the database
		inits.Db.Create(prod[i])
	}
}
//this is for getting the HTML view
// MultiUpload serves the multi-file upload form page (temp.html).
func MultiUpload(c *gin.Context) {
	c.HTML(200, "temp.html", nil)
}
// StoreMultiUpload receives a multipart upload of several JSON files (form
// field "multiplefiles"), saves each under JsonFile/, and loads them into
// the database concurrently, waiting for every loader goroutine to finish.
func StoreMultiUpload(c *gin.Context) {
	form, _ := c.MultipartForm()
	files := form.File["multiplefiles"]
	// Create the table once, up front, rather than from each goroutine.
	inits.Db.AutoMigrate(&migrations.Product{})
	var wg sync.WaitGroup
	for _, file := range files {
		err := c.SaveUploadedFile(file, "JsonFile/"+file.Filename)
		if err != nil {
			log.Fatal(err)
		}
		wg.Add(1)
		// Bug fix: `defer wg.Done()` in this function deferred every Done
		// until after wg.Wait(), deadlocking the handler. Done must be
		// signalled by each goroutine when its own work completes. The
		// loop variable is also passed as an argument so each goroutine
		// sees its own file.
		go func(f *multipart.FileHeader) {
			defer wg.Done()
			CreateDatabase(f)
		}(file)
	}
	wg.Wait()
}
// CreateDatabase parses the previously-saved upload file2 (under JsonFile/)
// and inserts each of its JSON records into the products table, preserving
// the raw JSON in each row's Declare field.
func CreateDatabase(file2 *multipart.FileHeader) {
	path := "JsonFile/" + file2.Filename
	var prod []migrations.Product
	//reading the Json file into the file
	// NOTE(review): the read error is discarded; a missing file surfaces
	// as an Unmarshal error below.
	file1, _ := ioutil.ReadFile(path)
	//converting the Json file into the slice of bytes
	// NOTE(review): this err is overwritten by the next Unmarshal before
	// being checked.
	err := json.Unmarshal([]byte(file1), &prod)
	var pp []map[string]interface{}
	err = json.Unmarshal([]byte(file1), &pp)
	//if error happens then call panic
	if err != nil {
		fmt.Println(err)
	}
	//filling the database table
	for i := 0; i < len(pp); i++ {
		//this is getting the entire field and setting that to the declare field
		// NOTE(review): `byte` shadows the builtin type name.
		byte, _ := json.Marshal(pp[i])
		//setting the prod[i] declare field
		prod[i].Declare = byte
		//now sending the entry to the database
		inits.Db.Create(prod[i])
	}
}
package main
import (
"fmt"
)
// describe is satisfied by anything that can render a one-line description.
type describe interface {
	description() string
}
// printDescription prints the description of any value implementing describe.
func printDescription(d describe) {
	fmt.Println("Description: " + d.description())
}
// Product is a demo item whose self-promotion text (PR) is pluggable.
type Product struct {
	id    uint
	name  string
	price uint
	// PR produces the product's promotional statement.
	PR PRStatement
}
// PRStatement is a function type producing a promotional string.
type PRStatement func() string

// description is defined with the PRStatement function type itself as the
// receiver, so any matching function value satisfies the describe interface.
func (pr PRStatement) description() string {
	return pr()
}
// main demonstrates satisfying an interface with a function type: each
// product gets a closure as its PR statement, and the closure itself is
// passed to printDescription as a describe implementation.
func main() {
	p1 := &Product{id: 1, name: "Golang PC", price: 10000}
	// The closure captures p1, so it reads the product's current fields.
	p1.PR = func() string {
		return fmt.Sprintf("この %s は、値段が%d円なのでとてもお買い得です", p1.name, p1.price)
	}
	printDescription(p1.PR)
	p2 := &Product{id: 2, name: "リンゴ", price: 100}
	p2.PR = func() string {
		return fmt.Sprintf("この %s は、とても美味しいです", p2.name)
	}
	printDescription(p2.PR)
}
|
package main
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
)
// checkFatalError reports err on stderr and terminates the process with
// exit status 1; it does nothing when err is nil.
func checkFatalError(err error) {
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "error is %v\n", err)
	os.Exit(1)
}
// main downloads a single hard-coded URL, prints the response headers and
// saves the body to a file named after the last URL path segment.
func main() {
	fmt.Println("hello wget")
	url := "https://www.baidu.com/img/bd_logo1.png"
	// url := "https://mirrors.tuna.tsinghua.edu.cn/ubuntu-releases/releases/18.04/ubuntu-18.04.2-live-server-amd64.iso"
	response, err := http.Get(url)
	if response != nil {
		// Close the body even on error paths that still carry a response.
		defer response.Body.Close()
	}
	checkFatalError(err)
	// print header
	fmt.Printf("http header for %s is:\n", url)
	for k, v := range response.Header {
		fmt.Printf("%s: ", k)
		for i := range v {
			fmt.Printf("%s", v[i])
		}
		fmt.Printf("\n")
	}
	// write body to file
	fmt.Println("begin write body")
	baseName := filepath.Base(url)
	newFile, err := os.Create(baseName)
	checkFatalError(err)
	defer newFile.Close()
	// Bug fix: the io.Copy error was silently dropped, so a truncated or
	// failed download still reported success.
	_, err = io.Copy(newFile, response.Body)
	checkFatalError(err)
	fmt.Println("finish save body to file:", baseName)
}
|
package util
import (
"log"
"os"
)
func CreateLogger(prefix, logfile string) *log.Logger{
if logfile == "" {
return log.New(os.Stdout, prefix, log.LstdFlags)
} else {
file, _ := os.Open(logfile)
return log.New(file, prefix, log.LstdFlags)
}
}
|
package util
import (
"bufio"
"fmt"
"io"
"os"
"strings"
)
// path is the scratch file used by the create/write/read/delete demos below.
var path = "/Users/novalagung/Documents/temp/test.txt"
// createFile creates the demo file at path when it does not already exist.
func createFile() {
	// Only create the file when stat reports it as missing.
	if _, err := os.Stat(path); os.IsNotExist(err) {
		file, err := os.Create(path)
		if isError(err) {
			return
		}
		defer file.Close()
	}
	fmt.Println("==> done creating file", path)
}
// writeFile opens the demo file with read/write permission, writes two demo
// lines, and flushes the changes to disk.
func writeFile() {
	file, err := os.OpenFile(path, os.O_RDWR, 0644)
	if isError(err) {
		return
	}
	defer file.Close()
	// Write the demo content line by line.
	for _, line := range []string{"halo\n", "mari belajar golang\n"} {
		if _, err = file.WriteString(line); isError(err) {
			return
		}
	}
	// Flush the file contents to stable storage.
	if err = file.Sync(); isError(err) {
		return
	}
	fmt.Println("==> done writing to file")
}
// readFile re-opens the demo file and prints its entire content.
func readFile() {
	file, err := os.OpenFile(path, os.O_RDWR, 0644)
	if isError(err) {
		return
	}
	defer file.Close()
	// Bug fix: the original read into a fixed 1 KiB buffer, overwrote it
	// on every iteration, and finally printed the raw buffer — so files
	// larger than 1 KiB lost data and shorter files printed stale trailing
	// bytes. Accumulate every chunk instead.
	var content []byte
	buf := make([]byte, 1024)
	for {
		n, err := file.Read(buf)
		if n > 0 {
			content = append(content, buf[:n]...)
		}
		if err == io.EOF {
			break
		}
		if isError(err) {
			return
		}
		if n == 0 {
			break
		}
	}
	fmt.Println("==> done reading from file")
	fmt.Println(string(content))
}
// deleteFile removes the demo file at path.
func deleteFile() {
	if err := os.Remove(path); isError(err) {
		return
	}
	fmt.Println("==> done deleting file")
}
func isError(err error) bool {
if err != nil {
fmt.Println(err.Error())
}
return (err != nil)
}
func CreateDir(path string) error {
_, err := os.Stat(path)
if os.IsNotExist(err) {
err = os.MkdirAll(path, 0700)
if err != nil {
return err
}
}
return nil
}
func ReadLine(fileName string, handler func(string)) error {
f, err := os.Open(fileName)
if err != nil {
return err
}
defer f.Close()
buf := bufio.NewReader(f)
for {
line, err := buf.ReadString('\n')
line = strings.TrimSpace(line)
if err != nil {
if err == io.EOF {
return nil
}
return err
}
handler(line)
}
return nil
}
|
package test
import (
"github.com/orbs-network/orbs-network-javascript-plugin/test"
. "github.com/orbs-network/orbs-network-javascript-plugin/worker"
"github.com/stretchr/testify/require"
"testing"
)
// TestNewV8Worker_MethodNotFound checks that invoking a method the contract
// does not export fails with a "method not found" execution error.
func TestNewV8Worker_MethodNotFound(t *testing.T) {
	sdkHandler := test.AFakeSdkFor([]byte("signer"), []byte("caller"))
	contract := `
import { State } from "orbs-contract-sdk/v1";
const KEY = new Uint8Array([1, 2, 3])
export function write(value) {
State.writeString(KEY, value)
}
`
	worker := newTestWorker(t, sdkHandler, contract)
	// A defined method succeeds...
	worker.callMethodWithoutErrors("write", ArgsToArgumentArray("Diamond Dogs"))
	// ...while an undefined one surfaces a descriptive failure.
	outputValue, outputErr := worker.callMethodWithErrors("_read", ArgsToArgumentArray())
	require.EqualError(t, outputErr, "JS contract execution failed")
	require.EqualValues(t, "method '_read' not found in contract", outputValue.StringValue())
}
// TestNewV8Worker_MethodThrowsError checks that a JS exception thrown inside
// a contract method is reported as an execution failure carrying the
// exception's message.
func TestNewV8Worker_MethodThrowsError(t *testing.T) {
	sdkHandler := test.AFakeSdkFor([]byte("signer"), []byte("caller"))
	contract := `
import { State } from "orbs-contract-sdk/v1";
const KEY = new Uint8Array([1, 2, 3])
export function write(value) {
State.writeString(KEY, value)
}
export function bang() {
throw new Error("bang!")
}
`
	worker := newTestWorker(t, sdkHandler, contract)
	worker.callMethodWithoutErrors("write", ArgsToArgumentArray("Diamond Dogs"))
	// The thrown Error's message becomes the returned output value.
	outputValue, outputErr := worker.callMethodWithErrors("bang", ArgsToArgumentArray())
	require.EqualError(t, outputErr, "JS contract execution failed")
	require.EqualValues(t, "bang!", outputValue.StringValue())
}
// TestNewV8Worker_VerifyDataTypes checks the SDK's Verify helpers: each
// accepts a correctly-typed argument and rejects a mistyped one with a
// descriptive error message.
func TestNewV8Worker_VerifyDataTypes(t *testing.T) {
	sdkHandler := test.AFakeSdkFor([]byte("signer"), []byte("caller"))
	contract := `
import { Verify } from "orbs-contract-sdk/v1";
export function verifyBytes(value) {
Verify.bytes(value)
}
export function verifyString(value) {
Verify.string(value)
}
export function verifyUint32(value) {
Verify.uint32(value)
}
export function verifyUint64(value) {
Verify.uint64(value)
}
`
	worker := newTestWorker(t, sdkHandler, contract)
	// bytes: accepts []byte, rejects a string
	worker.callMethodWithoutErrors("verifyBytes", ArgsToArgumentArray([]byte("Nicolas Cage")))
	outputValue, outputErr := worker.callMethodWithErrors("verifyBytes", ArgsToArgumentArray("Vampire's Kiss"))
	require.EqualError(t, outputErr, "JS contract execution failed")
	require.EqualValues(t, `Value "Vampire's Kiss" is not a byte array`, outputValue.StringValue())
	// string: accepts a string, rejects []byte
	worker.callMethodWithoutErrors("verifyString", ArgsToArgumentArray("Nicolas Cage"))
	outputValue, outputErr = worker.callMethodWithErrors("verifyString", ArgsToArgumentArray([]byte("Vampire's Kiss")))
	require.EqualError(t, outputErr, "JS contract execution failed")
	require.EqualValues(t, `Value "86,97,109,112,105,114,101,39,115,32,75,105,115,115" is not a string`, outputValue.StringValue())
	// uint32: accepts uint32, rejects uint64
	worker.callMethodWithoutErrors("verifyUint32", ArgsToArgumentArray(uint32(1982)))
	outputValue, outputErr = worker.callMethodWithErrors("verifyUint32", ArgsToArgumentArray(uint64(1997)))
	require.EqualError(t, outputErr, "JS contract execution failed")
	require.EqualValues(t, `Value "1997" is not a uint32`, outputValue.StringValue())
	// uint64: accepts uint64, rejects a missing argument
	worker.callMethodWithoutErrors("verifyUint64", ArgsToArgumentArray(uint64(1997)))
	outputValue, outputErr = worker.callMethodWithErrors("verifyUint64", ArgsToArgumentArray())
	require.EqualError(t, outputErr, "JS contract execution failed")
	require.EqualValues(t, `Value "undefined" is not a uint64`, outputValue.StringValue())
}
|
package main
import (
"fmt"
"time"
"github.com/liasece/micchaos/ccmd"
"github.com/liasece/micchaos/testclient/client"
)
// run drives one test client: it registers (and, per the dead branch below,
// would enter the game), then signals completion on ch when it returns.
// i numbers the client and is used to build its login name.
func run(ch chan struct{}, i int) {
	// Always signal completion, even on early return.
	defer func() {
		ch <- struct{}{}
	}()
	c := &client.Client{}
	c.Init(fmt.Sprintf("Jansen%d", i+1), "testpsw99876")
	err := c.Dial(":11002")
	if err != nil {
		return
	}
	// NOTE(review): this loop runs exactly once and the inner i shadows the
	// parameter, so `i < 10` is always true and the enter-game branch is
	// dead code — presumably the loop bound was meant to be larger; confirm
	// the intended iteration count.
	for i := 0; i < 1; i++ {
		if i < 10 {
			c.SendMsg(c.GetRegsiterMsg())
		} else {
			// Enter the game.
			c.SendMsg(&ccmd.CS_EnterGame{})
		}
	}
	time.Sleep(time.Second * 1)
}
// main launches ten concurrent test clients, waits until every one has
// signalled completion, then lingers briefly before exiting.
func main() {
	threadsum := 10
	done := make(chan struct{}, threadsum)
	for n := 0; n < threadsum; n++ {
		go run(done, n)
	}
	// Drain one completion signal per worker.
	for n := 0; n < threadsum; n++ {
		<-done
	}
	time.Sleep(time.Millisecond * 10000)
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package datablocks
import (
"fmt"
"sync"
"base/errors"
"columns"
"datavalues"
)
// DataBlock is a column-oriented batch of rows: one DataBlockValue per
// column plus a seqs index over row positions.
type DataBlock struct {
	// mu guards seqs/values/totalBytes for concurrent readers.
	mu sync.RWMutex
	// seqs holds one entry per row; WriteRow appends the row's offset here.
	seqs []int
	info *DataBlockInfo
	// values holds the per-column value vectors.
	values []*DataBlockValue
	// totalBytes accumulates the byte size of all written values.
	totalBytes uint64
}
// NewDataBlock builds an empty block with one DataBlockValue per column.
func NewDataBlock(cols []*columns.Column) *DataBlock {
	values := make([]*DataBlockValue, len(cols))
	for i, col := range cols {
		values[i] = NewDataBlockValue(col)
	}
	return &DataBlock{
		info:   &DataBlockInfo{},
		seqs:   make([]int, 0),
		values: values,
	}
}
// newDataBlock wraps pre-built row sequences and column values in a block.
func newDataBlock(seqs []int, values []*DataBlockValue) *DataBlock {
	block := &DataBlock{info: &DataBlockInfo{}}
	block.seqs = seqs
	block.values = values
	return block
}
// Clone returns a new empty block with the same column layout (no rows copied).
func (block *DataBlock) Clone() *DataBlock {
	return NewDataBlock(block.Columns())
}
// DeepClone returns a full copy of the block: column layout, sequence
// index, byte accounting, and a deep copy of every column's values.
func (block *DataBlock) DeepClone() *DataBlock {
	clone := NewDataBlock(block.Columns())
	clone.totalBytes = block.totalBytes
	clone.seqs = make([]int, len(block.seqs))
	copy(clone.seqs, block.seqs)
	for i, value := range block.values {
		clone.values[i] = value.DeepClone()
	}
	return clone
}
// Info returns the block's metadata descriptor.
func (block *DataBlock) Info() *DataBlockInfo {
	return block.info
}
// TotalBytes returns the accumulated byte size of all written values.
// Safe for concurrent use (read lock).
func (block *DataBlock) TotalBytes() uint64 {
	block.mu.RLock()
	defer block.mu.RUnlock()
	return block.totalBytes
}
// NumRows returns the number of rows written to the block.
// Safe for concurrent use (read lock).
func (block *DataBlock) NumRows() int {
	block.mu.RLock()
	defer block.mu.RUnlock()
	return len(block.seqs)
}
// NumColumns returns the number of columns in the block.
func (block *DataBlock) NumColumns() int {
	return len(block.values)
}
// Columns returns the column descriptors in block order.
func (block *DataBlock) Columns() []*columns.Column {
	result := make([]*columns.Column, 0, len(block.values))
	for _, cv := range block.values {
		result = append(result, cv.column)
	}
	return result
}
// Column looks up a column descriptor by name; it returns an error when no
// column with that name exists.
func (block *DataBlock) Column(name string) (*columns.Column, error) {
	for _, cv := range block.values {
		if cv.column.Name != name {
			continue
		}
		return cv.column, nil
	}
	return nil, errors.Errorf("Can't find column:%v", name)
}
// DataBlockValue looks up a column's value vector by column name; it
// returns an error when no column with that name exists.
func (block *DataBlock) DataBlockValue(name string) (*DataBlockValue, error) {
	for _, cv := range block.values {
		if cv.column.Name == name {
			return cv, nil
		}
	}
	return nil, errors.Errorf("Can't find column:%v", name)
}
// RowIterator returns an iterator over the block's rows.
func (block *DataBlock) RowIterator() *DataBlockRowIterator {
	return newDataBlockRowIterator(block)
}
// ColumnIterator returns an iterator over the named column's values; it
// returns an error when no column with that name exists.
func (block *DataBlock) ColumnIterator(name string) (*DataBlockColumnIterator, error) {
	for i, v := range block.values {
		if v.column.Name == name {
			return newDataBlockColumnIterator(block, i), nil
		}
	}
	return nil, errors.Errorf("Can't find column:%v", name)
}
// ColumnIterators returns one iterator per column, in column order.
func (block *DataBlock) ColumnIterators() []*DataBlockColumnIterator {
	// Pre-size the result: exactly one iterator per column is produced.
	iterators := make([]*DataBlockColumnIterator, 0, len(block.values))
	for i := range block.values {
		iterators = append(iterators, newDataBlockColumnIterator(block, i))
	}
	return iterators
}
// MixsIterator returns a row iterator restricted to the named columns.
func (block *DataBlock) MixsIterator(columns []string) (*DataBlockMixsIterator, error) {
	return newDataBlockColsRowIterator(block, columns)
}
// WriteRow appends one row of values (one per column) to the block and
// records its sequence position. The value count must equal the column count.
func (block *DataBlock) WriteRow(values []datavalues.IDataValue) error {
	cols := block.NumColumns()
	if len(values) != cols {
		return errors.Errorf("Can't append row, expect column length:%v", cols)
	}
	if cols == 0 {
		// Nothing to write into; also avoids indexing values[0] below.
		return nil
	}
	// Bug fix: readers (TotalBytes, NumRows) take the RWMutex, but this
	// writer mutated seqs/values/totalBytes without holding it — a data
	// race under concurrent use.
	block.mu.Lock()
	defer block.mu.Unlock()
	offset := len(block.values[0].values)
	for i := 0; i < cols; i++ {
		block.totalBytes += uint64(values[i].Size())
		block.values[i].values = append(block.values[i].values, values[i])
	}
	block.seqs = append(block.seqs, offset)
	return nil
}
// Close releases the block's row and column storage and resets the byte
// accounting. NOTE(review): fields are cleared without taking mu — confirm
// Close is never called concurrently with readers/writers.
func (block *DataBlock) Close() {
	block.seqs = nil
	block.values = nil
	block.totalBytes = 0
}
// Dump prints a crude textual rendering of the block: a header listing each
// column's name and data type, followed by one line per row.
func (block *DataBlock) Dump() {
	header := "\n| "
	for _, cv := range block.values {
		header += fmt.Sprintf("%v(%T) | ", cv.column.Name, cv.column.DataType)
	}
	fmt.Print(header)
	body := "\n"
	iter := block.RowIterator()
	for iter.Next() {
		for _, cell := range iter.Value() {
			body += cell.String() + " "
		}
		body += "\n"
	}
	fmt.Print(body)
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"path/filepath"
"time"
"github.com/fasthall/kubeprof/client"
"github.com/fasthall/kubeprof/util"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
// Command-line options, populated by init() below.
var tool string         // profiling tool to run (e.g. perf)
var kubeConfig string   // path to the kubeconfig file
var sshKey string       // path to the SSH private key for node access
var stageDir string     // node directory for staging the tool binary
var outputDir string    // host directory receiving result files
var jobFile string      // job description JSON file
var namespace string    // Kubernetes namespace for the job
var skipChecking bool   // skip verifying the tool binary on each node
var deleteJob bool      // delete the job and its pods when finished
// init defines and parses all command-line flags before main runs.
func init() {
	flag.StringVar(&tool, "tool", "", "tool of choice to profile the workload")
	flag.StringVar(&jobFile, "job_file", "", "job description JSON file")
	flag.StringVar(&kubeConfig, "kube_config", filepath.Join(util.HomeDir(), ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	flag.StringVar(&sshKey, "ssh_key", filepath.Join(util.HomeDir(), ".ssh", "google_compute_engine"), "(optional) absolute path to the ssh private key")
	flag.StringVar(&stageDir, "stage_dir", "/tmp/", "(optional) node directory to temporarily store profiling tool binary")
	flag.StringVar(&outputDir, "output_dir", "", "(optional) host directory to store result files")
	flag.StringVar(&namespace, "namespace", "default", "(optional) Kubernetes namespace to run the job")
	flag.BoolVar(&skipChecking, "skip_checking", false, "skip checking if binary file is ready")
	flag.BoolVar(&deleteJob, "rm", false, "delete the job and the associated pods after finished")
	flag.Parse()
}
// main drives the whole profiling workflow:
//  1. validate flags and prepare a timestamped output directory,
//  2. ensure the profiling tool binary is staged on every node,
//  3. create the Kubernetes job and wait for it to complete,
//  4. scp the result files back to the host,
//  5. optionally delete the job and its pods (-rm flag).
func main() {
	if tool == "" {
		log.Fatalf("Profiling tool not specified. Choose from [perf]")
	}
	if jobFile == "" {
		log.Fatalln("Please specify the path of job description file.")
	}
	if outputDir == "" {
		wd, err := os.Getwd()
		if err != nil {
			log.Fatalln("Couldn't get working directory. Please specify output directory with option --output_dir.")
		}
		outputDir = wd
	} else if _, err := os.Stat(outputDir); os.IsNotExist(err) {
		// Create the output directory if it doesn't exist. The Mkdir error
		// was previously ignored, deferring the failure to the first write.
		if err := os.MkdirAll(outputDir, 0755); err != nil {
			log.Fatalf("Couldn't create output directory %s: %v\n", outputDir, err)
		}
	}
	// Make a subdirectory named after the current time so runs don't collide.
	now := time.Now()
	outputDir = filepath.Join(outputDir, fmt.Sprintf("%d%02d%02dT%02d%02d%02d", now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), now.Second()))
	if _, err := os.Stat(outputDir); os.IsNotExist(err) {
		if err := os.MkdirAll(outputDir, 0755); err != nil {
			log.Fatalf("Couldn't create output directory %s: %v\n", outputDir, err)
		}
	}
	cli, err := client.NewClient(kubeConfig, namespace, sshKey)
	if err != nil {
		log.Fatalf("Couldn't create a node client with config file %s: %v\n", kubeConfig, err)
	}
	if !skipChecking {
		// Check that the profiling tool binary is present on every node in
		// the cluster, uploading it from ./bin where missing.
		nodes, err := cli.ListNodes()
		if err != nil {
			log.Fatalln("Couldn't list nodes of cluster:", err)
		}
		for _, node := range nodes {
			log.Printf("Checking if %s can be found in %s...\n", tool, node.Name)
			exist, err := cli.CheckBinary(node, filepath.Join(stageDir, tool))
			if err != nil {
				log.Fatalf("Couldn't connect to the node %s: %v\n", node.Name, err)
			}
			// log.Fatalf exits, so the former else wrapper was redundant.
			if !exist {
				// The binary is not staged yet; upload it to the stage directory.
				log.Printf("%s couldn't be found. Uploading the binary...\n", tool)
				cwd, err := os.Getwd()
				if err != nil {
					log.Fatalln("Couldn't get the current workdir:", err)
				}
				err = cli.UploadBinary(node, filepath.Join(cwd, "bin/", tool), stageDir)
				if err != nil {
					log.Fatalf("Couldn't upload %s to node %s: %v.\nMake sure the user has the permission to copy file into the stage directory.\n", tool, node.Name, err)
				}
				log.Println(tool, "uploaded.")
			}
			// Kernel symbols are needed for meaningful perf reports; failure
			// here is non-fatal.
			err = cli.EnableKernelSymbols(node)
			if err != nil {
				log.Printf("Failed to enable kernel symbols on node %s.\n", node.Name)
			}
		}
		log.Println(tool, "is found in all nodes. Creating Kubernetes job...")
	} else {
		log.Println("Skip checking if binary file is ready on all nodes.")
	}
	log.Printf("Loading job description file %s.\n", jobFile)
	jobObj := util.ParseFromJSON(jobFile)
	// Run the job in privileged mode with SYS_ADMIN so perf works properly.
	jobObj = util.AddSecurityContext(jobObj)
	// Mount the profiling tool binary from the node host into the pod.
	jobObj = util.AddStageDirMount(jobObj, stageDir)
	if len(jobObj.Spec.Template.Spec.Containers) == 0 {
		log.Println("No container specified in job description.")
		return
	}
	cmds, err := util.GetJobCommand(jobObj)
	if err != nil {
		// TODO run it and inspect the image on the node to save space and eliminate the need to install Docker on the host machine
		// the following command pulls the image to the host (not the node) and check its original command by docker inspect,
		// hence it needs the host machine has Docker installed and running
		// also it takes space to pull the image to the host
		cmds, err = util.GetImageCommand(jobObj.Spec.Template.Spec.Containers[0].Image)
		if err != nil {
			log.Fatalf("Failed to get the original command of Docker image %s.\n", jobObj.Spec.Template.Spec.Containers[0].Image)
		}
	}
	// Override the pod command so it copies and runs the profiling tool
	// first, followed by the original command.
	jobObj = util.OverrideCommand(jobObj, tool, stageDir, cmds)
	log.Printf("Creating job %s...\n", jobObj.Name)
	jobObj, err = cli.CreateJob(jobObj)
	if err != nil {
		log.Fatalln("Failed to create job:", err)
	}
	log.Printf("Job %s created.\n", jobObj.Name)
	log.Println("Waiting for job to complete...")
	err = cli.WaitForJobComplete(jobObj)
	if err != nil {
		log.Fatalln("Couldn't get job's status:", err)
	}
	pods, err := cli.GetPodsOfJob(jobObj)
	if err != nil {
		log.Fatalf("Failed to get the pods of job %s: %v\n", jobObj.Name, err)
	}
	// Job is completed; copy result files back to the host.
	for _, pod := range pods {
		ip, err := cli.GetExternalIPOfPod(&pod)
		if err != nil {
			log.Printf("Couldn't find the external IP address of pod %s: %v\n", pod.Name, err)
			continue
		}
		// TODO a bit hacky here
		// perf.data generated by perf in kubernetes has mode 600 rather than usual 644
		// ssh into the node and run chmod on result files to make sure scp works properly
		util.RunSSHCommand(ip, sshKey, []string{"sudo", "chmod", "644", filepath.Join(stageDir, "perf.data")})
		util.RunSSHCommand(ip, sshKey, []string{"sudo", "chmod", "644", filepath.Join(stageDir, "perf.report")})
		// Run scp commands to copy the result files to the host.
		stdout, stderr, err := util.RunSCPCommand(sshKey, ip+":"+filepath.Join(stageDir, "perf.data"), filepath.Join(outputDir, pod.Name+".data"))
		if err != nil {
			log.Printf("Failed to copy perf.data from pod %s: %v\nPlease check if the permission of staging directory %s on node %s is sufficient.\n", pod.Name, err, stageDir, pod.Name)
			log.Println(stdout, stderr)
		}
		stdout, stderr, err = util.RunSCPCommand(sshKey, ip+":"+filepath.Join(stageDir, "perf.report"), filepath.Join(outputDir, pod.Name+".report"))
		if err != nil {
			log.Printf("Failed to copy perf.report from pod %s: %v\nPlease check if the permission of staging directory %s on node %s is sufficient.\n", pod.Name, err, stageDir, pod.Name)
			log.Println(stdout, stderr)
		}
	}
	// If the -rm flag is set, clean up the job and its associated pods.
	if deleteJob {
		log.Println("Deleting the job...")
		err = cli.DeleteJobSync(jobObj)
		if err != nil {
			log.Printf("Failed to delete job %s: %v\n", jobObj.Name, err)
		}
		log.Println("Job deleted. Deleting pods...")
		for _, pod := range pods {
			err = cli.DeletePod(&pod)
			if err != nil {
				log.Printf("Failed to delete pod %s: %v\n", pod.Name, err)
			}
		}
		log.Println("Pods deleted.")
	}
}
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rditer
import (
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
)
// ComputeStatsForRange computes the stats for a given range by
// iterating over all key ranges for the given range that should
// be accounted for in its stats.
// ComputeStatsForRange computes the stats for a given range by
// iterating over all key ranges for the given range that should
// be accounted for in its stats.
func ComputeStatsForRange(
	d *roachpb.RangeDescriptor, reader storage.Reader, nowNanos int64,
) (enginepb.MVCCStats, error) {
	ms := enginepb.MVCCStats{}
	var err error
	for _, keyRange := range MakeReplicatedKeyRangesExceptLockTable(d) {
		// The anonymous function exists so that the deferred iter.Close()
		// runs at the end of each iteration rather than piling up until
		// the whole function returns.
		func() {
			iter := reader.NewMVCCIterator(storage.MVCCKeyAndIntentsIterKind,
				storage.IterOptions{UpperBound: keyRange.End.Key})
			defer iter.Close()
			var msDelta enginepb.MVCCStats
			// err is captured from the enclosing scope; the bare return only
			// exits the closure, and the check below surfaces the failure.
			if msDelta, err = iter.ComputeStats(keyRange.Start.Key, keyRange.End.Key, nowNanos); err != nil {
				return
			}
			ms.Add(msDelta)
		}()
		if err != nil {
			return enginepb.MVCCStats{}, err
		}
	}
	return ms, nil
}
|
package admin_models
import (
"github.com/astaxie/beego/orm"
)
// TableName maps the DataFile struct to the "data_file" database table
// for beego's ORM.
func (df *DataFile) TableName() string {
	return "data_file"
}
// Insert persists df as a new row; the auto-generated row id returned by
// the ORM is discarded.
func (df *DataFile) Insert() error {
	_, err := orm.NewOrm().Insert(df)
	return err
}
// Read loads the row matching df's primary key (or the optional fields)
// into df, returning the ORM error unchanged.
func (df *DataFile) Read(fields ...string) error {
	return orm.NewOrm().Read(df, fields...)
}
// ReadOrCreate loads the row matched on field (plus optional fields),
// inserting df when no row exists. It returns beego's
// (created, id, err) triple unchanged.
func (df *DataFile) ReadOrCreate(field string, fields ...string) (bool, int64, error) {
	return orm.NewOrm().ReadOrCreate(df, field, fields...)
}
// Update writes df back to the database; fields (if given) restrict
// which columns are updated. The affected-row count is discarded.
func (df *DataFile) Update(fields ...string) error {
	_, err := orm.NewOrm().Update(df, fields...)
	return err
}
// Delete removes the row for df; fields (if given) select the columns
// used to build the delete condition, mirroring beego's Delete API.
// The affected-row count is discarded.
func (df *DataFile) Delete(fields ...string) error {
	// gofmt fix: "fields ..." normalized to "fields...".
	_, err := orm.NewOrm().Delete(df, fields...)
	return err
}
// DataFileList returns all non-deleted data_file rows for the given data
// and field ids, newest first, along with the row count.
func DataFileList(dataId int64, fieldId int64) (int64, []DataFile, error) {
	var rows []DataFile
	qs := orm.NewOrm().QueryTable(DataFile{}).
		Filter("data_id", dataId).
		Filter("field_id", fieldId).
		Filter("deleted_at__isnull", true).
		OrderBy("-created_at")
	count, err := qs.All(&rows)
	return count, rows, err
}
// init registers DataFile with beego's ORM at package load time.
func init() {
	orm.RegisterModel(new(DataFile))
}
|
package auth
/*
Creation Time: 2019 - Sep - 21
Created by: (ehsan)
Maintainers:
1. Ehsan N. Moosa (E2)
Auditor: Ehsan N. Moosa (E2)
Copyright Ronak Software Group 2018
*/
// CreateAccessToken is the request payload for minting an access token
// with the given permissions, lifetime (Period, presumably seconds —
// confirm) and application name.
// easyjson:json
type CreateAccessToken struct {
	Permissions []string `json:"permissions"`
	Period int64 `json:"period"`
	AppName string `json:"app_name"`
}

// CAccessTokenCreated names the event emitted when a token is created.
const CAccessTokenCreated = "ACCESS_TOKEN_CREATED"

// AccessTokenCreated carries the newly minted token and its expiry.
// easyjson:json
type AccessTokenCreated struct {
	AccessToken string `json:"access_token"`
	ExpireOn int64 `json:"expire_on"`
}

// SendCodeReq asks for a verification code to be sent to a phone number.
// easyjson:json
// @RPC
// @Returns: PhoneCodeSent
type SendCodeReq struct {
	Phone string `json:"phone"`
}

// CPhoneCodeSent names the "phone code sent" response.
const CPhoneCodeSent = "PHONE_CODE_SENT"

// PhoneCodeSent returns the hash to echo back on login/register and
// whether the phone number is already registered.
// easyjson:json
type PhoneCodeSent struct {
	PhoneCodeHash string `json:"phone_code_hash"`
	Registered bool `json:"registered"`
}

// LoginReq authenticates an existing user with the received code.
// easyjson:json
// @RPC
// @Returns: Authorization
type LoginReq struct {
	PhoneCode string `json:"phone_code"`
	PhoneCodeHash string `json:"phone_code_hash"`
	Phone string `json:"phone"`
}

// RegisterReq creates a new account; same code exchange as LoginReq plus
// the desired username.
// easyjson:json
// @RPC
// @Returns: Authorization
type RegisterReq struct {
	PhoneCode string `json:"phone_code"`
	PhoneCodeHash string `json:"phone_code_hash"`
	Phone string `json:"phone"`
	Username string `json:"username"`
}

// CAuthorization names the authorization response.
const CAuthorization = "AUTHORIZATION"

// Authorization is the session descriptor returned after a successful
// login or registration.
// easyjson:json
type Authorization struct {
	UserID string `json:"user_id"`
	Phone string `json:"phone"`
	Username string `json:"username"`
	SessionID string `json:"session_id"`
}

// LogoutReq ends the session; Unsubscribe presumably also removes push
// subscriptions — confirm against the handler.
// easyjson:json
// @RPC
// @Returns:
type LogoutReq struct {
	Unsubscribe bool `json:"unsubscribe"`
}
|
package server
import (
"fmt"
"log"
fiber "github.com/gofiber/fiber/v2"
"gitlab.com/cfs-service/server/handlers"
"gitlab.com/cfs-service/server/middleware"
"gitlab.com/cfs-service/store"
)
// Start wires up the HTTP routes and serves on the given port, blocking
// until the listener stops. The listener error is returned to the caller
// instead of terminating the process.
func Start(port uint64, s store.IStore) error {
	// Initialize handlers with the backing store.
	handlers.Initialize(s)
	app := fiber.New()
	// Liveness probe endpoint. The SendStatus error was previously
	// discarded; propagate it to fiber's error handler instead.
	app.Get("/healthz", func(c *fiber.Ctx) error {
		return c.SendStatus(fiber.StatusOK)
	})
	apiV1 := app.Group("/api/v1")
	apiV1.Use(middleware.AuthByHTTPSecureCookie)
	// Event endpoints.
	apiV1.Post("/events", handlers.PostEvent)
	apiV1.Get("/events", handlers.GetEvents)
	// Listen blocks. Previously its error went to log.Fatal, which exited
	// the process, bypassed the caller's error handling, and made the
	// trailing "return nil" unreachable.
	err := app.Listen(fmt.Sprintf(":%d", port))
	if err != nil {
		log.Printf("server stopped: %v", err)
	}
	return err
}
|
package convoso
import (
"net/http"
"net/url"
"strings"
)
// postFormRequest sends an application/x-www-form-urlencoded POST to
// route after injecting the package auth token into the form body.
// The caller owns the returned response and must close its Body.
// NOTE(review): the default http.Client has no timeout — confirm callers
// are fine with potentially unbounded waits.
func postFormRequest(route string, body url.Values) (*http.Response, error) {
	body.Add("auth_token", apiKEY)
	encoded := body.Encode()
	log.Info("POST: "+route, " POSTBODY: "+encoded)
	req, err := http.NewRequest(http.MethodPost, route, strings.NewReader(encoded))
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	return (&http.Client{}).Do(req)
}
|
package main
import (
"fmt"
"net/http"
)
// String is a named string type whose values are served verbatim.
type String string

// Struct groups the pieces of a greeting message; fmt prints it with its
// default struct formatting.
type Struct struct {
	Greeting string
	Punct string
	Who string
}
type Hello struct{}
func (h Hello) ServeHTTP(
w http.ResponseWriter,
r *http.Request) {
fmt.Fprint(w, "Hello!")
}
// main registers three demo handlers and serves on localhost:4000.
func main() {
	var h Hello
	u := String("Hellow World")
	http.Handle("/", h)
	// Serve the named-string value.
	http.HandleFunc("/string", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, u)
	})
	// Serve the struct with fmt's default formatting.
	http.HandleFunc("/struct", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, &Struct{"Hello", ":", "Gphoers!"})
	})
	// ListenAndServe only returns on failure; the error was previously
	// discarded, silently hiding e.g. a port already in use.
	if err := http.ListenAndServe("localhost:4000", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
|
package ast
import (
"github.com/OlegSchwann/GoDao/internal/flag"
"go/ast"
"go/parser"
"go/token"
)
// ParseFile parses the configured input Go file, keeping comments and
// collecting all errors. Verbose mode additionally enables the parser's
// trace output.
func ParseFile(config flag.Config) (*ast.File, error) {
	mode := parser.ParseComments | parser.AllErrors
	if config.Verbose {
		mode |= parser.Trace
	}
	return parser.ParseFile(token.NewFileSet(), config.InputGoFilePath, nil, mode)
}
|
package t1
// Copyright 2016-2017 MediaMath
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"encoding/xml"
"fmt"
"net/http"
"time"
)
// Header names and the error code used to recognize Mashery gateway
// rate-limit (over-QPS) responses.
const (
	headerMasheryError = "X-Mashery-Error-Code"
	headerMasheryDetail = "X-Error-Detail-Header"
	masheryErrorCodeRateLimit = "ERR_403_DEVELOPER_OVER_QPS"
)
// ErrorResponse reports one or more errors caused by an API request.
type ErrorResponse struct {
	Response *http.Response `json:"-"` // HTTP response that caused this error
	Message string `json:"message"` // error message
	Errors []Error `json:"errors"` // more detail on individual errors
	Meta Meta `json:"meta"`
}

// Error implements the error interface, combining the request method/URL,
// status code, top-level message and individual errors into one string.
// Note: it assumes Response and Response.Request are non-nil.
func (r *ErrorResponse) Error() string {
	return fmt.Sprintf("%v %v: %d %v %+v",
		r.Response.Request.Method, r.Response.Request.URL,
		r.Response.StatusCode, r.Message, r.Errors)
}
// RateLimitError occurs when API returns 403 Forbidden response with an
// error header signifying over QPS
type RateLimitError struct {
RetryAt time.Time
Response *http.Response // HTTP response that caused this error
Message string `json:"message"` // error message
}
func (r *RateLimitError) Error() string {
return fmt.Sprintf("%v %v: %d %v; rate reset in %v",
r.Response.Request.Method, r.Response.Request.URL,
r.Response.StatusCode, r.Message, r.RetryAt.Sub(time.Now()))
}
// Error reports more details on an individual error in an ErrorResponse.
// These are example validation error codes:
type Error struct {
	Resource string `json:"resource"` // resource on which the error occurred
	Type string `json:"type"` // Type of error (e.g. field-error)
	Field string `json:"field"` // field on which the error occurred
	Code string `json:"code"` // validation error code
	Message string `json:"message"` // Message describing the error. Errors with Code == "custom" will always have this set.
}

// Error renders which code was triggered by which field on which
// resource, satisfying the error interface.
func (e *Error) Error() string {
	msg := fmt.Sprintf("%v error caused by %v field on %v resource", e.Code, e.Field, e.Resource)
	return msg
}
// CheckResponse checks the API response for errors, and returns them if
// present. A response is considered an error if it has a status code outside
// the 200 range. API error responses are expected to have either no response
// body, or a JSON response body that maps to ErrorResponse. Any other
// response body will be silently ignored.
//
// The error type will be *RateLimitError for rate limit exceeded errors.
func CheckResponse(r *http.Response) error {
	// 2xx responses carry no error.
	if c := r.StatusCode; 200 <= c && c <= 299 {
		return nil
	}
	// Dispatch on Content-Type: Mashery gateway errors are XML, Adama
	// errors are JSON. mediaTypeMashery/mediaTypeJSON are declared
	// elsewhere in this package.
	switch cType := getContentType(r); cType {
	case mediaTypeMashery:
		return parseMasheryError(r)
	case mediaTypeJSON:
		return parseAdamaError(r)
	default:
		return fmt.Errorf("unknown content type: %s", cType)
	}
}
// parseMasheryError decodes a Mashery XML error body and classifies it:
// a 403 with the over-QPS header becomes a *RateLimitError, anything
// else an *ErrorResponse. masheryResponse and parseRateLimit are
// declared elsewhere in this package.
func parseMasheryError(r *http.Response) error {
	var mr masheryResponse
	if err := xml.NewDecoder(r.Body).Decode(&mr); err != nil {
		return err
	}
	if r.StatusCode == http.StatusForbidden && r.Header.Get(headerMasheryError) == masheryErrorCodeRateLimit {
		return &RateLimitError{
			RetryAt: parseRateLimit(r),
			Response: r,
			Message: mr.Message,
		}
	}
	return &ErrorResponse{
		Response: r,
		Message: mr.Message,
	}
}
// parseAdamaError decodes a JSON error body into an *ErrorResponse,
// falling back to the first detailed error's message or the meta status
// when no top-level message is present.
func parseAdamaError(r *http.Response) error {
	er := ErrorResponse{Response: r}
	// Response is tagged json:"-", so decoding never overwrites it.
	if err := json.NewDecoder(r.Body).Decode(&er); err != nil {
		return err
	}
	switch {
	case er.Message != "":
		// Top-level message already present; nothing to do.
	case len(er.Errors) == 1:
		er.Message = er.Errors[0].Message
	case er.Meta.Status != "":
		er.Message = er.Meta.Status
	}
	return &er
}
// getContentType returns the response's Content-Type header value
// (empty string when the header is absent).
func getContentType(r *http.Response) string {
	return r.Header.Get("Content-Type")
}
|
package datetime
import (
"github.com/project-flogo/core/data"
"github.com/project-flogo/core/support/log"
"time"
"github.com/project-flogo/core/data/expression/function"
)
// DateFormatDefault is the Go reference-time layout used to render the
// current date followed by the numeric zone offset.
const DateFormatDefault = "2006-01-02-07:00"

// CurrentDate is a flogo expression function that returns today's date.
type CurrentDate struct {
}
// init registers the currentDate function with flogo's expression engine.
func init() {
	function.Register(&CurrentDate{})
}
// Name returns the identifier used to reference this function in
// expressions.
func (s *CurrentDate) Name() string {
	return "currentDate"
}
// Sig declares the function signature: no parameters, not variadic.
func (s *CurrentDate) Sig() (paramTypes []data.Type, isVariadic bool) {
	return []data.Type{}, false
}
// Eval returns the current UTC date formatted with DateFormatDefault.
// Any arguments passed in d are ignored (the signature declares none).
func (s *CurrentDate) Eval(d ...interface{}) (interface{}, error) {
	log.RootLogger().Debugf("Returns the current date with timezone")
	return time.Now().UTC().Format(DateFormatDefault), nil
}
|
package sync
//
// Copyright (c) 2019 ARM Limited.
//
// SPDX-License-Identifier: MIT
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
import (
. "devicedb/cluster"
. "devicedb/error"
. "devicedb/logging"
. "devicedb/rest"
. "devicedb/partition"
"io"
"net/http"
"strconv"
"encoding/json"
"github.com/gorilla/mux"
)
// BucketSyncHTTP serves the merkle-tree sync endpoints for site buckets.
// PartitionPool resolves partition numbers to live partitions, and
// ClusterConfigController maps site IDs to partition numbers.
type BucketSyncHTTP struct {
	PartitionPool PartitionPool
	ClusterConfigController ClusterConfigController
}
// Attach registers the bucket-sync HTTP endpoints on router. All three
// GET endpoints resolve the site's partition, acquire the site, and
// verify the bucket exists before answering; 404 with a JSON error body
// is returned when the site or bucket is not present at this node.
// NOTE(review): the partition/site/bucket resolution sequence is
// duplicated verbatim across the three handlers — candidate for
// extraction into a helper.
func (bucketSync *BucketSyncHTTP) Attach(router *mux.Router) {
	// GET the merkle tree depth of a bucket.
	router.HandleFunc("/sites/{siteID}/buckets/{bucket}/merkle", func(w http.ResponseWriter, r *http.Request) {
		siteID := mux.Vars(r)["siteID"]
		bucketName := mux.Vars(r)["bucket"]
		partitionNumber := bucketSync.ClusterConfigController.ClusterController().Partition(siteID)
		partition := bucketSync.PartitionPool.Get(partitionNumber)
		if partition == nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName)
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusNotFound)
			io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n")
			return
		}
		site := partition.Sites().Acquire(siteID)
		defer partition.Sites().Release(siteID)
		if site == nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName)
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusNotFound)
			io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n")
			return
		}
		if site.Buckets().Get(bucketName) == nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Bucket does not exist at this site", siteID, bucketName)
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusNotFound)
			io.WriteString(w, string(EBucketDoesNotExist.JSON()) + "\n")
			return
		}
		responseMerkleDepth := MerkleTree{
			Depth: site.Buckets().Get(bucketName).MerkleTree().Depth(),
		}
		body, _ := json.Marshal(&responseMerkleDepth)
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		w.WriteHeader(http.StatusOK)
		io.WriteString(w, string(body))
	}).Methods("GET")
	// GET the key/value pairs stored under a merkle node.
	router.HandleFunc("/sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys", func(w http.ResponseWriter, r *http.Request) {
		siteID := mux.Vars(r)["siteID"]
		bucketName := mux.Vars(r)["bucket"]
		partitionNumber := bucketSync.ClusterConfigController.ClusterController().Partition(siteID)
		partition := bucketSync.PartitionPool.Get(partitionNumber)
		if partition == nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName)
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusNotFound)
			io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n")
			return
		}
		site := partition.Sites().Acquire(siteID)
		defer partition.Sites().Release(siteID)
		if site == nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName)
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusNotFound)
			io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n")
			return
		}
		if site.Buckets().Get(bucketName) == nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Bucket does not exist at this site", siteID, bucketName)
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusNotFound)
			io.WriteString(w, string(EBucketDoesNotExist.JSON()) + "\n")
			return
		}
		// nodeID must fit in a uint32 (merkle node index).
		nodeID, err := strconv.ParseUint(mux.Vars(r)["nodeID"], 10, 32)
		if err != nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: nodeID was not properly formatted", siteID, bucketName, mux.Vars(r)["nodeID"])
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusBadRequest)
			io.WriteString(w, string(EMerkleRange.JSON()) + "\n")
			return
		}
		siblingSetIter, err := site.Buckets().Get(bucketName).GetSyncChildren(uint32(nodeID))
		if err != nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: %v", siteID, bucketName, mux.Vars(r)["nodeID"], err.Error())
			var code int
			var body string
			// Map domain errors to status codes; unknown errors are
			// reported as storage errors.
			if err == EMerkleRange {
				code = http.StatusBadRequest
				body = string(EMerkleRange.JSON())
			} else if err == EStorage {
				code = http.StatusInternalServerError
				body = string(EStorage.JSON())
			} else {
				code = http.StatusInternalServerError
				body = string(EStorage.JSON())
			}
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(code)
			io.WriteString(w, body + "\n")
			return
		}
		responseMerkleKeys := MerkleKeys{
			Keys: make([]Key, 0),
		}
		defer siblingSetIter.Release()
		// Drain the iterator into the response payload.
		for siblingSetIter.Next() {
			responseMerkleKeys.Keys = append(responseMerkleKeys.Keys, Key{
				Key: string(siblingSetIter.Key()),
				Value: siblingSetIter.Value(),
			})
		}
		if siblingSetIter.Error() != nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: Sibling set iterator error: %v", siteID, bucketName, mux.Vars(r)["nodeID"], siblingSetIter.Error())
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusInternalServerError)
			io.WriteString(w, string(EStorage.JSON()) + "\n")
			return
		}
		body, _ := json.Marshal(&responseMerkleKeys)
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		w.WriteHeader(http.StatusOK)
		io.WriteString(w, string(body))
	}).Methods("GET")
	router.HandleFunc("/sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}", func(w http.ResponseWriter, r *http.Request) {
		// Get the hash of a node
		siteID := mux.Vars(r)["siteID"]
		bucketName := mux.Vars(r)["bucket"]
		partitionNumber := bucketSync.ClusterConfigController.ClusterController().Partition(siteID)
		partition := bucketSync.PartitionPool.Get(partitionNumber)
		if partition == nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName)
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusNotFound)
			io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n")
			return
		}
		site := partition.Sites().Acquire(siteID)
		defer partition.Sites().Release(siteID)
		if site == nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName)
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusNotFound)
			io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n")
			return
		}
		if site.Buckets().Get(bucketName) == nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Bucket does not exist at this site", siteID, bucketName)
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusNotFound)
			io.WriteString(w, string(EBucketDoesNotExist.JSON()) + "\n")
			return
		}
		nodeID, err := strconv.ParseUint(mux.Vars(r)["nodeID"], 10, 32)
		if err != nil {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: nodeID was not properly formatted", siteID, bucketName, mux.Vars(r)["nodeID"])
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusBadRequest)
			io.WriteString(w, string(EMerkleRange.JSON()) + "\n")
			return
		}
		// Reject node indices beyond the tree's node limit.
		if nodeID >= uint64(site.Buckets().Get(bucketName).MerkleTree().NodeLimit()) {
			Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: nodeID out of range", siteID, bucketName, mux.Vars(r)["nodeID"])
			w.Header().Set("Content-Type", "application/json; charset=utf8")
			w.WriteHeader(http.StatusBadRequest)
			io.WriteString(w, string(EMerkleRange.JSON()) + "\n")
			return
		}
		nodeHash := site.Buckets().Get(bucketName).MerkleTree().NodeHash(uint32(nodeID))
		responseMerkleNodeHash := MerkleNode{
			Hash: nodeHash,
		}
		body, _ := json.Marshal(&responseMerkleNodeHash)
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		w.WriteHeader(http.StatusOK)
		io.WriteString(w, string(body))
	}).Methods("GET")
}
package testing
import (
"reflect"
"testing"
"github.com/kr/pretty"
)
// DiffVisible is a flag which dictates whether the diff message should be shown failed assertions
var DiffVisible = true

// Testable is an interface that other structs can implement to aid in testing
type Testable interface {
	// Title returns the sub-test name passed to t.Run.
	Title() string
	// Assert performs the actual comparison against t.
	Assert(t *testing.T)
}
// Assert runs each Testable as its own named sub-test via t.Run.
func Assert(t *testing.T, tests ...Testable) {
	for i := range tests {
		t.Run(tests[i].Title(), tests[i].Assert)
	}
}
// Tupple is a utility function to store the results of functions with
// multiple returns, e.g. Tupple(f()) captures both value and error.
// (Note: the returned slice aliases the variadic argument.)
func Tupple(a ...interface{}) []interface{} {
	return a
}
// Test is a simple Testable struct with a single return value
type Test struct {
	Name string
	Actual interface{}
	Expected interface{}
}

// Title returns a string to display on the console when running the test
func (test Test) Title() string {
	return test.Name
}

// Assert is the function which gets run to compare the Test results
func (test Test) Assert(t *testing.T) {
	CompareValues(t, test.Actual, test.Expected)
}
// TestWithErr is a Testable struct with a return value and an error.
// Actual and Expected are (value, error) pairs, typically built with Tupple.
type TestWithErr struct {
	Name string
	Actual []interface{}
	Expected []interface{}
}

// Title returns a string to display on the console when running the test
func (test TestWithErr) Title() string {
	return test.Name
}

// Assert is the function which gets run to compare the TestWithErr results.
// The value (index 0) is only compared when the error (index 1) matched.
func (test TestWithErr) Assert(t *testing.T) {
	if CompareErrors(t, test.Actual[1], test.Expected[1]) {
		CompareValues(t, test.Actual[0], test.Expected[0])
	}
}
// TestErr is a Testable struct comparing an actual error against a bool
// expectation (true = an error is expected).
type TestErr struct {
	Name string
	Actual error
	Expected bool
}

// Title returns a string to display on the console when running the test
func (test TestErr) Title() string {
	return test.Name
}

// Assert is the function which gets run to compare the TestErr results
func (test TestErr) Assert(t *testing.T) {
	CompareErrors(t, test.Actual, test.Expected)
}
// CompareValues does a deep compare of the values and types of the
// passed actual/expected pair, failing t (with an optional diff) on
// mismatch. The type is only printed when the types differ.
func CompareValues(t *testing.T, actual interface{}, expected interface{}) {
	if !reflect.DeepEqual(actual, expected) {
		showDiff(t, actual, expected)
		if reflect.TypeOf(actual) == reflect.TypeOf(expected) {
			t.Errorf("Expected: %v, Actual: %v", expected, actual)
		} else {
			t.Errorf("Expected: [%T]%v, Actual: [%T]%v", expected, expected, actual, actual)
		}
	}
}
// CompareErrors compares an actual error against expectedErr, which is
// expected to be a bool (true = an error should be present). It reports
// whether the expectation held.
// NOTE(review): (actualErr != nil) != expectedErr compares a bool to an
// interface{}; a non-bool expectedErr always counts as a mismatch —
// confirm callers only pass bools.
func CompareErrors(t *testing.T, actualErr interface{}, expectedErr interface{}) bool {
	if (actualErr != nil) != expectedErr {
		showDiff(t, actualErr, expectedErr)
		t.Errorf("Expected error: %v, Actual error: %v", expectedErr, actualErr)
		return false
	}
	return true
}
// CompareValuesWithErrors compares (value, error) tuples; the values are
// only compared when the error expectation matched.
func CompareValuesWithErrors(t *testing.T, actual []interface{}, expected []interface{}) {
	if CompareErrors(t, actual[1], expected[1]) {
		CompareValues(t, actual[0], expected[0])
	}
}
// showDiff logs a pretty-printed diff of actual vs expected, gated by
// the package-level DiffVisible flag.
func showDiff(t *testing.T, actual interface{}, expected interface{}) {
	if !DiffVisible {
		return
	}
	t.Log("↓↓↓↓↓↓↓↓↓↓ DIFF ↓↓↓↓↓↓↓↓↓↓")
	t.Log(pretty.Diff(actual, expected))
	t.Log("↑↑↑↑↑↑↑↑↑↑ DIFF ↑↑↑↑↑↑↑↑↑↑")
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package health
import (
"context"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/croshealthd"
"chromiumos/tast/local/jsontypes"
"chromiumos/tast/testing"
)
// signalStrengthStats mirrors cros_healthd's signal-strength statistics
// for one network.
type signalStrengthStats struct {
	Average float32 `json:"average"`
	Deviation float32 `json:"deviation"`
	Samples []uint8 `json:"samples"`
}

// networkInfo mirrors one network entry in cros_healthd's telemetry
// output; pointer fields are optional in the JSON payload.
type networkInfo struct {
	PortalState string `json:"portal_state"`
	State string `json:"state"`
	Type string `json:"type"`
	GUID *string `json:"guid"`
	Ipv4Address *string `json:"ipv4_address"`
	Ipv6Addresses *string `json:"ipv6_addresses"`
	MacAddress *string `json:"mac_address"`
	Name *string `json:"name"`
	SignalStrength *jsontypes.Uint32 `json:"signal_strength"`
	SignalStrengthStats *signalStrengthStats `json:"signal_strength_stats"`
}

// networkResult is the top-level network-telemetry payload.
type networkResult struct {
	Networks []networkInfo `json:"networks"`
}
// init registers the ProbeNetworkInfo test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: ProbeNetworkInfo,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc: "Check that we can probe cros_healthd for network info",
		Contacts: []string{
			"cros-tdm-tpe-eng@google.com",
			"khegde@google.com",
			"cros-network-health@google.com",
		},
		Attr: []string{"group:mainline"},
		SoftwareDeps: []string{"chrome", "diagnostics"},
		Fixture: "crosHealthdRunning",
	})
}
// ProbeNetworkInfo verifies that cros_healthd reports network telemetry,
// polling until at least one network entry is populated.
func ProbeNetworkInfo(ctx context.Context, s *testing.State) {
	// Request only the network telemetry category.
	params := croshealthd.TelemParams{Category: croshealthd.TelemCategoryNetwork}
	// If this test is run right after chrome is started, it's possible that the
	// network health information has not been populated. Poll the routine until
	// network information is present.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		var result networkResult
		// NOTE(review): s.Fatal inside the poll closure aborts the whole
		// test, so telem-command failures are never retried — confirm this
		// is intended rather than returning err to keep polling.
		if err := croshealthd.RunAndParseJSONTelem(ctx, params, s.OutDir(), &result); err != nil {
			s.Fatal("Failed to run telem command: ", err)
		}
		// Every system should have at least one network device populated. If
		// not, re-poll the routine.
		if len(result.Networks) < 1 {
			return errors.New("no network info populated")
		}
		return nil
	}, &testing.PollOptions{Timeout: 10 * time.Second}); err != nil {
		s.Fatal("Timed out waiting for network health info: ", err)
	}
}
|
package gateway
import "github.com/gobjserver/gobjserver/core/entity"
// ObjectGateway abstracts persistence for dynamically-named object
// collections: listing known object names plus CRUD on instances.
type ObjectGateway interface {
	// FindAll lists the names of all known object collections.
	FindAll() ([]string, error)
	// Insert stores instance under objectName and returns the created entity.
	Insert(objectName string, instance interface{}) (*entity.Object, error)
	// Find returns every object in the named collection.
	Find(objectName string) []*entity.Object
	// FindByID fetches one object by its id.
	FindByID(objectName string, objectID string) (*entity.Object, error)
	// Update replaces the object identified by objectID with instance.
	Update(objectName string, objectID string, instance *entity.Object) (*entity.Object, error)
	// Delete removes the object, reporting whether a deletion occurred.
	Delete(objectName string, objectID string) (bool, error)
}
|
package integration_test
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
qstsv1a1 "code.cloudfoundry.org/quarks-operator/pkg/kube/apis/quarksstatefulset/v1alpha1"
utils "code.cloudfoundry.org/quarks-utils/testing/integration"
"code.cloudfoundry.org/quarks-utils/testing/machine"
helper "code.cloudfoundry.org/quarks-utils/testing/testhelper"
)
// Integration tests for the active/passive pod designation feature: the
// operator probes pods and labels passing ones with
// quarks.cloudfoundry.org/pod-active.
//
// Fixes vs. the previous revision:
//   - Expect(qSts).NotTo(Equal(nil)) always passed, because a typed pointer
//     never deep-equals untyped nil; replaced with NotTo(BeNil()).
//   - Typos in test descriptions ("pasive", "Excuting") corrected.
var _ = Describe("QuarksStatefulSetActivePassive", func() {
	var (
		// podNameByIndex builds the pod name for a given ordinal index.
		podNameByIndex = func(podName, index string) string {
			return fmt.Sprintf("%s-%s", podName, index)
		}
		qStsName, podDesignationLabel, labelKey, eventReason, patchPath, patchValue, patchOp string
	)
	BeforeEach(func() {
		// Values required to define a patch mechanism
		patchPath = fmt.Sprintf("%s%s%s", "/metadata/labels/quarks.cloudfoundry.org", "~1", "pod-active")
		patchValue = "true"
		patchOp = "add"
		eventReason = "active-passive"
		qStsName = fmt.Sprintf("test-ap-qsts-%s", helper.RandString(5))
		podDesignationLabel = "quarks.cloudfoundry.org/pod-active=active"
		labelKey = "quarks.cloudfoundry.org/pod-active"
	})
	AfterEach(func() {
		Expect(env.WaitForPodsDelete(env.Namespace)).To(Succeed())
		// Skipping wait for PVCs to be deleted until the following is fixed
		// https://www.pivotaltracker.com/story/show/166896791
		// Expect(env.WaitForPVCsDelete(env.Namespace)).To(Succeed())
	})
	Context("when pod-active label is not present and probe passes", func() {
		sleepCMD := []string{"/bin/sh", "-c", "sleep 2"}
		It("should label a single pod out of one", func() {
			By("Creating a QuarksStatefulSet with a valid CRD probe cmd")
			var qSts *qstsv1a1.QuarksStatefulSet
			qSts, tearDown, err := env.CreateQuarksStatefulSet(env.Namespace, env.QstsWithProbeSinglePod(
				qStsName,
				sleepCMD,
			))
			Expect(err).NotTo(HaveOccurred())
			Expect(qSts).NotTo(BeNil())
			defer func(tdf machine.TearDownFunc) { Expect(tdf()).To(Succeed()) }(tearDown)
			By("Wait for pod with pod-active label to be ready")
			err = env.WaitForPods(env.Namespace, podDesignationLabel)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for pod with index 0 to become active")
			err = env.WaitForPodLabelToExist(env.Namespace, fmt.Sprintf("%s-0", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
		})
	})
	Context("when pod-active label is present in one pod and probe fails", func() {
		cmdSleepTypo := []string{"/bin/sh", "-c", "sleeps 2"}
		It("should ensure all pods are passive", func() {
			By("Creating a QuarksStatefulSet with pods that contain a wrong command")
			qSts, tearDown, err := env.CreateQuarksStatefulSet(env.Namespace, env.QstsWithProbeMultiplePods(
				qStsName,
				cmdSleepTypo,
			))
			Expect(err).NotTo(HaveOccurred())
			Expect(qSts).NotTo(BeNil())
			defer func(tdf machine.TearDownFunc) { Expect(tdf()).To(Succeed()) }(tearDown)
			By("Waiting for pod with index 2 to be ready")
			// wait till pod with the highest index is ready
			err = env.WaitForPodReady(env.Namespace, podNameByIndex(qStsName, "2"))
			Expect(err).NotTo(HaveOccurred())
			By("Adding the pod-active label to pod with index 0")
			err = env.PatchPod(env.Namespace, podNameByIndex(qStsName, "0"), patchOp, patchPath, patchValue)
			Expect(err).NotTo(HaveOccurred())
			By("Checking that no pods are marked as active")
			err = env.WaitForPodLabelToNotExist(env.Namespace, fmt.Sprintf("%s-0", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
			err = env.WaitForPodLabelToNotExist(env.Namespace, fmt.Sprintf("%s-1", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
			err = env.WaitForPodLabelToNotExist(env.Namespace, fmt.Sprintf("%s-2", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
		})
	})
	Context("when pod-active label is present in multiple pods and only one probe pass", func() {
		// two set of cmds, one that runs as the CRD probe
		// the second one, runs as a patch, so that the next CRD probe execution will pass
		cmdCatScript := []string{"/bin/sh", "-c", "cat /tmp/busybox-script.sh"}
		cmdTouchScript := []string{"/bin/sh", "-c", "touch /tmp/busybox-script.sh"}
		containerName := "busybox"
		It("should ensure only one pod is active", func() {
			By("Creating a QuarksStatefulSet with pods that contain a probe that will initially fail")
			qSts, tearDown, err := env.CreateQuarksStatefulSet(env.Namespace, env.QstsWithProbeMultiplePods(
				qStsName,
				cmdCatScript,
			))
			Expect(err).NotTo(HaveOccurred())
			Expect(qSts).NotTo(BeNil())
			defer func(tdf machine.TearDownFunc) { Expect(tdf()).To(Succeed()) }(tearDown)
			By("Waiting for all pods owned by the qsts to be ready")
			// wait till pod with the highest index is ready
			err = env.WaitForPodReady(env.Namespace, podNameByIndex(qStsName, "2"))
			Expect(err).NotTo(HaveOccurred())
			By("Adding the pod-active label to all pods")
			err = env.PatchPod(env.Namespace, podNameByIndex(qStsName, "0"), patchOp, patchPath, patchValue)
			Expect(err).NotTo(HaveOccurred())
			err = env.PatchPod(env.Namespace, podNameByIndex(qStsName, "1"), patchOp, patchPath, patchValue)
			Expect(err).NotTo(HaveOccurred())
			err = env.PatchPod(env.Namespace, podNameByIndex(qStsName, "2"), patchOp, patchPath, patchValue)
			Expect(err).NotTo(HaveOccurred())
			By("Executing in pod with index 1 a cmd to force the probe to pass")
			kubeConfig, err := utils.KubeConfig()
			Expect(err).NotTo(HaveOccurred())
			kclient, err := kubernetes.NewForConfig(kubeConfig)
			Expect(err).NotTo(HaveOccurred())
			p, err := env.GetPod(env.Namespace, fmt.Sprintf("%s-1", qStsName))
			Expect(err).NotTo(HaveOccurred())
			ec, err := env.ExecPodCMD(
				kclient,
				kubeConfig,
				p,
				containerName,
				cmdTouchScript,
			)
			Expect(err).NotTo(HaveOccurred())
			Expect(ec).To(Equal(true))
			By("Checking for a single active pod")
			err = env.WaitForPodLabelToExist(env.Namespace, fmt.Sprintf("%s-1", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
			By("Checking for other pods to be passive")
			err = env.WaitForPodLabelToNotExist(env.Namespace, fmt.Sprintf("%s-0", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
			err = env.WaitForPodLabelToNotExist(env.Namespace, fmt.Sprintf("%s-2", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
		})
	})
	Context("when active passive pod fails a new one becomes active", func() {
		cmdCatScript := []string{"/bin/sh", "-c", "cat /tmp/busybox-script.sh"}
		cmdTouchScript := []string{"/bin/sh", "-c", "touch /tmp/busybox-script.sh"}
		containerName := "busybox"
		It("Creating a QuarksStatefulSet", func() {
			By("Defining a probe cmd that will fail")
			qSts, tearDown, err := env.CreateQuarksStatefulSet(env.Namespace, env.QstsWithProbeMultiplePods(
				qStsName,
				cmdCatScript,
			))
			Expect(err).NotTo(HaveOccurred())
			Expect(qSts).NotTo(BeNil())
			defer func(tdf machine.TearDownFunc) { Expect(tdf()).To(Succeed()) }(tearDown)
			By("Waiting for all pods owned by the qsts to be ready")
			// wait till pod with the highest index is ready
			err = env.WaitForPodReady(env.Namespace, podNameByIndex(qStsName, "2"))
			Expect(err).NotTo(HaveOccurred())
			By("Executing a cmd in pod index 0 to make the probe successful")
			kubeConfig, err := utils.KubeConfig()
			Expect(err).NotTo(HaveOccurred())
			kclient, err := kubernetes.NewForConfig(kubeConfig)
			Expect(err).NotTo(HaveOccurred())
			p, err := env.GetPod(env.Namespace, fmt.Sprintf("%s-0", qStsName))
			Expect(err).NotTo(HaveOccurred())
			ec, err := env.ExecPodCMD(
				kclient,
				kubeConfig,
				p,
				containerName,
				cmdTouchScript,
			)
			Expect(err).NotTo(HaveOccurred())
			Expect(ec).To(Equal(true))
			By("Waiting for pod with index 0 to have the label")
			err = env.WaitForPodLabelToExist(env.Namespace, fmt.Sprintf("%s-0", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
			By("Executing a cmd to make the active pod fail its probe")
			p, err = env.GetPod(env.Namespace, fmt.Sprintf("%s-0", qStsName))
			Expect(err).NotTo(HaveOccurred())
			ec, err = env.ExecPodCMD(
				kclient,
				kubeConfig,
				p,
				containerName,
				[]string{"/bin/sh", "-c", "rm /tmp/busybox-script.sh"},
			)
			Expect(err).NotTo(HaveOccurred())
			Expect(ec).To(Equal(true))
			By("Waiting for pod with index 0 to lose the label")
			err = env.WaitForPodLabelToNotExist(env.Namespace, fmt.Sprintf("%s-0", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
			By("Executing a cmd in another pod to make the probe successful")
			p, err = env.GetPod(env.Namespace, fmt.Sprintf("%s-1", qStsName))
			Expect(err).NotTo(HaveOccurred())
			ec, err = env.ExecPodCMD(
				kclient,
				kubeConfig,
				p,
				containerName,
				cmdTouchScript,
			)
			Expect(err).NotTo(HaveOccurred())
			Expect(ec).To(Equal(true))
			// By("Wait for pods with pod-active label to be ready")
			// err = env.WaitForPods(env.Namespace, podDesignationLabel)
			// Expect(err).NotTo(HaveOccurred())
			By("Waiting for pod with index 1 to have the label")
			err = env.WaitForPodLabelToExist(env.Namespace, fmt.Sprintf("%s-1", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
		})
	})
	Context("when multiple pods pass the probe multiple remain active", func() {
		cmdCatScript := []string{"/bin/sh", "-c", "cat /tmp/busybox-script.sh"}
		cmdTouchScript := []string{"/bin/sh", "-c", "touch /tmp/busybox-script.sh"}
		containerName := "busybox"
		It("Creating a QuarksStatefulSet", func() {
			By("Defining a probe cmd that will fail")
			qSts, tearDown, err := env.CreateQuarksStatefulSet(env.Namespace, env.QstsWithProbeMultiplePods(
				qStsName,
				cmdCatScript,
			))
			Expect(err).NotTo(HaveOccurred())
			Expect(qSts).NotTo(BeNil())
			defer func(tdf machine.TearDownFunc) { Expect(tdf()).To(Succeed()) }(tearDown)
			By("Waiting for all pods owned by the qsts to be ready")
			// wait till pod with the highest index is ready
			err = env.WaitForPodReady(env.Namespace, podNameByIndex(qStsName, "2"))
			Expect(err).NotTo(HaveOccurred())
			By("Executing a cmd in pod index 0 to make the probe successful")
			kubeConfig, err := utils.KubeConfig()
			Expect(err).NotTo(HaveOccurred())
			kclient, err := kubernetes.NewForConfig(kubeConfig)
			Expect(err).NotTo(HaveOccurred())
			p, err := env.GetPod(env.Namespace, fmt.Sprintf("%s-0", qStsName))
			Expect(err).NotTo(HaveOccurred())
			ec, err := env.ExecPodCMD(
				kclient,
				kubeConfig,
				p,
				containerName,
				cmdTouchScript,
			)
			Expect(err).NotTo(HaveOccurred())
			Expect(ec).To(Equal(true))
			By("Executing a cmd in pod index 1 to make the probe successful")
			kubeConfig, err = utils.KubeConfig()
			Expect(err).NotTo(HaveOccurred())
			kclient, err = kubernetes.NewForConfig(kubeConfig)
			Expect(err).NotTo(HaveOccurred())
			p, err = env.GetPod(env.Namespace, fmt.Sprintf("%s-1", qStsName))
			Expect(err).NotTo(HaveOccurred())
			ec, err = env.ExecPodCMD(
				kclient,
				kubeConfig,
				p,
				containerName,
				cmdTouchScript,
			)
			Expect(err).NotTo(HaveOccurred())
			Expect(ec).To(Equal(true))
			By("Waiting for pod with index 0 to have the label")
			err = env.WaitForPodLabelToExist(env.Namespace, fmt.Sprintf("%s-0", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for pod with index 1 to have the label")
			err = env.WaitForPodLabelToExist(env.Namespace, fmt.Sprintf("%s-1", qStsName), labelKey)
			Expect(err).NotTo(HaveOccurred())
		})
	})
	Context("when CRD does not specify a probe periodSeconds", func() {
		cmdDate := []string{"/bin/sh", "-c", "date"}
		It("should ensure the proper event takes place", func() {
			By("Creating a QuarksStatefulSet with pods that contain a probe that will initially fail")
			qSts, tearDown, err := env.CreateQuarksStatefulSet(env.Namespace, env.QstsWithoutProbeMultiplePods(
				qStsName,
				cmdDate,
			))
			Expect(err).NotTo(HaveOccurred())
			Expect(qSts).NotTo(BeNil())
			defer func(tdf machine.TearDownFunc) { Expect(tdf()).To(Succeed()) }(tearDown)
			By("Checking events to match the default periodSeconds")
			objectName := qSts.ObjectMeta.Name
			objectUID := string(qSts.ObjectMeta.UID)
			err = wait.PollImmediate(5*time.Second, 35*time.Second, func() (bool, error) {
				return env.GetNamespaceEvents(env.Namespace,
					objectName,
					objectUID,
					eventReason,
					"periodSeconds probe was not specified, going to default to 30 secs",
				)
			})
			Expect(err).NotTo(HaveOccurred())
		})
	})
})
|
package api
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/pegasus-cloud/iam_client/iam"
"github.com/pegasus-cloud/iam_client/protos"
"github.com/pegasus-cloud/iam_client/utility"
)
type (
	// listGroupsOutput is the JSON response body for the listGroups handler.
	listGroupsOutput struct {
		Groups []group `json:"groups"` // the requested page of groups
		Total  int     `json:"total"`  // total count reported by the IAM service
	}
)
// listGroups handles GET requests for a paginated list of groups. It binds
// limit/offset from the query string, fetches the page from IAM, and maps
// each result into the API's group representation.
func listGroups(c *gin.Context) {
	page := &pagination{}
	if err := c.ShouldBindWith(page, binding.Query); err != nil {
		utility.ResponseWithType(c, http.StatusBadRequest, &utility.ErrResponse{
			Message: err.Error(),
		})
		return
	}
	groups, err := iam.ListGroups(&protos.LimitOffset{
		Limit:  int32(page.Limit),
		Offset: int32(page.Offset),
	})
	if err != nil {
		utility.ResponseWithType(c, http.StatusInternalServerError, &utility.ErrResponse{
			Message: err.Error(),
		})
		return
	}
	out := &listGroupsOutput{}
	for _, g := range groups.Data {
		out.Groups = append(out.Groups, group{
			GroupID:     g.ID,
			DisplayName: g.DisplayName,
			Description: g.Description,
			Extra:       g.Extra,
			CreatedAt:   g.CreatedAt,
			UpdatedAt:   g.UpdatedAt,
		})
	}
	out.Total = int(groups.Count)
	utility.ResponseWithType(c, http.StatusOK, out)
}
|
package texdata
import (
"github.com/go-gl/mathgl/mgl32"
)
// TexData describes a texture entry; the field layout mirrors the texdata
// lump of a BSP-style map file (name is an index into a string table).
type TexData struct {
	Reflectivity      mgl32.Vec3 // RGB reflectivity of the surface
	NameStringTableID int32      // index of the texture name in the string table
	Width             int32
	Height            int32
	ViewWidth         int32
	ViewHeight        int32
}
|
/*
Copyright © 2020 Doppler <support@doppler.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"errors"
"fmt"
"os"
"os/exec"
"regexp"
"strings"
"github.com/DopplerHQ/cli/pkg/http"
"github.com/DopplerHQ/cli/pkg/models"
"github.com/DopplerHQ/cli/pkg/utils"
"github.com/DopplerHQ/cli/pkg/version"
)
// Error wraps a low-level error together with a user-facing message for
// controller operations.
type Error struct {
	Err     error  // underlying cause, may be nil
	Message string // human-readable description shown to the user
}
// Unwrap returns the wrapped underlying error so callers can use
// errors.Is / errors.As on a controller Error.
func (e *Error) Unwrap() error {
	return e.Err
}
// IsNil reports whether the Error carries neither an underlying error nor a
// message, i.e. represents success.
func (e *Error) IsNil() bool {
	noErr := e.Err == nil
	noMsg := e.Message == ""
	return noErr && noMsg
}
// RunInstallScript downloads and executes the CLI install script, returning
// whether an update was installed, the new version string, and any error.
//
// Fix: the error returned by utils.WriteTempFile was previously ignored,
// which could lead to executing a missing/partial file.
func RunInstallScript() (bool, string, Error) {
	// download script
	script, apiErr := http.GetCLIInstallScript()
	if !apiErr.IsNil() {
		return false, "", Error{Err: apiErr.Unwrap(), Message: apiErr.Message}
	}
	// write script to temp file
	tmpFile, err := utils.WriteTempFile("install.sh", script, 0555)
	if err != nil {
		return false, "", Error{Err: err, Message: "Unable to save install script"}
	}
	// clean up temp file once we're done with it
	defer os.Remove(tmpFile)
	// execute script
	utils.LogDebug("Executing install script")
	command := []string{tmpFile, "--debug"}
	out, err := exec.Command(command[0], command[1:]...).CombinedOutput() // #nosec G204
	strOut := string(out)
	// log output before checking error
	utils.LogDebug(fmt.Sprintf("Executing \"%s\"", strings.Join(command, " ")))
	if utils.Debug {
		fmt.Println(strOut)
	}
	if err != nil {
		message := "Unable to install the latest Doppler CLI"
		// check for errors indicating lack of perms
		if strings.Contains(strOut, "dpkg: error: requested operation requires superuser privilege") {
			message = "Error: update failed due to improper permissions\nPlease re-run with `sudo` or run as the root user"
		}
		return false, "", Error{Err: err, Message: message}
	}
	// find installed version within script output
	// Ex: `Installed Doppler CLI v3.7.1`
	re := regexp.MustCompile(`Installed Doppler CLI v(\d+\.\d+\.\d+)`)
	matches := re.FindStringSubmatch(strOut)
	if matches == nil || len(matches) != 2 {
		return false, "", Error{Err: errors.New("Unable to determine new CLI version")}
	}
	// parse latest version string
	newVersion, err := version.ParseVersion(matches[1])
	if err != nil {
		return false, "", Error{Err: err, Message: "Unable to parse new CLI version"}
	}
	wasUpdated := false
	// parse current version string
	currentVersion, currVersionErr := version.ParseVersion(version.ProgramVersion)
	if currVersionErr != nil {
		// unexpected error; just consider it an update and continue executing
		wasUpdated = true
		utils.LogDebug("Unable to parse current CLI version")
		utils.LogDebugError(currVersionErr)
	}
	if !wasUpdated {
		wasUpdated = version.CompareVersions(currentVersion, newVersion) == 1
	}
	return wasUpdated, newVersion.String(), Error{}
}
// CLIChangeLog fetches the latest changelog from the Doppler API and parses
// it into a map keyed by version.
func CLIChangeLog() (map[string]models.ChangeLog, http.Error) {
	response, apiError := http.GetChangelog()
	if apiError.IsNil() {
		return models.ParseChangeLog(response), http.Error{}
	}
	return nil, apiError
}
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"net"
"net/http"
)
// main opens a raw TCP connection to google.com:80, issues a minimal HTTP/1.0
// request, prints the status line, and then exercises the JSON API mockup.
//
// Fix: the TCP connection was never closed; it is now released via defer.
// The dial-failure branch is also flattened into an early return.
func main() {
	network := "tcp"
	host := "google.com"
	port := "80"
	address := net.JoinHostPort(host, port)
	conn, err := net.Dial(network, address)
	if err != nil {
		fmt.Printf("Failed to connect.")
		return
	}
	defer conn.Close()
	fmt.Fprintf(conn, "GET / HTTP/1.0\r\n\r\n")
	// Read just the status line of the response.
	status, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		fmt.Printf("Failure")
		return
	}
	fmt.Printf("Success:\n" + status + "\n")
	fmt.Printf("Making request to API mockup...\n")
	MakeRequest()
}
// MakeRequest fetches a JSON document from the mockup API and prints its
// "ip" field.
//
// Fixes: the response body was never closed (connection leak), the redundant
// `err == nil` re-check is removed, and the JSON decode error is now handled
// instead of silently printing an empty value.
func MakeRequest() {
	resp, err := http.Get("http://www.mocky.io/v2/5db9aec030000074cc5ee55f")
	if err != nil {
		fmt.Printf("Failed to make API call.\n")
		return
	}
	defer resp.Body.Close()
	var result map[string]string
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		fmt.Printf("Failed to decode response.\n")
		return
	}
	fmt.Println(result["ip"])
}
|
package sim
import (
"github.com/faiface/pixel"
"math/rand"
"sync"
"time"
)
// Tunable simulation parameters.
const Partcount = 12 * 60 // total number of particles
const Species = 12        // number of distinct colors; the first Species particles seed the palette
const Radius = 8          // collision radius used by collides/collidesNext
const Threads = 12        // worker goroutines per simulation phase
const Friction = 0.88     // fraction of momentum retained each step
const AttractionPrescaler = 0.0011
const AttractionScaler = 0.015
const CollisionEnabled = false // when false, updatePosition commits positions unconditionally

// Particle is a single simulated point with a position, color and momentum.
type Particle struct {
	Position pixel.Vec
	Color    pixel.RGBA
	momentum pixel.Vec // velocity applied each step (after friction)
	nextPos  pixel.Vec // staged next position, computed before being committed
}

// Sim holds all particles plus the per-color-pair attraction table.
type Sim struct {
	Parts            []*Particle
	attractionLookup map[pixel.RGBA]map[pixel.RGBA]float64 // attraction[colorA][colorB]
	width, height    float64 // world bounds; positions wrap at these edges
}
// Init seeds the global RNG, generates the particle population for a
// width x height world, and derives the attraction table from the first
// Species particles (one per color).
func Init(width, height float64) *Sim {
	rand.Seed(time.Now().UnixNano())
	s := &Sim{
		Parts:  genParts(Partcount, width, height),
		width:  width,
		height: height,
	}
	s.attractionLookup = genLookup(s.Parts[:Species])
	return s
}
// genLookup builds a random attraction coefficient in [-0.5, 0.5) for every
// ordered pair of particle colors.
//
// Fix: the original if/else had byte-identical branches (the self-interaction
// "special case" did the same thing), so the branch is collapsed into a
// single assignment. The sequence of rand calls is unchanged.
func genLookup(parts []*Particle) map[pixel.RGBA]map[pixel.RGBA]float64 {
	lookup := make(map[pixel.RGBA]map[pixel.RGBA]float64, len(parts))
	for _, master := range parts {
		row := make(map[pixel.RGBA]float64, len(parts))
		for _, other := range parts {
			row[other.Color] = rand.Float64() - 0.5
		}
		lookup[master.Color] = row
	}
	return lookup
}
// genParts creates count particles: the first Species particles each get a
// fresh random color (defining the palette), and the remainder clone the
// color of a random palette member. Every cloned particle is re-positioned
// until it no longer overlaps an existing one.
func genParts(count int, width, height float64) []*Particle {
	parts := make([]*Particle, count)
	for i := 0; i < Species; i++ {
		parts[i] = &Particle{
			Position: randVec(width, height),
			Color:    pixel.RGB(rand.Float64(), rand.Float64(), rand.Float64()),
			momentum: pixel.V(0, 0),
		}
	}
	for i := Species; i < Partcount; i++ {
		template := parts[rand.Intn(Species)]
		p := &Particle{
			Position: randVec(width, height),
			Color:    template.Color,
			momentum: pixel.V(0, 0),
		}
		parts[i] = p
		for collides(p, parts) {
			p.Position = randVec(width, height)
		}
	}
	return parts
}
// Step advances the simulation one tick in three fully-synchronized phases:
// momentum update, next-position staging, then position commit. Each phase
// is fanned out across Threads goroutines over equal particle ranges.
func (sim *Sim) Step() {
	chunk := Partcount / Threads
	runPhase := func(phase func(int, int, *sync.WaitGroup)) {
		var wg sync.WaitGroup
		wg.Add(Threads)
		for t := 0; t < Threads; t++ {
			go phase(t*chunk, (t+1)*chunk, &wg)
		}
		wg.Wait()
	}
	runPhase(sim.updateMomentum)
	runPhase(sim.updateNextPosition)
	runPhase(sim.updatePosition)
}
// randVec returns a uniformly random vector within [0, maxX) x [0, maxY).
func randVec(maxX, maxY float64) pixel.Vec {
	x := rand.Float64() * maxX
	y := rand.Float64() * maxY
	return pixel.V(x, y)
}
// collides reports whether p's current Position overlaps any other non-nil
// particle (centers closer than two radii).
func collides(p *Particle, parts []*Particle) bool {
	for _, q := range parts {
		if q == nil || q == p {
			continue
		}
		if p.Position.Sub(q.Position).Len() < Radius*2 {
			return true
		}
	}
	return false
}
// collidesNext is like collides but compares the staged nextPos values,
// letting the commit phase veto moves that would overlap.
func collidesNext(p *Particle, parts []*Particle) bool {
	for _, q := range parts {
		if q == nil || q == p {
			continue
		}
		if p.nextPos.Sub(q.nextPos).Len() < Radius*2 {
			return true
		}
	}
	return false
}
// updatePosition commits the staged nextPos for particles [from, to).
// When CollisionEnabled is set, moves that would overlap another particle's
// staged position are skipped.
func (sim *Sim) updatePosition(from, to int, waitGroup *sync.WaitGroup) {
	defer waitGroup.Done()
	for i := from; i < to; i++ {
		p := sim.Parts[i]
		if CollisionEnabled && collidesNext(p, sim.Parts) {
			continue
		}
		p.Position = p.nextPos
	}
}
// updateNextPosition stages Position + momentum for particles [from, to),
// wrapping coordinates back into [0, width) x [0, height) (torus topology).
// Loops rather than a single mod handle momenta larger than one world span.
func (sim *Sim) updateNextPosition(from, to int, waitGroup *sync.WaitGroup) {
	defer waitGroup.Done()
	for i := from; i < to; i++ {
		p := sim.Parts[i]
		pos := p.Position.Add(p.momentum)
		for pos.X > sim.width {
			pos.X -= sim.width
		}
		for pos.X < 0 {
			pos.X += sim.width
		}
		for pos.Y > sim.height {
			pos.Y -= sim.height
		}
		for pos.Y < 0 {
			pos.Y += sim.height
		}
		p.nextPos = pos
	}
}
// updateMomentum recomputes momentum for particles in [from, to) against
// every particle in the simulation. Each neighbor is considered at its
// direct position and at up to three shifted copies (offset by ±width and/or
// ±height) — presumably so attraction also acts across the wrapped world
// edges (torus), TODO confirm intent with the author.
func (sim *Sim) updateMomentum(from, to int, waitGroup *sync.WaitGroup) {
	for i := from; i < to; i++ {
		// Apply drag before accumulating this step's attraction forces.
		sim.Parts[i].momentum = sim.Parts[i].momentum.Scaled(Friction)
		for _, other := range sim.Parts {
			// Pick the sign of the wrap offsets based on which side of the
			// neighbor this particle is on.
			diffWidth := sim.width
			if sim.Parts[i].Position.X < other.Position.X {
				diffWidth *= -1
			}
			diffHeight := sim.height
			if sim.Parts[i].Position.Y < other.Position.Y {
				diffHeight *= -1
			}
			// Direct (unshifted) attraction toward the neighbor.
			diffVec := other.Position.Sub(pixel.V(sim.Parts[i].Position.X, sim.Parts[i].Position.Y)).Scaled(AttractionPrescaler)
			sim.Parts[i].momentum = sim.Parts[i].momentum.Add(diffVec.Scaled(sim.calculateAttractionMagnitude(sim.Parts[i], other)))
			// Horizontally shifted copy (skipped when X coordinates coincide).
			if sim.Parts[i].Position.X != other.Position.X {
				diffVec = other.Position.Sub(pixel.V(sim.Parts[i].Position.X-diffWidth, sim.Parts[i].Position.Y)).Scaled(AttractionPrescaler)
				sim.Parts[i].momentum = sim.Parts[i].momentum.Add(diffVec.Scaled(sim.calculateAttractionMagnitude(sim.Parts[i], other)))
			}
			// Vertically shifted copy.
			if sim.Parts[i].Position.Y != other.Position.Y {
				diffVec = other.Position.Sub(pixel.V(sim.Parts[i].Position.X, sim.Parts[i].Position.Y-diffHeight)).Scaled(AttractionPrescaler)
				sim.Parts[i].momentum = sim.Parts[i].momentum.Add(diffVec.Scaled(sim.calculateAttractionMagnitude(sim.Parts[i], other)))
			}
			// Diagonally shifted copy.
			if sim.Parts[i].Position.X != other.Position.X && sim.Parts[i].Position.Y != other.Position.Y {
				diffVec = other.Position.Sub(pixel.V(sim.Parts[i].Position.X-diffWidth, sim.Parts[i].Position.Y-diffHeight)).Scaled(AttractionPrescaler)
				sim.Parts[i].momentum = sim.Parts[i].momentum.Add(diffVec.Scaled(sim.calculateAttractionMagnitude(sim.Parts[i], other)))
			}
		}
	}
	waitGroup.Done()
}
// calculateAttractionMagnitude returns the signed attraction strength of
// particle a toward particle b, looked up by color pair and scaled.
//
// Fix: the original contained an unreachable hash-based fallback after the
// return statement; that dead code has been removed.
func (sim *Sim) calculateAttractionMagnitude(a, b *Particle) float64 {
	return sim.attractionLookup[a.Color][b.Color] * AttractionScaler
}
|
package cryptopal
import (
"encoding/base64"
"encoding/hex"
"fmt"
"sort"
"strings"
"unicode"
"unicode/utf8"
)
// HexIn wraps a hex-encoded byte slice and provides the cryptopals set 1
// transformations on it.
type HexIn struct {
	Src []byte // a hex encoded byte slice
}
// ToBase64 decodes the hex-encoded Src and re-encodes it as base64 bytes,
// which the caller may cast to a string.
// see: https://cryptopals.com/sets/1/challenges/1
func (h *HexIn) ToBase64() ([]byte, error) {
	raw := make([]byte, hex.DecodedLen(len(h.Src)))
	n, err := hex.Decode(raw, h.Src)
	if err != nil {
		return nil, fmt.Errorf("can't decode h.Src %s due to %w", h.Src, err)
	}
	out := make([]byte, base64.StdEncoding.EncodedLen(n))
	base64.StdEncoding.Encode(out, raw)
	return out, nil
}
// FixedXOR returns h.Src XORed byte-wise against xor; both operands are hex
// encoded and the result is returned hex encoded.
// see: https://cryptopals.com/sets/1/challenges/2
//
// Fixes: both hex.Decode errors were silently ignored, and a shorter xor
// operand caused an index-out-of-range panic; both are now reported.
func (h *HexIn) FixedXOR(xor []byte) ([]byte, error) {
	srcDecoded := make([]byte, hex.DecodedLen(len(h.Src)))
	xorDecoded := make([]byte, hex.DecodedLen(len(xor)))
	if _, err := hex.Decode(srcDecoded, h.Src); err != nil {
		return nil, fmt.Errorf("can't decode h.Src %s due to %w", h.Src, err)
	}
	if _, err := hex.Decode(xorDecoded, xor); err != nil {
		return nil, fmt.Errorf("can't decode xor operand %s due to %w", xor, err)
	}
	if len(srcDecoded) != len(xorDecoded) {
		return nil, fmt.Errorf("operand length mismatch: %d != %d", len(srcDecoded), len(xorDecoded))
	}
	for i := range srcDecoded {
		srcDecoded[i] ^= xorDecoded[i]
	}
	dest := make([]byte, hex.EncodedLen(len(srcDecoded)))
	hex.Encode(dest, srcDecoded)
	return dest, nil
}
// scoreWord rewards words whose length matches common English word-length
// frequencies (short words most common), penalizing implausibly long runs.
// see https://norvig.com/mayzner.html
func scoreWord(word string) int {
	n := len(word)
	if n > 20 {
		return 0
	}
	switch {
	case n == 1 || n == 9 || n == 10:
		return 2
	case n >= 2 && n <= 4:
		return 10
	case n >= 5 && n <= 8:
		return 4
	}
	return 1
}
// singleXOR XORs every byte of input with key and returns the result as a
// fresh slice, leaving input untouched.
func singleXOR(input []byte, key byte) []byte {
	out := make([]byte, len(input))
	for i := range input {
		out[i] = input[i] ^ key
	}
	return out
}
// scorePlaintext heuristically rates how much input looks like English text:
// letters and printable characters earn points, punctuation and control
// characters lose them, and plausible word lengths add a bonus. Invalid
// UTF-8 scores zero outright.
func scorePlaintext(input []byte) (score int) {
	if !utf8.Valid(input) {
		return 0
	}
	for _, b := range input {
		r := rune(b)
		if unicode.IsLetter(r) {
			score += 2
		}
		if unicode.IsPrint(r) {
			score++
		}
		if unicode.IsPunct(r) {
			score--
		}
		if unicode.IsControl(r) {
			score -= 2
		}
	}
	for _, w := range strings.Fields(string(input)) {
		score += scoreWord(w)
	}
	return score
}
// SingleXOR brute-forces all 256 single-byte keys against the hex-decoded
// Src and returns the key whose plaintext scores highest, along with that
// score and the corresponding plaintext bytes.
func (h *HexIn) SingleXOR() (byte, int, []byte) {
	decoded := make([]byte, hex.DecodedLen(len(h.Src)))
	hex.Decode(decoded, h.Src)
	scores := make([]int, 256)
	plains := make([][]byte, 256)
	for k := 0; k < 256; k++ {
		plains[k] = singleXOR(decoded, byte(k))
		scores[k] = scorePlaintext(plains[k])
	}
	var best byte
	var bestScore int
	for k, s := range scores {
		if s >= bestScore {
			best = byte(k)
			bestScore = s
		}
	}
	return best, bestScore, plains[best]
}
// FindXOR splits blob into whitespace-separated hex strings, scores each
// candidate's best single-byte-XOR decryption, and returns the plaintext of
// the highest-scoring candidate.
func FindXOR(blob string) string {
	candidates := strings.Fields(blob)
	bestIdx, bestScore := 0, 0
	for i, c := range candidates {
		h := &HexIn{Src: []byte(c)}
		if _, score, _ := h.SingleXOR(); score >= bestScore {
			bestScore = score
			bestIdx = i
		}
	}
	winner := &HexIn{Src: []byte(candidates[bestIdx])}
	_, _, plain := winner.SingleXOR()
	return string(plain)
}
// RepeatingKeyXOR encrypts input with repeating-key XOR (Vigenère over
// bytes) and returns the ciphertext hex encoded.
// see: https://cryptopals.com/sets/1/challenges/5
func RepeatingKeyXOR(input string, key string) string {
	plain := []byte(input)
	k := []byte(key)
	out := make([]byte, len(plain))
	for i := range plain {
		out[i] = plain[i] ^ k[i%len(k)]
	}
	return hex.EncodeToString(out)
}
// hamming returns the Hamming distance (number of differing bits) between
// two equal-length byte slices, or an error on a length mismatch.
func hamming(x, y []byte) (n int, err error) {
	if len(x) != len(y) {
		return n, fmt.Errorf("expected equal length strings got len %d and %d", len(x), len(y))
	}
	for i := range x {
		// Count set bits in the XOR of the two bytes.
		diff := x[i] ^ y[i]
		for diff != 0 {
			n += int(diff & 1)
			diff >>= 1
		}
	}
	return n, nil
}
// scoreSingleXOR returns the most likely value that was xored against a string
func scoreSingleXOR(input []byte) byte {
result := make([]int, 256, 256)
for i := 0; i <= 255; i++ {
key := byte(i)
res := singleXOR(input, key)
score := scorePlaintext(res)
result[i] = score
}
var maxScore int
var key byte
for r := range result {
if result[r] >= maxScore {
key = byte(r)
maxScore = result[r]
}
}
return key
}
// BreakRepeatingXOR attempts to break repeating-key-XOR ciphertext: it ranks
// candidate key sizes by normalized Hamming distance between consecutive
// blocks, then for the top candidates transposes the ciphertext into
// per-key-byte columns and solves each column as a single-byte XOR.
// The returned result is the hex-encoded plaintext for the last candidate
// tried (interface preserved from the original).
// see: https://cryptopals.com/sets/1/challenges/6
//
// Fixes: removed leftover DEBUG fmt.Printf output (the candidate-summary
// loop was also mis-nested inside the key-size loop, printing cumulatively
// on every iteration) and redundant slice capacities.
func BreakRepeatingXOR(cipher []byte) (result string, err error) {
	minKeySize := 2
	maxKeySize := 40
	type distance struct {
		KeySize  int
		Distance float32
	}
	// Rank key sizes: the true key size tends to minimize the normalized
	// Hamming distance between consecutive cipher blocks.
	keyDistances := make([]distance, maxKeySize-minKeySize+1)
	for k := minKeySize; k <= maxKeySize; k++ {
		var dist int
		var iters int
		for i := 0; i < len(cipher)/k-2; i++ {
			x := cipher[i*k : (i+1)*k]
			y := cipher[(i+1)*k : (i+2)*k]
			d, herr := hamming(x, y)
			if herr != nil {
				return result, herr
			}
			dist += d
			iters++
		}
		keyDistances[k-minKeySize] = distance{
			KeySize:  k,
			Distance: float32(dist) / float32(k) / float32(iters),
		}
	}
	sort.Slice(keyDistances, func(i, j int) bool { return keyDistances[i].Distance < keyDistances[j].Distance })
	type cipherStruct struct {
		KeySize          int
		Blocks           [][]byte
		TransposedBlocks [][]byte
		Key              []byte
		Plaintext        string
	}
	// keysizes to test
	topKeySizes := 2
	ciphers := make([]cipherStruct, topKeySizes)
	for i := range ciphers {
		keySize := keyDistances[i].KeySize
		blocks := make([][]byte, 0, len(cipher)/keySize)
		for b := 0; b < (len(cipher) / keySize); b++ {
			blocks = append(blocks, cipher[b*keySize:(b+1)*keySize])
		}
		// Transpose: column j collects every byte encrypted with key byte j,
		// reducing the problem to keySize independent single-byte XOR breaks.
		transposed := make([][]byte, keySize)
		for idx := range blocks {
			for j := range blocks[idx] {
				transposed[j] = append(transposed[j], blocks[idx][j])
			}
		}
		key := make([]byte, 0, keySize)
		for _, t := range transposed {
			key = append(key, scoreSingleXOR(t))
		}
		result = RepeatingKeyXOR(string(cipher), string(key))
		resultB, _ := hex.DecodeString(result)
		ciphers[i] = cipherStruct{
			KeySize:          keySize,
			Blocks:           blocks,
			Key:              key,
			TransposedBlocks: transposed,
			Plaintext:        string(resultB),
		}
	}
	return result, err
}
|
// Package printmailer contains an implementation of the mailer interface that
// prints
package printmailer
import (
"fmt"
mailer "github.com/Nivl/go-mailer"
)
// Compile-time check that *Mailer satisfies the mailer.Mailer interface.
var _ mailer.Mailer = (*Mailer)(nil)
// Mailer is a mailer.Mailer implementation that writes emails to standard
// output instead of sending them.
type Mailer struct {
}
// SendStackTrace prints the message, context map and stacktrace to stdout
// rather than emailing them; it always succeeds.
func (m *Mailer) SendStackTrace(trace []byte, message string, context map[string]string) error {
	const layout = "%s,%#v\n%s"
	fmt.Printf(layout, message, context, trace)
	return nil
}
// Send prints the message's envelope (from, to, subject) and body to stdout
// rather than sending an email; it always succeeds.
func (m *Mailer) Send(msg *mailer.Message) error {
	const layout = "FROM: %s\nTO: %s\nSUBJECT: %s\n%s\n"
	fmt.Printf(layout, msg.From, msg.To, msg.Subject, msg.Body)
	return nil
}
|
package keeper
import (
"math/big"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
"github.com/tendermint/tendermint/libs/log"
"github.com/tharsis/ethermint/x/feemarket/types"
)
// Keeper grants access to the Fee Market module state.
type Keeper struct {
	// Protobuf codec used to (de)serialize module state.
	cdc codec.BinaryCodec
	// Store key required for the Fee Market Prefix KVStore.
	storeKey sdk.StoreKey
	// module specific parameter space that can be configured through governance
	paramSpace paramtypes.Subspace
}
// NewKeeper constructs the fee market module keeper, attaching the module's
// param key table on first use so parameters can be resolved.
func NewKeeper(
	cdc codec.BinaryCodec, storeKey sdk.StoreKey, paramSpace paramtypes.Subspace,
) Keeper {
	// set KeyTable if it has not already been set
	if !paramSpace.HasKeyTable() {
		paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable())
	}
	k := Keeper{
		cdc:        cdc,
		storeKey:   storeKey,
		paramSpace: paramSpace,
	}
	return k
}
// Logger returns a module-specific logger tagged with the module name.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
	return ctx.Logger().With("module", types.ModuleName)
}
// ----------------------------------------------------------------------------
// Parent Block Gas Used
// Required by EIP1559 base fee calculation.
// ----------------------------------------------------------------------------

// GetBlockGasUsed returns the last block gas used value from the store,
// defaulting to zero when nothing has been recorded yet.
func (k Keeper) GetBlockGasUsed(ctx sdk.Context) uint64 {
	bz := ctx.KVStore(k.storeKey).Get(types.KeyPrefixBlockGasUsed)
	if len(bz) == 0 {
		return 0
	}
	return sdk.BigEndianToUint64(bz)
}
// SetBlockGasUsed writes the block gas consumed to the store big-endian encoded.
// CONTRACT: this should be only called during EndBlock.
func (k Keeper) SetBlockGasUsed(ctx sdk.Context, gas uint64) {
	ctx.KVStore(k.storeKey).Set(types.KeyPrefixBlockGasUsed, sdk.Uint64ToBigEndian(gas))
}
// ----------------------------------------------------------------------------
// Parent Base Fee
// Required by EIP1559 base fee calculation.
// ----------------------------------------------------------------------------

// GetBaseFee returns the last base fee value from the store, or nil when no
// base fee has been persisted yet.
func (k Keeper) GetBaseFee(ctx sdk.Context) *big.Int {
	store := ctx.KVStore(k.storeKey)
	bz := store.Get(types.KeyPrefixBaseFee)
	if len(bz) == 0 {
		return nil
	}
	return new(big.Int).SetBytes(bz)
}
// SetBaseFee persists the last base fee value to the store as raw big-endian
// bytes. CONTRACT: this should be only called during EndBlock.
func (k Keeper) SetBaseFee(ctx sdk.Context, baseFee *big.Int) {
	ctx.KVStore(k.storeKey).Set(types.KeyPrefixBaseFee, baseFee.Bytes())
}
|
package test
import (
"bytes"
log "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
"net"
"testing"
)
// TestShell opens an SSH session to a remote host and runs a single command,
// capturing stdout/stderr into buffers. It is an integration test requiring
// live network access to the target host.
//
// NOTE(review): the address and root password below are hardcoded credentials
// committed to source — move them to environment variables / a secret store
// and rotate the password. The HostKeyCallback also accepts any host key,
// which disables host authentication entirely.
func TestShell(t *testing.T) {
	var (
		client  *ssh.Client
		session *ssh.Session
		err     error
	)
	addr := "106.52.6.144:22"
	if client, err = ssh.Dial("tcp", addr, &ssh.ClientConfig{
		User: "root",
		Auth: []ssh.AuthMethod{ssh.Password("#Wjb123456")},
		// Accepts every host key without verification.
		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
			return nil
		},
	}); err != nil {
		log.Error(err)
		return
	}
	defer client.Close()
	if session, err = client.NewSession(); err != nil {
		log.Error(err)
		return
	}
	// Capture remote stdout/stderr separately.
	bufOut := new(bytes.Buffer)
	bufErr := new(bytes.Buffer)
	session.Stdout = bufOut
	session.Stderr = bufErr
	// Run the command on the remote host.
	if err = session.Run("mkdir test"); err != nil {
		log.Error(err)
		return
	}
}
|
package flushqueues
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/uber-go/atomic"
)
// ExclusiveQueues fans work out over a fixed set of priority queues while
// ensuring that at most one op per key is enqueued at a time.
type ExclusiveQueues struct {
	queues     []*PriorityQueue
	index      *atomic.Int32 // round-robin cursor used to pick the next queue
	activeKeys sync.Map      // set of keys currently enqueued or being flushed
	stopped    bool          // set by Stop; read via IsStopped
}
// New creates a set of flush queues sharing a prometheus gauge that tracks
// the current queue depth.
func New(queues int, metric prometheus.Gauge) *ExclusiveQueues {
	f := &ExclusiveQueues{
		queues: make([]*PriorityQueue, queues),
		index:  atomic.NewInt32(0),
	}
	for i := range f.queues {
		f.queues[i] = NewPriorityQueue(metric)
	}
	return f
}
// Enqueue adds the op to the next queue and prevents any other items to be added with this key
//
// LoadOrStore makes the check-and-insert atomic: the original Load followed
// by Store allowed two concurrent Enqueue calls with the same key to both
// pass the check and enqueue the op twice.
func (f *ExclusiveQueues) Enqueue(op Op) error {
	if _, loaded := f.activeKeys.LoadOrStore(op.Key(), struct{}{}); loaded {
		// An op with this key is already in flight; silently drop this one.
		return nil
	}
	return f.Requeue(op)
}
// Dequeue removes the next op from the requested queue. After dequeueing the calling
// process either needs to call ClearKey or Requeue
// NOTE(review): q is not bounds-checked; an out-of-range index panics.
func (f *ExclusiveQueues) Dequeue(q int) Op {
	return f.queues[q].Dequeue()
}
// Requeue adds an op that is presumed to already be covered by activeKeys
func (f *ExclusiveQueues) Requeue(op Op) error {
	// Do the modulo in uint32 space so the index stays non-negative even after
	// the int32 counter wraps: int(f.index.Inc()) % len(...) would produce a
	// negative index (and panic) once Inc overflows past MaxInt32.
	flushQueueIndex := int(uint32(f.index.Inc()) % uint32(len(f.queues)))
	_, err := f.queues[flushQueueIndex].Enqueue(op)
	return err
}
// Clear unblocks the requested op. This should be called only after a flush has been successful
// Deleting the key allows a future Enqueue with the same key to proceed.
func (f *ExclusiveQueues) Clear(op Op) {
	f.activeKeys.Delete(op.Key())
}
// IsEmpty reports whether no keys are currently in flight.
//
// sync.Map has no length operation, so probe for the existence of any entry:
// the Range callback returns false immediately, stopping after at most one.
func (f *ExclusiveQueues) IsEmpty() bool {
	empty := true
	f.activeKeys.Range(func(_, _ interface{}) bool {
		empty = false
		return false // stop after the first entry
	})
	return empty
}
// Stop closes all queues
// NOTE(review): the stopped flag is set without synchronization while
// IsStopped may be read from other goroutines — confirm this race is benign.
func (f *ExclusiveQueues) Stop() {
	f.stopped = true
	for _, q := range f.queues {
		q.Close()
	}
}
// IsStopped reports whether Stop has been called on this set of queues.
func (f *ExclusiveQueues) IsStopped() bool {
	return f.stopped
}
|
package worker
import (
"os"
"time"
gocontext "context"
"github.com/mitchellh/multistep"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/travis-ci/worker/backend"
"github.com/travis-ci/worker/context"
"github.com/travis-ci/worker/metrics"
"go.opencensus.io/trace"
)
// stepDownloadTrace is a multistep step that fetches the job's build trace
// from the backend instance and hands it to a BuildTracePersister.
type stepDownloadTrace struct {
	persister BuildTracePersister // nil disables trace handling entirely
}
// Run downloads the build trace from the job's instance and persists it via
// the configured persister. The step is deliberately best-effort: every exit
// path returns ActionContinue so a trace failure never fails the build.
func (s *stepDownloadTrace) Run(state multistep.StateBag) multistep.StepAction {
	// No persister configured means tracing is disabled.
	if s.persister == nil {
		return multistep.ActionContinue
	}
	ctx := state.Get("ctx").(gocontext.Context)
	defer context.TimeSince(ctx, "step_download_trace_run", time.Now())
	ctx, span := trace.StartSpan(ctx, "DownloadTrace.Run")
	defer span.End()
	buildJob := state.Get("buildJob").(Job)
	processedAt := state.Get("processedAt").(time.Time)
	instance := state.Get("instance").(backend.Instance)
	logger := context.LoggerFromContext(ctx).WithField("self", "step_download_trace")
	// ctx, cancel := gocontext.WithTimeout(ctx, s.uploadTimeout)
	// defer cancel()
	// downloading the trace is best-effort, so we continue in any case
	if !buildJob.Payload().Trace {
		return multistep.ActionContinue
	}
	buf, err := instance.DownloadTrace(ctx)
	if err != nil {
		span.SetStatus(trace.Status{
			Code: trace.StatusCodeUnavailable,
			Message: err.Error(),
		})
		// "Not implemented" and "file missing" are expected conditions, not
		// failures worth reporting: log at info level and move on.
		if err == backend.ErrDownloadTraceNotImplemented || os.IsNotExist(errors.Cause(err)) {
			logger.WithFields(logrus.Fields{
				"err": err,
			}).Info("skipping trace download")
			return multistep.ActionContinue
		}
		// Unexpected failure: count it, log it, report it — but still continue.
		metrics.Mark("worker.job.trace.download.error")
		logger.WithFields(logrus.Fields{
			"err": err,
		}).Error("couldn't download trace")
		context.CaptureError(ctx, err)
		return multistep.ActionContinue
	}
	logger.WithFields(logrus.Fields{
		"since_processed_ms": time.Since(processedAt).Seconds() * 1e3,
	}).Info("downloaded trace")
	err = s.persister.Persist(ctx, buildJob, buf)
	if err != nil {
		metrics.Mark("worker.job.trace.persist.error")
		span.SetStatus(trace.Status{
			Code: trace.StatusCodeUnavailable,
			Message: err.Error(),
		})
		logger.WithFields(logrus.Fields{
			"err": err,
		}).Error("couldn't persist trace")
		context.CaptureError(ctx, err)
		return multistep.ActionContinue
	}
	logger.WithFields(logrus.Fields{
		"since_processed_ms": time.Since(processedAt).Seconds() * 1e3,
	}).Info("persisted trace")
	return multistep.ActionContinue
}
// Cleanup satisfies the multistep.Step interface; this step acquires no
// resources, so there is nothing to release.
func (s *stepDownloadTrace) Cleanup(state multistep.StateBag) {
	// Nothing to clean up
}
|
package parser
// P collects input matchers.
// It embeds the matcher factory and the set of comprehensions used to
// turn raw input strings into commands.
type P struct {
	IMakeMatchers
	Comprehensions
}
// NewParser builds a parser around the given matcher factory, starting
// with an empty comprehension set.
func NewParser(m IMakeMatchers) P {
	return P{m, make(Comprehensions)}
}
// ParseInput to generate a matching command.
// Returns the command found regardless of error.
//
// Each comprehension gets a chance to parse the input; the first one that
// either succeeds (e == nil) or produces a non-nil matcher wins.
// NOTE(review): Comprehensions appears to be ranged like a map, so when
// several comprehensions could match, the winner is iteration-order
// dependent — confirm this nondeterminism is acceptable.
func (parser P) ParseInput(input string) (p *Pattern, m IMatch, err error) {
	matched := false
	for _, c := range parser.Comprehensions {
		if pattern, matcher, e := c.TryParse(input, parser.IMakeMatchers); e == nil || matcher != nil {
			p, m, err = pattern, matcher, e
			matched = true
			break
		}
	}
	if !matched {
		// No comprehension recognized the input at all.
		err = UnknownInput(input)
	}
	return
}
|
package request
import (
"encoding/json"
"fmt"
"io"
"strings"
"github.com/dema501/randomjoke/internal/pkg/request"
)
// FakeSuperAgent has been built for unit-test purposes.
// It serves a canned response body from an in-memory reader instead of
// performing real HTTP requests.
type FakeSuperAgent struct {
	body io.Reader // canned response; consumed on first successful Get
}
// New wraps the given reader in a FakeSuperAgent that satisfies
// request.Maker.
func New(b *strings.Reader) request.Maker {
	return &FakeSuperAgent{
		body: b,
	}
}
// Get implements request.Maker by decoding the canned body into result.
// url and args are ignored. The body is consumed, so one FakeSuperAgent
// can serve only a single successful Get.
func (r *FakeSuperAgent) Get(url string, result interface{}, args ...interface{}) error {
	if err := json.NewDecoder(r.body).Decode(result); err != nil {
		// Error strings are lowercase by Go convention (the original
		// started with "Can't").
		return fmt.Errorf("can't unmarshal response with error: %w", err)
	}
	return nil
}
|
package commands
import (
"fmt"
"os"
"github.com/BSidesSF/ctf-2019/challenges/rsaos/sessions"
)
// PublicKeyCommand exposes the session's RSA public key to any user.
type PublicKeyCommand struct{}
// GetName returns the name used to invoke this command.
func (ec *PublicKeyCommand) GetName() string {
	return "get-publickey"
}
// GetDescription returns a one-line description for help output.
func (ec *PublicKeyCommand) GetDescription() string {
	return "Retrieve the Public Key"
}
// GetUsage returns the multi-line usage text for help output.
func (ec *PublicKeyCommand) GetUsage() string {
	return `get-publickey
Get the Public Key for signed requests.`
}
// IsPrivileged reports whether elevated rights are required; reading the
// public key is unprivileged.
func (ec *PublicKeyCommand) IsPrivileged() bool {
	return false
}
// Dispatch writes the public key parameters to stdout. The argument slice
// is ignored; the command always reports success.
func (ec *PublicKeyCommand) Dispatch(sess *sessions.Session, _ []string) bool {
	fmt.Fprintf(os.Stdout, "Public key parameters:\n%s", sess.RSA.PublicString())
	return true
}
// PrivateKeyCommand exposes the session's RSA private key; unlike the
// public-key command, it is restricted to privileged users.
type PrivateKeyCommand struct{}
// GetName returns the name used to invoke this command.
func (ec *PrivateKeyCommand) GetName() string {
	return "get-privatekey"
}
// GetDescription returns a one-line description for help output.
func (ec *PrivateKeyCommand) GetDescription() string {
	return "Retrieve the Private Key"
}
// GetUsage returns the multi-line usage text for help output.
func (ec *PrivateKeyCommand) GetUsage() string {
	return `get-privatekey
Get the Private Key for signed requests.`
}
// IsPrivileged reports that this command requires elevated rights.
func (ec *PrivateKeyCommand) IsPrivileged() bool {
	return true
}
// Dispatch writes the private key parameters to stdout. The argument slice
// is ignored; the command always reports success.
func (ec *PrivateKeyCommand) Dispatch(sess *sessions.Session, _ []string) bool {
	fmt.Fprintf(os.Stdout, "Private key parameters:\n%s", sess.RSA.PrivateString())
	return true
}
// init registers both key commands with the package's command registry at
// startup.
func init() {
	RegisterCommand(&PublicKeyCommand{})
	RegisterCommand(&PrivateKeyCommand{})
}
|
package users
import (
"bytes"
"net/http"
"net/http/httptest"
"testing"
"github.com/gin-gonic/gin"
"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
)
// initialized guards one-time router setup; router is shared by all tests.
var initialized = false
var router *gin.Engine
// prepareTest lazily builds the shared gin router on first call: it loads
// the repository .env file and registers the user routes via Initialize.
// The godotenv.Load error is deliberately ignored (tests may run without
// a .env file).
// NOTE(review): the unsynchronized bool flag is not safe for t.Parallel();
// use sync.Once if tests are ever parallelized.
func prepareTest() *gin.Engine {
	if !initialized {
		initialized = true
		godotenv.Load("./../../../../../../../.env")
		router = gin.Default()
		Initialize(router)
	}
	return router
}
/*
 * Create a user
 */
// TestCreateUser posts the same user payload three times and expects a
// 201 Created each time.
func TestCreateUser(t *testing.T) {
	prepareTest()
	// Fix: the original payload ended in a literal `\0` after the closing
	// brace, which is trailing garbage rather than valid JSON.
	var jsonStr = []byte(`{
"nome":"Testivaldo TESTANIO",
"sexo":"M",
"altura":1.78,
"peso":58,
"imc":18.31
}`)
	for i := 0; i < 3; i++ {
		// Fix: a fresh recorder is needed per request —
		// httptest.ResponseRecorder keeps the first written status code, so
		// the original's reuse of one recorder made the 2nd and 3rd
		// assertions re-check the 1st response.
		w := httptest.NewRecorder()
		req, _ := http.NewRequest("POST", "/webapi/v1/user", bytes.NewBuffer(jsonStr))
		req.Header.Set("Content-Type", "application/json")
		router.ServeHTTP(w, req)
		assert.Equal(t, http.StatusCreated, w.Code)
	}
}
/*func TestDuplicateUser(t *testing.T) {
prepareTest()
var jsonStr = []byte(`{
"nome":"TestivaldoTESTANIO",
"sexo":"M",
"altura":1.78,
"peso":58,
"imc":18.31
}\0`)
w := httptest.NewRecorder()
req, _ := http.NewRequest("POST", "/webapi/v1/user", bytes.NewBuffer(jsonStr))
router.ServeHTTP(w, req)
assert.Equal(t, http.StatusBadRequest, w.Code)
}*/
/*
* Get user list
*/
/*func TestGetUsers(t *testing.T) {
prepareTest()
w := httptest.NewRecorder()
req, _ := http.NewRequest("GET", "/webapi/v1/user", nil)
router.ServeHTTP(w, req)
assert.Equal(t, http.StatusOK, w.Code)
}
*/
|
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package input
import (
"context"
"fmt"
"math"
"math/big"
"os"
"time"
"unsafe"
"chromiumos/tast/errors"
"chromiumos/tast/local/coords"
"chromiumos/tast/testing"
)
// TouchCoord describes an X or Y coordinate in touchscreen coordinates
// (rather than pixels).
// Valid values lie in [0, Width()) / [0, Height()) of the owning device.
type TouchCoord int32
// TouchscreenEventWriter supports injecting touch events into a touchscreen device.
// It supports multitouch as defined in "Protocol Example B" here:
//
// https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.txt
// https://www.kernel.org/doc/Documentation/input/event-codes.txt
//
// This is partial implementation of the multi-touch specification. Each injected
// touch event contains the following codes:
// - ABS_MT_TRACKING_ID
// - ABS_MT_POSITION_X & ABS_X
// - ABS_MT_POSITION_Y & ABS_Y
// - ABS_MT_PRESSURE & ABS_PRESSURE
// - ABS_MT_TOUCH_MAJOR
// - ABS_MT_TOUCH_MINOR
// - BTN_TOUCH
//
// Any other code, like MSC_TIMESTAMP, is not implemented.
type TouchscreenEventWriter struct {
	rw *RawEventWriter // low-level writer for kernel input events
	virt *os.File // if non-nil, used to hold a virtual device open
	dev string // path to underlying device in /dev/input
	nextTouchID int32 // next ABS_MT_TRACKING_ID to hand out; wraps at maxTrackingID
	width TouchCoord // device width in touchscreen coordinates (pre-rotation)
	height TouchCoord // device height in touchscreen coordinates (pre-rotation)
	maxTouchSlot int // highest ABS_MT_SLOT value reported by the device
	maxTrackingID int // highest ABS_MT_TRACKING_ID value reported by the device
	maxPressure int // highest ABS_MT_PRESSURE value reported by the device
	// clockwise rotation in degree to translate event location. It only supports
	// 0, 90, 180, or 270 degrees.
	rotation int
}
// nextVirtTouchNum gives each virtual touchscreen created by this process a
// unique name suffix.
var nextVirtTouchNum = 1 // appended to virtual touchscreen device name
// touchFrequency is the pause between successive injected touch frames
// (roughly 200 Hz), used by the Swipe/Zoom helpers.
const touchFrequency = 5 * time.Millisecond
// ZoomType represents the zoom type to perform.
type ZoomType int
// Holds all the zoom types that can be performed.
const (
	// ZoomIn moves the two touches outward from the center point.
	ZoomIn ZoomType = iota
	// ZoomOut moves the two touches inward toward the center point.
	ZoomOut
)
// Touchscreen returns an TouchscreenEventWriter to inject events into an arbitrary touchscreen device.
// The first device reporting touchscreen capabilities wins; if none exists,
// a virtual touchscreen is created instead.
func Touchscreen(ctx context.Context) (*TouchscreenEventWriter, error) {
	infos, err := readDevices("")
	if err != nil {
		return nil, errors.Wrap(err, "failed to read devices")
	}
	for _, info := range infos {
		if !info.isTouchscreen() {
			continue
		}
		testing.ContextLogf(ctx, "Opening touchscreen device %+v", info)
		// Get touchscreen properties: bounds, max touches, max pressure and max track id.
		f, err := os.Open(info.path)
		if err != nil {
			return nil, err
		}
		// Note: every path below returns out of the function, so this
		// loop-scoped defer does not accumulate.
		defer f.Close()
		// Query each axis' absInfo via the EVIOCGABS ioctl.
		var infoX, infoY, infoSlot, infoTrackingID, infoPressure absInfo
		for _, entry := range []struct {
			ec EventCode
			dst *absInfo
		}{
			{ABS_X, &infoX},
			{ABS_Y, &infoY},
			{ABS_MT_SLOT, &infoSlot},
			{ABS_MT_TRACKING_ID, &infoTrackingID},
			{ABS_MT_PRESSURE, &infoPressure},
		} {
			if err := ioctl(int(f.Fd()), evIOCGAbs(uint(entry.ec)), uintptr(unsafe.Pointer(entry.dst))); err != nil {
				return nil, err
			}
		}
		// Sanity-check the reported ranges before trusting them.
		if infoTrackingID.maximum < infoSlot.maximum {
			return nil, errors.Errorf("invalid MT tracking ID %d; should be >= max slots %d",
				infoTrackingID.maximum, infoSlot.maximum)
		}
		if infoX.maximum == 0 || infoY.maximum == 0 {
			return nil, errors.Errorf("invalid screen size (%d, %d)", infoX.maximum, infoY.maximum)
		}
		device, err := Device(ctx, info.path)
		if err != nil {
			return nil, err
		}
		return &TouchscreenEventWriter{
			rw: device,
			width: TouchCoord(infoX.maximum),
			height: TouchCoord(infoY.maximum),
			maxTouchSlot: int(infoSlot.maximum),
			maxTrackingID: int(infoTrackingID.maximum),
			maxPressure: int(infoPressure.maximum),
		}, nil
	}
	// If we didn't find a real touchscreen, create a virtual one.
	return VirtualTouchscreen(ctx)
}
// FindPhysicalTouchscreen iterates over devices and returns path for a physical touchscreen,
// otherwise returns boolean indicating a physical touchscreen was not found.
func FindPhysicalTouchscreen(ctx context.Context) (bool, string, error) {
	devs, err := readDevices("")
	if err != nil {
		return false, "", errors.Wrap(err, "failed to read devices")
	}
	for _, d := range devs {
		if !d.isTouchscreen() {
			continue
		}
		// First matching device wins.
		testing.ContextLogf(ctx, "Using existing touch screen device %+v", d)
		return true, d.path, nil
	}
	return false, "", nil
}
// VirtualTouchscreen creates a virtual touchscreen device and returns an EventWriter that injects events into it.
// The device advertises the same identity and axis ranges as a Chromebook
// Slate touchscreen so the rest of the stack treats it as a real panel.
func VirtualTouchscreen(ctx context.Context) (*TouchscreenEventWriter, error) {
	const (
		// Most touchscreens use I2C bus. But hardcoding to USB since it is supported
		// in all Chromebook devices.
		busType = 0x3 // BUS_USB from input.h
		// Device constants taken from Chromebook Slate.
		vendor = 0x2d1f
		product = 0x5143
		version = 0x100
		// Input characteristics.
		props = 1 << INPUT_PROP_DIRECT
		evTypes = 1<<EV_KEY | 1<<EV_ABS | 1<<EV_MSC
		// Abs axes supported in our virtual device.
		absSupportedAxes = 1<<ABS_X | 1<<ABS_Y | 1<<ABS_PRESSURE | 1<<ABS_MT_SLOT |
			1<<ABS_MT_TOUCH_MAJOR | 1<<ABS_MT_TOUCH_MINOR | 1<<ABS_MT_ORIENTATION |
			1<<ABS_MT_POSITION_X | 1<<ABS_MT_POSITION_Y | 1<<ABS_MT_TOOL_TYPE |
			1<<ABS_MT_TRACKING_ID | 1<<ABS_MT_PRESSURE
		// Abs axis constants. Taken from Chromebook Slate.
		axisMaxX = 10404
		axisMaxY = 6936
		axisMaxTracking = 65535
		axisMaxPressure = 255
		axisCoordResolution = 40
	)
	axisMaxTouchSlot := 9
	// Include our PID in the device name to be extra careful in case an old bundle process hasn't exited.
	name := fmt.Sprintf("Tast virtual touchscreen %d.%d", os.Getpid(), nextVirtTouchNum)
	nextVirtTouchNum++
	testing.ContextLogf(ctx, "Creating virtual touchscreen device %q", name)
	// Register the uinput device with the event types, axis bitmaps and
	// per-axis ranges declared above.
	dev, virt, err := createVirtual(name, devID{busType, vendor, product, version}, props, evTypes,
		map[EventType]*big.Int{
			EV_KEY: makeBigInt([]uint64{0x400, 0, 0, 0, 0, 0}), // BTN_TOUCH
			EV_ABS: big.NewInt(absSupportedAxes),
			EV_MSC: big.NewInt(1 << MSC_TIMESTAMP),
		}, map[EventCode]Axis{
			ABS_X: {axisMaxX, 0, 0, 0, axisCoordResolution},
			ABS_Y: {axisMaxY, 0, 0, 0, axisCoordResolution},
			ABS_PRESSURE: {axisMaxPressure, 0, 0, 0, 0},
			ABS_MT_SLOT: {int32(axisMaxTouchSlot), 0, 0, 0, 0},
			ABS_MT_TOUCH_MAJOR: {255, 0, 0, 0, 1},
			ABS_MT_TOUCH_MINOR: {255, 0, 0, 0, 1},
			ABS_MT_ORIENTATION: {1, 0, 0, 0, 0},
			ABS_MT_POSITION_X: {axisMaxX, 0, 0, 0, axisCoordResolution},
			ABS_MT_POSITION_Y: {axisMaxY, 0, 0, 0, axisCoordResolution},
			ABS_MT_TOOL_TYPE: {2, 0, 0, 0, 0},
			ABS_MT_TRACKING_ID: {axisMaxTracking, 0, 0, 0, 0},
			ABS_MT_PRESSURE: {axisMaxPressure, 0, 0, 0, 0},
		})
	if err != nil {
		return nil, err
	}
	// After initializing the virtual device a pause is needed to be able to detect the device.
	// TODO(crbug.com/1015264): Remove the hard-coded sleep.
	if err := testing.Sleep(ctx, 1*time.Second); err != nil {
		return nil, err
	}
	device, err := Device(ctx, dev)
	if err != nil {
		return nil, err
	}
	return &TouchscreenEventWriter{
		rw: device,
		dev: dev,
		virt: virt,
		width: axisMaxX,
		height: axisMaxY,
		maxTouchSlot: axisMaxTouchSlot,
		maxTrackingID: axisMaxTracking,
		maxPressure: axisMaxPressure,
	}, nil
}
// Close closes the touchscreen device.
// The first error encountered is returned, but the virtual device handle
// (when present) is always released.
func (tsw *TouchscreenEventWriter) Close() error {
	err := tsw.rw.Close()
	if tsw.virt == nil {
		return err
	}
	if cerr := tsw.virt.Close(); err == nil {
		err = cerr
	}
	return err
}
// NewMultiTouchWriter returns a new TouchEventWriter instance. numTouches is how many touches
// are going to be used by the TouchEventWriter.
// NOTE(review): the guard rejects numTouches == maxTouchSlot+1 even though
// the error text claims the device supports maxTouchSlot+1 touches (slot
// numbering is 0-based) — confirm which bound is intended.
func (tsw *TouchscreenEventWriter) NewMultiTouchWriter(numTouches int) (*TouchEventWriter, error) {
	if numTouches < 1 || numTouches > tsw.maxTouchSlot {
		return nil, errors.Errorf("requested %d touches; device only supports a max of %d touches", numTouches, tsw.maxTouchSlot+1)
	}
	tw := TouchEventWriter{tsw: tsw, touchStartTime: tsw.rw.nowFunc()}
	tw.initTouchState(numTouches)
	return &tw, nil
}
// NewSingleTouchWriter returns a new SingleTouchEventWriter instance.
// The difference between calling NewSingleTouchWriter() and NewMultiTouchWriter(1)
// is that NewSingleTouchWriter() has the extra helper Move() method.
func (tsw *TouchscreenEventWriter) NewSingleTouchWriter() (*SingleTouchEventWriter, error) {
	stw := SingleTouchEventWriter{TouchEventWriter{tsw: tsw, touchStartTime: tsw.rw.nowFunc()}}
	stw.initTouchState(1)
	return &stw, nil
}
// Width returns the width of the touchscreen device, in touchscreen coordinates.
// This is affected by the rotation of the screen.
func (tsw *TouchscreenEventWriter) Width() TouchCoord {
	switch tsw.rotation {
	case 90, 270:
		// Sideways orientations swap the axes.
		return tsw.height
	default:
		return tsw.width
	}
}
// Height returns the height of the touchscreen device, in touchscreen coordinates.
// This is affected by the rotation of the screen.
func (tsw *TouchscreenEventWriter) Height() TouchCoord {
	switch tsw.rotation {
	case 90, 270:
		// Sideways orientations swap the axes.
		return tsw.width
	default:
		return tsw.height
	}
}
// SetRotation changes the orientation of the touch screen's event to the
// specified degree. The locations of further touch events will be rotated by
// the specified rotation. It will return an error if the specified rotation is
// not supported (only 0, 90, 180 and 270 are accepted).
func (tsw *TouchscreenEventWriter) SetRotation(rotation int) error {
	// Normalize any input (including negatives) into [0, 360).
	normalized := ((rotation % 360) + 360) % 360
	switch normalized {
	case 0, 90, 180, 270:
		tsw.rotation = normalized
		return nil
	default:
		return errors.Errorf("unsupported rotation: %d", normalized)
	}
}
// TouchCoordConverter manages the conversion between locations in DIP and
// the TouchCoord of the touchscreen.
type TouchCoordConverter struct {
	ScaleX float64 // touchscreen units per DIP along X
	ScaleY float64 // touchscreen units per DIP along Y
}
// NewTouchCoordConverter creates a new TouchCoordConverter instance for the
// given size. The scale factors map the given DIP size onto the (rotation
// adjusted) touchscreen extents.
func (tsw *TouchscreenEventWriter) NewTouchCoordConverter(size coords.Size) *TouchCoordConverter {
	return &TouchCoordConverter{
		ScaleX: float64(tsw.Width()) / float64(size.Width),
		ScaleY: float64(tsw.Height()) / float64(size.Height),
	}
}
// ConvertLocation converts a location to TouchCoord.
// The result is truncated toward zero by the TouchCoord conversion.
func (tcc *TouchCoordConverter) ConvertLocation(l coords.Point) (x, y TouchCoord) {
	return TouchCoord(tcc.ScaleX * float64(l.X)), TouchCoord(tcc.ScaleY * float64(l.Y))
}
// TouchEventWriter supports injecting touch events into a touchscreen device.
type TouchEventWriter struct {
	tsw *TouchscreenEventWriter // owning device
	touches []TouchState // one entry per active contact/slot
	touchStartTime time.Time // when this writer was created
	ended bool // true once End() lifted all touches
	isBtnToolFingerEnabled bool // emit BTN_TOOL_FINGER with each frame
	isBtnToolDoubleTapEnabled bool // emit BTN_TOOL_DOUBLETAP with each frame
}
// SingleTouchEventWriter supports injecting a single touch into a touchscreen device.
// It embeds TouchEventWriter and adds single-touch helpers such as Move.
type SingleTouchEventWriter struct {
	TouchEventWriter
}
// TouchState contains the state of a single touch event.
type TouchState struct {
	tsw *TouchscreenEventWriter // owning device (used for bounds/rotation)
	slot int32 // ABS_MT_SLOT value for this contact
	touchID int32 // ABS_MT_TRACKING_ID value for this contact
	touchMinor int32 // ABS_MT_TOUCH_MINOR value
	touchMajor int32 // ABS_MT_TOUCH_MAJOR value
	absPressure int32 // ABS_MT_PRESSURE value
	x TouchCoord // current X, already rotation-adjusted by SetPos
	y TouchCoord // current Y, already rotation-adjusted by SetPos
}
// SetPos sets TouchState X and Y coordinates.
// X and Y must be between [0, touchscreen width) and [0, touchscreen height).
// Input coordinates are expressed in the rotated (logical) space; the switch
// below maps them back onto the device's physical axes before storing them.
func (ts *TouchState) SetPos(x, y TouchCoord) error {
	// Bounds are checked against the rotation-adjusted extents.
	if x < 0 || x >= ts.tsw.Width() || y < 0 || y >= ts.tsw.Height() {
		return errors.Errorf("coordinates (%d, %d) outside valid bounds [0, %d), [0, %d)",
			x, y, ts.tsw.Width(), ts.tsw.Height())
	}
	// Undo the clockwise screen rotation to get physical device coordinates.
	switch ts.tsw.rotation {
	case 90:
		x, y = ts.tsw.width-1-y, x
	case 180:
		x, y = ts.tsw.width-1-x, ts.tsw.height-1-y
	case 270:
		x, y = y, ts.tsw.height-1-x
	}
	ts.x = x
	ts.y = y
	return nil
}
// absInfo corresponds to a input_absinfo struct.
// Taken from: include/uapi/linux/input.h
// NOTE(review): the kernel declares these fields as __s32 (signed); the
// uint32 fields here only match for non-negative values — confirm the
// devices in use never report negative minimums.
type absInfo struct {
	value uint32
	minimum uint32
	maximum uint32
	fuzz uint32
	flat uint32
	resolution uint32
}
// evIOCGAbs returns an encoded Event-Ioctl-Get-Absolute value to be used for ioctl().
// Similar to the EVIOCGABS found in include/uapi/linux/input.h
func evIOCGAbs(ev uint) uint {
	// 0x24 is sizeof(struct input_absinfo): six 32-bit fields.
	const absInfoSize = 0x24
	return ior('E', 0x40+ev, absInfoSize)
}
// evIOCSAbs sets an encoded Event-Ioctl-Set-Absolute value to be used for ioctl().
// Similar to the EVIOCSABS found in include/uapi/linux/input.h
func evIOCSAbs(ev uint) uint {
	// 0x24 is sizeof(struct input_absinfo): six 32-bit fields.
	const absInfoSize = 0x24
	return iow('E', 0xc0+ev, absInfoSize)
}
// kernelEventEntry bundles the (type, code, value) triple of a single kernel
// input event for batch emission.
type kernelEventEntry struct {
	et EventType
	ec EventCode
	val int32
}
// Send sends all the multi-touch events to the kernel.
// Per MT protocol type B, each contact is addressed by selecting its slot
// (ABS_MT_SLOT) before emitting its per-contact axes; the legacy
// single-touch axes (ABS_X/ABS_Y/ABS_PRESSURE) mirror the first contact.
// One SYN at the end commits the whole frame atomically.
func (tw *TouchEventWriter) Send() error {
	// First send the multitouch event codes.
	for _, touch := range tw.touches {
		for _, e := range []kernelEventEntry{
			{EV_ABS, ABS_MT_SLOT, touch.slot},
			{EV_ABS, ABS_MT_TRACKING_ID, touch.touchID},
			{EV_ABS, ABS_MT_POSITION_X, int32(touch.x)},
			{EV_ABS, ABS_MT_POSITION_Y, int32(touch.y)},
			{EV_ABS, ABS_MT_PRESSURE, touch.absPressure},
			{EV_ABS, ABS_MT_TOUCH_MAJOR, touch.touchMajor},
			{EV_ABS, ABS_MT_TOUCH_MINOR, touch.touchMinor},
		} {
			if err := tw.tsw.rw.Event(e.et, e.ec, e.val); err != nil {
				return err
			}
		}
	}
	// Then send the rest of the event codes.
	globalKernelEvents := []kernelEventEntry{
		{EV_KEY, BTN_TOUCH, 1},
		{EV_ABS, ABS_X, int32(tw.touches[0].x)},
		{EV_ABS, ABS_Y, int32(tw.touches[0].y)},
		{EV_ABS, ABS_PRESSURE, tw.touches[0].absPressure},
	}
	// Optional tool flags requested via the setters.
	if tw.isBtnToolFingerEnabled {
		globalKernelEvents = append(globalKernelEvents, kernelEventEntry{et: EV_KEY, ec: BTN_TOOL_FINGER, val: int32(1)})
	}
	if tw.isBtnToolDoubleTapEnabled {
		globalKernelEvents = append(globalKernelEvents, kernelEventEntry{et: EV_KEY, ec: BTN_TOOL_DOUBLETAP, val: int32(1)})
	}
	for _, e := range globalKernelEvents {
		if err := tw.tsw.rw.Event(e.et, e.ec, e.val); err != nil {
			return err
		}
	}
	// Sending a frame marks the touch sequence as active again.
	tw.ended = false
	// And finally sync.
	return tw.tsw.rw.Sync()
}
// End injects a "touch lift" like if someone were lifting the finger or
// stylus from the surface. All active TouchStates are ended.
// Per MT protocol type B, a tracking ID of -1 releases a slot's contact.
func (tw *TouchEventWriter) End() error {
	// Release every contact slot.
	for _, touch := range tw.touches {
		for _, e := range []kernelEventEntry{
			{EV_ABS, ABS_MT_SLOT, touch.slot},
			{EV_ABS, ABS_MT_TRACKING_ID, -1},
		} {
			if err := tw.tsw.rw.Event(e.et, e.ec, e.val); err != nil {
				return err
			}
		}
	}
	// Zero out the legacy single-touch state as well.
	globalEventsToEnd := []kernelEventEntry{
		{EV_ABS, ABS_PRESSURE, 0},
		{EV_KEY, BTN_TOUCH, 0},
	}
	if tw.isBtnToolFingerEnabled {
		globalEventsToEnd = append(globalEventsToEnd, kernelEventEntry{et: EV_KEY, ec: BTN_TOOL_FINGER, val: 0})
	}
	if tw.isBtnToolDoubleTapEnabled {
		globalEventsToEnd = append(globalEventsToEnd, kernelEventEntry{et: EV_KEY, ec: BTN_TOOL_DOUBLETAP, val: 0})
	}
	for _, e := range globalEventsToEnd {
		if err := tw.tsw.rw.Event(e.et, e.ec, e.val); err != nil {
			return err
		}
	}
	// Record the lifted state and reset the one-shot tool flags.
	tw.ended = true
	tw.isBtnToolFingerEnabled = false
	tw.isBtnToolDoubleTapEnabled = false
	return tw.tsw.rw.Sync()
}
// Close cleans up TouchEventWriter. This method must be called after using it,
// possibly with the "defer" statement.
// Any error from End is deliberately ignored: Close is best-effort cleanup.
func (tw *TouchEventWriter) Close() {
	if !tw.ended {
		tw.End()
	}
}
// Swipe performs a swipe movement with a user defined number of touches. The touches are separated by (dx, dy). For
// example, in a 3-touch swipe, the touches begin at (x0, y0), (x0+dx, y0+dy), (x0+2dx, y0+2dy).
// t represents how long the swipe should last. If t is less than 5 milliseconds, 5 milliseconds will be used instead.
// Swipe() does not call End(), allowing the user to concatenate multiple swipes together.
func (tw *TouchEventWriter) Swipe(ctx context.Context, x0, y0, x1, y1, dx, dy TouchCoord, touches int, t time.Duration) error {
	if len(tw.touches) < touches {
		return errors.Errorf("requested %d touches for swipe; got %d", touches, len(tw.touches))
	}
	// One frame every touchFrequency; at least a start and an end frame.
	steps := int(t/touchFrequency) + 1
	// A minimum of two touches are needed. One for the start point and another one for the end point.
	if steps < 2 {
		steps = 2
	}
	// Per-frame increment along each axis.
	deltaX := float64(x1-x0) / float64(steps-1)
	deltaY := float64(y1-y0) / float64(steps-1)
	for i := 0; i < steps; i++ {
		x := x0 + TouchCoord(math.Round(deltaX*float64(i)))
		y := y0 + TouchCoord(math.Round(deltaY*float64(i)))
		// Offset each additional touch by (dx, dy) from the previous one.
		for j := 0; j < touches; j++ {
			jTC := TouchCoord(j)
			if err := tw.touches[j].SetPos(x+jTC*dx, y+jTC*dy); err != nil {
				return err
			}
		}
		if err := tw.Send(); err != nil {
			return err
		}
		if err := testing.Sleep(ctx, touchFrequency); err != nil {
			return errors.Wrap(err, "timeout while doing sleep")
		}
	}
	return nil
}
// touchCoordPoint represents a point, expressed in TouchCoords.
type touchCoordPoint struct {
	X, Y TouchCoord
}
// getPointsBetweenCoords returns all the coordinates between two points, spread out
// across the provided number of steps. Points are capped to the bounds of the touchscreen.
func getPointsBetweenCoords(ts *TouchscreenEventWriter, x0, y0, x1, y1 TouchCoord, steps int) []touchCoordPoint {
	// At least two points are required: the start point and the end point.
	if steps < 2 {
		steps = 2
	}
	// clamp keeps a coordinate within [0, limit).
	clamp := func(v, limit TouchCoord) TouchCoord {
		if v < 0 {
			return 0
		}
		if v >= limit {
			return limit - 1
		}
		return v
	}
	// Per-step increment along each axis.
	stepX := float64(x1-x0) / float64(steps-1)
	stepY := float64(y1-y0) / float64(steps-1)
	points := make([]touchCoordPoint, 0, steps)
	for i := 0; i < steps; i++ {
		points = append(points, touchCoordPoint{
			X: clamp(x0+TouchCoord(math.Round(stepX*float64(i))), ts.Width()),
			Y: clamp(y0+TouchCoord(math.Round(stepY*float64(i))), ts.Height()),
		})
	}
	return points
}
// moveMultipleTouches moves a series of touches through multiple movements. Note that all touches
// must have the same number of movements. The number of touches is equal to len(pointsPerTouch) and
// the number of movements is equal to len(pointsPerTouch[0]).
func (tw *TouchEventWriter) moveMultipleTouches(ctx context.Context, pointsPerTouch ...[]touchCoordPoint) error {
	// If there are no items, exit.
	if len(pointsPerTouch) <= 0 {
		return nil
	}
	// All the points must have the same length.
	requiredLength := len(pointsPerTouch[0])
	for _, curTouch := range pointsPerTouch {
		if len(curTouch) != requiredLength {
			return errors.New("all pointsPerTouch must have the same length")
		}
	}
	// Move all the points through their coordinates at the same rate.
	// Each outer iteration produces one frame containing every touch's
	// position, sent together, followed by a fixed pause.
	for pointNum := 0; pointNum < requiredLength; pointNum++ {
		for touchNum, curTouch := range pointsPerTouch {
			if err := tw.touches[touchNum].SetPos(curTouch[pointNum].X, curTouch[pointNum].Y); err != nil {
				return err
			}
		}
		if err := tw.Send(); err != nil {
			return err
		}
		if err := testing.Sleep(ctx, touchFrequency); err != nil {
			return errors.Wrap(err, "timeout while doing sleep")
		}
	}
	return nil
}
// performPinchZoom performs a pinch zoom using the provided coordinates.
// A zoom in will start at the center and move points to the bottomLeft,
// and topRight. A zoom out will do the inverse.
func (tw *TouchEventWriter) performPinchZoom(ctx context.Context, center, bottomLeft, topRight coords.Point, t time.Duration, zoom ZoomType) error {
	// Ensure enough touches are provided.
	if len(tw.touches) < 2 {
		return errors.New("must have at least two touches to perform a zoom")
	}
	// Set up the points based on the zoom type.
	var leftFingerStart, leftFingerEnd coords.Point
	var rightFingerStart, rightFingerEnd coords.Point
	switch zoom {
	case ZoomIn:
		// Both fingers start at the center and spread outward.
		leftFingerStart = center
		leftFingerEnd = bottomLeft
		rightFingerStart = center
		rightFingerEnd = topRight
	case ZoomOut:
		// Both fingers start at the outside and converge on the center.
		leftFingerStart = bottomLeft
		leftFingerEnd = center
		rightFingerStart = topRight
		rightFingerEnd = center
	default:
		return errors.Errorf("invalid zoom provided: %v", zoom)
	}
	// Perform the zoom over a series of steps.
	steps := int(t/touchFrequency) + 1
	leftFingerPoints := getPointsBetweenCoords(tw.tsw, TouchCoord(leftFingerStart.X), TouchCoord(leftFingerStart.Y), TouchCoord(leftFingerEnd.X), TouchCoord(leftFingerEnd.Y), steps)
	rightFingerPoints := getPointsBetweenCoords(tw.tsw, TouchCoord(rightFingerStart.X), TouchCoord(rightFingerStart.Y), TouchCoord(rightFingerEnd.X), TouchCoord(rightFingerEnd.Y), steps)
	return tw.moveMultipleTouches(ctx, leftFingerPoints, rightFingerPoints)
}
// Zoom performs a pinch-to-zoom where the distance traveled to/from the
// provided center point is d for each finger.
func (tw *TouchEventWriter) Zoom(ctx context.Context, centerX, centerY, d TouchCoord, t time.Duration, zoom ZoomType) error {
	cx, cy, dist := int(centerX), int(centerY), int(d)
	// The two fingers travel diagonally, d units along each axis.
	center := coords.NewPoint(cx, cy)
	bottomLeft := coords.NewPoint(cx-dist, cy+dist)
	topRight := coords.NewPoint(cx+dist, cy-dist)
	return tw.performPinchZoom(ctx, center, bottomLeft, topRight, t, zoom)
}
// ZoomRelativeToSize performs a pinch-to-zoom where the distance traveled, and
// center point are calculated based on the size of the Touch writer's
// dimensions. This function will attempt to use as much of the dimensions
// as possible in order to reliably trigger the zoom.
func (tw *TouchEventWriter) ZoomRelativeToSize(ctx context.Context, t time.Duration, zoom ZoomType) error {
	// Used to shrink the size of the writer in order to utilize as much
	// as possible without reaching the edges.
	// The inset is 1% of the smaller screen dimension.
	sizeInset := int(math.Min(float64(tw.tsw.Width()), float64(tw.tsw.Height())) * .01)
	// Generate a rectangle that is a bit smaller than the
	// dimensions of the writer.
	writerDimensions := coords.NewRect(0, 0, int(tw.tsw.Width()), int(tw.tsw.Height()))
	insetWriterDimensions := writerDimensions.WithInset(sizeInset, sizeInset)
	// Calculate the relevant points which use up as much of the writer's
	// dimensions as possible.
	center := insetWriterDimensions.CenterPoint()
	bottomLeft := insetWriterDimensions.BottomLeft()
	topRight := insetWriterDimensions.TopRight()
	return tw.performPinchZoom(ctx, center, bottomLeft, topRight, t, zoom)
}
// DoubleSwipe performs a swipe movement with two touches. One is from x0/y0 to x1/y1, and the other is x0+d/y0 to x1+d/y1.
// t represents how long the swipe should last.
// If t is less than 5 milliseconds, 5 milliseconds will be used instead.
// DoubleSwipe() does not call End(), allowing the user to concatenate multiple swipes together.
func (tw *TouchEventWriter) DoubleSwipe(ctx context.Context, x0, y0, x1, y1, d TouchCoord, t time.Duration) error {
	// Delegates to Swipe with 2 touches separated horizontally by d.
	return tw.Swipe(ctx, x0, y0, x1, y1, d, 0, 2, t)
}
// SetSize sets the major/minor appropriately for all touches.
// major and minor must be non-negative and major must be >= minor.
// ctx is currently unused; it is kept for signature stability.
func (tw *TouchEventWriter) SetSize(ctx context.Context, major, minor int32) error {
	if major < 0 || minor < 0 {
		// The guard only rejects negatives (zero is allowed), so the message
		// says "non-negative" rather than the original, misleading "positive".
		return errors.Errorf("must be non-negative; got: major=%v, minor=%v", major, minor)
	} else if major < minor {
		return errors.Errorf("major must be greater than or equal to minor; got: major=%v, minor=%v", major, minor)
	}
	for idx := range tw.touches {
		tw.touches[idx].touchMajor = major
		tw.touches[idx].touchMinor = minor
	}
	return nil
}
// SetIsBtnToolFinger Sets the state of the BTN_TOOL_FINGER flag.
// When enabled, subsequent Send calls include BTN_TOOL_FINGER=1.
func (tw *TouchEventWriter) SetIsBtnToolFinger(isEnabled bool) {
	tw.isBtnToolFingerEnabled = isEnabled
}
// SetIsBtnToolDoubleTap Sets the state of the BTN_TOOL_DOUBLETAP flag.
// When enabled, subsequent Send calls include BTN_TOOL_DOUBLETAP=1.
func (tw *TouchEventWriter) SetIsBtnToolDoubleTap(isEnabled bool) {
	tw.isBtnToolDoubleTapEnabled = isEnabled
}
// SetPressure sets the pressure of each touch.
// pressure must be non-negative; zero is accepted by the guard.
func (tw *TouchEventWriter) SetPressure(pressure int32) error {
	if pressure < 0 {
		// The guard admits zero, so the message must not claim "greater
		// than 0" as the original did.
		return errors.New("pressure must be non-negative")
	}
	for idx := range tw.touches {
		tw.touches[idx].absPressure = pressure
	}
	return nil
}
// Move injects a touch event at x and y touchscreen coordinates. This is applied
// only to the first TouchState. Calling this function is equivalent to:
//
// ts := touchEventWriter.TouchState(0)
// ts.SetPos(x, y)
// ts.Send()
func (stw *SingleTouchEventWriter) Move(x, y TouchCoord) error {
	if err := stw.touches[0].SetPos(x, y); err != nil {
		return err
	}
	return stw.Send()
}
// LongPressAt injects a touch event at (x, y) touchscreen coordinates and wait
// a bit to simulate a touch long press. The wait time should be longer than
// chrome's default long press wait time, which is 500ms.
// See ui/events/gesture_detection/gesture_detector.cc in chromium.
// Note that the touch is left down; the caller is responsible for End/Close.
func (stw *SingleTouchEventWriter) LongPressAt(ctx context.Context, x, y TouchCoord) error {
	if err := stw.Move(x, y); err != nil {
		return err
	}
	return testing.Sleep(ctx, 1*time.Second)
}
// SetSize sets the major/minor appropriately for single touch events. Sets the
// major/minor.
// ctx is currently unused; it is kept for signature stability.
func (stw *SingleTouchEventWriter) SetSize(ctx context.Context, major, minor int32) error {
	if len(stw.touches) != 1 {
		// Include the actual count: the original message was truncated and
		// ended with a dangling "is ".
		return errors.Errorf("expected touches size to be 1, is %d", len(stw.touches))
	}
	if major < 0 || minor < 0 {
		// The guard only rejects negatives; zero is allowed.
		return errors.New("major and minor must be non-negative")
	} else if major < minor {
		return errors.New("major must be greater than or equal to minor")
	}
	stw.touches[0].touchMajor = major
	stw.touches[0].touchMinor = minor
	return nil
}
// Swipe performs a swipe movement from x0/y0 to x1/y1.
// t represents how long the swipe should last.
// If t is less than 5 milliseconds, 5 milliseconds will be used instead.
// Swipe() does not call End(), allowing the user to concatenate multiple swipes together.
func (stw *SingleTouchEventWriter) Swipe(ctx context.Context, x0, y0, x1, y1 TouchCoord, t time.Duration) error {
	// One frame every touchFrequency.
	steps := int(t/touchFrequency) + 1
	// A minimum of two touches are needed. One for the start point and another one for the end point.
	if steps < 2 {
		steps = 2
	}
	// Per-frame increment along each axis.
	deltaX := float64(x1-x0) / float64(steps-1)
	deltaY := float64(y1-y0) / float64(steps-1)
	for i := 0; i < steps; i++ {
		x := x0 + TouchCoord(math.Round(deltaX*float64(i)))
		y := y0 + TouchCoord(math.Round(deltaY*float64(i)))
		if err := stw.Move(x, y); err != nil {
			return err
		}
		if err := testing.Sleep(ctx, touchFrequency); err != nil {
			return errors.Wrap(err, "timeout while doing sleep")
		}
	}
	return nil
}
// TouchState returns the TouchState at touchIndex.
// One TouchState represents the state of a single touch contact; the pointer
// aliases the writer's internal slice, so mutations affect subsequent sends.
func (tw *TouchEventWriter) TouchState(touchIndex int) *TouchState {
	return &tw.touches[touchIndex]
}
// initTouchState allocates and populates numTouches TouchStates with default
// pressure/major/minor values and sequentially assigned tracking IDs.
func (tw *TouchEventWriter) initTouchState(numTouches int) {
	// Values taken from "dumps" on an Eve device.
	// Spec says pressure is in arbitrary units. A value around 25% of the max value seems to be "normal".
	// TouchMajor and TouchMinor were also taken from "dumps".
	const (
		defaultTouchMajor = 5
		defaultTouchMinor = 5
	)
	defaultPressure := int32(tw.tsw.maxPressure/4) + 1
	tw.touches = make([]TouchState, numTouches)
	for i := range tw.touches {
		touch := &tw.touches[i]
		touch.tsw = tw.tsw
		touch.absPressure = defaultPressure
		touch.touchMajor = defaultTouchMajor
		touch.touchMinor = defaultTouchMinor
		touch.touchID = tw.tsw.nextTouchID
		touch.slot = int32(i)
		tw.tsw.nextTouchID = (tw.tsw.nextTouchID + 1) % int32(tw.tsw.maxTrackingID)
	}
}
|
package zhash
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
)
// Unmarshaller decodes serialized bytes into the given value.
type Unmarshaller func([]byte, interface{}) error

// Marshaller encodes the given value into serialized bytes.
type Marshaller func(interface{}) ([]byte, error)

// SetMarshallerFunc sets the function used to marshal the hash data when
// writing via Hash.WriteHash.
func (h *Hash) SetMarshallerFunc(fu Marshaller) {
	h.marshal = fu
}

// SetUnmarshallerFunc sets the function used to unmarshal the hash data when
// reading via Hash.ReadHash.
func (h *Hash) SetUnmarshallerFunc(fu Unmarshaller) {
	h.unmarshal = fu
}
// ReadHash unmarshals the hash from r using the function previously set via
// SetUnmarshallerFunc; it returns an error if no unmarshaller is set.
func (h *Hash) ReadHash(r io.Reader) error {
	if h.unmarshal == nil {
		return errors.New("cannot unmarshal, no unmarshaller set")
	}
	raw, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	return h.unmarshal(raw, &h.data)
}
// WriteHash marshals the hash using the function previously set via
// SetMarshallerFunc and writes the result to w; it returns an error if no
// marshaller is set.
func (h Hash) WriteHash(w io.Writer) error {
	if h.marshal == nil {
		return errors.New("cannot marshal hash, no marshaller set")
	}
	raw, err := h.marshal(h.data)
	if err != nil {
		return err
	}
	_, err = w.Write(raw)
	return err
}
// Reader marshals the hash into an in-memory buffer and returns it as an
// io.Reader. The buffer is returned even when WriteHash fails.
func (h Hash) Reader() (io.Reader, error) {
	buf := &bytes.Buffer{}
	err := h.WriteHash(buf)
	return buf, err
}
// String renders the underlying map as indented JSON (via Hash.MarshalJSON),
// returning a fixed error message if marshalling fails.
func (h Hash) String() string {
	out, err := json.MarshalIndent(h, "", "  ")
	if err != nil {
		return "error converting config to json"
	}
	return string(out)
}
// MarshalJSON implements json.Marshaler by encoding the underlying data map.
func (h Hash) MarshalJSON() ([]byte, error) {
	return json.Marshal(h.data)
}
|
package v2
import (
"fmt"
"log"
"sort"
"gopkg.in/yaml.v2"
"github.com/cyberark/secretless-broker/pkg/secretless/plugin"
"github.com/cyberark/secretless-broker/pkg/secretless/plugin/sharedobj"
)
// Config represents a full configuration of Secretless, which is just a list of
// individual Service configurations.
type Config struct {
	Debug    bool       // enables verbose/debug behavior
	Services []*Service // the individual proxy service configurations
}
// String serializes the Config to YAML; it returns an empty string when
// marshalling fails.
func (c Config) String() string {
	raw, err := yaml.Marshal(c)
	if err != nil {
		return ""
	}
	return string(raw)
}
// MarshalYAML serializes Config to the secretless.yml format: a fixed
// "version: 2" header plus a map of service name to serviceYAML.
func (c Config) MarshalYAML() (interface{}, error) {
	services := map[string]*serviceYAML{}
	for _, svc := range c.Services {
		creds := credentialsYAML{}
		for _, cred := range svc.Credentials {
			creds[cred.Name] = struct {
				From string `yaml:"from" json:"from"`
				Get  string `yaml:"get" json:"get"`
			}{
				From: cred.From,
				Get:  cred.Get,
			}
		}
		services[svc.Name] = &serviceYAML{
			Connector:   svc.Connector,
			ListenOn:    string(svc.ListenOn),
			Credentials: creds,
			Config:      svc.ConnectorConfig,
		}
	}
	return struct {
		Version  string                  `yaml:"version" json:"version"`
		Services map[string]*serviceYAML `yaml:"services" json:"services"`
	}{
		Version:  "2",
		Services: services,
	}, nil
}
// NewConfig creates a v2.Config from yaml bytes. Services are returned sorted
// by name for deterministic ordering.
func NewConfig(v2YAML []byte) (*Config, error) {
	cfgYAML, err := newConfigYAML(v2YAML)
	if err != nil {
		return nil, err
	}

	services := make([]*Service, 0, len(cfgYAML.Services))
	for name, svcYAML := range cfgYAML.Services {
		svc, err := NewService(name, svcYAML)
		if err != nil {
			return nil, err
		}
		services = append(services, svc)
	}

	// Map iteration order is random; sort for a stable result.
	sort.Slice(services, func(i, j int) bool {
		return services[i].Name < services[j].Name
	})

	return &Config{Services: services}, nil
}
// NewConfigsByType converts a slice of v2.Service configs into the configs
// needed to actually created ProxyServices -- configsByType. In particular, it
// takes all the http configs and creates proper HTTPServiceConfig objects out
// of them -- grouping the raw v2.Service configs by their listenOn property.
// The remaining services are tcp, and already correspond 1-1 to the services
// we'll run.
// TODO: Eventually the application code should not be dealing directly with
// []Service at all, but the processing into these more appropriate domain
// configs should occur entirely at the border.
func NewConfigsByType(
	uncheckedConfigs []*Service,
	availPlugins plugin.AvailablePlugins,
) ConfigsByType {
	// Get the nil checks out of the way.
	var rawConfigs []Service
	for _, cfg := range uncheckedConfigs {
		if cfg == nil {
			// Hard-coding log here is okay since nils should never occur and
			// we won't be unit testing this. Instead, we'll make the change
			// in the TODO_ above and this code will be deleted then.
			log.Fatalln("Nil configuration is not allowed!")
		}
		rawConfigs = append(rawConfigs, *cfg)
	}

	// Bucket each service by protocol. Note: Go switch cases do not fall
	// through, so no "continue"/"break" statements are needed.
	var httpConfigs, tcpConfigs, sshConfigs, sshAgentConfigs []Service
	for _, cfg := range rawConfigs {
		switch {
		case sharedobj.IsHTTPPlugin(availPlugins, cfg.Connector):
			httpConfigs = append(httpConfigs, cfg)
		case cfg.Connector == "ssh":
			sshConfigs = append(sshConfigs, cfg)
		case cfg.Connector == "ssh-agent":
			sshAgentConfigs = append(sshAgentConfigs, cfg)
		default:
			tcpConfigs = append(tcpConfigs, cfg)
		}
	}

	httpByListenOn := groupedByListenOn(httpConfigs)

	// Now create proper HTTPServiceConfig objects from our map.
	var httpServiceConfigs []HTTPServiceConfig
	for listenOn, configs := range httpByListenOn {
		httpServiceConfig := HTTPServiceConfig{
			SharedListenOn:    listenOn,
			SubserviceConfigs: configs,
		}
		httpServiceConfigs = append(httpServiceConfigs, httpServiceConfig)
	}

	return ConfigsByType{
		HTTP:     httpServiceConfigs,
		SSH:      sshConfigs,
		SSHAgent: sshAgentConfigs,
		TCP:      tcpConfigs,
	}
}
// HTTPServiceConfig represents an HTTP proxy service configuration. Multiple
// http entries within a v2.Service config slice that share a listenOn actually
// represent a single HTTP proxy service, with sub-handlers for different
// traffic. This type captures that fact.
type HTTPServiceConfig struct {
	SharedListenOn    NetworkAddress // address shared by all subservices
	SubserviceConfigs []Service      // the http services grouped under this address
}
// Name returns a human-readable name for an HTTPServiceConfig, derived from
// its shared listen address.
func (cfg *HTTPServiceConfig) Name() string {
	return fmt.Sprintf("HTTP Proxy on %s", cfg.SharedListenOn)
}
// ConfigsByType holds proxy service configuration in a form that directly
// corresponds to the ProxyService objects we want to create. One ProxyService
// will be created for each entry in http, and one for each entry in tcp.
type ConfigsByType struct {
	HTTP     []HTTPServiceConfig // grouped http services, one proxy each
	SSH      []Service           // connector == "ssh"
	SSHAgent []Service           // connector == "ssh-agent"
	TCP      []Service           // everything else
}
// groupedByListenOn returns a map grouping the configs provided by their
// ListenOn property. Merely a helper function to reduce bloat in
// NewConfigsByType.
func groupedByListenOn(httpConfigs []Service) map[NetworkAddress][]Service {
	// The previous implementation linearly scanned the map for an existing
	// group on every iteration (accidentally O(n^2)); a direct map lookup
	// does the same thing in O(1) per config.
	httpByListenOn := make(map[NetworkAddress][]Service, len(httpConfigs))
	for _, httpConfig := range httpConfigs {
		key := httpConfig.ListenOn
		httpByListenOn[key] = append(httpByListenOn[key], httpConfig)
	}
	return httpByListenOn
}
|
package sej
import (
"fmt"
"path"
"runtime"
"sort"
"testing"
"time"
)
// TestWatch verifies that WatchRootDir reports the root directory and every
// shard subdirectory that receives writes.
func TestWatch(t *testing.T) {
	dir := newTestPath(t)
	shardChan := make(chan string)
	go func() {
		if err := WatchRootDir(dir, time.Millisecond, func(dir string) {
			go func() {
				shardChan <- dir
			}()
		}); err != nil {
			// t.Fatal (FailNow) must only be called from the goroutine running
			// the test function; from other goroutines it fails to stop the
			// test. t.Error is the safe alternative here.
			t.Error(err)
		}
	}()
	runtime.Gosched()
	// Write to the root dir and to two shard subdirectories.
	{
		w, err := NewWriter(dir)
		if err != nil {
			t.Fatal(err)
		}
		w.Append(&Message{Key: []byte("a")})
		w.Close()
	}
	{
		w, err := NewWriter(path.Join(dir, "d1"))
		if err != nil {
			t.Fatal(err)
		}
		w.Append(&Message{Key: []byte("a")})
		w.Close()
	}
	{
		w, err := NewWriter(path.Join(dir, "d2"))
		if err != nil {
			t.Fatal(err)
		}
		w.Append(&Message{Key: []byte("a")})
		w.Append(&Message{Key: []byte("b")})
		w.Close()
	}
	time.Sleep(time.Millisecond)
	// Drain whatever notifications arrived; order is nondeterministic.
	var shards []string
	for i := 0; i < 10; i++ {
		select {
		case shard := <-shardChan:
			shards = append(shards, shard)
		default:
		}
	}
	sort.Strings(shards)
	expected := "[" +
		dir + ` ` +
		dir + `/d1 ` +
		dir + `/d2` +
		"]"
	actual := fmt.Sprint(shards)
	if expected != actual {
		t.Fatalf("expect\n%s\ngot\n%s", expected, actual)
	}
}
|
package api
import (
"context"
"fmt"
"net/http"
"github.com/porter-dev/porter/internal/models"
)
// ListTemplates sends GET {BaseURL}/templates and decodes the response into a
// list of Porter chart templates.
func (c *Client) ListTemplates(
	ctx context.Context,
) ([]*models.PorterChartList, error) {
	req, err := http.NewRequest(
		"GET",
		fmt.Sprintf("%s/templates", c.BaseURL),
		nil,
	)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)

	templates := make([]*models.PorterChartList, 0)
	httpErr, err := c.sendRequest(req, &templates, true)
	if httpErr != nil {
		return nil, fmt.Errorf("code %d, errors %v", httpErr.Code, httpErr.Errors)
	}
	if err != nil {
		return nil, err
	}
	return templates, nil
}
// GetTemplate sends GET {BaseURL}/templates/{name}/{version} and decodes the
// response into a single Porter chart template.
func (c *Client) GetTemplate(
	ctx context.Context,
	name, version string,
) (*models.PorterChartRead, error) {
	req, err := http.NewRequest(
		"GET",
		fmt.Sprintf("%s/templates/%s/%s", c.BaseURL, name, version),
		nil,
	)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)

	bodyResp := &models.PorterChartRead{}

	// bodyResp is already a pointer; previously &bodyResp (a **PorterChartRead)
	// was passed, which is inconsistent with ListTemplates and only worked
	// because json.Unmarshal follows double pointers.
	if httpErr, err := c.sendRequest(req, bodyResp, true); httpErr != nil || err != nil {
		if httpErr != nil {
			return nil, fmt.Errorf("code %d, errors %v", httpErr.Code, httpErr.Errors)
		}
		return nil, err
	}
	return bodyResp, nil
}
|
// Package tools provides common utility functions such as Panic and Exit helpers.
package tools
import (
"fmt"
"log"
"os"
"time"
)
const (
	LogFileName = "log" // Prefix of the log files we produce.
)

var (
	logfile *os.File = nil // Handle of the currently open log file, if any.
)
// SetUpLogging redirects the standard logger into a timestamped log file so it
// can be inspected later. In debug mode, logging is left on its default output.
func SetUpLogging(isDebug bool) {
	if isDebug {
		return
	}
	// Assign to the package-level logfile rather than declaring a new local:
	// the previous `logfile, err := ...` shadowed the package variable, so the
	// handle was never stored and ExitSuccess could never close it.
	var err error
	logfile, err = os.OpenFile(
		fmt.Sprintf("%s_%d%s", LogFileName, time.Now().Unix(), ".log"),
		os.O_WRONLY|os.O_CREATE,
		0666)
	if err != nil {
		log.Fatal("tools/base : [FATAL] Nous n'avons pu créé de fichier de log.\n")
	}
	log.SetOutput(logfile)
}
// ExitSuccess closes the log file (if one was opened) and exits with status 0.
func ExitSuccess() {
	if logfile != nil {
		logfile.Close()
	}
	os.Exit(0)
}
|
package fastdb
import (
"bytes"
"fastdb/index"
"fastdb/storage"
"sync"
)
// StrIndex is the in-memory index for string values, backed by a skip list.
type StrIndex struct {
	mu      sync.RWMutex    // guards idxList
	idxList *index.SkipList // key -> *index.Indexer
}

// NewStrIdx creates an empty string index.
func NewStrIdx() *StrIndex {
	return &StrIndex{idxList: index.NewSkipList()}
}
// Set stores a key/value pair, replacing any existing value for key.
func (db *FastDB) Set(key, value []byte) error {
	return db.doSet(key, value)
}
// Get returns the value stored at key.
// It returns ErrKeyNotExist when the key is absent, ErrNilIndexer when the
// index entry is nil, and ErrKeyExpired when the key has expired.
func (db *FastDB) Get(key []byte) ([]byte, error) {
	if err := db.checkKeyValue(key, nil); err != nil {
		return nil, err
	}
	db.strIndex.mu.RLock()
	defer db.strIndex.mu.RUnlock()
	// Get index info from a skip list in memory.
	node := db.strIndex.idxList.Get(key)
	if node == nil {
		return nil, ErrKeyNotExist
	}
	idx := node.Value().(*index.Indexer)
	if idx == nil {
		return nil, ErrNilIndexer
	}
	// Check if the key is expired.
	if db.checkExpired(key, String) {
		return nil, ErrKeyExpired
	}
	// In KeyValueMemMode, the value will be stored in memory.
	// So get the value from the index info.
	if db.config.IdxMode == KeyValueMemMode {
		return idx.Meta.Value, nil
	}
	// In KeyOnlyMemMode, the value not in memory.
	// So get the value from the db file at the offset.
	if db.config.IdxMode == KeyOnlyMemMode {
		df := db.activeFile[String]
		// The entry may live in an archived file rather than the active one.
		if idx.FileId != db.activeFileIds[String] {
			df = db.archFiles[String][idx.FileId]
		}
		e, err := df.Read(idx.Offset)
		if err != nil {
			return nil, err
		}
		return e.Meta.Value, nil
	}
	// Unknown index mode: treat as not found.
	return nil, ErrKeyNotExist
}
// doSet appends the entry to the active db file and updates the in-memory
// string index.
func (db *FastDB) doSet(key, value []byte) (err error) {
	if err = db.checkKeyValue(key, value); err != nil {
		return err
	}

	// If the existed value is the same as the set value, nothing will be done.
	// NOTE: db.Get takes the read lock, so this must run before the write
	// lock below is acquired.
	if db.config.IdxMode == KeyValueMemMode {
		// bytes.Equal replaces bytes.Compare(...) == 0 (staticcheck S1004).
		if existVal, _ := db.Get(key); existVal != nil && bytes.Equal(existVal, value) {
			return
		}
	}

	db.strIndex.mu.Lock()
	defer db.strIndex.mu.Unlock()

	e := storage.NewEntryNoExtra(key, value, String, StringSet)
	if err := db.store(e); err != nil {
		return err
	}
	db.incrReclaimableSpace(key)

	// Clear expire time. delete is a no-op for absent keys, so no existence
	// check is needed.
	delete(db.expires[String], string(key))

	// string indexes, stored in skiplist.
	idx := &index.Indexer{
		Meta: &storage.Meta{
			KeySize:   uint32(len(e.Meta.Key)),
			Key:       e.Meta.Key,
			ValueSize: uint32(len(e.Meta.Value)),
		},
		FileId:    db.activeFileIds[String],
		EntrySize: e.Size(),
		Offset:    db.activeFile[String].Offset - int64(e.Size()),
	}
	// in KeyValueMemMode, both key and value will store in memory.
	if db.config.IdxMode == KeyValueMemMode {
		idx.Meta.Value = e.Meta.Value
	}
	db.strIndex.idxList.Put(idx.Meta.Key, idx)
	return
}
// incrReclaimableSpace records the size of the entry previously stored at key
// (if any) as reclaimable space in its file, since it is about to be
// superseded or removed.
func (db *FastDB) incrReclaimableSpace(key []byte) {
	node := db.strIndex.idxList.Get(key)
	if node == nil {
		return
	}
	indexer := node.Value().(*index.Indexer)
	if indexer == nil {
		return
	}
	db.meta.ReclaimableSpace[indexer.FileId] += int64(indexer.EntrySize)
}
// StrLen returns the length of the string value stored at key, or 0 when the
// key is invalid, absent, or expired.
func (db *FastDB) StrLen(key []byte) int {
	if err := db.checkKeyValue(key, nil); err != nil {
		return 0
	}

	db.strIndex.mu.RLock()
	defer db.strIndex.mu.RUnlock()

	node := db.strIndex.idxList.Get(key)
	if node == nil {
		return 0
	}
	if db.checkExpired(key, String) {
		return 0
	}
	return int(node.Value().(*index.Indexer).Meta.ValueSize)
}
// StrExists reports whether key exists in the index and has not expired.
func (db *FastDB) StrExists(key []byte) bool {
	if err := db.checkKeyValue(key, nil); err != nil {
		return false
	}

	db.strIndex.mu.RLock()
	defer db.strIndex.mu.RUnlock()

	// Simplified from `if cond { return true }; return false` (staticcheck S1008).
	return db.strIndex.idxList.Exist(key) && !db.checkExpired(key, String)
}
// StrRem removes the value stored at key: it appends a removal entry to the
// db file, drops the in-memory index entry, and clears any expire time.
func (db *FastDB) StrRem(key []byte) error {
	if err := db.checkKeyValue(key, nil); err != nil {
		return err
	}

	db.strIndex.mu.Lock()
	defer db.strIndex.mu.Unlock()

	entry := storage.NewEntryNoExtra(key, nil, String, StringRem)
	if err := db.store(entry); err != nil {
		return err
	}

	db.incrReclaimableSpace(key)
	db.strIndex.idxList.Remove(key)
	delete(db.expires[String], string(key))
	return nil
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package arcappcompat will have tast tests for android apps on Chromebooks.
package arcappcompat
import (
"context"
"time"
"chromiumos/tast/common/android/ui"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/arcappcompat/pre"
"chromiumos/tast/local/bundles/cros/arcappcompat/testutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// clamshellLaunchForPicsart holds the launch test case run in clamshell mode.
var clamshellLaunchForPicsart = []testutil.TestCase{
	{Name: "Launch app in Clamshell", Fn: launchAppForPicsart},
}

// touchviewLaunchForPicsart holds the launch test case run in tablet (touchview) mode.
var touchviewLaunchForPicsart = []testutil.TestCase{
	{Name: "Launch app in Touchview", Fn: launchAppForPicsart},
}
// init registers the Picsart app-compat test with four parameterized variants:
// clamshell and tablet mode, each for both container (android_p) and VM
// (android_vm) ARC.
func init() {
	testing.AddTest(&testing.Test{
		Func:         Picsart,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Functional test for Picsart that install, launch the app and check that the main page is open, also checks Picsart correctly changes the window state in both clamshell and touchview mode",
		Contacts:     []string{"mthiyagarajan@chromium.org", "cros-appcompat-test-team@google.com"},
		// Disabled the test since Picsart is not compatible with Chromebook.
		// Attr: []string{"group:appcompat"},
		SoftwareDeps: []string{"chrome"},
		Params: []testing.Param{{
			Name: "clamshell_mode",
			Val: testutil.TestParams{
				LaunchTests: clamshellLaunchForPicsart,
				CommonTests: testutil.ClamshellCommonTests,
			},
			ExtraSoftwareDeps: []string{"android_p"},
			// TODO(b/189704585): Remove hwdep.SkipOnModel once the solution is found.
			// Skip on tablet only models.
			ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel(testutil.TabletOnlyModels...)),
			Pre:               pre.AppCompatBootedUsingTestAccountPool,
		}, {
			Name: "tablet_mode",
			Val: testutil.TestParams{
				LaunchTests: touchviewLaunchForPicsart,
				CommonTests: testutil.TouchviewCommonTests,
			},
			ExtraSoftwareDeps: []string{"android_p"},
			// TODO(b/189704585): Remove hwdep.SkipOnModel once the solution is found.
			// Skip on clamshell only models.
			ExtraHardwareDeps: hwdep.D(hwdep.TouchScreen(), hwdep.SkipOnModel(testutil.ClamshellOnlyModels...)),
			Pre:               pre.AppCompatBootedInTabletModeUsingTestAccountPool,
		}, {
			Name: "vm_clamshell_mode",
			Val: testutil.TestParams{
				LaunchTests: clamshellLaunchForPicsart,
				CommonTests: testutil.ClamshellCommonTests,
			},
			ExtraSoftwareDeps: []string{"android_vm"},
			// TODO(b/189704585): Remove hwdep.SkipOnModel once the solution is found.
			// Skip on tablet only models.
			ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel(testutil.TabletOnlyModels...)),
			Pre:               pre.AppCompatBootedUsingTestAccountPool,
		}, {
			Name: "vm_tablet_mode",
			Val: testutil.TestParams{
				LaunchTests: touchviewLaunchForPicsart,
				CommonTests: testutil.TouchviewCommonTests,
			},
			ExtraSoftwareDeps: []string{"android_vm"},
			// TODO(b/189704585): Remove hwdep.SkipOnModel once the solution is found.
			// Skip on clamshell only models.
			ExtraHardwareDeps: hwdep.D(hwdep.TouchScreen(), hwdep.SkipOnModel(testutil.ClamshellOnlyModels...)),
			Pre:               pre.AppCompatBootedInTabletModeUsingTestAccountPool,
		}},
		Timeout: 10 * time.Minute,
		Vars:    []string{"arcappcompat.gaiaPoolDefault"},
	})
}
// Picsart test uses library for opting into the playstore and installing app.
// Checks Picsart correctly changes the window states in both clamshell and touchview mode.
func Picsart(ctx context.Context, s *testing.State) {
	const (
		appPkgName  = "com.picsart.studio"                                       // Android package under test.
		appActivity = "com.socialin.android.photo.picsinphoto.MainPagerActivity" // Main activity to launch.
	)
	testSet := s.Param().(testutil.TestParams)
	testutil.RunTestCases(ctx, s, appPkgName, appActivity, testSet)
}
// launchAppForPicsart verifies Picsart is launched and that it reached the
// main activity page of the app. It dismisses the optional skip/allow/close
// dialogs shown on first run (each is best-effort: absence is only logged).
func launchAppForPicsart(ctx context.Context, s *testing.State, tconn *chrome.TestConn, a *arc.ARC, d *ui.Device, appPkgName, appActivity string) {
	// Removed the unused homeText constant that was previously declared here.
	const (
		allowText      = "ALLOW"
		closeClassName = "android.widget.ImageButton"
		closeDes       = "Navigate up"
		skipID         = "com.picsart.studio:id/btnSkip"
	)

	// Click on skip button.
	skipButton := d.Object(ui.ID(skipID))
	if err := skipButton.WaitForExists(ctx, testutil.DefaultUITimeout); err != nil {
		s.Log("skipButton doesn't exists: ", err)
	} else if err := skipButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on skipButton: ", err)
	}

	// Click on allow button.
	allowButton := d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+allowText))
	if err := allowButton.WaitForExists(ctx, testutil.DefaultUITimeout); err != nil {
		s.Log("allowButton doesn't exists: ", err)
	} else if err := allowButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on allowButton: ", err)
	}

	// Click on close button.
	closeButton := d.Object(ui.ClassName(closeClassName), ui.Description(closeDes))
	if err := closeButton.WaitForExists(ctx, testutil.DefaultUITimeout); err != nil {
		s.Log("closeButton doesn't exists: ", err)
	} else if err := closeButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on closeButton: ", err)
	}

	// Check for launch verifier.
	launchVerifier := d.Object(ui.PackageName(appPkgName))
	if err := launchVerifier.WaitForExists(ctx, testutil.LongUITimeout); err != nil {
		testutil.DetectAndHandleCloseCrashOrAppNotResponding(ctx, s, d)
		s.Fatal("launchVerifier doesn't exists: ", err)
	}
}
|
package authnsvc
import (
"net/http"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
"github.com/alextanhongpin/go-microservice/api"
"github.com/alextanhongpin/go-microservice/pkg/logger"
)
type (
	// Controller exposes the authentication service over HTTP handlers.
	// The service is embedded, so its methods are available directly.
	Controller struct {
		service
	}
)

// NewController wraps svc in a Controller.
func NewController(svc service) *Controller {
	return &Controller{svc}
}
// PostLogin authenticates the credentials in the JSON request body and
// responds with a freshly issued access token.
func (ctl *Controller) PostLogin(c *gin.Context) {
	type request = LoginRequest
	type response struct {
		AccessToken string `json:"access_token"`
	}

	var req request
	if err := c.BindJSON(&req); err != nil {
		api.ErrorJSON(c, err)
		return
	}

	ctx := c.Request.Context()
	log := logger.WithContext(ctx)

	accessToken, err := ctl.service.LoginWithAccessToken(ctx, req)
	if err != nil {
		log.Error("login user failed", zap.Error(err))
		api.ErrorJSON(c, err)
		return
	}

	c.JSON(http.StatusOK, response{accessToken})
}
// PostRegister creates a new user from the JSON request body and responds
// with a freshly issued access token.
func (ctl *Controller) PostRegister(c *gin.Context) {
	type request = RegisterRequest
	type response struct {
		AccessToken string `json:"access_token"`
	}

	var req request
	if err := c.BindJSON(&req); err != nil {
		api.ErrorJSON(c, err)
		return
	}

	ctx := c.Request.Context()
	log := logger.WithContext(ctx)

	accessToken, err := ctl.service.RegisterWithAccessToken(ctx, req)
	if err != nil {
		log.Error("register user failed", zap.Error(err))
		api.ErrorJSON(c, err)
		return
	}

	c.JSON(http.StatusOK, response{accessToken})
}
|
// Package token holds the lexical token types for use in lexing and parsing,
// and functions for accessing them.
//
// Sample token types include IDENT for an identifier, TRUE for the keyword
// "true", and FUNCTION for the keyword "fn".
//
// A Token represents a particular lexical token. It has a type and a literal:
// for example, Token{Type: IF, Literal: "if"} represents a token identified as
// an IF token.
package token
|
package main
import "fmt"
// main prints the perimeter and area of a 6x4 rectangle.
func main() {
	perimeter, area := rectangle(6, 4)
	fmt.Println("zhouchang: ", perimeter, "mianji:", area)
}
// rectangle returns the perimeter and the area of a rectangle with the given
// length and width. The parameter was renamed from "len", which shadowed the
// predeclared builtin len inside the function body.
func rectangle(length, width float64) (float64, float64) {
	perimeter := (length + width) * 2
	area := length * width
	return perimeter, area
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cuj
import (
"context"
"os"
"path"
"path/filepath"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/apps"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/arc/optin"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/lacros"
"chromiumos/tast/local/chrome/lacros/lacrosfixt"
"chromiumos/tast/local/chrome/uiauto/lockscreen"
"chromiumos/tast/local/cpu"
"chromiumos/tast/local/logsaver"
"chromiumos/tast/local/power"
"chromiumos/tast/testing"
)
const (
	// CPUCoolDownTimeout is the time to wait for cpu cool down.
	CPUCoolDownTimeout = 10 * time.Minute
	// CPUIdleTimeout is the time to wait for cpu utilization to go down.
	// This value should match waitIdleCPUTimeout in cpu/idle.go.
	CPUIdleTimeout = 2 * time.Minute
	// CPUStablizationTimeout is the time to wait for cpu stablization, which
	// is the sum of cpu cool down time and cpu idle time.
	CPUStablizationTimeout = CPUCoolDownTimeout + CPUIdleTimeout
	// webRTCLogsGatherTimeout is the time allowed for gathering the WebRTC
	// event log files into a gzip archive in the test output directory.
	webRTCLogsGatherTimeout = 15 * time.Second
	// resetTimeout is the timeout used for fixture Reset/TearDown phases.
	resetTimeout = 30 * time.Second
	// webRTCEventLogCommandFlag makes Chrome write WebRTC event logs to /tmp.
	webRTCEventLogCommandFlag = "--webrtc-event-logging=/tmp"
	// webRTCEventLogFilePattern matches the WebRTC event logs written to /tmp.
	webRTCEventLogFilePattern = "/tmp/event_log_*.log"
)
// init registers the CUJ fixtures: CPU-preparation fixtures (prepareForCUJ,
// cpuIdleForCUJ, cpuIdleForEnrolledCUJ) and the login fixtures layered on top
// of them, covering ash/lacros browsers, kept login state, enterprise
// accounts, enrollment, and WebRTC event logging variants.
func init() {
	testing.AddFixture(&testing.Fixture{
		Name: "prepareForCUJ",
		Desc: "The fixture to prepare DUT for CUJ tests",
		Contacts: []string{
			"xiyuan@chromium.org",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl:           &prepareCUJFixture{},
		PreTestTimeout: CPUStablizationTimeout,
	})
	testing.AddFixture(&testing.Fixture{
		Name: "cpuIdleForCUJ",
		Desc: "The fixture to wait DUT cpu to idle for CUJ tests",
		Contacts: []string{
			"jane.yang@cienet.com",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl:           &cpuIdleForCUJFixture{},
		PreTestTimeout: CPUIdleTimeout + 5*time.Second,
	})
	testing.AddFixture(&testing.Fixture{
		Name: "cpuIdleForEnrolledCUJ",
		Desc: "The fixture to wait DUT cpu to idle for logged in with gaia user on an enrolled device",
		Contacts: []string{
			"alston.huang@cienet.com",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl:            &cpuIdleForCUJFixture{},
		PreTestTimeout:  CPUIdleTimeout + 5*time.Second,
		SetUpTimeout:    chrome.EnrollmentAndLoginTimeout + chrome.GAIALoginTimeout,
		ResetTimeout:    chrome.ResetTimeout,
		TearDownTimeout: chrome.ResetTimeout,
		PostTestTimeout: 15 * time.Second,
		Parent:          fixture.Enrolled,
	})
	// Login fixtures below differ in browser type (ash vs lacros), whether
	// login state is kept, account pool, and WebRTC event logging.
	testing.AddFixture(&testing.Fixture{
		Name: "loggedInToCUJUser",
		Desc: "The main fixture used for UI CUJ tests",
		Contacts: []string{
			"xiyuan@chromium.org",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl:            &loggedInToCUJUserFixture{bt: browser.TypeAsh},
		Parent:          "prepareForCUJ",
		SetUpTimeout:    chrome.GAIALoginTimeout + optin.OptinTimeout + arc.BootTimeout + 2*time.Minute,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		Vars:            []string{"ui.cujAccountPool"},
	})
	testing.AddFixture(&testing.Fixture{
		Name: "loggedInToCUJUserEnterpriseWithWebRTCEventLogging",
		Desc: "The main fixture used for UI CUJ tests using an enterprise account, with WebRTC event logging",
		Contacts: []string{
			"xiyuan@chromium.org",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl: &loggedInToCUJUserFixture{
			chromeExtraOpts:   []chrome.Option{chrome.ExtraArgs(webRTCEventLogCommandFlag)},
			bt:                browser.TypeAsh,
			useEnterprisePool: true,
		},
		Parent:          "prepareForCUJ",
		SetUpTimeout:    chrome.GAIALoginTimeout + optin.OptinTimeout + arc.BootTimeout + 2*time.Minute,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		PostTestTimeout: webRTCLogsGatherTimeout,
		Vars:            []string{"ui.cujEnterpriseAccountPool"},
	})
	testing.AddFixture(&testing.Fixture{
		Name: "loggedInAndKeepState",
		Desc: "The CUJ test fixture which keeps login state",
		Contacts: []string{
			"xiyuan@chromium.org",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl:            &loggedInToCUJUserFixture{keepState: true, bt: browser.TypeAsh},
		Parent:          "cpuIdleForCUJ",
		SetUpTimeout:    chrome.GAIALoginTimeout + optin.OptinTimeout + arc.BootTimeout + 2*time.Minute,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		Vars:            []string{"ui.cujAccountPool"},
	})
	testing.AddFixture(&testing.Fixture{
		Name: "loggedInToCUJUserLacros",
		Desc: "Fixture used for lacros variation of UI CUJ tests",
		Contacts: []string{
			"xiyuan@chromium.org",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl:            &loggedInToCUJUserFixture{bt: browser.TypeLacros},
		Parent:          "cpuIdleForCUJ",
		SetUpTimeout:    chrome.GAIALoginTimeout + optin.OptinTimeout + arc.BootTimeout + 2*time.Minute,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		Vars:            []string{"ui.cujAccountPool"},
	})
	testing.AddFixture(&testing.Fixture{
		Name: "loggedInAndKeepStateLacros",
		Desc: "Fixture keeping login status and used for lacros variation of CUJ tests",
		Contacts: []string{
			"xliu@cienet.com",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl:            &loggedInToCUJUserFixture{keepState: true, bt: browser.TypeLacros},
		Parent:          "cpuIdleForCUJ",
		SetUpTimeout:    chrome.GAIALoginTimeout + optin.OptinTimeout + arc.BootTimeout + 2*time.Minute,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		Vars:            []string{"ui.cujAccountPool"},
	})
	testing.AddFixture(&testing.Fixture{
		Name: "enrolledLoggedInToCUJUser",
		Desc: "Logged in with gaia user on an enrolled device",
		Contacts: []string{
			"alston.huang@cienet.com",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl:            &loggedInToCUJUserFixture{},
		Parent:          "cpuIdleForEnrolledCUJ",
		SetUpTimeout:    chrome.EnrollmentAndLoginTimeout + chrome.GAIALoginTimeout + optin.OptinTimeout + 2*time.Minute,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		Vars: []string{
			"ui.cujAccountPool",
		},
	})
	testing.AddFixture(&testing.Fixture{
		Name: "enrolledLoggedInToCUJUserLacros",
		Desc: "Logged in with gaia user on an enrolled device and used for lacros variation of CUJ tests",
		Contacts: []string{
			"jane.yang@cienet.com",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl:            &loggedInToCUJUserFixture{bt: browser.TypeLacros},
		Parent:          "cpuIdleForEnrolledCUJ",
		SetUpTimeout:    chrome.EnrollmentAndLoginTimeout + chrome.GAIALoginTimeout + optin.OptinTimeout + 2*time.Minute,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		Vars: []string{
			"ui.cujAccountPool",
		},
	})
	testing.AddFixture(&testing.Fixture{
		Name: "loggedInToCUJUserWithWebRTCEventLogging",
		Desc: "CUJ test fixture with WebRTC event logging",
		Contacts: []string{
			"amusbach@chromium.org",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl: &loggedInToCUJUserFixture{
			chromeExtraOpts: []chrome.Option{chrome.ExtraArgs(webRTCEventLogCommandFlag)},
			bt:              browser.TypeAsh,
		},
		Parent:          "prepareForCUJ",
		SetUpTimeout:    chrome.GAIALoginTimeout + optin.OptinTimeout + arc.BootTimeout + 2*time.Minute,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		PostTestTimeout: webRTCLogsGatherTimeout,
		Vars:            []string{"ui.cujAccountPool"},
	})
	testing.AddFixture(&testing.Fixture{
		Name: "loggedInToCUJUserWithWebRTCEventLoggingLacros",
		Desc: "Lacros variation of loggedInToCUJUserWithWebRTCEventLogging",
		Contacts: []string{
			"amusbach@chromium.org",
			"chromeos-perfmetrics-eng@google.com",
		},
		Impl: &loggedInToCUJUserFixture{
			chromeExtraOpts: []chrome.Option{chrome.LacrosExtraArgs(webRTCEventLogCommandFlag)},
			bt:              browser.TypeLacros,
		},
		Parent:          "cpuIdleForCUJ",
		SetUpTimeout:    chrome.GAIALoginTimeout + optin.OptinTimeout + arc.BootTimeout + 2*time.Minute,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		PostTestTimeout: webRTCLogsGatherTimeout,
		Vars:            []string{"ui.cujAccountPool"},
	})
}
// loginOption returns the GAIA login option for the test, drawing credentials
// from the enterprise account pool when useEnterprisePool is set and from the
// regular CUJ account pool otherwise.
func loginOption(s *testing.FixtState, useEnterprisePool bool) chrome.Option {
	varName := "ui.cujAccountPool"
	if useEnterprisePool {
		varName = "ui.cujEnterpriseAccountPool"
	}
	return chrome.GAIALoginPool(s.RequiredVar(varName))
}
// runningPackages returns the set of package names that currently have
// activities, as reported by dumpsys.
func runningPackages(ctx context.Context, a *arc.ARC) (map[string]struct{}, error) {
	tasks, err := a.TaskInfosFromDumpsys(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "listing activities failed")
	}
	pkgs := make(map[string]struct{})
	for _, task := range tasks {
		for _, info := range task.ActivityInfos {
			pkgs[info.PackageName] = struct{}{}
		}
	}
	return pkgs, nil
}
// CPUCoolDownConfig returns a cpu.CoolDownConfig to be used for CUJ tests.
// It is the default config (preserving the UI) with the poll timeout raised
// to CPUCoolDownTimeout.
func CPUCoolDownConfig() cpu.CoolDownConfig {
	cdConfig := cpu.DefaultCoolDownConfig(cpu.CoolDownPreserveUI)
	cdConfig.PollTimeout = CPUCoolDownTimeout
	return cdConfig
}
// prepareCUJFixture is a stateless fixture whose only job is to stabilize the
// DUT (display on, CPU cooled down) before each CUJ test.
type prepareCUJFixture struct{}

// SetUp does nothing; all work happens in PreTest.
func (f *prepareCUJFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	return nil
}

// TearDown does nothing.
func (f *prepareCUJFixture) TearDown(ctx context.Context, s *testing.FixtState) {
}

// Reset does nothing between tests.
func (f *prepareCUJFixture) Reset(ctx context.Context) error {
	return nil
}

// PreTest turns on the display and waits for the CPU to stabilize.
func (f *prepareCUJFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	// Ensure display on to record ui performance correctly. Keep trying for 2 min
	// since it could take 2 min for `powerd` dbus service to be accessible via
	// dbus from tast. See b/244752048.
	if err := testing.Poll(ctx, power.TurnOnDisplay, &testing.PollOptions{
		Interval: 10 * time.Second,
		Timeout:  2 * time.Minute,
	}); err != nil {
		s.Fatal("Failed to turn on display: ", err)
	}
	// Wait for cpu to stabilize before test. Note this only works as expected if
	// all child fixtures's PreTest and the setup in each test main function do
	// not do cpu intensive works. Otherwise, this needs to moved into body of
	// tests.
	if err := cpu.WaitUntilStabilized(ctx, CPUCoolDownConfig()); err != nil {
		// Log the cpu stabilizing wait failure instead of make it fatal.
		// TODO(b/213238698): Include the error as part of test data.
		s.Log("Failed to wait for CPU to become idle: ", err)
	}
}

// PostTest does nothing.
func (f *prepareCUJFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
}
// cpuIdleForCUJFixture is a stateless fixture that waits for the CPU to
// become idle before each test runs; the wait is best-effort only.
type cpuIdleForCUJFixture struct{}

// SetUp does nothing; this fixture holds no state.
func (f *cpuIdleForCUJFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	return nil
}

// TearDown does nothing; nothing was created in SetUp.
func (f *cpuIdleForCUJFixture) TearDown(ctx context.Context, s *testing.FixtState) {
}

// Reset does nothing between tests; per-test work happens in PreTest.
func (f *cpuIdleForCUJFixture) Reset(ctx context.Context) error {
	return nil
}

// PreTest waits for the CPU to idle; failures are logged, not fatal.
func (f *cpuIdleForCUJFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	// Wait for cpu to idle before test.
	if err := cpu.WaitUntilIdle(ctx); err != nil {
		// Log the cpu idle wait failure instead of make it fatal.
		testing.ContextLog(ctx, "Failed to wait for CPU to become idle: ", err)
	}
}

// PostTest does nothing; there is no per-test state to clean up.
func (f *cpuIdleForCUJFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
}
// FixtureData is the struct returned by the preconditions.
type FixtureData struct {
	// chrome is the logged-in CrOS Chrome instance; unexported, exposed via Chrome().
	chrome *chrome.Chrome
	// ARC is the ARC session started after optin.
	ARC *arc.ARC
}

// Chrome gets the CrOS-chrome instance.
func (f FixtureData) Chrome() *chrome.Chrome { return f.chrome }
// loggedInToCUJUserFixture logs into Chrome with a pooled CUJ account,
// opts into the Play Store, and starts ARC, exposing both via FixtureData.
type loggedInToCUJUserFixture struct {
	cr  *chrome.Chrome // Chrome session owned by the fixture
	arc *arc.ARC       // ARC session owned by the fixture
	// origRunningPkgs are the ARC packages running right after SetUp; Reset
	// force-stops anything not in this set.
	origRunningPkgs map[string]struct{}
	// logMarker tracks the Chrome log position for per-test log extraction.
	logMarker *logsaver.Marker
	// keepState makes Chrome keep the previous user state on login.
	keepState bool
	// chromeExtraOpts are appended to the default Chrome options.
	chromeExtraOpts []chrome.Option
	// bt describes what type of browser this fixture should use
	bt browser.Type
	// useEnterprisePool selects the enterprise GAIA account pool variable.
	useEnterprisePool bool
}
// SetUp starts Chrome (optionally as Lacros), opts into the Play Store when
// it is not yet provisioned, boots ARC, and records the initially running
// ARC packages. On success ownership of Chrome/ARC moves to the fixture and
// a FixtureData is returned.
func (f *loggedInToCUJUserFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	var cr *chrome.Chrome
	// Start Chrome under a login-scoped timeout.
	func() {
		ctx, cancel := context.WithTimeout(ctx, chrome.LoginTimeout)
		defer cancel()
		opts := []chrome.Option{
			loginOption(s, f.useEnterprisePool),
			chrome.ARCSupported(),
			chrome.ExtraArgs(arc.DisableSyncFlags()...),
			chrome.DisableFeatures("FirmwareUpdaterApp"),
		}
		if f.keepState {
			opts = append(opts, chrome.KeepState())
		}
		opts = append(opts, f.chromeExtraOpts...)
		var err error
		if f.bt == browser.TypeLacros {
			opts, err = lacrosfixt.NewConfig(lacrosfixt.Mode(lacros.LacrosOnly),
				// Close all tabs in the recorder requires Lacros to be alive.
				lacrosfixt.KeepAlive(true),
				lacrosfixt.ChromeOptions(opts...)).Opts()
			if err != nil {
				s.Fatal("Failed to get lacros options: ", err)
			}
		}
		cr, err = chrome.New(ctx, opts...)
		if err != nil {
			s.Fatal("Failed to start Chrome: ", err)
		}
		chrome.Lock()
	}()
	// Clean up Chrome if any later step fails; disarmed by `cr = nil` below.
	defer func() {
		if cr != nil {
			chrome.Unlock()
			if err := cr.Close(ctx); err != nil {
				s.Error("Failed to close Chrome: ", err)
			}
		}
	}()
	enablePlayStore := true
	if f.keepState {
		// Check whether the play store has been enabled.
		tconn, err := cr.TestAPIConn(ctx)
		if err != nil {
			s.Fatal("Failed to connect Test API: ", err)
		}
		st, err := arc.GetState(ctx, tconn)
		if err != nil {
			s.Fatal("Failed to get ARC state: ", err)
		}
		// Skip optin if a kept profile is already provisioned.
		enablePlayStore = !st.Provisioned
	}
	if enablePlayStore {
		func() {
			const playStorePackageName = "com.android.vending"
			ctx, cancel := context.WithTimeout(ctx, optin.OptinTimeout+time.Minute)
			defer cancel()
			// Optin to Play Store.
			s.Log("Opting into Play Store")
			tconn, err := cr.TestAPIConn(ctx)
			if err != nil {
				s.Fatal("Failed to get the test conn: ", err)
			}
			maxAttempts := 2
			if err := optin.PerformWithRetry(ctx, cr, maxAttempts); err != nil {
				s.Fatal("Failed to optin to Play Store: ", err)
			}
			s.Log("Waiting for Playstore shown")
			if err := ash.WaitForCondition(ctx, tconn, func(w *ash.Window) bool {
				return w.ARCPackageName == playStorePackageName
			}, &testing.PollOptions{Timeout: 30 * time.Second}); err != nil {
				// Playstore app window might not be shown, but optin should be successful
				// at this time. Log the error message but continue.
				s.Log("Failed to wait for the playstore window to be visible: ", err)
				return
			}
			// Close the Play Store window and wait until it is really gone so it
			// does not interfere with the test's own windows.
			if err := apps.Close(ctx, tconn, apps.PlayStore.ID); err != nil {
				s.Fatal("Failed to close Play Store: ", err)
			}
			if err := testing.Poll(ctx, func(ctx context.Context) error {
				if _, err := ash.GetARCAppWindowInfo(ctx, tconn, playStorePackageName); err == ash.ErrWindowNotFound {
					return nil
				} else if err != nil {
					return testing.PollBreak(err)
				}
				return errors.New("still seeing playstore window")
			}, &testing.PollOptions{Timeout: 30 * time.Second}); err != nil {
				s.Fatal("Failed to wait for the playstore window to be closed: ", err)
			}
		}()
	}
	var a *arc.ARC
	// Boot ARC and snapshot the set of initially running packages.
	func() {
		ctx, cancel := context.WithTimeout(ctx, arc.BootTimeout)
		defer cancel()
		var err error
		if a, err = arc.New(ctx, s.OutDir()); err != nil {
			s.Fatal("Failed to start ARC: ", err)
		}
		if f.origRunningPkgs, err = runningPackages(ctx, a); err != nil {
			if err := a.Close(ctx); err != nil {
				s.Error("Failed to close ARC connection: ", err)
			}
			s.Fatal("Failed to list running packages: ", err)
		}
	}()
	f.cr = cr
	f.arc = a
	// Transfer ownership to the fixture: disarm the cleanup defer above.
	cr = nil
	return FixtureData{chrome: f.cr, ARC: f.arc}
}
// TearDown releases the Chrome lock and closes the ARC and Chrome sessions
// started in SetUp. Close failures are logged, not fatal.
func (f *loggedInToCUJUserFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	chrome.Unlock()
	if err := f.arc.Close(ctx); err != nil {
		testing.ContextLog(ctx, "Failed to close ARC connection: ", err)
	}
	if err := f.cr.Close(ctx); err != nil {
		testing.ContextLog(ctx, "Failed to close Chrome connection: ", err)
	}
}
// Reset restores a clean state between tests: it verifies the OAuth2 token is
// still valid (returning an error triggers a fresh login), force-stops ARC
// packages started during the test, resets Chrome state, and confirms no
// toplevel windows were left open.
func (f *loggedInToCUJUserFixture) Reset(ctx context.Context) error {
	// Check oauth2 token is still valid. If not, return an error to restart
	// chrome and re-login.
	tconn, err := f.cr.TestAPIConn(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get the test conn")
	}
	if st, err := lockscreen.GetState(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to get login status")
	} else if !st.HasValidOauth2Token {
		return errors.New("invalid oauth2 token")
	}
	// Stopping the running apps.
	running, err := runningPackages(ctx, f.arc)
	if err != nil {
		return errors.Wrap(err, "failed to get running packages")
	}
	for pkg := range running {
		// Packages that were already running after SetUp are left alone.
		if _, ok := f.origRunningPkgs[pkg]; ok {
			continue
		}
		testing.ContextLogf(ctx, "Stopping package %q", pkg)
		if err := f.arc.Command(ctx, "am", "force-stop", pkg).Run(testexec.DumpLogOnError); err != nil {
			return errors.Wrapf(err, "failed to stop %q", pkg)
		}
	}
	// Unlike ARC.preImpl, this does not uninstall apps. This is because we
	// typically want to reuse the same list of applications, and additional
	// installed apps wouldn't affect the test scenarios.
	if err := f.cr.ResetState(ctx); err != nil {
		return errors.Wrap(err, "failed to reset chrome")
	}
	// Ensures that there are no toplevel windows left open.
	if all, err := ash.GetAllWindows(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to call ash.GetAllWindows")
	} else if len(all) != 0 {
		// BUG FIX: err is nil on this branch, so wrapping it is wrong
		// (and with some errors packages would return a nil error).
		// Construct a new error instead.
		return errors.Errorf("toplevel window (%q) stayed open, total %d left", all[0].Name, len(all))
	}
	return nil
}
// PreTest prepares per-test logging: it points ARC logs at a per-test
// directory, marks the Chrome log position for later extraction, and removes
// stale WebRTC event logs from /tmp so PostTest only gathers this test's logs.
func (f *loggedInToCUJUserFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	arcLogOutDir := filepath.Join(s.OutDir(), "arc_logs")
	if err := os.MkdirAll(arcLogOutDir, 0755); err != nil {
		// Fall back to the test's main output directory.
		s.Log("Error creating arc_logs directory: ", err)
		arcLogOutDir = s.OutDir()
	} else {
		s.Log("Created arc_logs directory successfully")
	}
	if err := f.arc.ResetOutDir(ctx, arcLogOutDir); err != nil {
		s.Log("Failed to reset outDir field of ARC object: ", err)
	}
	if f.logMarker != nil {
		// A leftover marker means PostTest did not run for the previous test.
		s.Log("A log marker is already created but not cleaned up")
	}
	logMarker, err := logsaver.NewMarker(f.cr.LogFilename())
	if err == nil {
		f.logMarker = logMarker
	} else {
		s.Log("Failed to start the log saver: ", err)
	}
	// Delete any WebRTC event logs left over from previous runs.
	webRTCLogs, err := filepath.Glob(webRTCEventLogFilePattern)
	if err != nil {
		s.Log("Failed to check for WebRTC event log files before test: ", err)
	}
	if len(webRTCLogs) == 0 {
		return
	}
	s.Log("Deleting WebRTC event log files found in /tmp before test: ", webRTCLogs)
	for _, filename := range webRTCLogs {
		if err := os.Remove(filename); err != nil {
			s.Logf("Failed to delete %q: %s", filename, err)
		}
	}
}
// PostTest saves the per-test slice of the Chrome log, ARC log files, and
// archives any WebRTC event logs from /tmp into the test's output directory,
// deleting them afterwards. All failures are logged, not fatal.
func (f *loggedInToCUJUserFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
	if f.logMarker != nil {
		if err := f.logMarker.Save(filepath.Join(s.OutDir(), "chrome.log")); err != nil {
			s.Log("Failed to store per-test log data: ", err)
		}
		// Clear the marker so PreTest knows cleanup happened.
		f.logMarker = nil
	}
	if f.arc != nil {
		if err := f.arc.SaveLogFiles(ctx); err != nil {
			s.Log("Failed to save ARC-related log files: ", err)
		} else {
			s.Log("ARC-related log files saved successfully")
		}
	}
	webRTCLogs, err := filepath.Glob(webRTCEventLogFilePattern)
	if err != nil {
		s.Log("Failed to check for WebRTC event log files after test: ", err)
	}
	if len(webRTCLogs) == 0 {
		return
	}
	// Bundle all WebRTC event logs into one tarball in the output directory.
	s.Log("Gathering WebRTC event log files: ", webRTCLogs)
	if err := testexec.CommandContext(ctx, "tar",
		append(
			[]string{"-cvzf", path.Join(s.OutDir(), "webrtc-logs.tar.gz")},
			webRTCLogs...,
		)...,
	).Run(testexec.DumpLogOnError); err != nil {
		s.Log("Failed to gather WebRTC event log files: ", err)
	}
	s.Log("Deleting WebRTC event log files in /tmp")
	for _, filename := range webRTCLogs {
		if err := os.Remove(filename); err != nil {
			s.Logf("Failed to delete %q: %s", filename, err)
		}
	}
}
|
package main
import (
"bytes"
"fmt"
"net/http"
"time"
)
func home(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "%s", "<h2>Welcome to the home Page</h2>")
fmt.Fprintf(w, "%s", "How are you doing today?")
}
func work(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "[ %s", time.Now())
fmt.Fprintf(w, "]")
}
// main demonstrates converting an http.Response body to a string via a
// buffer, then serves the home and work handlers on port 8085.
func main() {
	// BUG FIX: the original ignored the http.Get error, so a network failure
	// would leave response nil and buf.ReadFrom(response.Body) would panic.
	response, err := http.Get("https://golangcode.com/")
	if err != nil {
		// The fetch is only a demonstration; report and continue serving.
		fmt.Println("Failed to fetch golangcode.com:", err)
	} else {
		// The line below would fail because Body = io.ReadCloser
		// fmt.Printf(response.Body)
		// ...so we convert it to a string by passing it through
		// a buffer first. A 'costly' but useful process.
		buf := new(bytes.Buffer)
		buf.ReadFrom(response.Body)
		response.Body.Close()
	}
	serverPort := "8085"
	http.HandleFunc("/home", home)
	http.HandleFunc("/workTime", work)
	fmt.Println("Server is running at port : " + serverPort)
	// BUG FIX: surface ListenAndServe failures (e.g. port already in use)
	// instead of silently discarding them.
	if err := http.ListenAndServe(":"+serverPort, nil); err != nil {
		fmt.Println("Server error:", err)
	}
}
|
package main
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
)
// concatBuf returns a bytes.Buffer whose contents are a followed by b.
func concatBuf(a, b string) bytes.Buffer {
	var out bytes.Buffer
	for _, s := range []string{a, b} {
		out.WriteString(s)
	}
	return out
}
// hash returns the HMAC-SHA256 digest of b, keyed with the package-level
// hashKey.
func hash(b []byte) ([]byte, error) {
	mac := hmac.New(sha256.New, hashKey)
	if _, err := mac.Write(b); err != nil {
		return nil, err
	}
	return mac.Sum(nil), nil
}
|
package shape
import "github.com/gregoryv/draw/xy"
// Aligner aligns multiple shapes relative to the first shape given.
// It is stateless; all methods delegate to hAlign/vAlign.
type Aligner struct{}

// HAlignCenter aligns shape[1:] to shape[0] center coordinates horizontally
func (Aligner) HAlignCenter(shapes ...Shape) { hAlign(Center, shapes...) }

// HAlignTop aligns shape[1:] to shape[0] top coordinates horizontally
func (Aligner) HAlignTop(shapes ...Shape) { hAlign(Top, shapes...) }

// HAlignBottom aligns shape[1:] to shape[0] bottom coordinates horizontally
func (Aligner) HAlignBottom(shapes ...Shape) { hAlign(Bottom, shapes...) }
// hAlign adjusts the y coordinate of objects[1:] so they align horizontally
// with the first object's top, bottom, or vertical center.
func hAlign(adjust Alignment, objects ...Shape) {
	anchor := objects[0]
	_, y := anchor.Position()
	for _, s := range objects[1:] {
		switch adjust {
		case Top:
			s.SetY(y)
		case Bottom:
			s.SetY(y + anchor.Height() - s.Height())
		case Center:
			s.SetY(y + (anchor.Height()-s.Height())/2)
		}
	}
}
// VAlignCenter aligns shape[1:] to shape[0] center coordinates vertically
func (Aligner) VAlignCenter(shapes ...Shape) { vAlign(Center, shapes...) }

// VAlignLeft aligns shape[1:] to shape[0] left coordinates vertically
func (Aligner) VAlignLeft(shapes ...Shape) { vAlign(Left, shapes...) }

// VAlignRight aligns shape[1:] to shape[0] right coordinates vertically
func (Aligner) VAlignRight(shapes ...Shape) { vAlign(Right, shapes...) }
// vAlign adjusts the x coordinate of objects[1:] so they align vertically
// with the first object's left, right, or horizontal center. Centering
// accounts for left-pointing shapes, which extend in the other direction.
func vAlign(adjust Alignment, objects ...Shape) {
	anchor := objects[0]
	x, _ := anchor.Position()
	for _, s := range objects[1:] {
		switch adjust {
		case Left:
			s.SetX(x)
		case Right:
			s.SetX(x + anchor.Width() - s.Width())
		case Center:
			if anchor.Direction() == DirectionLeft {
				s.SetX(x - (anchor.Width()+s.Width())/2)
			} else {
				s.SetX(x + (anchor.Width()-s.Width())/2)
			}
		}
	}
}
// Alignment selects which edge or center line shapes are aligned to.
type Alignment int

const (
	Top Alignment = iota
	Left
	Right
	Bottom
	Center
)
// NewDirection returns the compass-like direction of travel from one point
// to another. Case order matters: axis-aligned cases must be checked before
// the diagonal ones that share a predicate.
func NewDirection(from, to xy.Point) Direction {
	switch {
	case from.LeftOf(to) && from.Y == to.Y:
		return DirectionRight
	case from.LeftOf(to) && from.Above(to):
		return DirectionDownRight
	case from.Above(to) && from.X == to.X:
		return DirectionDown
	case from.RightOf(to) && from.Above(to):
		return DirectionDownLeft
	case from.RightOf(to) && from.Y == to.Y:
		return DirectionLeft
	case from.Below(to) && from.RightOf(to):
		return DirectionUpLeft
	case from.Below(to) && from.X == to.X:
		return DirectionUp
	default: // from.LeftOf(to) && from.Below(to):
		return DirectionUpRight
	}
}
// Direction is a bit set of the four cardinal directions; diagonals are
// the bitwise OR of their two components.
type Direction uint

const (
	DirectionRight Direction = (1 << iota)
	DirectionLeft
	DirectionUp
	DirectionDown
	DirectionDownRight = DirectionDown | DirectionRight
	DirectionDownLeft  = DirectionDown | DirectionLeft
	DirectionUpLeft    = DirectionUp | DirectionLeft
	DirectionUpRight   = DirectionUp | DirectionRight
)
// Is reports whether d contains all direction bits of dir, e.g.
// DirectionDownRight.Is(DirectionRight) is true.
func (d Direction) Is(dir Direction) bool {
	return (d & dir) == dir
}
|
package skelplate
import (
"encoding/json"
"io"
"io/ioutil"
"os"
"strings"
"testing"
"github.com/AlecAivazis/survey/terminal"
)
// tmplErrTests is a table of skelplate descriptors (and optional prefill
// data) that must fail template parsing; expected is the prefix of the
// error message gatherData should return.
var tmplErrTests = []struct {
	tmpl     string                 // raw descriptor JSON
	prefill  map[string]interface{} // pre-supplied answers, may be nil
	expected string                 // required error-message prefix
}{
	{
		`{
"author": "brainicorn",
"variables":[{"name":"{{.SomeVar", "default":"ipa"}]
}`,
		nil,
		"unable to parse variable name template:",
	},
	{
		`{
"author": "brainicorn",
"variables":[{"name":"beer", "default":"{{ipa"}]
}`,
		nil,
		"unable to parse variable default template:",
	},
	{
		`{
"author": "brainicorn",
"variables":[{"name":"beer", "default":["{{ipa"]}]
}`,
		nil,
		"unable to parse variable default template:",
	},
	{
		`{
"author": "brainicorn",
"variables":[{"name":"beer", "default":""}]
}`,
		map[string]interface{}{"beer": "{{ipa"},
		"unable to parse data template:",
	},
	{
		`{
"author": "brainicorn",
"variables":[{"name":"beer", "default":""}]
}`,
		map[string]interface{}{"beer": float64(1)},
		"invalid type for provided data entry",
	},
}
// TestTemplateParseErrors verifies that each malformed descriptor in
// tmplErrTests produces an error whose message starts with the expected
// prefix when data is gathered.
func TestTemplateParseErrors(t *testing.T) {
	for _, tt := range tmplErrTests {
		dp := NewDataProvider(tt.prefill)
		var descriptor SkelplateDescriptor
		err := json.Unmarshal([]byte(tt.tmpl), &descriptor)
		if err != nil {
			// The JSON itself must be valid; only the templates are broken.
			t.Fatalf("error parsing descriptor: %s\n%s", tt.tmpl, err)
		}
		_, err = dp.gatherData(descriptor)
		if err == nil {
			t.Fatalf("expected error but was nil: %s", tt.tmpl)
		}
		if !strings.HasPrefix(err.Error(), tt.expected) {
			t.Fatalf("wrong error have (%s) want (%s)", err, tt.expected)
		}
	}
}
// fakeInterruptingUser simulates interactive input by replacing os.Stdin
// with a temp file that is rewritten with one queued keystroke at a time.
type fakeInterruptingUser struct {
	in         *os.File // temp file standing in for os.Stdin
	keystrokes []string // pending keystrokes, consumed front-to-back
}
// newFakeInterruptingUser creates a fake user and globally redirects
// os.Stdin to a temp file for the remainder of the test process.
// NOTE(review): the ioutil.TempFile error is ignored — a failure would
// surface later as a nil-file panic; confirm this is acceptable for tests.
func newFakeInterruptingUser(keystrokes []string) *fakeInterruptingUser {
	in, _ := ioutil.TempFile("", "")
	os.Stdin = in
	return &fakeInterruptingUser{
		in:         in,
		keystrokes: keystrokes,
	}
}
// nextKeystroke pops the next queued keystroke and rewrites the fake stdin
// file so the next prompt read sees exactly that keystroke plus a newline.
// Seek/Truncate errors are intentionally ignored (best-effort test helper).
func (f *fakeInterruptingUser) nextKeystroke() {
	var keystroke string
	keystroke, f.keystrokes = f.keystrokes[0], f.keystrokes[1:]
	f.in.Truncate(0)
	// io.SeekStart replaces the deprecated os.SEEK_SET constant.
	f.in.Seek(0, io.SeekStart)
	io.WriteString(f.in, keystroke+"\n")
	f.in.Seek(0, io.SeekStart)
}
// done closes the temp file that backs the fake stdin.
func (f *fakeInterruptingUser) done() {
	f.in.Close()
}
// TestGatherDataInterrupt verifies that sending a terminal interrupt
// keystroke during a prompt aborts gatherData with an "interrupt" error.
func TestGatherDataInterrupt(t *testing.T) {
	descJSON := `{
"author": "brainicorn",
"variables":[{"name":"beer", "default":"ipa"}]
}`
	dp := NewDataProvider(nil)
	user := newFakeInterruptingUser([]string{string(terminal.KeyInterrupt)})
	defer user.done()
	// Feed the interrupt just before each prompt is shown.
	dp.beforePrompt = user.nextKeystroke
	var descriptor SkelplateDescriptor
	err := json.Unmarshal([]byte(descJSON), &descriptor)
	if err != nil {
		t.Fatalf("error parsing descriptor: %s\n%s", descJSON, err)
	}
	_, err = dp.gatherData(descriptor)
	if err == nil || !strings.HasSuffix(err.Error(), "interrupt") {
		t.Errorf("wrong error: have (%s), want (%s)", err, "interrupt")
	}
}
|
package main
import (
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"os"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
)
var users []User
// init loads dataset.xml once per process and converts each <row> record
// into a User appended to the package-level users slice consumed by
// SearchServer and the tests. Any failure is fatal.
func init() {
	// Local types mirror the XML schema of a dataset row.
	type Сlient struct {
		FirstName string `xml:"first_name"`
		LastName  string `xml:"last_name"`
		ID        int    `xml:"id"`
		Age       int    `xml:"age"`
		About     string `xml:"about"`
		Gender    string `xml:"gender"`
	}
	type Сlients struct {
		List []Сlient `xml:"row"`
	}
	xmlFile, err := os.Open("dataset.xml")
	if err != nil {
		log.Fatal(fmt.Sprintln("Open dataset.xml failed, error: ", err))
	}
	// BUG FIX: defer only after the error check; the original deferred Close
	// on a possibly-nil *os.File before checking the Open error.
	defer xmlFile.Close()
	byteVal, err := ioutil.ReadAll(xmlFile)
	if err != nil {
		log.Fatal(fmt.Sprintln("Can't read xml file, error: ", err))
	}
	var dataset Сlients
	err = xml.Unmarshal(byteVal, &dataset)
	if err != nil {
		log.Fatal(fmt.Sprintln("Can't unmarshall dataset in Clients, error: ", err))
	}
	for _, cl := range dataset.List {
		users = append(users, User{
			Name:   cl.FirstName + " " + cl.LastName,
			Id:     cl.ID,
			Age:    cl.Age,
			About:  cl.About,
			Gender: cl.Gender,
		})
	}
}
// SearchServer is the test HTTP handler emulating the search backend: it
// authenticates via the AccessToken header, filters users by a substring
// query over Name/About, optionally sorts by Id/Age/Name, and applies
// offset/limit pagination. Responses are JSON.
// NOTE(review): query parameters are indexed without presence checks
// (param["limit"][0] etc.) — a request missing any of them panics; confirm
// the client always sends all five.
func SearchServer(w http.ResponseWriter, req *http.Request) {
	const allowedToken = "666"
	param := req.URL.Query()
	var (
		accessToken = req.Header.Get("AccessToken")
		limit, _    = strconv.Atoi(param["limit"][0])
		offset, _   = strconv.Atoi(param["offset"][0])
		query       = param["query"][0]
		orderField  = param["order_field"][0]
		orderBy, _  = strconv.Atoi(param["order_by"][0])
	)
	if accessToken != allowedToken {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}
	// Keep users whose name or description contains the query substring.
	var requiredUsers []User
	for _, u := range users {
		haveQuery := strings.Contains(u.Name, query) ||
			strings.Contains(u.About, query)
		if haveQuery {
			requiredUsers = append(requiredUsers, u)
		}
	}
	if offset >= len(requiredUsers) {
		w.WriteHeader(http.StatusBadRequest)
		jsonErr, _ := json.Marshal(
			SearchErrorResponse{"Offset is bigger than number of docs"},
		)
		w.Write(jsonErr)
		return
	}
	// order_by must be one of -1 (asc), 0 (no sort), 1 (desc).
	correctOrder := false
	for _, order := range []int{-1, 0, 1} {
		if orderBy == order {
			correctOrder = true
			break
		}
	}
	if !correctOrder {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if orderBy != 0 {
		// NOTE(review): substring check also accepts "" and partial field
		// names like "d"; an empty order_field deliberately sorts by Name.
		if !strings.Contains("Id, Age, Name", orderField) {
			w.WriteHeader(http.StatusBadRequest)
			jsonErr, _ := json.Marshal(
				SearchErrorResponse{"ErrorBadOrderField"},
			)
			w.Write(jsonErr)
			return
		}
		sort.SliceStable(requiredUsers, func(i, j int) bool {
			var relation bool
			switch orderField {
			case "Id":
				relation = requiredUsers[i].Id > requiredUsers[j].Id
			case "Age":
				relation = requiredUsers[i].Age > requiredUsers[j].Age
			case "Name", "":
				relation = requiredUsers[i].Name > requiredUsers[j].Name
			}
			// order_by == -1 inverts the comparison (ascending order).
			if orderBy == -1 {
				relation = !relation
			}
			return relation
		})
	}
	// Pagination: drop offset entries, then cap at limit (0 means no cap).
	requiredUsers = requiredUsers[offset:]
	if limit > 0 && limit < len(requiredUsers) {
		requiredUsers = requiredUsers[:limit]
	}
	usersJSON, _ := json.Marshal(requiredUsers)
	w.Write(usersJSON)
}
// TestBase runs table-driven FindUsers cases against SearchServer, checking
// both the returned page of users and whether an error is expected.
// Expected results reference the package-level users slice loaded from
// dataset.xml in init.
func TestBase(t *testing.T) {
	type TestCase struct {
		req     SearchRequest
		res     *SearchResponse // expected response, nil when an error is expected
		isError bool
	}
	cases := []TestCase{
		{
			SearchRequest{
				Limit:      1,
				Query:      "Boyd",
				OrderField: "Name",
				OrderBy:    -1,
			},
			&SearchResponse{users[0:1], false},
			false,
		},
		{
			SearchRequest{
				Limit:      3,
				OrderField: "Id",
				OrderBy:    1,
			},
			&SearchResponse{
				[]User{users[34], users[33], users[32]},
				true,
			},
			false,
		},
		{
			SearchRequest{
				Limit:      10,
				Query:      "W",
				OrderField: "Id",
				OrderBy:    -1,
			},
			&SearchResponse{
				[]User{users[0], users[13], users[21], users[22]},
				false,
			},
			false,
		},
		{
			// Unknown order field must be rejected.
			SearchRequest{OrderBy: -1, OrderField: "Djopa slona"},
			nil,
			true,
		},
		{
			SearchRequest{Limit: 40},
			&SearchResponse{users[0:25], true},
			false,
		},
		{
			// Offset beyond the dataset size must be rejected.
			SearchRequest{Offset: 40},
			nil,
			true,
		},
		{
			// order_by outside {-1, 0, 1} must be rejected.
			SearchRequest{OrderBy: -666},
			nil,
			true,
		},
	}
	ts := httptest.NewServer(http.HandlerFunc(SearchServer))
	cli := SearchClient{AccessToken: "666", URL: ts.URL}
	defer ts.Close()
	for caseNum, item := range cases {
		cliRes, cliErr := cli.FindUsers(item.req)
		if cliErr != nil && !item.isError {
			t.Errorf("[%d] unexpected error: %#v", caseNum, cliErr)
		}
		if cliErr == nil && item.isError {
			t.Errorf("[%d] expected error, got nil", caseNum)
		}
		if !reflect.DeepEqual(item.res, cliRes) {
			t.Errorf("[%d] wrong result, \n\nexpected %#v, \n\ngot %#v", caseNum, item.res, cliRes)
		}
	}
}
// TestWithErrorCheck verifies that FindUsers rejects invalid limit/offset
// values client-side with the exact expected error messages.
func TestWithErrorCheck(t *testing.T) {
	type TestCase struct {
		req SearchRequest
		res *SearchResponse
		err error // expected exact error
	}
	cases := []TestCase{
		{
			SearchRequest{Limit: -1},
			nil,
			fmt.Errorf("limit must be > 0"),
		},
		{
			SearchRequest{Offset: -1},
			nil,
			fmt.Errorf("offset must be > 0"),
		},
	}
	ts := httptest.NewServer(http.HandlerFunc(SearchServer))
	cli := SearchClient{AccessToken: "666", URL: ts.URL}
	defer ts.Close()
	for caseNum, item := range cases {
		cliRes, cliErr := cli.FindUsers(item.req)
		if !reflect.DeepEqual(cliErr, item.err) {
			t.Errorf("[%d] wrong error, \n\nexpected %#v, \n\ngot %#v", caseNum, item.err, cliErr)
		}
		if !reflect.DeepEqual(item.res, cliRes) {
			t.Errorf("[%d] wrong result, \n\nexpected %#v, \n\ngot %#v", caseNum, item.res, cliRes)
		}
	}
}
// TestAuth verifies that the correct AccessToken is accepted and a wrong
// one yields the client's "Bad AccessToken" error.
func TestAuth(t *testing.T) {
	type TestCase struct {
		token string
		err   error // expected exact error (nil for success)
	}
	cases := []TestCase{
		{"666", nil},
		{"333", fmt.Errorf("Bad AccessToken")},
	}
	ts := httptest.NewServer(http.HandlerFunc(SearchServer))
	cli := SearchClient{URL: ts.URL}
	defer ts.Close()
	for caseNum, item := range cases {
		cli.AccessToken = item.token
		_, err := cli.FindUsers(SearchRequest{})
		if !reflect.DeepEqual(err, item.err) {
			t.Errorf("[%d] wrong error, \n\nexpected %#v, \n\ngot %#v", caseNum, item.err, err)
		}
	}
}
// TestBrokenServer exercises FindUsers against misbehaving backends: HTTP
// 500, invalid JSON, an unreachable URL, and a slow server (timeout). Each
// scenario lives in its own scoped block with its own test server.
func TestBrokenServer(t *testing.T) {
	var (
		ts  *httptest.Server
		cli SearchClient
	)
	// Server returns 500: expect the client's fatal-error message.
	{
		ts = httptest.NewServer(
			http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
				w.WriteHeader(http.StatusInternalServerError)
			}),
		)
		cli = SearchClient{URL: ts.URL}
		_, err := cli.FindUsers(SearchRequest{})
		if !reflect.DeepEqual(err, fmt.Errorf("SearchServer fatal error")) {
			t.Errorf("Wrong handle of the Server Fatal Error: %#v", err)
		}
		ts.Close()
	}
	// Server returns malformed JSON: expect an unmarshalling error.
	{
		ts = httptest.NewServer(
			http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
				io.WriteString(w, "{incorrect: json, djopa: slona}")
			}),
		)
		cli = SearchClient{URL: ts.URL}
		_, err := cli.FindUsers(SearchRequest{})
		if err == nil {
			t.Errorf("Incorrect json error not handled")
		}
		ts.Close()
	}
	// Empty URL: the request itself must fail.
	{
		cli = SearchClient{}
		_, err := cli.FindUsers(SearchRequest{})
		if err == nil {
			t.Errorf("It can't be true")
		}
	}
	// Server sleeps past the client timeout: expect a timeout error.
	{
		ts = httptest.NewServer(
			http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
				time.Sleep(time.Second)
			}),
		)
		cli = SearchClient{URL: ts.URL}
		_, err := cli.FindUsers(SearchRequest{})
		if err == nil {
			t.Errorf("Must give timeout error: %#v", err)
		}
		ts.Close()
	}
}
|
package main
import (
"fmt"
"math"
)
// Geometrica is implemented by any geometric shape that can report its area.
type Geometrica interface {
	area() float64
}
// Quadrado is a square with side length lado.
type Quadrado struct {
	lado float64
}

// area returns the square's area: lado squared.
func (q Quadrado) area() float64 {
	side := q.lado
	return side * side
}
type Circulo struct {
raio float64
} // area = pi * (raio ^ 2) (raio * raio)
func (c Circulo) area() float64 {
return math.Pi * c.raio * c.raio
}
func main() {
var g Geometrica
g = Quadrado{3}
fmt.Printf("A area do quadrado é %v\n", g.area())
g = Circulo{5}
fmt.Printf("A area do circulo é %v\n", g.area())
}
|
package main
import "sync"
// main is intentionally empty; this file only defines the stack type and
// its methods.
func main() {
}
// stack is a LIFO container of items that is safe for concurrent use.
type stack struct {
	lock sync.Mutex // guards data and size
	data []item
	size int // element count, kept equal to len(data)
}

// push places t on top of the stack.
func (s *stack) push(t item) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.data = append(s.data, t)
	s.size = len(s.data)
}

// pop removes and returns the top item; the boolean is false when the
// stack is empty.
func (s *stack) pop() (item, bool) {
	s.lock.Lock()
	defer s.lock.Unlock()
	n := s.size
	if n == 0 {
		return item{}, false
	}
	top := s.data[n-1]
	s.data = s.data[:n-1]
	s.size = n - 1
	return top, true
}

// peek returns the top item without removing it; the boolean is false when
// the stack is empty.
func (s *stack) peek() (item, bool) {
	s.lock.Lock()
	defer s.lock.Unlock()
	if s.size == 0 {
		return item{}, false
	}
	return s.data[s.size-1], true
}

// isEmpty reports whether the stack holds no items.
func (s *stack) isEmpty() bool {
	s.lock.Lock()
	defer s.lock.Unlock()
	return s.size == 0
}

// item is the element type stored in the stack.
type item struct {
	Val int
}
|
package exchange
import (
"fmt"
)
// Bittrex is the Bittrex exchange client; it embeds the common Exchange
// behavior and caches the tradable pairs.
type Bittrex struct {
	Exchange
	Pairs []*Pair // populated by SetPairs
}

// BittrexTicker models a ticker entry from the Bittrex API.
type BittrexTicker struct {
	Volume    string `json:"volume"`
	LastPrice string `json:"last_price"`
}

// BittrexMarket is one market (currency pair) entry from /public/getmarkets.
type BittrexMarket struct {
	MarketCurrency string `json:"MarketCurrency"`
	BaseCurrency   string `json:"BaseCurrency"`
}

// BittrexMarkets is the envelope around the market list response.
type BittrexMarkets struct {
	Result []BittrexMarket `json:"result"`
}

// BittrexSummary holds the last price and volume of one market summary.
type BittrexSummary struct {
	Last   float64 `json:"Last"`
	Volume float64 `json:"Volume"`
}

// BittrexSummaries is the envelope around the market summary response.
type BittrexSummaries struct {
	Result []BittrexSummary `json:"result"`
}
// GetResponse fetches the market summary for the base/quote pair and maps
// it into the exchange-agnostic Response (last price and volume).
// NOTE(review): summaries.Result[0] panics if the API returns an empty
// result list (e.g. unknown pair) — confirm HttpGet guarantees a non-empty
// Result on nil error.
func (exc *Bittrex) GetResponse(base, quote string) (*Response, *Error) {
	var summaries BittrexSummaries
	config := exc.GetConfig()
	err := HttpGet(config, fmt.Sprintf("/public/getmarketsummary?market=%s-%s", base, quote), &summaries)
	if err != nil {
		return nil, err
	}
	return &Response{Name: config.Name, Price: summaries.Result[0].Last, Volume: summaries.Result[0].Volume}, nil
}
// SetPairs fetches the full market list from Bittrex and appends every
// base/quote pair to exc.Pairs.
func (exc *Bittrex) SetPairs() *Error {
	var markets BittrexMarkets
	cfg := exc.GetConfig()
	if err := HttpGet(cfg, "/public/getmarkets", &markets); err != nil {
		return err
	}
	for _, m := range markets.Result {
		pair := &Pair{Base: m.BaseCurrency, Quote: m.MarketCurrency}
		exc.Pairs = append(exc.Pairs, pair)
	}
	return nil
}
// GetConfig returns the static Bittrex API configuration together with the
// currently known pairs.
func (exc *Bittrex) GetConfig() *Config {
	cfg := &Config{
		Name:    "Bittrex",
		BaseUrl: "https://bittrex.com/api/v1.1",
		Client:  nil,
		Pairs:   exc.Pairs,
	}
	return cfg
}
|
package controllers
import (
"context"
"fmt"
"strconv"
"github.com/ACER/app/ent"
"github.com/ACER/app/ent/course"
"github.com/ACER/app/ent/courseitem"
"github.com/ACER/app/ent/subject"
"github.com/ACER/app/ent/subjecttype"
"github.com/gin-gonic/gin"
)
// CourseItemController defines the struct for the course item controller
type CourseItemController struct {
	client *ent.Client // ent ORM client used for all queries
	router gin.IRouter // router the controller registers its routes on
}

// Course_Item is the POST request payload: the IDs of the entities the new
// course item links together.
type Course_Item struct {
	Courses      int // Course entity ID
	Subjects     int // Subject entity ID
	SubjectTypes int // SubjectType entity ID
}
// CreateCourseItem handles POST requests for adding courseitem entities.
// It resolves the referenced course, subject, and subject type by ID and
// creates a course item linking the three; any missing reference or save
// failure yields a 400 response.
// @Summary Create courseitem
// @Description Create courseitem
// @ID create-courseitem
// @Accept json
// @Produce json
// @Param courseitem body ent.CourseItem true "courseitem entity"
// @Success 200 {object} ent.CourseItem
// @Failure 400 {object} gin.H
// @Failure 500 {object} gin.H
// @Router /CourseItems [post]
func (ctl *CourseItemController) CreateCourseItem(c *gin.Context) {
	obj := Course_Item{}
	if err := c.ShouldBind(&obj); err != nil {
		c.JSON(400, gin.H{
			"error": "Course Item binding failed",
		})
		return
	}
	// Resolve the referenced course.
	co, err := ctl.client.Course.
		Query().
		Where(course.IDEQ(int(obj.Courses))).
		Only(context.Background())
	if err != nil {
		c.JSON(400, gin.H{
			"error": "Course not found",
		})
		return
	}
	// Resolve the referenced subject.
	s, err := ctl.client.Subject.
		Query().
		Where(subject.IDEQ(int(obj.Subjects))).
		Only(context.Background())
	if err != nil {
		c.JSON(400, gin.H{
			"error": "Subjects not found",
		})
		return
	}
	// Resolve the referenced subject type.
	t, err := ctl.client.SubjectType.
		Query().
		Where(subjecttype.IDEQ(int(obj.SubjectTypes))).
		Only(context.Background())
	if err != nil {
		c.JSON(400, gin.H{
			"error": "Subject Types not found",
		})
		return
	}
	// Create the course item with all three edges set.
	ci, err := ctl.client.CourseItem.
		Create().
		SetCourses(co).
		SetSubjects(s).
		SetTypes(t).
		Save(context.Background())
	if err != nil {
		c.JSON(400, gin.H{
			"error": "saving failed",
		})
		return
	}
	c.JSON(200, gin.H{
		"status": true,
		"data":   ci,
	})
}
// GetCourseItem handles GET requests to retrieve a courseitem entity.
// The path parameter is parsed as an integer ID; 400 on a bad ID, 404 when
// no matching entity exists.
// @Summary Get a courseitem entity by ID
// @Description get courseitem by ID
// @ID get-courseitem
// @Produce json
// @Param id path int true "courseitem ID"
// @Success 200 {object} ent.CourseItem
// @Failure 400 {object} gin.H
// @Failure 404 {object} gin.H
// @Failure 500 {object} gin.H
// @Router /CourseItems/{id} [get]
func (ctl *CourseItemController) GetCourseItem(c *gin.Context) {
	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
	if err != nil {
		c.JSON(400, gin.H{
			"error": err.Error(),
		})
		return
	}
	i, err := ctl.client.CourseItem.
		Query().
		Where(courseitem.IDEQ(int(id))).
		Only(context.Background())
	if err != nil {
		c.JSON(404, gin.H{
			"error": err.Error(),
		})
		return
	}
	c.JSON(200, i)
}
// ListCourseItem handles request to get a list of courseitem entities,
// eager-loading the course, subject, and type edges. Unparsable limit or
// offset query values silently fall back to the defaults (10 and 0).
// @Summary List courseitem entities
// @Description list courseitem entities
// @ID list-courseitem
// @Produce json
// @Param limit query int false "Limit"
// @Param offset query int false "Offset"
// @Success 200 {array} ent.CourseItem
// @Failure 400 {object} gin.H
// @Failure 500 {object} gin.H
// @Router /CourseItems [get]
func (ctl *CourseItemController) ListCourseItem(c *gin.Context) {
	limit := 10
	if lq := c.Query("limit"); lq != "" {
		if v, err := strconv.ParseInt(lq, 10, 64); err == nil {
			limit = int(v)
		}
	}
	offset := 0
	if oq := c.Query("offset"); oq != "" {
		if v, err := strconv.ParseInt(oq, 10, 64); err == nil {
			offset = int(v)
		}
	}
	courseitems, err := ctl.client.CourseItem.
		Query().
		WithCourses().
		WithSubjects().
		WithTypes().
		Limit(limit).
		Offset(offset).
		All(context.Background())
	if err != nil {
		c.JSON(400, gin.H{"error": err.Error()})
		return
	}
	c.JSON(200, courseitems)
}
// DeleteCourseItem handles DELETE requests to delete a courseitem entity.
// 400 on a bad ID, 404 when no entity with that ID exists.
// @Summary Delete a courseitem entity by ID
// @Description get courseitem by ID
// @ID delete-courseitem
// @Produce json
// @Param id path int true "courseitems ID"
// @Success 200 {object} gin.H
// @Failure 400 {object} gin.H
// @Failure 404 {object} gin.H
// @Failure 500 {object} gin.H
// @Router /CourseItems/{id} [delete]
func (ctl *CourseItemController) DeleteCourseItem(c *gin.Context) {
	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
	if err != nil {
		c.JSON(400, gin.H{"error": err.Error()})
		return
	}
	if err := ctl.client.CourseItem.
		DeleteOneID(int(id)).
		Exec(context.Background()); err != nil {
		c.JSON(404, gin.H{"error": err.Error()})
		return
	}
	c.JSON(200, gin.H{"result": fmt.Sprintf("ok deleted %v", id)})
}
// UpdateCourseItem handles PUT requests to update a courseitem entity.
// The bound entity's ID is overwritten by the path parameter before saving.
// @Summary Update a courseitem entity by ID
// @Description update courseitem by ID
// @ID update-courseitem
// @Accept json
// @Produce json
// @Param id path int true "courseitem ID"
// @Param courseitem body ent.CourseItem true "courseitem entity"
// @Success 200 {object} ent.CourseItem
// @Failure 400 {object} gin.H
// @Failure 500 {object} gin.H
// @Router /CourseItems/{id} [put]
func (ctl *CourseItemController) UpdateCourseItem(c *gin.Context) {
	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
	if err != nil {
		c.JSON(400, gin.H{
			"error": err.Error(),
		})
		return
	}
	obj := ent.CourseItem{}
	if err := c.ShouldBind(&obj); err != nil {
		c.JSON(400, gin.H{
			"error": "Course Item binding failed",
		})
		return
	}
	// The path parameter is authoritative for which entity is updated.
	obj.ID = int(id)
	ci, err := ctl.client.CourseItem.
		UpdateOne(&obj).
		Save(context.Background())
	if err != nil {
		c.JSON(400, gin.H{"error": "update failed"})
		return
	}
	c.JSON(200, ci)
}
// NewCourseItemController creates a CourseItem controller and registers its
// handlers on the given router.
func NewCourseItemController(router gin.IRouter, client *ent.Client) *CourseItemController {
	ci := &CourseItemController{
		client: client,
		router: router,
	}
	ci.register()
	return ci
}
// register wires the CRUD routes for course items under /CourseItems.
func (ctl *CourseItemController) register() {
	CourseItems := ctl.router.Group("/CourseItems")
	CourseItems.GET("", ctl.ListCourseItem)
	// CRUD
	CourseItems.POST("", ctl.CreateCourseItem)
	CourseItems.GET(":id", ctl.GetCourseItem)
	CourseItems.PUT(":id", ctl.UpdateCourseItem)
	CourseItems.DELETE(":id", ctl.DeleteCourseItem)
}
|
/*
* @Author: Sy.
* @Create: 2019-11-01 20:54:15
* @LastTime: 2019-11-16 17:09:31
* @LastEdit: Sy.
* @FilePath: \server\controllers\admin_controllers\admin_auth_controller.go
* @Description: 权限因子
*/
package admin_controllers
import (
"encoding/json"
"fmt"
"time"
"vue-typescript-beego-admin/server/utils"
"strconv"
cache "github.com/patrickmn/go-cache"
"vue-typescript-beego-admin/server/models"
)
// AuthController serves the admin permission-node (auth) endpoints; common
// request/response helpers come from the embedded BaseController.
type AuthController struct {
	BaseController
}
// List returns all active (status == 1) auth nodes arranged as a tree
// rooted at pid 0.
func (_this *AuthController) List() {
	filters := make([]interface{}, 0)
	filters = append(filters, "status", 1)
	result, count := models.AuthGetList(1, 1000, filters...)
	list := GetChildrens(0, result)
	_this.ajaxList(count, list)
}
// Info returns the detail of a single auth node identified by the "id"
// request parameter.
func (_this *AuthController) Info() {
	id, _ := _this.GetInt("id")
	result, _ := models.AuthGetById(id)
	// if err == nil {
	// 	_this.ajaxMsg(err.Error(), MSG_ERR)
	// }
	row := make(map[string]interface{})
	row["id"] = result.Id
	row["pid"] = result.Pid
	row["auth_name"] = result.AuthName
	row["auth_url"] = result.AuthUrl
	row["sort"] = result.Sort
	row["is_show"] = result.IsShow
	row["icon"] = result.Icon
	// NOTE(review): leftover debug print — remove once fmt is used elsewhere
	// in this file (deleting it now would leave the fmt import unused).
	fmt.Println(row)
	_this.ajaxList(0, row)
}
// Edit creates or updates auth nodes from the "list" request parameter
// (a JSON array); the actual work is delegated to EditAuths. The block of
// commented-out code below is the older single-record implementation.
func (_this *AuthController) Edit() {
	listStr := _this.GetString("list")
	// Batch add/update from the submitted list.
	_this.EditAuths(listStr)
	// auth := new(models.Auth)
	// auth.UserId = _this.userId
	// auth.Pid, _ = _this.GetInt("pid")
	// auth.AuthName = strings.TrimSpace(_this.GetString("auth_name"))
	// auth.AuthUrl = strings.TrimSpace(_this.GetString("auth_url"))
	// auth.Sort, _ = _this.GetInt("sort")
	// auth.IsShow, _ = _this.GetInt("is_show")
	// auth.Icon = strings.TrimSpace(_this.GetString("icon"))
	// auth.UpdateTime = time.Now().Unix()
	// auth.Status = 1
	// if id == 0 {
	// 	_this.ajaxMsg("找不到", MSG_ERR)
	// 	//新增
	// 	auth.CreateTime = time.Now().Unix()
	// 	auth.CreateId = _this.userId
	// 	auth.UpdateId = _this.userId
	// 	if _, err := models.AuthAdd(auth); err != nil {
	// 		_this.ajaxMsg(err.Error(), MSG_ERR)
	// 	}
	// } else {
	// 	auth.Id = id
	// 	auth.UpdateId = _this.userId
	// 	if err := auth.Update(); err != nil {
	// 		_this.ajaxMsg(err.Error(), MSG_ERR)
	// 	}
	// }
	// utils.Che.Set("menu"+strconv.Itoa(_this.user.Id), nil, cache.DefaultExpiration)
	// _this.ajaxMsg("", MSG_OK)
}
// EditAuths creates or updates auth nodes from a JSON array string.
// Each element must carry at least id, title, authUrl, pid, sort, icon,
// isShow, path, sidebarHidden, breadcrumb, component and redirect.
//
// Bug fix: the original declared `auth := new(models.Auth)` INSIDE the
// `if err != nil` branch, shadowing the outer `auth`; the freshly created
// record (with CreateId/CreateTime) was discarded and the zero/nil value
// returned by the failed lookup was mutated instead — losing the create
// metadata and risking a nil dereference.
func (_this *AuthController) EditAuths(jsonStr string) {
	auths := make([]map[string]interface{}, 0)
	if err := json.Unmarshal([]byte(jsonStr), &auths); err != nil {
		_this.ajaxMsg(err.Error(), MSG_ERR)
	}
	if len(auths) == 0 {
		_this.ajaxMsg("缺少参数", CODE_PARAMS)
	}
	for _, v := range auths {
		// JSON numbers decode as float64; convert back to int.
		id := int(v["id"].(float64))
		auth, err := models.AuthGetById(id)
		if err != nil {
			// Not found: create a new record (assign to the OUTER auth).
			auth = new(models.Auth)
			auth.CreateId = _this.userId
			auth.CreateTime = time.Now().Unix()
		}
		auth.Status = 1
		auth.AuthName = v["title"].(string)
		auth.AuthUrl = v["authUrl"].(string)
		auth.Pid = int(v["pid"].(float64))
		auth.Sort = int(v["sort"].(float64))
		auth.Icon = v["icon"].(string)
		auth.IsShow = int(v["isShow"].(float64))
		auth.Path = v["path"].(string)
		auth.SidebarHidden = int(v["sidebarHidden"].(float64))
		auth.Breadcrumb = int(v["breadcrumb"].(float64))
		auth.Component = v["component"].(string)
		auth.Redirect = v["redirect"].(string)
		auth.UpdateId = _this.userId
		auth.UpdateTime = time.Now().Unix()
		auth.Update()
	}
	_this.ajaxMsg("", MSG_OK)
}
// Del soft-deletes an auth node by setting its status to 0, then
// invalidates the cached menu for the current user.
func (_this *AuthController) Del() {
	id, _ := _this.GetInt("id")
	auth, _ := models.AuthGetById(id)
	auth.Id = id
	auth.Status = 0
	if err := auth.Update(); err != nil {
		_this.ajaxMsg(err.Error(), MSG_ERR)
	}
	// Drop the cached menu so the change is visible on next load.
	utils.Che.Set("menu"+strconv.Itoa(_this.user.Id), nil, cache.DefaultExpiration)
	_this.ajaxMsg("", MSG_OK)
}
// GetChildrens recursively builds the element-ui tree structure for all
// auth nodes whose parent id equals pid.
func GetChildrens(pid int, authList []*models.Auth) []map[string]interface{} {
	result := make([]map[string]interface{}, 0)
	for _, auth := range authList {
		if auth.Pid != pid {
			continue
		}
		node := map[string]interface{}{
			"id":            auth.Id,
			"title":         auth.AuthName,
			"authUrl":       auth.AuthUrl,
			"isShow":        auth.IsShow,
			"pid":           auth.Pid,
			"sort":          auth.Sort,
			"path":          auth.Path,
			"sidebarHidden": auth.SidebarHidden,
			"breadcrumb":    auth.Breadcrumb,
			"component":     auth.Component,
			"redirect":      auth.Redirect,
			"icon":          auth.Icon,
		}
		// Attach children only when the node actually has some.
		if children := GetChildrens(auth.Id, authList); len(children) > 0 {
			node["children"] = children
		}
		result = append(result, node)
	}
	return result
}
|
package timer
import "time"
// NewRealTimer returns a realTimer whose underlying timer fires after
// resetTime and which remembers resetTime for later resets.
func NewRealTimer(resetTime time.Duration) *realTimer {
	t := &realTimer{
		timer:     time.NewTimer(resetTime),
		resetTime: resetTime,
	}
	return t
}
// Until repeatedly invokes f, waiting resetTime between the START of one
// invocation and the next run, until stopCh is closed. The underlying
// timer is stopped on return.
func (t *realTimer)Until(f func(), stopCh <-chan struct{}) {
	defer func() {
		t.timer.Stop()
	}()
	for {
		// Non-blocking stop check so a pending stop takes priority over
		// running f one more time.
		select {
		case <-stopCh:
			return
		default:
		}
		// Reset BEFORE calling f: the interval measures start-to-start,
		// so a slow f does not stretch the period.
		t.Reset(t.resetTime)
		f()
		// Block until either stop is requested or the timer fires.
		select {
		case <-stopCh:
			return
		case <-t.C():
		}
	}
}
// realTimer wraps a time.Timer together with the interval used when
// resetting it.
type realTimer struct {
	timer *time.Timer
	resetTime time.Duration
}
// C returns the underlying timer's channel.
func (r *realTimer) C() <-chan time.Time {
	return r.timer.C
}
// Reset calls Reset() on the underlying timer and reports whether the
// timer had been active.
func (r *realTimer) Reset(d time.Duration) bool {
	return r.timer.Reset(d)
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package image_test
import (
"testing"
"github.com/google/gapid/core/image"
"github.com/google/gapid/core/math/f32"
"github.com/google/gapid/gapis/database"
)
// Interface compliance checks: fail compilation if the resolvable types
// stop satisfying database.Resolvable.
var (
	_ = database.Resolvable((*image.ConvertResolvable)(nil))
	_ = database.Resolvable((*image.ResizeResolvable)(nil))
)
// TestDifference checks image.Difference against solid-color image pairs
// with known expected difference scores.
func TestDifference(t *testing.T) {
	// fill builds a w×h RGBA_U8_NORM image where every pixel is (r,g,b,a).
	fill := func(w, h uint32, r, g, b, a byte) *image.Data {
		bytes := make([]byte, w*h*4)
		for p := 0; p < len(bytes); p += 4 {
			bytes[p+0] = r
			bytes[p+1] = g
			bytes[p+2] = b
			bytes[p+3] = a
		}
		return &image.Data{
			Width:  w,
			Height: h,
			Depth:  1,
			Bytes:  bytes,
			Format: image.RGBA_U8_NORM,
		}
	}
	for _, test := range []struct {
		name string
		a, b *image.Data
		diff float32
	}{
		{
			name: "white vs black",
			a:    fill(8, 8, 0xff, 0xff, 0xff, 0xff),
			b:    fill(8, 8, 0x00, 0x00, 0x00, 0x00),
			diff: 1.0,
		}, {
			name: "transparent-yellow vs blue",
			a:    fill(8, 8, 0xff, 0xff, 0x00, 0x00),
			b:    fill(8, 8, 0x00, 0x00, 0xff, 0xff),
			diff: 1.0,
		}, {
			name: "transparent-white vs cyan",
			a:    fill(8, 8, 0xff, 0xff, 0xff, 0x00),
			b:    fill(8, 8, 0x00, 0xff, 0xff, 0xff),
			diff: 0.5,
		}, {
			name: "transparent-purple vs transparent-purple",
			a:    fill(8, 8, 0xff, 0x00, 0xff, 0x00),
			b:    fill(8, 8, 0xff, 0x00, 0xff, 0x00),
			diff: 0.0,
		},
	} {
		diff, err := image.Difference(test.a, test.b)
		if err != nil {
			t.Errorf("Difference of %v returned error: %v", test.name, err)
			continue
		}
		// Compare with a small epsilon to tolerate float rounding.
		if f32.Abs(diff-test.diff) > 0.0000001 {
			t.Errorf("Difference of %v gave value: %v, expected: %v",
				test.name, diff, test.diff)
		}
	}
}
|
package sfAuth
import (
	"fmt"
	"log"
	"net/http"
	"sync"
)
// SfAuth is an HTTP middleware that authenticates requests through a chain
// of plugins before delegating to Next.
// NOTE(review): SvcPath and AuthType are written in ServeHTTP without
// holding Mu — confirm the handler is not used concurrently, or guard them.
type SfAuth struct {
	SvcPath    string     // service path, captured from X-Forwarded-Prefix
	AuthPlugin []Plugin   // ordered chain of auth plugins
	AuthType   AuthType   // resolved auth policy; 0 means "not yet resolved"
	Next       http.Handler
	Mu         sync.Mutex
}
// UserInfo describes an authenticated user session.
type UserInfo struct {
	Token      string
	UserName   string
	ExpireTime int64
	Tenant     string
}
// GetSvcPolicyBySvc returns the auth policy for a service. Currently a
// stub: it returns the cached AuthType when set, otherwise policy 1.
// The svc argument is not yet consulted.
func (s *SfAuth)GetSvcPolicyBySvc(svc string) (AuthType, error) {
	s.Mu.Lock()
	defer s.Mu.Unlock()
	if s.AuthType != 0 {
		return s.AuthType, nil
	}
	return 1, nil
}
// GetUserInfoByToken resolves a token to user information.
// Currently a stub that always returns an empty UserInfo.
func GetUserInfoByToken(token string) (UserInfo, error) {
	return UserInfo{}, nil
}
// NewHandler builds an SfAuth middleware wrapping next with the full,
// ordered chain of authentication plugins.
func NewHandler(next http.Handler) (*SfAuth, error) {
	chain := []Plugin{
		OpenAuthPlugin{},
		LoginAuthPlugin{},
		AuthorizationAuthPlugin{},
		ApprovalAuthPlugin{},
		RecordAuthPlugin{},
	}
	handler := &SfAuth{
		SvcPath:    "",
		AuthType:   0,
		AuthPlugin: chain,
		Next:       next,
	}
	return handler, nil
}
// ServeHTTP authenticates the request by running it through the plugin
// chain; on success it forwards to the next handler, otherwise it answers
// 401 (or 500 for policy-lookup failures).
//
// Bug fix: the original called fmt.Errorf and discarded the result (a
// go vet error) — the intent was clearly to log, so the messages are now
// emitted via log.Printf.
func (s *SfAuth) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	// Lazily capture the service path from the forwarding proxy header.
	if s.SvcPath == "" {
		s.SvcPath = req.Header.Get("X-Forwarded-Prefix")
	}
	// Lazily resolve the auth policy (0 means "unset").
	if s.AuthType == 0 {
		authType, err := s.GetSvcPolicyBySvc(s.SvcPath)
		if err != nil {
			rw.WriteHeader(500)
			rw.Write([]byte(err.Error()))
			log.Printf("get svc policy error: %s", err)
			return
		}
		s.AuthType = authType
	}
	// Try each plugin in order; the first success wins.
	for _, plugin := range s.AuthPlugin {
		authSuccess, err := plugin.Auth(s.AuthType, rw, req)
		if err != nil {
			rw.WriteHeader(401)
			rw.Write([]byte(err.Error()))
			log.Printf("%s exec auth plugin fail: %s", s.SvcPath, err)
			return
		}
		if authSuccess {
			if s.Next != nil {
				s.Next.ServeHTTP(rw, req)
			}
			return
		}
	}
	// No plugin authenticated the request.
	err := fmt.Errorf("%s auth fail", s.SvcPath)
	rw.WriteHeader(401)
	rw.Write([]byte(err.Error()))
}
|
package ontap
import "encoding/xml"
// NetappVolume models the full XML response of the ONTAP ZAPI
// volume-get-iter call. Every leaf value is kept as a string because ONTAP
// serializes all numbers/booleans as text; callers parse as needed.
type NetappVolume struct {
	XMLName xml.Name `xml:"netapp"`
	Text    string   `xml:",chardata"`
	Version string   `xml:"version,attr"`
	Xmlns   string   `xml:"xmlns,attr"`
	// Results carries the call status plus the list of volume records.
	Results struct {
		Text           string `xml:",chardata"`
		Status         string `xml:"status,attr"`
		AttributesList struct {
			Text             string `xml:",chardata"`
			// VolumeAttributes holds one entry per returned volume.
			VolumeAttributes []struct {
				Text    string `xml:",chardata"`
				Encrypt string `xml:"encrypt"`
				VolumeAutosizeAttributes struct {
					Text                   string `xml:",chardata"`
					GrowThresholdPercent   string `xml:"grow-threshold-percent"`
					IsEnabled              string `xml:"is-enabled"`
					MaximumSize            string `xml:"maximum-size"`
					MinimumSize            string `xml:"minimum-size"`
					Mode                   string `xml:"mode"`
					ShrinkThresholdPercent string `xml:"shrink-threshold-percent"`
				} `xml:"volume-autosize-attributes"`
				VolumeCompAggrAttributes struct {
					Text          string `xml:",chardata"`
					TieringPolicy string `xml:"tiering-policy"`
				} `xml:"volume-comp-aggr-attributes"`
				VolumeDirectoryAttributes struct {
					Text       string `xml:",chardata"`
					I2pEnabled string `xml:"i2p-enabled"`
					MaxDirSize string `xml:"max-dir-size"`
					RootDirGen string `xml:"root-dir-gen"`
				} `xml:"volume-directory-attributes"`
				VolumeHybridCacheAttributes struct {
					Text        string `xml:",chardata"`
					Eligibility string `xml:"eligibility"`
				} `xml:"volume-hybrid-cache-attributes"`
				// VolumeIDAttributes: identity/ownership of the volume.
				VolumeIDAttributes struct {
					Text     string `xml:",chardata"`
					AggrList struct {
						Text     string `xml:",chardata"`
						AggrName string `xml:"aggr-name"`
					} `xml:"aggr-list"`
					ContainingAggregateName string `xml:"containing-aggregate-name"`
					ContainingAggregateUuid string `xml:"containing-aggregate-uuid"`
					CreationTime            string `xml:"creation-time"`
					ExtentSize              string `xml:"extent-size"`
					Fsid                    string `xml:"fsid"`
					InstanceUuid            string `xml:"instance-uuid"`
					Name                    string `xml:"name"`
					NameOrdinal             string `xml:"name-ordinal"`
					Node                    string `xml:"node"`
					Nodes                   struct {
						Text     string `xml:",chardata"`
						NodeName string `xml:"node-name"`
					} `xml:"nodes"`
					OwningVserverName string `xml:"owning-vserver-name"`
					OwningVserverUuid string `xml:"owning-vserver-uuid"`
					ProvenanceUuid    string `xml:"provenance-uuid"`
					Style             string `xml:"style"`
					StyleExtended     string `xml:"style-extended"`
					Type              string `xml:"type"`
					Uuid              string `xml:"uuid"`
				} `xml:"volume-id-attributes"`
				VolumeInodeAttributes struct {
					Text                     string `xml:",chardata"`
					BlockType                string `xml:"block-type"`
					FilesPrivateUsed         string `xml:"files-private-used"`
					FilesTotal               string `xml:"files-total"`
					FilesUsed                string `xml:"files-used"`
					InodefilePrivateCapacity string `xml:"inodefile-private-capacity"`
					InodefilePublicCapacity  string `xml:"inodefile-public-capacity"`
					InofileVersion           string `xml:"inofile-version"`
				} `xml:"volume-inode-attributes"`
				VolumeLanguageAttributes struct {
					Text                  string `xml:",chardata"`
					IsConvertUcodeEnabled string `xml:"is-convert-ucode-enabled"`
					IsCreateUcodeEnabled  string `xml:"is-create-ucode-enabled"`
					Language              string `xml:"language"`
					NfsCharacterSet       string `xml:"nfs-character-set"`
					OemCharacterSet       string `xml:"oem-character-set"`
				} `xml:"volume-language-attributes"`
				VolumeMirrorAttributes struct {
					Text                     string `xml:",chardata"`
					IsDataProtectionMirror   string `xml:"is-data-protection-mirror"`
					IsLoadSharingMirror      string `xml:"is-load-sharing-mirror"`
					IsMoveMirror             string `xml:"is-move-mirror"`
					IsReplicaVolume          string `xml:"is-replica-volume"`
					IsSnapmirrorSource       string `xml:"is-snapmirror-source"`
					MirrorTransferInProgress string `xml:"mirror-transfer-in-progress"`
					RedirectSnapshotID       string `xml:"redirect-snapshot-id"`
				} `xml:"volume-mirror-attributes"`
				VolumePerformanceAttributes struct {
					Text                      string `xml:",chardata"`
					ExtentEnabled             string `xml:"extent-enabled"`
					FcDelegsEnabled           string `xml:"fc-delegs-enabled"`
					IsAtimeUpdateEnabled      string `xml:"is-atime-update-enabled"`
					MaxWriteAllocBlocks       string `xml:"max-write-alloc-blocks"`
					MinimalReadAhead          string `xml:"minimal-read-ahead"`
					ReadRealloc               string `xml:"read-realloc"`
					SingleInstanceDataLogging string `xml:"single-instance-data-logging"`
				} `xml:"volume-performance-attributes"`
				VolumeSecurityAttributes struct {
					Text                         string `xml:",chardata"`
					VolumeSecurityUnixAttributes struct {
						Text        string `xml:",chardata"`
						Permissions string `xml:"permissions"`
					} `xml:"volume-security-unix-attributes"`
				} `xml:"volume-security-attributes"`
				// VolumeSisAttributes: deduplication/compression savings.
				VolumeSisAttributes struct {
					Text                              string `xml:",chardata"`
					CompressionSpaceSaved             string `xml:"compression-space-saved"`
					DeduplicationSpaceSaved           string `xml:"deduplication-space-saved"`
					DeduplicationSpaceShared          string `xml:"deduplication-space-shared"`
					IsSisLoggingEnabled               string `xml:"is-sis-logging-enabled"`
					IsSisStateEnabled                 string `xml:"is-sis-state-enabled"`
					IsSisVolume                       string `xml:"is-sis-volume"`
					PercentageCompressionSpaceSaved   string `xml:"percentage-compression-space-saved"`
					PercentageDeduplicationSpaceSaved string `xml:"percentage-deduplication-space-saved"`
					PercentageTotalSpaceSaved         string `xml:"percentage-total-space-saved"`
					TotalSpaceSaved                   string `xml:"total-space-saved"`
				} `xml:"volume-sis-attributes"`
				VolumeSnaplockAttributes struct {
					Text         string `xml:",chardata"`
					SnaplockType string `xml:"snaplock-type"`
				} `xml:"volume-snaplock-attributes"`
				VolumeSnapshotAttributes struct {
					Text                           string `xml:",chardata"`
					SnapdirAccessEnabled           string `xml:"snapdir-access-enabled"`
					SnapshotCloneDependencyEnabled string `xml:"snapshot-clone-dependency-enabled"`
					SnapshotCount                  string `xml:"snapshot-count"`
				} `xml:"volume-snapshot-attributes"`
				VolumeSnapshotAutodeleteAttributes struct {
					Text                string `xml:",chardata"`
					Commitment          string `xml:"commitment"`
					DeferDelete         string `xml:"defer-delete"`
					DeleteOrder         string `xml:"delete-order"`
					DestroyList         string `xml:"destroy-list"`
					IsAutodeleteEnabled string `xml:"is-autodelete-enabled"`
					Prefix              string `xml:"prefix"`
					TargetFreeSpace     string `xml:"target-free-space"`
					Trigger             string `xml:"trigger"`
				} `xml:"volume-snapshot-autodelete-attributes"`
				// VolumeSpaceAttributes: capacity and usage figures.
				VolumeSpaceAttributes struct {
					Text                            string `xml:",chardata"`
					ExpectedAvailable               string `xml:"expected-available"`
					FilesystemSize                  string `xml:"filesystem-size"`
					IsFilesysSizeFixed              string `xml:"is-filesys-size-fixed"`
					IsSpaceGuaranteeEnabled         string `xml:"is-space-guarantee-enabled"`
					IsSpaceSloEnabled               string `xml:"is-space-slo-enabled"`
					OverProvisioned                 string `xml:"over-provisioned"`
					OverwriteReserve                string `xml:"overwrite-reserve"`
					OverwriteReserveRequired        string `xml:"overwrite-reserve-required"`
					OverwriteReserveUsed            string `xml:"overwrite-reserve-used"`
					OverwriteReserveUsedActual      string `xml:"overwrite-reserve-used-actual"`
					PercentageFractionalReserve     string `xml:"percentage-fractional-reserve"`
					PercentageSizeUsed              string `xml:"percentage-size-used"`
					PercentageSnapshotReserve       string `xml:"percentage-snapshot-reserve"`
					PercentageSnapshotReserveUsed   string `xml:"percentage-snapshot-reserve-used"`
					PhysicalUsed                    string `xml:"physical-used"`
					PhysicalUsedPercent             string `xml:"physical-used-percent"`
					Size                            string `xml:"size"`
					SizeAvailable                   string `xml:"size-available"`
					SizeAvailableForSnapshots       string `xml:"size-available-for-snapshots"`
					SizeTotal                       string `xml:"size-total"`
					SizeUsed                        string `xml:"size-used"`
					SizeUsedBySnapshots             string `xml:"size-used-by-snapshots"`
					SnapshotReserveAvailable        string `xml:"snapshot-reserve-available"`
					SnapshotReserveSize             string `xml:"snapshot-reserve-size"`
					SpaceFullThresholdPercent       string `xml:"space-full-threshold-percent"`
					SpaceGuarantee                  string `xml:"space-guarantee"`
					SpaceMgmtOptionTryFirst         string `xml:"space-mgmt-option-try-first"`
					SpaceNearlyFullThresholdPercent string `xml:"space-nearly-full-threshold-percent"`
					SpaceSlo                        string `xml:"space-slo"`
				} `xml:"volume-space-attributes"`
				VolumeStateAttributes struct {
					Text                      string `xml:",chardata"`
					BecomeNodeRootAfterReboot string `xml:"become-node-root-after-reboot"`
					ForceNvfailOnDr           string `xml:"force-nvfail-on-dr"`
					IgnoreInconsistent        string `xml:"ignore-inconsistent"`
					InNvfailedState           string `xml:"in-nvfailed-state"`
					IsClusterVolume           string `xml:"is-cluster-volume"`
					IsConstituent             string `xml:"is-constituent"`
					IsFlexgroup               string `xml:"is-flexgroup"`
					IsInconsistent            string `xml:"is-inconsistent"`
					IsInvalid                 string `xml:"is-invalid"`
					IsNodeRoot                string `xml:"is-node-root"`
					IsNvfailEnabled           string `xml:"is-nvfail-enabled"`
					IsQuiescedInMemory        string `xml:"is-quiesced-in-memory"`
					IsQuiescedOnDisk          string `xml:"is-quiesced-on-disk"`
					IsUnrecoverable           string `xml:"is-unrecoverable"`
					State                     string `xml:"state"`
				} `xml:"volume-state-attributes"`
				VolumeTransitionAttributes struct {
					Text                  string `xml:",chardata"`
					IsCftPrecommit        string `xml:"is-cft-precommit"`
					IsCopiedForTransition string `xml:"is-copied-for-transition"`
					IsTransitioned        string `xml:"is-transitioned"`
					TransitionBehavior    string `xml:"transition-behavior"`
				} `xml:"volume-transition-attributes"`
			} `xml:"volume-attributes"`
		} `xml:"attributes-list"`
		// NextTag carries the pagination cursor for the next iteration.
		NextTag struct {
			Text               string `xml:",chardata"`
			VolumeGetIterKeyTd struct {
				Text string `xml:",chardata"`
				Key0 string `xml:"key-0"`
				Key1 string `xml:"key-1"`
			} `xml:"volume-get-iter-key-td"`
		} `xml:"next-tag"`
		NumRecords string `xml:"num-records"`
	} `xml:"results"`
}
// VolumeGetIterShort is the request body for volume-get-iter limited to a
// small set of desired attribute groups (id/space/state/sis/inode).
type VolumeGetIterShort struct {
	XMLName xml.Name `xml:"netapp"`
	Text    string   `xml:",chardata"`
	Version string   `xml:"version,attr"`
	Xmlns   string   `xml:"xmlns,attr"`
	VolumeGetIter struct {
		Text       string `xml:",chardata"`
		MaxRecords string `xml:"max-records"`
		DesiredAttributes struct {
			VolumeAttributes struct {
				Text                  string `xml:",chardata"`
				VolumeIDAttributes    string `xml:"volume-id-attributes"`
				VolumeSpaceAttributes string `xml:"volume-space-attributes"`
				VolumeStateAttributes string `xml:"volume-state-attributes"`
				VolumeSisAttributes   string `xml:"volume-sis-attributes"`
				VolumeInodeAttributes string `xml:"volume-inode-attributes"`
			} `xml:"volume-attributes,omitempty"`
		} `xml:"desired-attributes"`
	} `xml:"volume-get-iter"`
}
// VolumeGetIterFull is the request body for volume-get-iter with an empty
// desired-attributes element, which asks ONTAP for all attributes.
type VolumeGetIterFull struct {
	XMLName xml.Name `xml:"netapp"`
	Text    string   `xml:",chardata"`
	Version string   `xml:"version,attr"`
	Xmlns   string   `xml:"xmlns,attr"`
	VolumeGetIter struct {
		Text       string `xml:",chardata"`
		MaxRecords string `xml:"max-records"`
		DesiredAttributes struct {
			Text string `xml:",chardata"`
		} `xml:"desired-attributes"`
	} `xml:"volume-get-iter"`
}
|
package logger
import (
"fmt"
"github.com/sirupsen/logrus"
"os"
"runtime"
)
// Logger embeds the logrus logger and adds helper methods (see Method)
// for annotating entries with caller information.
type Logger struct {
	*logrus.Logger
}
// Fields type-aliases logrus.Fields so callers of this package do not need
// to import logrus directly.
type Fields = logrus.Fields
// init configures the package-level logger L with default settings
// (info level, stdout, text format). Package-level vars are initialized
// before init runs, so L is non-nil here.
func init() {
	L.SetLevel(logrus.InfoLevel)
	L.SetOutput(os.Stdout)
	L.SetFormatter(&logrus.TextFormatter{})
}
// Method creates a log entry with predefined fields for the caller's
// object and method name. Additionally, when the logger runs at trace
// level, it records the caller's file and line in the "Caller" field.
//
// Bug fix: the original used runtime.Caller(0), which reports THIS line
// inside Method itself; skip=1 reports the caller of Method, which is
// what the documented behavior ("the caller's method file and line")
// requires.
func (l *Logger) Method(obj, method string) *logrus.Entry {
	ff := Fields{
		"Object": obj,
		"Method": method,
	}
	if l.Level == logrus.TraceLevel {
		// skip=1: one frame above Method, i.e. the calling function.
		_, callerFile, callerLine, _ := runtime.Caller(1)
		ff["Caller"] = fmt.Sprintf("%s:%d", callerFile, callerLine)
	}
	return l.WithFields(ff)
}
var (
	// L is the package-global logger; third parties may reconfigure it
	// to customize logging. Defaults are applied in init.
	L = &Logger{logrus.New()}
)
|
package main
import (
"fmt"
"log"
"net/http"
)
// myMux is a minimal custom router: it dispatches /hello to sayHello and
// answers 404 for everything else.
type myMux struct{}
// ServeHTTP implements http.Handler.
// Fix: removed the redundant trailing `return` at the end of the function
// (staticcheck S1023) — it had no effect.
func (mux *myMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/hello" {
		sayHello(w, r)
		return
	}
	http.NotFound(w, r)
}
// sayHello logs the request URL details to stdout and writes a greeting
// to the response.
func sayHello(w http.ResponseWriter, r *http.Request) {
	u := r.URL
	fmt.Println("=============URL=============")
	fmt.Println("path", u.Path)
	fmt.Println("scheme", u.Scheme)
	fmt.Fprintf(w, "hello go mux")
}
// main starts an HTTP server on :3000 using the custom myMux router.
// ListenAndServe blocks until the server fails, at which point the error
// is fatal.
func main() {
	mux := &myMux{}
	err := http.ListenAndServe(":3000", mux)
	if err != nil {
		log.Fatal("ListenAndServe:", err)
	}
}
|
package main
import (
"reflect"
"testing"
"exeTwo.devThree/functions"
)
// TestSortMain checks SortMain's output against a known-good result.
//
// Bug fix: the condition was inverted — the original failed when the
// result EQUALED the expected value. The test must fail on a mismatch,
// i.e. when DeepEqual is false.
func TestSortMain(t *testing.T) {
	var check = make(map[int][]string)
	check[1] = []string{"6. Iota June 3. Gamma March 5. Epsilon May 4. Delta April 2. Beta February 1. Alfa January"}
	for _, correctResult := range check {
		letTest := functions.SortMain(functions.ReadFromFile("text.txt"), false, true, false, false, false, false, 2)
		if !reflect.DeepEqual(letTest, correctResult) {
			t.Fatalf("Incorrect parsing %v. Expected: %v", letTest, correctResult)
		}
	}
}
// TestSortByM checks SortByM's output against a known-good result.
//
// Bug fix: inverted condition — fail on mismatch, not on match.
func TestSortByM(t *testing.T) {
	var check = make(map[int][]string)
	check[1] = []string{"6. Iota June 5. Epsilon May 4. Delta April 3. Gamma March 2. Beta February 1. Alfa January"}
	for _, correctResult := range check {
		letTest := functions.SortByM(functions.ReadFromFile("text.txt"), false, true, false, false)
		if !reflect.DeepEqual(letTest, correctResult) {
			t.Fatalf("Incorrect parsing %v. Expected: %v", letTest, correctResult)
		}
	}
}
// TestSortByu checks SortMain in unique mode against a known-good result.
//
// Bug fix: inverted condition — fail on mismatch, not on match.
func TestSortByu(t *testing.T) {
	var check = make(map[int][]string)
	check[1] = []string{"6. Iota June 5. Epsilon May 3. Gamma March 1. Alfa January 4. Delta April 2. Beta February"}
	for _, correctResult := range check {
		letTest := functions.SortMain(functions.ReadFromFile("text.txt"), false, false, false, true, false, false, 2)
		if !reflect.DeepEqual(letTest, correctResult) {
			t.Fatalf("Incorrect parsing %v. Expected: %v", letTest, correctResult)
		}
	}
}
|
package database
import (
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
)
// SetupDatabase opens a Postgres connection with the given DSN, enables
// SQL logging, migrates the Property and Rental schemas, and returns the
// wired Database handle. It panics on any failure — callers are expected
// to treat a broken database as fatal at startup.
func SetupDatabase(conn string) Database {
	db, err := gorm.Open("postgres", conn)
	if err != nil {
		panic("failed to connect database")
	}
	db.LogMode(true)
	// Migrate the schema
	if err := db.AutoMigrate(&Property{}).Error; err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&Rental{}).Error; err != nil {
		panic(err)
	}
	return Database{
		db:         db,
		Properties: &PropertyQueries{db: db},
		Rentals:    &RentalQueries{db: db},
	}
}
// Database bundles the raw gorm handle with the per-entity query helpers.
type Database struct {
	db         *gorm.DB
	Properties *PropertyQueries
	Rentals    *RentalQueries
}
|
package main
import (
log "github.com/cihub/seelog"
"fmt"
"github.com/yoheiMune/MyGoProject/002_logging/sub"
)
// main demonstrates the seelog logging library: it logs with the default
// logger, then installs a custom-formatted logger from an XML config and
// logs again (including from a sub-package).
//
// Fix: the error returned by log.ReplaceLogger was silently ignored; it
// is now checked so a rejected logger configuration is visible.
func main() {
	// Exploring SeeLog.
	// Reference: https://github.com/cihub/seelog
	// Install:   go get -u github.com/cihub/seelog
	defer log.Flush()
	// Log with the default logger.
	log.Info("Hello from SeeLog!")
	// Custom format.
	// Format reference: https://github.com/cihub/seelog/wiki/Format-reference
	config := `
	<seelog type="sync">
		<outputs formatid="main">
			<console/>
		</outputs>
		<formats>
			<format id="main" format="%Date(2006-01-02T15:04:05Z07:00) %LEVEL %RelFile(%Line)(%FuncShort) %Msg%n"/>
		</formats>
	</seelog>
	`
	logger, err := log.LoggerFromConfigAsBytes([]byte(config))
	if err != nil {
		fmt.Printf("エラーだよ:%v", err)
		panic("ロガーの初期化エラー")
	}
	if err := log.ReplaceLogger(logger); err != nil {
		panic(err)
	}
	// Emit a trial message with the new format.
	log.Debug("デバッグログ")
	// Also log from a sub-package.
	sub.A()
}
|
package main
import (
"context"
"log"
"google.golang.org/grpc"
)
// logger is a gRPC unary server interceptor that logs the full method
// name of every incoming call before delegating to the real handler.
func logger(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
	log.Printf("---> Unary interceptor: %v\n", info.FullMethod)
	return handler(ctx, req)
}
|
// Copyright 2017 The EvAlgo Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package evhtml
import (
"errors"
"testing"
)
// Test_New verifies that a freshly created Form has an empty Name.
func Test_New(t *testing.T) {
	form := NewForm()
	if form.Name == "" {
		t.Log("Test_New passed!")
	} else {
		t.Error("Test_New failed!")
	}
}
// Test_NewFormElement verifies that a new form element keeps the name it
// was created with as its Id.
func Test_NewFormElement(t *testing.T) {
	element := NewFormElement("testName", "text")
	element.AddLanguage("en-us", "testValue")
	if element.Id == "testName" {
		t.Log("Test_NewFormElement passed!")
	} else {
		t.Error("Test_NewFormElement failed!")
	}
}
// validateFunction is a test validator: it accepts exactly the value
// "testValue" and rejects anything else with an error.
func validateFunction(value interface{}) error {
	if value != "testValue" {
		return errors.New("Validation failed!")
	}
	return nil
}
// Test_NewFormElementValidate checks that an element built with a
// validation function validates its stored language value.
func Test_NewFormElementValidate(t *testing.T) {
	element := NewElementValidate("testName", "text", validateFunction, "validateFunction")
	element.AddLanguage("en-us", "testValue")
	if element.Validate("en-us") == nil {
		t.Log("Test_NewFormElementValidate passed!")
	} else {
		t.Error("Test_NewFormElementValidate failed!")
	}
}
// Test_FormAddFormElement checks that an element added to a form can be
// retrieved again by name.
func Test_FormAddFormElement(t *testing.T) {
	element := NewFormElement("testName", "text")
	element.AddLanguage("en-us", "testValue")
	form := NewForm()
	form.AddFormElement(element)
	newElement, _ := form.FormElement("testName")
	if newElement[0] == element {
		t.Log("Test_FormAddFormElement passed!")
	} else {
		t.Error("Test_FormAddFormElement failed!")
	}
}
// Test_GenerateCaptcha checks that generating a captcha populates both
// the captcha id and its URL on the form.
func Test_GenerateCaptcha(t *testing.T) {
	form := NewForm()
	form.GenerateCaptcha("Id", "Class")
	if form.CaptchaId != "" {
		t.Log("Test_GenerateCaptcha passed!")
	} else {
		t.Error("Test_GenerateCaptcha failed!")
	}
	if form.CaptchaUrl != "" {
		t.Log("Test_GenerateCaptcha passed!")
	} else {
		t.Error("Test_GenerateCaptcha failed!")
	}
}
// Test_FormElementRender checks the rendered HTML for a plain text input
// carrying a language-dependent value.
func Test_FormElementRender(t *testing.T) {
	e := NewFormElement("testName", "text")
	e.AddLanguage("en-us", "test")
	res, err := e.Render("en-us")
	if err != nil {
		t.Error(err)
	}
	if res != "<input type=\"text\" name=\"\" class=\"\" value=\"test\"/>" {
		t.Error("can not generate language dependend text test element")
	}
}
// Test_FormRenderHtml renders a full form with one validated text element
// and checks the generated HTML for that element.
func Test_FormRenderHtml(t *testing.T) {
	element := NewFormElement("testName", "text")
	element.AddLanguage("en-us", "testErrValue")
	element.NodeId = "testName"
	element.Name = "testName"
	element.Class = "testClass"
	element.AddValidationFunc(validateFunction, "validateFunction")
	form := NewForm()
	form.AddFormElement(element)
	formStringsSlice, err := form.RenderHtml("en-us")
	if err != nil {
		t.Error(err)
	}
	if formStringsSlice["FormHtmltestName"][0] == "<input id=\"testName\" type=\"text\" name=\"testName\" class=\"testClass\" value=\"testErrValue\"/>" {
		t.Log("Test_FormRender passed!")
	} else {
		t.Error("Test_FormRender failed!")
	}
}
// Test_FormElementTypeCheckbox renders a checkbox element both unchecked
// and checked. The failure branches are commented out pending
// investigation (see the todo markers), so mismatches currently pass
// silently.
func Test_FormElementTypeCheckbox(t *testing.T) {
	element := NewFormElement("id", "checkbox")
	element.AddLanguage("en-us", "")
	rEle, err := element.Render("en-us")
	if err != nil {
		t.Error(err)
	}
	if rEle == "<input type=\"checkbox\" name=\"\" class=\"\" value=\"\"/>" {
		t.Log("Test_FormElementTypeCheckbox passed!")
	} else {
		// todo check this error
		//t.Error("Test_FormElementTypeCheckbox failed!")
	}
	element.Checked = true
	rEle, err = element.Render("en-us")
	if err != nil {
		t.Error(err)
	}
	if rEle == "<input type=\"checkbox\" name=\"\" class=\"\" value=\"\" checked=\"checked\"/>" {
		t.Log("Test_FormElementTypeCheckbox passed!")
	} else {
		// todo check this error
		// t.Error("Test_FormElementTypeCheckbox failed!")
	}
}
// generateTestForm builds a representative POST form (captcha, validated
// text field, file field, send and reset buttons) used by the template
// generation tests below.
func generateTestForm() *Form {
	lang := "en-us"
	// create new form element
	Form := NewForm()
	Form.Action = ""
	Form.Method = "POST"
	Form.Name = "form"
	// captcha section
	Form.Captcha = true
	Form.CaptchaImgName = "CaptchaImg"
	Form.CaptchaName = "HiddenCaptchaIdField"
	Form.CaptchaResultName = "CaptchaInputField"
	Form.CaptchaUrl = "/security/captcha"
	// text field section
	Text := NewFormElement("Test", "text")
	Text.AddLanguage(lang, "")
	Text.Name = "name"
	Text.Class = "css_class"
	Text.ValidationFunc = ValidateStringNotEmpty
	Form.AddFormElement(Text)
	// file field section
	f := NewFormElement("File", "file")
	f.AddLanguage(lang, "")
	f.Name = "file"
	f.Class = "css_class"
	Form.AddFormElement(f)
	// create send button and add to the form
	Send := NewFormElement("Send", "submit")
	Send.AddLanguage(lang, "Save")
	Form.AddFormElement(Send)
	// create reset button and add to the form
	Reset := NewFormElement("Reset", "reset")
	Reset.AddLanguage("en-us", "Reset")
	Form.AddFormElement(Reset)
	return Form
}
// Test_FormGenerateTemplate smoke-tests template generation (output is
// only logged, not asserted).
func Test_FormGenerateTemplate(t *testing.T) {
	form := generateTestForm()
	t.Log(form.GenerateTemplate("en-us"))
}
// Test_FormGenerateTemplateFile writes the generated template to a file
// and fails on any I/O or generation error.
func Test_FormGenerateTemplateFile(t *testing.T) {
	form := generateTestForm()
	err := form.GenerateTemplateFile("en-us", "/tmp/form.tmpl")
	if err != nil {
		t.Error(err)
	}
}
// Test_CreateForm smoke-tests HTML rendering of the full test form.
func Test_CreateForm(t *testing.T) {
	form := generateTestForm()
	t.Log(form.RenderHtml("en-us"))
}
|
package algorithm
import (
"fmt"
"testing"
)
// 描述
// 模拟筛选出把可以淘汰的服务器
// 用二维数组表示当前服务器的依赖关系
// 用一维数组表示提供需要淘汰的服务器
// 通过算法淘汰可以淘汰的服务器序列
// 规则:如果提供淘汰服务器之外还存在一定依赖关系则无法删除
// 例如:依赖服务器序列号:[[0,1,2], [0,4], [5,6]] 说明:0,1,2相互依赖,同时 0,4号机器也相互依赖,5,6也相互依赖
// 此时提供待淘汰服务器序列数组:[0,1,2,5,6] 因 0,1,2,4 相互依赖而 4 并非在淘汰列表中 所以 0,1,2 无法直接淘汰,5,6 因在一组依赖下,没有其他依赖
// 所以 5,6 可以 淘汰,最终返回可淘汰服务器序列数组:[5,6]
// toBeEliminatedList is the candidate set of servers to retire.
var toBeEliminatedList = []int{0, 1, 2, 5, 6}
// relationServers holds the dependency groups; servers within one group
// depend on each other.
var relationServers [][]int
func init() {
	// Initialize the dependency groups.
	rss := make([][]int, 0)
	rss = append(rss, []int{0, 1, 2})
	rss = append(rss, []int{0, 4})
	rss = append(rss, []int{5, 6})
	relationServers = rss
}
// TestRelationServer exercises EliminatedServers with the sample data;
// it only logs inputs and has no assertions yet.
func TestRelationServer(t *testing.T) {
	t.Log(relationServers)
	t.Log(toBeEliminatedList)
	EliminatedServers(toBeEliminatedList)
}
// EliminatedServers is intended to return the subset of list that can be
// safely retired. NOTE(review): the implementation is incomplete — it
// builds the union map and always returns nil.
func EliminatedServers(list []int) []int {
	// Step 1: convert the dependency groups into a union map.
	UnionToMap()
	return nil
}
// UnionToMap maps each server id to the union of all dependency groups it
// participates in.
// NOTE(review): entries are appended without deduplication, so ids that
// appear in multiple groups accumulate duplicates — confirm whether
// callers require unique values. The fmt.Println is leftover debug output.
func UnionToMap() map[int][]int {
	unionMap := make(map[int][]int)
	for i := 0; i < len(relationServers); i++ {
		y := relationServers[i]
		for j := 0; j < len(y); j++ {
			s := y[j]
			um := unionMap[s]
			if um == nil {
				// First time we see this server: seed with its group.
				unionMap[s] = append(unionMap[s], y...)
			} else {
				// Already known: extend every related server's union.
				for _, vv := range um {
					unionMap[vv] = append(unionMap[vv], y...)
				}
			}
		}
	}
	fmt.Println(unionMap)
	return unionMap
}
// func hasRelation(y int, rrs []int) (bool, []int) {
// flag := false
// r := make([]int, 0)
// for index, rss := range relationServers {
// for _, rs := range rss {
// if rs == y {
// if rrs == nil || len(rrs) == 0 {
// flag = true
// r = append(r, index)
// } else {
// var f = true
// for _, c := range rrs {
// if index == c {
// f = false
// break
// }
// }
// if f {
// flag = true
// rrs = append(rrs, index)
// r = rrs
// }
// }
// break
// }
// }
// }
// return flag, r
// }
|
package currency
import (
"golang.org/x/text/currency"
)
// ConstantRates performs no real currency conversion: it only accepts
// conversions where the source and target currency are identical, and
// returns an error otherwise.
type ConstantRates struct{}
// NewConstantRates returns a ready-to-use ConstantRates instance.
func NewConstantRates() *ConstantRates {
	return new(ConstantRates)
}
// GetRate returns 1 when from and to parse to the same ISO 4217 currency.
// Invalid codes yield the parse error; differing currencies yield a
// ConversionNotFoundError.
func (r *ConstantRates) GetRate(from string, to string) (float64, error) {
	fromUnit, err := currency.ParseISO(from)
	if err != nil {
		return 0, err
	}
	toUnit, err := currency.ParseISO(to)
	if err != nil {
		return 0, err
	}
	// Compare canonical string forms so aliases normalize consistently.
	if fromUnit.String() != toUnit.String() {
		return 0, ConversionNotFoundError{FromCur: fromUnit.String(), ToCur: toUnit.String()}
	}
	return 1, nil
}
// GetRates returns nil: ConstantRates holds no rate table.
// NOTE(review): the *map return type is unusual in Go (maps are already
// reference types) but is kept to preserve the interface.
func (r *ConstantRates) GetRates() *map[string]map[string]float64 {
	return nil
}
|
package main
import (
"compiler/evaluator"
"compiler/lexer"
"compiler/object"
"compiler/parser"
"fmt"
"io"
"io/ioutil"
"os"
)
// main reads the source file named by the first CLI argument, parses and
// evaluates it, and prints the result of evaluation (if any) to stdout.
//
// Bug fix: on a read error the original only printed the message and then
// continued with nil content, silently evaluating an empty program. It
// now reports to stderr and exits with a non-zero status.
func main() {
	content, e := ioutil.ReadFile(os.Args[1])
	if e != nil {
		io.WriteString(os.Stderr, fmt.Sprintf("Error: %q", e.Error()))
		os.Exit(1)
	}
	sourceCode := string(content)
	l := lexer.New(sourceCode)
	p := parser.New(l)
	program := p.ParseProgram()
	env := object.NewEnvironment()
	evaluated := evaluator.Eval(program, env)
	if evaluated != nil {
		io.WriteString(os.Stdout, evaluated.Inspect())
	}
}
|
package dto
import (
"github.com/shopspring/decimal"
"go-resk/src/entity/service_flag"
"time"
)
// AccountCreatedDTO carries the parameters for creating an account.
type AccountCreatedDTO struct {
	UserId      string `validate:"required"`
	Username    string `validate:"required"`
	AccountName string `validate:"required"`
	AccountType int
	CurrencyCode service_flag.CurrencyCode
	Amount string `validate:"numeric"` // initial balance, numeric string
}
// AccountTransferDTO describes a transfer between two accounts.
type AccountTransferDTO struct {
	TradeNo     string                  `validate:"required"`         // trade number: globally unique identifier
	TradeBody   TradeParticipator       `validate:"required"`         // initiating party of the trade
	TradeTarget TradeParticipator       `validate:"required"`         // counterparty of the trade
	AmountStr   string                  `validate:"required,numeric"` // trade amount as a numeric string
	Amount      decimal.Decimal         ``                            // trade amount, parsed
	ChangeType  service_flag.ChangeType `validate:"required,numeric"` // flow type: 0 account creation, >0 income, <0 expense (user-defined)
	ChangeFlag  service_flag.ChangeFlag `validate:"required,numeric"` // direction flag: -1 outgoing, 1 incoming (enum)
	Decs        string                  ``                            // trade description
}
// TradeParticipator identifies one party of a trade.
type TradeParticipator struct {
	AccountNo string `validate:"required"` // account number (account ID)
	UserId    string `validate:"required"` // user id
	Username  string `validate:"required"` // user name (original comment said "user id" — likely copy-paste)
}
// AccountDTO is the transfer object for an account.
type AccountDTO struct {
	AccountNo    string                    // account number, unique identifier of the account
	AccountName  string                    // short human-readable name, e.g. "xxx points", "xxx wallet"
	AccountType  int                       // account category: points, membership card, wallet, red packet, ...
	CurrencyCode service_flag.CurrencyCode // currency code: CNY, EUR, USD, ...
	UserId       string                    // id of the user owning the account
	Username     string                    // name of the user owning the account
	Balance      decimal.Decimal           // available balance
	Status       int                       // account status: 0 initializing, 1 enabled, 2 disabled
	CreatedAt    time.Time                 // creation time
	UpdatedAt    time.Time                 // last update time
}
// CopyTo copies every field of the receiver into target.
func (src *AccountDTO) CopyTo(target *AccountDTO) {
	// A whole-struct assignment copies all fields at once — exactly what
	// the previous field-by-field copy did.
	*target = *src
}
// AccountLogDTO is one entry of an account's transaction log (flow record).
type AccountLogDTO struct {
	LogNo           string                  // log number: globally unique identifier
	TradeNo         string                  // trade number: globally unique identifier
	AccountNo       string                  // account number (account id)
	TargetAccountNo string                  // counterparty account number
	UserId          string                  // user id
	Username        string                  // user name
	TargetUserId    string                  // counterparty user id
	TargetUsername  string                  // counterparty user name
	Amount          decimal.Decimal         // amount involved in this trade
	Balance         decimal.Decimal         // balance after this trade
	ChangeType      service_flag.ChangeType // flow type: 0 account creation, >0 income, <0 expense (user-defined)
	ChangeFlag      service_flag.ChangeFlag // direction flag: -1 outgoing, 1 incoming (enum)
	Status          int                     // trade status
	Decs            string                  // trade description
	CreatedAt       time.Time               // creation time
}
|
package abios
import (
"net/url"
"sync"
"time"
)
// Default values for the outgoing rate and size of request buffer.
// NOTE(review): snake_case constant names are un-idiomatic Go, but renaming
// would touch every user in the package, so they are kept as-is.
const (
	default_requests_per_second uint = 5
	default_requests_per_minute uint = 300
	// Buffer one minutes worth of requests (this can not be changed at runtime)
	default_request_buffer_size = default_requests_per_minute
)
// Parameters maps a key (string) to a list of values ([]string).
// It is layout-compatible with url.Values (see encode).
type Parameters map[string][]string
// Add appends a value to the list associated with the key.
func (p Parameters) Add(key, value string) {
	values := p[key] // nil for a missing key; append then allocates
	p[key] = append(values, value)
}
// Del removes a key from the Parameters.
//
// The previous implementation assigned an empty slice, which left the key
// present in the map (contradicting this doc comment) and accumulated dead
// entries. Deleting the entry yields the same encode() output (url.Values
// emits nothing for a key with no values) while actually removing the key;
// Add/Set keep working because appending to a nil slice allocates a new one.
func (p Parameters) Del(key string) {
	delete(p, key)
}
// Set replaces any existing values for key with the single given value,
// leaving the list at length 1 — the same result as Del followed by Add.
func (p Parameters) Set(key, value string) {
	p[key] = []string{value}
}
// encode formats the parameters according to url.Values.Encode
// (sorted keys, percent-escaped "key=value" pairs joined by '&').
func (p Parameters) encode() string {
	return url.Values(p).Encode()
}
// request is a logical container that groups which endpoint (as a complete url) to
// target with what parameters as well as a channel on which the result will be available.
type request struct {
	url    string     // complete endpoint URL
	params Parameters // query parameters to send
	ch     chan result // receives exactly one result for this request
}
// result holds the returned data of an API request.
type result struct {
	body []byte // raw response body (nil on error)
	err  error  // non-nil if the request failed
}
// requestHandler buffers requests and sends them out at a user-specified rate.
//
// NOTE(review): requests_per_second/minute are written by setRate and read
// by the dispatcher goroutine without synchronization — confirm callers only
// configure the rate before traffic starts.
type requestHandler struct {
	requests_per_second uint             // How many requests can be performed per second.
	requests_per_minute uint             // How many requests can be performed per minute.
	queue               chan *request    // The queue of requests.
	override            responseOverride // Do we need to override the expected responses?
}
// responseOverride is a struct containing the logic of overriding responses.
// Used by e.g authenticator to indicate that something has gone wrong.
type responseOverride struct {
	override bool   // Should we override the response?
	data     result // The data we should return instead.
}
// addRequest enqueues a request for url with the given parameters and
// returns the channel on which its result will eventually be delivered.
// It blocks if the queue buffer is full.
func (r *requestHandler) addRequest(url string, params Parameters) chan result {
	ch := make(chan result)
	r.queue <- &request{url: url, params: params, ch: ch}
	return ch
}
// newRequestHandler creates a requestHandler with the package defaults and
// starts its dispatcher goroutine.
func newRequestHandler() *requestHandler {
	handler := &requestHandler{
		requests_per_second: default_requests_per_second,
		requests_per_minute: default_requests_per_minute,
		queue:               make(chan *request, default_request_buffer_size),
		override:            responseOverride{override: false, data: result{}},
	}
	go handler.dispatcher()
	return handler
}
// setRate sets the outgoing rate according to the given parameters. 0 or less means do nothing.
// The per-second rate is clamped to the per-minute rate so the two stay consistent.
//
// NOTE(review): these fields are read concurrently by dispatcher without a
// lock; calling setRate while requests are flowing is a data race.
func (r *requestHandler) setRate(second, minute uint) {
	if 0 < second {
		r.requests_per_second = second
	}
	if 0 < minute {
		r.requests_per_minute = minute
	}
	// Make sure they are consistent
	if r.requests_per_second > r.requests_per_minute {
		r.requests_per_second = r.requests_per_minute
	}
}
// resetable_counter is a mutex-guarded counter that can be reset to zero.
type resetable_counter struct {
	count uint       // current value; guarded by mutex
	mutex sync.Mutex // protects count
}
// add increases the counter by i under the lock.
func (r *resetable_counter) add(i uint) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	r.count += i
}
// increment adds one to the counter.
func (r *resetable_counter) increment() {
	r.add(1)
}
// get returns the current counter value under the lock.
func (r *resetable_counter) get() uint {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	return r.count
}
// reset sets the counter back to zero under the lock.
func (r *resetable_counter) reset() {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	r.count = 0
}
// dispatcher is the rate-limiting pump: every second it drains up to
// requests_per_second queued requests, bounded by the remaining per-minute
// budget tracked in counter, which is reset every minute.
//
// NOTE(review): the budget check and the sends run in a spawned goroutine,
// so two consecutive second-ticks can read the same counter value before
// either increments it — the per-minute budget can be slightly exceeded
// under load; confirm whether that is acceptable.
// NOTE(review): requests_per_second/minute are read here without
// synchronization while setRate may write them concurrently.
func (r *requestHandler) dispatcher() {
	var counter resetable_counter
	ticker_second := time.NewTicker(time.Second)
	ticker_minute := time.NewTicker(time.Minute)
	for {
		select {
		//case <-ticker_day.C: // Example of how to add more time-frames
		//	// Allow for more requests!
		//	requests_today = 0
		case <-ticker_minute.C:
			//if requests_today < r.requests_per_day // Also example
			// Allow for more requests this minute if we still have requests left today
			counter.reset()
		case <-ticker_second.C:
			// Allow for more requests this second if we still have requests left this minute
			if counter.get() < r.requests_per_minute {
				go func() {
					number_to_send := r.requests_per_second
					// If there are less requests left this minute than the specified rate per second
					// then send the lesser amount.
					left_this_minute := r.requests_per_minute - counter.get() // requests left this minute
					if left_this_minute < number_to_send {
						number_to_send = left_this_minute
					}
					// Send the requests in a non-blocking way, so in case the queue is empty we break
					// the loop. I.e never create more routines than what is in the queue
				RequestLoop:
					for i := uint(0); i < number_to_send; i++ {
						select {
						case req := <-r.queue:
							// Each dequeued request is served on its own goroutine
							// so a slow endpoint does not stall the dispatch loop.
							go func(currentRequest *request) {
								re := result{}
								// Do we have to override the response?
								if r.override.override {
									currentRequest.ch <- r.override.data
								} else {
									_, re.body, re.err = performRequest(currentRequest.url, currentRequest.params)
									currentRequest.ch <- re
								}
							}(req)
							counter.increment()
						default:
							// The default case is when there are no more requests in the channel, in
							// which case we break the loop
							break RequestLoop
						}
					}
				}()
			}
		}
	}
}
|
package types
import (
"github.com/irisnet/irishub/modules/auth"
"github.com/irisnet/irishub/types"
)
// AccountInfo bundles a local key-store account (name/password/seed) with
// its on-chain state (account number, sequence) and address.
type AccountInfo struct {
	LocalAccountName string `json:"name"`
	Password         string `json:"password"`
	AccountNumber    string `json:"account_number"`
	Sequence         string `json:"sequence"`
	Address          string `json:"address"`
	AccountName      string `json:"account_name"`
	Seed             string `json:"seed"`
}

// AccountInfoRes is the response shape for an account query.
type AccountInfoRes struct {
	AccountNumber string   `json:"account_number"`
	Sequence      string   `json:"sequence"`
	Address       string   `json:"address"`
	Coins         []string `json:"coins"`
}

// BaseTx carries the common fields every transaction request needs.
type BaseTx struct {
	LocalAccountName string `json:"name"`
	Password         string `json:"password"`
	ChainID          string `json:"chain_id"`
	AccountNumber    string `json:"account_number"`
	Sequence         string `json:"sequence"`
	Gas              string `json:"gas"`
	Fees             string `json:"fee"`
	Memo             string `json:"memo"`
}

// TransferTxReq is a coin-transfer request.
type TransferTxReq struct {
	Amount string `json:"amount"`
	Sender string `json:"sender"`
	BaseTx BaseTx `json:"base_tx"`
}

// ErrorRes is the generic error payload returned by the REST API.
type ErrorRes struct {
	RestAPI      string `json:"rest api"`
	Code         int    `json:"code"`
	ErrorMessage string `json:"err message"`
}

// KeyCreateReq requests creation (or recovery, when Seed is set) of a key.
type KeyCreateReq struct {
	Name     string `json:"name"`
	Password string `json:"password"`
	Seed     string `json:"seed"`
}

// KeyCreateRes is the response to a key-creation request.
type KeyCreateRes struct {
	Address string `json:"address"`
	Seed    string `json:"seed"`
}

// SignTxReq asks the node to sign a transaction with a local key.
type SignTxReq struct {
	Tx            PostTx `json:"tx"`
	Name          string `json:"name"`
	Password      string `json:"password"`
	ChainID       string `json:"chain_id"`
	AccountNumber string `json:"account_number"`
	Sequence      string `json:"sequence"`
	AppendSig     bool   `json:"append_sig"`
}

// PostTxReq wraps a fully-formed standard transaction for broadcast.
type PostTxReq struct {
	Tx auth.StdTx `json:"tx"`
}

// GenSignedTxDataRes pairs a generated signed-tx payload with its channel number.
type GenSignedTxDataRes struct {
	Res     string
	ChanNum int
}

// PostTx is the JSON form of a transaction: messages, fee, signatures, memo.
type PostTx struct {
	Msgs       []TxDataInfo   `json:"msg"`
	Fee        StdFee         `json:"fee"`
	Signatures []StdSignature `json:"signatures"`
	Memo       string         `json:"memo"`
}

// StdFee is the fee block of a transaction.
type StdFee struct {
	Amount types.Coins `json:"amount"`
	Gas    string      `json:"gas"`
}

// StdSignature is one signature entry of a transaction.
type StdSignature struct {
	PubKey        PubKey `json:"pub_key"` // optional
	Signature     string `json:"signature"`
	AccountNumber string `json:"account_number"`
	Sequence      string `json:"sequence"`
}

// PubKey is an amino-style typed public key.
type PubKey struct {
	Type  string `json:"type"` // optional
	Value string `json:"value"`
}

// KeyInfo describes a key stored in the key store.
type KeyInfo struct {
	PubKey  string `json:"pub_key"`
	Address string `json:"address"`
	Name    string `json:"name"`
	KeyType string `json:"type"`
}

// TxDataRes is a typed wrapper around a decoded transaction.
type TxDataRes struct {
	Type  string `json:"type"`
	Value PostTx `json:"value"`
}

// TxDataInfo is a typed wrapper around one transaction message.
type TxDataInfo struct {
	Type  string      `json:"type"`
	Value TxDataValue `json:"value"`
}

// TxDataValue holds the inputs/outputs of a bank-send style message.
type TxDataValue struct {
	Input  []InOutPutData `json:"inputs"`
	Output []InOutPutData `json:"outputs"`
}

// InOutPutData is one address/coins pair of a send message.
type InOutPutData struct {
	Address string      `json:"address"`
	Amount  types.Coins `json:"coins"`
}

// TxBroadcast wraps a transaction for the broadcast endpoint.
type TxBroadcast struct {
	Tx PostTx `json:"tx"`
}

// TestPressData tracks progress counters for a load test.
type TestPressData struct {
	AccountIndex int
	SuccessIndex int
}

// AccountTestPrivateInfo is a test account's key material and chain state.
type AccountTestPrivateInfo struct {
	PrivateKey    [32]byte
	PubKey        []byte
	Addr          string
	AccountNumber uint64
	Sequence      uint64
}

// MsgToSign carries the key material and chain state needed to sign a message.
type MsgToSign struct {
	PrivateKey    [32]byte
	PubKey        []byte
	Addr          string
	AccountNumber uint64
	Sequence      uint64
}
|
package def
// Identifiers for this submodule.
const (
	SubModName = "network" // submodule name
	BlockChain = "xuper"   // blockchain implementation name
)
|
package agent
import (
"encoding/json"
"fmt"
"os"
"testing"
"github.com/buildkite/agent/env"
"github.com/stretchr/testify/assert"
)
// TestPipelineParserParsesYaml checks that YAML steps are parsed and that
// environment variables are interpolated into label values.
func TestPipelineParserParsesYaml(t *testing.T) {
	environ := env.FromSlice([]string{`ENV_VAR_FRIEND="friend"`})
	result, err := PipelineParser{
		Filename: "awesome.yml",
		Pipeline: []byte("steps:\n - label: \"hello ${ENV_VAR_FRIEND}\""),
		Env:      environ}.Parse()
	assert.NoError(t, err)
	j, err := json.Marshal(result)
	// The Marshal error was previously ignored; a marshal failure would
	// otherwise surface only as a confusing string mismatch below.
	assert.NoError(t, err)
	assert.Equal(t, `{"steps":[{"label":"hello \"friend\""}]}`, string(j))
}
// TestPipelineParserParsesYamlWithNoInterpolation checks that ${...}
// references are left untouched when NoInterpolation is set.
func TestPipelineParserParsesYamlWithNoInterpolation(t *testing.T) {
	result, err := PipelineParser{
		Filename:        "awesome.yml",
		Pipeline:        []byte("steps:\n - label: \"hello ${ENV_VAR_FRIEND}\""),
		NoInterpolation: true,
	}.Parse()
	assert.NoError(t, err)
	// NOTE(review): the json.Marshal error is not checked here.
	j, err := json.Marshal(result)
	assert.Equal(t, `{"steps":[{"label":"hello ${ENV_VAR_FRIEND}"}]}`, string(j))
}

// TestPipelineParserSupportsYamlMergesAndAnchors checks that YAML anchors
// (&), references (*) and merge keys (<<) are expanded in the output.
func TestPipelineParserSupportsYamlMergesAndAnchors(t *testing.T) {
	complexYAML := `---
base_step: &base_step
  type: script
  agent_query_rules:
    - queue=default
steps:
  - <<: *base_step
    name: ':docker: building image'
    command: docker build .
    agents:
      queue: default`
	result, err := PipelineParser{
		Filename: "awesome.yml",
		Pipeline: []byte(complexYAML)}.Parse()
	assert.NoError(t, err)
	j, err := json.Marshal(result)
	assert.Equal(t, `{"base_step":{"agent_query_rules":["queue=default"],"type":"script"},"steps":[{"agent_query_rules":["queue=default"],"agents":{"queue":"default"},"command":"docker build .","name":":docker: building image","type":"script"}]}`, string(j))
}

// TestPipelineParserReturnsYamlParsingErrors checks the error message for
// syntactically invalid YAML.
func TestPipelineParserReturnsYamlParsingErrors(t *testing.T) {
	_, err := PipelineParser{Filename: "awesome.yml", Pipeline: []byte("steps: %blah%")}.Parse()
	assert.Error(t, err, `Failed to parse awesome.yml: found character that cannot start any token`, fmt.Sprintf("%s", err))
}

// TestPipelineParserReturnsJsonParsingErrors checks the error message for
// syntactically invalid JSON.
func TestPipelineParserReturnsJsonParsingErrors(t *testing.T) {
	_, err := PipelineParser{Filename: "awesome.json", Pipeline: []byte("{")}.Parse()
	assert.Error(t, err, `Failed to parse awesome.json: line 1: did not find expected node content`, fmt.Sprintf("%s", err))
}
// TestPipelineParserParsesJson checks that a .json filename is parsed as
// JSON (leading whitespace tolerated) with interpolation applied.
func TestPipelineParserParsesJson(t *testing.T) {
	environ := env.FromSlice([]string{`ENV_VAR_FRIEND="friend"`})
	result, err := PipelineParser{
		Filename: "thing.json",
		Pipeline: []byte("\n\n \n { \"foo\": \"bye ${ENV_VAR_FRIEND}\" }\n"),
		Env:      environ}.Parse()
	assert.NoError(t, err)
	j, err := json.Marshal(result)
	assert.Equal(t, `{"foo":"bye \"friend\""}`, string(j))
}

// TestPipelineParserParsesJsonObjects checks JSON-object detection when no
// filename is given.
func TestPipelineParserParsesJsonObjects(t *testing.T) {
	environ := env.FromSlice([]string{`ENV_VAR_FRIEND="friend"`})
	result, err := PipelineParser{Pipeline: []byte("\n\n \n { \"foo\": \"bye ${ENV_VAR_FRIEND}\" }\n"), Env: environ}.Parse()
	assert.NoError(t, err)
	j, err := json.Marshal(result)
	assert.Equal(t, `{"foo":"bye \"friend\""}`, string(j))
}

// TestPipelineParserParsesJsonArrays checks JSON-array detection when no
// filename is given.
func TestPipelineParserParsesJsonArrays(t *testing.T) {
	environ := env.FromSlice([]string{`ENV_VAR_FRIEND="friend"`})
	result, err := PipelineParser{Pipeline: []byte("\n\n \n [ { \"foo\": \"bye ${ENV_VAR_FRIEND}\" } ]\n"), Env: environ}.Parse()
	assert.NoError(t, err)
	j, err := json.Marshal(result)
	assert.Equal(t, `[{"foo":"bye \"friend\""}]`, string(j))
}

// TestPipelineParserPreservesBools checks YAML booleans stay booleans.
func TestPipelineParserPreservesBools(t *testing.T) {
	result, err := PipelineParser{Pipeline: []byte("steps:\n - trigger: hello\n async: true")}.Parse()
	assert.Nil(t, err)
	j, err := json.Marshal(result)
	assert.Equal(t, `{"steps":[{"async":true,"trigger":"hello"}]}`, string(j))
}

// TestPipelineParserPreservesInts checks YAML integers stay integers.
func TestPipelineParserPreservesInts(t *testing.T) {
	result, err := PipelineParser{Pipeline: []byte("steps:\n - label: hello\n parallelism: 10")}.Parse()
	assert.Nil(t, err)
	j, err := json.Marshal(result)
	assert.Equal(t, `{"steps":[{"label":"hello","parallelism":10}]}`, string(j))
}

// TestPipelineParserPreservesNull checks YAML ~ becomes JSON null.
func TestPipelineParserPreservesNull(t *testing.T) {
	result, err := PipelineParser{Pipeline: []byte("steps:\n - wait: ~")}.Parse()
	assert.Nil(t, err)
	j, err := json.Marshal(result)
	assert.Equal(t, `{"steps":[{"wait":null}]}`, string(j))
}

// TestPipelineParserPreservesFloats checks YAML floats stay floats.
func TestPipelineParserPreservesFloats(t *testing.T) {
	result, err := PipelineParser{Pipeline: []byte("steps:\n - trigger: hello\n llamas: 3.142")}.Parse()
	assert.Nil(t, err)
	j, err := json.Marshal(result)
	assert.Equal(t, `{"steps":[{"llamas":3.142,"trigger":"hello"}]}`, string(j))
}

// TestPipelineParserHandlesDates checks RFC3339 timestamps survive as strings.
func TestPipelineParserHandlesDates(t *testing.T) {
	result, err := PipelineParser{Pipeline: []byte("steps:\n - trigger: hello\n llamas: 2002-08-15T17:18:23.18-06:00")}.Parse()
	assert.Nil(t, err)
	j, err := json.Marshal(result)
	assert.Equal(t, `{"steps":[{"llamas":"2002-08-15T17:18:23.18-06:00","trigger":"hello"}]}`, string(j))
}
// TestPipelineParserInterpolatesKeysAsWellAsValues checks that ${...}
// references are substituted in map keys, not just values.
func TestPipelineParserInterpolatesKeysAsWellAsValues(t *testing.T) {
	var pipeline = `{
		"env": {
			"${FROM_ENV}TEST1": "MyTest",
			"TEST2": "${FROM_ENV}"
		}
	}`
	var decoded struct {
		Env map[string]string `json:"env"`
	}
	environ := env.FromSlice([]string{`FROM_ENV=llamas`})
	result, err := PipelineParser{Pipeline: []byte(pipeline), Env: environ}.Parse()
	if err != nil {
		t.Fatal(err)
	}
	err = decodeIntoStruct(&decoded, result)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, `MyTest`, decoded.Env["llamasTEST1"])
	assert.Equal(t, `llamas`, decoded.Env["TEST2"])
}

// TestPipelineParserLoadsGlobalEnvBlockFirst checks that the pipeline's own
// "env" block is applied before interpolating the rest of the document, and
// that it can itself reference shell-provided variables.
func TestPipelineParserLoadsGlobalEnvBlockFirst(t *testing.T) {
	var pipeline = `{
		"env": {
			"TEAM1": "England",
			"TEAM2": "Australia",
			"HEADLINE": "${TEAM1} smashes ${TEAM2} to win the ashes in ${YEAR_FROM_SHELL}!!"
		},
		"steps": [{
			"command": "echo ${HEADLINE}"
		}]
	}`
	var decoded struct {
		Env   map[string]string `json:"env"`
		Steps []struct {
			Command string `json:"command"`
		} `json:"steps"`
	}
	environ := env.FromSlice([]string{`YEAR_FROM_SHELL=1912`})
	result, err := PipelineParser{Pipeline: []byte(pipeline), Env: environ}.Parse()
	if err != nil {
		t.Fatal(err)
	}
	err = decodeIntoStruct(&decoded, result)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, `England`, decoded.Env["TEAM1"])
	assert.Equal(t, `England smashes Australia to win the ashes in 1912!!`, decoded.Env["HEADLINE"])
	assert.Equal(t, `echo England smashes Australia to win the ashes in 1912!!`, decoded.Steps[0].Command)
}
// decodeIntoStruct round-trips from through JSON into into, giving typed
// access to an arbitrarily-shaped parse result.
func decodeIntoStruct(into interface{}, from interface{}) error {
	encoded, err := json.Marshal(from)
	if err != nil {
		return err
	}
	return json.Unmarshal(encoded, into)
}
// TestPipelineParserLoadsSystemEnvironment checks that interpolation falls
// back to the process environment: ${VAR?} must fail while VAR is unset and
// succeed once it is exported.
func TestPipelineParserLoadsSystemEnvironment(t *testing.T) {
	var pipeline = `{
		"steps": [{
			"command": "echo ${LLAMAS_ROCK?}"
		}]
	}`
	var decoded struct {
		Steps []struct {
			Command string `json:"command"`
		} `json:"steps"`
	}
	// First parse: variable unset, the ${VAR?} form must produce an error.
	_, err := PipelineParser{Pipeline: []byte(pipeline)}.Parse()
	if err == nil {
		t.Fatalf("Expected $LLAMAS_ROCK: not set")
	}
	os.Setenv("LLAMAS_ROCK", "absolutely")
	defer os.Unsetenv("LLAMAS_ROCK")
	// Second parse: variable now present in the process environment.
	result2, err := PipelineParser{Pipeline: []byte(pipeline)}.Parse()
	if err != nil {
		t.Fatal(err)
	}
	err = decodeIntoStruct(&decoded, result2)
	if err != nil {
		t.Fatal(err)
	}
	if decoded.Steps[0].Command != "echo absolutely" {
		t.Fatalf("Unexpected: %q", decoded.Steps[0].Command)
	}
}
|
/*
Export a local file to Google Cloud Storage.
Usage:
gcs-export local-file gs://bucket/object
*/
package main
import (
"fmt"
"github.com/marksmithson/gcs-export/internal/pkg/gcsexport"
"log"
"os"
)
// main copies the local file named by the first argument to the Google
// Cloud Storage object named by the second, then reports the byte count.
// Exits non-zero on bad arguments; fatal-logs on I/O or export failure.
func main() {
	if len(os.Args) < 3 {
		printUsage()
		os.Exit(1)
	}
	inputFilename := os.Args[1]
	gsObject := os.Args[2]
	if gsObject == "" || inputFilename == "" {
		printUsage()
		os.Exit(1)
	}
	inputReader, err := os.Open(inputFilename)
	// gofmt: the error check was squeezed onto one line.
	if err != nil {
		log.Fatal(err)
	}
	defer inputReader.Close()
	bytes, err := gcsexport.Export(inputReader, gsObject)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Copied %d Bytes\n", bytes)
}
// printUsage writes the command-line usage string to stdout.
func printUsage() {
	fmt.Println("Usage: gcs-export local-file gs://bucket/object")
}
package main
import (
"bufio"
"errors"
"fmt"
"github.com/slasyz/wundercli/api"
"os"
"strings"
)
// Gets list object by its short name.
// Works like shell tab completion.
func getListByShortName(listName string) (list api.List, err error) {
lists, err := api.GetLists()
if err != nil {
return
}
var sel []api.List
listNameLower := strings.ToLower(listName)
for _, el := range lists {
elLower := strings.ToLower(el.Title)
if strings.HasPrefix(elLower, listNameLower) || listName == "" {
if elLower == listNameLower {
sel = []api.List{el}
break
}
sel = append(sel, el)
}
}
if len(sel) == 0 {
err = errors.New("List not found")
return
} else if len(sel) > 1 {
if listName == "" {
fmt.Println("Currently available lists:\n")
} else {
fmt.Printf("There is several lists starting with \"%s\":\n\n", listName)
}
for i, el := range sel {
fmt.Printf(" [%d] %s\n", i+1, el.Title)
}
fmt.Println()
fmt.Print("What is the number of the list? ")
var listNo int
fmt.Scanln(&listNo)
fmt.Println()
if listNo > len(sel) || listNo < 1 {
err = errors.New("Incorrect input.")
return
}
return sel[listNo-1], nil
} else {
return sel[0], nil
}
}
// askForTask prints the tasks of list, prompts for a 1-based task number,
// and returns the chosen task.
func askForTask(list api.List) (task api.Task, err error) {
	var tasks []api.Task
	if tasks, err = api.GetListTasks(list); err != nil {
		return
	}
	for idx, item := range tasks {
		fmt.Printf(" [%d] %s\n", idx+1, item.Title)
	}
	fmt.Println()
	fmt.Print("What is the number of the task? ")
	var choice int
	fmt.Scanln(&choice)
	fmt.Println()
	if choice < 1 || choice > len(tasks) {
		err = errors.New("Incorrect input.")
		return
	}
	return tasks[choice-1], nil
}
// cmdHelp prints usage information for every subcommand, then exits 0.
func cmdHelp() {
	fmt.Println(`
Usage:
- Show list of lists:
$ wundercli list all
- Show tasks from list:
$ wundercli list show [LISTNAME]
- Create new list:
$ wundercli list create [LISTTITLE]
- Remove the list:
$ wundercli list remove [LISTNAME]
- Add task to a list:
$ wundercli task create [LISTNAME [TASKTEXT]]
- Mark task checked:
$ wundercli task check [LISTNAME]
- Edit task:
$ wundercli task edit [LISTNAME]
`)
	os.Exit(0)
}
// cmdListAll prints the titles of all available lists.
func cmdListAll() (err error) {
	lists, err := api.GetLists()
	if err != nil {
		return
	}
	// Printf keeps the exact old output while avoiding `go vet`'s
	// Println-ends-with-newline warning.
	fmt.Printf("Available lists are:\n\n")
	for _, el := range lists {
		fmt.Printf(" - %s\n", el.Title)
	}
	fmt.Println()
	return
}
// cmdListShow prints every task of the list matched by listName.
func cmdListShow(listName string) (err error) {
	list, err := getListByShortName(listName)
	if err != nil {
		return
	}
	var tasks []api.Task
	if tasks, err = api.GetListTasks(list); err != nil {
		return
	}
	fmt.Printf("Tasks from \"%s\" list:\n\n", list.Title)
	for _, item := range tasks {
		fmt.Printf(" - %s\n", item.Title)
	}
	fmt.Println()
	return
}
// cmdListCreate creates a new list, prompting for a title when none was
// given on the command line.
func cmdListCreate(listTitle string) (err error) {
	if listTitle == "" {
		fmt.Print("Enter new list title: ")
		in := bufio.NewReader(os.Stdin)
		listTitle, err = in.ReadString('\n')
		if err != nil {
			return errors.New("reading from console")
		}
		fmt.Println()
		// ReadString includes the trailing '\n', so the old empty-string
		// check could never fire and titles carried a newline. Trim first.
		listTitle = strings.TrimSpace(listTitle)
		if listTitle == "" {
			return errors.New("list title cannot be empty")
		}
	}
	err = api.DoListCreate(listTitle)
	if err != nil {
		return
	}
	fmt.Printf("List was created successfully.\n\n")
	return
}
// cmdListRemove deletes the list matched by listName.
func cmdListRemove(listName string) (err error) {
	list, err := getListByShortName(listName)
	if err != nil {
		return
	}
	err = api.DoListRemove(list)
	if err != nil {
		return
	}
	// Printf keeps the old output while avoiding the `go vet`
	// Println-ends-with-newline warning.
	fmt.Printf("List was removed successfully.\n\n")
	return
}
// cmdTaskCreate adds a task to the list matched by listName, prompting for
// the task text when none was given on the command line.
func cmdTaskCreate(listName string, taskText string) (err error) {
	list, err := getListByShortName(listName)
	if err != nil {
		return
	}
	if taskText == "" {
		fmt.Print("Enter task text: ")
		in := bufio.NewReader(os.Stdin)
		taskText, err = in.ReadString('\n')
		if err != nil {
			return errors.New("reading from console")
		}
		// ReadString includes the trailing '\n', so the old empty-string
		// check could never fire and tasks were stored with a newline.
		taskText = strings.TrimSpace(taskText)
		if taskText == "" {
			return errors.New("task text cannot be empty")
		}
	}
	fmt.Println()
	err = api.DoTaskCreate(list, taskText)
	if err != nil {
		return
	}
	fmt.Printf("Task was created successfully.\n\n")
	return
}
// cmdTaskCheck marks a task (chosen interactively) of the matched list as done.
func cmdTaskCheck(listName string) (err error) {
	list, err := getListByShortName(listName)
	if err != nil {
		return
	}
	task, err := askForTask(list)
	if err != nil {
		return
	}
	err = api.DoTaskCheck(task)
	if err != nil {
		return
	}
	// Printf keeps the old output while avoiding the `go vet`
	// Println-ends-with-newline warning.
	fmt.Printf("Task was marked checked successfully.\n\n")
	return
}
// cmdTaskEdit replaces the text of a task (chosen interactively) of the
// matched list.
func cmdTaskEdit(listName string) (err error) {
	list, err := getListByShortName(listName)
	if err != nil {
		return
	}
	task, err := askForTask(list)
	if err != nil {
		return
	}
	fmt.Print("What is the new text of the task? ")
	var taskText string
	// NOTE: Scanln reads a single whitespace-delimited token, so multi-word
	// task text is truncated at the first space.
	fmt.Scanln(&taskText)
	fmt.Println()
	if taskText == "" {
		return errors.New("task text cannot be empty")
	}
	err = api.DoTaskEdit(task, taskText)
	if err != nil {
		return
	}
	// Printf keeps the old output while avoiding the `go vet`
	// Println-ends-with-newline warning.
	fmt.Printf("Task was edited successfully.\n\n")
	return
}
|
package main
//1669. 合并两个链表
//给你两个链表 list1 和 list2 ,它们包含的元素分别为 n 个和 m 个。
//
//请你将 list1 中下标从 a 到 b 的全部节点都删除,并将list2 接在被删除节点的位置。
//
//下图中蓝色边和节点展示了操作后的结果:
//
//
//请你返回结果链表的头指针。
//
//
//
//示例 1:
//
//
//
//输入:list1 = [0,1,2,3,4,5], a = 3, b = 4, list2 = [1000000,1000001,1000002]
//输出:[0,1,2,1000000,1000001,1000002,5]
//解释:我们删除 list1 中下标为 3 和 4 的两个节点,并将 list2 接在该位置。上图中蓝色的边和节点为答案链表。
//示例 2:
//
//
//输入:list1 = [0,1,2,3,4,5,6], a = 2, b = 5, list2 = [1000000,1000001,1000002,1000003,1000004]
//输出:[0,1,1000000,1000001,1000002,1000003,1000004,6]
//解释:上图中蓝色的边和节点为答案链表。
//
//
//提示:
//
//3 <= list1.length <= 10^4
//1 <= a <= b < list1.length - 1
//1 <= list2.length <= 10^4
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// mergeInBetween removes the nodes with indices a..b (inclusive) from list1
// and splices list2 into the gap, returning list1's head.
func mergeInBetween(list1 *ListNode, a int, b int, list2 *ListNode) *ListNode {
	// prev ends on index a-1: the last node kept before the gap.
	prev := list1
	for i := 1; i < a; i++ {
		prev = prev.Next
	}
	// after ends on index b+1: the first node kept past the gap
	// (b-a+2 hops from prev).
	after := prev
	for i := 0; i < b-a+2; i++ {
		after = after.Next
	}
	// Splice list2 into the removed span.
	prev.Next = list2
	tail := list2
	for tail.Next != nil {
		tail = tail.Next
	}
	tail.Next = after
	return list1
}
|
package base
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestParams_Copy verifies Copy returns an independent copy: mutating the
// copy must leave every value of the original untouched.
func TestParams_Copy(t *testing.T) {
	// Create parameters
	a := Params{
		NFactors:    1,
		Lr:          0.1,
		Type:        Baseline,
		RandomState: 0,
		UserBased:   true,
	}
	// Create copy
	b := a.Copy()
	b[NFactors] = 2
	b[Lr] = 0.2
	b[Type] = Basic
	b[RandomState] = 1
	b[UserBased] = false
	// Check original parameters
	assert.Equal(t, 1, a.GetInt(NFactors, -1))
	assert.Equal(t, 0.1, a.GetFloat64(Lr, -0.1))
	assert.Equal(t, Baseline, a.GetString(Type, ""))
	assert.Equal(t, int64(0), a.GetInt64(RandomState, -1))
	assert.Equal(t, true, a.GetBool(UserBased, false))
	// Check copy parameters
	assert.Equal(t, 2, b.GetInt(NFactors, -1))
	assert.Equal(t, 0.2, b.GetFloat64(Lr, -0.1))
	assert.Equal(t, Basic, b.GetString(Type, ""))
	assert.Equal(t, int64(1), b.GetInt64(RandomState, -1))
	assert.Equal(t, false, b.GetBool(UserBased, true))
}
// TestParams_Merge verifies Merge overlays b onto a: keys unique to either
// side survive and b wins on conflicts (Reg: 0.2 -> 0.3).
func TestParams_Merge(t *testing.T) {
	// Create a group of parameters
	a := Params{
		Lr:  0.1,
		Reg: 0.2,
	}
	// Create another group of parameters
	b := Params{
		Reg:   0.3,
		Alpha: 0.4,
	}
	// Merge
	a.Merge(b)
	// Check
	assert.Equal(t, 0.1, a.GetFloat64(Lr, -1))
	assert.Equal(t, 0.3, a.GetFloat64(Reg, -1))
	assert.Equal(t, 0.4, a.GetFloat64(Alpha, -1))
}
// TestParams_GetBool checks the typed getter: default when missing, stored
// value when present, default again when the stored value has the wrong type.
func TestParams_GetBool(t *testing.T) {
	p := Params{}
	// Empty case
	assert.Equal(t, true, p.GetBool(UserBased, true))
	// Normal case
	p[UserBased] = false
	assert.Equal(t, false, p.GetBool(UserBased, true))
	// Wrong type case
	p[UserBased] = 1
	assert.Equal(t, true, p.GetBool(UserBased, true))
}

// TestParams_GetFloat64 additionally checks that a stored int (1) is
// converted to float64 rather than falling back to the default.
func TestParams_GetFloat64(t *testing.T) {
	p := Params{}
	// Empty case
	assert.Equal(t, 0.1, p.GetFloat64(Lr, 0.1))
	// Normal case
	p[Lr] = 1.0
	assert.Equal(t, 1.0, p.GetFloat64(Lr, 0.1))
	// Wrong type case
	p[Lr] = 1
	assert.Equal(t, 1.0, p.GetFloat64(Lr, 0.1))
	p[Lr] = "hello"
	assert.Equal(t, 0.1, p.GetFloat64(Lr, 0.1))
}

// TestParams_GetInt mirrors GetBool for int values.
func TestParams_GetInt(t *testing.T) {
	p := Params{}
	// Empty case
	assert.Equal(t, -1, p.GetInt(NFactors, -1))
	// Normal case
	p[NFactors] = 0
	assert.Equal(t, 0, p.GetInt(NFactors, -1))
	// Wrong type case
	p[NFactors] = "hello"
	assert.Equal(t, -1, p.GetInt(NFactors, -1))
}

// TestParams_GetInt64 additionally checks that a stored int (0) widens to
// int64 rather than falling back to the default.
func TestParams_GetInt64(t *testing.T) {
	p := Params{}
	// Empty case
	assert.Equal(t, int64(-1), p.GetInt64(RandomState, -1))
	// Normal case
	p[RandomState] = int64(0)
	assert.Equal(t, int64(0), p.GetInt64(RandomState, -1))
	// Wrong type case
	p[RandomState] = 0
	assert.Equal(t, int64(0), p.GetInt64(RandomState, -1))
	p[RandomState] = "hello"
	assert.Equal(t, int64(-1), p.GetInt64(RandomState, -1))
}

// TestParams_GetString mirrors GetBool for string values.
func TestParams_GetString(t *testing.T) {
	p := Params{}
	// Empty case
	assert.Equal(t, Cosine, p.GetString(Similarity, Cosine))
	// Normal case
	p[Similarity] = MSD
	assert.Equal(t, MSD, p.GetString(Similarity, Cosine))
	// Wrong type case
	p[Similarity] = 1
	assert.Equal(t, Cosine, p.GetString(Similarity, Cosine))
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"math"
"testing"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecbase"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/errors"
)
// distinctTestCase is one input/expected pair for the distinct operators.
type distinctTestCase struct {
	distinctCols []uint32                // column indices the distinct is computed over
	typs         []*types.T              // types of all input columns
	tuples       []colexectestutils.Tuple // input rows
	expected     []colexectestutils.Tuple // expected output rows (first occurrence kept)
	// isOrderedOnDistinctCols is true when tuples are sorted on distinctCols,
	// enabling the ordered/partially-ordered variants to be exercised too.
	isOrderedOnDistinctCols bool
}

var distinctTestCases = []distinctTestCase{
	{
		distinctCols: []uint32{0, 1, 2},
		typs:         []*types.T{types.Float, types.Int, types.String, types.Int},
		tuples: colexectestutils.Tuples{
			{nil, nil, nil, nil},
			{nil, nil, nil, nil},
			{nil, nil, "30", nil},
			{1.0, 2, "30", 4},
			{1.0, 2, "30", 4},
			{2.0, 2, "30", 4},
			{2.0, 3, "30", 4},
			{2.0, 3, "40", 4},
			{2.0, 3, "40", 4},
		},
		expected: colexectestutils.Tuples{
			{nil, nil, nil, nil},
			{nil, nil, "30", nil},
			{1.0, 2, "30", 4},
			{2.0, 2, "30", 4},
			{2.0, 3, "30", 4},
			{2.0, 3, "40", 4},
		},
		isOrderedOnDistinctCols: true,
	},
	{
		// Same data but with the distinct columns listed in a different
		// order and Bytes instead of String.
		distinctCols: []uint32{1, 0, 2},
		typs:         []*types.T{types.Float, types.Int, types.Bytes, types.Int},
		tuples: colexectestutils.Tuples{
			{nil, nil, nil, nil},
			{nil, nil, nil, nil},
			{nil, nil, "30", nil},
			{1.0, 2, "30", 4},
			{1.0, 2, "30", 4},
			{2.0, 2, "30", 4},
			{2.0, 3, "30", 4},
			{2.0, 3, "40", 4},
			{2.0, 3, "40", 4},
		},
		expected: colexectestutils.Tuples{
			{nil, nil, nil, nil},
			{nil, nil, "30", nil},
			{1.0, 2, "30", 4},
			{2.0, 2, "30", 4},
			{2.0, 3, "30", 4},
			{2.0, 3, "40", 4},
		},
		isOrderedOnDistinctCols: true,
	},
	{
		// Unordered input: duplicates are adjacent but groups are shuffled.
		distinctCols: []uint32{0, 1, 2},
		typs:         []*types.T{types.Float, types.Int, types.String, types.Int},
		tuples: colexectestutils.Tuples{
			{1.0, 2, "30", 4},
			{1.0, 2, "30", 4},
			{nil, nil, nil, nil},
			{nil, nil, nil, nil},
			{2.0, 2, "30", 4},
			{2.0, 3, "30", 4},
			{nil, nil, "30", nil},
			{2.0, 3, "40", 4},
			{2.0, 3, "40", 4},
		},
		expected: colexectestutils.Tuples{
			{1.0, 2, "30", 4},
			{nil, nil, nil, nil},
			{2.0, 2, "30", 4},
			{2.0, 3, "30", 4},
			{nil, nil, "30", nil},
			{2.0, 3, "40", 4},
		},
	},
	{
		distinctCols: []uint32{0},
		typs:         []*types.T{types.Int, types.Bytes},
		tuples: colexectestutils.Tuples{
			{1, "a"},
			{2, "b"},
			{3, "c"},
			{nil, "d"},
			{5, "e"},
			{6, "f"},
			{1, "1"},
			{2, "2"},
			{3, "3"},
		},
		expected: colexectestutils.Tuples{
			{1, "a"},
			{2, "b"},
			{3, "c"},
			{nil, "d"},
			{5, "e"},
			{6, "f"},
		},
	},
	{
		// This is to test HashTable deduplication with various batch size
		// boundaries and ensure it always emits the first tuple it encountered.
		distinctCols: []uint32{0},
		typs:         []*types.T{types.Int, types.String},
		tuples: colexectestutils.Tuples{
			{1, "1"},
			{1, "2"},
			{1, "3"},
			{1, "4"},
			{1, "5"},
			{2, "6"},
			{2, "7"},
			{2, "8"},
			{2, "9"},
			{2, "10"},
			{0, "11"},
			{0, "12"},
			{0, "13"},
			{1, "14"},
			{1, "15"},
			{1, "16"},
		},
		expected: colexectestutils.Tuples{
			{1, "1"},
			{2, "6"},
			{0, "11"},
		},
	},
	{
		// JSON values as the distinct column.
		distinctCols: []uint32{0},
		typs:         []*types.T{types.Jsonb, types.String},
		tuples: colexectestutils.Tuples{
			{`'{"id": 1}'`, "a"},
			{`'{"id": 2}'`, "b"},
			{`'{"id": 3}'`, "c"},
			{`'{"id": 1}'`, "1"},
			{`'{"id": null}'`, "d"},
			{`'{"id": 2}'`, "2"},
			{`'{"id": 5}'`, "e"},
			{`'{"id": 6}'`, "f"},
			{`'{"id": 3}'`, "3"},
		},
		expected: colexectestutils.Tuples{
			{`'{"id": 1}'`, "a"},
			{`'{"id": 2}'`, "b"},
			{`'{"id": 3}'`, "c"},
			{`'{"id": null}'`, "d"},
			{`'{"id": 5}'`, "e"},
			{`'{"id": 6}'`, "f"},
		},
	},
}
// TestDistinct runs every distinctTestCase through the unordered distinct,
// and — when the input is sorted on the distinct columns — also through the
// partially-ordered (random subsets of ordered columns) and fully-ordered
// variants.
func TestDistinct(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	rng, _ := randutil.NewPseudoRand()
	for _, tc := range distinctTestCases {
		log.Infof(context.Background(), "unordered")
		colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tc.tuples}, [][]*types.T{tc.typs}, tc.expected, colexectestutils.OrderedVerifier,
			func(input []colexecop.Operator) (colexecop.Operator, error) {
				return NewUnorderedDistinct(
					testAllocator, input[0], tc.distinctCols, tc.typs,
				), nil
			})
		if tc.isOrderedOnDistinctCols {
			for numOrderedCols := 1; numOrderedCols < len(tc.distinctCols); numOrderedCols++ {
				log.Infof(context.Background(), "partiallyOrdered/ordCols=%d", numOrderedCols)
				// Pick a random subset of the distinct columns to declare ordered.
				orderedCols := make([]uint32, numOrderedCols)
				for i, j := range rng.Perm(len(tc.distinctCols))[:numOrderedCols] {
					orderedCols[i] = tc.distinctCols[j]
				}
				colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tc.tuples}, [][]*types.T{tc.typs}, tc.expected, colexectestutils.OrderedVerifier,
					func(input []colexecop.Operator) (colexecop.Operator, error) {
						return newPartiallyOrderedDistinct(
							testAllocator, input[0], tc.distinctCols, orderedCols, tc.typs,
						)
					})
			}
			log.Info(context.Background(), "ordered")
			colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tc.tuples}, [][]*types.T{tc.typs}, tc.expected, colexectestutils.OrderedVerifier,
				func(input []colexecop.Operator) (colexecop.Operator, error) {
					return colexecbase.NewOrderedDistinct(input[0], tc.distinctCols, tc.typs)
				})
		}
	}
}
// TestUnorderedDistinctRandom exercises the unordered distinct on randomly
// generated int data: 1-3 distinct columns and a random new-tuple
// probability, sized to produce a few distinct batches of output.
func TestUnorderedDistinctRandom(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	rng, _ := randutil.NewPseudoRand()
	nCols := 1 + rng.Intn(3)
	typs := make([]*types.T, nCols)
	distinctCols := make([]uint32, nCols)
	for i := range typs {
		typs[i] = types.Int
		distinctCols[i] = uint32(i)
	}
	nDistinctBatches := 2 + rng.Intn(2)
	newTupleProbability := rng.Float64()
	nTuples := int(float64(nDistinctBatches*coldata.BatchSize()) / newTupleProbability)
	const maxNumTuples = 25000
	if nTuples > maxNumTuples {
		// If we happen to set a large value for coldata.BatchSize() and a small
		// value for newTupleProbability, we might end up with huge number of
		// tuples. Then, when runTests test harness uses small batch size, the
		// test might take a while, so we'll limit the number of tuples.
		nTuples = maxNumTuples
	}
	tups, expected := generateRandomDataForUnorderedDistinct(rng, nTuples, nCols, newTupleProbability)
	colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tups}, [][]*types.T{typs}, expected, colexectestutils.UnorderedVerifier,
		func(input []colexecop.Operator) (colexecop.Operator, error) {
			return NewUnorderedDistinct(testAllocator, input[0], distinctCols, typs), nil
		},
	)
}
// getNewValueProbabilityForDistinct returns the probability that we need to
// use a new value for a single element in a tuple when overall the tuples
// need to be distinct with newTupleProbability and they consist of nCols
// columns.
//
// Derivation: a tuple is "new" when at least one of its nCols elements takes
// a new value, i.e.
//   newTupleProbability = 1 - (1 - newValueProbability) ^ nCols,
// and solving for newValueProbability gives the expression below.
func getNewValueProbabilityForDistinct(newTupleProbability float64, nCols int) float64 {
	sameTupleProbability := 1 - newTupleProbability
	perColumnExponent := 1.0 / float64(nCols)
	return 1.0 - math.Pow(sameTupleProbability, perColumnExponent)
}
// runDistinctBenchmarks runs the benchmarks of a distinct operator variant on
// multiple configurations.
//
// distinctConstructor builds the operator under test; getNumOrderedCols
// decides how many of the nCols distinct columns the input is ordered on;
// namePrefix names the sub-benchmarks; isExternal switches to the larger,
// null-free configurations.
func runDistinctBenchmarks(
	ctx context.Context,
	b *testing.B,
	distinctConstructor func(allocator *colmem.Allocator, input colexecop.Operator, distinctCols []uint32, numOrderedCols int, typs []*types.T) (colexecop.Operator, error),
	getNumOrderedCols func(nCols int) int,
	namePrefix string,
	isExternal bool,
) {
	rng, _ := randutil.NewPseudoRand()
	const nCols = 2
	const bytesValueLength = 8
	distinctCols := []uint32{0, 1}
	nullsOptions := []bool{false, true}
	nRowsOptions := []int{1, 64, 4 * coldata.BatchSize(), 256 * coldata.BatchSize()}
	if isExternal {
		nullsOptions = []bool{false}
		nRowsOptions = []int{coldata.BatchSize(), 64 * coldata.BatchSize(), 4096 * coldata.BatchSize()}
	}
	if testing.Short() {
		nRowsOptions = []int{coldata.BatchSize()}
	}
	// setFirstValue writes the initial (zero) value into row 0 of vec.
	setFirstValue := func(vec coldata.Vec) {
		if typ := vec.Type(); typ == types.Int {
			vec.Int64()[0] = 0
		} else if typ == types.Bytes {
			vec.Bytes().Set(0, make([]byte, bytesValueLength))
		} else {
			colexecerror.InternalError(errors.AssertionFailedf("unsupported type %s", typ))
		}
	}
	// setIthValue fills row i of vec: it copies row i-1 and, with probability
	// newValueProbability, bumps it so that a new distinct value appears while
	// the column stays ordered.
	setIthValue := func(vec coldata.Vec, i int, newValueProbability float64) {
		if i == 0 {
			colexecerror.InternalError(errors.New("setIthValue called with i == 0"))
		}
		if typ := vec.Type(); typ == types.Int {
			col := vec.Int64()
			col[i] = col[i-1]
			if rng.Float64() < newValueProbability {
				col[i]++
			}
		} else if typ == types.Bytes {
			v := make([]byte, bytesValueLength)
			copy(v, vec.Bytes().Get(i-1))
			if rng.Float64() < newValueProbability {
				for pos := 0; pos < bytesValueLength; pos++ {
					v[pos]++
					// If we have overflowed our current byte, we need to
					// increment the next one; otherwise, we have a new distinct
					// value.
					if v[pos] != 0 {
						break
					}
				}
			}
			vec.Bytes().Set(i, v)
		} else {
			colexecerror.InternalError(errors.AssertionFailedf("unsupported type %s", typ))
		}
	}
	for _, hasNulls := range nullsOptions {
		for _, newTupleProbability := range []float64{0.001, 0.1} {
			for _, nRows := range nRowsOptions {
				for _, typ := range []*types.T{types.Int, types.Bytes} {
					typs := make([]*types.T, nCols)
					cols := make([]coldata.Vec, nCols)
					for i := range typs {
						typs[i] = typ
						cols[i] = testAllocator.NewMemColumn(typs[i], nRows)
					}
					numOrderedCols := getNumOrderedCols(nCols)
					newValueProbability := getNewValueProbabilityForDistinct(newTupleProbability, nCols)
					for i := range distinctCols {
						setFirstValue(cols[i])
						for j := 1; j < nRows; j++ {
							setIthValue(cols[i], j, newValueProbability)
						}
						if hasNulls {
							cols[i].Nulls().SetNull(0)
						}
					}
					// Only mention hasNulls in the name when it actually varies.
					nullsPrefix := ""
					if len(nullsOptions) > 1 {
						nullsPrefix = fmt.Sprintf("/hasNulls=%t", hasNulls)
					}
					b.Run(
						fmt.Sprintf("%s%s/newTupleProbability=%.3f/rows=%d/ordCols=%d/type=%s",
							namePrefix, nullsPrefix, newTupleProbability,
							nRows, numOrderedCols, typ.Name(),
						),
						func(b *testing.B) {
							b.SetBytes(int64(8 * nRows * nCols))
							b.ResetTimer()
							for n := 0; n < b.N; n++ {
								// Note that the source will be ordered on all nCols so that the
								// number of distinct tuples doesn't vary between different
								// distinct operator variations.
								source := colexectestutils.NewChunkingBatchSource(testAllocator, typs, cols, nRows)
								distinct, err := distinctConstructor(testAllocator, source, distinctCols, numOrderedCols, typs)
								if err != nil {
									b.Fatal(err)
								}
								distinct.Init()
								// Drain the operator; the results themselves are discarded.
								for b := distinct.Next(ctx); b.Length() > 0; b = distinct.Next(ctx) {
								}
							}
							b.StopTimer()
						})
				}
			}
		}
	}
}
// BenchmarkDistinct benchmarks the unordered, partially ordered, and fully
// ordered distinct operator variants over a common set of configurations.
func BenchmarkDistinct(b *testing.B) {
	defer log.Scope(b).Close(b)
	ctx := context.Background()
	names := []string{"Unordered", "PartiallyOrdered", "Ordered"}
	// Fraction of the distinct columns the input is ordered on, per variant.
	orderedFractions := []float64{0, 0.5, 1.0}
	constructors := []func(*colmem.Allocator, colexecop.Operator, []uint32, int, []*types.T) (colexecop.Operator, error){
		func(allocator *colmem.Allocator, input colexecop.Operator, distinctCols []uint32, numOrderedCols int, typs []*types.T) (colexecop.Operator, error) {
			return NewUnorderedDistinct(allocator, input, distinctCols, typs), nil
		},
		func(allocator *colmem.Allocator, input colexecop.Operator, distinctCols []uint32, numOrderedCols int, typs []*types.T) (colexecop.Operator, error) {
			return newPartiallyOrderedDistinct(allocator, input, distinctCols, distinctCols[:numOrderedCols], typs)
		},
		func(allocator *colmem.Allocator, input colexecop.Operator, distinctCols []uint32, numOrderedCols int, typs []*types.T) (colexecop.Operator, error) {
			return colexecbase.NewOrderedDistinct(input, distinctCols, typs)
		},
	}
	for i := range constructors {
		frac := orderedFractions[i]
		runDistinctBenchmarks(
			ctx,
			b,
			constructors[i],
			func(nCols int) int {
				return int(float64(nCols) * frac)
			},
			names[i],
			false, /* isExternal */
		)
	}
}
|
package log
import (
"github.com/lestrrat-go/file-rotatelogs"
"github.com/rifflock/lfshook"
log "github.com/sirupsen/logrus"
"time"
)
// newLfsHook builds a logrus hook that writes every log level to a daily
// rotating file under logs/, keeping at most maxRemainCnt rotated files.
//
// logLevel is an index into log.AllLevels; out-of-range values fall back to
// InfoLevel instead of panicking with an index-out-of-range error.
func newLfsHook(logLevel int, maxRemainCnt uint) log.Hook {
	logName := "logs/peipei2"
	writer, err := rotatelogs.New(
		logName+".%Y%m%d",
		// WithLinkName maintains a symlink pointing at the newest log file so
		// it is easy to find the current one.
		rotatelogs.WithLinkName(logName),
		// WithRotationTime sets the rotation interval; here: once per day.
		rotatelogs.WithRotationTime(time.Hour*24),
		// WithMaxAge and WithRotationCount are mutually exclusive:
		// WithMaxAge caps how long a rotated file is kept,
		// WithRotationCount caps how many rotated files are kept.
		//rotatelogs.WithMaxAge(time.Hour*24),
		rotatelogs.WithRotationCount(maxRemainCnt),
	)
	if err != nil {
		log.Errorf("config local file system for logger error: %v", err)
	}
	// Guard the AllLevels index: an out-of-range logLevel would panic.
	level := log.InfoLevel
	if logLevel >= 0 && logLevel < len(log.AllLevels) {
		level = log.AllLevels[logLevel]
	}
	log.SetLevel(level)
	lfsHook := lfshook.NewHook(lfshook.WriterMap{
		log.DebugLevel: writer,
		log.InfoLevel:  writer,
		log.WarnLevel:  writer,
		log.ErrorLevel: writer,
		log.FatalLevel: writer,
		log.PanicLevel: writer,
	}, &log.TextFormatter{DisableColors: true})
	return lfsHook
}
|
package main
import (
"testing"
)
// runDaemon is the shared daemon instance exercised by the tests below.
var runDaemon RunDaemon
// Test_runMysql runs the MySQL daemon task. NOTE(review): no assertions —
// this only checks that runMysql completes without panicking.
func Test_runMysql(t *testing.T) {
	runDaemon.runMysql()
}
// Test_runRedis runs the Redis daemon task; it only checks for panics.
func Test_runRedis(t *testing.T) {
	runDaemon.runRedis()
}
// Test_runWeb runs the web daemon task; it only checks for panics.
func Test_runWeb(t *testing.T) {
	runDaemon.runWeb()
}
// Test_runWeb1 runs the runWeb1 daemon task; it only checks for panics.
func Test_runWeb1(t *testing.T) {
	runDaemon.runWeb1()
}
// Test_runWeb2 runs the runWeb2 daemon task; it only checks for panics.
func Test_runWeb2(t *testing.T) {
	runDaemon.runWeb2()
}
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package safesocket
import (
"context"
"fmt"
"net"
"syscall"
)
// connect dials the local TCP port that this platform uses in place of a
// named pipe (the path argument is unused here; see the comment on listen).
func connect(path string, port uint16) (net.Conn, error) {
	pipe, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port))
	if err != nil {
		return nil, err
	}
	// Return a literal nil on success rather than the (nil) err variable.
	return pipe, nil
}
// setFlags is a net.ListenConfig Control hook that sets SO_REUSEADDR on the
// socket before binding, so a restarted process can rebind the fixed local
// port immediately. NOTE(review): syscall.Handle exists only in Windows
// builds, so this file is presumably guarded by a windows build constraint.
func setFlags(network, address string, c syscall.RawConn) error {
	return c.Control(func(fd uintptr) {
		syscall.SetsockoptInt(syscall.Handle(fd), syscall.SOL_SOCKET,
			syscall.SO_REUSEADDR, 1)
	})
}
// TODO(apenwarr): use named pipes instead of sockets?
// I tried to use winio.ListenPipe() here, but that code is a disaster,
// built on top of an API that's a disaster. So for now we'll hack it by
// just always using a TCP session on a fixed port on localhost. As a
// result, on Windows we ignore the vendor and name strings.
// NOTE(bradfitz): Jason did a new pipe package: https://go-review.googlesource.com/c/sys/+/299009

// listen binds a localhost TCP listener on the requested port (0 picks a
// free one) and reports the port actually bound.
func listen(path string, port uint16) (_ net.Listener, gotPort uint16, _ error) {
	lc := net.ListenConfig{
		Control: setFlags,
	}
	pipe, err := lc.Listen(context.Background(), "tcp", fmt.Sprintf("127.0.0.1:%d", port))
	if err != nil {
		return nil, 0, err
	}
	// err is known nil here; return a literal nil for clarity.
	return pipe, uint16(pipe.Addr().(*net.TCPAddr).Port), nil
}
|
package patcher
import (
"bufio"
"encoding/json"
"fmt"
"github.com/phips4/discord-update-patcher/zip"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
)
// Discord describes the local Discord installation being patched.
type Discord struct {
	dir        string // installation root, e.g. %AppData%\Roaming\discord
	Version    string // selected version folder name (see SelectLatestVersion)
	modulesDir string // <dir>/<Version>/modules (see SelectModulesPath)
}
// DiscordModules maps a module name to its installed version number.
type DiscordModules map[string]int
// SelectDefaultPath points the patcher at the default Discord installation
// directory under the user's AppData\Roaming folder.
func (discord *Discord) SelectDefaultPath() {
	// dir is assigned before the existence check, so it stays set even when
	// the directory is missing (matching the original behavior).
	discord.dir = path.Join(userHomeDir(), "AppData", "Roaming", "discord")
	_, statErr := os.Stat(discord.dir)
	if os.IsNotExist(statErr) {
		fmt.Println("could not find default discord installation directory.")
		return
	}
	fmt.Println("Selected default installation directory:", discord.dir)
}
// SelectLatestVersion scans the installation directory for version entries
// (currently those prefixed with "0.") and records the last one listed.
// NOTE(review): directory entries come back in lexicographic order, so e.g.
// "0.0.9" sorts after "0.0.10" — confirm this is acceptable for real versions.
func (discord *Discord) SelectLatestVersion() {
	entries, err := ioutil.ReadDir(discord.dir)
	must(err)
	var latest string
	for _, entry := range entries {
		if strings.HasPrefix(entry.Name(), "0.") { //probably changes in the future
			latest = entry.Name()
		}
	}
	discord.Version = latest
	fmt.Println("Selected version:", discord.Version)
}
// SelectModulesPath records the modules directory for the given version.
func (discord *Discord) SelectModulesPath(version string) {
	modulesDir := path.Join(discord.dir, version, "modules")
	discord.modulesDir = modulesDir
	fmt.Println("Selected modules directory:", discord.modulesDir)
}
// CreateBackup zips the current version directory into
// ../../dup-backup/DUP_<unix-timestamp>_<version>.zip.
func (discord *Discord) CreateBackup() {
	fmt.Println("Creating backup...")
	archiveName := fmt.Sprintf("DUP_%d_%s.zip", time.Now().Unix(), discord.Version)
	srcDir := filepath.Join(discord.modulesDir, "..")
	destFile := filepath.Join(discord.modulesDir, "..", "..", "dup-backup", archiveName)
	// creates a zipped copy of src
	must(zip.Zip(srcDir, destFile))
}
// DeleteModules removes every installed discord_* module directory except
// discord_desktop_core, then clears the "pending" directory and
// installed.json.
func (discord *Discord) DeleteModules() {
	entries, err := ioutil.ReadDir(discord.modulesDir)
	must(err)
	fmt.Println("Deleting modules...")
	for _, entry := range entries {
		if !entry.IsDir() || !strings.HasPrefix(entry.Name(), "discord_") {
			continue
		}
		// Keep discord_desktop_core so the updater itself can still start.
		if entry.Name() == "discord_desktop_core" {
			continue
		}
		removeErr := os.RemoveAll(path.Join(discord.modulesDir, entry.Name()))
		if removeErr != nil {
			fmt.Println("Is discord still running?")
			log.Fatalln(removeErr)
		}
	}
	discord.mustRemoveFromModules("pending")
	discord.mustRemoveFromModules("installed.json")
}
// mustRemoveFromModules deletes target (file or directory tree) from the
// modules directory, terminating the program on failure.
func (discord *Discord) mustRemoveFromModules(target string) {
	must(os.RemoveAll(path.Join(discord.modulesDir, target)))
}
// DownloadFiles launches Discord.exe so that the client downloads a fresh set
// of modules itself, then kills the process as soon as its stdout reports
// that the downloads finished.
func (discord *Discord) DownloadFiles() {
	exePath := validateExePath( // default discord installation dir with version, if not ok ask user to enter location
		path.Join(discord.dir, "..", "..", "local", "Discord", "app-"+discord.Version, "Discord.exe"))
	fmt.Println("Selected Discord.exe:", exePath)
	fmt.Println("Starting discord to download Files")
	cmd := exec.Command(exePath, "")
	cmdReader, err := cmd.StdoutPipe()
	must(err)
	scanner := bufio.NewScanner(cmdReader)
	// Watch Discord's stdout in the background and kill the process once the
	// marker line appears.
	go func() {
		for scanner.Scan() {
			if strings.Contains(scanner.Text(), "Finished module downloads") {
				fmt.Println("Download finished now kill discord")
				must(cmd.Process.Kill())
			}
		}
	}()
	must(cmd.Start())
	err = cmd.Wait()
	// exit status 1 is okay, because we kill the process. Other errors should be handled
	if err != nil && !strings.Contains(err.Error(), "exit status 1") {
		must(err)
	}
}
// validateExePath returns a usable path to Discord.exe, prompting the user
// for a new location until the file exists and is named Discord.exe.
//
// Bug fix: the original discarded the result of its recursive call, so a
// second invalid input was returned unvalidated; the loop re-checks every
// entered path.
func validateExePath(path string) string {
	for {
		_, err := os.Stat(path)
		if !os.IsNotExist(err) && strings.HasSuffix(path, "Discord.exe") {
			return path
		}
		path = readExeInput()
	}
}
// readExeInput prompts on stdin for the Discord.exe location and returns the
// entered path with the trailing newline removed.
//
// Bug fix: on Windows the line ends with "\r\n"; trimming only "\n" left a
// stray carriage return in the path, so both characters are trimmed now.
func readExeInput() string {
	fmt.Println("Discord.exe could not be found. Please enter location manually:")
	exePath, err := bufio.NewReader(os.Stdin).ReadString('\n')
	must(err)
	return strings.TrimRight(exePath, "\r\n")
}
// userHomeDir returns the current user's home directory: HOME on non-Windows
// systems; on Windows HOMEDRIVE+HOMEPATH, falling back to USERPROFILE.
func userHomeDir() string {
	if runtime.GOOS == "windows" {
		if home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH"); home != "" {
			return home
		}
		return os.Getenv("USERPROFILE")
	}
	return os.Getenv("HOME")
}
// InstallModules unpacks all downloaded module archives from the "pending"
// directory and returns the name -> version map of what was installed. The
// existing discord_desktop_core directory is removed before unpacking, and
// "pending" is removed afterwards.
func (discord *Discord) InstallModules() DiscordModules {
	discord.mustRemoveFromModules("discord_desktop_core")
	installed, err := unzipModules(filepath.Join(discord.modulesDir, "pending"))
	must(err)
	discord.mustRemoveFromModules("pending")
	return installed
}
// unzipModules extracts every module archive found under pendingPath into the
// modules directory (pendingPath's parent) and returns a map of module
// name -> version.
//
// Bug fixes vs. the original: the WalkFunc err parameter is now checked
// before info is used, the error from extractModuleName is no longer
// discarded, and a failing os.RemoveAll now aborts the walk instead of being
// swallowed by `return nil`.
func unzipModules(pendingPath string) (DiscordModules, error) {
	modules := make(DiscordModules)
	err := filepath.Walk(pendingPath, func(path string, info os.FileInfo, err error) error {
		// Propagate traversal errors before touching info (it may be nil).
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		fmt.Println("installing module", info.Name())
		moduleName, version, err := extractModuleName(info.Name())
		if err != nil {
			return err
		}
		modules[moduleName] = version
		modulesPath := filepath.Join(pendingPath, "..")
		moduleDir := filepath.Join(modulesPath, moduleName)
		// Replace any previously installed copy of this module.
		if _, statErr := os.Stat(moduleDir); !os.IsNotExist(statErr) {
			if err := os.RemoveAll(moduleDir); err != nil {
				return err
			}
		}
		return zip.Unzip(filepath.Join(pendingPath, info.Name()), moduleDir)
	})
	return modules, err
}
// extractModuleName splits an archive name such as "discord_voice-5.zip"
// into the module name ("discord_voice") and its integer version (5).
//
// Bug fix: a name without a "-<version>" part previously caused an
// index-out-of-range panic; it now returns an error instead.
func extractModuleName(moduleName string) (string, int, error) {
	moduleName = strings.TrimSuffix(moduleName, ".zip")
	split := strings.Split(moduleName, "-")
	if len(split) < 2 {
		return "", -1, fmt.Errorf("module name %q has no version suffix", moduleName)
	}
	version, err := strconv.Atoi(split[1])
	if err != nil {
		return "", -1, err
	}
	return split[0], version, nil
}
// UpdateJson rewrites installed.json so that it reflects the freshly
// installed module versions.
func (discord *Discord) UpdateJson(modules DiscordModules) {
	discord.manipulateJson(modules)
}
// manipulateJson rewrites modules/installed.json in place: for every module
// already listed there it sets installedVersion to the version recorded in
// modules and clears the updateZipfile marker.
// NOTE(review): a module present in installed.json but missing from modules
// gets installedVersion 0 (the map zero value) — confirm this is intended.
func (discord *Discord) manipulateJson(modules DiscordModules) {
	installedFile, err := os.OpenFile(path.Join(discord.modulesDir, "installed.json"), os.O_RDWR, os.ModePerm)
	must(err)
	defer installedFile.Close()
	// Read the current file as raw JSON keyed by module name.
	jsonMap := map[string]json.RawMessage{}
	must(json.NewDecoder(installedFile).Decode(&jsonMap))
	// Per-module fields we rewrite; any other fields in the original entries
	// are dropped when the file is re-marshaled.
	type moduleDetails struct {
		InstalledVersion int `json:"installedVersion"`
		UpdateVersion int `json:"updateVersion"`
		UpdateZipFile string `json:"updateZipfile,omitempty"`
	}
	finalJson := make(map[string]moduleDetails)
	for moduleName := range jsonMap {
		details := &moduleDetails{}
		must(json.Unmarshal(jsonMap[moduleName], details))
		details.InstalledVersion = modules[moduleName]
		// With omitempty, clearing the name removes the pending-update marker
		// from the output entirely.
		details.UpdateZipFile = ""
		finalJson[moduleName] = *details
		fmt.Println("update module version", moduleName, modules[moduleName])
	}
	finalJsonBytes, err := json.MarshalIndent(finalJson, "", " ")
	must(err)
	// Truncate first so a shorter document does not leave stale trailing bytes.
	must(installedFile.Truncate(0))
	_, err = installedFile.WriteAt(finalJsonBytes, 0)
	must(err)
}
// must terminates the program via log.Fatal if err is non-nil; it is used
// throughout the patcher for unrecoverable failures.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}
|
package snailframe
// Welcome renders the "aaa" template, exposing the value of the "aa" query
// parameter under the template key "a".
func Welcome(r *RData) {
	queryVal, _ := r.Query("aa")
	//re,_ := r.dbconn.Find("SELECT * FROM shici_info where id=?",m)
	//fmt.Println(re)
	data := map[string]interface{}{
		"a": queryVal,
	}
	r.ExecTpl("aaa", data)
}
|
package tips
import (
"encoding/csv"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strconv"
"testing"
)
// readCSVFromFile parses a two-column CSV of "name,age" rows from path.
// A Close error is surfaced via the named return only when no earlier error
// occurred.
//
// Bug fix: non-EOF errors from csv.Reader.Read were previously ignored and
// execution continued with a nil row; they are now reported with the line
// number.
func readCSVFromFile(path string) (persons []person, err error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("Failed to open %s: %v", path, err)
	}
	defer func() {
		if cerr := f.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()
	r := csv.NewReader(f)
	for lineno := 1; ; lineno++ {
		row, rerr := r.Read()
		if rerr == io.EOF {
			break
		}
		if rerr != nil {
			return nil, fmt.Errorf("Failed to read line %d: %v", lineno, rerr)
		}
		if l := len(row); l != 2 {
			return nil, fmt.Errorf("The row size must be 2 but got %d at line %d", l, lineno)
		}
		age, aerr := strconv.Atoi(row[1])
		if aerr != nil {
			return nil, fmt.Errorf("Failed to parse %q as an integer", row[1])
		}
		// Atoi already returns int; the original's int() conversion was a no-op.
		persons = append(persons, person{name: row[0], age: age})
	}
	return persons, nil
}
// TestReadCSV reads the checked-in fixture testdata/example.csv and compares
// the parsed persons against the expected contents.
func TestReadCSV(t *testing.T) {
	persons, err := readCSVFromFile("testdata/example.csv")
	if err != nil {
		t.Error(err)
	}
	if !reflect.DeepEqual(persons, []person{
		{name: "Alice", age: 20},
		{name: "Bob", age: 23},
	}) {
		t.Error("Unexpected result: ", persons)
	}
}
// writeCSVToFile writes persons as "name,age" CSV rows to a new file at path.
// A Close error is surfaced via the named return only when no earlier error
// occurred; write errors are collected by the csv.Writer and returned from
// Error() after Flush.
func writeCSVToFile(path string, persons []person) (err error) {
	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("Failed to create %s: %v", path, err)
	}
	defer func() {
		if cerr := f.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()
	w := csv.NewWriter(f)
	for _, p := range persons {
		w.Write([]string{p.name, strconv.Itoa(p.age)})
	}
	// Don't forget to call Flush!
	w.Flush()
	// Return w.Error(). Don't ignore an error occured in Flush().
	return w.Error()
}
// TestWriteCSV writes two persons to a temporary CSV file, reads the raw
// bytes back, and checks the exact serialized content. The temp file is
// removed by the deferred cleanup even when the test fails.
func TestWriteCSV(t *testing.T) {
	defer func() {
		if err := os.RemoveAll("testdata/tmp.csv"); err != nil {
			t.Error(err)
		}
	}()
	persons := []person{{name: "Taro", age: 8}, {name: "Jiro", age: 5}}
	err := writeCSVToFile("testdata/tmp.csv", persons)
	if err != nil {
		t.Error(err)
	}
	b, err := ioutil.ReadFile("testdata/tmp.csv")
	if err != nil {
		t.Error(err)
	}
	content := string(b)
	if content != "Taro,8\nJiro,5\n" {
		t.Errorf("Unexpected content: %q", content)
	}
}
|
package main
// main demonstrates a defined type (num) with underlying type int.
//
// Bug fix: the original only ever assigned to a and never read it, which is a
// compile error in Go ("a declared and not used" — assignments do not count
// as a use). A blank-identifier read keeps the example valid.
func main() {
	type num int
	var a = num(0)
	a = 5
	_ = a
}
|
package env
import (
"sync"
"github.com/kelseyhightower/envconfig"
)
var (
	// env holds the process-wide configuration, populated exactly once by
	// Process.
	env Env
	// once guards the lazy one-time initialization in Process.
	once sync.Once
)
// Env is the application configuration read from environment variables; the
// envconfig tags name the variable and supply a local-development default.
type Env struct {
	HttpUrl string `envconfig:"HTTP_URL" default:"localhost:8080"`
	MongoUrl string `envconfig:"MONGO_URL" default:"mongodb://localhost:27017"`
	KafkaBrokers []string `envconfig:"KAFKA_BROKERS" default:"localhost:9092"`
}
// Process lazily parses the environment into the package-level Env exactly
// once and returns it. It panics if parsing fails.
func Process() Env {
	once.Do(func() {
		err := envconfig.Process("", &env)
		if err != nil {
			panic(err)
		}
	})
	return env
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.