text stringlengths 11 4.05M |
|---|
package main
import "fmt"
// main demonstrates basic pointer usage: taking an address, writing through
// a pointer, the nil zero value, and allocation with new().
func main() {
	first := 123

	// A pointer stores the address of another variable.
	firstPtr := &first
	fmt.Printf("Type of firstPtr: %T and value %v\n", firstPtr, firstPtr)

	// Mutating through the pointer changes the pointee.
	*firstPtr += 200
	fmt.Println("New value of first:", first)

	// The zero value of a pointer is nil; dereferencing it would panic,
	// which is why the increment below stays disabled.
	var zeroPtr *int
	//*zeroPtr++
	if zeroPtr == nil {
		fmt.Println("Old Value in ZeroPtr:", zeroPtr)
		zeroPtr = &first
		fmt.Println("New Value in ZeroPtr:", zeroPtr)
	}

	// new(T) allocates a zeroed T and yields a *T.
	width := new(int)
	*width += 1
	fmt.Printf("Type width: %T, value width: %v\n", width, width)
	fmt.Printf("Type of *width %T, and value of *width %v\n", *width, *width)
}
|
package tests_test
import (
"testing"
ecombase "github.com/codedv8/go-ecom-base"
)
// TestTreeNode exercises LinkedTreeNode end to end: insertion, duplicate
// rejection, sorted linkage via Next, and key lookup with FindNode.
// The original repeated the same four-line check for every key; the loops
// below keep the identical assertions without the duplication.
func TestTreeNode(t *testing.T) {
	// Root key "F" puts subsequent keys on both sides of the tree.
	node := &ecombase.LinkedTreeNode{
		Key:  "F",
		Data: "xxx",
	}
	// Every fresh key must be accepted without error.
	for _, key := range []string{"A", "B", "C", "D", "E", "G", "H", "I", "J"} {
		ok, err := node.Add(key, "Hmmmm")
		if ok == false {
			t.Error("ok was false for " + key)
		}
		if err != nil {
			t.Error("Returned error for " + key)
		}
	}
	// A duplicate key is rejected (ok == false) but must not be an error.
	ok, err := node.Add("A", "Whatever")
	if ok != false {
		t.Error("ok should be false for A")
	}
	if err != nil {
		t.Error("Returned error for A")
	}
	// The linked view must start at "A" and proceed in sorted key order.
	first, _ := node.GetFirstNode()
	if first.Key != "A" {
		t.Error("A is not first (" + first.Key + ")")
	}
	next := first.Next
	for _, key := range []string{"B", "C", "D", "E", "F", "G", "H", "I", "J"} {
		if next.Key != key {
			t.Error(key + " is not next (" + next.Key + ")")
		}
		next = next.Next
	}
	// FindNode locates existing keys...
	for _, key := range []string{"C", "J"} {
		match, matchErr := first.FindNode(key)
		if matchErr != nil {
			t.Error("FindNode returned an error")
		}
		if match == nil {
			t.Error("FindNode returned nil")
		} else if match.Key != key {
			t.Errorf("FindNode returned wrong key (%s)\n", match.Key)
		}
	}
	// ...and reports nil (without error) for a missing key.
	match, matchErr := first.FindNode("K")
	if matchErr != nil {
		t.Error("FindNode returned an error")
	}
	if match != nil {
		t.Error("FindNode should return nil")
	}
}
|
package pool
import (
"errors"
"fmt"
"github.com/barakb/go-rpc"
"io"
"net"
"os"
"time"
)
var marshaller *rpc.Marshaller
// tcpTransport is a TCP-backed transport: it accepts connections on bindAddr,
// delivers inbound requests on the consumer channel, and issues outbound RPCs
// over pooled client connections.
type tcpTransport struct {
	rpc.Logger                      // embedded logging (Debug/Info)
	bindAddr       string           // address requested at construction time
	listenAddress  net.Addr         // actual bound address (may differ from bindAddr, e.g. port 0)
	timeout        time.Duration    // I/O timeout; not referenced in this file
	consumer       chan RPC         // inbound requests handed to the consumer
	connectionPool *ConnectionPool  // reusable outbound connections
	server         net.Listener     // accepting socket, owned by listen()
	context        *rpc.Context     // shared connection context
	quit           chan interface{} // closed to signal shutdown to the accept loop
}
// NewTCPTransport creates a TCP transport bound to bindAddr. A nil logger
// falls back to a stdout logger. The constructor blocks until the background
// listener goroutine has bound its socket and published the listen address.
func NewTCPTransport(bindAddr string, timeout time.Duration, connectionPool *ConnectionPool, logger rpc.Logger) *tcpTransport {
	if logger == nil {
		logger = rpc.NewLogger(os.Stdout)
	}
	res := &tcpTransport{Logger: logger, bindAddr: bindAddr, timeout: timeout,
		consumer: make(chan RPC), connectionPool: connectionPool,
		context: rpc.NewContext()}
	res.quit = make(chan interface{})
	addressChannel := make(chan net.Addr)
	go res.listen(addressChannel)
	// do not return before the server publish itself.
	res.listenAddress = <-addressChannel
	close(addressChannel)
	return res
}
// LocalAddr reports the transport's actual listen address once known, and
// falls back to the configured bind address before the listener is up.
func (t *tcpTransport) LocalAddr() string {
	if t.listenAddress == nil {
		return t.bindAddr
	}
	return t.listenAddress.String()
}
// Consumer returns the receive-only channel on which inbound RPCs are
// delivered to the application.
func (t *tcpTransport) Consumer() <-chan RPC {
	return t.consumer
}
// Close is intended to shut the transport down, but every teardown step is
// currently commented out, so calling it releases nothing.
// NOTE(review): the quit channel, listener, connection pool and context are
// never closed — confirm whether leaving this a no-op is intentional.
func (t *tcpTransport) Close() {
	// close(t.quit)
	// t.server.Close()
	// t.connectionPool.Close()
	// t.context.Close()
}
// listen binds the TCP listener, publishes the bound address on
// addressChannel (NewTCPTransport blocks on it), then accepts connections
// until the transport is shut down, spawning one handler goroutine per
// connection.
func (t *tcpTransport) listen(addressChannel chan net.Addr) {
	var err error
	t.server, err = net.Listen("tcp", t.bindAddr)
	if err != nil {
		t.Info("couldn't start listening: %v\n", err)
		// Publish a nil address so NewTCPTransport does not deadlock, and
		// return instead of dereferencing the nil listener below (the
		// original fell through and panicked on t.server.Addr()).
		addressChannel <- nil
		return
	}
	addressChannel <- t.server.Addr()
	t.Debug("Starting listener at %s\n", t.server.Addr().String())
	for {
		connection, err := t.server.Accept()
		if err != nil {
			select {
			case <-t.quit:
				// Shutdown in progress; Accept failed because the listener went away.
				return
			default:
				t.Info("Failed to accept connection : %#v \n", err)
				continue
			}
		}
		con := rpc.Wrap(connection, t.context)
		go t.handleConnection(con)
	}
}
// handleConnection serves one client connection: it decodes requests in a
// loop, forwards each to the consumer channel, waits for the consumer's
// response, and writes it back. The loop ends when the client disconnects
// (io.EOF), a decode fails, or a reply cannot be written.
func (t *tcpTransport) handleConnection(conn *rpc.Connection) {
	t.Debug("handleConnection: %v\n", *conn)
	for {
		req, err := marshaller.UnMarshalRequest(conn)
		if err != nil {
			// io.EOF is the normal end of stream; anything else is logged.
			if err != io.EOF {
				t.Debug("failed to read rpcType %v, error is %#v\n", *conn, err)
			}
			conn.Close()
			return
		}
		// Buffered (capacity 1) so the consumer's Respond never blocks.
		respCh := make(chan RPCResponse, 1)
		rpc := RPC{
			RespChan: respCh,
		}
		rpc.Command = req
		t.consumer <- rpc
		t.Debug("Sending command %#v to consumer, waiting for consumer response\n", rpc.Command)
		resp := <-respCh
		t.Debug("server got consumer respond %#v, sending it back to client\n", resp)
		if err := t.sendReplyFromServer(conn, &resp); err != nil {
			t.Debug("failed to reply %#v on message %#v to client %s\n", resp, req, conn.RemoteAddress())
			conn.Close()
			return
		}
	}
}
// genericRPC sends one RPC of the given type to address over a pooled
// connection and decodes the reply into resp. The connection is returned to
// the pool (not closed) when done.
func (t *tcpTransport) genericRPC(address string, rpcType uint8, args interface{}, resp interface{}) error {
	conn, err := t.connectionPool.Get(address)
	if err != nil {
		// Wrap with %w so callers can still inspect the underlying cause
		// (the original errors.New(fmt.Sprintf(...)) discarded the chain).
		return fmt.Errorf("failed to open client connection to %s for sending request %#v: %w", address, args, err)
	}
	// Return the connection for reuse rather than closing it.
	defer t.connectionPool.Put(conn)
	if err := sendRPC(conn, rpcType, args); err != nil {
		return err
	}
	return marshaller.UnMarshalResponse(conn, resp)
}
// sendRPC marshals args onto conn and flushes the write buffer.
// NOTE(review): the rpcType parameter is ignored — the tag byte is hardcoded
// to 0 — and the Flush result is discarded; confirm both are intentional.
func sendRPC(conn *rpc.Connection, rpcType uint8, args interface{}) error {
	if err := marshaller.Marshal(conn, byte(0), args); err != nil {
		return err
	}
	conn.Flush()
	return nil
}
// sendReplyFromServer writes a consumer response back to the client.
// The leading tag byte encodes the outcome: 1 followed by the error string
// on failure, 0 followed by the response payload on success.
func (t *tcpTransport) sendReplyFromServer(conn *rpc.Connection, response *RPCResponse) error {
	t.Debug("sendReplyFromServer %v -> %v %#v \n", conn.LocalAddress(), conn.RemoteAddress(), response.Response)
	if response.Error != nil {
		if err := marshaller.Marshal(conn, byte(1), response.Error.Error()); err != nil {
			return err
		}
	} else {
		if err := marshaller.Marshal(conn, byte(0), response.Response); err != nil {
			return err
		}
	}
	// NOTE(review): Flush error is discarded here as well.
	conn.Flush()
	return nil
}
// RPCResponse is the consumer's answer to a single RPC: a payload, an error,
// or both.
type RPCResponse struct {
	Response interface{} // payload returned to the client on success
	Error    error       // non-nil when the request failed
}
// RPC has a command, and provides a response mechanism.
// RespChan is send-only from the consumer's perspective; the transport
// receives the response on the other end and forwards it to the client.
type RPC struct {
	Command  interface{}
	RespChan chan<- RPCResponse
}
// Respond is used to respond with a response, error or both.
// It sends on RespChan, which the transport created with capacity 1, so the
// send does not block the consumer.
func (r *RPC) Respond(resp interface{}, err error) {
	r.RespChan <- RPCResponse{resp, err}
}
// Transport abstracts an RPC transport: it exposes the stream of inbound
// requests, the local address, and an Echo round-trip call.
type Transport interface {
	Consumer() <-chan RPC
	LocalAddr() string
	Echo(target string, msg string) (string, error)
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"github.com/thoas/stats"
"github.com/unrolled/render"
)
// env bundles the per-server dependencies handed to the HTTP layer:
// request metrics and the response renderer.
type env struct {
	Metrics *stats.Stats
	Render  *render.Render
}
// Command-line flag values, populated in init().
var fPort string     // port to serve HTTP traffic on
var fFixtures string // path to the JSON fixtures file
// init parses the command-line flags, reads the JSON fixtures file, and seeds
// the in-memory database with every user found in it.
func init() {
	// parse command line flags
	flag.StringVar(&fFixtures, "fixtures", "./fixtures.json", "location of fixtures.json file")
	flag.StringVar(&fPort, "port", "3009", "serve traffic on this port")
	flag.Parse()
	// read JSON fixtures file
	var jsonObject map[string][]User
	fmt.Println("Location of fixtures.json file: " + fFixtures)
	file, err := ioutil.ReadFile(fFixtures)
	if err != nil {
		log.Fatalf("File error: %v\n", err)
	}
	if err = json.Unmarshal(file, &jsonObject); err != nil {
		log.Fatal(err)
	}
	// Load every fixture user into the database. The original indexed
	// users[0] and users[1] directly and panicked unless the file held
	// exactly two entries.
	users := jsonObject["users"]
	if len(users) == 0 {
		log.Fatal("fixtures file contains no users")
	}
	list := make(map[int]User, len(users))
	for i, u := range users {
		list[i] = u
	}
	db = &Database{
		UserList:  list,
		MaxUserID: len(users) - 1,
	}
}
// main wires up the metrics collector and renderer, announces the port, and
// starts serving HTTP traffic.
func main() {
	serverEnv := env{
		Metrics: stats.New(),
		Render:  render.New(),
	}
	fmt.Println("===> 🌍 Starting server on port: " + fPort)
	StartServer(serverEnv, fPort)
}
|
package port
import (
"github.com/mirzaakhena/danarisan/domain/repository"
"github.com/mirzaakhena/danarisan/domain/service"
)
// BuatArisanOutport lists every outbound dependency of the "buat arisan"
// (create arisan) use case: transaction control, ID generation, and the
// repositories the use case reads from and writes to.
type BuatArisanOutport interface {
	service.TransactionDB
	service.IDGenerator
	repository.FindOnePesertaRepo
	repository.SaveArisanRepo
	repository.SavePesertaRepo
	repository.SaveSlotRepo
	repository.SaveUndianRepo
}
|
package advicedb
import (
"context"
"errors"
"log"
"os"
"time"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/joho/godotenv"
)
// Advice is one row of the advices table: the advice text stored for a user
// plus its last-write timestamp.
type Advice struct {
	UserId    string
	Advice    string
	CreatedAt time.Time
}
// Load ../.env.local (when running outside a container) before the variables
// below are read; package-level initializers run in declaration order.
var _ = loadLocalEnv()

// Postgres connection settings. GetEnv terminates the process if any is
// missing, so these are guaranteed non-empty after package init.
var (
	db       = GetEnv("POSTGRES_DB")
	username = GetEnv("POSTGRES_USER")
	password = GetEnv("POSTGRES_PASSWORD")
	host     = GetEnv("POSTGRES_HOST")
)
// NewClient opens a pgx connection pool to the Postgres instance described by
// the POSTGRES_* environment variables read at package init.
func NewClient(ctx context.Context) (*pgxpool.Pool, error) {
	url := "postgres://" + username + ":" + password + "@" + host + "/" + db
	client, err := pgxpool.Connect(ctx, url)
	if err != nil {
		// Keep the underlying cause; the original returned a fixed message
		// and threw the driver error away.
		return nil, errors.New("cannot connect to postgres instance: " + err.Error())
	}
	return client, nil
}
// CreateOne inserts a new advice row for advice.UserId. created_at is set by
// the database (CURRENT_TIMESTAMP); advice.CreatedAt is not used.
func CreateOne(client *pgxpool.Pool, ctx context.Context, advice *Advice) error {
	_, err := client.Exec(ctx, "insert into advices(user_id,advice,created_at) values($1,$2,CURRENT_TIMESTAMP)", advice.UserId, advice.Advice)
	return err
}
// UpdateOne rewrites the advice text and refreshes created_at for
// advice.UserId. Note: updating a nonexistent user is not an error — zero
// rows affected is silently ignored.
func UpdateOne(client *pgxpool.Pool, ctx context.Context, advice *Advice) error {
	_, err := client.Exec(ctx, "update advices set advice=$1, created_at=CURRENT_TIMESTAMP where user_id=$2", advice.Advice, advice.UserId)
	return err
}
// FindOne loads the advice row for the given user id. When no row matches it
// returns the driver's no-rows error from Scan rather than a nil Advice.
func FindOne(client *pgxpool.Pool, ctx context.Context, id string) (*Advice, error) {
	advice := Advice{UserId: id}
	err := client.QueryRow(ctx, "select advice,created_at from advices where user_id=$1", id).Scan(&advice.Advice, &advice.CreatedAt)
	if err != nil {
		return nil, err
	}
	return &advice, nil
}
// loadLocalEnv loads ../.env.local when the process is not running inside a
// container (detected via the ADVICE_GRPC_SERVICE environment variable).
// It always returns nil; the return value exists only so the function can be
// invoked from a package-level var declaration.
func loadLocalEnv() interface{} {
	_, inContainer := os.LookupEnv("ADVICE_GRPC_SERVICE")
	if inContainer {
		return nil
	}
	if err := godotenv.Load("../.env.local"); err != nil {
		log.Fatal(err)
	}
	return nil
}
func GetEnv(key string) string {
value, ok := os.LookupEnv(key)
if !ok {
log.Fatal("Environment variable not found: ", key)
}
return value
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Convert makefile containing device configuration to Starlark file
// The conversion can handle the following constructs in a makefile:
// * comments
// * simple variable assignments
// * $(call init-product,<file>)
// * $(call inherit-product-if-exists
// * if directives
// All other constructs are carried over to the output starlark file as comments.
//
package mk2rbc
import (
"bytes"
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"text/scanner"
mkparser "android/soong/androidmk/parser"
)
// Names exported by the product configuration runtime that the generated
// Starlark code references.
const (
	baseUri = "//build/make/core:product_config.rbc"
	// The name of the struct exported by the product_config.rbc
	// that contains the functions and variables available to
	// product configuration Starlark files.
	baseName = "rblf"
	// And here are the functions and variables:
	cfnGetCfg          = baseName + ".cfg"
	cfnMain            = baseName + ".product_configuration"
	cfnPrintVars       = baseName + ".printvars"
	cfnWarning         = baseName + ".warning"
	cfnLocalAppend     = baseName + ".local_append"
	cfnLocalSetDefault = baseName + ".local_set_default"
	cfnInherit         = baseName + ".inherit"
	cfnSetListDefault  = baseName + ".setdefault"
)
const (
	// Phony makefile functions, they are eventually rewritten
	// according to knownFunctions map
	addSoongNamespace      = "add_soong_config_namespace"
	addSoongConfigVarValue = "add_soong_config_var_value"
	fileExistsPhony        = "$file_exists"
	wildcardExistsPhony    = "$wildcard_exists"
)
// Makefile macro names handled specially by the converter: unconditional and
// conditional product inheritance.
const (
	callLoadAlways = "inherit-product"
	callLoadIf     = "inherit-product-if-exists"
)
// knownFunctions maps each recognized makefile function to its runtime
// counterpart, its Starlark return type, and whether it takes a hidden
// global/config argument.
var knownFunctions = map[string]struct {
	// The name of the runtime function this function call in makefiles maps to.
	// If it starts with !, then this makefile function call is rewritten to
	// something else.
	runtimeName string
	returnType  starlarkType
	hiddenArg   hiddenArgType
}{
	"abspath":                             {baseName + ".abspath", starlarkTypeString, hiddenArgNone},
	fileExistsPhony:                       {baseName + ".file_exists", starlarkTypeBool, hiddenArgNone},
	wildcardExistsPhony:                   {baseName + ".file_wildcard_exists", starlarkTypeBool, hiddenArgNone},
	addSoongNamespace:                     {baseName + ".add_soong_config_namespace", starlarkTypeVoid, hiddenArgGlobal},
	addSoongConfigVarValue:                {baseName + ".add_soong_config_var_value", starlarkTypeVoid, hiddenArgGlobal},
	"add-to-product-copy-files-if-exists": {baseName + ".copy_if_exists", starlarkTypeList, hiddenArgNone},
	"addprefix":                           {baseName + ".addprefix", starlarkTypeList, hiddenArgNone},
	"addsuffix":                           {baseName + ".addsuffix", starlarkTypeList, hiddenArgNone},
	"copy-files":                          {baseName + ".copy_files", starlarkTypeList, hiddenArgNone},
	"dir":                                 {baseName + ".dir", starlarkTypeList, hiddenArgNone},
	"enforce-product-packages-exist":      {baseName + ".enforce_product_packages_exist", starlarkTypeVoid, hiddenArgNone},
	"error":                               {baseName + ".mkerror", starlarkTypeVoid, hiddenArgNone},
	"findstring":                          {"!findstring", starlarkTypeInt, hiddenArgNone},
	"find-copy-subdir-files":              {baseName + ".find_and_copy", starlarkTypeList, hiddenArgNone},
	"find-word-in-list":                   {"!find-word-in-list", starlarkTypeUnknown, hiddenArgNone}, // internal macro
	"filter":                              {baseName + ".filter", starlarkTypeList, hiddenArgNone},
	"filter-out":                          {baseName + ".filter_out", starlarkTypeList, hiddenArgNone},
	"firstword":                           {"!firstword", starlarkTypeString, hiddenArgNone},
	"get-vendor-board-platforms":          {"!get-vendor-board-platforms", starlarkTypeList, hiddenArgNone}, // internal macro, used by is-board-platform, etc.
	"info":                                {baseName + ".mkinfo", starlarkTypeVoid, hiddenArgNone},
	"is-android-codename":                 {"!is-android-codename", starlarkTypeBool, hiddenArgNone},         // unused by product config
	"is-android-codename-in-list":         {"!is-android-codename-in-list", starlarkTypeBool, hiddenArgNone}, // unused by product config
	"is-board-platform":                   {"!is-board-platform", starlarkTypeBool, hiddenArgNone},
	"is-board-platform-in-list":           {"!is-board-platform-in-list", starlarkTypeBool, hiddenArgNone},
	"is-chipset-in-board-platform":        {"!is-chipset-in-board-platform", starlarkTypeUnknown, hiddenArgNone},     // unused by product config
	"is-chipset-prefix-in-board-platform": {"!is-chipset-prefix-in-board-platform", starlarkTypeBool, hiddenArgNone}, // unused by product config
	"is-not-board-platform":               {"!is-not-board-platform", starlarkTypeBool, hiddenArgNone},               // defined but never used
	"is-platform-sdk-version-at-least":    {"!is-platform-sdk-version-at-least", starlarkTypeBool, hiddenArgNone},    // unused by product config
	"is-product-in-list":                  {"!is-product-in-list", starlarkTypeBool, hiddenArgNone},
	"is-vendor-board-platform":            {"!is-vendor-board-platform", starlarkTypeBool, hiddenArgNone},
	callLoadAlways:                        {"!inherit-product", starlarkTypeVoid, hiddenArgNone},
	callLoadIf:                            {"!inherit-product-if-exists", starlarkTypeVoid, hiddenArgNone},
	"lastword":                            {"!lastword", starlarkTypeString, hiddenArgNone},
	"match-prefix":                        {"!match-prefix", starlarkTypeUnknown, hiddenArgNone},       // internal macro
	"match-word":                          {"!match-word", starlarkTypeUnknown, hiddenArgNone},         // internal macro
	"match-word-in-list":                  {"!match-word-in-list", starlarkTypeUnknown, hiddenArgNone}, // internal macro
	"notdir":                              {baseName + ".notdir", starlarkTypeString, hiddenArgNone},
	"my-dir":                              {"!my-dir", starlarkTypeString, hiddenArgNone},
	"patsubst":                            {baseName + ".mkpatsubst", starlarkTypeString, hiddenArgNone},
	"product-copy-files-by-pattern":       {baseName + ".product_copy_files_by_pattern", starlarkTypeList, hiddenArgNone},
	"require-artifacts-in-path":           {baseName + ".require_artifacts_in_path", starlarkTypeVoid, hiddenArgNone},
	"require-artifacts-in-path-relaxed":   {baseName + ".require_artifacts_in_path_relaxed", starlarkTypeVoid, hiddenArgNone},
	// TODO(asmundak): remove it once all calls are removed from configuration makefiles. see b/183161002
	"shell":      {baseName + ".shell", starlarkTypeString, hiddenArgNone},
	"strip":      {baseName + ".mkstrip", starlarkTypeString, hiddenArgNone},
	"tb-modules": {"!tb-modules", starlarkTypeUnknown, hiddenArgNone}, // defined in hardware/amlogic/tb_modules/tb_detect.mk, unused
	"subst":      {baseName + ".mksubst", starlarkTypeString, hiddenArgNone},
	"warning":    {baseName + ".mkwarning", starlarkTypeVoid, hiddenArgNone},
	// Fixed: was baseName + "!word" ("rblf!word"), which neither starts with
	// '!' (the rewrite marker documented above) nor names a runtime attribute.
	"word":     {"!word", starlarkTypeString, hiddenArgNone},
	"wildcard": {baseName + ".expand_wildcard", starlarkTypeList, hiddenArgNone},
}
// builtinFuncRex matches the names of GNU Make built-in functions; it is used
// to tell a built-in call apart from a user-defined macro.
var builtinFuncRex = regexp.MustCompile(
	"^(addprefix|addsuffix|abspath|and|basename|call|dir|error|eval" +
		"|flavor|foreach|file|filter|filter-out|findstring|firstword|guile" +
		"|if|info|join|lastword|notdir|or|origin|patsubst|realpath" +
		"|shell|sort|strip|subst|suffix|value|warning|word|wordlist|words" +
		"|wildcard)")
// Request bundles the parameters of a single makefile-to-Starlark conversion.
type Request struct {
	MkFile          string    // file to convert
	Reader          io.Reader // if set, read input from this stream instead
	RootDir         string    // root directory path used to resolve included files
	OutputSuffix    string    // generated Starlark files suffix
	OutputDir       string    // if set, root of the output hierarchy
	ErrorLogger     ErrorMonitorCB
	TracedVariables []string // trace assignment to these variables
	TraceCalls      bool     // print enter/exit of each generated init function
	WarnPartialSuccess bool  // emit a warning into the output when conversion had errors
	SourceFS        fs.FS
	MakefileFinder  MakefileFinder
}
// ErrorMonitorCB is an error sink allowing the caller to gather error
// statistics. NewError is called on every error encountered during processing.
type ErrorMonitorCB interface {
	NewError(s string, node mkparser.Node, args ...interface{})
}
// moduleNameForFile derives the Starlark module name for a makefile: the base
// file name without its extension, with '-' and '.' (illegal in Starlark
// identifiers) replaced by underscores.
// TODO(asmundak): what else can be in the product file names?
func moduleNameForFile(mkFile string) string {
	name := filepath.Base(mkFile)
	name = strings.TrimSuffix(name, filepath.Ext(name))
	return strings.ReplaceAll(strings.ReplaceAll(name, "-", "_"), ".", "_")
}
// cloneMakeString returns a shallow copy of mkString: the position is shared,
// the Strings and Variables slices get fresh backing arrays.
func cloneMakeString(mkString *mkparser.MakeString) *mkparser.MakeString {
	dup := &mkparser.MakeString{StringPos: mkString.StringPos}
	dup.Variables = append(dup.Variables, mkString.Variables...)
	dup.Strings = append(dup.Strings, mkString.Strings...)
	return dup
}
// isMakeControlFunc reports whether s is one of the makefile functions that
// emit diagnostics or abort the build rather than compute a value.
func isMakeControlFunc(s string) bool {
	switch s {
	case "error", "warning", "info":
		return true
	default:
		return false
	}
}
// generationContext is the Starlark output generation context: the buffer
// being written, the script being emitted, and formatting state.
type generationContext struct {
	buf          strings.Builder // accumulated output text
	starScript   *StarlarkScript // script being rendered
	indentLevel  int             // current indentation depth (2 spaces per level)
	inAssignment bool
	tracedCount  int
}
// NewGenerateContext creates a generation context for the given script.
func NewGenerateContext(ss *StarlarkScript) *generationContext {
	return &generationContext{starScript: ss}
}
// emit returns generated script
func (gctx *generationContext) emit() string {
	ss := gctx.starScript

	// The emitted code has the following layout:
	//    <initial comments>
	//    preamble, i.e.,
	//      load statement for the runtime support
	//      load statement for each unique submodule pulled in by this one
	//    def init(g, handle):
	//      cfg = rblf.cfg(handle)
	//      <statements>
	//      <warning if conversion was not clean>

	// Emit the leading comment nodes first; iNode ends up at the first
	// non-comment node, where the init() body starts.
	iNode := len(ss.nodes)
	for i, node := range ss.nodes {
		if _, ok := node.(*commentNode); !ok {
			iNode = i
			break
		}
		node.emit(gctx)
	}

	gctx.emitPreamble()

	gctx.newLine()
	// The arguments passed to the init function are the global dictionary
	// ('g') and the product configuration dictionary ('cfg')
	gctx.write("def init(g, handle):")
	gctx.indentLevel++
	if gctx.starScript.traceCalls {
		gctx.newLine()
		gctx.writef(`print(">%s")`, gctx.starScript.mkFile)
	}
	gctx.newLine()
	gctx.writef("cfg = %s(handle)", cfnGetCfg)
	// Emit the remaining (non-leading-comment) nodes inside init().
	for _, node := range ss.nodes[iNode:] {
		node.emit(gctx)
	}
	if ss.hasErrors && ss.warnPartialSuccess {
		gctx.newLine()
		gctx.writef("%s(%q, %q)", cfnWarning, filepath.Base(ss.mkFile), "partially successful conversion")
	}
	if gctx.starScript.traceCalls {
		gctx.newLine()
		gctx.writef(`print("<%s")`, gctx.starScript.mkFile)
	}
	gctx.indentLevel--
	gctx.write("\n")
	return gctx.buf.String()
}
// emitPreamble writes the load statements: first the runtime support module,
// then exactly one load per unique inherited submodule URI.
func (gctx *generationContext) emitPreamble() {
	gctx.newLine()
	gctx.writef("load(%q, %q)", baseUri, baseName)
	// Emit exactly one load statement for each URI.
	loadedSubConfigs := make(map[string]string)
	for _, sc := range gctx.starScript.inherited {
		uri := sc.path
		if m, ok := loadedSubConfigs[uri]; ok {
			// No need to emit load statement, but fix module name.
			sc.moduleLocalName = m
			continue
		}
		if sc.optional {
			// NOTE(review): "|init" marks the loaded symbol as optional —
			// presumably understood by the rbc loader; confirm.
			uri += "|init"
		}
		gctx.newLine()
		gctx.writef("load(%q, %s = \"init\")", uri, sc.entryName())
		loadedSubConfigs[uri] = sc.moduleLocalName
	}
	gctx.write("\n")
}
// emitPass writes a Starlark `pass` statement on its own line (used for
// blocks that would otherwise be empty).
func (gctx *generationContext) emitPass() {
	gctx.newLine()
	gctx.write("pass")
}
// write appends each string to the output buffer verbatim.
func (gctx *generationContext) write(ss ...string) {
	for _, s := range ss {
		gctx.buf.WriteString(s)
	}
}
// writef appends a formatted string to the output buffer.
func (gctx *generationContext) writef(format string, args ...interface{}) {
	gctx.write(fmt.Sprintf(format, args...))
}
// newLine starts a new output line indented to the current level (2 spaces
// per level). It is a no-op before anything has been written, so the output
// never begins with a blank line.
func (gctx *generationContext) newLine() {
	if gctx.buf.Len() == 0 {
		return
	}
	gctx.write("\n")
	gctx.writef("%*s", 2*gctx.indentLevel, "")
}
// knownVariable describes a registered product variable: its name, class
// (e.g. config vs. soong) and Starlark value type.
type knownVariable struct {
	name      string
	class     varClass
	valueType starlarkType
}

// knownVariables indexes knownVariable records by variable name.
type knownVariables map[string]knownVariable
// NewVariable registers a variable, or reconciles its class/type with an
// existing registration. Conflict resolution: the config class trumps any
// other class, and any concrete type trumps the unknown type. An
// unresolvable redefinition is reported to stderr.
// NOTE(review): v is a copy of the map entry — the reconciled class/type is
// never stored back into pcv, so the mutations below only affect this call's
// conflict check and error message. Confirm whether pcv[name] = v is missing.
func (pcv knownVariables) NewVariable(name string, varClass varClass, valueType starlarkType) {
	v, exists := pcv[name]
	if !exists {
		pcv[name] = knownVariable{name, varClass, valueType}
		return
	}
	// Conflict resolution:
	//    * config class trumps everything
	//    * any type trumps unknown type
	match := varClass == v.class
	if !match {
		if varClass == VarClassConfig {
			v.class = VarClassConfig
			match = true
		} else if v.class == VarClassConfig {
			match = true
		}
	}
	if valueType != v.valueType {
		if valueType != starlarkTypeUnknown {
			if v.valueType == starlarkTypeUnknown {
				v.valueType = valueType
			} else {
				match = false
			}
		}
	}
	if !match {
		fmt.Fprintf(os.Stderr, "cannot redefine %s as %v/%v (already defined as %v/%v)\n",
			name, varClass, valueType, v.class, v.valueType)
	}
}
// All known product variables.
var KnownVariables = make(knownVariables)

// Pre-register variables whose list type cannot be inferred from assignments.
func init() {
	for _, kv := range []string{
		// Kernel-related variables that we know are lists.
		"BOARD_VENDOR_KERNEL_MODULES",
		"BOARD_VENDOR_RAMDISK_KERNEL_MODULES",
		"BOARD_VENDOR_RAMDISK_KERNEL_MODULES_LOAD",
		"BOARD_RECOVERY_KERNEL_MODULES",
		// Other variables we know are lists
		"ART_APEX_JARS",
	} {
		KnownVariables.NewVariable(kv, VarClassSoong, starlarkTypeList)
	}
}
// nodeReceiver accepts generated Starlark nodes; implemented by the script
// itself and by compound statements that collect child nodes.
type nodeReceiver interface {
	newNode(node starlarkNode)
}
// Information about the generated Starlark script.
type StarlarkScript struct {
	mkFile             string           // source makefile path
	moduleName         string           // Starlark module name derived from mkFile
	mkPos              scanner.Position // current position in the source
	nodes              []starlarkNode   // generated output nodes, in order
	inherited          []*moduleInfo    // submodules pulled in via inherit-product
	hasErrors          bool             // true when conversion hit errors
	topDir             string
	traceCalls         bool // print enter/exit each init function
	warnPartialSuccess bool // emit a runtime warning when hasErrors is set
	sourceFS           fs.FS
	makefileFinder     MakefileFinder
}
// newNode appends a generated node to the script (nodeReceiver impl).
func (ss *StarlarkScript) newNode(node starlarkNode) {
	ss.nodes = append(ss.nodes, node)
}
// varAssignmentScope points to the last assignment for each variable
// in the current block. It is used during the parsing to chain
// the assignments to a variable together. Scopes form a linked list via
// outer, mirroring block nesting.
type varAssignmentScope struct {
	outer *varAssignmentScope
	vars  map[string]*assignmentNode
}
// parseContext holds the script we are generating and all the ephemeral data
// needed during the parsing.
type parseContext struct {
	script           *StarlarkScript
	nodes            []mkparser.Node // Makefile as parsed by mkparser
	currentNodeIndex int             // Node in it we are processing
	ifNestLevel      int
	moduleNameCount  map[string]int // count of imported modules with given basename
	fatalError       error
	builtinMakeVars  map[string]starlarkExpr
	outputSuffix     string
	errorLogger      ErrorMonitorCB
	tracedVariables  map[string]bool // variables to be traced in the generated script
	variables        map[string]variable
	varAssignments   *varAssignmentScope
	receiver         nodeReceiver // receptacle for the generated starlarkNode's
	receiverStack    []nodeReceiver
	outputDir        string
	dependentModules map[string]*moduleInfo
	soongNamespaces  map[string]map[string]bool
}
// newParseContext creates a parse context for the given script and parsed
// makefile nodes, seeding the variable table with predefined make variables
// (paths and the utility constants from build/make/common/core.mk).
func newParseContext(ss *StarlarkScript, nodes []mkparser.Node) *parseContext {
	// Ensure topdir ends with a path separator (Split of ".../foo" keeps it).
	topdir, _ := filepath.Split(filepath.Join(ss.topDir, "foo"))
	predefined := []struct{ name, value string }{
		{"SRC_TARGET_DIR", filepath.Join("build", "make", "target")},
		{"LOCAL_PATH", filepath.Dir(ss.mkFile)},
		{"TOPDIR", topdir},
		// TODO(asmundak): maybe read it from build/make/core/envsetup.mk?
		{"TARGET_COPY_OUT_SYSTEM", "system"},
		{"TARGET_COPY_OUT_SYSTEM_OTHER", "system_other"},
		{"TARGET_COPY_OUT_DATA", "data"},
		{"TARGET_COPY_OUT_ASAN", filepath.Join("data", "asan")},
		{"TARGET_COPY_OUT_OEM", "oem"},
		{"TARGET_COPY_OUT_RAMDISK", "ramdisk"},
		{"TARGET_COPY_OUT_DEBUG_RAMDISK", "debug_ramdisk"},
		{"TARGET_COPY_OUT_VENDOR_DEBUG_RAMDISK", "vendor_debug_ramdisk"},
		{"TARGET_COPY_OUT_TEST_HARNESS_RAMDISK", "test_harness_ramdisk"},
		{"TARGET_COPY_OUT_ROOT", "root"},
		{"TARGET_COPY_OUT_RECOVERY", "recovery"},
		{"TARGET_COPY_OUT_VENDOR_RAMDISK", "vendor_ramdisk"},
		// TODO(asmundak): to process internal config files, we need the following variables:
		//    BOARD_CONFIG_VENDOR_PATH
		//    TARGET_VENDOR
		//    target_base_product
		//

		// the following utility variables are set in build/make/common/core.mk:
		{"empty", ""},
		{"space", " "},
		{"comma", ","},
		{"newline", "\n"},
		{"pound", "#"},
		{"backslash", "\\"},
	}
	ctx := &parseContext{
		script:           ss,
		nodes:            nodes,
		currentNodeIndex: 0,
		ifNestLevel:      0,
		moduleNameCount:  make(map[string]int),
		builtinMakeVars:  map[string]starlarkExpr{},
		variables:        make(map[string]variable),
		dependentModules: make(map[string]*moduleInfo),
		soongNamespaces:  make(map[string]map[string]bool),
	}
	// Root assignment scope must exist before any variable is registered.
	ctx.pushVarAssignments()
	for _, item := range predefined {
		ctx.variables[item.name] = &predefinedVariable{
			baseVariable: baseVariable{nam: item.name, typ: starlarkTypeString},
			value:        &stringLiteralExpr{item.value},
		}
	}
	return ctx
}
// lastAssignment returns the most recent assignment to name visible from the
// current scope chain, or nil if the variable has not been assigned yet.
func (ctx *parseContext) lastAssignment(name string) *assignmentNode {
	for scope := ctx.varAssignments; scope != nil; scope = scope.outer {
		if asgn, found := scope.vars[name]; found {
			return asgn
		}
	}
	return nil
}
// setLastAssignment records asgn as the latest assignment to name in the
// current (innermost) scope.
func (ctx *parseContext) setLastAssignment(name string, asgn *assignmentNode) {
	ctx.varAssignments.vars[name] = asgn
}
// pushVarAssignments opens a new (nested) variable assignment scope.
func (ctx *parseContext) pushVarAssignments() {
	va := &varAssignmentScope{
		outer: ctx.varAssignments,
		vars:  make(map[string]*assignmentNode),
	}
	ctx.varAssignments = va
}
// popVarAssignments closes the innermost assignment scope, restoring its
// parent.
func (ctx *parseContext) popVarAssignments() {
	ctx.varAssignments = ctx.varAssignments.outer
}
// pushReceiver makes rcv the target for generated nodes, saving the previous
// receiver on a stack (restored by popReceiver).
func (ctx *parseContext) pushReceiver(rcv nodeReceiver) {
	ctx.receiverStack = append(ctx.receiverStack, ctx.receiver)
	ctx.receiver = rcv
}
// popReceiver restores the node receiver that was active before the matching
// pushReceiver call. It panics when the stack is empty (unbalanced pop).
func (ctx *parseContext) popReceiver() {
	n := len(ctx.receiverStack)
	if n == 0 {
		panic(fmt.Errorf("popReceiver: receiver stack empty"))
	}
	ctx.receiver = ctx.receiverStack[n-1]
	ctx.receiverStack = ctx.receiverStack[:n-1]
}
// hasNodes reports whether unprocessed makefile nodes remain.
func (ctx *parseContext) hasNodes() bool {
	return ctx.currentNodeIndex < len(ctx.nodes)
}
// getNode consumes and returns the next makefile node, or nil when the node
// list is exhausted.
func (ctx *parseContext) getNode() mkparser.Node {
	if ctx.currentNodeIndex >= len(ctx.nodes) {
		return nil
	}
	n := ctx.nodes[ctx.currentNodeIndex]
	ctx.currentNodeIndex++
	return n
}
// backNode un-consumes the last node returned by getNode; panics when nothing
// has been consumed yet.
func (ctx *parseContext) backNode() {
	if ctx.currentNodeIndex <= 0 {
		panic("Cannot back off")
	}
	ctx.currentNodeIndex--
}
// handleAssignment converts one makefile assignment into an assignmentNode:
// it resolves the LHS variable, parses/types the RHS, chains the assignment
// to previous ones for the same variable, and maps the makefile assignment
// operator (=, :=, +=, ?=) onto an assignment flavor.
func (ctx *parseContext) handleAssignment(a *mkparser.Assignment) {
	// Handle only simple variables
	if !a.Name.Const() {
		ctx.errorf(a, "Only simple variables are handled")
		return
	}
	name := a.Name.Strings[0]
	const soongNsPrefix = "SOONG_CONFIG_"
	// Soong configuration assignments get dedicated handling.
	if strings.HasPrefix(name, soongNsPrefix) {
		ctx.handleSoongNsAssignment(strings.TrimPrefix(name, soongNsPrefix), a)
		return
	}
	lhs := ctx.addVariable(name)
	if lhs == nil {
		ctx.errorf(a, "unknown variable %s", name)
		return
	}
	_, isTraced := ctx.tracedVariables[name]
	asgn := &assignmentNode{lhs: lhs, mkValue: a.Value, isTraced: isTraced}
	if lhs.valueType() == starlarkTypeUnknown {
		// Try to divine variable type from the RHS
		asgn.value = ctx.parseMakeString(a, a.Value)
		if xBad, ok := asgn.value.(*badExpr); ok {
			ctx.wrapBadExpr(xBad)
			return
		}
		inferred_type := asgn.value.typ()
		if inferred_type != starlarkTypeUnknown {
			lhs.setValueType(inferred_type)
		}
	}
	if lhs.valueType() == starlarkTypeList {
		// List-typed variables get their RHS split into a concatenation of
		// list-valued pieces; collapse trivial cases.
		xConcat := ctx.buildConcatExpr(a)
		if xConcat == nil {
			return
		}
		switch len(xConcat.items) {
		case 0:
			asgn.value = &listExpr{}
		case 1:
			asgn.value = xConcat.items[0]
		default:
			asgn.value = xConcat
		}
	} else {
		asgn.value = ctx.parseMakeString(a, a.Value)
		if xBad, ok := asgn.value.(*badExpr); ok {
			ctx.wrapBadExpr(xBad)
			return
		}
	}

	// TODO(asmundak): move evaluation to a separate pass
	asgn.value, _ = asgn.value.eval(ctx.builtinMakeVars)

	asgn.previous = ctx.lastAssignment(name)
	ctx.setLastAssignment(name, asgn)
	switch a.Type {
	case "=", ":=":
		asgn.flavor = asgnSet
	case "+=":
		// First += with no prior assignment and no preset value may need a
		// runtime existence check.
		if asgn.previous == nil && !asgn.lhs.isPreset() {
			asgn.flavor = asgnMaybeAppend
		} else {
			asgn.flavor = asgnAppend
		}
	case "?=":
		asgn.flavor = asgnMaybeSet
	default:
		panic(fmt.Errorf("unexpected assignment type %s", a.Type))
	}

	ctx.receiver.newNode(asgn)
}
// handleSoongNsAssignment converts an assignment to a SOONG_CONFIG_* makefile
// variable (name has the prefix already stripped) into the equivalent
// add_soong_config_namespace / add_soong_config_var_value runtime calls.
func (ctx *parseContext) handleSoongNsAssignment(name string, asgn *mkparser.Assignment) {
	val := ctx.parseMakeString(asgn, asgn.Value)
	if xBad, ok := val.(*badExpr); ok {
		ctx.wrapBadExpr(xBad)
		return
	}
	val, _ = val.eval(ctx.builtinMakeVars)

	// Unfortunately, Soong namespaces can be set up by directly setting corresponding Make
	// variables instead of via add_soong_config_namespace + add_soong_config_var_value.
	// Try to divine the call from the assignment as follows:
	if name == "NAMESPACES" {
		// Upon seeing
		//      SOONG_CONFIG_NAMESPACES += foo
		// remember that there is a namespace `foo` and act as we saw
		//      $(call add_soong_config_namespace,foo)
		s, ok := maybeString(val)
		if !ok {
			ctx.errorf(asgn, "cannot handle variables in SOONG_CONFIG_NAMESPACES assignment, please use add_soong_config_namespace instead")
			return
		}
		for _, ns := range strings.Fields(s) {
			ctx.addSoongNamespace(ns)
			ctx.receiver.newNode(&exprNode{&callExpr{
				name:       addSoongNamespace,
				args:       []starlarkExpr{&stringLiteralExpr{ns}},
				returnType: starlarkTypeVoid,
			}})
		}
	} else {
		// Upon seeing
		//      SOONG_CONFIG_x_y = v
		// find a namespace called `x` and act as if we encountered
		//      $(call add_config_var_value(x,y,v)
		// or check that `x_y` is a namespace, and then add the RHS of this assignment as variables in
		// it.
		// Emit an error in the ambiguous situation (namespaces `foo_bar` with a variable `baz`
		// and `foo` with a variable `bar_baz`.
		namespaceName := ""
		if ctx.hasSoongNamespace(name) {
			namespaceName = name
		}
		var varName string
		// Scan every '_' split point for a known-namespace prefix.
		for pos, ch := range name {
			if !(ch == '_' && ctx.hasSoongNamespace(name[0:pos])) {
				continue
			}
			if namespaceName != "" {
				ctx.errorf(asgn, "ambiguous soong namespace (may be either `%s` or `%s`)", namespaceName, name[0:pos])
				return
			}
			namespaceName = name[0:pos]
			varName = name[pos+1:]
		}
		if namespaceName == "" {
			ctx.errorf(asgn, "cannot figure out Soong namespace, please use add_soong_config_var_value macro instead")
			return
		}
		if varName == "" {
			// Remember variables in this namespace
			s, ok := maybeString(val)
			if !ok {
				ctx.errorf(asgn, "cannot handle variables in SOONG_CONFIG_ assignment, please use add_soong_config_var_value instead")
				return
			}
			ctx.updateSoongNamespace(asgn.Type != "+=", namespaceName, strings.Fields(s))
			return
		}

		// Finally, handle assignment to a namespace variable
		if !ctx.hasNamespaceVar(namespaceName, varName) {
			ctx.errorf(asgn, "no %s variable in %s namespace, please use add_soong_config_var_value instead", varName, namespaceName)
			return
		}
		ctx.receiver.newNode(&exprNode{&callExpr{
			name:       addSoongConfigVarValue,
			args:       []starlarkExpr{&stringLiteralExpr{namespaceName}, &stringLiteralExpr{varName}, val},
			returnType: starlarkTypeVoid,
		}})
	}
}
// buildConcatExpr converts the RHS of a `+=`-style assignment into a
// concatenation of list expressions. Consecutive scalar items are batched
// into a single listExpr; list-typed and string-typed sub-expressions become
// their own concatenation operands. Returns nil after reporting an error if
// any item fails to parse.
func (ctx *parseContext) buildConcatExpr(a *mkparser.Assignment) *concatExpr {
	xConcat := &concatExpr{}
	// Pending batch of individual elements, flushed into xConcat whenever a
	// list- or string-typed operand interrupts the run of scalars.
	var xItemList *listExpr
	addToItemList := func(x ...starlarkExpr) {
		if xItemList == nil {
			xItemList = &listExpr{[]starlarkExpr{}}
		}
		xItemList.items = append(xItemList.items, x...)
	}
	finishItemList := func() {
		if xItemList != nil {
			xConcat.items = append(xConcat.items, xItemList)
			xItemList = nil
		}
	}

	items := a.Value.Words()
	for _, item := range items {
		// A function call in RHS is supposed to return a list, all other item
		// expressions return individual elements.
		switch x := ctx.parseMakeString(a, item).(type) {
		case *badExpr:
			ctx.wrapBadExpr(x)
			return nil
		case *stringLiteralExpr:
			// A literal may contain several whitespace-separated words.
			addToItemList(maybeConvertToStringList(x).(*listExpr).items...)
		default:
			switch x.typ() {
			case starlarkTypeList:
				finishItemList()
				xConcat.items = append(xConcat.items, x)
			case starlarkTypeString:
				// A string operand is split into words at runtime.
				finishItemList()
				xConcat.items = append(xConcat.items, &callExpr{
					object:     x,
					name:       "split",
					args:       nil,
					returnType: starlarkTypeList,
				})
			default:
				addToItemList(x)
			}
		}
	}
	// Flush any trailing batch of scalars.
	if xItemList != nil {
		xConcat.items = append(xConcat.items, xItemList)
	}
	return xConcat
}
// newDependentModule returns the moduleInfo for the makefile at `path`,
// creating and registering it on first sight. A module referenced both
// optionally and unconditionally ends up marked optional. Local names are
// disambiguated with a per-name counter (_foo, _foo1, _foo2, ...).
func (ctx *parseContext) newDependentModule(path string, optional bool) *moduleInfo {
	modulePath := ctx.loadedModulePath(path)
	if existing, seen := ctx.dependentModules[modulePath]; seen {
		existing.optional = existing.optional || optional
		return existing
	}

	baseName := moduleNameForFile(path)
	localName := "_" + baseName
	count, seen := ctx.moduleNameCount[baseName]
	if seen {
		// Another module already claimed this name; append the counter.
		localName += strconv.Itoa(count)
	}
	ctx.moduleNameCount[baseName] = count + 1

	module := &moduleInfo{
		path:            modulePath,
		originalPath:    path,
		moduleLocalName: localName,
		optional:        optional,
	}
	ctx.dependentModules[modulePath] = module
	ctx.script.inherited = append(ctx.script.inherited, module)
	return module
}
// handleSubConfig resolves the path argument of inherit-product/include and
// invokes processModule once per module the path can refer to. Static paths
// (possibly with globs) resolve immediately; paths containing variable
// references are matched against the source tree to enumerate candidates.
// loadAlways distinguishes the unconditional forms from the *-if-exists ones.
func (ctx *parseContext) handleSubConfig(
	v mkparser.Node, pathExpr starlarkExpr, loadAlways bool, processModule func(inheritedModule)) {
	pathExpr, _ = pathExpr.eval(ctx.builtinMakeVars)

	// In a simple case, the name of a module to inherit/include is known statically.
	if path, ok := maybeString(pathExpr); ok {
		if strings.Contains(path, "*") {
			// Glob pattern: expand now against the source tree.
			if paths, err := fs.Glob(ctx.script.sourceFS, path); err == nil {
				for _, p := range paths {
					processModule(inheritedStaticModule{ctx.newDependentModule(p, !loadAlways), loadAlways})
				}
			} else {
				ctx.errorf(v, "cannot glob wildcard argument")
			}
		} else {
			processModule(inheritedStaticModule{ctx.newDependentModule(path, !loadAlways), loadAlways})
		}
		return
	}

	// If module path references variables (e.g., $(v1)/foo/$(v2)/device-config.mk), find all the paths in the
	// source tree that may be a match and the corresponding variable values. For instance, if the source tree
	// contains vendor1/foo/abc/dev.mk and vendor2/foo/def/dev.mk, the first one will be inherited when
	// (v1, v2) == ('vendor1', 'abc'), and the second one when (v1, v2) == ('vendor2', 'def').
	// We then emit the code that loads all of them, e.g.:
	//    load("//vendor1/foo/abc:dev.rbc", _dev1_init="init")
	//    load("//vendor2/foo/def/dev.rbc", _dev2_init="init")
	// And then inherit it as follows:
	//    _e = {
	//       "vendor1/foo/abc/dev.mk": ("vendor1/foo/abc/dev", _dev1_init),
	//       "vendor2/foo/def/dev.mk": ("vendor2/foo/def/dev", _dev_init2) }.get("%s/foo/%s/dev.mk" % (v1, v2))
	//    if _e:
	//       rblf.inherit(handle, _e[0], _e[1])
	//
	var matchingPaths []string
	varPath, ok := pathExpr.(*interpolateExpr)
	if !ok {
		ctx.errorf(v, "inherit-product/include argument is too complex")
		return
	}

	// Collapse empty interpolation chunks; the remaining literal fragments
	// form the search pattern.
	pathPattern := []string{varPath.chunks[0]}
	for _, chunk := range varPath.chunks[1:] {
		if chunk != "" {
			pathPattern = append(pathPattern, chunk)
		}
	}
	if pathPattern[0] != "" {
		matchingPaths = ctx.findMatchingPaths(pathPattern)
	} else {
		// Heuristics -- if pattern starts from top, restrict it to the directories where
		// we know inherit-product uses dynamically calculated path. Restrict it even further
		// for certain path which would yield too many useless matches
		if len(varPath.chunks) == 2 && varPath.chunks[1] == "/BoardConfigVendor.mk" {
			pathPattern[0] = "vendor/google_devices"
			matchingPaths = ctx.findMatchingPaths(pathPattern)
		} else {
			for _, t := range []string{"vendor/qcom", "vendor/google_devices"} {
				pathPattern[0] = t
				matchingPaths = append(matchingPaths, ctx.findMatchingPaths(pathPattern)...)
			}
		}
	}
	// Safeguard against $(call inherit-product,$(PRODUCT_PATH))
	const maxMatchingFiles = 150
	if len(matchingPaths) > maxMatchingFiles {
		ctx.errorf(v, "there are >%d files matching the pattern, please rewrite it", maxMatchingFiles)
		return
	}
	res := inheritedDynamicModule{*varPath, []*moduleInfo{}, loadAlways}
	for _, p := range matchingPaths {
		// A product configuration files discovered dynamically may attempt to inherit
		// from another one which does not exist in this source tree. Prevent load errors
		// by always loading the dynamic files as optional.
		res.candidateModules = append(res.candidateModules, ctx.newDependentModule(p, true))
	}
	processModule(res)
}
// findMatchingPaths returns every makefile in the source tree matching the
// pattern. The pattern is a list of literal fragments: the first anchors the
// start of the path, the last anchors its end, and any text may occur between
// consecutive fragments. An empty pattern matches every file.
//
// Fixes: non-idiomatic underscore name (s_regexp) and quadratic string
// concatenation replaced with strings.Builder.
func (ctx *parseContext) findMatchingPaths(pattern []string) []string {
	files := ctx.script.makefileFinder.Find(ctx.script.topDir)
	if len(pattern) == 0 {
		return files
	}

	// Build "^frag0.*frag1.* ... fragN$" from the quoted fragments.
	var sb strings.Builder
	sb.WriteString("^")
	sb.WriteString(regexp.QuoteMeta(pattern[0]))
	for _, fragment := range pattern[1:] {
		sb.WriteString(".*")
		sb.WriteString(regexp.QuoteMeta(fragment))
	}
	sb.WriteString("$")
	rex := regexp.MustCompile(sb.String())

	// Now match
	var res []string
	for _, p := range files {
		if rex.MatchString(p) {
			res = append(res, p)
		}
	}
	return res
}
// handleInheritModule emits an inherit node for every module the
// inherit-product path argument can resolve to.
func (ctx *parseContext) handleInheritModule(v mkparser.Node, pathExpr starlarkExpr, loadAlways bool) {
	emit := func(module inheritedModule) {
		ctx.receiver.newNode(&inheritNode{module})
	}
	ctx.handleSubConfig(v, pathExpr, loadAlways, emit)
}
// handleInclude emits an include node for every module the include path
// argument can resolve to.
func (ctx *parseContext) handleInclude(v mkparser.Node, pathExpr starlarkExpr, loadAlways bool) {
	emit := func(module inheritedModule) {
		ctx.receiver.newNode(&includeNode{module})
	}
	ctx.handleSubConfig(v, pathExpr, loadAlways, emit)
}
// handleVariable processes a top-level Make variable reference, which at
// statement position is really a macro invocation.
func (ctx *parseContext) handleVariable(v *mkparser.Variable) {
	// Handle:
	//   $(call inherit-product,...)
	//   $(call inherit-product-if-exists,...)
	//   $(info xxx)
	//   $(warning xxx)
	//   $(error xxx)
	expr := ctx.parseReference(v, v.Name)
	switch x := expr.(type) {
	case *callExpr:
		if x.name == callLoadAlways || x.name == callLoadIf {
			// inherit-product / inherit-product-if-exists.
			ctx.handleInheritModule(v, x.args[0], x.name == callLoadAlways)
		} else if isMakeControlFunc(x.name) {
			// info/warning/error: file name is the first argument
			args := []starlarkExpr{
				&stringLiteralExpr{ctx.script.mkFile},
				x.args[0],
			}
			ctx.receiver.newNode(&exprNode{
				&callExpr{name: x.name, args: args, returnType: starlarkTypeUnknown},
			})
		} else {
			// Any other known function: emit the call as a statement.
			ctx.receiver.newNode(&exprNode{expr})
		}
	case *badExpr:
		ctx.wrapBadExpr(x)
		return
	default:
		ctx.errorf(v, "cannot handle %s", v.Dump())
		return
	}
}
// handleDefine reports an error for user-defined Make macros, which the
// converter cannot translate. Macros whose names are already in
// knownFunctions are silently skipped, since the converter handles those
// itself.
//
// Fixes: macro_name renamed to macroName per Go naming conventions.
func (ctx *parseContext) handleDefine(directive *mkparser.Directive) {
	macroName := strings.Fields(directive.Args.Strings[0])[0]
	// Ignore the macros that we handle
	if _, ok := knownFunctions[macroName]; !ok {
		ctx.errorf(directive, "define is not supported: %s", macroName)
	}
}
// handleIfBlock converts a whole ifdef/ifeq...endif construct into a single
// switchNode. It pushes the switch as the current receiver, lets
// processBranch consume each branch body, and pops the receiver once the
// matching endif is seen. A missing endif is a fatal conversion error.
func (ctx *parseContext) handleIfBlock(ifDirective *mkparser.Directive) {
	ssSwitch := &switchNode{}
	ctx.pushReceiver(ssSwitch)
	// processBranch returns positioned at the next same-level directive
	// (else/elif*/endif), which getNode below then retrieves.
	for ctx.processBranch(ifDirective); ctx.hasNodes() && ctx.fatalError == nil; {
		node := ctx.getNode()
		switch x := node.(type) {
		case *mkparser.Directive:
			switch x.Name {
			case "else", "elifdef", "elifndef", "elifeq", "elifneq":
				ctx.processBranch(x)
			case "endif":
				// Block complete: restore the previous receiver and emit it.
				ctx.popReceiver()
				ctx.receiver.newNode(ssSwitch)
				return
			default:
				ctx.errorf(node, "unexpected directive %s", x.Name)
			}
		default:
			ctx.errorf(ifDirective, "unexpected statement")
		}
	}
	// Ran out of input without seeing endif.
	if ctx.fatalError == nil {
		ctx.fatalError = fmt.Errorf("no matching endif for %s", ifDirective.Dump())
	}
	ctx.popReceiver()
}
// processBranch processes a single branch (if/elseif/else) until the next directive
// on the same level. It parses the branch condition, collects the branch body
// into a switchCase, and on seeing the next same-level directive pushes that
// directive back (backNode) for handleIfBlock to consume. Nested if blocks
// are handled recursively via handleIfBlock.
func (ctx *parseContext) processBranch(check *mkparser.Directive) {
	block := switchCase{gate: ctx.parseCondition(check)}
	// Variable assignments made inside the branch must not leak outside it.
	defer func() {
		ctx.popVarAssignments()
		ctx.ifNestLevel--
	}()
	ctx.pushVarAssignments()
	ctx.ifNestLevel++

	ctx.pushReceiver(&block)
	for ctx.hasNodes() {
		node := ctx.getNode()
		if ctx.handleSimpleStatement(node) {
			continue
		}
		switch d := node.(type) {
		case *mkparser.Directive:
			switch d.Name {
			case "else", "elifdef", "elifndef", "elifeq", "elifneq", "endif":
				// End of this branch: emit it and push the directive back
				// so the caller can decide what comes next.
				ctx.popReceiver()
				ctx.receiver.newNode(&block)
				ctx.backNode()
				return
			case "ifdef", "ifndef", "ifeq", "ifneq":
				// Nested conditional inside this branch.
				ctx.handleIfBlock(d)
			default:
				ctx.errorf(d, "unexpected directive %s", d.Name)
			}
		default:
			ctx.errorf(node, "unexpected statement")
		}
	}
	// Input exhausted before the branch terminator.
	ctx.fatalError = fmt.Errorf("no matching endif for %s", check.Dump())
	ctx.popReceiver()
}
// newIfDefinedNode builds the condition expression for ifdef/ifndef. The
// second result is false when the argument is too complex to translate.
func (ctx *parseContext) newIfDefinedNode(check *mkparser.Directive) (starlarkExpr, bool) {
	if check.Args.Const() {
		variable := ctx.addVariable(check.Args.Strings[0])
		return &variableDefinedExpr{variable}, true
	}
	return ctx.newBadExpr(check, "ifdef variable ref too complex: %s", check.Args.Dump()), false
}
func (ctx *parseContext) parseCondition(check *mkparser.Directive) starlarkNode {
switch check.Name {
case "ifdef", "ifndef", "elifdef", "elifndef":
v, ok := ctx.newIfDefinedNode(check)
if ok && strings.HasSuffix(check.Name, "ndef") {
v = ¬Expr{v}
}
return &ifNode{
isElif: strings.HasPrefix(check.Name, "elif"),
expr: v,
}
case "ifeq", "ifneq", "elifeq", "elifneq":
return &ifNode{
isElif: strings.HasPrefix(check.Name, "elif"),
expr: ctx.parseCompare(check),
}
case "else":
return &elseNode{}
default:
panic(fmt.Errorf("%s: unknown directive: %s", ctx.script.mkFile, check.Dump()))
}
}
// newBadExpr records a conversion error and returns a badExpr carrying the
// formatted message together with the offending node, so the original source
// can later be emitted as a comment.
//
// Fixes: args was passed to NewError as a single slice-valued variadic
// argument; it must be spread (args...), matching the call in errorf.
func (ctx *parseContext) newBadExpr(node mkparser.Node, text string, args ...interface{}) starlarkExpr {
	message := fmt.Sprintf(text, args...)
	if ctx.errorLogger != nil {
		ctx.errorLogger.NewError(text, node, args...)
	}
	ctx.script.hasErrors = true
	return &badExpr{node, message}
}
// parseCompare converts the argument of ifeq/ifneq (and their elif variants)
// into a boolean Starlark expression. Simple operands become an equality
// check; if one operand is a recognized function call (filter, wildcard,
// findstring, ...) it is special-cased via parseCheckFunctionCallResult.
func (ctx *parseContext) parseCompare(cond *mkparser.Directive) starlarkExpr {
	// Strip outer parentheses
	mkArg := cloneMakeString(cond.Args)
	mkArg.Strings[0] = strings.TrimLeft(mkArg.Strings[0], "( ")
	n := len(mkArg.Strings)
	mkArg.Strings[n-1] = strings.TrimRight(mkArg.Strings[n-1], ") ")
	args := mkArg.Split(",")
	// TODO(asmundak): handle the case where the arguments are in quotes and space-separated
	if len(args) != 2 {
		return ctx.newBadExpr(cond, "ifeq/ifneq len(args) != 2 %s", cond.Dump())
	}
	args[0].TrimRightSpaces()
	args[1].TrimLeftSpaces()

	isEq := !strings.HasSuffix(cond.Name, "neq")
	switch xLeft := ctx.parseMakeString(cond, args[0]).(type) {
	case *stringLiteralExpr, *variableRefExpr:
		switch xRight := ctx.parseMakeString(cond, args[1]).(type) {
		case *stringLiteralExpr, *variableRefExpr:
			// Both sides simple: plain (in)equality.
			return &eqExpr{left: xLeft, right: xRight, isEq: isEq}
		case *badExpr:
			return xRight
		default:
			// Right side may be a recognized function call compared
			// against the simple left operand.
			expr, ok := ctx.parseCheckFunctionCallResult(cond, xLeft, args[1])
			if ok {
				return expr
			}
			return ctx.newBadExpr(cond, "right operand is too complex: %s", args[1].Dump())
		}
	case *badExpr:
		return xLeft
	default:
		switch xRight := ctx.parseMakeString(cond, args[1]).(type) {
		case *stringLiteralExpr, *variableRefExpr:
			// Mirror case: the function call is on the left.
			expr, ok := ctx.parseCheckFunctionCallResult(cond, xRight, args[0])
			if ok {
				return expr
			}
			return ctx.newBadExpr(cond, "left operand is too complex: %s", args[0].Dump())
		case *badExpr:
			return xRight
		default:
			return ctx.newBadExpr(cond, "operands are too complex: (%s,%s)", args[0].Dump(), args[1].Dump())
		}
	}
}
// parseCheckFunctionCallResult handles ifeq/ifneq where one operand (varArg)
// is a single function call and the other (xValue) is a simple value.
// Recognized functions (filter, wildcard, findstring, strip, the
// is-*-platform family) are rewritten into idiomatic Starlark checks.
// The second result is false when the call form is not recognized at all.
func (ctx *parseContext) parseCheckFunctionCallResult(directive *mkparser.Directive, xValue starlarkExpr,
	varArg *mkparser.MakeString) (starlarkExpr, bool) {
	mkSingleVar, ok := varArg.SingleVariable()
	if !ok {
		return nil, false
	}
	expr := ctx.parseReference(directive, mkSingleVar)
	negate := strings.HasSuffix(directive.Name, "neq")
	// The is-*-platform helpers may only be compared against the literal
	// "true" and need at least one argument; returns a badExpr otherwise,
	// nil when the call is well-formed.
	checkIsSomethingFunction := func(xCall *callExpr) starlarkExpr {
		s, ok := maybeString(xValue)
		if !ok || s != "true" {
			return ctx.newBadExpr(directive,
				fmt.Sprintf("the result of %s can be compared only to 'true'", xCall.name))
		}
		if len(xCall.args) < 1 {
			return ctx.newBadExpr(directive, "%s requires an argument", xCall.name)
		}
		return nil
	}
	switch x := expr.(type) {
	case *callExpr:
		switch x.name {
		case "filter":
			return ctx.parseCompareFilterFuncResult(directive, x, xValue, !negate), true
		case "filter-out":
			// filter-out inverts the filter sense.
			return ctx.parseCompareFilterFuncResult(directive, x, xValue, negate), true
		case "wildcard":
			return ctx.parseCompareWildcardFuncResult(directive, x, xValue, negate), true
		case "findstring":
			return ctx.parseCheckFindstringFuncResult(directive, x, xValue, negate), true
		case "strip":
			return ctx.parseCompareStripFuncResult(directive, x, xValue, negate), true
		case "is-board-platform":
			if xBad := checkIsSomethingFunction(x); xBad != nil {
				return xBad, true
			}
			// TARGET_BOARD_PLATFORM == <arg>
			return &eqExpr{
				left:  &variableRefExpr{ctx.addVariable("TARGET_BOARD_PLATFORM"), false},
				right: x.args[0],
				isEq:  !negate,
			}, true
		case "is-board-platform-in-list":
			if xBad := checkIsSomethingFunction(x); xBad != nil {
				return xBad, true
			}
			// TARGET_BOARD_PLATFORM in [<args>]
			return &inExpr{
				expr:  &variableRefExpr{ctx.addVariable("TARGET_BOARD_PLATFORM"), false},
				list:  maybeConvertToStringList(x.args[0]),
				isNot: negate,
			}, true
		case "is-product-in-list":
			if xBad := checkIsSomethingFunction(x); xBad != nil {
				return xBad, true
			}
			// TARGET_PRODUCT in [<args>]
			return &inExpr{
				expr:  &variableRefExpr{ctx.addVariable("TARGET_PRODUCT"), true},
				list:  maybeConvertToStringList(x.args[0]),
				isNot: negate,
			}, true
		case "is-vendor-board-platform":
			if xBad := checkIsSomethingFunction(x); xBad != nil {
				return xBad, true
			}
			// TARGET_BOARD_PLATFORM in <arg>_BOARD_PLATFORMS; the argument
			// must be constant to form the variable name.
			s, ok := maybeString(x.args[0])
			if !ok {
				return ctx.newBadExpr(directive, "cannot handle non-constant argument to is-vendor-board-platform"), true
			}
			return &inExpr{
				expr:  &variableRefExpr{ctx.addVariable("TARGET_BOARD_PLATFORM"), false},
				list:  &variableRefExpr{ctx.addVariable(s + "_BOARD_PLATFORMS"), true},
				isNot: negate,
			}, true
		default:
			return ctx.newBadExpr(directive, "Unknown function in ifeq: %s", x.name), true
		}
	case *badExpr:
		return x, true
	default:
		return nil, false
	}
}
// parseCompareFilterFuncResult converts comparisons involving $(filter ...)
// into membership or equality tests.
func (ctx *parseContext) parseCompareFilterFuncResult(cond *mkparser.Directive,
	filterFuncCall *callExpr, xValue starlarkExpr, negate bool) starlarkExpr {
	// We handle:
	// *  ifeq/ifneq (,$(filter v1 v2 ..., EXPR) becomes if EXPR not in/in ["v1", "v2", ...]
	// *  ifeq/ifneq (,$(filter EXPR, v1 v2 ...) becomes if EXPR not in/in ["v1", "v2", ...]
	// *  ifeq/ifneq ($(VAR),$(filter $(VAR), v1 v2 ...) becomes if VAR in/not in ["v1", "v2"]
	// TODO(Asmundak): check the last case works for filter-out, too.
	xPattern := filterFuncCall.args[0]
	xText := filterFuncCall.args[1]
	var xInList *stringLiteralExpr
	var expr starlarkExpr
	var ok bool
	switch x := xValue.(type) {
	case *stringLiteralExpr:
		if x.literal != "" {
			return ctx.newBadExpr(cond, "filter comparison to non-empty value: %s", xValue)
		}
		// Either pattern or text should be const, and the
		// non-const one should be varRefExpr
		if xInList, ok = xPattern.(*stringLiteralExpr); ok {
			expr = xText
		} else if xInList, ok = xText.(*stringLiteralExpr); ok {
			expr = xPattern
		} else {
			// Neither side is constant: fall back to emitting the filter
			// call itself as a boolean.
			return &callExpr{
				object:     nil,
				name:       filterFuncCall.name,
				args:       filterFuncCall.args,
				returnType: starlarkTypeBool,
			}
		}
	case *variableRefExpr:
		if v, ok := xPattern.(*variableRefExpr); ok {
			if xInList, ok = xText.(*stringLiteralExpr); ok && v.ref.name() == x.ref.name() {
				// ifeq/ifneq ($(VAR),$(filter $(VAR), v1 v2 ...), flip negate,
				// it's the opposite to what is done when comparing to empty.
				expr = xPattern
				negate = !negate
			}
		}
	}
	if expr != nil && xInList != nil {
		slExpr := newStringListExpr(strings.Fields(xInList.literal))
		// Generate simpler code for the common cases:
		if expr.typ() == starlarkTypeList {
			if len(slExpr.items) == 1 {
				// Checking that a string belongs to list
				return &inExpr{isNot: negate, list: expr, expr: slExpr.items[0]}
			} else {
				// TODO(asmundak):
				panic("TBD")
			}
		} else if len(slExpr.items) == 1 {
			// Single candidate value: plain equality is clearer than `in`.
			return &eqExpr{left: expr, right: slExpr.items[0], isEq: !negate}
		} else {
			return &inExpr{isNot: negate, list: newStringListExpr(strings.Fields(xInList.literal)), expr: expr}
		}
	}
	return ctx.newBadExpr(cond, "filter arguments are too complex: %s", cond.Dump())
}
func (ctx *parseContext) parseCompareWildcardFuncResult(directive *mkparser.Directive,
xCall *callExpr, xValue starlarkExpr, negate bool) starlarkExpr {
if !isEmptyString(xValue) {
return ctx.newBadExpr(directive, "wildcard result can be compared only to empty: %s", xValue)
}
callFunc := wildcardExistsPhony
if s, ok := xCall.args[0].(*stringLiteralExpr); ok && !strings.ContainsAny(s.literal, "*?{[") {
callFunc = fileExistsPhony
}
var cc starlarkExpr = &callExpr{name: callFunc, args: xCall.args, returnType: starlarkTypeBool}
if !negate {
cc = ¬Expr{cc}
}
return cc
}
// parseCheckFindstringFuncResult converts ifeq/ifneq of $(findstring n,h)
// against the empty string into `h.find(n) == -1` (or != -1 when negated).
func (ctx *parseContext) parseCheckFindstringFuncResult(directive *mkparser.Directive,
	xCall *callExpr, xValue starlarkExpr, negate bool) starlarkExpr {
	if !isEmptyString(xValue) {
		return ctx.newBadExpr(directive, "findstring result can be compared only to empty: %s", xValue)
	}
	find := &callExpr{
		object:     xCall.args[1],
		name:       "find",
		args:       []starlarkExpr{xCall.args[0]},
		returnType: starlarkTypeInt,
	}
	return &eqExpr{left: find, right: &intLiteralExpr{-1}, isEq: !negate}
}
// parseCompareStripFuncResult converts ifeq/ifneq of $(strip x) against a
// string literal into an equality test on the stripped value.
func (ctx *parseContext) parseCompareStripFuncResult(directive *mkparser.Directive,
	xCall *callExpr, xValue starlarkExpr, negate bool) starlarkExpr {
	if _, isLiteral := xValue.(*stringLiteralExpr); !isLiteral {
		return ctx.newBadExpr(directive, "strip result can be compared only to string: %s", xValue)
	}
	stripped := &callExpr{
		name:       "strip",
		args:       xCall.args,
		returnType: starlarkTypeString,
	}
	return &eqExpr{left: stripped, right: xValue, isEq: !negate}
}
// parseReference parses a $(...) reference, returning an expression: either a
// plain variable reference or a (possibly special-cased) function call.
func (ctx *parseContext) parseReference(node mkparser.Node, ref *mkparser.MakeString) starlarkExpr {
	ref.TrimLeftSpaces()
	ref.TrimRightSpaces()
	refDump := ref.Dump()

	// Handle only the case where the first (or only) word is constant
	words := ref.SplitN(" ", 2)
	if !words[0].Const() {
		return ctx.newBadExpr(node, "reference is too complex: %s", refDump)
	}

	// If it is a single word, it can be a simple variable
	// reference or a function call
	if len(words) == 1 {
		if isMakeControlFunc(refDump) || refDump == "shell" {
			// Zero-argument info/warning/error/shell: call with "".
			return &callExpr{
				name:       refDump,
				args:       []starlarkExpr{&stringLiteralExpr{""}},
				returnType: starlarkTypeUnknown,
			}
		}
		if v := ctx.addVariable(refDump); v != nil {
			return &variableRefExpr{v, ctx.lastAssignment(v.name()) != nil}
		}
		return ctx.newBadExpr(node, "unknown variable %s", refDump)
	}

	expr := &callExpr{name: words[0].Dump(), returnType: starlarkTypeUnknown}
	args := words[1]
	args.TrimLeftSpaces()
	// Make control functions and shell need special treatment as everything
	// after the name is a single text argument
	if isMakeControlFunc(expr.name) || expr.name == "shell" {
		x := ctx.parseMakeString(node, args)
		if xBad, ok := x.(*badExpr); ok {
			return xBad
		}
		expr.args = []starlarkExpr{x}
		return expr
	}
	if expr.name == "call" {
		// $(call f,args): the real function name is the first argument.
		words = args.SplitN(",", 2)
		if words[0].Empty() || !words[0].Const() {
			return ctx.newBadExpr(node, "cannot handle %s", refDump)
		}
		expr.name = words[0].Dump()
		if len(words) < 2 {
			args = &mkparser.MakeString{}
		} else {
			args = words[1]
		}
	}
	if kf, found := knownFunctions[expr.name]; found {
		expr.returnType = kf.returnType
	} else {
		return ctx.newBadExpr(node, "cannot handle invoking %s", expr.name)
	}
	// Functions below need special argument handling; everything else takes
	// a comma-separated argument list parsed uniformly.
	switch expr.name {
	case "word":
		return ctx.parseWordFunc(node, args)
	case "firstword", "lastword":
		return ctx.parseFirstOrLastwordFunc(node, expr.name, args)
	case "my-dir":
		return &variableRefExpr{ctx.addVariable("LOCAL_PATH"), true}
	case "subst", "patsubst":
		return ctx.parseSubstFunc(node, expr.name, args)
	default:
		for _, arg := range args.Split(",") {
			arg.TrimLeftSpaces()
			arg.TrimRightSpaces()
			x := ctx.parseMakeString(node, arg)
			if xBad, ok := x.(*badExpr); ok {
				return xBad
			}
			expr.args = append(expr.args, x)
		}
	}
	return expr
}
// parseSubstFunc handles $(subst from,to,text) and $(patsubst from,to,text).
// `from` and `to` must be constant. For subst on a string operand the call
// collapses to text.replace(from, to).
func (ctx *parseContext) parseSubstFunc(node mkparser.Node, fname string, args *mkparser.MakeString) starlarkExpr {
	params := args.Split(",")
	if len(params) != 3 {
		return ctx.newBadExpr(node, "%s function should have 3 arguments", fname)
	}
	if !params[0].Const() || !params[1].Const() {
		return ctx.newBadExpr(node, "%s function's from and to arguments should be constant", fname)
	}
	from := &stringLiteralExpr{params[0].Strings[0]}
	to := &stringLiteralExpr{params[1].Strings[0]}
	params[2].TrimLeftSpaces()
	params[2].TrimRightSpaces()
	target := ctx.parseMakeString(node, params[2])
	targetType := target.typ()
	if fname == "subst" && targetType == starlarkTypeString {
		// Optimization: $(subst from,to,string) is just string.replace(from, to).
		return &callExpr{
			object:     target,
			name:       "replace",
			args:       []starlarkExpr{from, to},
			returnType: targetType,
		}
	}
	return &callExpr{
		name:       fname,
		args:       []starlarkExpr{from, to, target},
		returnType: targetType,
	}
}
// parseWordFunc handles $(word n,text): it extracts the n'th (1-based) word
// as a 0-based index into the (split) list. The index must be a constant
// positive integer.
//
// Fixes: returns *indexExpr instead of an indexExpr value, consistent with
// parseFirstOrLastwordFunc, which returns &indexExpr.
func (ctx *parseContext) parseWordFunc(node mkparser.Node, args *mkparser.MakeString) starlarkExpr {
	words := args.Split(",")
	if len(words) != 2 {
		return ctx.newBadExpr(node, "word function should have 2 arguments")
	}
	var index uint64 = 0
	if words[0].Const() {
		// A parse failure leaves index at 0 and is reported below.
		index, _ = strconv.ParseUint(strings.TrimSpace(words[0].Strings[0]), 10, 64)
	}
	if index < 1 {
		return ctx.newBadExpr(node, "word index should be constant positive integer")
	}
	words[1].TrimLeftSpaces()
	words[1].TrimRightSpaces()
	array := ctx.parseMakeString(node, words[1])
	if xBad, ok := array.(*badExpr); ok {
		return xBad
	}
	if array.typ() != starlarkTypeList {
		// Split a string operand into a list at runtime.
		array = &callExpr{object: array, name: "split", returnType: starlarkTypeList}
	}
	return &indexExpr{array, &intLiteralExpr{int(index - 1)}}
}
// parseFirstOrLastwordFunc handles $(firstword ...) and $(lastword ...),
// translating them into index 0 / index -1 of the (split) operand.
// $(lastword $(MAKEFILE_LIST)) is special-cased to the current makefile name.
func (ctx *parseContext) parseFirstOrLastwordFunc(node mkparser.Node, name string, args *mkparser.MakeString) starlarkExpr {
	arg := ctx.parseMakeString(node, args)
	if bad, ok := arg.(*badExpr); ok {
		return bad
	}
	index := &intLiteralExpr{0}
	if name == "lastword" {
		if v, ok := arg.(*variableRefExpr); ok && v.ref.name() == "MAKEFILE_LIST" {
			// The last entry of MAKEFILE_LIST is the file being parsed.
			return &stringLiteralExpr{ctx.script.mkFile}
		}
		index.literal = -1
	}
	if arg.typ() == starlarkTypeList {
		return &indexExpr{arg, index}
	}
	// String operand: split into words first.
	return &indexExpr{&callExpr{object: arg, name: "split", returnType: starlarkTypeList}, index}
}
// parseMakeString converts a Make string (which may mix literal text and
// $(...) references) into a Starlark expression: a string literal, a single
// reference, or an interpolation ("a%sb" % (X,)) for the mixed case.
func (ctx *parseContext) parseMakeString(node mkparser.Node, mk *mkparser.MakeString) starlarkExpr {
	if mk.Const() {
		return &stringLiteralExpr{mk.Dump()}
	}
	if mkRef, ok := mk.SingleVariable(); ok {
		return ctx.parseReference(node, mkRef)
	}
	// If we reached here, it's neither string literal nor a simple variable,
	// we need a full-blown interpolation node that will generate
	// "a%b%c" % (X, Y) for a$(X)b$(Y)c
	xInterp := &interpolateExpr{args: make([]starlarkExpr, len(mk.Variables))}
	for i, ref := range mk.Variables {
		arg := ctx.parseReference(node, ref.Name)
		if x, ok := arg.(*badExpr); ok {
			// Any unparseable reference poisons the whole string.
			return x
		}
		xInterp.args[i] = arg
	}
	xInterp.chunks = append(xInterp.chunks, mk.Strings...)
	return xInterp
}
// handleSimpleStatement handles the statements whose treatment is the same in
// all contexts: comment, assignment, variable (which is a macro call in
// reality) and all constructs that we do not handle in any context (the
// 'define' directive and any unrecognized stuff).
// Returns true if the node was handled here; conditional directives return
// false so the caller can process them with the proper nesting context.
func (ctx *parseContext) handleSimpleStatement(node mkparser.Node) bool {
	handled := true
	switch x := node.(type) {
	case *mkparser.Comment:
		ctx.insertComment("#" + x.Comment)
	case *mkparser.Assignment:
		ctx.handleAssignment(x)
	case *mkparser.Variable:
		ctx.handleVariable(x)
	case *mkparser.Directive:
		switch x.Name {
		case "define":
			ctx.handleDefine(x)
		case "include", "-include":
			// "-include" tolerates a missing file, hence loadAlways=false.
			ctx.handleInclude(node, ctx.parseMakeString(node, x.Args), x.Name[0] != '-')
		default:
			handled = false
		}
	default:
		ctx.errorf(x, "unsupported line %s", x.Dump())
	}
	return handled
}
// insertComment appends a comment node (trimmed) to the current receiver.
func (ctx *parseContext) insertComment(s string) {
	ctx.receiver.newNode(&commentNode{strings.TrimSpace(s)})
}
// carryAsComment copies the failed node's original Make source into the
// generated output, one comment line per source line.
func (ctx *parseContext) carryAsComment(failedNode mkparser.Node) {
	lines := strings.Split(failedNode.Dump(), "\n")
	for _, line := range lines {
		ctx.insertComment("# " + line)
	}
}
// errorf records that the given node failed to be converted and includes an
// explanatory message. The error is reported to the error logger (if any),
// emitted into the output as a comment followed by the original source, and
// marks the script as having errors.
func (ctx *parseContext) errorf(failedNode mkparser.Node, message string, args ...interface{}) {
	if ctx.errorLogger != nil {
		ctx.errorLogger.NewError(message, failedNode, args...)
	}
	message = fmt.Sprintf(message, args...)
	ctx.insertComment(fmt.Sprintf("# MK2RBC TRANSLATION ERROR: %s", message))
	ctx.carryAsComment(failedNode)
	ctx.script.hasErrors = true
}
// wrapBadExpr emits a previously-recorded bad expression into the output:
// its error message as a comment, followed by the original Make source.
func (ctx *parseContext) wrapBadExpr(xBad *badExpr) {
	ctx.insertComment(fmt.Sprintf("# MK2RBC TRANSLATION ERROR: %s", xBad.message))
	ctx.carryAsComment(xBad.node)
}
// loadedModulePath computes the load() path for a module inherited/included
// from `path`, picking among the three location syntaxes the runner accepts.
func (ctx *parseContext) loadedModulePath(path string) string {
	// During the transition to Roboleaf some of the product configuration files
	// will be converted and checked in while the others will be generated on the fly
	// and run. The runner (rbcrun application) accommodates this by allowing three
	// different ways to specify the loaded file location:
	//  1) load(":<file>",...) loads <file> from the same directory
	//  2) load("//path/relative/to/source/root:<file>", ...) loads <file> source tree
	//  3) load("/absolute/path/to/<file> absolute path
	// If the file being generated and the file it wants to load are in the same directory,
	// generate option 1.
	// Otherwise, if output directory is not specified, generate 2)
	// Finally, if output directory has been specified and the file being generated and
	// the file it wants to load from are in the different directories, generate 2) or 3):
	//  * if the file being loaded exists in the source tree, generate 2)
	//  * otherwise, generate 3)
	// Finally, figure out the loaded module path and name and create a node for it
	loadedModuleDir := filepath.Dir(path)
	base := filepath.Base(path)
	// Replace the makefile extension with the converter's output suffix.
	loadedModuleName := strings.TrimSuffix(base, filepath.Ext(base)) + ctx.outputSuffix
	if loadedModuleDir == filepath.Dir(ctx.script.mkFile) {
		// Case 1: same directory.
		return ":" + loadedModuleName
	}
	if ctx.outputDir == "" {
		// Case 2: source-root-relative.
		return fmt.Sprintf("//%s:%s", loadedModuleDir, loadedModuleName)
	}
	if _, err := os.Stat(filepath.Join(loadedModuleDir, loadedModuleName)); err == nil {
		// Case 2: the converted file exists in the source tree.
		return fmt.Sprintf("//%s:%s", loadedModuleDir, loadedModuleName)
	}
	// Case 3: absolute path under the output directory.
	return filepath.Join(ctx.outputDir, loadedModuleDir, loadedModuleName)
}
// addSoongNamespace registers the namespace, preserving any variables already
// recorded for it.
func (ctx *parseContext) addSoongNamespace(ns string) {
	if _, exists := ctx.soongNamespaces[ns]; !exists {
		ctx.soongNamespaces[ns] = make(map[string]bool)
	}
}
// hasSoongNamespace reports whether a namespace with this name is registered.
func (ctx *parseContext) hasSoongNamespace(name string) bool {
	_, found := ctx.soongNamespaces[name]
	return found
}
// updateSoongNamespace records the given variable names in the namespace,
// creating it if needed. When replace is true the previous variable set is
// discarded first; otherwise the names are added to it.
func (ctx *parseContext) updateSoongNamespace(replace bool, namespaceName string, varNames []string) {
	ctx.addSoongNamespace(namespaceName)
	variables := ctx.soongNamespaces[namespaceName]
	if replace {
		variables = make(map[string]bool)
		ctx.soongNamespaces[namespaceName] = variables
	}
	for _, varName := range varNames {
		variables[varName] = true
	}
}
// hasNamespaceVar reports whether varName is recorded in namespaceName.
func (ctx *parseContext) hasNamespaceVar(namespaceName string, varName string) bool {
	if vars, found := ctx.soongNamespaces[namespaceName]; found {
		_, ok := vars[varName]
		return ok
	}
	return false
}
// String generates and returns the Starlark source for the parsed script.
func (ss *StarlarkScript) String() string {
	return NewGenerateContext(ss).emit()
}
// SubConfigFiles returns the original makefile paths of every module this
// script inherits or includes.
func (ss *StarlarkScript) SubConfigFiles() []string {
	var paths []string
	for _, module := range ss.inherited {
		paths = append(paths, module.originalPath)
	}
	return paths
}
// HasErrors reports whether any part of the makefile failed to convert.
func (ss *StarlarkScript) HasErrors() bool {
	return ss.hasErrors
}
// Convert reads and parses a makefile. If successful, parsed tree
// is returned and then can be passed to String() to get the generated
// Starlark file.
func Convert(req Request) (*StarlarkScript, error) {
	reader := req.Reader
	if reader == nil {
		// NOTE(review): ioutil.ReadFile is deprecated since Go 1.16;
		// consider os.ReadFile once it's confirmed io/ioutil is not used
		// elsewhere in this file.
		mkContents, err := ioutil.ReadFile(req.MkFile)
		if err != nil {
			return nil, err
		}
		reader = bytes.NewBuffer(mkContents)
	}
	parser := mkparser.NewParser(req.MkFile, reader)
	nodes, errs := parser.Parse()
	if len(errs) > 0 {
		// Report all parse errors, then fail.
		for _, e := range errs {
			fmt.Fprintln(os.Stderr, "ERROR:", e)
		}
		return nil, fmt.Errorf("bad makefile %s", req.MkFile)
	}
	starScript := &StarlarkScript{
		moduleName:         moduleNameForFile(req.MkFile),
		mkFile:             req.MkFile,
		topDir:             req.RootDir,
		traceCalls:         req.TraceCalls,
		warnPartialSuccess: req.WarnPartialSuccess,
		sourceFS:           req.SourceFS,
		makefileFinder:     req.MakefileFinder,
	}
	ctx := newParseContext(starScript, nodes)
	ctx.outputSuffix = req.OutputSuffix
	ctx.outputDir = req.OutputDir
	ctx.errorLogger = req.ErrorLogger
	if len(req.TracedVariables) > 0 {
		ctx.tracedVariables = make(map[string]bool)
		for _, v := range req.TracedVariables {
			ctx.tracedVariables[v] = true
		}
	}
	ctx.pushReceiver(starScript)
	// Top-level loop: simple statements are handled uniformly; conditionals
	// need block handling. Anything else is a conversion error.
	for ctx.hasNodes() && ctx.fatalError == nil {
		node := ctx.getNode()
		if ctx.handleSimpleStatement(node) {
			continue
		}
		switch x := node.(type) {
		case *mkparser.Directive:
			switch x.Name {
			case "ifeq", "ifneq", "ifdef", "ifndef":
				ctx.handleIfBlock(x)
			default:
				ctx.errorf(x, "unexpected directive %s", x.Name)
			}
		default:
			ctx.errorf(x, "unsupported line")
		}
	}
	if ctx.fatalError != nil {
		return nil, ctx.fatalError
	}
	return starScript, nil
}
func Launcher(path, name string) string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "load(%q, %q)\n", baseUri, baseName)
fmt.Fprintf(&buf, "load(%q, \"init\")\n", path)
fmt.Fprintf(&buf, "g, config = %s(%q, init)\n", cfnMain, name)
fmt.Fprintf(&buf, "%s(g, config)\n", cfnPrintVars)
return buf.String()
}
func MakePath2ModuleName(mkPath string) string {
return strings.TrimSuffix(mkPath, filepath.Ext(mkPath))
}
|
package ds
/**
Remove all elements from a linked list of integers that have value val.
Example:
Input: 1->2->6->3->4->5->6, val = 6
Output: 1->2->3->4->5
*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a node of a singly-linked list of ints.
type ListNode struct {
	Val  int
	Next *ListNode
}

// removeElements deletes every node whose value equals val and returns the
// (possibly new) head of the list. A sentinel node in front of the head makes
// head removal uniform with interior removal.
func removeElements(head *ListNode, val int) *ListNode {
	sentinel := &ListNode{Next: head}
	for node := sentinel; node != nil; node = node.Next {
		// Splice out any run of consecutive matching nodes.
		for node.Next != nil && node.Next.Val == val {
			node.Next = node.Next.Next
		}
	}
	return sentinel.Next
}
|
package main
import "fmt"
// characterReplacement returns the length of the longest substring of s that
// can be turned into a run of a single repeated character by replacing at
// most k characters (LeetCode 424, "Longest Repeating Character Replacement").
//
// For every run start it anchors the window character at s[start], extends
// the window right while the replacement budget lasts, then spends any
// leftover budget extending left.
func characterReplacement(s string, k int) int {
	n := len(s)
	// With k >= n every character can be replaced.
	if n <= k {
		return n
	}
	best := k
	for start := 0; start < n; start++ {
		// Only the first index of each run of equal characters matters.
		if start > 0 && s[start] == s[start-1] {
			continue
		}
		budget := k
		left, right := start, start
		// Extend right: matching characters are free, others cost budget.
		for right < n {
			if s[right] == s[start] {
				right++
			} else if budget > 0 {
				budget--
				right++
			} else {
				break
			}
		}
		// Extend left with whatever budget remains.
		for left > 0 {
			if s[left-1] == s[start] {
				left--
			} else if budget > 0 {
				budget--
				left--
			} else {
				break
			}
		}
		if length := right - left; length > best {
			best = length
		}
	}
	return best
}
// main demonstrates characterReplacement on the LeetCode sample input
// ("AABABBA" with k=2).
func main() {
	fmt.Println(characterReplacement("AABABBA", 2))
}
|
package ferraris
import (
"log"
"strings"
)
// Power returns the current power measurement in Watts
// It divides the energy of one disc rotation (1000/RotationsPerKiloWattHour
// watt-hours) by the duration of the last rotation in hours.
func (f Ferraris) Power() float64 {
	// f.stop holds the last rotation duration (it has an Hours() method and
	// compares against 0 — presumably a time.Duration; verify against the
	// struct definition). Zero means no measurement yet and would divide by
	// zero below.
	if f.stop == 0 {
		return 0
	}
	return (1000 / float64(f.RotationsPerKiloWattHour)) / f.stop.Hours()
}
// Print screen output: logs name, GPIO pin, rotation count, current power
// and meter reading on one line.
func (f Ferraris) Print() {
	log.Printf("%10v %2v %4v %7.1f %10.3f\n", f.Name, f.BcmPin, f.count, f.Power(), f.Meter)
}
// InfluxMeasurement returns the InfluxDB measurement name used for this meter.
func (f Ferraris) InfluxMeasurement() string {
	return "meter"
}
// InfluxFields returns the InfluxDB field set for this meter: the current
// meter reading and the derived wattage.
func (f Ferraris) InfluxFields() map[string]interface{} {
	fields := make(map[string]interface{}, 2)
	fields["value"] = f.Meter
	fields["wattage"] = f.Power()
	return fields
}
// InfluxTags returns the InfluxDB tag set for this meter, keyed by its
// lower-cased name.
func (f Ferraris) InfluxTags() map[string]string {
	tags := make(map[string]string, 1)
	tags["meter"] = strings.ToLower(f.Name)
	return tags
}
|
package models
import (
"go.mongodb.org/mongo-driver/bson/primitive"
"time"
)
// User is the persisted representation of an application user account,
// stored in MongoDB (bson tags) and serialized to JSON for the API.
// All fields are omitted from output when empty.
type User struct {
	ID primitive.ObjectID `json:"id,omitempty" bson:"_id,omitempty"`
	LastName string `json:"lastName,omitempty" bson:"lastName,omitempty"`
	UpdatedAt time.Time `json:"updatedAt,omitempty" bson:"updatedAt,omitempty"`
	Email string `json:"email,omitempty" bson:"email,omitempty"`
	IsActive bool `json:"isActive,omitempty" bson:"isActive,omitempty"`
	Password string `json:"password,omitempty" bson:"password,omitempty"`
	CreatedAt time.Time `json:"createdAt,omitempty" bson:"createdAt,omitempty"`
	FirstName string `json:"firstName,omitempty" bson:"firstName,omitempty"`
	// ChangingEmail presumably holds a new address pending verification —
	// confirm against the email-change flow.
	ChangingEmail string `json:"changingEmail,omitempty" bson:"changingEmail,omitempty"`
	IsAdmin bool `json:"isAdmin,omitempty" bson:"isAdmin,omitempty"`
	VerifiedEmail bool `json:"verifiedEmail,omitempty" bson:"verifiedEmail,omitempty"`
	CountryPhoneCode string `json:"countryPhoneCode,omitempty" bson:"countryPhoneCode,omitempty"`
}
// PasswordLogin stores password-based credentials for a user as a
// separate MongoDB document (bson only, never serialized to JSON),
// linked back to the User document via UserID.
type PasswordLogin struct {
	ID primitive.ObjectID `bson:"_id,omitempty"`
	Email string `bson:"email,omitempty"`
	Password string `bson:"password,omitempty"`
	// Recovery presumably holds a password-recovery token — confirm.
	Recovery string `bson:"recovery,omitempty"`
	UserID string `bson:"userId,omitempty"`
}
|
package database
import (
pb "github.com/autograde/aguis/ag"
)
// Database contains methods for manipulating the database.
type Database interface {
	// GetRemoteIdentity returns the remote identity registered for the
	// given provider and remote ID.
	GetRemoteIdentity(provider string, rid uint64) (*pb.RemoteIdentity, error)
	// CreateUserFromRemoteIdentity creates a new user record linked to
	// the given remote identity.
	CreateUserFromRemoteIdentity(*pb.User, *pb.RemoteIdentity) error
	// AssociateUserWithRemoteIdentity associates the user with the given
	// provider/remote ID pair and stores the access token.
	AssociateUserWithRemoteIdentity(uid uint64, provider string, rid uint64, accessToken string) error
	// GetUserByRemoteIdentity returns the user for the given remote identity.
	// The supplied remote identity must contain Provider and RemoteID.
	GetUserByRemoteIdentity(*pb.RemoteIdentity) (*pb.User, error)
	// UpdateAccessToken updates the access token for the given remote identity.
	// The supplied remote identity must contain Provider, RemoteID and AccessToken.
	UpdateAccessToken(*pb.RemoteIdentity) error
	// GetUser returns the given user, including remote identities.
	GetUser(uint64) (*pb.User, error)
	// GetUserWithEnrollments returns the user by ID with preloaded user enrollments
	GetUserWithEnrollments(uint64) (*pb.User, error)
	// GetUsers returns the users for the given set of user IDs.
	GetUsers(...uint64) ([]*pb.User, error)
	// UpdateUser updates the user's details, excluding remote identities.
	UpdateUser(*pb.User) error
	// SetAdmin makes an existing user an administrator. The admin role is allowed to
	// create courses, so it makes sense that teachers are made admins.
	SetAdmin(uint64) error
	// CreateCourse creates the course (the uint64 is presumably the
	// creating user's ID — confirm against implementations).
	CreateCourse(uint64, *pb.Course) error
	// GetCourse returns the course by ID; the bool toggles additional
	// data loading (exact semantics defined by implementations — confirm).
	GetCourse(uint64, bool) (*pb.Course, error)
	// GetCourseByOrganizationID returns the course associated with the
	// given SCM organization ID.
	GetCourseByOrganizationID(did uint64) (*pb.Course, error)
	// GetCourses returns the courses for the given set of course IDs.
	GetCourses(...uint64) ([]*pb.Course, error)
	// GetCoursesByUser returns the user's courses, optionally filtered
	// by enrollment status.
	GetCoursesByUser(uid uint64, statuses ...pb.Enrollment_UserStatus) ([]*pb.Course, error)
	// UpdateCourse updates the given course.
	UpdateCourse(*pb.Course) error
	// CreateEnrollment creates the given enrollment.
	CreateEnrollment(*pb.Enrollment) error
	// RejectEnrollment rejects the user's enrollment in the course.
	RejectEnrollment(uid uint64, cid uint64) error
	// EnrollStudent enrolls the user in the course as a student.
	EnrollStudent(uid uint64, cid uint64) error
	// EnrollTeacher enrolls the user in the course as a teacher.
	EnrollTeacher(uid uint64, cid uint64) error
	// SetPendingEnrollment marks the user's enrollment in the course as pending.
	SetPendingEnrollment(uid, cid uint64) error
	// UpdateGroupEnrollment is used to reset group ID when previously approved group is
	// being removed or a user is removed from the group
	UpdateGroupEnrollment(uid, cid uint64) error
	// GetEnrollmentsByCourse returns the course's enrollments, optionally
	// filtered by enrollment status.
	GetEnrollmentsByCourse(cid uint64, statuses ...pb.Enrollment_UserStatus) ([]*pb.Enrollment, error)
	// GetEnrollmentByCourseAndUser returns the user's enrollment in the course.
	GetEnrollmentByCourseAndUser(cid uint64, uid uint64) (*pb.Enrollment, error)
	// CreateAssignment creates a new or updates an existing assignment.
	CreateAssignment(*pb.Assignment) error
	// UpdateAssignments updates the specified list of assignments.
	UpdateAssignments([]*pb.Assignment) error
	// GetAssignmentsByCourse returns the assignments for the given course ID.
	GetAssignmentsByCourse(uint64) ([]*pb.Assignment, error)
	// GetNextAssignment returns the next assignment for the given course,
	// user and group IDs.
	GetNextAssignment(cid, uid, gid uint64) (*pb.Assignment, error)
	// GetAssignment returns a single assignment matching the given query.
	GetAssignment(query *pb.Assignment) (*pb.Assignment, error)
	// CreateSubmission creates a submission in the database.
	CreateSubmission(*pb.Submission) error
	// UpdateSubmission updates the specified submission with approved or not approved.
	UpdateSubmission(submissionID uint64, approved bool) error
	// GetSubmission returns a single submission matching the given query.
	GetSubmission(query *pb.Submission) (*pb.Submission, error)
	// GetSubmissions returns a list of submission entries for the given course, matching the given query.
	GetSubmissions(cid uint64, query *pb.Submission) ([]*pb.Submission, error)
	// GetCourseSubmissions returns submissions for the given course ID;
	// the bool flag's meaning is defined by implementations — confirm.
	GetCourseSubmissions(uint64, bool) ([]pb.Submission, error)
	// CreateGroup creates a new group and assign users to newly created group.
	CreateGroup(*pb.Group) error
	// UpdateGroup updates a group with the specified users and enrollments.
	UpdateGroup(group *pb.Group) error
	// UpdateGroupStatus updates status field of a group.
	UpdateGroupStatus(*pb.Group) error
	// DeleteGroup deletes a group and its corresponding enrollments.
	DeleteGroup(uint64) error
	// GetGroup returns the group with the specified group id.
	GetGroup(uint64) (*pb.Group, error)
	// GetGroupsByCourse returns the groups for the given course.
	GetGroupsByCourse(cid uint64) ([]*pb.Group, error)
	// CreateRepository creates a new repository.
	CreateRepository(repo *pb.Repository) error
	// GetRepositoryByRemoteID returns the repository for the SCM provider's repository ID.
	GetRepositoryByRemoteID(uint64) (*pb.Repository, error)
	// GetRepositories returns repositories that match the given query.
	GetRepositories(query *pb.Repository) ([]*pb.Repository, error)
	// DeleteRepositoryByRemoteID deletes repository by the given remote ID
	DeleteRepositoryByRemoteID(uint64) error
}
|
package main
import (
"encoding/json"
"net/http"
"github.com/xsymphony/ac"
"github.com/xsymphony/fin"
)
var automaton *ac.Automaton
// replaceSensitiveRequest is the JSON payload for the replace endpoint:
// Sentence is the text to scan and Symbol is the replacement character
// (only its first rune is used) for masking matched words.
type replaceSensitiveRequest struct {
	Sentence string `json:"sentence"`
	Symbol string `json:"symbol"`
}
// replaceWord handles the replace endpoint: it decodes the JSON request,
// finds sensitive words in the sentence via the package-level automaton,
// and masks every rune inside a match with the first rune of req.Symbol.
func replaceWord(c *fin.Context) {
	var req replaceSensitiveRequest
	if err := json.Unmarshal(c.Request.Body(), &req); err != nil {
		c.String(http.StatusBadRequest, "param error")
		return
	}
	// Fix: the original indexed []rune(req.Symbol)[0] unconditionally,
	// which panics when the (untrusted) symbol field is empty.
	if req.Symbol == "" {
		c.String(http.StatusBadRequest, "param error")
		return
	}
	words, index := automaton.Find(req.Sentence)
	if len(index) == 0 {
		// No sensitive word matched: report matched=false, nothing replaced.
		c.JSON(http.StatusOK, map[string]interface{}{
			"code":    0,
			"message": "ok",
			"data": map[string]interface{}{
				"words":    words,
				"replaced": "",
				"matched":  false,
			},
		})
		return
	}
	symbol := []rune(req.Symbol)[0]
	runes := []rune(req.Sentence)
	replaced := make([]rune, len(runes))
	// index is consumed as (start, end) pairs — presumably inclusive rune
	// offsets of each match; confirm against ac.Automaton.Find.
	var cursor int
	for i := 0; i < len(runes); i++ {
		start, end := index[cursor], index[cursor+1]
		if i >= start && i <= end {
			replaced[i] = symbol
		} else {
			replaced[i] = runes[i]
		}
		// Once the current match is fully passed, advance to the next pair.
		if i == end && cursor+1 < len(index)-1 {
			cursor += 2
		}
	}
	c.JSON(http.StatusOK, map[string]interface{}{
		"code":    0,
		"message": "ok",
		"data": map[string]interface{}{
			"replaced": string(replaced),
			"matched":  true,
		},
	})
}
// main builds the sensitive-word automaton, wires up the HTTP routes,
// and starts the server on :8080.
func main() {
	// Register the sensitive words, then build the automaton's internal
	// links before first use.
	automaton = ac.NewAutomaton()
	automaton.Add("暴力")
	automaton.Add("膜")
	automaton.Add("蛤")
	automaton.Build()
	r := fin.New()
	// Uniform JSON 404 response for unknown routes.
	r.Apply(fin.HandleNotFound(func(c *fin.Context) {
		c.JSONAbort(http.StatusNotFound, map[string]interface{}{
			"code": 404,
			"message": "not found",
		})
	}))
	{
		r.POST("/api/v1/replace", replaceWord)
	}
	r.Run(":8080")
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package cmd
import (
"testing"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/Azure/aks-engine/pkg/api"
)
// TestNewGenerateCmd verifies the generate command's metadata (use,
// short and long descriptions), that all expected flags are registered,
// and that executing it without arguments fails.
func TestNewGenerateCmd(t *testing.T) {
	t.Parallel()
	command := newGenerateCmd()
	if command.Use != generateName || command.Short != generateShortDescription || command.Long != generateLongDescription {
		t.Fatalf("generate command should have use %s equal %s, short %s equal %s and long %s equal to %s", command.Use, generateName, command.Short, generateShortDescription, command.Long, generateLongDescription)
	}
	expectedFlags := []string{"api-model", "output-directory", "ca-certificate-path", "ca-private-key-path", "set", "no-pretty-print", "parameters-only", "client-id", "client-secret"}
	for _, f := range expectedFlags {
		if command.Flags().Lookup(f) == nil {
			t.Fatalf("generate command should have flag %s", f)
		}
	}
	// An api model argument is required, so a bare invocation must fail.
	command.SetArgs([]string{})
	if err := command.Execute(); err == nil {
		t.Fatalf("expected an error when calling generate with no arguments")
	}
}
// TestGenerateCmdValidate checks argument-count validation for the
// generate command: exactly one api model path must be accepted,
// zero or multiple arguments must be rejected.
func TestGenerateCmdValidate(t *testing.T) {
	t.Parallel()
	g := &generateCmd{}
	r := &cobra.Command{}
	// validate cmd with 1 arg
	err := g.validate(r, []string{"../pkg/engine/testdata/simple/kubernetes.json"})
	if err != nil {
		t.Fatalf("unexpected error validating 1 arg: %s", err.Error())
	}
	g = &generateCmd{}
	// validate cmd with 0 args
	err = g.validate(r, []string{})
	if err == nil {
		t.Fatalf("expected error validating 0 args")
	}
	g = &generateCmd{}
	// validate cmd with more than 1 arg
	err = g.validate(r, []string{"../pkg/engine/testdata/simple/kubernetes.json", "arg1"})
	if err == nil {
		t.Fatalf("expected error validating multiple args")
	}
}
// TestGenerateCmdMergeAPIModel exercises mergeAPIModel with various
// combinations of --set overrides against the simple test api model.
func TestGenerateCmdMergeAPIModel(t *testing.T) {
	cases := []struct {
		test func(*testing.T)
		name string
	}{
		{
			name: "NoSetFlagDefined",
			test: func(t *testing.T) {
				g := new(generateCmd)
				g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
				err := g.mergeAPIModel()
				if err != nil {
					t.Fatalf("unexpected error calling mergeAPIModel with no --set flag defined: %s", err.Error())
				}
			},
		},
		{
			name: "OneFlagSet",
			test: func(t *testing.T) {
				g := new(generateCmd)
				g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
				// One --set flag carrying two comma-separated assignments.
				g.set = []string{"masterProfile.count=3,linuxProfile.adminUsername=testuser"}
				err := g.mergeAPIModel()
				if err != nil {
					t.Fatalf("unexpected error calling mergeAPIModel with one --set flag: %s", err.Error())
				}
			},
		},
		{
			name: "TwoFlagsSet",
			test: func(t *testing.T) {
				g := new(generateCmd)
				g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
				// Fix: this case previously duplicated OneFlagSet with a
				// single slice entry; two entries model two --set flags.
				g.set = []string{"masterProfile.count=3", "linuxProfile.adminUsername=testuser"}
				err := g.mergeAPIModel()
				if err != nil {
					t.Fatalf("unexpected error calling mergeAPIModel with two --set flags: %s", err.Error())
				}
			},
		},
		{
			name: "OverrideArrayFlagSet",
			test: func(t *testing.T) {
				g := new(generateCmd)
				g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
				g.set = []string{"agentPoolProfiles[0].count=1"}
				err := g.mergeAPIModel()
				if err != nil {
					t.Fatalf("unexpected error calling mergeAPIModel with one --set flag to override an array property: %s", err.Error())
				}
			},
		},
		{
			name: "SshKeyContains==FlagSet",
			test: func(t *testing.T) {
				g := new(generateCmd)
				g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
				g.set = []string{"linuxProfile.ssh.publicKeys[0].keyData=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",servicePrincipalProfile.clientId=\"123a4321-c6eb-4b61-9d6f-7db123e14a7a\",servicePrincipalProfile.secret=\"=#msRock5!t=\""}
				err := g.mergeAPIModel()
				if err != nil {
					t.Fatalf("unexpected error calling mergeAPIModel with one --set flag to override an array property: %s", err.Error())
				}
			},
		},
		{
			name: "SimpleQuoteContainingFlagSet",
			test: func(t *testing.T) {
				g := new(generateCmd)
				g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
				g.set = []string{"servicePrincipalProfile.secret='=MsR0ck5!t='"}
				err := g.mergeAPIModel()
				if err != nil {
					t.Fatalf("unexpected error calling mergeAPIModel with one --set flag to override an array property: %s", err.Error())
				}
			},
		},
	}
	for _, tc := range cases {
		c := tc // capture range variable (pre-Go 1.22 semantics)
		t.Run(c.name, func(t *testing.T) {
			t.Parallel()
			c.test(t)
		})
	}
}
// TestGenerateCmdMLoadAPIModel runs validate, mergeAPIModel and
// loadAPIModel in sequence against the simple test api model with an
// array-property override, expecting all three steps to succeed.
func TestGenerateCmdMLoadAPIModel(t *testing.T) {
	g := &generateCmd{}
	r := &cobra.Command{}
	g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
	g.set = []string{"agentPoolProfiles[0].count=1"}
	err := g.validate(r, []string{"../pkg/engine/testdata/simple/kubernetes.json"})
	if err != nil {
		t.Fatalf("unexpected error validating api model: %s", err.Error())
	}
	err = g.mergeAPIModel()
	if err != nil {
		t.Fatalf("unexpected error merging api model: %s", err.Error())
	}
	err = g.loadAPIModel()
	if err != nil {
		t.Fatalf("unexpected error loading api model: %s", err.Error())
	}
}
// TestGenerateCmdMLoadAPIModelWithoutMasterProfile verifies that loading
// an api model whose masterProfile has been removed via --set fails.
func TestGenerateCmdMLoadAPIModelWithoutMasterProfile(t *testing.T) {
	g := &generateCmd{}
	r := &cobra.Command{}
	g.apimodelPath = "../pkg/engine/testdata/simple/kubernetes.json"
	g.set = []string{"masterProfile=nil"}
	err := g.validate(r, []string{"../pkg/engine/testdata/simple/kubernetes.json"})
	if err != nil {
		t.Fatalf("unexpected error validating api model: %s", err.Error())
	}
	err = g.mergeAPIModel()
	if err != nil {
		t.Fatalf("unexpected error merging api model: %s", err.Error())
	}
	err = g.loadAPIModel()
	if err == nil {
		// Fix: the original called err.Error() here, which panics with a
		// nil-pointer dereference since err is nil on this branch.
		t.Fatal("expected error loading api model without MasterProfile")
	}
}
// TestAPIModelWithoutServicePrincipalProfileAndClientIdAndSecretInGenerateCmd
// verifies that autofillApimodel populates the service principal profile
// from the command's ClientID/ClientSecret when the api model itself
// omits it, and that the result passes vlabs validation.
func TestAPIModelWithoutServicePrincipalProfileAndClientIdAndSecretInGenerateCmd(t *testing.T) {
	t.Parallel()
	apiloader := &api.Apiloader{
		Translator: nil,
	}
	apimodel := getAPIModelWithoutServicePrincipalProfile(false)
	cs, ver, err := apiloader.DeserializeContainerService([]byte(apimodel), false, false, nil)
	if err != nil {
		t.Fatalf("unexpected error deserializing the example apimodel: %s", err)
	}
	cs.Properties.LinuxProfile.SSH.PublicKeys[0].KeyData = "ssh test"
	outfile, del := makeTmpFile(t, "_test_output")
	defer del()
	// Arbitrary but fixed credentials supplied on the command line.
	clientID, _ := uuid.Parse("e810b868-afab-412d-98cc-ce7db5cc840b")
	clientSecret := "Test Client secret"
	generateCmd := &generateCmd{
		apimodelPath:     "./this/is/unused.json",
		outputDirectory:  outfile,
		ClientID:         clientID,
		ClientSecret:     clientSecret,
		containerService: cs,
		apiVersion:       ver,
	}
	err = generateCmd.autofillApimodel()
	if err != nil {
		t.Fatalf("unexpected error autofilling the example apimodel: %s", err)
	}
	if generateCmd.containerService.Properties.ServicePrincipalProfile == nil || generateCmd.containerService.Properties.ServicePrincipalProfile.ClientID == "" || generateCmd.containerService.Properties.ServicePrincipalProfile.Secret == "" {
		t.Fatalf("expected service principal profile to be populated from deployment command arguments")
	}
	if generateCmd.containerService.Properties.ServicePrincipalProfile.ClientID != clientID.String() {
		t.Fatalf("expected service principal profile client id to be %s but got %s", clientID.String(), generateCmd.containerService.Properties.ServicePrincipalProfile.ClientID)
	}
	if generateCmd.containerService.Properties.ServicePrincipalProfile.Secret != clientSecret {
		t.Fatalf("expected service principal profile client secret to be %s but got %s", clientSecret, generateCmd.containerService.Properties.ServicePrincipalProfile.Secret)
	}
	err = generateCmd.validateAPIModelAsVLabs()
	if err != nil {
		t.Fatalf("unexpected error validateAPIModelAsVLabs the example apimodel: %s", err)
	}
}
// TestAPIModelWithoutServicePrincipalProfileAndWithoutClientIdAndSecretInGenerateCmd
// verifies that when neither the api model nor the command supplies
// service principal credentials, the profile stays nil and vlabs
// validation reports the expected error.
func TestAPIModelWithoutServicePrincipalProfileAndWithoutClientIdAndSecretInGenerateCmd(t *testing.T) {
	t.Parallel()
	apiloader := &api.Apiloader{
		Translator: nil,
	}
	apimodel := getAPIModelWithoutServicePrincipalProfile(false)
	cs, ver, err := apiloader.DeserializeContainerService([]byte(apimodel), false, false, nil)
	if err != nil {
		t.Fatalf("unexpected error deserializing the example apimodel: %s", err)
	}
	cs.Properties.LinuxProfile.SSH.PublicKeys[0].KeyData = "ssh test"
	outfile, del := makeTmpFile(t, "_test_output")
	defer del()
	generateCmd := &generateCmd{
		apimodelPath:     "./this/is/unused.json",
		outputDirectory:  outfile,
		containerService: cs,
		apiVersion:       ver,
	}
	err = generateCmd.autofillApimodel()
	if err != nil {
		t.Fatalf("unexpected error autofilling the example apimodel: %s", err)
	}
	if generateCmd.containerService.Properties.ServicePrincipalProfile != nil {
		t.Fatalf("expected service principal profile to be nil for unmanaged identity, where client id and secret are not supplied in api model and deployment command")
	}
	err = generateCmd.validateAPIModelAsVLabs()
	expectedErr := errors.New("ServicePrincipalProfile must be specified")
	// Fix: the original only compared messages when err was non-nil, so a
	// nil error (missing validation) silently passed the test.
	if err == nil {
		t.Fatalf("expected validate generate command to return error %s, but got no error", expectedErr.Error())
	}
	if err.Error() != expectedErr.Error() {
		t.Fatalf("expected validate generate command to return error %s, but instead got %s", expectedErr.Error(), err.Error())
	}
}
// TestAPIModelWithManagedIdentityWithoutServicePrincipalProfileAndClientIdAndSecretInGenerateCmd
// verifies that with managed identity enabled the service principal
// profile stays nil even when client id/secret are supplied on the
// command, and that the model still passes vlabs validation.
func TestAPIModelWithManagedIdentityWithoutServicePrincipalProfileAndClientIdAndSecretInGenerateCmd(t *testing.T) {
	t.Parallel()
	apiloader := &api.Apiloader{
		Translator: nil,
	}
	// true: request the managed-identity variant of the test api model.
	apimodel := getAPIModelWithoutServicePrincipalProfile(true)
	cs, ver, err := apiloader.DeserializeContainerService([]byte(apimodel), false, false, nil)
	if err != nil {
		t.Fatalf("unexpected error deserializing the example apimodel: %s", err)
	}
	cs.Properties.LinuxProfile.SSH.PublicKeys[0].KeyData = "ssh test"
	clientID, _ := uuid.Parse("e810b868-afab-412d-98cc-ce7db5cc840b")
	clientSecret := "Test Client secret"
	outfile, del := makeTmpFile(t, "_test_output")
	defer del()
	generateCmd := &generateCmd{
		apimodelPath:     "./this/is/unused.json",
		outputDirectory:  outfile,
		ClientID:         clientID,
		ClientSecret:     clientSecret,
		containerService: cs,
		apiVersion:       ver,
	}
	err = generateCmd.autofillApimodel()
	if err != nil {
		t.Fatalf("unexpected error autofilling the example apimodel: %s", err)
	}
	if generateCmd.containerService.Properties.ServicePrincipalProfile != nil {
		t.Fatalf("expected service principal profile to be nil for managed identity")
	}
	err = generateCmd.validateAPIModelAsVLabs()
	if err != nil {
		t.Fatalf("unexpected error validateAPIModelAsVLabs the example apimodel: %s", err)
	}
}
// TestExampleAPIModels generates every example api model in the repo and
// asserts that validate / merge / load / vlabs-validate all succeed.
//
// Fix: removed two table entries that were exact duplicates of their
// predecessors ("ephemeral disks Standard_D2_v2" and "windows master
// uses storage account"), which only re-ran identical subtests.
func TestExampleAPIModels(t *testing.T) {
	defaultSet := []string{"masterProfile.dnsPrefix=my-cluster,linuxProfile.ssh.publicKeys[0].keyData=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",servicePrincipalProfile.clientId=\"123a4321-c6eb-4b61-9d6f-7db123e14a7a\",servicePrincipalProfile.secret=\"=#msRock5!t=\""}
	tests := []struct {
		name         string
		apiModelPath string
		setArgs      []string
	}{
		{
			name:         "default cluster configuration",
			apiModelPath: "../examples/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "AAD pod identity",
			apiModelPath: "../examples/addons/aad-pod-identity/kubernetes-aad-pod-identity.json",
			setArgs:      defaultSet,
		},
		{
			name:         "App gateway ingress",
			apiModelPath: "../examples/addons/appgw-ingress/kubernetes-appgw-ingress.json",
			setArgs:      defaultSet,
		},
		{
			name:         "cluster-autoscaler",
			apiModelPath: "../examples/addons/cluster-autoscaler/kubernetes-cluster-autoscaler.json",
			setArgs:      defaultSet,
		},
		{
			name:         "container-monitoring",
			apiModelPath: "../examples/addons/container-monitoring/kubernetes-container-monitoring.json",
			setArgs:      defaultSet,
		},
		{
			name:         "custom PSP",
			apiModelPath: "../examples/addons/custom-manifests/kubernetes-custom-psp.json",
			setArgs:      defaultSet,
		},
		{
			name:         "keyvault flexvol",
			apiModelPath: "../examples/addons/keyvault-flexvolume/kubernetes-keyvault-flexvolume.json",
			setArgs:      defaultSet,
		},
		{
			name:         "nvidia",
			apiModelPath: "../examples/addons/nvidia-device-plugin/nvidia-device-plugin.json",
			setArgs:      defaultSet,
		},
		{
			name:         "azure-policy",
			apiModelPath: "../examples/addons/azure-policy/azure-policy.json",
			setArgs:      defaultSet,
		},
		{
			name:         "node problem detector",
			apiModelPath: "../examples/addons/node-problem-detector/node-problem-detector.json",
			setArgs:      defaultSet,
		},
		{
			name:         "flatcar",
			apiModelPath: "../examples/flatcar/kubernetes-flatcar.json",
			setArgs:      defaultSet,
		},
		{
			name:         "flatcar hybrid",
			apiModelPath: "../examples/flatcar/kubernetes-flatcar-hybrid.json",
			setArgs:      defaultSet,
		},
		{
			name:         "cosmos etcd",
			apiModelPath: "../examples/cosmos-etcd/kubernetes-3-masters-cosmos.json",
			setArgs:      defaultSet,
		},
		{
			name:         "custom files pod node selector",
			apiModelPath: "../examples/customfiles/kubernetes-customfiles-podnodeselector.json",
			setArgs:      []string{"aadProfile.clientAppID=e810b868-afab-412d-98cc-ce7db5cc840b,aadProfile.serverAppID=f810b868-afab-412d-98cc-ce7db5cc840b,masterProfile.dnsPrefix=my-cluster,linuxProfile.ssh.publicKeys[0].keyData=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",servicePrincipalProfile.clientId=\"123a4321-c6eb-4b61-9d6f-7db123e14a7a\",servicePrincipalProfile.secret=\"=#msRock5!t=\""},
		},
		{
			name:         "ephemeral disks Standard_D2s_v3",
			apiModelPath: "../examples/disks-ephemeral/ephemeral-disks.json",
			setArgs:      defaultSet,
		},
		{
			name:         "ephemeral disks Standard_D2_v2",
			apiModelPath: "../examples/disks-ephemeral/kubernetes-vmas.json",
			setArgs:      defaultSet,
		},
		{
			name:         "managed disk pre-attached",
			apiModelPath: "../examples/disks-managed/kubernetes-preAttachedDisks-vmas.json",
			setArgs:      defaultSet,
		},
		{
			name:         "managed disk",
			apiModelPath: "../examples/disks-managed/kubernetes-vmas.json",
			setArgs:      defaultSet,
		},
		{
			name:         "storage account on master",
			apiModelPath: "../examples/disks-storageaccount/kubernetes-master-sa.json",
			setArgs:      defaultSet,
		},
		{
			name:         "storage account",
			apiModelPath: "../examples/disks-storageaccount/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "dualstack ipv6",
			apiModelPath: "../examples/dualstack/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "extensions",
			apiModelPath: "../examples/extensions/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "extensions oms",
			apiModelPath: "../examples/extensions/kubernetes.oms.json",
			setArgs:      defaultSet,
		},
		{
			name:         "extensions preprovision",
			apiModelPath: "../examples/extensions/kubernetes.preprovision.json",
			setArgs:      defaultSet,
		},
		{
			name:         "extensions prometheus grafana",
			apiModelPath: "../examples/extensions/prometheus-grafana-k8s.json",
			setArgs:      defaultSet,
		},
		{
			name:         "feature gates",
			apiModelPath: "../examples/feature-gates/kubernetes-featuresgates.json",
			setArgs:      defaultSet,
		},
		{
			name:         "ipvs",
			apiModelPath: "../examples/ipvs/kubernetes-msi.json",
			setArgs:      defaultSet,
		},
		{
			name:         "keyvault params",
			apiModelPath: "../examples/keyvault-params/kubernetes.json",
			setArgs:      []string{"aadProfile.clientAppID=e810b868-afab-412d-98cc-ce7db5cc840b,aadProfile.serverAppID=f810b868-afab-412d-98cc-ce7db5cc840b,masterProfile.dnsPrefix=my-cluster,linuxProfile.ssh.publicKeys[0].keyData=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",servicePrincipalProfile.clientId=\"123a4321-c6eb-4b61-9d6f-7db123e14a7a\""},
		},
		{
			name:         "keyvault certs",
			apiModelPath: "../examples/keyvaultcerts/kubernetes.json",
			setArgs:      []string{"linuxProfile.secrets[0].sourceVault.id=my-id,masterProfile.dnsPrefix=my-cluster,linuxProfile.ssh.publicKeys[0].keyData=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",servicePrincipalProfile.clientId=\"123a4321-c6eb-4b61-9d6f-7db123e14a7a\",servicePrincipalProfile.secret=\"=#msRock5!t=\""},
		},
		{
			name:         "accelerated networking",
			apiModelPath: "../examples/kubernetes-config/kubernetes-accelerated-network.json",
			setArgs:      defaultSet,
		},
		{
			name:         "useCloudControllerManager",
			apiModelPath: "../examples/kubernetes-config/kubernetes-cloud-controller-manager.json",
			setArgs:      defaultSet,
		},
		{
			name:         "clusterSubnet",
			apiModelPath: "../examples/kubernetes-config/kubernetes-clustersubnet.json",
			setArgs:      defaultSet,
		},
		{
			name:         "enableDataEncryptionAtRest",
			apiModelPath: "../examples/kubernetes-config/kubernetes-data-encryption-at-rest.json",
			setArgs:      defaultSet,
		},
		{
			name:         "dockerBridgeSubnet",
			apiModelPath: "../examples/kubernetes-config/kubernetes-dockerbridgesubnet.json",
			setArgs:      defaultSet,
		},
		{
			name:         "etcdDiskSizeGB",
			apiModelPath: "../examples/kubernetes-config/kubernetes-etcd-storage-size.json",
			setArgs:      defaultSet,
		},
		{
			name:         "gc thresholds",
			apiModelPath: "../examples/kubernetes-config/kubernetes-gc.json",
			setArgs:      defaultSet,
		},
		{
			name:         "enableEncryptionWithExternalKms",
			apiModelPath: "../examples/kubernetes-config/kubernetes-keyvault-encryption.json",
			setArgs:      []string{"masterProfile.dnsPrefix=my-cluster,linuxProfile.ssh.publicKeys[0].keyData=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",servicePrincipalProfile.clientId=\"123a4321-c6eb-4b61-9d6f-7db123e14a7a\",servicePrincipalProfile.objectId=\"223a4321-c6eb-4b61-9d6f-7db123e14a7a\",servicePrincipalProfile.secret=\"=#msRock5!t=\""},
		},
		{
			name:         "max pods",
			apiModelPath: "../examples/kubernetes-config/kubernetes-maxpods.json",
			setArgs:      defaultSet,
		},
		{
			name:         "private cluster single master",
			apiModelPath: "../examples/kubernetes-config/kubernetes-private-cluster-single-master.json",
			setArgs:      []string{"orchestratorProfile.kubernetesConfig.privateCluster.jumpboxProfile.publicKey=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",masterProfile.dnsPrefix=my-cluster,linuxProfile.ssh.publicKeys[0].keyData=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",servicePrincipalProfile.clientId=\"123a4321-c6eb-4b61-9d6f-7db123e14a7a\",servicePrincipalProfile.secret=\"=#msRock5!t=\""},
		},
		{
			name:         "private cluster",
			apiModelPath: "../examples/kubernetes-config/kubernetes-private-cluster.json",
			setArgs:      []string{"orchestratorProfile.kubernetesConfig.privateCluster.jumpboxProfile.publicKey=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",masterProfile.dnsPrefix=my-cluster,linuxProfile.ssh.publicKeys[0].keyData=\"ssh-rsa AAAAB3NO8b9== azureuser@cluster.local\",servicePrincipalProfile.clientId=\"123a4321-c6eb-4b61-9d6f-7db123e14a7a\",servicePrincipalProfile.secret=\"=#msRock5!t=\""},
		},
		{
			name:         "standard LB",
			apiModelPath: "../examples/kubernetes-config/kubernetes-standardlb.json",
			setArgs:      defaultSet,
		},
		{
			name:         "gpu",
			apiModelPath: "../examples/kubernetes-gpu/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "labels",
			apiModelPath: "../examples/kubernetes-labels/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "msi user-assigned vmas",
			apiModelPath: "../examples/kubernetes-msi-userassigned/kube-vma.json",
			setArgs:      defaultSet,
		},
		{
			name:         "msi user-assigned vmss",
			apiModelPath: "../examples/kubernetes-msi-userassigned/kube-vmss.json",
			setArgs:      defaultSet,
		},
		{
			name:         "1.21 example",
			apiModelPath: "../examples/kubernetes-releases/kubernetes1.21.json",
			setArgs:      defaultSet,
		},
		{
			name:         "1.22 example",
			apiModelPath: "../examples/kubernetes-releases/kubernetes1.22.json",
			setArgs:      defaultSet,
		},
		{
			name:         "1.23 example",
			apiModelPath: "../examples/kubernetes-releases/kubernetes1.23.json",
			setArgs:      defaultSet,
		},
		{
			name:         "1.24 example",
			apiModelPath: "../examples/kubernetes-releases/kubernetes1.24.json",
			setArgs:      defaultSet,
		},
		{
			name:         "vmss",
			apiModelPath: "../examples/kubernetes-vmss/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "vmss spot",
			apiModelPath: "../examples/kubernetes-vmss-spot/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "vmss master",
			apiModelPath: "../examples/kubernetes-vmss-master/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "vmss master custom vnet",
			apiModelPath: "../examples/kubernetes-vmss-master/customvnet.json",
			setArgs:      defaultSet,
		},
		{
			name:         "vmss master windows",
			apiModelPath: "../examples/kubernetes-vmss-master/windows.json",
			setArgs:      defaultSet,
		},
		{
			name:         "large cluster",
			apiModelPath: "../examples/largeclusters/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "msi",
			apiModelPath: "../examples/managed-identity/kubernetes-msi.json",
			setArgs:      defaultSet,
		},
		{
			name:         "multiple masters - 3",
			apiModelPath: "../examples/multiple-masters/kubernetes-3-masters.json",
			setArgs:      defaultSet,
		},
		{
			name:         "multiple masters - 5",
			apiModelPath: "../examples/multiple-masters/kubernetes-5-masters.json",
			setArgs:      defaultSet,
		},
		{
			name:         "multiple node pools",
			apiModelPath: "../examples/multiple-nodepools/multipool.json",
			setArgs:      defaultSet,
		},
		{
			name:         "Azure CNI",
			apiModelPath: "../examples/networkplugin/kubernetes-azure.json",
			setArgs:      defaultSet,
		},
		{
			name:         "Azure CNI with calico",
			apiModelPath: "../examples/networkpolicy/kubernetes-calico-azure.json",
			setArgs:      defaultSet,
		},
		{
			name:         "kubenet with calico",
			apiModelPath: "../examples/networkpolicy/kubernetes-calico-kubenet.json",
			setArgs:      defaultSet,
		},
		{
			name:         "cilium network policy",
			apiModelPath: "../examples/networkpolicy/kubernetes-cilium.json",
			setArgs:      defaultSet,
		},
		{
			name:         "antrea network policy",
			apiModelPath: "../examples/networkpolicy/kubernetes-antrea.json",
			setArgs:      defaultSet,
		},
		{
			name:         "istio",
			apiModelPath: "../examples/service-mesh/istio.json",
			setArgs:      defaultSet,
		},
		{
			name:         "ubuntu 18.04",
			apiModelPath: "../examples/ubuntu-1804/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "kubenet custom vnet",
			apiModelPath: "../examples/vnet/kubernetesvnet.json",
			setArgs:      defaultSet,
		},
		{
			name:         "Azure CNI custom vnet",
			apiModelPath: "../examples/vnet/kubernetesvnet-azure-cni.json",
			setArgs:      defaultSet,
		},
		{
			name:         "master vmss custom vnet",
			apiModelPath: "../examples/vnet/kubernetes-master-vmss.json",
			setArgs:      defaultSet,
		},
		{
			name:         "custom node DNS custom vnet",
			apiModelPath: "../examples/vnet/kubernetesvnet-customnodesdns.json",
			setArgs:      defaultSet,
		},
		{
			name:         "custom search domain custom vnet",
			apiModelPath: "../examples/vnet/kubernetesvnet-customsearchdomain.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows custom image",
			apiModelPath: "../examples/windows/kubernetes-custom-image.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows Standard_D2",
			apiModelPath: "../examples/windows/kubernetes-D2.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows hybrid",
			apiModelPath: "../examples/windows/kubernetes-hybrid.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows hyperv",
			apiModelPath: "../examples/windows/kubernetes-hyperv.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows managed disk",
			apiModelPath: "../examples/windows/kubernetes-manageddisks.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows master uses storage account",
			apiModelPath: "../examples/windows/kubernetes-master-sa.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows storage account",
			apiModelPath: "../examples/windows/kubernetes-sadisks.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows winrm extension",
			apiModelPath: "../examples/windows/kubernetes-wincni.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows 1903",
			apiModelPath: "../examples/windows/kubernetes-windows-1903.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows windowsDockerVersion",
			apiModelPath: "../examples/windows/kubernetes-windows-docker-version.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows version",
			apiModelPath: "../examples/windows/kubernetes-windows-version.json",
			setArgs:      defaultSet,
		},
		{
			name:         "windows recommended kubernetes config",
			apiModelPath: "../examples/windows/kubernetes.json",
			setArgs:      defaultSet,
		},
		{
			name:         "custom image",
			apiModelPath: "../examples/custom-image.json",
			setArgs:      defaultSet,
		},
		{
			name:         "custom shared image",
			apiModelPath: "../examples/custom-shared-image.json",
			setArgs:      defaultSet,
		},
		{
			name:         "Standard_D2",
			apiModelPath: "../examples/kubernetes-D2.json",
			setArgs:      defaultSet,
		},
		{
			name:         "ubuntu distros",
			apiModelPath: "../examples/kubernetes-non-vhd-distros.json",
			setArgs:      defaultSet,
		},
		{
			name:         "docker tmp dir",
			apiModelPath: "../examples/kubernetes-config/kubernetes-docker-tmpdir.json",
			setArgs:      defaultSet,
		},
		{
			name:         "containerd tmp dir",
			apiModelPath: "../examples/kubernetes-config/kubernetes-containerd-tmpdir.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e flatcar",
			apiModelPath: "../examples/e2e-tests/kubernetes/flatcar/flatcar.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e gpu",
			apiModelPath: "../examples/e2e-tests/kubernetes/gpu-enabled/definition.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e addons disabled",
			apiModelPath: "../examples/e2e-tests/kubernetes/kubernetes-config/addons-disabled.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e addons enabled",
			apiModelPath: "../examples/e2e-tests/kubernetes/kubernetes-config/addons-enabled.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e kubenet",
			apiModelPath: "../examples/e2e-tests/kubernetes/kubernetes-config/network-plugin-kubenet.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e 50 nodes",
			apiModelPath: "../examples/e2e-tests/kubernetes/node-count/50-nodes/definition.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e full configuration",
			apiModelPath: "../examples/e2e-tests/kubernetes/release/default/definition.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e windows",
			apiModelPath: "../examples/e2e-tests/kubernetes/windows/definition.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e hybrid",
			apiModelPath: "../examples/e2e-tests/kubernetes/windows/hybrid/definition.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e zones",
			apiModelPath: "../examples/e2e-tests/kubernetes/zones/definition.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e user-assigned identity vmas",
			apiModelPath: "../examples/e2e-tests/userassignedidentity/vmas/kubernetes-vmas.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e user-assigned identity vmas multi-master",
			apiModelPath: "../examples/e2e-tests/userassignedidentity/vmas/kubernetes-vmas-multimaster.json",
			setArgs:      defaultSet,
		},
		{
			name:         "e2e user-assigned identity vmss",
			apiModelPath: "../examples/e2e-tests/userassignedidentity/vmss/kubernetes-vmss.json",
			setArgs:      defaultSet,
		},
	}
	for _, test := range tests {
		test := test // capture range variable (pre-Go 1.22 semantics)
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			dir, del := makeTmpDir(t)
			defer del()
			g := &generateCmd{
				apimodelPath:    test.apiModelPath,
				outputDirectory: dir,
			}
			g.set = test.setArgs
			r := &cobra.Command{}
			if err := g.validate(r, []string{}); err != nil {
				t.Fatalf("unexpected error validating api model: %s", err.Error())
			}
			if err := g.mergeAPIModel(); err != nil {
				t.Fatalf("unexpected error merging api model: %s", err.Error())
			}
			if err := g.loadAPIModel(); err != nil {
				t.Fatalf("unexpected error loading api model: %s", err.Error())
			}
			if err := g.validateAPIModelAsVLabs(); err != nil {
				t.Fatalf("unexpected error validateAPIModelAsVLabs the example apimodel: %s", err)
			}
		})
	}
}
|
package otpauth
import (
"crypto"
)
// GenOTP derives a truncated one-time-password value from the shared
// secret s and the moving factor f, using hash algorithm a.
// The result is the 31-bit dynamically truncated integer described in
// RFC 4226 section 5.3 (before modulo reduction to the digit count).
func GenOTP(a crypto.Hash, s []byte, f int64) (int64, error) {
	sum, err := HMAC(a, s, Itob(f))
	if err != nil {
		return 0, err
	}
	// Dynamic truncation: the low nibble of the final byte selects a
	// 4-byte window; the top bit of that window is masked off.
	offset := sum[len(sum)-1] & 0xf
	code := int64(sum[offset]&0x7f)<<24 |
		int64(sum[offset+1])<<16 |
		int64(sum[offset+2])<<8 |
		int64(sum[offset+3])
	return code, nil
}
|
package core
// Player represents a client bound to a platform, together with the
// handler invoked on Connect and the arguments passed to it.
type Player struct {
	platform int                 // platform identifier set by NewClient
	handler  func(...interface{}) // callback registered via RecHandleFunc
	params   []interface{}        // arguments forwarded to handler on Connect
}

// NewClient records the platform identifier and returns the receiver so
// calls can be chained.
func (p *Player) NewClient(player int) *Player {
	p.platform = player
	return p
}

// RecHandleFunc registers the connect handler together with the
// arguments that will be passed to it.
// Fix: the original silently discarded params, so Connect always invoked
// the handler with an empty argument list.
func (p *Player) RecHandleFunc(hFuc handlerFunc, params ...interface{}) {
	p.handler = hFuc
	p.params = params
}

// Connect invokes the registered handler with the stored arguments.
// Fix: the original passed p.params as one []interface{} value instead
// of expanding it into variadic arguments.
func (p *Player) Connect() {
	p.handler(p.params...)
}

// handlerFunc is the signature of a connect callback.
type handlerFunc func(...interface{})
|
package rpmmd_mock
import (
"github.com/osbuild/osbuild-composer/internal/rpmmd"
"github.com/osbuild/osbuild-composer/internal/store"
"github.com/osbuild/osbuild-composer/internal/worker"
)
// fetchPackageList holds the canned return values for FetchMetadata.
type fetchPackageList struct {
	ret rpmmd.PackageList
	checksums map[string]string
	err error
}

// depsolve holds the canned return values for Depsolve.
type depsolve struct {
	ret []rpmmd.PackageSpec
	checksums map[string]string
	err error
}

// Fixture bundles the mocked rpmmd results with the store and worker
// server a test needs.
type Fixture struct {
	fetchPackageList
	depsolve
	*store.Store
	Workers *worker.Server
}

// rpmmdMock implements rpmmd.RPMMD by returning fixture data.
type rpmmdMock struct {
	Fixture Fixture
}
// NewRPMMDMock wraps the given fixture in an rpmmd.RPMMD implementation.
func NewRPMMDMock(fixture Fixture) rpmmd.RPMMD {
	mock := rpmmdMock{Fixture: fixture}
	return &mock
}
// FetchMetadata returns the canned package-list result from the fixture;
// all arguments are ignored.
func (r *rpmmdMock) FetchMetadata(repos []rpmmd.RepoConfig, modulePlatformID string, arch string) (rpmmd.PackageList, map[string]string, error) {
	f := r.Fixture.fetchPackageList
	return f.ret, f.checksums, f.err
}
// Depsolve returns the canned depsolve result from the fixture; all
// arguments are ignored.
// Fix: the checksums previously came from fetchPackageList (copy-paste
// error) even though the depsolve fixture carries its own checksums.
func (r *rpmmdMock) Depsolve(packageSet rpmmd.PackageSet, repos []rpmmd.RepoConfig, modulePlatformID, arch string) ([]rpmmd.PackageSpec, map[string]string, error) {
	return r.Fixture.depsolve.ret, r.Fixture.depsolve.checksums, r.Fixture.depsolve.err
}
|
package main
import (
"fmt"
"math/rand"
"os"
"strconv"
)
// Roll a die until first is hit, immediately followed by second.
// Return the number of rolls to end the game.
// first and second must be in {min,...,max}.
func playGame(first int, second int, min int, max int) int {
	last := -1
	for nRolls := 1; ; nRolls++ {
		// Uniform roll in [min, max]. Fix: the original used
		// rand.Intn(max)+min, which produces [min, min+max-1] and is only
		// correct when min == 1.
		roll := rand.Intn(max-min+1) + min
		if last == first && roll == second {
			return nRolls
		}
		last = roll
	}
}
// Play the game with a standard six-sided die (faces 1..6).
// first and second must be in {1,...,6}.
func playDieGame(first int, second int) int {
	return playGame(first, second, 1, 6)
}
// main reads the trial count from the command line, simulates both die
// games that many times, and prints the average rolls per game.
func main() {
	if len(os.Args) != 2 {
		fmt.Fprintf(os.Stderr, "Usage: go run sim.go nTrials\n")
		os.Exit(1)
	}
	nTrials, err := strconv.Atoi(os.Args[1])
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	// Guard against a zero/negative count: the averages below divide by it.
	if nTrials <= 0 {
		fmt.Fprintf(os.Stderr, "nTrials must be positive\n")
		os.Exit(1)
	}
	fiveSixRolls := 0
	fiveFiveRolls := 0
	for n := 0; n < nTrials; n++ {
		fiveSixRolls += playDieGame(5, 6)
		fiveFiveRolls += playDieGame(5, 5)
	}
	// Use floating-point division; the original integer division truncated
	// the averages and hid the fractional difference between the games.
	fmt.Printf("Five-Six game average rolls: %v\n", float64(fiveSixRolls)/float64(nTrials))
	fmt.Printf("Five-Five game average rolls: %v\n", float64(fiveFiveRolls)/float64(nTrials))
}
|
package usecase
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"path/filepath"
"../domain"
"../utils"
"cloud.google.com/go/storage"
"github.com/google/uuid"
"gopkg.in/mgo.v2/bson"
)
//VideoService provides video upload operations backed by MongoDB and
//Google Cloud Storage.
type VideoService struct{}
//UploadVideo stores the uploaded file in cloud storage and records the
//video document in MongoDB. reqBody is expected to carry "channel_id",
//"user_id" and "Content-Type" entries (assumed from usage — TODO confirm
//against callers). Returns an HTTP status code, a success flag and a
//user-facing message.
func (vs *VideoService) UploadVideo(reqBody map[string]string, video *multipart.FileHeader) (int, bool, string) {
	statusCode := http.StatusInternalServerError
	success := false
	message := "Something went wrong."
	var database = _db.GetMongo()
	fileExtension := filepath.Ext(video.Filename)
	fmt.Println("fileExtension")
	fmt.Println(fileExtension)
	// Default to .mp4 when the client supplied no extension.
	if fileExtension == "" {
		fileExtension = ".mp4"
	}
	videoFileName := uuid.New().String() + fileExtension
	file, err := video.Open()
	// Fix: only defer Close after a successful Open. The original deferred
	// unconditionally, dereferencing a nil file when Open failed.
	if err == nil {
		defer file.Close()
	}
	if bson.IsObjectIdHex(reqBody["channel_id"]) {
		if database != nil {
			if err == nil {
				videoBytes, err := ioutil.ReadAll(file)
				if err == nil {
					statusCode, success, message = vs.UploadToStorage(videoBytes, videoFileName, fileExtension, reqBody["Content-Type"], utils.VIDEO)
					if success {
						config := _config.GetConfiguration()
						collection := database.C(config.MongoDb.Collections.Videos)
						var video domain.Video
						video.VideoURL = "https://storage.googleapis.com/gogetin/" + videoFileName
						video.UserID = bson.ObjectIdHex(reqBody["user_id"])
						channelIDS := []domain.ChannelID{}
						channelID := domain.ChannelID{}
						channelID.ID = bson.ObjectIdHex(reqBody["channel_id"])
						channelIDS = append(channelIDS, channelID)
						video.ChannelIDS = channelIDS
						err := collection.Insert(video)
						if err == nil {
							statusCode = http.StatusOK
							success = true
							message = "Successfully uploaded and saved video."
						} else {
							_logger.Error("Error in inserting video: " + err.Error())
						}
					} else {
						_logger.Error("Error in uploading video: " + err.Error())
					}
				} else {
					_logger.Error("Error in uploading video: " + err.Error())
				}
			} else {
				_logger.Error("Error in uploading video: " + err.Error())
			}
		} else {
			_logger.Error("Error connecting to db.")
		}
	} else {
		_logger.Error("Invalid channel id.")
		statusCode = http.StatusBadRequest
		message = "Invalid id."
	}
	return statusCode, success, message
}
//UploadToStorage writes fileBytes to the "gogetin" bucket under fileName
//with a public-read ACL and a 1-day cache header. Returns an HTTP status
//code, a success flag and a user-facing message.
func (vs *VideoService) UploadToStorage(fileBytes []byte, fileName, ext, contentType, fileType string) (int, bool, string) {
	statusCode := http.StatusInternalServerError
	success := false
	message := "Something went wrong."
	StorageBucketName := "gogetin"
	StorageBucket, err := vs.configureStorage(StorageBucketName)
	if err == nil {
		ctx := context.Background()
		w := StorageBucket.Object(fileName).NewWriter(ctx)
		w.ACL = []storage.ACLRule{{Entity: storage.AllUsers, Role: storage.RoleReader}}
		// Entries are immutable, be aggressive about caching (1 day).
		w.CacheControl = "public, max-age=86400"
		r := bytes.NewReader(fileBytes)
		// Fix: the original shadowed err with io.Copy's result and then
		// overwrote it via `err = w.Close()`, silently dropping copy
		// failures. Keep the first error but always close the writer.
		_, err = io.Copy(w, r)
		if closeErr := w.Close(); err == nil {
			err = closeErr
		}
		if err == nil {
			statusCode = http.StatusOK
			success = true
			message = "Succcessfully uploaded."
		} else {
			_logger.Error("Error in uploading video: " + err.Error())
		}
	} else {
		_logger.Error("Error in uploading video: " + err.Error())
	}
	return statusCode, success, message
}
//configureStorage returns a handle to the named bucket using a client
//built from default application credentials.
//NOTE(review): the client is never closed and a new one is created per
//call; the returned bucket handle depends on it staying alive — confirm
//whether a shared long-lived client is intended.
func (vs *VideoService) configureStorage(bucketID string) (*storage.BucketHandle, error) {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		return nil, err
	}
	return client.Bucket(bucketID), nil
}
|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "as IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"context"
"fmt"
"bscp.io/pkg/iam/meta"
"bscp.io/pkg/kit"
"bscp.io/pkg/logs"
pbcs "bscp.io/pkg/protocol/config-server"
pbatv "bscp.io/pkg/protocol/core/app-template-variable"
pbds "bscp.io/pkg/protocol/data-service"
)
// ExtractAppTemplateVariables extract app template variables
func (s *Service) ExtractAppTemplateVariables(ctx context.Context, req *pbcs.ExtractAppTemplateVariablesReq) (
	*pbcs.ExtractAppTemplateVariablesResp, error) {
	kt := kit.FromGrpcContext(ctx)

	// App-level Find permission is required.
	resp := new(pbcs.ExtractAppTemplateVariablesResp)
	res := &meta.ResourceAttribute{
		Basic: &meta.Basic{Type: meta.App, Action: meta.Find},
		BizID: req.BizId,
	}
	if err := s.authorizer.AuthorizeWithResp(kt, resp, res); err != nil {
		return nil, err
	}

	rp, err := s.client.DS.ExtractAppTemplateVariables(kt.RpcCtx(), &pbds.ExtractAppTemplateVariablesReq{
		BizId: req.BizId,
		AppId: req.AppId,
	})
	if err != nil {
		logs.Errorf("extract app template variables failed, err: %v, rid: %s", err, kt.Rid)
		return nil, err
	}

	return &pbcs.ExtractAppTemplateVariablesResp{Details: rp.Details}, nil
}
// GetAppTemplateVariableReferences get app template variable references
func (s *Service) GetAppTemplateVariableReferences(ctx context.Context, req *pbcs.GetAppTemplateVariableReferencesReq) (
	*pbcs.GetAppTemplateVariableReferencesResp, error) {
	kt := kit.FromGrpcContext(ctx)

	// App-level Find permission is required.
	resp := new(pbcs.GetAppTemplateVariableReferencesResp)
	res := &meta.ResourceAttribute{
		Basic: &meta.Basic{Type: meta.App, Action: meta.Find},
		BizID: req.BizId,
	}
	if err := s.authorizer.AuthorizeWithResp(kt, resp, res); err != nil {
		return nil, err
	}

	rp, err := s.client.DS.GetAppTemplateVariableReferences(kt.RpcCtx(), &pbds.GetAppTemplateVariableReferencesReq{
		BizId: req.BizId,
		AppId: req.AppId,
	})
	if err != nil {
		logs.Errorf("get app template variable references failed, err: %v, rid: %s", err, kt.Rid)
		return nil, err
	}

	return &pbcs.GetAppTemplateVariableReferencesResp{Details: rp.Details}, nil
}
// ListAppTemplateVariables list app template variables
func (s *Service) ListAppTemplateVariables(ctx context.Context, req *pbcs.ListAppTemplateVariablesReq) (
	*pbcs.ListAppTemplateVariablesResp, error) {
	kt := kit.FromGrpcContext(ctx)

	// App-level Find permission is required.
	resp := new(pbcs.ListAppTemplateVariablesResp)
	res := &meta.ResourceAttribute{
		Basic: &meta.Basic{Type: meta.App, Action: meta.Find},
		BizID: req.BizId,
	}
	if err := s.authorizer.AuthorizeWithResp(kt, resp, res); err != nil {
		return nil, err
	}

	rp, err := s.client.DS.ListAppTemplateVariables(kt.RpcCtx(), &pbds.ListAppTemplateVariablesReq{
		BizId: req.BizId,
		AppId: req.AppId,
	})
	if err != nil {
		logs.Errorf("list app template variables failed, err: %v, rid: %s", err, kt.Rid)
		return nil, err
	}

	return &pbcs.ListAppTemplateVariablesResp{Details: rp.Details}, nil
}
// ListReleasedAppTemplateVariables list released app template variables
func (s *Service) ListReleasedAppTemplateVariables(ctx context.Context, req *pbcs.ListReleasedAppTemplateVariablesReq) (
	*pbcs.ListReleasedAppTemplateVariablesResp, error) {
	kt := kit.FromGrpcContext(ctx)

	// A concrete release must be specified.
	if req.ReleaseId <= 0 {
		return nil, fmt.Errorf("invalid release id %d, it must bigger than 0", req.ReleaseId)
	}

	// App-level Find permission is required.
	resp := new(pbcs.ListReleasedAppTemplateVariablesResp)
	res := &meta.ResourceAttribute{
		Basic: &meta.Basic{Type: meta.App, Action: meta.Find},
		BizID: req.BizId,
	}
	if err := s.authorizer.AuthorizeWithResp(kt, resp, res); err != nil {
		return nil, err
	}

	rp, err := s.client.DS.ListReleasedAppTemplateVariables(kt.RpcCtx(), &pbds.ListReleasedAppTemplateVariablesReq{
		BizId:     req.BizId,
		AppId:     req.AppId,
		ReleaseId: req.ReleaseId,
	})
	if err != nil {
		logs.Errorf("list released app template variables failed, err: %v, rid: %s", err, kt.Rid)
		return nil, err
	}

	return &pbcs.ListReleasedAppTemplateVariablesResp{Details: rp.Details}, nil
}
// UpdateAppTemplateVariables update app template variables
func (s *Service) UpdateAppTemplateVariables(ctx context.Context, req *pbcs.UpdateAppTemplateVariablesReq) (
	*pbcs.UpdateAppTemplateVariablesResp, error) {
	kt := kit.FromGrpcContext(ctx)

	// App-level Find permission is required.
	resp := new(pbcs.UpdateAppTemplateVariablesResp)
	res := &meta.ResourceAttribute{
		Basic: &meta.Basic{Type: meta.App, Action: meta.Find},
		BizID: req.BizId,
	}
	if err := s.authorizer.AuthorizeWithResp(kt, resp, res); err != nil {
		return nil, err
	}

	if _, err := s.client.DS.UpdateAppTemplateVariables(kt.RpcCtx(), &pbds.UpdateAppTemplateVariablesReq{
		Attachment: &pbatv.AppTemplateVariableAttachment{
			BizId: req.BizId,
			AppId: req.AppId,
		},
		Spec: &pbatv.AppTemplateVariableSpec{
			Variables: req.Variables,
		},
	}); err != nil {
		logs.Errorf("update app template variables failed, err: %v, rid: %s", err, kt.Rid)
		return nil, err
	}

	return resp, nil
}
|
package producer
import (
"context"
"errors"
"fmt"
"log"
"time"
"github.com/cenkalti/backoff"
"github.com/streadway/amqp"
)
// ProducerMQ is an AMQP publisher that declares an exchange and keeps its
// connection alive, reconnecting with exponential backoff when the broker
// drops the link.
type ProducerMQ struct {
	conn *amqp.Connection
	channel *amqp.Channel
	// uri is the broker address used by connect/reConnect.
	uri string
	// done receives the broker's close notification: non-nil on abnormal
	// close (triggers a reconnect), nil on clean shutdown.
	done chan error
	exchangeName string
	exchangeType string
	// queue is declared but not referenced by the visible methods —
	// NOTE(review): confirm whether it is still needed.
	queue string
	routingKey string
}
// NewProducerMQ builds a ProducerMQ with the given connection and routing
// settings; the connection itself is established by KeepConnection.
// Fix: the queue argument was accepted but never stored.
func NewProducerMQ(uri, exchangeName, exchangeType, queue, routingKey string) *ProducerMQ {
	return &ProducerMQ{
		uri:          uri,
		exchangeName: exchangeName,
		exchangeType: exchangeType,
		queue:        queue,
		routingKey:   routingKey,
		done:         make(chan error),
	}
}
// reConnect retries connect with exponential backoff (1s initial, x2
// multiplier, 15s cap) for at most one minute; it returns an error when
// the backoff budget is exhausted.
func (p *ProducerMQ) reConnect() error {
	policy := backoff.NewExponentialBackOff()
	policy.MaxElapsedTime = time.Minute
	policy.InitialInterval = 1 * time.Second
	policy.Multiplier = 2
	policy.MaxInterval = 15 * time.Second
	b := backoff.WithContext(policy, context.Background())

	for {
		wait := b.NextBackOff()
		if wait == backoff.Stop {
			return fmt.Errorf("stop reconnecting")
		}
		time.Sleep(wait)
		err := p.connect()
		if err == nil {
			return nil
		}
		log.Printf("could not connect in reconnect call: %+v", err)
	}
}
// connect dials the broker, spawns a goroutine that forwards the
// connection's close notification onto p.done, opens a channel, and
// declares the exchange (durable, non-auto-deleted).
func (p *ProducerMQ) connect() error {
	var err error
	p.conn, err = amqp.Dial(p.uri)
	if err != nil {
		return fmt.Errorf("Dial: %s", err)
	}
	go func() {
		// Blocks until the broker closes the connection.
		errMQ := <-p.conn.NotifyClose(make(chan *amqp.Error))
		log.Printf("closing: %s", errMQ)
		if errMQ != nil {
			// The connection closed abnormally; the channel is gone, so
			// signal KeepConnection to re-establish the connection.
			p.done <- errors.New("Channel Closed")
		} else {
			// Clean shutdown: signal KeepConnection to finish.
			p.done <- nil
		}
	}()
	p.channel, err = p.conn.Channel()
	if err != nil {
		return fmt.Errorf("Channel: %s", err)
	}
	// durable=true, autoDelete/internal/noWait=false, no extra args.
	if err = p.channel.ExchangeDeclare(
		p.exchangeName,
		p.exchangeType,
		true,
		false,
		false,
		false,
		nil,
	); err != nil {
		return fmt.Errorf("Exchange Declare: %s", err)
	}
	return nil
}
// Publish sends msg as a JSON message to the configured exchange and
// routing key (non-mandatory, non-immediate delivery).
func (p *ProducerMQ) Publish(msg []byte) error {
	pub := amqp.Publishing{
		ContentType: "application/json",
		Body:        msg,
	}
	return p.channel.Publish(p.exchangeName, p.routingKey, false, false, pub)
}
// KeepConnection establishes the initial connection, then blocks and
// reconnects whenever the broker drops the link. It returns nil after a
// clean shutdown and an error if connecting or reconnecting fails.
func (p *ProducerMQ) KeepConnection() error {
	if err := p.connect(); err != nil {
		return fmt.Errorf("Error: %v", err)
	}
	for {
		err := <-p.done
		if err == nil {
			// Clean close notification: stop keeping the connection.
			log.Println("Finishing ProducerMQ...")
			return nil
		}
		if err = p.reConnect(); err != nil {
			return fmt.Errorf("Reconnecting Error: %s", err)
		}
		fmt.Println("Reconnected...")
	}
}
// GracefulStop closes the channel and then the connection, returning the
// first error encountered.
// Fix: guard against nil channel/connection so calling GracefulStop
// before a successful connect no longer panics.
func (p *ProducerMQ) GracefulStop() error {
	if p.channel != nil {
		if err := p.channel.Close(); err != nil {
			return err
		}
	}
	if p.conn != nil {
		if err := p.conn.Close(); err != nil {
			return err
		}
	}
	return nil
}
|
// Package maps implements generic map containers.
// REF: http://ifeve.com/go-concurrency-concurrent-map/
package maps
import (
"reflect"
)
// GenericMap is a generic map abstraction over dynamically typed keys
// and elements.
type GenericMap interface {
	// Get returns the element mapped to the given key, or nil when the
	// key is absent.
	Get(key interface{}) interface{}
	// Put stores the key/element pair and returns the previous element
	// for the key; when there was none it returns (nil, true).
	Put(key interface{}, elem interface{}) (interface{}, bool)
	// Remove deletes the pair for the given key and returns the old
	// element, or nil when the key was absent.
	Remove(key interface{}) interface{}
	// Clear removes all key/element pairs.
	Clear()
	// Len reports the number of stored pairs.
	Len() int
	// Contains reports whether the given key is present.
	Contains(key interface{}) bool
	// Keys returns the keys as a slice (sorted, per the original docs —
	// TODO confirm the ordering guarantee against implementations).
	Keys() []interface{}
	// Elems returns the elements as a slice (sorted, per the original
	// docs — TODO confirm the ordering guarantee against implementations).
	Elems() []interface{}
	// ToMap returns the contents as a native Go map.
	ToMap() map[interface{}]interface{}
	// KeyType returns the reflect.Type of the keys.
	KeyType() reflect.Type
	// ElemType returns the reflect.Type of the elements.
	ElemType() reflect.Type
}
|
package context
// panicHandlerKey is a private, unique pointer used as the lookup key for
// a context's registered panic handler; the anonymous struct pointer
// guarantees it cannot collide with user-provided keys.
var panicHandlerKey = &struct{ bool }{}
// panicHandle dispatches panicErr to the nearest panic handler registered
// on this context or one of its ancestors, walking parent links. If no
// handler exists anywhere in the chain the panic is re-raised. If a
// handler itself panics, the deferred recover forwards the new panic to
// the next ancestor above the failing handler, or re-panics at the root.
func (c *ctx) panicHandle(panicErr interface{}) {
	sc := c
	defer func() {
		// A handler panicked: resume the search from the handler's parent
		// so the same handler is not re-entered with its own panic.
		if panicErr = recover(); panicErr != nil {
			if sc != nil {
				sc = sc.parent
			}
			if sc != nil {
				sc.panicHandle(panicErr)
			} else {
				panic(panicErr)
			}
		}
	}()
	for {
		h, ok := sc.value.Load(panicHandlerKey)
		if ok {
			// Invoke the handler with the context it was registered on.
			h.(func(ctx Context, panic interface{}))(sc, panicErr)
			return
		}
		sc = sc.parent
		if sc == nil {
			// No handler anywhere in the ancestor chain: propagate.
			panic(panicErr)
		}
	}
}
// PanicHandlerSet registers handler as this context's panic handler and
// returns the context to allow call chaining.
func (c *ctx) PanicHandlerSet(handler func(ctx Context, panic interface{})) Context {
	c.value.Store(panicHandlerKey, handler)
	return c
}
|
package main
import (
"log"
"os"
"path/filepath"
"text/template"
)
// Version is the data model rendered into Dockerfile.tpl: the Ubuntu
// release name and the Ansible version.
type Version struct {
	OS string
	AnsibleVer string
}
// main renders Dockerfile.tpl into one <os>_<version>/Dockerfile per
// (OS, Ansible version) combination, stopping at the first error.
func main() {
	osNames := []string{"trusty", "xenial"}
	ansibleVersions := []string{"1.9", "2.0", "2.1", "2.2"}
	tpl := template.Must(template.ParseFiles("Dockerfile.tpl"))
	for _, osName := range osNames {
		for _, ansibleVer := range ansibleVersions {
			if err := writeDockerfile(tpl, osName, ansibleVer); err != nil {
				log.Println(err)
				return
			}
		}
	}
}

// writeDockerfile renders tpl into <osName>_<ansibleVer>/Dockerfile,
// creating the directory if needed.
// Fix: the original used `defer f.Close()` inside the loop, keeping every
// generated file open until main returned; closing per file here releases
// each descriptor promptly. Stat+Mkdir is also replaced by MkdirAll.
func writeDockerfile(tpl *template.Template, osName, ansibleVer string) error {
	dirName := osName + "_" + ansibleVer
	if err := os.MkdirAll(dirName, 0755); err != nil {
		return err
	}
	f, err := os.Create(filepath.Join(dirName, "Dockerfile"))
	if err != nil {
		return err
	}
	defer f.Close()
	return tpl.Execute(f, Version{osName, ansibleVer})
}
|
package main
import "fmt"
// Pet is the behavior shared by all pets: a name and an age in years.
type Pet interface {
	Name() string
	Age() uint8
}
// Dog is a concrete Pet with a name and an age in years.
type Dog struct {
	name string
	age  uint8
}

// Name returns the dog's name.
func (d Dog) Name() string { return d.name }

// Age returns the dog's age in years.
func (d Dog) Age() uint8 { return d.age }
// main demonstrates interface satisfaction for value vs pointer, then
// prints a string pointer in several formats.
func main() {
	myDog := Dog{"Little D", 3}
	// A *Dog's method set includes the value-receiver methods, so both the
	// pointer and the value satisfy Pet here.
	_, ptrImplements := interface{}(&myDog).(Pet)
	_, valImplements := interface{}(myDog).(Pet)
	fmt.Printf("%v, %v\n", ptrImplements, valImplements)

	str1 := "hello"
	p1 := &str1
	var arr1 []int
	fmt.Printf("str:%s, p1: %X, p: %p %x, arr1:%p", *p1, p1, &str1, &str1, arr1)
}
|
package multibase
import (
"bytes"
"math/rand"
"testing"
)
// TestMap verifies that Encodings and EncodingToStr are exact inverses.
func TestMap(t *testing.T) {
	for str, enc := range Encodings {
		if got := EncodingToStr[enc]; got != str {
			t.Errorf("round trip failed on encoding map: %s != %s", str, got)
		}
	}
	for enc, str := range EncodingToStr {
		if got := Encodings[str]; got != enc {
			t.Errorf("round trip failed on encoding map: '%c' != '%c'", enc, got)
		}
	}
}
// sampleBytes is the shared plaintext used by the encode/decode tests.
var sampleBytes = []byte("Decentralize everything!!!")

// encodedSamples maps each encoding to the expected multibase string for
// sampleBytes. The Identity entry is the raw text prefixed with the 0x00
// multibase code.
// Fix: `string(0x00)` (integer-to-string conversion) is flagged by go vet
// since Go 1.15; "\x00" expresses the same one-byte string explicitly.
var encodedSamples = map[Encoding]string{
	Identity:          "\x00" + "Decentralize everything!!!",
	Base16:            "f446563656e7472616c697a652065766572797468696e67212121",
	Base16Upper:       "F446563656E7472616C697A652065766572797468696E67212121",
	Base32:            "birswgzloorzgc3djpjssazlwmvzhs5dinfxgoijbee",
	Base32Upper:       "BIRSWGZLOORZGC3DJPJSSAZLWMVZHS5DINFXGOIJBEE",
	Base32pad:         "cirswgzloorzgc3djpjssazlwmvzhs5dinfxgoijbee======",
	Base32padUpper:    "CIRSWGZLOORZGC3DJPJSSAZLWMVZHS5DINFXGOIJBEE======",
	Base32hex:         "v8him6pbeehp62r39f9ii0pbmclp7it38d5n6e89144",
	Base32hexUpper:    "V8HIM6PBEEHP62R39F9II0PBMCLP7IT38D5N6E89144",
	Base32hexPad:      "t8him6pbeehp62r39f9ii0pbmclp7it38d5n6e89144======",
	Base32hexPadUpper: "T8HIM6PBEEHP62R39F9II0PBMCLP7IT38D5N6E89144======",
	Base58BTC:         "z36UQrhJq9fNDS7DiAHM9YXqDHMPfr4EMArvt",
	Base64:            "mRGVjZW50cmFsaXplIGV2ZXJ5dGhpbmchISE",
	Base64url:         "uRGVjZW50cmFsaXplIGV2ZXJ5dGhpbmchISE",
	Base64pad:         "MRGVjZW50cmFsaXplIGV2ZXJ5dGhpbmchISE=",
	Base64urlPad:      "URGVjZW50cmFsaXplIGV2ZXJ5dGhpbmchISE=",
}
// testEncode checks that Encode produces exactly the expected string for
// the given encoding and input.
func testEncode(t *testing.T, encoding Encoding, input []byte, expected string) {
	actual, err := Encode(encoding, input)
	if err != nil {
		t.Error(err)
		return
	}
	if actual == expected {
		return
	}
	t.Errorf("encoding failed for %c (%d), expected: %s, got: %s", encoding, encoding, expected, actual)
}
// testDecode checks that Decode recovers both the encoding code and the
// original bytes from a multibase string.
func testDecode(t *testing.T, expectedEncoding Encoding, expectedBytes []byte, data string) {
	actualEncoding, actualBytes, err := Decode(data)
	if err != nil {
		t.Error(err)
		return
	}
	if actualEncoding != expectedEncoding {
		t.Errorf("wrong encoding code, expected: %c (%d), got %c (%d)", expectedEncoding, expectedEncoding, actualEncoding, actualEncoding)
	}
	if bytes.Equal(actualBytes, expectedBytes) {
		return
	}
	t.Errorf("decoding failed for %c (%d), expected: %v, got %v", actualEncoding, actualEncoding, expectedBytes, actualBytes)
}
// TestEncode exercises Encode against every entry in encodedSamples.
func TestEncode(t *testing.T) {
	for enc, expected := range encodedSamples {
		testEncode(t, enc, sampleBytes, expected)
	}
}
// TestDecode exercises Decode against every entry in encodedSamples.
func TestDecode(t *testing.T) {
	for enc, sample := range encodedSamples {
		testDecode(t, enc, sampleBytes, sample)
	}
}
// TestRoundTrip encodes random bytes with a representative set of bases
// and verifies Decode returns the same base code and payload, then checks
// that decoding an empty string fails.
func TestRoundTrip(t *testing.T) {
	// 17 bytes: deliberately not a multiple of common group sizes, to
	// exercise padding paths.
	buf := make([]byte, 17)
	rand.Read(buf)
	baseList := []Encoding{Identity, Base16, Base32, Base32hex, Base32pad, Base32hexPad, Base58BTC, Base58Flickr, Base64pad, Base64urlPad}
	for _, base := range baseList {
		enc, err := Encode(base, buf)
		if err != nil {
			t.Fatal(err)
		}
		e, out, err := Decode(enc)
		if err != nil {
			t.Fatal(err)
		}
		if e != base {
			t.Fatal("got wrong encoding out")
		}
		if !bytes.Equal(buf, out) {
			t.Fatal("input wasnt the same as output", buf, out)
		}
	}
	// An empty string has no multibase prefix and must be rejected.
	_, _, err := Decode("")
	if err == nil {
		t.Fatal("shouldnt be able to decode empty string")
	}
}
// BenchmarkRoundTrip measures Encode+Decode round trips of a random
// 32-byte payload, one sub-benchmark per supported base.
func BenchmarkRoundTrip(b *testing.B) {
	buf := make([]byte, 32)
	rand.Read(buf)
	b.ResetTimer()
	bases := map[string]Encoding{
		"Identity": Identity,
		"Base16": Base16,
		"Base16Upper": Base16Upper,
		"Base32": Base32,
		"Base32Upper": Base32Upper,
		"Base32pad": Base32pad,
		"Base32padUpper": Base32padUpper,
		"Base32hex": Base32hex,
		"Base32hexUpper": Base32hexUpper,
		"Base32hexPad": Base32hexPad,
		"Base32hexPadUpper": Base32hexPadUpper,
		"Base58Flickr": Base58Flickr,
		"Base58BTC": Base58BTC,
		"Base64": Base64,
		"Base64url": Base64url,
		"Base64pad": Base64pad,
		"Base64urlPad": Base64urlPad,
	}
	for name, base := range bases {
		b.Run(name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				enc, err := Encode(base, buf)
				if err != nil {
					b.Fatal(err)
				}
				e, out, err := Decode(enc)
				if err != nil {
					b.Fatal(err)
				}
				// Correctness is asserted inside the benchmark so a broken
				// codec fails fast rather than producing bogus timings.
				if e != base {
					b.Fatal("got wrong encoding out")
				}
				if !bytes.Equal(buf, out) {
					b.Fatal("input wasnt the same as output", buf, out)
				}
			}
		})
	}
}
|
package p2p
import (
"context"
"errors"
"time"
"github.com/bluele/gcache"
"github.com/qlcchain/go-qlc/common"
"github.com/qlcchain/go-qlc/common/types"
"github.com/qlcchain/go-qlc/ledger"
"github.com/qlcchain/go-qlc/ledger/process"
"github.com/qlcchain/go-qlc/p2p/protos"
)
// Timing and retry parameters for the message and block caches.
const (
	// checkCacheTimeInterval is how often unacknowledged messages are rechecked.
	checkCacheTimeInterval = 30 * time.Second
	// checkBlockCacheInterval is how often the ledger block cache is reprocessed.
	checkBlockCacheInterval = 60 * time.Second
	// msgResendMaxTimes is the resend-attempt limit before a message is dropped.
	msgResendMaxTimes = 10
	// msgNeedResendInterval is the minimum age before a cached message is resent.
	msgNeedResendInterval = 10 * time.Second
)
// Message Type: wire identifiers for each protocol message. The string
// values are part of the wire protocol and must not change.
const (
	PublishReq = "0" //PublishReq
	ConfirmReq = "1" //ConfirmReq
	ConfirmAck = "2" //ConfirmAck
	FrontierRequest = "3" //FrontierReq
	FrontierRsp = "4" //FrontierRsp
	BulkPullRequest = "5" //BulkPullRequest
	BulkPullRsp = "6" //BulkPullRsp
	BulkPushBlock = "7" //BulkPushBlock
	MessageResponse = "8" //MessageResponse
	// PoV (proof of vote) message types.
	PovStatus = "20"
	PovPublishReq = "21"
	PovBulkPullReq = "22"
	PovBulkPullRsp = "23"
)
// cacheValue tracks one message sent to a peer that has not yet been
// acknowledged with a MessageResponse; entries are resent until either
// acknowledged or msgResendMaxTimes is exceeded.
type cacheValue struct {
	// peerID is the destination peer.
	peerID string
	// resendTimes counts resend attempts so far.
	resendTimes uint32
	// startTime is compared against msgNeedResendInterval to pace resends.
	startTime time.Time
	// data is the raw serialized message to resend.
	data []byte
	// t is the message type string (PublishReq, ConfirmReq, ...).
	t string
}
// MessageService routes incoming p2p messages to per-category handler
// loops and tracks unacknowledged outbound messages in an LRU cache.
type MessageService struct {
	netService *QlcService
	// ctx/cancel control the lifetime of every handler goroutine.
	ctx context.Context
	cancel context.CancelFunc
	// messageCh receives sync-related messages (frontier/bulk pull/push).
	messageCh chan *Message
	publishMessageCh chan *Message
	confirmReqMessageCh chan *Message
	confirmAckMessageCh chan *Message
	// rspMessageCh receives MessageResponse acknowledgements.
	rspMessageCh chan *Message
	// povMessageCh receives all PoV message types.
	povMessageCh chan *Message
	ledger *ledger.Ledger
	syncService *ServiceSync
	// cache maps message hash -> []*cacheValue awaiting acknowledgement.
	cache gcache.Cache
}
// NewMessageService returns a message service wired to the given network
// service and ledger, with one buffered channel per message category and
// an LRU cache for tracking unacknowledged messages.
func NewMessageService(netService *QlcService, ledger *ledger.Ledger) *MessageService {
	ctx, cancel := context.WithCancel(context.Background())
	ms := &MessageService{
		ctx: ctx,
		cancel: cancel,
		messageCh: make(chan *Message, common.P2PMonitorMsgChanSize),
		publishMessageCh: make(chan *Message, common.P2PMonitorMsgChanSize),
		confirmReqMessageCh: make(chan *Message, common.P2PMonitorMsgChanSize),
		confirmAckMessageCh: make(chan *Message, common.P2PMonitorMsgChanSize),
		rspMessageCh: make(chan *Message, common.P2PMonitorMsgChanSize),
		povMessageCh: make(chan *Message, common.P2PMonitorMsgChanSize),
		ledger: ledger,
		netService: netService,
		cache: gcache.New(common.P2PMonitorMsgCacheSize).LRU().Build(),
	}
	ms.syncService = NewSyncService(netService, ledger)
	return ms
}
// Start registers a subscriber per message type — routing each type to
// its category channel — and launches the handler goroutines. The
// goroutines run until the service context is cancelled via Stop.
func (ms *MessageService) Start() {
	// register the network handler.
	netService := ms.netService
	netService.Register(NewSubscriber(ms, ms.publishMessageCh, false, PublishReq))
	netService.Register(NewSubscriber(ms, ms.confirmReqMessageCh, false, ConfirmReq))
	netService.Register(NewSubscriber(ms, ms.confirmAckMessageCh, false, ConfirmAck))
	// Sync-related messages share one channel/loop.
	netService.Register(NewSubscriber(ms, ms.messageCh, false, FrontierRequest))
	netService.Register(NewSubscriber(ms, ms.messageCh, false, FrontierRsp))
	netService.Register(NewSubscriber(ms, ms.messageCh, false, BulkPullRequest))
	netService.Register(NewSubscriber(ms, ms.messageCh, false, BulkPullRsp))
	netService.Register(NewSubscriber(ms, ms.messageCh, false, BulkPushBlock))
	netService.Register(NewSubscriber(ms, ms.rspMessageCh, false, MessageResponse))
	// PoV message handlers
	netService.Register(NewSubscriber(ms, ms.povMessageCh, false, PovStatus))
	netService.Register(NewSubscriber(ms, ms.povMessageCh, false, PovPublishReq))
	netService.Register(NewSubscriber(ms, ms.povMessageCh, false, PovBulkPullReq))
	netService.Register(NewSubscriber(ms, ms.povMessageCh, false, PovBulkPullRsp))
	// start loop().
	go ms.startLoop()
	go ms.syncService.Start()
	go ms.checkMessageCacheLoop()
	go ms.messageResponseLoop()
	go ms.publishReqLoop()
	go ms.confirmReqLoop()
	go ms.confirmAckLoop()
	go ms.povMessageLoop()
	go ms.processBlockCacheLoop()
}
// processBlockCacheLoop periodically re-processes the ledger block cache
// until the service context is cancelled.
func (ms *MessageService) processBlockCacheLoop() {
	ms.netService.node.logger.Info("Started process blockCache loop.")
	ticker := time.NewTicker(checkBlockCacheInterval)
	// Fix: stop the ticker on exit so its resources are released; the
	// original leaked it when the context was cancelled.
	defer ticker.Stop()
	for {
		select {
		case <-ms.ctx.Done():
			return
		case <-ticker.C:
			ms.processBlockCache()
		}
	}
}
// processBlockCache scans every block in the ledger's block cache:
// blocks already confirmed are evicted, while blocks still present in
// the state table are re-broadcast and re-submitted for processing.
func (ms *MessageService) processBlockCache() {
	// Snapshot the cache first so the ledger callback stays short.
	blocks := make([]*types.StateBlock, 0)
	err := ms.ledger.GetBlockCaches(func(block *types.StateBlock) error {
		blocks = append(blocks, block)
		return nil
	})
	if err != nil {
		ms.netService.node.logger.Error("get block cache error")
	}
	for _, blk := range blocks {
		if b, err := ms.ledger.HasStateBlockConfirmed(blk.GetHash()); b && err == nil {
			// Confirmed on chain: the cached copy is no longer needed.
			_ = ms.ledger.DeleteBlockCache(blk.GetHash())
		} else {
			b, _ := ms.ledger.HasStateBlock(blk.GetHash())
			if b {
				// Known but unconfirmed: re-broadcast and re-process.
				ms.netService.msgEvent.Publish(common.EventBroadcast, PublishReq, blk)
				ms.netService.msgEvent.Publish(common.EventGenerateBlock, process.Progress, blk)
			}
		}
	}
}
// startLoop dispatches sync-related messages (frontier and bulk
// pull/push) to the sync service until the context is cancelled.
func (ms *MessageService) startLoop() {
	ms.netService.node.logger.Info("Started Message Service.")
	for {
		select {
		case <-ms.ctx.Done():
			return
		case message := <-ms.messageCh:
			switch message.MessageType() {
			case FrontierRequest:
				if err := ms.syncService.onFrontierReq(message); err != nil {
					ms.netService.node.logger.Error(err)
				}
			case FrontierRsp:
				ms.syncService.checkFrontier(message)
			case BulkPullRequest:
				if err := ms.syncService.onBulkPullRequest(message); err != nil {
					ms.netService.node.logger.Error(err)
				}
			case BulkPullRsp:
				if err := ms.syncService.onBulkPullRsp(message); err != nil {
					ms.netService.node.logger.Error(err)
				}
			case BulkPushBlock:
				if err := ms.syncService.onBulkPushBlock(message); err != nil {
					ms.netService.node.logger.Error(err)
				}
			default:
				// Should not happen: only sync types are routed to messageCh.
				ms.netService.node.logger.Error("Received unknown message.")
			}
		}
	}
}
// messageResponseLoop consumes MessageResponse acknowledgements until
// the service context is cancelled.
func (ms *MessageService) messageResponseLoop() {
	for {
		select {
		case <-ms.ctx.Done():
			return
		case message := <-ms.rspMessageCh:
			if message.MessageType() == MessageResponse {
				ms.onMessageResponse(message)
			}
		}
	}
}
// publishReqLoop consumes PublishReq messages until the service context
// is cancelled.
func (ms *MessageService) publishReqLoop() {
	for {
		select {
		case <-ms.ctx.Done():
			return
		case message := <-ms.publishMessageCh:
			if message.MessageType() == PublishReq {
				ms.onPublishReq(message)
			}
		}
	}
}
// confirmReqLoop consumes ConfirmReq messages until the service context
// is cancelled.
func (ms *MessageService) confirmReqLoop() {
	for {
		select {
		case <-ms.ctx.Done():
			return
		case message := <-ms.confirmReqMessageCh:
			if message.MessageType() == ConfirmReq {
				ms.onConfirmReq(message)
			}
		}
	}
}
// confirmAckLoop consumes ConfirmAck messages until the service context
// is cancelled.
func (ms *MessageService) confirmAckLoop() {
	for {
		select {
		case <-ms.ctx.Done():
			return
		case message := <-ms.confirmAckMessageCh:
			if message.MessageType() == ConfirmAck {
				ms.onConfirmAck(message)
			}
		}
	}
}
// checkMessageCacheLoop periodically resends unacknowledged messages
// until the service context is cancelled.
func (ms *MessageService) checkMessageCacheLoop() {
	ticker := time.NewTicker(checkCacheTimeInterval)
	// Fix: stop the ticker on exit so its resources are released; the
	// original leaked it when the context was cancelled.
	defer ticker.Stop()
	for {
		select {
		case <-ms.ctx.Done():
			return
		case <-ticker.C:
			ms.checkMessageCache()
		}
	}
}
func (ms *MessageService) checkMessageCache() {
var cs []*cacheValue
var csTemp []*cacheValue
var hash types.Hash
m := ms.cache.GetALL(false)
for k, v := range m {
hash = k.(types.Hash)
cs = v.([]*cacheValue)
for i, value := range cs {
if value.resendTimes > msgResendMaxTimes {
csTemp = append(csTemp, cs[i])
continue
}
if time.Now().Sub(value.startTime) < msgNeedResendInterval {
continue
}
stream := ms.netService.node.streamManager.FindByPeerID(value.peerID)
if stream == nil {
ms.netService.node.logger.Debug("Failed to locate peer's stream,maybe lost connect")
//csTemp = append(csTemp, cs[i])
value.resendTimes++
continue
}
stream.SendMessageToChan(value.data)
value.resendTimes++
if value.resendTimes > msgResendMaxTimes {
csTemp = append(csTemp, cs[i])
continue
}
}
if len(csTemp) == len(cs) {
t := ms.cache.Remove(hash)
if t {
ms.netService.node.logger.Debugf("remove message:[%s] success", hash.String())
}
} else {
csDiff := sliceDiff(cs, csTemp)
err := ms.cache.Set(hash, csDiff)
if err != nil {
ms.netService.node.logger.Error(err)
}
}
}
}
// povMessageLoop dispatches PoV messages to their handlers until the
// service context is cancelled.
func (ms *MessageService) povMessageLoop() {
	for {
		select {
		case <-ms.ctx.Done():
			return
		case message := <-ms.povMessageCh:
			switch message.MessageType() {
			case PovStatus:
				ms.onPovStatus(message)
			case PovPublishReq:
				ms.onPovPublishReq(message)
			case PovBulkPullReq:
				ms.onPovBulkPullReq(message)
			case PovBulkPullRsp:
				ms.onPovBulkPullRsp(message)
			default:
				// Should not happen: only PoV types are routed here.
				ms.netService.node.logger.Warn("Received unknown pov message.")
			}
		}
	}
}
// onMessageResponse handles an acknowledgement from a peer: the cache
// entries for that peer under the acknowledged hash are removed. When
// every pending entry for the hash is acknowledged the cache record is
// dropped; otherwise only the acknowledged entries are diffed out.
func (ms *MessageService) onMessageResponse(message *Message) {
	//ms.netService.node.logger.Info("receive MessageResponse")
	var hash types.Hash
	var cs []*cacheValue
	var csTemp []*cacheValue
	// The response payload is the text form of the original message hash.
	err := hash.UnmarshalText(message.Data())
	if err != nil {
		ms.netService.node.logger.Errorf("onMessageResponse err:[%s]", err)
		return
	}
	v, err := ms.cache.Get(hash)
	if err != nil {
		if err == gcache.KeyNotFoundError {
			// Already acknowledged or evicted — nothing to do.
			ms.netService.node.logger.Debugf("this hash:[%s] is not in cache", hash)
		} else {
			ms.netService.node.logger.Errorf("Get cache err:[%s] for hash:[%s]", err, hash)
		}
		return
	}
	cs = v.([]*cacheValue)
	// Collect the entries acknowledged by this peer.
	for k, v := range cs {
		if v.peerID == message.MessageFrom() {
			csTemp = append(csTemp, cs[k])
		}
	}
	if len(csTemp) == len(cs) {
		// All pending peers acknowledged: drop the record.
		t := ms.cache.Remove(hash)
		if t {
			ms.netService.node.logger.Debugf("remove message cache for hash:[%s] success", hash)
		}
	} else {
		// Keep only the entries still awaiting acknowledgement.
		csDiff := sliceDiff(cs, csTemp)
		err := ms.cache.Set(hash, csDiff)
		if err != nil {
			ms.netService.node.logger.Error(err)
		}
	}
}
// onPublishReq acknowledges a PublishReq to the sender, then decodes the
// block and publishes it on the event bus for consensus processing.
func (ms *MessageService) onPublishReq(message *Message) {
	// Optional performance tracing of the contained block.
	if ms.netService.node.cfg.PerformanceEnabled {
		blk, err := protos.PublishBlockFromProto(message.Data())
		if err != nil {
			ms.netService.node.logger.Error(err)
			return
		}
		hash := blk.Blk.GetHash()
		ms.addPerformanceTime(hash)
	}
	// Acknowledge receipt so the sender stops resending this message.
	err := ms.netService.SendMessageToPeer(MessageResponse, message.Hash(), message.MessageFrom())
	if err != nil {
		ms.netService.node.logger.Debugf("send Publish Response err:[%s] for message hash:[%s]", err, message.Hash().String())
	}
	hash, err := types.HashBytes(message.Content())
	if err != nil {
		ms.netService.node.logger.Error(err)
		return
	}
	p, err := protos.PublishBlockFromProto(message.Data())
	if err != nil {
		ms.netService.node.logger.Info(err)
		return
	}
	ms.netService.msgEvent.Publish(common.EventPublish, p.Blk, hash, message.MessageFrom())
}
// onConfirmReq acknowledges a ConfirmReq to the sender, then decodes the
// block and publishes it on the event bus for vote handling.
func (ms *MessageService) onConfirmReq(message *Message) {
	// Optional performance tracing of the contained block.
	if ms.netService.node.cfg.PerformanceEnabled {
		blk, err := protos.ConfirmReqBlockFromProto(message.Data())
		if err != nil {
			ms.netService.node.logger.Error(err)
			return
		}
		hash := blk.Blk.GetHash()
		ms.addPerformanceTime(hash)
	}
	// Acknowledge receipt so the sender stops resending this message.
	err := ms.netService.SendMessageToPeer(MessageResponse, message.Hash(), message.MessageFrom())
	if err != nil {
		ms.netService.node.logger.Debugf("send ConfirmReq Response err:[%s] for message hash:[%s]", err, message.Hash().String())
	}
	hash, err := types.HashBytes(message.Content())
	if err != nil {
		ms.netService.node.logger.Error(err)
		return
	}
	r, err := protos.ConfirmReqBlockFromProto(message.Data())
	if err != nil {
		ms.netService.node.logger.Error(err)
		return
	}
	ms.netService.msgEvent.Publish(common.EventConfirmReq, r.Blk, hash, message.MessageFrom())
}
// onConfirmAck acknowledges a ConfirmAck (vote) to the sender, then
// decodes it and publishes it on the event bus.
func (ms *MessageService) onConfirmAck(message *Message) {
	// Optional performance tracing of every block hash in the vote.
	if ms.netService.node.cfg.PerformanceEnabled {
		ack, err := protos.ConfirmAckBlockFromProto(message.Data())
		if err != nil {
			ms.netService.node.logger.Error(err)
			return
		}
		for _, h := range ack.Hash {
			ms.addPerformanceTime(h)
		}
	}
	// Acknowledge receipt so the sender stops resending this message.
	err := ms.netService.SendMessageToPeer(MessageResponse, message.Hash(), message.MessageFrom())
	if err != nil {
		ms.netService.node.logger.Debugf("send ConfirmAck Response err:[%s] for message hash:[%s]", err, message.Hash().String())
	}
	hash, err := types.HashBytes(message.Content())
	if err != nil {
		ms.netService.node.logger.Error(err)
		return
	}
	ack, err := protos.ConfirmAckBlockFromProto(message.Data())
	if err != nil {
		ms.netService.node.logger.Info(err)
		return
	}
	ms.netService.msgEvent.Publish(common.EventConfirmAck, ack, hash, message.MessageFrom())
}
// onPovStatus decodes a PoV status report from a peer and forwards it on the
// internal event bus together with the hash of the raw message content.
func (ms *MessageService) onPovStatus(message *Message) {
	status, err := protos.PovStatusFromProto(message.data)
	if err != nil {
		ms.netService.node.logger.Errorf("failed to decode PovStatus from peer %s", message.from)
		return
	}
	contentHash, hashErr := types.HashBytes(message.Content())
	if hashErr != nil {
		ms.netService.node.logger.Error(hashErr)
		return
	}
	ms.netService.msgEvent.Publish(common.EventPovPeerStatus, status, contentHash, message.MessageFrom())
}
// onPovPublishReq acknowledges a PoV block publish request to the sender and
// hands the decoded block to the PoV engine via the event bus, tagged as a
// remote broadcast.
func (ms *MessageService) onPovPublishReq(message *Message) {
	logger := ms.netService.node.logger
	if err := ms.netService.SendMessageToPeer(MessageResponse, message.Hash(), message.MessageFrom()); err != nil {
		logger.Errorf("send PoV Publish Response err:[%s] for message hash:[%s]", err, message.Hash().String())
	}
	block, err := protos.PovPublishBlockFromProto(message.Data())
	if err != nil {
		logger.Info(err)
		return
	}
	ms.netService.msgEvent.Publish(common.EventPovRecvBlock, block.Blk, types.PovBlockFromRemoteBroadcast, message.MessageFrom())
}
// onPovBulkPullReq decodes a PoV bulk-pull request and republishes it on the
// event bus along with the hash of the raw message content.
func (ms *MessageService) onPovBulkPullReq(message *Message) {
	contentHash, err := types.HashBytes(message.Content())
	if err != nil {
		ms.netService.node.logger.Error(err)
		return
	}
	pullReq, decodeErr := protos.PovBulkPullReqFromProto(message.Data())
	if decodeErr != nil {
		ms.netService.node.logger.Info(decodeErr)
		return
	}
	ms.netService.msgEvent.Publish(common.EventPovBulkPullReq, pullReq, contentHash, message.MessageFrom())
}
// onPovBulkPullRsp decodes a PoV bulk-pull response and republishes it on the
// event bus along with the hash of the raw message content.
func (ms *MessageService) onPovBulkPullRsp(message *Message) {
	contentHash, err := types.HashBytes(message.Content())
	if err != nil {
		ms.netService.node.logger.Error(err)
		return
	}
	pullRsp, decodeErr := protos.PovBulkPullRspFromProto(message.Data())
	if decodeErr != nil {
		ms.netService.node.logger.Info(decodeErr)
		return
	}
	ms.netService.msgEvent.Publish(common.EventPovBulkPullRsp, pullRsp, contentHash, message.MessageFrom())
}
// Stop shuts down the message service: it cancels the service context,
// signals the sync service to quit, and deregisters every per-message-type
// subscription (mirroring the registrations performed at startup).
func (ms *MessageService) Stop() {
	//ms.netService.node.logger.Info("stopped message monitor")
	// quit.
	ms.cancel()
	ms.syncService.quitCh <- true
	ms.netService.Deregister(NewSubscriber(ms, ms.publishMessageCh, false, PublishReq))
	ms.netService.Deregister(NewSubscriber(ms, ms.confirmReqMessageCh, false, ConfirmReq))
	ms.netService.Deregister(NewSubscriber(ms, ms.confirmAckMessageCh, false, ConfirmAck))
	ms.netService.Deregister(NewSubscriber(ms, ms.messageCh, false, FrontierRequest))
	ms.netService.Deregister(NewSubscriber(ms, ms.messageCh, false, FrontierRsp))
	ms.netService.Deregister(NewSubscriber(ms, ms.messageCh, false, BulkPullRequest))
	ms.netService.Deregister(NewSubscriber(ms, ms.messageCh, false, BulkPullRsp))
	ms.netService.Deregister(NewSubscriber(ms, ms.messageCh, false, BulkPushBlock))
	ms.netService.Deregister(NewSubscriber(ms, ms.rspMessageCh, false, MessageResponse))
	ms.netService.Deregister(NewSubscriber(ms, ms.povMessageCh, false, PovStatus))
	ms.netService.Deregister(NewSubscriber(ms, ms.povMessageCh, false, PovPublishReq))
	ms.netService.Deregister(NewSubscriber(ms, ms.povMessageCh, false, PovBulkPullReq))
	ms.netService.Deregister(NewSubscriber(ms, ms.povMessageCh, false, PovBulkPullRsp))
}
// marshalMessage serializes value into the wire format for the given message
// type. value must hold the concrete type each case expects (the type
// assertions panic otherwise, which is a programmer error).
func marshalMessage(messageName string, value interface{}) ([]byte, error) {
	switch messageName {
	case PublishReq:
		packet := protos.PublishBlock{
			Blk: value.(*types.StateBlock),
		}
		data, err := protos.PublishBlockToProto(&packet)
		if err != nil {
			return nil, err
		}
		return data, nil
	case ConfirmReq:
		packet := &protos.ConfirmReqBlock{
			Blk: value.(*types.StateBlock),
		}
		data, err := protos.ConfirmReqBlockToProto(packet)
		if err != nil {
			return nil, err
		}
		return data, nil
	case ConfirmAck:
		data, err := protos.ConfirmAckBlockToProto(value.(*protos.ConfirmAckBlock))
		if err != nil {
			return nil, err
		}
		return data, nil
	case FrontierRequest:
		data, err := protos.FrontierReqToProto(value.(*protos.FrontierReq))
		if err != nil {
			return nil, err
		}
		return data, nil
	case FrontierRsp:
		packet := value.(*protos.FrontierResponse)
		data, err := protos.FrontierResponseToProto(packet)
		if err != nil {
			return nil, err
		}
		return data, nil
	case BulkPullRequest:
		data, err := protos.BulkPullReqPacketToProto(value.(*protos.BulkPullReqPacket))
		if err != nil {
			return nil, err
		}
		return data, nil
	case BulkPullRsp:
		PullRsp := &protos.BulkPullRspPacket{
			Blk: value.(*types.StateBlock),
		}
		data, err := protos.BulkPullRspPacketToProto(PullRsp)
		if err != nil {
			return nil, err
		}
		// Return an explicit nil like every other case; err is known to be
		// nil here (the previous `return data, err` was misleading).
		return data, nil
	case BulkPushBlock:
		push := &protos.BulkPush{
			Blk: value.(*types.StateBlock),
		}
		data, err := protos.BulkPushBlockToProto(push)
		if err != nil {
			return nil, err
		}
		return data, nil
	case MessageResponse:
		hash := value.(types.Hash)
		// NOTE(review): MarshalText error is deliberately discarded —
		// presumably it cannot fail for a Hash; confirm in types.Hash.
		data, _ := hash.MarshalText()
		return data, nil
	case PovStatus:
		status := value.(*protos.PovStatus)
		data, err := protos.PovStatusToProto(status)
		if err != nil {
			return nil, err
		}
		return data, nil
	case PovPublishReq:
		packet := protos.PovPublishBlock{
			Blk: value.(*types.PovBlock),
		}
		data, err := protos.PovPublishBlockToProto(&packet)
		if err != nil {
			return nil, err
		}
		return data, nil
	case PovBulkPullReq:
		req := value.(*protos.PovBulkPullReq)
		data, err := protos.PovBulkPullReqToProto(req)
		if err != nil {
			return nil, err
		}
		return data, nil
	case PovBulkPullRsp:
		rsp := value.(*protos.PovBulkPullRsp)
		data, err := protos.PovBulkPullRspToProto(rsp)
		if err != nil {
			return nil, err
		}
		return data, nil
	default:
		// Lowercase per Go error-string convention (was "unKnown Message Type").
		return nil, errors.New("unknown message type")
	}
}
// addPerformanceTime records a T0 (receive) timestamp for the given block
// hash, but only when no performance record exists yet and the block itself
// is not already stored in the ledger.
func (ms *MessageService) addPerformanceTime(hash types.Hash) {
	if exist, err := ms.ledger.IsPerformanceTimeExist(hash); !exist && err == nil {
		if b, err := ms.ledger.HasStateBlock(hash); !b && err == nil {
			t := &types.PerformanceTime{
				Hash: hash,
				T0:   time.Now().UnixNano(),
				T1:   0,
				T2:   0,
				T3:   0,
			}
			err = ms.ledger.AddOrUpdatePerformance(t)
			if err != nil {
				// This helper is called from both onConfirmReq and
				// onConfirmAck, so the message names the helper itself
				// (the old text wrongly blamed onConfirmAck only).
				ms.netService.node.logger.Error("error when run AddOrUpdatePerformance in addPerformanceTime func")
			}
		}
	}
}
// inSliceIface reports whether v equals any element of sl. v is compared
// against each *cacheValue via ordinary interface comparison.
func inSliceIface(v interface{}, sl []*cacheValue) bool {
	for i := range sl {
		if sl[i] == v {
			return true
		}
	}
	return false
}
// sliceDiff returns the elements of slice1 that do not appear in slice2
// (set difference slice1 - slice2), preserving slice1's order.
func sliceDiff(slice1, slice2 []*cacheValue) (diffslice []*cacheValue) {
	for _, candidate := range slice1 {
		if inSliceIface(candidate, slice2) {
			continue
		}
		diffslice = append(diffslice, candidate)
	}
	return diffslice
}
|
package utils
/*
import (
"context"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/golang/glog"
"sub_account_service/blockchain/lib"
)
// Global variable holding all nonces used by the API server
var MyNonce *Nonce
type Nonce struct {
m map[string]uint64
Lock sync.RWMutex
}
func NewNonceMap() {
MyNonce = &Nonce{
m: make(map[string]uint64),
}
}
// Calc updates the local nonce map so that we can write
// to the blockchain node concurrently.
func (this *Nonce) Calc(auth *bind.TransactOpts, client *ethclient.Client) int64 {
this.Lock.Lock()
defer this.Lock.Unlock()
user_address := lib.Byte20ToStr(auth.From)
// If no nonce exists yet for this user_address, fetch it
// from the blockchain via the RPC call and refresh
// the MyNonce map.
if _, ok := this.m[user_address]; !ok {
nonce, err := client.PendingNonceAt(context.TODO(), auth.From)
if err != nil {
glog.Errorln("Nonce Calc error :", err)
return -1
}
this.m[user_address] = nonce
auth.Nonce = big.NewInt(int64(nonce))
this.m[user_address]++
return int64(nonce)
} else {
// Else just add is just fine.
nonce := this.m[user_address]
this.m[user_address]++
auth.Nonce = big.NewInt(int64(nonce))
return int64(nonce)
}
}
*/
|
package peer
import (
"bytes"
"testing"
)
// TestID exercises peer ID construction and its comparison helpers:
// CreateID, String, Equals, Less, PublicKeyHex, and Xor.
func TestID(t *testing.T) {
	var (
		// Three 32-byte keys; key2 < key1 < key3 in byte order, which the
		// Less subtest relies on.
		publicKey1 = []byte("12345678901234567890123456789012")
		publicKey2 = []byte("12345678901234567890123456789011")
		publicKey3 = []byte("12345678901234567890123456789013")
		address    = "localhost:12345"
		id1        = CreateID(address, publicKey1)
		id2        = CreateID(address, publicKey2)
		id3        = CreateID(address, publicKey3)
	)
	t.Run("CreateID()", func(t *testing.T) {
		if !bytes.Equal(id1.PublicKey, publicKey1) {
			t.Fatalf("wrong public key: %s != %s", id1.PublicKey, publicKey1)
		}
		if id1.Address != address {
			t.Fatalf("wrong address: %s != %s", id1.Address, address)
		}
	})
	t.Run("String()", func(t *testing.T) {
		// Pins the exact String() rendering (byte-slice form of the key).
		if id1.String() != "ID{PublicKey: [49 50 51 52 53 54 55 56 57 48 49 50 51 52 53 54 55 56 57 48 49 50 51 52 53 54 55 56 57 48 49 50], Address: localhost:12345}" {
			t.Fatalf("string() error: %s", id1.String())
		}
	})
	t.Run("Equals()", func(t *testing.T) {
		if !id1.Equals(CreateID(address, publicKey1)) {
			t.Fatalf("%s != %s", id1.PublicKeyHex(), id2.PublicKeyHex())
		}
	})
	t.Run("Less()", func(t *testing.T) {
		if id1.Less(id2) {
			t.Fatalf("%s < %s", id1.PublicKeyHex(), id2.PublicKeyHex())
		}
		if !id1.Less(id3) {
			t.Fatalf("%s >= %s", id1.PublicKeyHex(), id3.PublicKeyHex())
		}
	})
	t.Run("PublicKeyHex()", func(t *testing.T) {
		if id1.PublicKeyHex() != "3132333435363738393031323334353637383930313233343536373839303132" {
			t.Fatalf("publickeyhex() error or hex.encodetostring() changed definition? value: %v", id1.PublicKeyHex())
		}
	})
	t.Run("Xor()", func(t *testing.T) {
		// publicKey1 XOR publicKey3 differ only in the last byte ('2'^'3' = 1).
		xor := CreateID(
			address,
			[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
		)
		result := id1.Xor(id3)
		if !xor.Equals(result) {
			t.Fatalf("%v != %v", xor, result)
		}
	})
}
|
package handlers
import (
"fmt"
"log"
"strconv"
"strings"
"time"
"github.com/openfaas/faas/gateway/metrics"
"github.com/prometheus/client_golang/prometheus"
)
// HTTPNotifier is notified once per handled HTTP request/response pair.
type HTTPNotifier interface {
	Notify(method string, URL string, originalURL string, statusCode int, duration time.Duration)
}

// PrometheusServiceNotifier records metrics for core service endpoints.
type PrometheusServiceNotifier struct {
	// ServiceMetrics holds the counter and histogram updated by Notify.
	ServiceMetrics *metrics.ServiceMetricOptions
}
// Notify records one service-endpoint request in the counter and latency
// histogram, labelled by method, normalized path, and status code.
func (psn PrometheusServiceNotifier) Notify(method string, URL string, originalURL string, statusCode int, duration time.Duration) {
	labels := []string{method, urlToLabel(URL), strconv.Itoa(statusCode)}
	psn.ServiceMetrics.Counter.WithLabelValues(labels...).Inc()
	psn.ServiceMetrics.Histogram.WithLabelValues(labels...).Observe(duration.Seconds())
}
// urlToLabel normalizes a request path for use as a Prometheus label:
// trailing slashes are stripped, and the empty path maps to "/".
func urlToLabel(path string) string {
	// TrimRight on an empty string is a no-op, so no length guard is needed.
	path = strings.TrimRight(path, "/")
	if path == "" {
		path = "/"
	}
	return path
}
// PrometheusFunctionNotifier records per-function invocation metrics to Prometheus.
type PrometheusFunctionNotifier struct {
	// Metrics holds the histogram and invocation counter updated by Notify.
	Metrics *metrics.MetricOptions
}
// Notify records one function invocation in Prometheus: latency goes into the
// per-function histogram, and the invocation counter is labelled with the
// function name and HTTP status code.
func (p PrometheusFunctionNotifier) Notify(method string, URL string, originalURL string, statusCode int, duration time.Duration) {
	functionName := getServiceName(originalURL)
	p.Metrics.GatewayFunctionsHistogram.
		WithLabelValues(functionName).
		Observe(duration.Seconds())
	p.Metrics.GatewayFunctionInvocation.
		With(prometheus.Labels{"function_name": functionName, "code": strconv.Itoa(statusCode)}).
		Inc()
}
// getServiceName extracts the function name from a gateway URL. For a path
// like `/function/xyz/rest/of/path?q=a` it returns "xyz"; for any URL not
// under /function/ (or one the matcher rejects) it returns "".
func getServiceName(urlValue string) string {
	forward := "/function/"
	if !strings.HasPrefix(urlValue, forward) {
		return ""
	}
	// On a positive match functionMatcher returns a three-element slice:
	// index 0 is the whole URL, index nameIndex the service name, and the
	// last element the rest of the path.
	matches := functionMatcher.Copy().FindStringSubmatch(urlValue)
	if len(matches) != hasPathCount {
		return ""
	}
	return strings.Trim(matches[nameIndex], "/")
}
// LoggingNotifier writes one log line per forwarded request.
type LoggingNotifier struct {
}

// Notify logs the method, upstream URL, status code, and duration in seconds
// of a forwarded request to the standard logger.
func (LoggingNotifier) Notify(method string, URL string, originalURL string, statusCode int, duration time.Duration) {
	log.Printf("Forwarded [%s] to %s - [%d] - %f seconds", method, originalURL, statusCode, duration.Seconds())
}
|
package planets
import "context"
// Service exposes planet-related queries.
type Service interface {
	// CountMovies returns the number of movies associated with planetName.
	// NOTE(review): semantics inferred from the name only — confirm against
	// the implementation.
	CountMovies(ctx context.Context, planetName string) (int, error)
}
|
package server
import (
"context"
"fmt"
"math/rand"
"net/http"
"time"
"github.com/go-co-op/gocron"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"google.golang.org/grpc"
"github.com/go-sink/sink/internal/app/config"
"github.com/go-sink/sink/internal/app/handlers"
"github.com/go-sink/sink/internal/app/handlers/sinkapi"
"github.com/go-sink/sink/internal/app/repository"
"github.com/go-sink/sink/internal/cron"
"github.com/go-sink/sink/internal/pkg/gateway"
"github.com/go-sink/sink/internal/app/service"
"github.com/go-sink/sink/internal/pkg/urlgen"
)
// Server contains application dependencies.
type Server struct {
	httpAddr, grpcAddr string
	allowedOrigins     []string
	grpcServer         *grpc.Server
	httpServer         *http.Server
	registrar          handlers.Registrar
}

// InitApp initializes handlers and transport: it opens the database
// connection, schedules the background link-status checker, builds the
// URL-shortening service, and wires the gRPC/HTTP transports.
func InitApp(ctx context.Context, config config.Config) (*Server, error) {
	s := &Server{
		allowedOrigins: config.App.AllowedOrigins,
		httpAddr:       config.App.HTTPAddr,
		grpcAddr:       config.App.GRPCAddr,
	}
	// set up database
	dbcfg := config.Database
	db, err := setUpDb(dbcfg)
	if err != nil {
		return nil, fmt.Errorf("error opening database connection: %s", err)
	}
	// Non-cryptographic randomness is acceptable for short-URL generation.
	random := rand.New(rand.NewSource(time.Now().Unix())) //nolint:gosec
	linkRepo := repository.NewLinkRepository(db)
	linkStatusChecker := cron.NewLinkStatusChecker(linkRepo)
	// TODO: Add leader election
	// Re-check link health every 10 seconds in the background.
	scheduler := gocron.NewScheduler(time.UTC)
	_, err = scheduler.Every(10).Second().Do(linkStatusChecker.CheckLinks, ctx)
	if err != nil {
		return nil, err
	}
	scheduler.StartAsync()
	urlEncoder := service.NewGenerator(urlgen.NewRandomURLGenerator(random), linkRepo)
	sinkAPIHandler := sinkapi.New(urlEncoder)
	s.registrar = handlers.NewRegistrar(sinkAPIHandler)
	if err = s.initTransport(ctx, runtime.WithForwardResponseOption(gateway.ReplaceHeaders())); err != nil {
		return nil, fmt.Errorf("error initializing transport: %w", err)
	}
	return s, nil
}
|
package main
import (
"net"
"google.golang.org/grpc"
"context"
"math/rand"
"time"
"fmt"
"sms-grpc/main/util"
"sms-grpc/main/sms"
"encoding/json"
)
// SmsServer implements the gRPC SMS service.
type SmsServer struct{}

// SendSms generates a random six-digit verification code, sends it to the
// requested phone number as a JSON payload `{"code": "..."}`, and reports the
// outcome in the reply (200/true on success, 400/false with the error text
// on failure). The success message text is user-facing Chinese.
func (s *SmsServer) SendSms(ctx context.Context, in *sms.SmsRequest) (*sms.SmsReply, error) {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	// %06v zero-pads the code to exactly six digits.
	code := fmt.Sprintf("%06v", rnd.Int31n(1000000))
	m := make(map[string]string)
	m["code"] = code
	// Marshalling a map[string]string cannot fail, so the error is discarded.
	jsonStr, _ := json.Marshal(m)
	str := string(jsonStr)
	response := new(sms.SmsReply)
	if err := util.SendSms(in.Phone, str); err != nil {
		response.Code = 400
		response.Result = "false"
		response.Message = err.Error()
	} else {
		response.Code = 200
		response.Result = "true"
		response.Message = "向手机号" + in.Phone + "发送了一条短信验证码为:" + code
	}
	return response, nil
}
// main starts the gRPC SMS service on a fixed local address.
func main() {
	serviceAddress := "127.0.0.1:8001"
	smsServer := new(SmsServer)
	fmt.Println("服务端启动成功,监听端口:" + serviceAddress)
	// Create the TCP listener. The error was previously discarded, which
	// made gs.Serve panic on a nil listener when the port was unavailable.
	ls, err := net.Listen("tcp", serviceAddress)
	if err != nil {
		fmt.Println("failed to listen on "+serviceAddress+":", err)
		return
	}
	// Create the gRPC server and register the SMS service implementation.
	gs := grpc.NewServer()
	sms.RegisterSmsServiceServer(gs, smsServer)
	// Serve blocks until the server stops or fails.
	if err := gs.Serve(ls); err != nil {
		fmt.Println("grpc server stopped:", err)
	}
}
|
package golum
import (
"fmt"
"os"
"testing"
)
// TestCreateOneHistogram loads a single column from the iris fixture,
// renders its histogram, and removes the generated image file.
func TestCreateOneHistogram(t *testing.T) {
	file := "data/labeled_iris.csv"
	cols := []string{"sepal_length"}
	df, err := GetDFFromCSV(file, cols)
	if err != nil {
		// Fatal: continuing with an invalid DataFrame would only produce
		// confusing follow-on failures in CreateHistograms.
		t.Fatal(err)
	}
	if err := CreateHistograms(&df, nil); err != nil {
		t.Error(err.Error())
	}
	// Clean up the PNGs the histogram writer produces.
	for _, col := range cols {
		name := fmt.Sprintf("%s_histogram.png", col)
		os.Remove(name)
	}
}
// TestMultipleHistograms loads two columns from the iris fixture, renders a
// histogram for each, and removes the generated image files.
func TestMultipleHistograms(t *testing.T) {
	file := "data/labeled_iris.csv"
	cols := []string{"sepal_length", "sepal_width"}
	df, err := GetDFFromCSV(file, cols)
	if err != nil {
		// Fatal: continuing with an invalid DataFrame would only produce
		// confusing follow-on failures in CreateHistograms.
		t.Fatal(err)
	}
	if err := CreateHistograms(&df, cols); err != nil {
		t.Error(err.Error())
	}
	// Clean up the PNGs the histogram writer produces.
	for _, col := range cols {
		name := fmt.Sprintf("%s_histogram.png", col)
		os.Remove(name)
	}
}
|
package pipeline
import (
"sync"
"sync/atomic"
"github.com/sherifabdlnaby/prism/app/component"
"github.com/sherifabdlnaby/prism/app/pipeline/node"
"github.com/sherifabdlnaby/prism/app/pipeline/persistence"
"github.com/sherifabdlnaby/prism/pkg/job"
"github.com/sherifabdlnaby/prism/pkg/response"
"go.uber.org/zap"
)
// pipeline holds the recursive tree of nodes (root plus their next nodes),
// the channels feeding it jobs, and the bookkeeping used to drain it cleanly.
type pipeline struct {
	name            string
	hash            string
	root            *node.Next
	resource        component.Resource
	receiveJobChan  <-chan job.Job
	handleAsyncJobs chan *job.Async
	nodeMap         map[node.ID]*node.Node
	bucket          persistence.Bucket
	activeJobs      sync.WaitGroup // counts in-flight jobs; Stop waits on it
	jobsCounter     int32          // mirror of activeJobs, written via sync/atomic
	logger          zap.SugaredLogger
}

// root is the sentinel node ID meaning "send the job to the pipeline's root".
const root node.ID = ""
// Start launches the pipeline: it starts the node tree, then begins serving
// incoming jobs and async responses in background goroutines.
func (p *pipeline) Start() error {
	if err := p.root.Start(); err != nil {
		return err
	}
	go p.asyncResponsesManager()
	go p.serve()
	return nil
}
// Stop stops the pipeline: it waits for every in-flight job to return, then
// stops the node tree. Jobs received afterwards get error responses unless
// the pipeline is started again.
func (p *pipeline) Stop() error {
	p.activeJobs.Wait()
	return p.root.Stop()
}
// serve forwards every job arriving on the input channel to the root node,
// handling each in its own goroutine.
func (p *pipeline) serve() {
	for incoming := range p.receiveJobChan {
		go p.handleJob(incoming, root)
	}
}
// handleJob runs one job through the pipeline: it acquires the shared
// resource, forwards the job to the named node (or the root for the empty
// sentinel ID), and relays the node's response back to the original sender.
func (p *pipeline) handleJob(j job.Job, nodeID node.ID) {
	p.activeJobs.Add(1)
	atomic.AddInt32(&p.jobsCounter, 1)
	// Release the bookkeeping on every exit path. The previous version
	// returned early on a failed Acquire without calling activeJobs.Done()
	// or decrementing jobsCounter, which made Stop() block forever.
	defer func() {
		atomic.AddInt32(&p.jobsCounter, -1)
		p.activeJobs.Done()
	}()
	if err := p.resource.Acquire(j.Context); err != nil {
		j.ResponseChan <- response.NoAck(err)
		return
	}
	defer p.resource.Release()
	// -----------------------------------------
	// Buffered so the handling node never blocks on our receive below.
	responseChan := make(chan response.Response, 1)
	forwarded := job.Job{
		Payload:      j.Payload,
		Data:         j.Data,
		ResponseChan: responseChan,
		Context:      j.Context,
	}
	// If a node name is supplied, send the job to that node, else the root.
	if nodeID == root {
		p.root.HandleJob(forwarded)
	} else {
		p.nodeMap[nodeID].HandleJob(forwarded)
	}
	// Await the node's response and relay it to the original sender.
	j.ResponseChan <- <-responseChan
}
// convertToAsync persists j as an async job in the bucket, hands it to the
// async manager, and immediately ACKs the original sender (the async manager
// owns the job's real response from here on).
func (p *pipeline) convertToAsync(ID node.ID, j job.Job) (*job.Job, error) {
	asyncJob, err := p.bucket.CreateAsyncJob(ID, j)
	if err != nil {
		return nil, err
	}
	p.startAsyncJob(asyncJob)
	// Respond to Awaiting sender as now the new process is gonna be handled by Async Manager
	j.ResponseChan <- response.ACK
	return &asyncJob.Job, nil
}

// startAsyncJob accounts for the async job (WaitGroup + counter, released in
// waitAndFinalizeAsyncJob) and queues it for the async response manager.
func (p *pipeline) startAsyncJob(asyncJob *job.Async) {
	// Acquire Resources
	p.activeJobs.Add(1)
	atomic.AddInt32(&p.jobsCounter, 1)
	// Send Async JOB to async handler to deal with its response
	p.handleAsyncJobs <- asyncJob
}

// asyncResponsesManager finalizes each queued async job in its own goroutine.
func (p *pipeline) asyncResponsesManager() {
	for asyncJob := range p.handleAsyncJobs {
		go p.waitAndFinalizeAsyncJob(*asyncJob)
	}
}

// waitAndFinalizeAsyncJob blocks until the async job's response arrives, logs
// any failure, deletes the persisted entry, and releases the job accounting
// taken in startAsyncJob.
func (p *pipeline) waitAndFinalizeAsyncJob(asyncJob job.Async) {
	defer func() {
		atomic.AddInt32(&p.jobsCounter, -1)
		p.activeJobs.Done()
	}()
	response := <-asyncJob.JobResponseChan
	if response.Error != nil {
		p.logger.Errorw("error occurred when processing an async request", "error", response.Error.Error())
	}
	// Delete Entry from Repository
	err := p.bucket.DeleteAsyncJob(&asyncJob)
	if err != nil {
		p.logger.Errorw("an error occurred while applying persisted async requests", "error", err.Error())
	}
	p.logger.Debug("DONE WITH ", asyncJob.Data["count"])
}
// ActiveJobs reports the number of jobs currently in flight.
func (p *pipeline) ActiveJobs() int {
	// jobsCounter is written with atomic.AddInt32 elsewhere in this file, so
	// it must be read atomically too; a plain read is a data race.
	return int(atomic.LoadInt32(&p.jobsCounter))
}
|
// Package lissajous generates GIF animations of random Lissajous figures with given parameters. Exercises 1.5, 1.12
package lissajous
import (
"image"
"image/color"
"image/gif"
"io"
"math"
"math/rand"
)
// palette holds the two drawing colors: the background and the figure color.
var palette = []color.Color{color.Black, color.RGBA{G: 255, R: 0, B: 0, A: 100}}

const (
	blackIndex = 0 // first color in palette
	greenIndex = 1 // next color in palette
)

// Lissajous writes an animated GIF of a random Lissajous figure to out.
// Zero-valued parameters fall back to the defaults in getParameters; the
// y-oscillator frequency is drawn from the package rand source.
func Lissajous(out io.Writer, parseCycles int, parseRes float64, parseSize, parseNframes, parseDelay int) {
	cycles, res, size, nframes, delay := getParameters(parseCycles, parseRes, parseSize, parseNframes, parseDelay)
	freq := rand.Float64() * 3.0 // relative frequency of y oscillator
	anim := gif.GIF{LoopCount: nframes}
	phase := 0.0 // phase difference
	for i := 0; i < nframes; i++ {
		rect := image.Rect(0, 0, 2*size+1, 2*size+1)
		img := image.NewPaletted(rect, palette)
		// Trace one frame of the curve x=sin(t), y=sin(t*freq+phase),
		// mapping [-1,1] onto the canvas with rounding via +0.5.
		for t := 0.0; t < float64(cycles)*2*math.Pi; t += res {
			x := math.Sin(t)
			y := math.Sin(t*freq + phase)
			img.SetColorIndex(size+int(x*float64(size)+0.5), size+int(y*float64(size)+0.5),
				greenIndex)
		}
		phase += 0.1
		anim.Delay = append(anim.Delay, delay)
		anim.Image = append(anim.Image, img)
	}
	gif.EncodeAll(out, &anim) // NOTE: ignoring encoding errors
}
// getParameters resolves the animation parameters, substituting a default
// for every argument left at its zero value: cycles=5, res=0.001, size=100,
// nframes=64, delay=8 (in 10ms units).
func getParameters(parseCycles int, parseRes float64, parseSize int, parseNframes int, parseDelay int,
) (int, float64, int, int, int) {
	// intOr returns v unless it is zero, in which case fallback is used.
	intOr := func(v, fallback int) int {
		if v != 0 {
			return v
		}
		return fallback
	}
	res := parseRes
	if res == 0.0 {
		res = 0.001 // angular resolution
	}
	return intOr(parseCycles, 5), // complete x oscillator revolutions
		res,
		intOr(parseSize, 100), // canvas covers [-size..+size]
		intOr(parseNframes, 64), // animation frames
		intOr(parseDelay, 8) // delay between frames in 10ms units
}
|
package game_map
import (
"fmt"
"github.com/steelx/go-rpg-cgm/combat"
)
// CEAttack is a combat event representing one actor attacking one or more
// targets; its animation and damage application are driven by a storyboard.
type CEAttack struct {
	name            string
	countDown       float64
	owner           *combat.Actor
	Targets         []*combat.Actor
	Scene           *CombatState
	Finished        bool // set by onFinished at the end of the storyboard
	Character       *Character
	Storyboard      *Storyboard
	AttackEntityDef EntityDefinition // hit-effect sprite definition
	DefaultTargeter func(state *CombatState) []*combat.Actor
	options         AttackOptions
}

// AttackOptions configures a CEAttack.
type AttackOptions struct {
	// Counter — NOTE(review): appears to mean "this attack is itself a
	// counter-attack" (see DoAttack's !Counter guard); confirm.
	Counter bool
}

// CEAttackCreate builds an attack combat event: it snaps the attacker into
// the prone pose and assembles the storyboard that animates the move, runs
// DoAttack, and returns the attacker to standby.
func CEAttackCreate(scene *CombatState, owner *combat.Actor, targets []*combat.Actor, options AttackOptions) *CEAttack {
	c := &CEAttack{
		options:   options,
		Scene:     scene,
		owner:     owner,
		Targets:   targets,
		Character: scene.ActorCharMap[owner],
		name:      fmt.Sprintf("Attack for %s ->)", owner.Name),
	}
	c.Character.Controller.Change(csRunanim, csProne, true) //CombatState, CombatAnimationID
	var storyboardEvents []interface{}
	if owner.IsPlayer() {
		// Player attack: step forward, play attack animation, strike, step back.
		c.DefaultTargeter = CombatSelector.WeakestEnemy
		c.AttackEntityDef = Entities["slash"]
		storyboardEvents = []interface{}{
			//stateMachine, stateID, ...animID, additionalParams
			RunState(c.Character.Controller, csMove, CSMoveParams{Dir: 3}),
			RunState(c.Character.Controller, csRunanim, csAttack, false),
			RunFunction(c.DoAttack),
			RunState(c.Character.Controller, csMove, CSMoveParams{Dir: -3}),
			RunFunction(c.onFinished),
			RunState(c.Character.Controller, csRunanim, csStandby, false),
		}
	} else {
		// Enemy attack: lunge toward the party, strike, retreat.
		c.DefaultTargeter = CombatSelector.RandomAlivePlayer
		c.AttackEntityDef = Entities["claw"]
		storyboardEvents = []interface{}{
			RunState(c.Character.Controller, csMove, CSMoveParams{Dir: -3, Distance: 10, Time: 0.2}),
			RunFunction(c.DoAttack),
			RunState(c.Character.Controller, csMove, CSMoveParams{Dir: 3, Distance: 10, Time: 0.4}),
			RunFunction(c.onFinished),
			RunState(c.Character.Controller, csRunanim, csStandby, false),
		}
	}
	c.Storyboard = StoryboardCreate(scene.InternalStack, scene.win, storyboardEvents, false)
	return c
}
// Name returns the display name of this attack event.
func (c CEAttack) Name() string {
	return c.name
}

// CountDown returns the time remaining until the event fires.
func (c CEAttack) CountDown() float64 {
	return c.countDown
}

// CountDownSet updates the time remaining until the event fires.
func (c *CEAttack) CountDownSet(t float64) {
	c.countDown = t
}

// Owner returns the actor performing the attack.
func (c CEAttack) Owner() *combat.Actor {
	return c.owner
}

// Update is a no-op; the attack is driven by its storyboard instead.
func (c CEAttack) Update() {
}

// IsFinished reports whether the attack storyboard has completed.
func (c CEAttack) IsFinished() bool {
	return c.Finished
}

// Execute pushes the attack storyboard onto the combat stack, prunes targets
// whose HP has dropped to zero, and retargets via DefaultTargeter when no
// valid targets remain.
func (c *CEAttack) Execute(queue *EventQueue) {
	c.Scene.InternalStack.Push(c.Storyboard)
	// Iterate backwards so removals do not shift unvisited indices.
	for i := len(c.Targets) - 1; i >= 0; i-- {
		v := c.Targets[i]
		hpNow := v.Stats.Get("HpNow")
		if hpNow <= 0 {
			c.Targets = c.removeAtIndex(c.Targets, i)
		}
	}
	//find next Target!
	if len(c.Targets) == 0 {
		c.Targets = c.DefaultTargeter(c.Scene)
	}
}

// removeAtIndex deletes element i using the standard append idiom; note the
// backing array of arr is reused (elements past i are shifted in place).
func (c CEAttack) removeAtIndex(arr []*combat.Actor, i int) []*combat.Actor {
	return append(arr[:i], arr[i+1:]...)
}

// TimePoints converts the owner's Speed stat into event-queue time points.
func (c CEAttack) TimePoints(queue *EventQueue) float64 {
	speed := c.Owner().Stats.Get("Speed")
	return queue.SpeedToTimePoints(speed)
}

// onFinished marks the attack as done; scheduled as a storyboard step.
func (c *CEAttack) onFinished() {
	c.Finished = true
}

//CounterTarget - Decide if the attack is countered.
func (c *CEAttack) CounterTarget(target *combat.Actor) {
	countered := Formula.IsCountered(c.Scene, c.owner, target)
	if countered {
		c.Scene.ApplyCounter(target, c.owner)
	}
}

// DoAttack strikes every current target and, when this attack is not itself
// a counter (options.Counter is false), gives each target a chance to
// counter back. NOTE(review): confirm the intended meaning of Counter — the
// !c.options.Counter guard reads inverted at first glance.
func (c *CEAttack) DoAttack() {
	for _, v := range c.Targets {
		c.attackTarget(v)
		if !c.options.Counter {
			c.CounterTarget(v)
		}
	}
}

// attackTarget resolves one melee strike: a miss aborts early; a dodge is
// announced but still falls through to damage application (NOTE(review):
// presumably Formula.MeleeAttack returns reduced damage on dodge — confirm);
// a critical sets the crit flag; finally a hit effect is spawned on the
// target.
func (c *CEAttack) attackTarget(target *combat.Actor) {
	//hit result lets us know the status of this attack
	damage, hitResult := Formula.MeleeAttack(c.Scene, c.owner, target)
	entity := c.Scene.ActorCharMap[target].Entity
	if hitResult == HitResultMiss {
		c.Scene.ApplyMiss(target)
		return
	} else if hitResult == HitResultDodge {
		c.Scene.ApplyDodge(target)
	}
	var isCrit bool
	if hitResult == HitResultCritical {
		isCrit = true
	}
	c.Scene.ApplyDamage(target, damage, isCrit)
	//FX
	pos := entity.GetSelectPosition()
	x, y := pos.X, pos.Y-entity.Height/2
	effect := AnimEntityFxCreate(x, y, c.AttackEntityDef, c.AttackEntityDef.Frames)
	c.Scene.AddEffect(effect)
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"math/rand"
"net/http"
"time"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/ironarachne/culturegen"
"github.com/ironarachne/random"
)
// getCulture generates a culture deterministically from the {id} URL
// parameter (the id seeds the RNG) and writes it as JSON.
func getCulture(w http.ResponseWriter, r *http.Request) {
	seed := chi.URLParam(r, "id")
	random.SeedFromString(seed)
	culture := culturegen.GenerateCulture()
	json.NewEncoder(w).Encode(culture)
}
// getCultureRandom generates a culture from a time-based seed and writes it
// as JSON, so each request yields a different culture.
func getCultureRandom(w http.ResponseWriter, r *http.Request) {
	rand.Seed(time.Now().UnixNano())
	culture := culturegen.GenerateCulture()
	json.NewEncoder(w).Encode(culture)
}
// main wires the chi router with the standard middleware stack and starts
// the culture generator API on port 9913.
func main() {
	r := chi.NewRouter()
	// Request IDs, real client IPs, request logging, panic recovery, URL
	// format parsing, a JSON content type, and a 60s request timeout.
	r.Use(middleware.RequestID)
	r.Use(middleware.RealIP)
	r.Use(middleware.Logger)
	r.Use(middleware.Recoverer)
	r.Use(middleware.URLFormat)
	r.Use(middleware.SetHeader("Content-Type", "application/json"))
	r.Use(middleware.Timeout(60 * time.Second))
	// "/" returns a time-seeded culture; "/{id}" a deterministic one.
	r.Get("/", getCultureRandom)
	r.Get("/{id}", getCulture)
	fmt.Println("Culture Generator API is online.")
	log.Fatal(http.ListenAndServe(":9913", r))
}
|
package main
import "logging"
import "sensor"
import "sensor/reader"
import "actuator"
/* Device configuration is currently hard-coded here.
* That is:
* - Fan curves information
* - Sensors information (file path, command line, ...)
* - Actuators informations (file path, command line, ...)
*/
func loadConfigurationInto(
map[string]*sensor.Sensor,
map[string]actuator.Curve,
map[string]*actuator.Actuator,
map[string]*ControlLoop,
) {
logging.Trace("Loading configuration")
globalCurve := actuator.ClampedLinear(50000, 30, 70000, 100)
curves["global"] = globalCurve
cpuT := sensor.New(
"cpuT",
//reader.FromFile("/sys/devices/pci0000:00/0000:00:18.3/hwmon/hwmon0/temp1_input"),
reader.FromFile("/sys/class/hwmon/hwmon1/temp2_input"),
sensor.MilliCelsius,
)
// CPU Should draw ~90 watts under load at stock, ~150W overclocked
// https://bit-tech.net/reviews/tech/amd-ryzen-5-1600-review/6/
// For idle, a wild guess would be 10-15W ?
// So, Draw[W] = 15[W] + (90[W]-15[W])*utilization[%]
cpuP := sensor.New(
"cpuP",
reader.FromCmd(
reader.NewCmdLine("s-tui", "-t"),
reader.NewCmdLine("sed", "s|.*Util: Avg: \\([0-9.]*\\).*|\\1|"),
),
sensor.Watt,
75, 100, 15000,
)
cpuPWM := sensor.New(
"cpuPWM",
//reader.FromFile("/sys/devices/pci0000:00/0000:00:18.3/hwmon/hwmon0/pwm2"),
reader.FromFile("/sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm2"),
sensor.Natural,
)
gpuT := sensor.New(
"gpuT",
reader.FromCmd(reader.NewCmdLine("nvidia-smi", "--format=csv,noheader", "--query-gpu=temperature.gpu")),
sensor.Celsius,
)
gpuP := sensor.New(
"gpuP",
reader.FromCmd(reader.NewCmdLine("nvidia-smi", "--format=csv,noheader,nounits", "--query-gpu=power.draw")),
sensor.Watt,
)
gpuPWM := sensor.New(
"gpuPWM",
//reader.FromFile("/sys/devices/pci0000:00/0000:00:18.3/hwmon/hwmon0/pwm1"),
reader.FromFile("/sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1"),
sensor.Natural,
)
sensors["cpuT"]=cpuT
sensors["cpuP"]=cpuP
sensors["cpuPWM"]=cpuPWM
sensors["gpuT"]=gpuT
sensors["gpuP"]=gpuP
sensors["gpuPWM"]=gpuPWM
cpuA := actuator.New(
"cpuA",
curves["global"],
0, 255, // Full range here
samplep, 2, 8, // Max speed in 10s
"/sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm2",
sensors["cpuPWM"],
)
gpuA := actuator.New(
"gpuA",
curves["global"],
0, 255, // Full range here
samplep, 2, 6, // Max speed in 10s
"/sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm3",
sensors["gpuPWM"],
)
actuators["cpuA"] = cpuA
actuators["gpuA"] = gpuA
cpuL := NewControlLoop("cpuL", cpuT, cpuP, cpuA, hysteresis)
gpuL := NewControlLoop("gpuL", gpuT, gpuP, gpuA, hysteresis)
ctrlLoops["cpuL"] = cpuL
ctrlLoops["gpuL"] = gpuL
} |
package main
import (
"fmt"
"time"
)
// StartApp8 demonstrates Go channel basics: unbuffered channels, buffered
// channels, and ranging over a channel until it is closed.
func StartApp8() {
	test8001()
	fmt.Println("================================")
	s := []int{7, 2, 8, -9, 4, 0}
	// Channels are typed pipes; values are sent and received with the <- operator.
	c := make(chan int)
	go sum(s[:len(s)/2], c)
	go sum(s[len(s)/2:], c)
	// The "arrow" points in the direction the data flows.
	x, y := <-c, <-c
	fmt.Println(x, y)
	fmt.Println("================================")
	// Channels can be buffered: pass the buffer length as the second argument
	// to make. Sends block only when the buffer is full; receives block when
	// it is empty.
	ch1 := make(chan int, 2)
	ch1 <- 1
	ch1 <- 2
	fmt.Println(<-ch1)
	fmt.Println(<-ch1)
	fmt.Println("================================")
	c1 := make(chan int, 10)
	go fibonacci2(cap(c1), c1)
	// range keeps receiving until fibonacci2 closes c1.
	for i := range c1 {
		fmt.Println(i)
	}
	fmt.Println("================================")
}

// Tree is a binary tree node. Different binary trees can store the same leaf
// value sequence (e.g. two trees both holding 1,1,2,3,5,8,13); Go's
// concurrency and channels give a simple way to compare such sequences,
// which is complex in most languages.
type Tree struct {
	Left  *Tree
	Value int
	Right *Tree
}
// test8003 shows select with a default case: when no other branch is ready,
// default runs, so a send or receive attempt never blocks. It prints dots
// until the 500ms "boom" timer fires.
func test8003() {
	tick := time.Tick(100 * time.Millisecond)
	boom := time.After(500 * time.Millisecond)
	for {
		select {
		case <-tick:
			fmt.Println("tick.")
		case <-boom:
			fmt.Println("BOOM!")
			return
		default:
			fmt.Println(" .")
			time.Sleep(50 * time.Millisecond)
		}
	}
}

// test8002 shows the select statement: it lets a goroutine wait on multiple
// communication operations, blocking until one branch can proceed and
// choosing randomly when several are ready at once.
func test8002() {
	c := make(chan int)
	quit := make(chan int)
	go func() {
		for i := 0; i < 10; i++ {
			fmt.Println(<-c)
		}
		quit <- 0
	}()
	fibonacci3(c, quit)
}
// fibonacci3 streams Fibonacci numbers on c until a value arrives on quit,
// then prints "quit" and returns.
func fibonacci3(c, quit chan int) {
	a, b := 0, 1
	for {
		select {
		case <-quit:
			fmt.Println("quit")
			return
		case c <- a:
			a, b = b, a+b
		}
	}
}
// fibonacci2 sends the first n Fibonacci numbers on c and then closes it so
// receivers ranging over the channel terminate.
func fibonacci2(n int, c chan int) {
	defer close(c)
	a, b := 0, 1
	for ; n > 0; n-- {
		c <- a
		a, b = b, a+b
	}
}
// sum adds up the elements of s and sends the total on c.
func sum(s []int, c chan int) {
	total := 0
	for i := range s {
		total += s[i]
	}
	c <- total
}
// test8001 demonstrates a goroutine: "world" is printed concurrently with
// "hello" from the calling goroutine.
func test8001() {
	//goroutine
	go say("world")
	say("hello")
}

// say prints s five times, sleeping 100ms before each print.
func say(s string) {
	for i := 0; i < 5; i++ {
		time.Sleep(100 * time.Millisecond)
		fmt.Println(s)
	}
}
|
package main
import "fmt"
// main demonstrates the bool zero value and basic comparison operators.
func main() {
	//DECLARE a BOOLEAN variable; its zero value is false.
	var x bool
	fmt.Println("Zero value of BOOLEAN :: ", x)
	x = true
	fmt.Println("Modified value of 'x' :: ", x)
	a := 7
	b := 42
	// Comparison operators all yield bool.
	fmt.Println("Some boolean operators ::")
	fmt.Println("a == b", a == b)
	fmt.Println("a != b", a != b)
	fmt.Println("a <= b", a <= b)
	fmt.Println("a >= b", a >= b)
}
|
package dao
import (
"log"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
var (
DB *gorm.DB
)
// InitDB opens the MySQL connection used by the package-level DB handle and
// verifies it with a ping. It returns any open or ping error.
func InitDB() (err error) {
	dsn := "root:123456@(127.0.0.1:3306)/db_todo?charset=utf8mb4&parseTime=True&loc=Local"
	DB, err = gorm.Open("mysql", dsn)
	if err != nil {
		// Return the error instead of panicking: log.Panicf previously made
		// the declared error return value unreachable dead code, and callers
		// of InitDB clearly expect to inspect the returned error.
		log.Printf("open database failure, err: %v", err)
		return err
	}
	return DB.DB().Ping()
}
// CloseDB closes the package-level DB handle if it was ever opened.
func CloseDB() {
	// Guard against CloseDB being called when InitDB never succeeded; a nil
	// DB would otherwise panic here.
	if DB == nil {
		return
	}
	if err := DB.Close(); err != nil {
		log.Printf("close database failure, err: %v", err)
	}
}
|
package service
import (
"context"
"errors"
"github.com/golang/mock/gomock"
"github.com/polundrra/shortlink/internal/repo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"testing"
)
// LinkServiceSuit wires a gomock-backed repository into the service under test.
type LinkServiceSuit struct {
	suite.Suite
	mockCtrl *gomock.Controller
	repoMock *repo.MockLinkRepo
	SUT      Service // system under test
}

// TestService runs the suite via the standard testing runner.
func TestService(t *testing.T) {
	suite.Run(t, new(LinkServiceSuit))
}

// SetupTest builds a fresh mock controller, repository mock, and service
// before every test in the suite.
func (s *LinkServiceSuit) SetupTest() {
	ctrl := gomock.NewController(s.T())
	s.mockCtrl = ctrl
	s.repoMock = repo.NewMockLinkRepo(ctrl)
	s.SUT = New(repo.Opts{
		Timeout: 1,
	}, s.repoMock)
}
// TestCreateShortLink_CustomEnd: a free custom code is stored for the URL
// and returned unchanged.
func (s *LinkServiceSuit) TestCreateShortLink_CustomEnd() {
	defer s.mockCtrl.Finish()
	a := assert.New(s.T())
	ctx := context.Background()
	url := "foo12345"
	customEnd := "foo"
	s.repoMock.EXPECT().IsCodeExists(ctx, customEnd).Times(1).Return(false, nil)
	s.repoMock.EXPECT().SetLink(ctx, url, customEnd, true).Times(1).Return(nil)
	res, err := s.SUT.CreateShortLink(ctx, url, customEnd)
	a.NoError(err)
	a.Equal(customEnd, res)
}
// TestCreateShortLink_CodeConflictErr: a taken ending mapped to a different
// URL must yield ErrCodeConflict.
func (s *LinkServiceSuit) TestCreateShortLink_CodeConflictErr() {
	defer s.mockCtrl.Finish()
	a := assert.New(s.T())
	ctx := context.Background()
	url := "foo12345"
	customEnd := "foo"
	exUrl := "bar12345"
	s.repoMock.EXPECT().IsCodeExists(ctx, customEnd).Times(1).Return(true, nil)
	s.repoMock.EXPECT().GetLongLinkByCode(ctx, customEnd).Times(1).Return(exUrl, nil)
	res, err := s.SUT.CreateShortLink(ctx, url, customEnd)
	// BUG FIX: a.Error(ErrCodeConflict, err) only asserted that the sentinel
	// itself is non-nil (always true) and used err as a failure message —
	// the returned error was never checked. ErrorIs pins it properly.
	a.ErrorIs(err, ErrCodeConflict)
	a.Equal("", res)
}
// TestCreateShortLink_CustomEnd_SameURL: reusing a taken ending succeeds when
// it already points at the same long URL.
func (s *LinkServiceSuit) TestCreateShortLink_CustomEnd_SameURL() {
	defer s.mockCtrl.Finish()
	assertion := assert.New(s.T())
	ctx := context.Background()
	const (
		longURL = "foo12345"
		ending  = "foo"
	)
	s.repoMock.EXPECT().IsCodeExists(ctx, ending).Times(1).Return(true, nil)
	s.repoMock.EXPECT().GetLongLinkByCode(ctx, ending).Times(1).Return(longURL, nil)
	got, err := s.SUT.CreateShortLink(ctx, longURL, ending)
	assertion.NoError(err)
	assertion.Equal(ending, got)
}
// TestCreateShortLink_CodeExists: when the URL already has a code, that code
// is returned without creating a new one.
func (s *LinkServiceSuit) TestCreateShortLink_CodeExists() {
	defer s.mockCtrl.Finish()
	assertion := assert.New(s.T())
	ctx := context.Background()
	const (
		longURL      = "foo12345"
		existingCode = "f"
	)
	s.repoMock.EXPECT().GetCodeByLongLink(ctx, longURL).Times(1).Return(existingCode, nil)
	got, err := s.SUT.CreateShortLink(ctx, longURL, "")
	assertion.NoError(err)
	assertion.Equal(existingCode, got)
}
// TestCreateShortLink_IsCodeExistsErr: a repo failure in IsCodeExists is
// propagated and no link is returned.
func (s *LinkServiceSuit) TestCreateShortLink_IsCodeExistsErr() {
	defer s.mockCtrl.Finish()
	a := assert.New(s.T())
	ctx := context.Background()
	url := "foo12345"
	customEnd := "foo"
	expected := errors.New("any")
	s.repoMock.EXPECT().IsCodeExists(ctx, customEnd).Times(1).Return(false, expected)
	res, err := s.SUT.CreateShortLink(ctx, url, customEnd)
	// BUG FIX: a.Error(expected, err) never compared err to expected (testify's
	// Error takes the error first and treats the rest as a message).
	a.ErrorIs(err, expected)
	a.Equal("", res)
}
// TestCreateShortLink_GetLongLinkByCodeErr: a repo failure while resolving an
// occupied ending is propagated.
func (s *LinkServiceSuit) TestCreateShortLink_GetLongLinkByCodeErr() {
	defer s.mockCtrl.Finish()
	a := assert.New(s.T())
	ctx := context.Background()
	url := "foo12345"
	customEnd := "foo"
	expected := errors.New("any")
	s.repoMock.EXPECT().IsCodeExists(ctx, customEnd).Times(1).Return(true, nil)
	s.repoMock.EXPECT().GetLongLinkByCode(ctx, customEnd).Times(1).Return("", expected)
	res, err := s.SUT.CreateShortLink(ctx, url, customEnd)
	// BUG FIX: a.Error(expected, err) was a vacuous assertion; ErrorIs actually
	// checks the returned error.
	a.ErrorIs(err, expected)
	a.Equal("", res)
}
// TestCreateShortLink_GetCodeByLongLinkErr: a repo failure in the existing-code
// lookup is propagated.
func (s *LinkServiceSuit) TestCreateShortLink_GetCodeByLongLinkErr() {
	defer s.mockCtrl.Finish()
	a := assert.New(s.T())
	ctx := context.Background()
	url := "foo12345"
	customEnd := ""
	expected := errors.New("any")
	s.repoMock.EXPECT().GetCodeByLongLink(ctx, url).Times(1).Return("", expected)
	res, err := s.SUT.CreateShortLink(ctx, url, customEnd)
	// BUG FIX: a.Error(expected, err) was a vacuous assertion; ErrorIs actually
	// checks the returned error.
	a.ErrorIs(err, expected)
	a.Equal("", res)
}
// TestCreateShortLink: the happy path — a new URL gets the next sequence
// number encoded as its code and stored.
func (s *LinkServiceSuit) TestCreateShortLink() {
	defer s.mockCtrl.Finish()
	assertion := assert.New(s.T())
	ctx := context.Background()
	const (
		longURL = "foo"
		code    = "1"
	)
	s.repoMock.EXPECT().GetCodeByLongLink(ctx, longURL).Times(1).Return("", nil)
	s.repoMock.EXPECT().GetNextSeq(ctx).Times(1).Return(uint64(1), nil)
	s.repoMock.EXPECT().IsCodeExists(ctx, code).Times(1).Return(false, nil)
	s.repoMock.EXPECT().SetLink(ctx, longURL, code, false).Times(1).Return(nil)
	got, err := s.SUT.CreateShortLink(ctx, longURL, "")
	assertion.NoError(err)
	assertion.Equal(code, got)
}
// TestCreateShortLink_SetLinkErr: a repo failure while storing the new link is
// propagated.
func (s *LinkServiceSuit) TestCreateShortLink_SetLinkErr() {
	defer s.mockCtrl.Finish()
	a := assert.New(s.T())
	ctx := context.Background()
	url := "foo"
	customEnd := ""
	code := "1"
	expected := errors.New("any")
	s.repoMock.EXPECT().GetCodeByLongLink(ctx, url).Times(1).Return("", nil)
	s.repoMock.EXPECT().GetNextSeq(ctx).Times(1).Return(uint64(1), nil)
	s.repoMock.EXPECT().IsCodeExists(ctx, code).Times(1).Return(false, nil)
	s.repoMock.EXPECT().SetLink(ctx, url, code, false).Times(1).Return(expected)
	res, err := s.SUT.CreateShortLink(ctx, url, customEnd)
	// BUG FIX: a.Error(expected, err) was a vacuous assertion; ErrorIs actually
	// checks the returned error.
	a.ErrorIs(err, expected)
	a.Equal("", res)
}
// TestGetLongLink: a known code resolves to its stored long URL.
func (s *LinkServiceSuit) TestGetLongLink() {
	defer s.mockCtrl.Finish()
	assertion := assert.New(s.T())
	ctx := context.Background()
	const (
		code    = "f"
		longURL = "foo123"
	)
	s.repoMock.EXPECT().GetLongLinkByCode(ctx, code).Times(1).Return(longURL, nil)
	got, err := s.SUT.GetLongLink(ctx, code)
	assertion.NoError(err)
	assertion.Equal(longURL, got)
}
// TestGetLongLink_GetLongLingByCodeErr: a repo failure during lookup is
// propagated.
func (s *LinkServiceSuit) TestGetLongLink_GetLongLingByCodeErr() {
	defer s.mockCtrl.Finish()
	a := assert.New(s.T())
	ctx := context.Background()
	code := "f"
	expected := errors.New("any")
	s.repoMock.EXPECT().GetLongLinkByCode(ctx, code).Times(1).Return("", expected)
	res, err := s.SUT.GetLongLink(ctx, code)
	// BUG FIX: a.Error(expected, err) was a vacuous assertion; ErrorIs actually
	// checks the returned error.
	a.ErrorIs(err, expected)
	a.Equal("", res)
}
// TestGetLongLink_ErrLinkNotFound: an empty lookup result must surface
// ErrLongLinkNotFound.
func (s *LinkServiceSuit) TestGetLongLink_ErrLinkNotFound() {
	defer s.mockCtrl.Finish()
	a := assert.New(s.T())
	ctx := context.Background()
	code := "f"
	url := ""
	s.repoMock.EXPECT().GetLongLinkByCode(ctx, code).Times(1).Return(url, nil)
	res, err := s.SUT.GetLongLink(ctx, code)
	// BUG FIX: a.Error(ErrLongLinkNotFound, err) only checked that the sentinel
	// is non-nil; the returned error was never compared. ErrorIs pins it.
	a.ErrorIs(err, ErrLongLinkNotFound)
	a.Equal(url, res)
}
func TestToBase62(t *testing.T) {
a := assert.New(t)
testCases := []struct {
n uint64
expected string
}{
{0, "0"},
{1, "1"},
{10, "a"},
{36, "A"},
{100, "1C"},
{1234567891, "1ly7vl"},
}
for _, tc := range testCases {
res := toBase62(tc.n)
a.Equal(tc.expected, res)
}
} |
package files
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
)
// TestWebFile checks that a WebFile streams the body served at its URL.
func TestWebFile(t *testing.T) {
	// BUG FIX: removed the leftover http.HandleFunc registration on the global
	// DefaultServeMux — the httptest server below uses its own handler, so the
	// global registration was dead code (and re-registering the same pattern
	// in one process panics).
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Hello world!")
	}))
	defer s.Close()
	u, err := url.Parse(s.URL)
	if err != nil {
		t.Fatal(err)
	}
	wf := NewWebFile(u)
	body, err := ioutil.ReadAll(wf)
	if err != nil {
		t.Fatal(err)
	}
	if string(body) != "Hello world!" {
		t.Fatal("should have read the web file")
	}
}
|
package main
import (
"fmt"
"os"
"os/signal"
"strconv"
"sync"
"syscall"
"time"
)
var (
	// dataCh = make(chan Person)
	idx      = 0  // shared producer cursor; mutated without synchronization
	maxCount = 20 // number of Person values produceInts emits in its loop
)

// dataCh carries Person values from produceInts to consumeInts.
// NOTE(review): it is never initialized — the make() calls are commented out
// in main — so using the producer/consumer path as-is would block on a nil
// channel. Confirm before re-enabling.
var dataCh chan Person
// main runs a fan-in demo: two producers push multiples of 3 and 5 onto one
// unbuffered channel, a consumer prints them, and the program blocks until
// SIGINT/SIGTERM arrives. The goroutines are deliberately left running; they
// die with the process.
func main() {
	// dataCh = make(chan Person)
	// var wg sync.WaitGroup
	// startT := time.Now()
	// fmt.Println("Start deal with data")
	// consumeInts(&wg)
	// produceInts()
	// wg.Wait()
	// fmt.Println("End deal with ", idx, " data")
	// endT := time.Since(startT)
	// fmt.Println("Run time: ", endT)
	// time.Sleep(time.Second)
	// testBoring2()
	// testFakeSearch()
	// testFirstSearch()
	ch := make(chan int)
	// Producer 1: multiples of 3, forever.
	go func() {
		for i := 0; ; i++ {
			ch <- i * 3
		}
	}()
	// Producer 2: multiples of 5, forever.
	go func() {
		for i := 0; ; i++ {
			ch <- i * 5
		}
	}()
	// Single consumer: prints whatever either producer sends.
	go func() {
		for v := range ch {
			fmt.Println(v)
		}
	}()
	// Block main until the process receives an interrupt/terminate signal.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
	fmt.Printf("quit (%v)\n", <-sig)
}
// produceInts pushes Person values into the package-level dataCh until idx
// reaches maxCount, then sends one final Person and closes the channel.
//
// NOTE(review): dataCh is never made (the make() in main is commented out),
// so calling this as-is blocks forever on a nil channel. Also note the
// post-loop send emits an extra Person with idx == maxCount — verify that
// the duplicate construction/send is intentional.
func produceInts() {
	for {
		if idx >= maxCount { // define when to break the for loop
			break
		}
		person := Person{
			Name: "zhu" + strconv.Itoa(idx),
			Desc: "staff" + strconv.Itoa(idx*idx),
		}
		dataCh <- person
		idx++
	}
	// One extra value beyond the loop bound, then the close signals consumers.
	person := Person{
		Name: "zhu" + strconv.Itoa(idx),
		Desc: "staff" + strconv.Itoa(idx*idx),
	}
	dataCh <- person
	close(dataCh)
}
// consumeInts launches 1000 workers that drain dataCh until it is closed,
// registering each with wg so the caller can wait for them.
func consumeInts(wg *sync.WaitGroup) {
	const workers = 1000
	wg.Add(workers)
	for worker := 0; worker < workers; worker++ {
		go func() {
			defer wg.Done()
			for person := range dataCh {
				doPerson(person)
			}
		}()
	}
}
// doPerson simulates 200ms of work on p, then reports it.
func doPerson(p Person) {
	const workDuration = 200 * time.Millisecond
	time.Sleep(workDuration)
	fmt.Println("Current Person is:", p)
}
// Person : the payload passed through dataCh in this demo.
type Person struct {
	Name string
	Desc string
}
|
package main
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"s3upload/helpers"
"strings"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/namsral/flag"
)
//Application holds the resolved CLI/env configuration for a single run:
//AWS profile/region, target bucket, source directory, and the mode flags
//(upload, list) plus json output selection.
type Application struct {
	awsProfile string
	bucket     string
	region     string
	dir        string
	upload     bool
	list       bool
	json       bool
}
//S3Uploader will allow us to mock uploads in the test by implementing an Upload function or method.
//s3manager.Uploader satisfies it in production.
type S3Uploader interface {
	Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error)
}
func main() {
var (
awsProfile string
bucket string
region string
dir string
upload bool
list bool
json bool
output io.Writer
)
flag.StringVar(&awsProfile, "aws_default_profile", "", "Set your AWS Default profile via flag or export to env variable: AWS_DEFAULT_PROFILE")
flag.StringVar(&bucket, "bucket", "", "Set bucket via flag or env variable export: export BUCKET")
flag.StringVar(®ion, "aws_region", "us-east-1", "Set region via flag or env variable export: export AWS_REGION, default set to: us-east-1")
flag.StringVar(&dir, "dir", "", "Directory to upload. export DIR or pass via flag")
flag.BoolVar(&upload, "upload", false, "pass --upload flag to trigger upload")
flag.BoolVar(&list, "list", false, "pass --list flag to list contents of bucket")
flag.BoolVar(&json, "json", false, "include --json flag to print JSON output")
flag.Parse()
//values from flags
app := Application{
awsProfile,
bucket,
region,
dir,
upload,
list,
json,
}
//set output, in this case to os.Stdout, but can take any Writer interface
output = os.Stdout
if err := app.run(output); err != nil {
fmt.Println("Ensure your variables/flags are set: ")
flag.PrintDefaults()
}
}
// run dispatches on the mode flags: upload a directory, list the bucket, or
// print usage. All failures are reported on w and returned to the caller.
func (app *Application) run(w io.Writer) error {
	switch {
	case app.upload:
		// BUG FIX: the original shadowed err inside the precondition checks and
		// fell through to the upload even when a precondition failed; each
		// failure now returns immediately with the reported error.
		if app.dir == "" {
			err := errors.New("set the directory flag")
			fmt.Fprintln(w, "\033[31mError occurred:\033[0m", err)
			return err
		}
		if _, statErr := os.Stat(app.dir); os.IsNotExist(statErr) {
			err := errors.New("directory does not exist")
			fmt.Fprintln(w, "\033[31mError occurred:\033[0m", err)
			return err
		}
		if err := app.s3UploadDir(w); err != nil {
			fmt.Fprintln(w, "\033[31mError occurred:\033[0m", err)
			return err
		}
		return nil
	case app.list:
		if err := app.s3ListBucketObjects(w); err != nil {
			fmt.Fprintln(w, "\033[31mError occurred:\033[0m", err)
			return err
		}
		return nil
	default:
		fmt.Println("Ensure your variables/flags are set. \nDon't forget to include the '--upload' flag to trigger upload")
		flag.PrintDefaults()
		return nil
	}
}
//s3Session creates an AWS s3 Session using the configured profile and region.
func (app *Application) s3Session() (*session.Session, error) {
	// Local named sess to avoid shadowing the imported session package.
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile: app.awsProfile,
		Config: aws.Config{
			Region: aws.String(app.region),
		},
	})
	return sess, err
}
//s3UploadDir walks app.dir and uploads every entry beneath it (fileList[0]
//is the root directory itself) via a concurrent multipart uploader.
func (app *Application) s3UploadDir(w io.Writer) error {
	session, err := app.s3Session()
	if err != nil {
		fmt.Fprintln(w, "Could not S3 create session: ", err)
		return err
	}
	uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) {
		u.PartSize = 100 * 1024 * 1024 // 100MB multipart chunks
		u.LeavePartsOnError = false
		u.Concurrency = 100
	})
	fileList := []string{}
	// BUG FIX: Walk's error (and the per-entry error) were silently ignored;
	// an unreadable directory left fileList empty and fileList[1:] panicked.
	err = filepath.Walk(app.dir, func(path string, f os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		fileList = append(fileList, path)
		return nil
	})
	if err != nil {
		fmt.Fprintln(w, "Error walking directory: ", err)
		return err
	}
	if len(fileList) < 2 {
		// Only the root entry (or nothing) was found: nothing to upload.
		fmt.Fprintln(w, "\033[34mDone!")
		return nil
	}
	for _, pathOfFile := range fileList[1:] {
		if err := app.s3UploadFile(pathOfFile, session, w, uploader); err != nil {
			return err
		}
	}
	fmt.Fprintln(w, "\033[34mDone!")
	return nil
}
//s3UploadFile uploads the actual file.
//It streams pathOfFile through a progress-bar reader into s3svc, using the
//file's path as the object key, and returns nil on success. Errors are
//reported on w before being returned.
func (app *Application) s3UploadFile(pathOfFile string, session *session.Session, w io.Writer, s3svc S3Uploader) error {
	stat, err := os.Stat(pathOfFile)
	if err != nil {
		fmt.Fprintln(w, "Error getting file size: ", err)
		return err
	}
	//open file and add to buffer
	file, err := os.Open(pathOfFile)
	if err != nil {
		fmt.Fprintln(w, "Error opening file: ", err)
		return err
	}
	defer file.Close()
	path := file.Name()
	fileSize := stat.Size()
	// Wrap the file so the progress bar advances as the uploader reads.
	progReader := helpers.NewProgressReader(file, fileSize)
	fmt.Fprintln(w, "\033[32mUploading: ", pathOfFile)
	progReader.ProgBar.Format("\033[32m\x00=\x00>\x00-\x00]")
	progReader.ProgBar.Start()
	defer progReader.ProgBar.Finish()
	_, err = s3svc.Upload(&s3manager.UploadInput{
		Bucket: aws.String(app.bucket),
		Key:    aws.String(path),
		Body:   progReader, //fileBytes
	})
	if err != nil {
		//directories in S3 are initially created as zero-sized objects which causes
		//an error in the progress bar due to a zero-byte read prior to upload. We can ignore this error below.
		//rest of the contents are properly calculated once the directory is created
		//and upload starts. Other errors we will catch. So create a message to user during upload
		//that directory is being created instead.
		if strings.Contains(err.Error(), "BodyHash") {
			fmt.Fprintln(w, "\033[34m Creating Directory...\033[32m")
			return nil
		}
		fmt.Fprintln(w, "\033[31mError uploading file \033[0m", err)
		return err
	}
	return nil
}
//s3ListBucketObjects lists all the objects in the bucket (including directory tree structures)
//in the bucket. With app.json set it dumps the raw ListObjectsV2 result;
//otherwise it prints one object key per line.
func (app *Application) s3ListBucketObjects(w io.Writer) error {
	session, err := app.s3Session()
	if err != nil {
		fmt.Fprintln(w, "Error getting file size: ", err)
		return err
	}
	svc := s3.New(session)
	input := &s3.ListObjectsV2Input{
		Bucket:  aws.String(app.bucket),
		MaxKeys: aws.Int64(5000), //hard-coded to max 5000 objects to return. Should be enough
	}
	result, err := svc.ListObjectsV2(input)
	if err != nil {
		// awserr.Error carries the AWS error code; anything else is printed raw.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			default:
				fmt.Fprintln(w, fmt.Errorf("No such bucket - please check name or credentials"))
				fmt.Fprintln(w, aerr.Error())
			}
		} else {
			fmt.Fprintln(w, err.Error())
		}
		return err
	}
	if app.json {
		fmt.Fprintln(w, *result)
	} else {
		for _, v := range result.Contents {
			fmt.Fprintln(w, *v.Key)
		}
	}
	return nil
}
|
package fileupload
import (
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"path/filepath"
"github.com/majid-cj/go-docker-mongo/util"
"github.com/thoas/go-funk"
)
// AllowedImages lists the MIME types UploadFile accepts, as reported by
// http.DetectContentType.
var AllowedImages = []string{"image/jpeg", "image/jpg", "image/png"}
// UploadFile is the stateless default implementation of UploadFileInterface.
type UploadFile struct{}

// UploadFileInterface abstracts image-upload handling so it can be mocked.
type UploadFileInterface interface {
	UploadFile(*multipart.FileHeader, multipart.File, string, string) (string, error)
}

// Compile-time check that *UploadFile satisfies UploadFileInterface.
var _ UploadFileInterface = &UploadFile{}

// NewUploadFile returns a ready-to-use UploadFile.
func NewUploadFile() *UploadFile {
	return &UploadFile{}
}
// UploadFile validates an uploaded image (size limit, sniffed MIME type) and
// writes it under dest, returning the host-prefixed filename or a
// util-wrapped error.
func (uf *UploadFile) UploadFile(fileHeader *multipart.FileHeader, file multipart.File, dest, host string) (string, error) {
	fileHeader.Filename = FormatFile(fileHeader.Filename)
	src, err := fileHeader.Open()
	if err != nil {
		return "", util.GetError("general_error")
	}
	defer src.Close()
	// Reject anything larger than 10 MiB.
	size := fileHeader.Size
	if size > 10<<20 {
		return "", util.GetError("image_size_error")
	}
	// BUG FIX: the old code allocated a full-size buffer and ignored the byte
	// count returned by file.Read, so a short read could sniff zero padding.
	// http.DetectContentType inspects at most the first 512 bytes anyway.
	buffer := make([]byte, 512)
	n, err := io.ReadFull(file, buffer)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return "", util.GetError("general_error")
	}
	filetype := http.DetectContentType(buffer[:n])
	if !funk.ContainsString(AllowedImages, filetype) {
		return "", util.GetError("not_supported_type")
	}
	// BUG FIX: the Mkdir error was silently dropped; MkdirAll also tolerates
	// an already-existing destination directory.
	if err := os.MkdirAll(dest, os.FileMode(0766)); err != nil {
		return "", util.GetError("general_error")
	}
	output, err := os.OpenFile(filepath.Join(dest, fileHeader.Filename), os.O_RDWR|os.O_CREATE, os.FileMode(0766))
	if err != nil {
		return "", util.GetError("general_error")
	}
	defer output.Close()
	fileHeader.Filename = fmt.Sprintf("%s/%s%s", host, dest, fileHeader.Filename)
	_, err = io.Copy(output, src)
	if err != nil {
		return "", util.GetError("general_error")
	}
	return fileHeader.Filename, nil
}
|
package handler
import (
"github.com/go-redis/redis"
"go-admin/config"
)
// RedisClient is the shared client; the initial new(redis.Client) is a
// placeholder so the pointer is never nil, and RedisNewClient replaces it.
var RedisClient = new(redis.Client)

// init eagerly connects using the values from the config package.
// NOTE(review): connecting from init makes the package hard to test in
// isolation — consider explicit initialization from main.
func init() {
	RedisNewClient(config.RedisConnConfig.Addr, config.RedisConnConfig.Password, config.RedisConnConfig.DB)
}
// RedisNewClient replaces the package-level RedisClient with a client
// configured for the given address, password, and database index.
func RedisNewClient(addr string, password string, db int) {
	//timeout := time.Duration(readTimeout)
	options := redis.Options{
		Addr:     addr,
		Password: password, // empty string means no auth
		DB:       db,       // 0 selects the default database
		//ReadTimeout: ,
	}
	RedisClient = redis.NewClient(&options)
}
|
package table
import (
"encoding/json"
"errors"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/GoAdminGroup/go-admin/modules/config"
"github.com/GoAdminGroup/go-admin/modules/db"
"github.com/GoAdminGroup/go-admin/modules/db/dialect"
errs "github.com/GoAdminGroup/go-admin/modules/errors"
"github.com/GoAdminGroup/go-admin/modules/language"
"github.com/GoAdminGroup/go-admin/modules/logger"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/constant"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/form"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/paginator"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/parameter"
"github.com/GoAdminGroup/go-admin/template/types"
)
// DefaultTable is an implementation of table.Table
// Data can come from (in priority order, see GetData): getDataFun, sourceURL,
// the panel's GetDataFn, or the configured database connection.
type DefaultTable struct {
	*BaseTable
	connectionDriver     string     // db driver name, e.g. mysql / mssql / postgresql
	connectionDriverMode string     // driver sub-mode (e.g. "h2"; see getDataFromDatabase)
	connection           string     // named connection used for queries
	sourceURL            string     // when set, rows are fetched over HTTP
	getDataFun           GetDataFun // when set, rows come from this callback
	dbObj                db.Connection
}

// GetDataFun supplies rows plus the total row count used for pagination.
type GetDataFun func(params parameter.Parameters) ([]map[string]interface{}, int)
// NewDefaultTable builds a DefaultTable from the first supplied Config when
// it carries a primary key, falling back to DefaultConfig otherwise.
func NewDefaultTable(cfgs ...Config) Table {
	cfg := DefaultConfig()
	if len(cfgs) > 0 && cfgs[0].PrimaryKey.Name != "" {
		cfg = cfgs[0]
	}
	base := &BaseTable{
		Info:           types.NewInfoPanel(cfg.PrimaryKey.Name),
		Form:           types.NewFormPanel(),
		NewForm:        types.NewFormPanel(),
		Detail:         types.NewInfoPanel(cfg.PrimaryKey.Name),
		CanAdd:         cfg.CanAdd,
		Editable:       cfg.Editable,
		Deletable:      cfg.Deletable,
		Exportable:     cfg.Exportable,
		PrimaryKey:     cfg.PrimaryKey,
		OnlyNewForm:    cfg.OnlyNewForm,
		OnlyUpdateForm: cfg.OnlyUpdateForm,
		OnlyDetail:     cfg.OnlyDetail,
		OnlyInfo:       cfg.OnlyInfo,
	}
	return &DefaultTable{
		BaseTable:            base,
		connectionDriver:     cfg.Driver,
		connectionDriverMode: cfg.DriverMode,
		connection:           cfg.Connection,
		sourceURL:            cfg.SourceURL,
		getDataFun:           cfg.GetDataFun,
	}
}
// Copy copy a new table.Table from origin DefaultTable
// Panels are rebuilt fresh (field lists are NOT copied); only table name,
// description, title, data callbacks, and the boolean capabilities carry over.
//
// NOTE(review): NewForm is rebuilt from tb.Form's table/description/title
// rather than tb.NewForm's — looks like a copy-paste; confirm whether NewForm
// should carry its own metadata. (comment only; code unchanged)
func (tb *DefaultTable) Copy() Table {
	return &DefaultTable{
		BaseTable: &BaseTable{
			Form: types.NewFormPanel().SetTable(tb.Form.Table).
				SetDescription(tb.Form.Description).
				SetTitle(tb.Form.Title),
			NewForm: types.NewFormPanel().SetTable(tb.Form.Table).
				SetDescription(tb.Form.Description).
				SetTitle(tb.Form.Title),
			Info: types.NewInfoPanel(tb.PrimaryKey.Name).SetTable(tb.Info.Table).
				SetDescription(tb.Info.Description).
				SetTitle(tb.Info.Title).
				SetGetDataFn(tb.Info.GetDataFn),
			Detail: types.NewInfoPanel(tb.PrimaryKey.Name).SetTable(tb.Detail.Table).
				SetDescription(tb.Detail.Description).
				SetTitle(tb.Detail.Title).
				SetGetDataFn(tb.Detail.GetDataFn),
			CanAdd:     tb.CanAdd,
			Editable:   tb.Editable,
			Deletable:  tb.Deletable,
			Exportable: tb.Exportable,
			PrimaryKey: tb.PrimaryKey,
		},
		connectionDriver:     tb.connectionDriver,
		connectionDriverMode: tb.connectionDriverMode,
		connection:           tb.connection,
		sourceURL:            tb.sourceURL,
		getDataFun:           tb.getDataFun,
	}
}
// GetData query the data set.
// Source priority: getDataFun, sourceURL, Info.GetDataFn, then the database
// (all-rows or paginated path). A QueryFilterFn may rewrite the query to an
// explicit id list and short-circuit into GetDataWithIds.
func (tb *DefaultTable) GetData(params parameter.Parameters) (PanelInfo, error) {
	var (
		data      []map[string]interface{}
		size      int
		beginTime = time.Now() // for the "query time" footer below
	)
	// Let the panel rewrite incoming parameters first (sorting, filters, ...).
	if tb.Info.UpdateParametersFns != nil {
		for _, fn := range tb.Info.UpdateParametersFns {
			fn(&params)
		}
	}
	if tb.Info.QueryFilterFn != nil {
		var ids []string
		var stopQuery bool
		// Only hand the filter a live connection when the data actually comes
		// from the database.
		if tb.getDataFun == nil && tb.Info.GetDataFn == nil {
			ids, stopQuery = tb.Info.QueryFilterFn(params, tb.db())
		} else {
			ids, stopQuery = tb.Info.QueryFilterFn(params, nil)
		}
		if stopQuery {
			return tb.GetDataWithIds(params.WithPKs(ids...))
		}
	}
	if tb.getDataFun != nil {
		data, size = tb.getDataFun(params)
	} else if tb.sourceURL != "" {
		data, size = tb.getDataFromURL(params)
	} else if tb.Info.GetDataFn != nil {
		data, size = tb.Info.GetDataFn(params)
	} else if params.IsAll() {
		return tb.getAllDataFromDatabase(params)
	} else {
		return tb.getDataFromDatabase(params)
	}
	infoList := make(types.InfoList, 0)
	for i := 0; i < len(data); i++ {
		infoList = append(infoList, tb.getTempModelData(data[i], params, []string{}))
	}
	thead, _, _, _, _, filterForm := tb.getTheadAndFilterForm(params, []string{})
	endTime := time.Now()
	extraInfo := ""
	if !tb.Info.IsHideQueryInfo {
		extraInfo = fmt.Sprintf("<b>" + language.Get("query time") + ": </b>" +
			fmt.Sprintf("%.3fms", endTime.Sub(beginTime).Seconds()*1000))
	}
	return PanelInfo{
		Thead:    thead,
		InfoList: infoList,
		Paginator: paginator.Get(paginator.Config{
			Size:         size,
			Param:        params,
			PageSizeList: tb.Info.GetPageSizeList(),
		}).SetExtraInfo(template.HTML(extraInfo)),
		Title:          tb.Info.Title,
		FilterFormData: filterForm,
		Description:    tb.Info.Description,
	}, nil
}
// GetDataFromURLRes is the JSON envelope a sourceURL endpoint must return.
type GetDataFromURLRes struct {
	Data []map[string]interface{}
	Size int // total row count, used by the paginator
}
// getDataFromURL fetches rows from the configured sourceURL, forwarding the
// query parameters and primary keys. Any transport or decode failure yields
// an empty result (best-effort, matching the other data sources).
func (tb *DefaultTable) getDataFromURL(params parameter.Parameters) ([]map[string]interface{}, int) {
	empty := []map[string]interface{}{}
	sep := "?"
	if strings.Contains(tb.sourceURL, "?") {
		sep = "&"
	}
	target := tb.sourceURL + sep + params.Join() + "&pk=" + strings.Join(params.PKs(), ",")
	res, err := http.Get(target)
	if err != nil {
		return empty, 0
	}
	defer func() {
		_ = res.Body.Close()
	}()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return empty, 0
	}
	var payload GetDataFromURLRes
	if err := json.Unmarshal(body, &payload); err != nil {
		return empty, 0
	}
	return payload.Data, payload.Size
}
// GetDataWithIds query the data set.
// Like GetData, but the parameters already carry an explicit primary-key list
// and the all-rows database path is not offered.
func (tb *DefaultTable) GetDataWithIds(params parameter.Parameters) (PanelInfo, error) {
	var (
		data      []map[string]interface{}
		size      int
		beginTime = time.Now() // for the "query time" footer below
	)
	// Same source priority as GetData: callback, URL, panel fn, database.
	if tb.getDataFun != nil {
		data, size = tb.getDataFun(params)
	} else if tb.sourceURL != "" {
		data, size = tb.getDataFromURL(params)
	} else if tb.Info.GetDataFn != nil {
		data, size = tb.Info.GetDataFn(params)
	} else {
		return tb.getDataFromDatabase(params)
	}
	infoList := make([]map[string]types.InfoItem, 0)
	for i := 0; i < len(data); i++ {
		infoList = append(infoList, tb.getTempModelData(data[i], params, []string{}))
	}
	thead, _, _, _, _, filterForm := tb.getTheadAndFilterForm(params, []string{})
	endTime := time.Now()
	return PanelInfo{
		Thead:    thead,
		InfoList: infoList,
		Paginator: paginator.Get(paginator.Config{
			Size:         size,
			Param:        params,
			PageSizeList: tb.Info.GetPageSizeList(),
		}).
			SetExtraInfo(template.HTML(fmt.Sprintf("<b>" + language.Get("query time") + ": </b>" +
				fmt.Sprintf("%.3fms", endTime.Sub(beginTime).Seconds()*1000)))),
		Title:          tb.Info.Title,
		FilterFormData: filterForm,
		Description:    tb.Info.Description,
	}, nil
}
// getTempModelData converts one raw row into display InfoItems keyed by
// field name, running each field's ToDisplay hook and accumulating the
// per-row edit/delete/detail URL parameters.
func (tb *DefaultTable) getTempModelData(res map[string]interface{}, params parameter.Parameters, columns Columns) map[string]types.InfoItem {
	// Reserved keys carry the per-row action parameters; filled in below.
	var tempModelData = map[string]types.InfoItem{
		"__goadmin_edit_params":   {},
		"__goadmin_delete_params": {},
		"__goadmin_detail_params": {},
	}
	headField := ""
	editParams := ""
	deleteParams := ""
	detailParams := ""
	primaryKeyValue := db.GetValueFromDatabaseType(tb.PrimaryKey.Type, res[tb.PrimaryKey.Name], len(columns) == 0)
	for _, field := range tb.Info.FieldList {
		headField = field.Field
		// Joined fields are stored under "<table><infix><field>" keys.
		if field.Joins.Valid() {
			headField = field.Joins.Last().GetTableName() + parameter.FilterParamJoinInfix + field.Field
		}
		if field.Hide {
			continue
		}
		if !modules.InArrayWithoutEmpty(params.Columns, headField) {
			continue
		}
		typeName := field.TypeName
		if field.Joins.Valid() {
			// Join aggregations always come back as strings.
			typeName = db.Varchar
		}
		combineValue := db.GetValueFromDatabaseType(typeName, res[headField], len(columns) == 0).String()
		// TODO: ToDisplay some same logic execute repeatedly, it can be improved.
		var value interface{}
		if len(columns) == 0 || modules.InArray(columns, headField) || field.Joins.Valid() {
			value = field.ToDisplay(types.FieldModel{
				ID:    primaryKeyValue.String(),
				Value: combineValue,
				Row:   res,
			})
		} else {
			// Column not selected: render with an empty value but the full row.
			value = field.ToDisplay(types.FieldModel{
				ID:    primaryKeyValue.String(),
				Value: "",
				Row:   res,
			})
		}
		// ToDisplay returns either string or template.HTML.
		var valueStr string
		var ok bool
		if valueStr, ok = value.(string); ok {
			tempModelData[headField] = types.InfoItem{
				Content: template.HTML(valueStr),
				Value:   combineValue,
			}
		} else {
			valueStr = string(value.(template.HTML))
			tempModelData[headField] = types.InfoItem{
				Content: value.(template.HTML),
				Value:   combineValue,
			}
		}
		if field.IsEditParam {
			editParams += "__goadmin_edit_" + field.Field + "=" + valueStr + "&"
		}
		if field.IsDeleteParam {
			deleteParams += "__goadmin_delete_" + field.Field + "=" + valueStr + "&"
		}
		if field.IsDetailParam {
			detailParams += "__goadmin_detail_" + field.Field + "=" + valueStr + "&"
		}
	}
	// Trim the trailing "&" of each accumulated parameter string.
	if editParams != "" {
		tempModelData["__goadmin_edit_params"] = types.InfoItem{Content: template.HTML("&" + editParams[:len(editParams)-1])}
	}
	if deleteParams != "" {
		tempModelData["__goadmin_delete_params"] = types.InfoItem{Content: template.HTML("&" + deleteParams[:len(deleteParams)-1])}
	}
	if detailParams != "" {
		tempModelData["__goadmin_detail_params"] = types.InfoItem{Content: template.HTML("&" + detailParams[:len(detailParams)-1])}
	}
	// The primary key is always rendered, even if not in the field loop above.
	primaryKeyField := tb.Info.FieldList.GetFieldByFieldName(tb.PrimaryKey.Name)
	value := primaryKeyField.ToDisplay(types.FieldModel{
		ID:    primaryKeyValue.String(),
		Value: primaryKeyValue.String(),
		Row:   res,
	})
	if valueStr, ok := value.(string); ok {
		tempModelData[tb.PrimaryKey.Name] = types.InfoItem{
			Content: template.HTML(valueStr),
			Value:   primaryKeyValue.String(),
		}
	} else {
		tempModelData[tb.PrimaryKey.Name] = types.InfoItem{
			Content: value.(template.HTML),
			Value:   primaryKeyValue.String(),
		}
	}
	return tempModelData
}
// getAllDataFromDatabase builds and runs an unpaginated SELECT (used by the
// "all rows" export path) and converts every row for display.
func (tb *DefaultTable) getAllDataFromDatabase(params parameter.Parameters) (PanelInfo, error) {
	var (
		connection = tb.db()
		// %s slots: fields, table, joins, wheres, group-by, sort field, sort type.
		queryStatement = "select %s from %s %s %s %s order by " + modules.Delimiter(connection.GetDelimiter(), connection.GetDelimiter2(), "%s") + " %s"
	)
	columns, _ := tb.getColumns(tb.Info.Table)
	thead, fields, joins := tb.Info.FieldList.GetThead(types.TableInfo{
		Table:      tb.Info.Table,
		Delimiter:  connection.GetDelimiter(),
		Delimiter2: connection.GetDelimiter2(),
		Driver:     tb.connectionDriver,
		PrimaryKey: tb.PrimaryKey.Name,
	}, params, columns)
	// The primary key is always part of the selected fields.
	fields += tb.Info.Table + "." + modules.FilterField(tb.PrimaryKey.Name, connection.GetDelimiter(), connection.GetDelimiter2())
	groupBy := ""
	if joins != "" {
		// Joins can multiply rows; collapse them back to one per primary key.
		groupBy = " GROUP BY " + tb.Info.Table + "." + modules.Delimiter(connection.GetDelimiter(), connection.GetDelimiter2(), tb.PrimaryKey.Name)
	}
	var (
		wheres    = ""
		whereArgs = make([]interface{}, 0)
		existKeys = make([]string, 0)
	)
	// Layer the WHERE clause: request filters, then panel Wheres, then raw SQL.
	wheres, whereArgs, existKeys = params.Statement(wheres, tb.Info.Table, connection.GetDelimiter(), connection.GetDelimiter2(), whereArgs, columns, existKeys,
		tb.Info.FieldList.GetFieldFilterProcessValue)
	wheres, whereArgs = tb.Info.Wheres.Statement(wheres, connection.GetDelimiter(), connection.GetDelimiter2(), whereArgs, existKeys, columns)
	wheres, whereArgs = tb.Info.WhereRaws.Statement(wheres, whereArgs)
	if wheres != "" {
		wheres = " where " + wheres
	}
	// Fall back to the primary key when the sort field is not a real column.
	if !modules.InArray(columns, params.SortField) {
		params.SortField = tb.PrimaryKey.Name
	}
	queryCmd := fmt.Sprintf(queryStatement, fields, tb.Info.Table, joins, wheres, groupBy, params.SortField, params.SortType)
	logger.LogSQL(queryCmd, []interface{}{})
	res, err := connection.QueryWithConnection(tb.connection, queryCmd, whereArgs...)
	if err != nil {
		return PanelInfo{}, err
	}
	infoList := make([]map[string]types.InfoItem, 0)
	for i := 0; i < len(res); i++ {
		infoList = append(infoList, tb.getTempModelData(res[i], params, columns))
	}
	return PanelInfo{
		InfoList:    infoList,
		Thead:       thead,
		Title:       tb.Info.Title,
		Description: tb.Info.Description,
	}, nil
}
// TODO: refactor
// getDataFromDatabase builds and runs the paginated SELECT plus its COUNT
// query, handling three shapes: an explicit primary-key list, the MSSQL
// ROW_NUMBER pagination form, and the LIMIT/OFFSET form for other drivers.
func (tb *DefaultTable) getDataFromDatabase(params parameter.Parameters) (PanelInfo, error) {
	var (
		connection     = tb.db()
		delimiter      = connection.GetDelimiter()
		delimiter2     = connection.GetDelimiter2()
		placeholder    = modules.Delimiter(delimiter, delimiter2, "%s")
		queryStatement string
		countStatement string
		ids            = params.PKs() // explicit primary keys, if any
		table          = modules.Delimiter(delimiter, delimiter2, tb.Info.Table)
		pk             = table + "." + modules.Delimiter(delimiter, delimiter2, tb.PrimaryKey.Name)
	)
	beginTime := time.Now() // for the "query time" footer below
	if len(ids) > 0 {
		countExtra := ""
		if connection.Name() == db.DriverMssql {
			countExtra = "as [size]"
		}
		// %s means: fields, table, join table, pk values, group by, order by field, order by type
		queryStatement = "select %s from " + placeholder + " %s where " + pk + " in (%s) %s ORDER BY %s." + placeholder + " %s"
		// %s means: table, join table, pk values
		countStatement = "select count(*) " + countExtra + " from " + placeholder + " %s where " + pk + " in (%s)"
	} else {
		if connection.Name() == db.DriverMssql {
			// %s means: order by field, order by type, fields, table, join table, wheres, group by
			queryStatement = "SELECT * FROM (SELECT ROW_NUMBER() OVER (ORDER BY %s." + placeholder + " %s) as ROWNUMBER_, %s from " +
				placeholder + "%s %s %s ) as TMP_ WHERE TMP_.ROWNUMBER_ > ? AND TMP_.ROWNUMBER_ <= ?"
			// %s means: table, join table, wheres
			countStatement = "select count(*) as [size] from (select count(*) as [size] from " + placeholder + " %s %s %s) src"
		} else {
			// %s means: fields, table, join table, wheres, group by, order by field, order by type
			queryStatement = "select %s from " + placeholder + "%s %s %s order by " + placeholder + "." + placeholder + " %s LIMIT ? OFFSET ?"
			// %s means: table, join table, wheres
			countStatement = "select count(*) from (select " + pk + " from " + placeholder + " %s %s %s) src"
		}
	}
	columns, _ := tb.getColumns(tb.Info.Table)
	thead, fields, joinFields, joins, joinTables, filterForm := tb.getTheadAndFilterForm(params, columns)
	// The primary key is always part of the selected fields.
	fields += pk
	allFields := fields
	groupFields := fields
	if joinFields != "" {
		allFields += "," + joinFields[:len(joinFields)-1]
		if connection.Name() == db.DriverMssql {
			// MSSQL cannot GROUP BY text columns; cast them to NVARCHAR(MAX).
			for _, field := range tb.Info.FieldList {
				if field.TypeName == db.Text || field.TypeName == db.Longtext {
					f := modules.Delimiter(connection.GetDelimiter(), connection.GetDelimiter2(), field.Field)
					headField := table + "." + f
					allFields = strings.ReplaceAll(allFields, headField, "CAST("+headField+" AS NVARCHAR(MAX)) as "+f)
					groupFields = strings.ReplaceAll(groupFields, headField, "CAST("+headField+" AS NVARCHAR(MAX))")
				}
			}
		}
	}
	// Fall back to the primary key when the sort field is not a real column.
	if !modules.InArray(columns, params.SortField) {
		params.SortField = tb.PrimaryKey.Name
	}
	var (
		wheres    = ""
		whereArgs = make([]interface{}, 0)
		args      = make([]interface{}, 0)
		existKeys = make([]string, 0)
	)
	if len(ids) > 0 {
		// Build the "in (?,?,...)" list from the non-empty ids.
		for _, value := range ids {
			if value != "" {
				wheres += "?,"
				args = append(args, value)
			}
		}
		wheres = wheres[:len(wheres)-1]
	} else {
		// parameter
		wheres, whereArgs, existKeys = params.Statement(wheres, tb.Info.Table, connection.GetDelimiter(), connection.GetDelimiter2(), whereArgs, columns, existKeys,
			tb.Info.FieldList.GetFieldFilterProcessValue)
		// pre query
		wheres, whereArgs = tb.Info.Wheres.Statement(wheres, connection.GetDelimiter(), connection.GetDelimiter2(), whereArgs, existKeys, columns)
		wheres, whereArgs = tb.Info.WhereRaws.Statement(wheres, whereArgs)
		if wheres != "" {
			wheres = " where " + wheres
		}
		// Pagination args: MSSQL uses ROWNUMBER_ bounds, others LIMIT/OFFSET.
		if connection.Name() == db.DriverMssql {
			args = append(whereArgs, (params.PageInt-1)*params.PageSizeInt, params.PageInt*params.PageSizeInt)
		} else {
			args = append(whereArgs, params.PageSizeInt, (params.PageInt-1)*params.PageSizeInt)
		}
	}
	groupBy := ""
	if len(joinTables) > 0 {
		// Joins can multiply rows; collapse them back to one per primary key.
		if connection.Name() == db.DriverMssql {
			groupBy = " GROUP BY " + groupFields
		} else {
			groupBy = " GROUP BY " + pk
		}
	}
	queryCmd := ""
	if connection.Name() == db.DriverMssql && len(ids) == 0 {
		queryCmd = fmt.Sprintf(queryStatement, tb.Info.Table, params.SortField, params.SortType,
			allFields, tb.Info.Table, joins, wheres, groupBy)
	} else {
		queryCmd = fmt.Sprintf(queryStatement, allFields, tb.Info.Table, joins, wheres, groupBy,
			tb.Info.Table, params.SortField, params.SortType)
	}
	logger.LogSQL(queryCmd, args)
	res, err := connection.QueryWithConnection(tb.connection, queryCmd, args...)
	if err != nil {
		return PanelInfo{}, err
	}
	infoList := make([]map[string]types.InfoItem, 0)
	for i := 0; i < len(res); i++ {
		infoList = append(infoList, tb.getTempModelData(res[i], params, columns))
	}
	// TODO: use the dialect
	var size int
	if len(ids) == 0 {
		// Each driver labels the count column differently; pick the right key.
		countCmd := fmt.Sprintf(countStatement, tb.Info.Table, joins, wheres, groupBy)
		total, err := connection.QueryWithConnection(tb.connection, countCmd, whereArgs...)
		if err != nil {
			return PanelInfo{}, err
		}
		logger.LogSQL(countCmd, nil)
		if tb.connectionDriver == "postgresql" {
			if tb.connectionDriverMode == "h2" {
				size = int(total[0]["count(*)"].(int64))
			} else if config.GetDatabases().GetDefault().DriverMode == "h2" {
				size = int(total[0]["count(*)"].(int64))
			} else {
				size = int(total[0]["count"].(int64))
			}
		} else if tb.connectionDriver == db.DriverMssql {
			size = int(total[0]["size"].(int64))
		} else {
			size = int(total[0]["count(*)"].(int64))
		}
	}
	endTime := time.Now()
	return PanelInfo{
		Thead:    thead,
		InfoList: infoList,
		Paginator: tb.GetPaginator(size, params,
			template.HTML(fmt.Sprintf("<b>"+language.Get("query time")+": </b>"+
				fmt.Sprintf("%.3fms", endTime.Sub(beginTime).Seconds()*1000)))),
		Title:          tb.Info.Title,
		FilterFormData: filterForm,
		Description:    tb.Info.Description,
	}, nil
}
// getDataRes returns the first row of list, or nil when list is empty.
// The ignored int matches the (rows, size) shape of the data-source funcs.
func getDataRes(list []map[string]interface{}, _ int) map[string]interface{} {
	if len(list) == 0 {
		return nil
	}
	return list[0]
}
// GetDataWithId query the single row of data.
func (tb *DefaultTable) GetDataWithId(param parameter.Parameters) (FormInfo, error) {
var (
res map[string]interface{}
columns Columns
id = param.PK()
)
if tb.getDataFun != nil {
res = getDataRes(tb.getDataFun(param))
} else if tb.sourceURL != "" {
res = getDataRes(tb.getDataFromURL(param))
} else if tb.Detail.GetDataFn != nil {
res = getDataRes(tb.Detail.GetDataFn(param))
} else if tb.Info.GetDataFn != nil {
res = getDataRes(tb.Info.GetDataFn(param))
} else {
columns, _ = tb.getColumns(tb.Form.Table)
var (
fields, joinFields, joins, groupBy = "", "", "", ""
err error
joinTables = make([]string, 0)
args = []interface{}{id}
connection = tb.db()
delimiter = connection.GetDelimiter()
delimiter2 = connection.GetDelimiter2()
tableName = modules.Delimiter(delimiter, delimiter2, tb.GetForm().Table)
pk = tableName + "." + modules.Delimiter(delimiter, delimiter2, tb.PrimaryKey.Name)
queryStatement = "select %s from %s %s where " + pk + " = ? %s "
)
for i := 0; i < len(tb.Form.FieldList); i++ {
if tb.Form.FieldList[i].Field != pk && modules.InArray(columns, tb.Form.FieldList[i].Field) &&
!tb.Form.FieldList[i].Joins.Valid() {
fields += tableName + "." + modules.FilterField(tb.Form.FieldList[i].Field, delimiter, delimiter2) + ","
}
if tb.Form.FieldList[i].Joins.Valid() {
headField := tb.Form.FieldList[i].Joins.Last().GetTableName() + parameter.FilterParamJoinInfix + tb.Form.FieldList[i].Field
joinFields += db.GetAggregationExpression(connection.Name(), tb.Form.FieldList[i].Joins.Last().GetTableName(delimiter, delimiter2)+"."+
modules.FilterField(tb.Form.FieldList[i].Field, delimiter, delimiter2), headField, types.JoinFieldValueDelimiter) + ","
for _, join := range tb.Form.FieldList[i].Joins {
if !modules.InArray(joinTables, join.GetTableName(delimiter, delimiter2)) {
joinTables = append(joinTables, join.GetTableName(delimiter, delimiter2))
if join.BaseTable == "" {
join.BaseTable = tableName
}
joins += " left join " + modules.FilterField(join.Table, delimiter, delimiter2) + " " + join.TableAlias + " on " +
join.GetTableName(delimiter, delimiter2) + "." + modules.FilterField(join.JoinField, delimiter, delimiter2) + " = " +
join.BaseTable + "." + modules.FilterField(join.Field, delimiter, delimiter2)
}
}
}
}
fields += pk
groupFields := fields
if joinFields != "" {
fields += "," + joinFields[:len(joinFields)-1]
if connection.Name() == db.DriverMssql {
for i := 0; i < len(tb.Form.FieldList); i++ {
if tb.Form.FieldList[i].TypeName == db.Text || tb.Form.FieldList[i].TypeName == db.Longtext {
f := modules.Delimiter(connection.GetDelimiter(), connection.GetDelimiter2(), tb.Form.FieldList[i].Field)
headField := tb.Info.Table + "." + f
fields = strings.ReplaceAll(fields, headField, "CAST("+headField+" AS NVARCHAR(MAX)) as "+f)
groupFields = strings.ReplaceAll(groupFields, headField, "CAST("+headField+" AS NVARCHAR(MAX))")
}
}
}
}
if len(joinTables) > 0 {
if connection.Name() == db.DriverMssql {
groupBy = " GROUP BY " + groupFields
} else {
groupBy = " GROUP BY " + pk
}
}
queryCmd := fmt.Sprintf(queryStatement, fields, tableName, joins, groupBy)
logger.LogSQL(queryCmd, args)
result, err := connection.QueryWithConnection(tb.connection, queryCmd, args...)
if err != nil {
return FormInfo{Title: tb.Form.Title, Description: tb.Form.Description}, err
}
if len(result) == 0 {
return FormInfo{Title: tb.Form.Title, Description: tb.Form.Description}, errors.New(errs.WrongID)
}
res = result[0]
}
var (
groupFormList = make([]types.FormFields, 0)
groupHeaders = make([]string, 0)
)
if len(tb.Form.TabGroups) > 0 {
groupFormList, groupHeaders = tb.Form.GroupFieldWithValue(tb.PrimaryKey.Name, id, columns, res, tb.sqlObjOrNil)
return FormInfo{
FieldList: tb.Form.FieldList,
GroupFieldList: groupFormList,
GroupFieldHeaders: groupHeaders,
Title: tb.Form.Title,
Description: tb.Form.Description,
}, nil
}
var fieldList = tb.Form.FieldsWithValue(tb.PrimaryKey.Name, id, columns, res, tb.sqlObjOrNil)
return FormInfo{
FieldList: fieldList,
GroupFieldList: groupFormList,
GroupFieldHeaders: groupHeaders,
Title: tb.Form.Title,
Description: tb.Form.Description,
}, nil
}
// UpdateData update data.
func (tb *DefaultTable) UpdateData(dataList form.Values) error {
dataList.Add(form.PostTypeKey, "0")
var (
errMsg = ""
err error
)
if tb.Form.PostHook != nil {
defer func() {
dataList.Add(form.PostTypeKey, "0")
dataList.Add(form.PostResultKey, errMsg)
go func() {
defer func() {
if err := recover(); err != nil {
logger.Error(err)
}
}()
err := tb.Form.PostHook(dataList)
if err != nil {
logger.Error(err)
}
}()
}()
}
if tb.Form.Validator != nil {
if err := tb.Form.Validator(dataList); err != nil {
errMsg = "post error: " + err.Error()
return err
}
}
if tb.Form.PreProcessFn != nil {
dataList = tb.Form.PreProcessFn(dataList)
}
if tb.Form.UpdateFn != nil {
dataList.Delete(form.PostTypeKey)
err = tb.Form.UpdateFn(tb.PreProcessValue(dataList, types.PostTypeUpdate))
if err != nil {
errMsg = "post error: " + err.Error()
}
return err
}
if len(dataList) == 0 {
return nil
}
_, err = tb.sql().Table(tb.Form.Table).
Where(tb.PrimaryKey.Name, "=", dataList.Get(tb.PrimaryKey.Name)).
Update(tb.getInjectValueFromFormValue(dataList, types.PostTypeUpdate))
// NOTE: some errors should be ignored.
if db.CheckError(err, db.UPDATE) {
if err != nil {
errMsg = "post error: " + err.Error()
}
return err
}
return nil
}
// InsertData insert data.
func (tb *DefaultTable) InsertData(dataList form.Values) error {
dataList.Add(form.PostTypeKey, "1")
var (
id = int64(0)
err error
errMsg = ""
f = tb.GetActualNewForm()
)
if f.PostHook != nil {
defer func() {
dataList.Add(form.PostTypeKey, "1")
dataList.Add(tb.GetPrimaryKey().Name, strconv.Itoa(int(id)))
dataList.Add(form.PostResultKey, errMsg)
go func() {
defer func() {
if err := recover(); err != nil {
logger.Error(err)
}
}()
err := f.PostHook(dataList)
if err != nil {
logger.Error(err)
}
}()
}()
}
if f.Validator != nil {
if err := f.Validator(dataList); err != nil {
errMsg = "post error: " + err.Error()
return err
}
}
if f.PreProcessFn != nil {
dataList = f.PreProcessFn(dataList)
}
if f.InsertFn != nil {
dataList.Delete(form.PostTypeKey)
err = f.InsertFn(tb.PreProcessValue(dataList, types.PostTypeCreate))
if err != nil {
errMsg = "post error: " + err.Error()
}
return err
}
if len(dataList) == 0 {
return nil
}
id, err = tb.sql().Table(f.Table).Insert(tb.getInjectValueFromFormValue(dataList, types.PostTypeCreate))
// NOTE: some errors should be ignored.
if db.CheckError(err, db.INSERT) {
errMsg = "post error: " + err.Error()
return err
}
return nil
}
// getInjectValueFromFormValue converts posted form values into the
// dialect.H map used by the actual INSERT/UPDATE statement.
//
// Bookkeeping keys (method/token/iframe, plus the primary key when it is
// auto-increment) are dropped. For keys matching real table columns the
// field's PostFilterFn (when set) transforms the value; otherwise multiple
// values are joined with the field's option delimiter. Keys that are not
// table columns still have their PostFilterFn invoked for side effects but
// contribute nothing to the result.
func (tb *DefaultTable) getInjectValueFromFormValue(dataList form.Values, typ types.PostType) dialect.H {
	var (
		value         = make(dialect.H)
		exceptString  = make([]string, 0)
		columns, auto = tb.getColumns(tb.Form.Table)
	)
	// If a key is a auto increment primary key, it can`t be insert or update.
	if auto {
		exceptString = []string{tb.PrimaryKey.Name, form.PreviousKey, form.MethodKey, form.TokenKey,
			constant.IframeKey, constant.IframeIDKey}
	} else {
		exceptString = []string{form.PreviousKey, form.MethodKey, form.TokenKey,
			constant.IframeKey, constant.IframeIDKey}
	}
	if !dataList.IsSingleUpdatePost() {
		// Multi-selects submit nothing at all when empty; inject an explicit
		// empty entry so the column is cleared rather than left untouched.
		for i := 0; i < len(tb.Form.FieldList); i++ {
			if tb.Form.FieldList[i].FormType.IsMultiSelect() {
				if _, ok := dataList[tb.Form.FieldList[i].Field+"[]"]; !ok {
					dataList[tb.Form.FieldList[i].Field+"[]"] = []string{""}
				}
			}
		}
	}
	dataList = dataList.RemoveRemark()
	for k, v := range dataList {
		k = strings.ReplaceAll(k, "[]", "")
		if modules.InArray(exceptString, k) {
			continue
		}
		if modules.InArray(columns, k) {
			// Bug fix: fun was previously declared once outside the loop, so
			// a key whose field has no PostFilterFn could inherit the filter
			// of a previously processed field. Scope it per iteration.
			var fun types.PostFieldFilterFn
			delimiter := ","
			field := tb.Form.FieldList.FindByFieldName(k)
			if field != nil {
				fun = field.PostFilterFn
				delimiter = modules.SetDefault(field.DefaultOptionDelimiter, ",")
			}
			vv := modules.RemoveBlankFromArray(v)
			if fun != nil {
				value[k] = fun(types.PostFieldModel{
					ID:       dataList.Get(tb.PrimaryKey.Name),
					Value:    vv,
					Row:      dataList.ToMap(),
					PostType: typ,
				})
			} else if len(vv) > 1 {
				value[k] = strings.Join(vv, delimiter)
			} else if len(vv) > 0 {
				value[k] = vv[0]
			} else {
				value[k] = ""
			}
		} else {
			// Not a table column: still run the filter for its side effects.
			field := tb.Form.FieldList.FindByFieldName(k)
			if field != nil && field.PostFilterFn != nil {
				field.PostFilterFn(types.PostFieldModel{
					ID:       dataList.Get(tb.PrimaryKey.Name),
					Value:    modules.RemoveBlankFromArray(v),
					Row:      dataList.ToMap(),
					PostType: typ,
				})
			}
		}
	}
	return value
}
// PreProcessValue runs each field's PostFilterFn over the posted values
// (bookkeeping keys excluded) and writes the filtered result back into
// dataList as a string. It returns the (possibly modified) dataList.
func (tb *DefaultTable) PreProcessValue(dataList form.Values, typ types.PostType) form.Values {
	exceptString := []string{form.PreviousKey, form.MethodKey, form.TokenKey,
		constant.IframeKey, constant.IframeIDKey}
	dataList = dataList.RemoveRemark()
	for k, v := range dataList {
		k = strings.ReplaceAll(k, "[]", "")
		if modules.InArray(exceptString, k) {
			continue
		}
		// Bug fix: fun was previously declared once outside the loop, so a
		// key with no matching field could reuse the previous key's filter.
		var fun types.PostFieldFilterFn
		if field := tb.Form.FieldList.FindByFieldName(k); field != nil {
			fun = field.PostFilterFn
		}
		if fun == nil {
			continue
		}
		vv := modules.RemoveBlankFromArray(v)
		dataList.Add(k, fmt.Sprintf("%s", fun(types.PostFieldModel{
			ID:       dataList.Get(tb.PrimaryKey.Name),
			Value:    vv,
			Row:      dataList.ToMap(),
			PostType: typ,
		})))
	}
	return dataList
}
// DeleteData delete data.
func (tb *DefaultTable) DeleteData(id string) error {
var (
idArr = strings.Split(id, ",")
err error
)
if tb.Info.DeleteHook != nil {
defer func() {
go func() {
defer func() {
if recoverErr := recover(); recoverErr != nil {
logger.Error(recoverErr)
}
}()
if hookErr := tb.Info.DeleteHook(idArr); hookErr != nil {
logger.Error(hookErr)
}
}()
}()
}
if tb.Info.DeleteHookWithRes != nil {
defer func() {
go func() {
defer func() {
if recoverErr := recover(); recoverErr != nil {
logger.Error(recoverErr)
}
}()
if hookErr := tb.Info.DeleteHookWithRes(idArr, err); hookErr != nil {
logger.Error(hookErr)
}
}()
}()
}
if tb.Info.PreDeleteFn != nil {
if err = tb.Info.PreDeleteFn(idArr); err != nil {
return err
}
}
if tb.Info.DeleteFn != nil {
err = tb.Info.DeleteFn(idArr)
return err
}
if len(idArr) == 0 || tb.Info.Table == "" {
err = errors.New("delete error: wrong parameter")
return err
}
err = tb.delete(tb.Info.Table, tb.PrimaryKey.Name, idArr)
return err
}
// GetNewFormInfo builds the field list shown on the "new record" form,
// grouped into tabs when tab groups are configured on the form.
func (tb *DefaultTable) GetNewFormInfo() FormInfo {
	f := tb.GetActualNewForm()
	if len(f.TabGroups) > 0 {
		groups, headers := f.GroupField(tb.sqlObjOrNil)
		return FormInfo{GroupFieldList: groups, GroupFieldHeaders: headers}
	}
	return FormInfo{FieldList: f.FieldsWithDefaultValue(tb.sqlObjOrNil)}
}
// ***************************************
// helper function for database operation
// ***************************************
// delete removes every row of table whose key column matches one of values.
func (tb *DefaultTable) delete(table, key string, values []string) error {
	args := make([]interface{}, 0, len(values))
	for _, v := range values {
		args = append(args, v)
	}
	return tb.sql().Table(table).WhereIn(key, args).Delete()
}
// getTheadAndFilterForm delegates to the info field list to build the table
// header plus the filter-form pieces (field string, joins, join fields,
// filterable field names, and the filter form definition) for the current
// driver and delimiters.
func (tb *DefaultTable) getTheadAndFilterForm(params parameter.Parameters, columns Columns) (types.Thead,
	string, string, string, []string, []types.FormField) {
	return tb.Info.FieldList.GetTheadAndFilterForm(types.TableInfo{
		Table:      tb.Info.Table,
		Delimiter:  tb.delimiter(),
		Delimiter2: tb.delimiter2(),
		Driver:     tb.connectionDriver,
		PrimaryKey: tb.PrimaryKey.Name,
	}, params, columns, tb.sqlObjOrNil)
}
// db is a helper function return raw db connection.
func (tb *DefaultTable) db() db.Connection {
if tb.dbObj == nil {
tb.dbObj = db.GetConnectionFromService(services.Get(tb.connectionDriver))
}
return tb.dbObj
}
// delimiter returns the driver's opening identifier quote, or "" when the
// table's rows do not come from the database.
func (tb *DefaultTable) delimiter() string {
	if !tb.getDataFromDB() {
		return ""
	}
	return tb.db().GetDelimiter()
}
// delimiter2 returns the driver's closing identifier quote, or "" when the
// table's rows do not come from the database.
func (tb *DefaultTable) delimiter2() string {
	if !tb.getDataFromDB() {
		return ""
	}
	return tb.db().GetDelimiter2()
}
// getDataFromDB reports whether rows are fetched directly from the database,
// i.e. no URL source and no custom data functions are configured.
func (tb *DefaultTable) getDataFromDB() bool {
	return tb.sourceURL == "" && tb.getDataFun == nil && tb.Info.GetDataFn == nil && tb.Detail.GetDataFn == nil
}
// sql is a helper function return db sql.
func (tb *DefaultTable) sql() *db.SQL {
return db.WithDriverAndConnection(tb.connection, tb.db())
}
// sqlObjOrNil is a helper function return db sql obj or nil.
func (tb *DefaultTable) sqlObjOrNil() *db.SQL {
if tb.connectionDriver != "" && tb.getDataFromDB() {
return db.WithDriverAndConnection(tb.connection, tb.db())
}
return nil
}
// Columns is the list of column names of a database table.
type Columns []string

// getColumns lists the column names of table and reports whether its
// primary key is auto-generated. Detection is driver-specific: postgres
// looks for a nextval() default, mysql for the auto_increment extra,
// sqlite for a sqlite_sequence entry, and mssql always reports true.
// Panics on an unsupported driver.
func (tb *DefaultTable) getColumns(table string) (Columns, bool) {
	columnsModel, _ := tb.sql().Table(table).ShowColumns()
	columns := make(Columns, len(columnsModel))
	switch tb.connectionDriver {
	case db.DriverPostgresql:
		auto := false
		for key, model := range columnsModel {
			columns[key] = model["column_name"].(string)
			if columns[key] == tb.PrimaryKey.Name {
				if v, ok := model["column_default"].(string); ok {
					// Serial/identity columns default to nextval('...').
					if strings.Contains(v, "nextval") {
						auto = true
					}
				}
			}
		}
		return columns, auto
	case db.DriverMysql:
		auto := false
		for key, model := range columnsModel {
			columns[key] = model["Field"].(string)
			if columns[key] == tb.PrimaryKey.Name {
				if v, ok := model["Extra"].(string); ok {
					if v == "auto_increment" {
						auto = true
					}
				}
			}
		}
		return columns, auto
	case db.DriverSqlite:
		for key, model := range columnsModel {
			columns[key] = string(model["name"].(string))
		}
		// sqlite tracks AUTOINCREMENT tables in the sqlite_sequence table.
		num, _ := tb.sql().Table("sqlite_sequence").
			Where("name", "=", tb.GetForm().Table).Count()
		return columns, num > 0
	case db.DriverMssql:
		for key, model := range columnsModel {
			columns[key] = string(model["column_name"].(string))
		}
		return columns, true
	default:
		panic("wrong driver")
	}
}
|
// Start an HTTP GraphQL API server, which is loaded multiple Databases for serving
package main
import (
"log"
"net/http"
"time"
graphql "github.com/graph-gophers/graphql-go"
"github.com/tonyghita/graphql-go-example/handler"
"loader"
)
func main(){
// Tweakable Arugments
var (
port = ":8000"
readHeaderTimeout = 1 * time.Second
writeTimeOut = 10 * time.Second
idleTimeout = 90 * time.Second
maxHeaderBytes = http.DefaultMaxHeaderBytes
)
log.SetFlags(log.Lshortfile | log. LstdFlags)
// Register handlers to routes.
mux := http.NewServeMux()
mux.Handle("/", handler.GraphiQL{})
mux.Handle("/graphql/", h)
mux.Handle("/graphql", h) // Register without a trailing slash to avoid redirect.
// Configure the HTTP server
server := &http.Server{
Addr: port,
Handler: mux,
ReadHeaderTimeout: readHeaderTimeout,
WriteTimeout: writeTimeOut,
IdleTimeout: idleTimeout,
MaxHeaderBytes: maxHeaderBytes
}
// Begin listing for requests.
log.Printf("Listing for requests on %s", server.Addr)
if err = server.ListenAndServe(); err != nil{
log.Println("server.ListenAndServer:", err)
}
log.Println("Shut down.")
}
|
package end
import (
"testing"
. "github.com/bborbe/assert"
io_mock "github.com/bborbe/io/mock"
"github.com/bborbe/server/renderer"
)
func TestImplementsRenderer(t *testing.T) {
r := NewEndRenderer()
var i (*renderer.Renderer) = nil
err := AssertThat(r, Implements(i).Message("check implements renderer.Renderer"))
if err != nil {
t.Fatal(err)
}
}
// TestOutput renders into a mock writer and checks that the output is
// non-empty and contains the expected "end" table cell markup.
func TestOutput(t *testing.T) {
	endRenderer := NewEndRenderer()
	writer := io_mock.NewWriter()
	if err := endRenderer.Render(writer); err != nil {
		t.Fatal(err)
	}
	content := writer.Content()
	if err := AssertThat(len(content), Gt(0)); err != nil {
		t.Fatal(err)
	}
	if err := AssertThat(string(content), Contains(`<td class="end node"><div class="content"><a href="#start">end</a></div></td>`)); err != nil {
		t.Fatal(err)
	}
}
|
package main
import "fmt"
// Test1 is a sample struct carrying a single int, used to demonstrate
// passing a closure-based predicate to Test.
type Test1 struct {
	is int // value the predicate in main compares against
}
func main() {
i := 5
Test(func(test1 Test1) bool {
return i == test1.is
})
}
// Test applies the predicate f to a fixed Test1{2} and prints the result.
func Test(f func(Test1) bool) {
	sample := Test1{2}
	fmt.Println(f(sample))
}
|
//go:build !ASCII
// +build !ASCII
package es
// Mirrors the UNICODE macro path of the C headers: the wide-character (W)
// variants are the default bindings whenever the ASCII build tag is absent.
var (
	EverythingSetSearch = everythingSetSearchW
	EverythingGetSearch = everythingGetSearchW
	EverythingQuery = everythingQueryW
	EverythingGetResultFileName = everythingGetResultFileNameW
	EverythingGetResultPath = everythingGetResultPathW
	EverythingGetResultFullPathName = everythingGetResultFullPathNameW
	EverythingGetResultExtension = everythingGetResultExtensionW
	EverythingGetResultFileListFileName = everythingGetResultFileListFileNameW
	EverythingGetResultHighlightedFileName = everythingGetResultHighlightedFileNameW
	EverythingGetResultHighlightedPath = everythingGetResultHighlightedPathW
	EverythingGetResultHighlightedFullPathAndFileName = everythingGetResultHighlightedFullPathAndFileNameW
	EverythingGetRunCountFromFileName = everythingGetRunCountFromFileNameW
	EverythingSetRunCountFromFileName = everythingSetRunCountFromFileNameW
	EverythingIncRunCountFromFileName = everythingIncRunCountFromFileNameW
)
|
// ImgChangeInfo
package DaeseongLib
import (
_ "fmt"
"image"
"image/gif"
"image/jpeg"
"image/png"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"unsafe"
)
var (
	// kernel32B lazily loads kernel32.dll; GetModuleFileNameProc resolves
	// the wide-character GetModuleFileNameW entry point on first Call.
	kernel32B = syscall.NewLazyDLL("kernel32.dll")
	GetModuleFileNameProc = kernel32B.NewProc("GetModuleFileNameW")
)
// GetModuleFileName returns the full path of the current executable via the
// Win32 GetModuleFileNameW API (module handle 0 = this process), or ""
// when the call fails.
func GetModuleFileName() string {
	var wpath [syscall.MAX_PATH]uint16
	// r1 is the number of UTF-16 characters written; 0 signals failure.
	r1, _, _ := GetModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&wpath[0])), uintptr(len(wpath)))
	if r1 == 0 {
		return ""
	}
	return syscall.UTF16ToString(wpath[:])
}
func GetModulePath() string {
var wpath [syscall.MAX_PATH]uint16
r1, _, _ := GetModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&wpath[0])), uintptr(len(wpath)))
if r1 == 0 {
return ""
}
return filepath.Dir(syscall.UTF16ToString(wpath[:]))
}
// CreateFolder ensures the PNG, JPG and GIF output folders exist next to
// the executable, creating any that are missing (mode 0744).
//
// The original repeated the stat+mkdir sequence three times; it is folded
// into one loop with identical behavior.
func CreateFolder() {
	base := GetModulePath()
	for _, sub := range []string{"PNG", "JPG", "GIF"} {
		dir := filepath.Join(base, sub)
		if _, err := os.Stat(dir); os.IsNotExist(err) {
			// MkdirAll errors are intentionally ignored, matching the
			// original best-effort behavior.
			os.MkdirAll(dir, 0744)
		}
	}
}
// GetFilePath returns the directory component of sFilename.
func GetFilePath(sFilename string) string {
	dir := filepath.Dir(sFilename)
	return dir
}
// GetFileName returns the last path element (base name) of sFilename.
func GetFileName(sFilename string) string {
	base := filepath.Base(sFilename)
	return base
}
var (
	// allimgList accumulates every image path found by FindimageList.
	// NOTE(review): package-level mutable state that is never reset, so
	// repeated scans append to earlier results — confirm that is intended.
	allimgList = []string{}
)

// FindimageList walks sPath recursively and appends every *.gif, *.png and
// *.jpg file (extension compared upper-cased) to the package-level
// allimgList. It panics when a directory cannot be read.
func FindimageList(sPath string) {
	dirlst, err := ioutil.ReadDir(sPath)
	if err != nil {
		panic(err)
	}
	for _, filelst := range dirlst {
		sDir := filepath.Join(sPath, filelst.Name())
		if filelst.IsDir() {
			// Recurse into subdirectories.
			FindimageList(sDir)
		} else {
			sExt := strings.ToUpper(filepath.Ext(filelst.Name()))
			if strings.HasSuffix(sExt, ".GIF") || strings.HasSuffix(sExt, ".PNG") || strings.HasSuffix(sExt, ".JPG") {
				allimgList = append(allimgList, sDir)
			}
		}
	}
}
// ImageSize reports the width and height of the image at sPath.
// It returns (0, 0) when the file cannot be opened or its header decoded.
func ImageSize(sPath string) (int, int) {
	f, err := os.Open(sPath)
	if err != nil {
		return 0, 0
	}
	defer f.Close()

	// DecodeConfig reads only the header, not the full pixel data.
	cfg, _, err := image.DecodeConfig(f)
	if err != nil {
		return 0, 0
	}
	return cfg.Width, cfg.Height
}
// LoadImage decodes the image file at sPath and returns the image together
// with its format name ("png", "jpeg", ...). On failure it returns
// (nil, "", err).
func LoadImage(sPath string) (image.Image, string, error) {
	f, err := os.Open(sPath)
	if err != nil {
		return nil, "", err
	}
	defer f.Close()

	img, format, err := image.Decode(f)
	if err != nil {
		return nil, "", err
	}
	return img, format, nil
}
// SaveToFileImage re-encodes the image at sPath into one of the output
// folders next to the executable:
//
//	nImage == 1 -> PNG/<name>.png
//	nImage == 2 -> JPG/<name>.jpg (quality 75)
//	nImage == 3 -> GIF/<name>.gif (256 colors)
//
// It returns false when the source cannot be read or decoded, the
// destination cannot be created, the encode fails (previously ignored), or
// nImage is not 1..3.
func SaveToFileImage(sPath string, nImage int) bool {
	filename := GetFileName(sPath)
	// Strip the extension. Bug fix: filename[:strings.LastIndex(...)]
	// previously panicked on names without a dot (index -1).
	onlyFilename := filename
	if idx := strings.LastIndex(filename, "."); idx >= 0 {
		onlyFilename = filename[:idx]
	}

	var sSavedPath string
	switch nImage {
	case 1:
		sSavedPath = filepath.Join(GetModulePath(), "PNG", onlyFilename+".png")
	case 2:
		sSavedPath = filepath.Join(GetModulePath(), "JPG", onlyFilename+".jpg")
	case 3:
		sSavedPath = filepath.Join(GetModulePath(), "GIF", onlyFilename+".gif")
	default:
		// Previously an unknown nImage fell through to os.Create("").
		return false
	}

	readfile, err := os.Open(sPath)
	if err != nil {
		return false
	}
	defer readfile.Close()

	readimg, _, err := image.Decode(readfile)
	if err != nil {
		return false
	}

	savefile, err := os.Create(sSavedPath)
	if err != nil {
		return false
	}
	defer savefile.Close()

	switch nImage {
	case 1:
		err = png.Encode(savefile, readimg)
	case 2:
		err = jpeg.Encode(savefile, readimg, &jpeg.Options{Quality: 75})
	case 3:
		err = gif.Encode(savefile, readimg, &gif.Options{NumColors: 256})
	}
	// Bug fix: the encode error was previously discarded and the function
	// returned true whenever the destination file could be created.
	return err == nil
}
/*
func f1() {
dir1, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
fmt.Println(err)
}
fmt.Println(dir1)
dir2, err := os.Getwd()
if err != nil {
fmt.Println(err)
}
fmt.Println(dir2)
dir3 := GetModulePath()
fmt.Println(dir3)
}
func f2() {
sFolderPath := "D:\\image"
FindimageList(sFolderPath)
for _, file := range allimgList {
imgwidht, imgheight := ImageSize(file)
fmt.Println("width:", imgwidht, "height:", imgheight)
_, format, _ := LoadImage(file)
fmt.Println(format)
}
}
func f3() {
CreateFolder()
sFolderPath := "D:\\image"
FindimageList(sFolderPath)
for _, file := range allimgList {
SaveToFileImage(file, 1)
SaveToFileImage(file, 2)
SaveToFileImage(file, 3)
}
}
func main() {
f3()
}
*/
|
package resources
import (
"encoding/json"
"net/http"
"github.com/gorilla/mux"
"github.com/shwetha-pingala/HyperledgerProject/InvoiveProject/go-api/models"
ResourcesModel "github.com/shwetha-pingala/HyperledgerProject/InvoiveProject/go-api/models/v1/resources"
"github.com/shwetha-pingala/HyperledgerProject/InvoiveProject/go-api/hyperledger"
)
// Update returns an HTTP handler that updates the resource identified by
// the {id} route variable using the JSON request body. A PUT request
// replaces the resource entirely; any other method (e.g. PATCH) performs a
// partial update. The updated resource is written back as JSON.
func Update(clients *hyperledger.Clients) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		id := mux.Vars(r)["id"]

		var opts ResourcesModel.UpdateOpts
		var resource models.Resource

		defer r.Body.Close()
		if err := json.NewDecoder(r.Body).Decode(&resource); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		// PUT means full replacement; anything else merges fields.
		if r.Method == http.MethodPut {
			opts.Replace = true
		}

		updatedResource, err := ResourcesModel.Update(clients, id, &resource, &opts)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		packet, err := json.Marshal(updatedResource)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		// Fix: declare the payload as JSON (no Content-Type was set before).
		w.Header().Set("Content-Type", "application/json")
		w.Write(packet)
	}
}
|
package handlers
import (
rest "github.com/danteay/ginrest"
"github.com/gin-gonic/gin"
)
// PingHandler is a simple get endpoint that can be used for healt check
func PingHandler() func(c *gin.Context) {
return func(c *gin.Context) {
u := c.Request.RequestURI
r := rest.New(u, "").SetGin(c)
r.Res(200, rest.Payload{
"status": "success",
"object": "lanago.get.ping",
"code": 200,
}, "pong")
return
}
}
|
package main
import (
"crypto/tls"
"crypto/x509"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"runtime"
"sync"
"time"
"github.com/gholt/brimtime"
gp "github.com/pandemicsyn/oort/api/groupproto"
vp "github.com/pandemicsyn/oort/api/valueproto"
"github.com/pkg/profile"
"github.com/spaolacci/murmur3"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
type Scrambled struct {
r rand.Source
}
func NewScrambled() *Scrambled {
return &Scrambled{r: rand.NewSource(time.Now().UnixNano())}
}
func (s *Scrambled) Read(bs []byte) {
for i := len(bs) - 1; i >= 0; {
v := s.r.Int63()
for j := 7; i >= 0 && j >= 0; j-- {
bs[i] = byte(v)
i--
v >>= 8
}
}
}
// OnlyLogIf logs err when it is non-nil; nil errors are silently ignored.
func OnlyLogIf(err error) {
	if err == nil {
		return
	}
	log.Println(err)
}
// omg aborts the process via log.Fatalln when err is non-nil; a nil error
// is a no-op.
func omg(err error) {
	if err == nil {
		return
	}
	log.Fatalln(err)
}
// ValueClientConfig carries the per-worker state for the ValueStore
// benchmarks: the worker id, the number of requests, pre-built write/read
// requests, a pointer to the shared payload, the target server address,
// and the WaitGroup used to join the workers.
type ValueClientConfig struct {
	id int
	count int
	wm []*vp.WriteRequest
	rm []*vp.ReadRequest
	value *[]byte
	addr string
	wg *sync.WaitGroup
}
// GroupClientConfig mirrors ValueClientConfig for the GroupStore
// benchmarks, holding group-store write/read requests instead.
type GroupClientConfig struct {
	id int
	count int
	wm []*gp.WriteRequest
	rm []*gp.ReadRequest
	value *[]byte
	addr string
	wg *sync.WaitGroup
}
// ValueStreamWrite replays c.wm over a single StreamWrite RPC against the
// ValueStore at c.addr, logging any response whose timestamp is newer than
// the one sent. It signals c.wg when it returns.
func ValueStreamWrite(c *ValueClientConfig) {
	defer c.wg.Done()
	var opts []grpc.DialOption
	var creds credentials.TransportAuthenticator
	creds = credentials.NewTLS(&tls.Config{
		InsecureSkipVerify: true,
	})
	opts = append(opts, grpc.WithTransportCredentials(creds))
	conn, err := grpc.Dial(c.addr, opts...)
	if err != nil {
		log.Fatalln(fmt.Sprintf("Failed to dial server: %s", err))
	}
	defer conn.Close()
	client := vp.NewValueStoreClient(conn)
	empty := []byte("")
	stream, err := client.StreamWrite(context.Background())
	// Bug fix: the stream-creation error was previously ignored, which
	// would nil-deref on stream.Send below.
	if err != nil {
		log.Println(err)
		return
	}
	for i := range c.wm {
		c.wm[i].Value = *c.value
		if err := stream.Send(c.wm[i]); err != nil {
			log.Println(err)
			continue
		}
		res, err := stream.Recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			log.Println(err)
			continue
		}
		if res.TimestampMicro > c.wm[i].TimestampMicro {
			log.Printf("TSM is newer than attempted, Key %d-%d Got %s, Sent: %s", c.id, i, brimtime.UnixMicroToTime(res.TimestampMicro), brimtime.UnixMicroToTime(c.wm[i].TimestampMicro))
		}
		// Release the payload reference so only one copy stays live.
		c.wm[i].Value = empty
	}
	stream.CloseSend()
}
// GroupStreamWrite replays c.wm over a single StreamWrite RPC against the
// GroupStore at c.addr, logging any response whose timestamp is newer than
// the one sent. It signals c.wg when it returns.
func GroupStreamWrite(c *GroupClientConfig) {
	defer c.wg.Done()
	var opts []grpc.DialOption
	var creds credentials.TransportAuthenticator
	creds = credentials.NewTLS(&tls.Config{
		InsecureSkipVerify: true,
	})
	opts = append(opts, grpc.WithTransportCredentials(creds))
	conn, err := grpc.Dial(c.addr, opts...)
	if err != nil {
		log.Fatalln(fmt.Sprintf("Failed to dial server: %s", err))
	}
	defer conn.Close()
	client := gp.NewGroupStoreClient(conn)
	empty := []byte("")
	stream, err := client.StreamWrite(context.Background())
	// Bug fix: the stream-creation error was previously ignored, which
	// would nil-deref on stream.Send below.
	if err != nil {
		log.Println(err)
		return
	}
	for i := range c.wm {
		c.wm[i].Value = *c.value
		if err := stream.Send(c.wm[i]); err != nil {
			log.Println(err)
			continue
		}
		res, err := stream.Recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			log.Println(err)
			continue
		}
		if res.TimestampMicro > c.wm[i].TimestampMicro {
			log.Printf("TSM is newer than attempted, Key %d-%d Got %s, Sent: %s", c.id, i, brimtime.UnixMicroToTime(res.TimestampMicro), brimtime.UnixMicroToTime(c.wm[i].TimestampMicro))
		}
		// Release the payload reference so only one copy stays live.
		c.wm[i].Value = empty
	}
	stream.CloseSend()
}
// ValueStreamRead replays c.rm over a single StreamRead RPC against the
// ValueStore at c.addr, logging per-request errors. It signals c.wg when
// it returns.
func ValueStreamRead(c *ValueClientConfig) {
	defer c.wg.Done()
	var opts []grpc.DialOption
	var creds credentials.TransportAuthenticator
	creds = credentials.NewTLS(&tls.Config{
		InsecureSkipVerify: true,
	})
	opts = append(opts, grpc.WithTransportCredentials(creds))
	conn, err := grpc.Dial(c.addr, opts...)
	if err != nil {
		log.Fatalln(fmt.Sprintf("Failed to dial server: %s", err))
	}
	defer conn.Close()
	client := vp.NewValueStoreClient(conn)
	stream, err := client.StreamRead(context.Background())
	// Bug fix: the stream-creation error was previously ignored, which
	// would nil-deref on stream.Send below.
	if err != nil {
		log.Println(err)
		return
	}
	for i := range c.rm {
		if err := stream.Send(c.rm[i]); err != nil {
			log.Println(err)
			continue
		}
		_, err := stream.Recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			log.Println(err)
			continue
		}
	}
	stream.CloseSend()
}
// GroupStreamRead replays c.rm over a single StreamRead RPC against the
// GroupStore at c.addr, logging per-request errors. It signals c.wg when
// it returns.
func GroupStreamRead(c *GroupClientConfig) {
	defer c.wg.Done()
	var opts []grpc.DialOption
	var creds credentials.TransportAuthenticator
	creds = credentials.NewTLS(&tls.Config{
		InsecureSkipVerify: true,
	})
	opts = append(opts, grpc.WithTransportCredentials(creds))
	conn, err := grpc.Dial(c.addr, opts...)
	if err != nil {
		log.Fatalln(fmt.Sprintf("Failed to dial server: %s", err))
	}
	defer conn.Close()
	client := gp.NewGroupStoreClient(conn)
	stream, err := client.StreamRead(context.Background())
	// Bug fix: the stream-creation error was previously ignored, which
	// would nil-deref on stream.Send below.
	if err != nil {
		log.Println(err)
		return
	}
	for i := range c.rm {
		if err := stream.Send(c.rm[i]); err != nil {
			log.Println(err)
			continue
		}
		_, err := stream.Recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			log.Println(err)
			continue
		}
	}
	stream.CloseSend()
}
// ValueWrite issues one unary Write per prepared request against the
// ValueStore at c.addr, stamping each with the current time and logging
// responses whose timestamp is newer than the one sent. It signals c.wg
// when it returns.
func ValueWrite(c *ValueClientConfig) {
	defer c.wg.Done()
	var opts []grpc.DialOption
	var creds credentials.TransportAuthenticator
	creds = credentials.NewTLS(&tls.Config{
		InsecureSkipVerify: true,
	})
	opts = append(opts, grpc.WithTransportCredentials(creds))
	conn, err := grpc.Dial(c.addr, opts...)
	if err != nil {
		log.Fatalln(fmt.Sprintf("Failed to dial server: %s", err))
	}
	defer conn.Close()
	client := vp.NewValueStoreClient(conn)
	w := &vp.WriteRequest{
		Value: *c.value,
	}
	empty := []byte("")
	for i := range c.wm {
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
		c.wm[i].Value = *c.value
		w.TimestampMicro = brimtime.TimeToUnixMicro(time.Now())
		res, err := client.Write(ctx, c.wm[i])
		if err != nil {
			log.Println("Client", c.id, ":", err)
			// Bug fix: res is nil when err != nil; previously the
			// timestamp comparison below dereferenced it and panicked.
			c.wm[i].Value = empty
			continue
		}
		if res.TimestampMicro > w.TimestampMicro {
			log.Printf("TSM is newer than attempted, Key %d-%d Got %s, Sent: %s", c.id, i, brimtime.UnixMicroToTime(res.TimestampMicro), brimtime.UnixMicroToTime(w.TimestampMicro))
		}
		c.wm[i].Value = empty
	}
}
// GroupWrite issues one unary Write per prepared request against the
// GroupStore at c.addr, stamping each with the current time and logging
// responses whose timestamp is newer than the one sent. It signals c.wg
// when it returns.
func GroupWrite(c *GroupClientConfig) {
	defer c.wg.Done()
	var opts []grpc.DialOption
	var creds credentials.TransportAuthenticator
	creds = credentials.NewTLS(&tls.Config{
		InsecureSkipVerify: true,
	})
	opts = append(opts, grpc.WithTransportCredentials(creds))
	conn, err := grpc.Dial(c.addr, opts...)
	if err != nil {
		log.Fatalln(fmt.Sprintf("Failed to dial server: %s", err))
	}
	defer conn.Close()
	client := gp.NewGroupStoreClient(conn)
	w := &gp.WriteRequest{
		Value: *c.value,
	}
	empty := []byte("")
	for i := range c.wm {
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
		c.wm[i].Value = *c.value
		w.TimestampMicro = brimtime.TimeToUnixMicro(time.Now())
		res, err := client.Write(ctx, c.wm[i])
		if err != nil {
			log.Println("Client", c.id, ":", err)
			// Bug fix: res is nil when err != nil; previously the
			// timestamp comparison below dereferenced it and panicked.
			c.wm[i].Value = empty
			continue
		}
		if res.TimestampMicro > w.TimestampMicro {
			log.Printf("TSM is newer than attempted, Key %d-%d Got %s, Sent: %s", c.id, i, brimtime.UnixMicroToTime(res.TimestampMicro), brimtime.UnixMicroToTime(w.TimestampMicro))
		}
		c.wm[i].Value = empty
	}
}
// ValueRead issues one unary Read per prepared request against the
// ValueStore at c.addr, logging failures. It signals c.wg when it returns.
func ValueRead(c *ValueClientConfig) {
	defer c.wg.Done()
	var creds credentials.TransportAuthenticator
	creds = credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
	conn, err := grpc.Dial(c.addr, opts...)
	if err != nil {
		log.Fatalln(fmt.Sprintf("Failed to dial server: %s", err))
	}
	defer conn.Close()
	client := vp.NewValueStoreClient(conn)
	for i := range c.rm {
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
		if _, err := client.Read(ctx, c.rm[i]); err != nil {
			log.Println("Client", c.id, ":", err)
		}
	}
}
// GroupRead issues one unary Read per prepared request against the
// GroupStore at c.addr, logging failures. It signals c.wg when it returns.
func GroupRead(c *GroupClientConfig) {
	defer c.wg.Done()
	var creds credentials.TransportAuthenticator
	creds = credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
	conn, err := grpc.Dial(c.addr, opts...)
	if err != nil {
		log.Fatalln(fmt.Sprintf("Failed to dial server: %s", err))
	}
	defer conn.Close()
	client := gp.NewGroupStoreClient(conn)
	for i := range c.rm {
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
		if _, err := client.Read(ctx, c.rm[i]); err != nil {
			log.Println("Client", c.id, ":", err)
		}
	}
}
// newClientTLSFromFile builds a client tls.Config trusting the CA
// certificate(s) in certFile, verifying against serverName, with
// verification optionally disabled via SkipVerify.
func newClientTLSFromFile(certFile, serverName string, SkipVerify bool) (*tls.Config, error) {
	pemBytes, err := ioutil.ReadFile(certFile)
	if err != nil {
		return &tls.Config{}, err
	}
	pool := x509.NewCertPool()
	if ok := pool.AppendCertsFromPEM(pemBytes); !ok {
		return &tls.Config{}, fmt.Errorf("failed to append certificates for client ca store")
	}
	cfg := &tls.Config{
		ServerName:         serverName,
		RootCAs:            pool,
		InsecureSkipVerify: SkipVerify,
	}
	return cfg, nil
}
// VSTests pre-builds one write and one read request per key for every
// worker, then runs the ValueStore write and/or read benchmarks selected
// by the vswrite/vsread flags (streaming or unary per the stream flag),
// logging totals and per-second rates.
func VSTests() {
	vsconfigs := make([]ValueClientConfig, *clients)
	var wg sync.WaitGroup
	for w := 0; w < *clients; w++ {
		vsconfigs[w].addr = *vsServer
		vsconfigs[w].id = w
		vsconfigs[w].count = perClient
		vsconfigs[w].value = &value
		vsconfigs[w].wg = &wg
		vsconfigs[w].wm = make([]*vp.WriteRequest, perClient)
		vsconfigs[w].rm = make([]*vp.ReadRequest, perClient)
		for k := 0; k < perClient; k++ {
			// Keys are murmur3 hashes of "somethingtestkey<worker>-<n>",
			// so reads target exactly the keys the writes created.
			vsconfigs[w].wm[k] = &vp.WriteRequest{}
			vsconfigs[w].rm[k] = &vp.ReadRequest{}
			vsconfigs[w].wm[k].KeyA, vsconfigs[w].wm[k].KeyB = murmur3.Sum128([]byte(fmt.Sprintf("somethingtestkey%d-%d", vsconfigs[w].id, k)))
			vsconfigs[w].wm[k].TimestampMicro = brimtime.TimeToUnixMicro(time.Now())
			vsconfigs[w].rm[k].KeyA = vsconfigs[w].wm[k].KeyA
			vsconfigs[w].rm[k].KeyB = vsconfigs[w].wm[k].KeyB
		}
	}
	log.Println("ValueStore Key/hash generation complete. Spawning tests.")
	// ValueStore Tests
	if *vsWriteTest {
		t := time.Now()
		for w := 0; w < *clients; w++ {
			wg.Add(1)
			if *streamTest {
				go ValueStreamWrite(&vsconfigs[w])
			} else {
				go ValueWrite(&vsconfigs[w])
			}
		}
		wg.Wait()
		log.Println("Issued", *clients*perClient, "VS WRITES")
		ts := time.Since(t).Seconds()
		log.Println("Total run time was:", ts, "seconds")
		log.Printf("Per second: %.2f\n", float64(*clients*perClient)/ts)
	}
	if *vsReadTest {
		t := time.Now()
		for w := 0; w < *clients; w++ {
			wg.Add(1)
			if *streamTest {
				go ValueStreamRead(&vsconfigs[w])
			} else {
				go ValueRead(&vsconfigs[w])
			}
		}
		wg.Wait()
		log.Println("Issued", *clients*perClient, "VS READS")
		ts := time.Since(t).Seconds()
		log.Println("Total run time was:", ts, "seconds")
		log.Printf("Per second: %.2f\n", float64(*clients*perClient)/ts)
	}
}
// GSTests pre-builds the GroupStore requests — perClient keys per worker,
// split evenly across *groups group keys — then runs the GroupStore write
// and/or read benchmarks selected by the gswrite/gsread flags (streaming
// or unary per the stream flag), logging totals and per-second rates.
func GSTests() {
	gsconfigs := make([]GroupClientConfig, *clients)
	var wg sync.WaitGroup
	for w := 0; w < *clients; w++ {
		gsconfigs[w].addr = *gsServer
		gsconfigs[w].id = w
		gsconfigs[w].count = perClient
		gsconfigs[w].value = &value
		gsconfigs[w].wg = &wg
		perGroup := perClient / *groups
		for g := 0; g < *groups; g++ {
			// Each group gets its own hashed parent key; members are
			// distinguished by their child keys.
			grpA, grpB := murmur3.Sum128([]byte(fmt.Sprintf("group%d-%d", gsconfigs[w].id, g)))
			for k := 0; k < perGroup; k++ {
				tsm := brimtime.TimeToUnixMicro(time.Now())
				wr := &gp.WriteRequest{
					KeyA: grpA,
					KeyB: grpB,
					TimestampMicro: tsm,
				}
				wr.ChildKeyA, wr.ChildKeyB = murmur3.Sum128([]byte(fmt.Sprintf("somethingtestkey%d-%d", gsconfigs[w].id, k)))
				rr := &gp.ReadRequest{
					KeyA: grpA,
					KeyB: grpB,
					ChildKeyA: wr.ChildKeyA,
					ChildKeyB: wr.ChildKeyB,
				}
				gsconfigs[w].wm = append(gsconfigs[w].wm, wr)
				gsconfigs[w].rm = append(gsconfigs[w].rm, rr)
			}
		}
	}
	log.Println("GroupStore Key/hash generation complete. Spawning tests.")
	if *gsWriteTest {
		t := time.Now()
		for w := 0; w < *clients; w++ {
			wg.Add(1)
			if *streamTest {
				go GroupStreamWrite(&gsconfigs[w])
			} else {
				go GroupWrite(&gsconfigs[w])
			}
		}
		wg.Wait()
		log.Println("Issued", *clients*perClient, "GS WRITES")
		ts := time.Since(t).Seconds()
		log.Println("Total run time was:", ts, "seconds")
		log.Printf("Per second: %.2f\n", float64(*clients*perClient)/ts)
	}
	if *gsReadTest {
		t := time.Now()
		for w := 0; w < *clients; w++ {
			wg.Add(1)
			if *streamTest {
				go GroupStreamRead(&gsconfigs[w])
			} else {
				go GroupRead(&gsconfigs[w])
			}
		}
		wg.Wait()
		log.Println("Issued", *clients*perClient, "GS READS")
		ts := time.Since(t).Seconds()
		log.Println("Total run time was:", ts, "seconds")
		log.Printf("Per second: %.2f\n", float64(*clients*perClient)/ts)
	}
}
// Benchmark configuration flags, shared by the ValueStore (vs*) and
// GroupStore (gs*) tests.
var (
	num           = flag.Int("num", 1000, "total # of entries to write")
	vsize         = flag.Int("vsize", 128, "value size")
	procs         = flag.Int("procs", 1, "gomaxprocs count")
	clients       = flag.Int("clients", 1, "# of client workers to split writes across")
	vsWriteTest   = flag.Bool("vswrite", false, "do valuestore write test")
	vsReadTest    = flag.Bool("vsread", false, "do valuestore read test")
	groups        = flag.Int("groups", 1, "# of groups per client to split writes across")
	gsWriteTest   = flag.Bool("gswrite", false, "do groupstore write test")
	gsReadTest    = flag.Bool("gsread", false, "do groupstore read test")
	streamTest    = flag.Bool("stream", false, "use streaming api")
	profileEnable = flag.Bool("profile", false, "enable cpu profiling")
	// NOTE(review): the usage strings below are empty, so -help says nothing
	// about these flags — consider e.g. "valuestore host:port".
	vsServer = flag.String("vshost", "localhost:6379", "")
	gsServer = flag.String("gshost", "localhost:6380", "")
	// Derived in main() after flag.Parse():
	perClient = 0          // writes per client worker (= *num / *clients)
	value     = []byte("") // replaced with a random payload of *vsize bytes
)
// main parses the benchmark flags, validates the key-distribution
// parameters, builds the shared random value payload, and dispatches the
// requested ValueStore / GroupStore test suites.
func main() {
	flag.Parse()
	if *clients > *num {
		log.Println("# of clients can't be greater than # of keys written")
		return
	}
	runtime.GOMAXPROCS(*procs)
	if *profileEnable {
		defer profile.Start().Stop()
	}
	// Random payload shared by every write request.
	s := NewScrambled()
	value = make([]byte, *vsize)
	s.Read(value)
	perClient = *num / *clients
	// Each client must contribute at least one write per group.
	perGroup := perClient / *groups
	if perGroup == 0 {
		log.Printf("Can't split %d writes across %d groups", perClient, *groups)
		log.Println("Need -num to be at least:", *clients**groups)
		return
	}
	log.Println("Using streaming api:", *streamTest)
	if *vsWriteTest || *vsReadTest {
		VSTests()
	}
	if *gsWriteTest || *gsReadTest {
		GSTests()
	}
	// (redundant trailing `return` removed — staticcheck S1023)
}
|
package common
/*
Generated using mavgen - https://github.com/ArduPilot/pymavlink/
Copyright 2020 queue-b <https://github.com/queue-b>
Permission is hereby granted, free of charge, to any person obtaining a copy
of the generated software (the "Generated Software"), to deal
in the Generated Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Generated Software, and to permit persons to whom the Generated
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Generated Software.
THE GENERATED SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE GENERATED SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE GENERATED SOFTWARE.
*/
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"text/tabwriter"
"github.com/queue-b/go-mavlink2"
"github.com/queue-b/go-mavlink2/util"
)
/*V2Extension Message implementing parts of the V2 payload specs in V1 frames for transitional support. */
// The field order below is significant: Read/Write serialize the struct with
// encoding/binary, so the declaration order defines the wire layout
// (2 + 1 + 1 + 1 header bytes, 249 payload bytes, then the two
// bookkeeping fields HasExtensionFieldValues and PayloadLength).
type V2Extension struct {
	/*MessageType A code that identifies the software component that understands this message (analogous to USB device classes or mime type strings). If this code is less than 32768, it is considered a 'registered' protocol extension and the corresponding entry should be added to https://github.com/mavlink/mavlink/extension-message-ids.xml. Software creators can register blocks of message IDs as needed (useful for GCS specific metadata, etc...). Message_types greater than 32767 are considered local experiments and should not be checked in to any widely distributed codebase. */
	MessageType uint16
	/*TargetNetwork Network ID (0 for broadcast) */
	TargetNetwork uint8
	/*TargetSystem System ID (0 for broadcast) */
	TargetSystem uint8
	/*TargetComponent Component ID (0 for broadcast) */
	TargetComponent uint8
	/*Payload Variable length payload. The length is defined by the remaining message length when subtracting the header and other fields. The entire content of this block is opaque unless you understand any the encoding message_type. The particular encoding used can be extension specific and might not always be documented as part of the mavlink specification. */
	Payload [249]byte
	/*HasExtensionFieldValues indicates if this message has any extensions and */
	HasExtensionFieldValues bool
	/*PayloadLength Length of the variable length Payload*/
	PayloadLength uint8
}
// String renders the message as an aligned, human-readable table of its
// fields (dialect/name header followed by one row per field).
func (m *V2Extension) String() string {
	format := ""
	var buffer bytes.Buffer
	writer := tabwriter.NewWriter(&buffer, 0, 0, 2, ' ', 0)
	format += "Name:\t%v/%v\n"
	format += "MessageType:\t%v\n"
	format += "TargetNetwork:\t%v\n"
	format += "TargetSystem:\t%v\n"
	format += "TargetComponent:\t%v\n"
	format += "Payload:\t%v\n"
	format += "PayloadLength:\t%v\n"
	fmt.Fprintf(
		writer,
		format,
		m.GetDialect(),
		m.GetMessageName(),
		m.MessageType,
		m.TargetNetwork,
		m.TargetSystem,
		m.TargetComponent,
		m.Payload,
		m.PayloadLength,
	)
	writer.Flush()
	// buffer.String() avoids the extra copy made by string(buffer.Bytes())
	// (staticcheck S1030).
	return buffer.String()
}
// GetVersion reports the MAVLink wire version of the message contents:
// 2 when extension fields carry values, 1 otherwise.
func (m *V2Extension) GetVersion() int {
	version := 1
	if m.HasExtensionFieldValues {
		version = 2
	}
	return version
}
// GetDialect gets the name of the dialect that defines the Message.
func (m *V2Extension) GetDialect() string {
	return "common"
}
// GetMessageName gets the name of the Message.
func (m *V2Extension) GetMessageName() string {
	return "V2Extension"
}
// GetID gets the MAVLink message ID of the Message (V2_EXTENSION = 248).
func (m *V2Extension) GetID() uint32 {
	return 248
}
// HasExtensionFields returns true if the message definition contained
// extensions; false otherwise. V2Extension defines none.
func (m *V2Extension) HasExtensionFields() bool {
	return false
}
// getV1Length returns the size of the fixed (non-payload) portion of the
// message: uint16 MessageType + three uint8 target fields = 5 bytes.
func (m *V2Extension) getV1Length() int {
	return 5
}
// getIOSlice returns a zeroed scratch slice exactly as large as the struct's
// binary encoding, for use with encoding/binary round-trips.
func (m *V2Extension) getIOSlice() []byte {
	// 5 fixed-header bytes + 249 payload bytes + bool + PayloadLength byte.
	const encodedSize = 5 + 249 + 1 + 1
	return make([]byte, encodedSize)
}
// GetPayload gets the valid Payload bytes as a slice: only the first
// PayloadLength bytes of the fixed-size array are meaningful.
// (The comment previously named this "GetPayloadSlice", which does not match
// the function name.)
func (m *V2Extension) GetPayload() []byte {
	return m.Payload[0:m.PayloadLength]
}
// SetPayloadSlice sets Payload from the provided slice and records its
// length. Input longer than the 249-byte array is truncated to fit and
// mavlink2.ErrValueTooLong is returned.
func (m *V2Extension) SetPayloadSlice(payload []byte) error {
	if len(payload) <= len(m.Payload) {
		copy(m.Payload[:], payload)
		m.PayloadLength = uint8(len(payload))
		return nil
	}
	// Too long: keep the leading len(m.Payload) bytes and report the overflow.
	copy(m.Payload[:], payload[:len(m.Payload)])
	m.PayloadLength = uint8(len(m.Payload))
	return mavlink2.ErrValueTooLong
}
// Read sets the field values of the message from the raw message payload.
// The frame bytes are copied into a 256-byte scratch slice matching the
// struct's binary layout; the final scratch byte is overwritten with the
// inferred payload length (frame length minus the 5 fixed-header bytes,
// clamped at 0) so binary.Read also fills in PayloadLength.
func (m *V2Extension) Read(frame mavlink2.Frame) (err error) {
	version := frame.GetVersion()
	// Ensure only Version 1 or Version 2 were specified
	if version != 1 && version != 2 {
		err = mavlink2.ErrUnsupportedVersion
		return
	}
	// binary.Read can panic; swallow the panic and return a sane error
	defer func() {
		if r := recover(); r != nil {
			err = mavlink2.ErrPrivateField
		}
	}()
	// Get a slice with enough capacity for a full read
	ioBytes := m.getIOSlice()
	copy(ioBytes, frame.GetMessageBytes())
	// Add the length of the payload to the end of the array so that it is read in as PayloadLength
	ioBytes[len(ioBytes)-1] = uint8(math.Max(0, float64(frame.GetMessageLength()-5)))
	buffer := bytes.NewBuffer(ioBytes)
	err = binary.Read(buffer, binary.LittleEndian, m)
	if err != nil {
		return
	}
	return
}
// Write encodes the field values of the message to a byte array.
// The struct is serialized in full and then truncated to the 5 fixed-header
// bytes plus PayloadLength payload bytes, which also discards the
// HasExtensionFieldValues / PayloadLength bookkeeping fields that
// binary.Write placed after the payload.
func (m *V2Extension) Write(version int) (output []byte, err error) {
	var buffer bytes.Buffer
	// Ensure only Version 1 or Version 2 were specified
	if version != 1 && version != 2 {
		err = mavlink2.ErrUnsupportedVersion
		return
	}
	err = binary.Write(&buffer, binary.LittleEndian, m)
	if err != nil {
		return
	}
	output = buffer.Bytes()
	// V2Extension packets are always variable length,
	// regardless of whether they are in a V1 or V2 stream
	// Cut off the trailing bytes past the end of the payload
	output = output[:5+m.PayloadLength]
	if version == 2 {
		output = util.TruncateV2(output)
	}
	return
}
|
package main
// code snippets were taken from: https://outcrawl.com/image-recognition-api-go-tensorflow/
import (
"bufio"
"bytes"
"context"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"time"
"github.com/google/uuid"
"github.com/sdeoras/token/proto"
"github.com/sirupsen/logrus"
tf "github.com/tensorflow/tensorflow/tensorflow/go"
"google.golang.org/grpc"
)
// runWithScheduler pulls batches of image-file tokens from a gRPC token
// scheduler, classifies each image with the loaded TensorFlow graph, and
// appends one JSON result line per image to an in-memory buffer. After each
// batch is acknowledged by the server the batch buffer is folded into the
// job buffer, which is finally written to <outDir>/<jobID>/<jobID>_<ts>.json.
// It relies on package-level state (jobID, inDir, outDir, host, numBatches,
// batchSize, graph, loadGraph, makeTensorFromImage, findBestLabels).
func runWithScheduler() error {
	t0 := time.Now()
	// b/bw accumulate the JSON output for the whole job.
	var b bytes.Buffer
	bw := bufio.NewWriter(&b)
	if *jobID == "default" {
		*jobID = uuid.New().String()
		logrus.Info("using job id:", *jobID)
	}
	// load tf graph and start a session
	logrus.Info("loading graph")
	if err := loadGraph(); err != nil {
		return err
	}
	logrus.Info("graph loaded")
	logrus.Info("starting session")
	dirName := *inDir
	session, err := tf.NewSession(graph, nil)
	if err != nil {
		return err
	}
	defer session.Close()
	logrus.Info("started session")
	// grpc dialing
	logrus.Info("dialing grpc server: ", *host)
	ctx := context.Background()
	conn, err := grpc.Dial(*host, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()
	client := proto.NewTokensClient(conn)
	logrus.Info("connected to grpc server: ", *host)
	// loop over number of batches
	for i := 0; i < *numBatches; i++ {
		// request tokens from server
		logrus.Info("requested tokens: ", *batchSize)
		tokens, err := client.Get(ctx, &proto.JobID{ID: *jobID, BatchSize: int32(*batchSize)})
		if err != nil {
			return err
		}
		// An empty batch means the server has no more work for this job.
		if len(tokens.Tokens) == 0 {
			logrus.Info("received tokens: ", len(tokens.Tokens), ", exiting")
			break
		} else {
			logrus.Info("received tokens: ", len(tokens.Tokens))
		}
		// start heartbeat-ing
		heartBeat := proto.NewHeartBeat(client, *jobID, tokens.Key)
		heartBeat.Start()
		// loop over tokens
		logrus.Info("computing")
		// b0/bw0 accumulate results for this batch only; they are merged
		// into the job buffer after the server acknowledges the batch.
		var b0 bytes.Buffer
		bw0 := bufio.NewWriter(&b0)
		t := time.Now()
		for _, token := range tokens.Tokens {
			tLoop := time.Now()
			fileName := filepath.Join(dirName, token)
			image, err := ioutil.ReadFile(fileName)
			if err != nil {
				// Per-image failures are logged and skipped; they do not
				// abort the batch.
				logrus.Error("error on file read: ", err, ", ", fileName)
				continue
			}
			fileSize := uint64(len(image))
			fileIOTime := time.Since(tLoop)
			tLoop = time.Now()
			tensor, err := makeTensorFromImage(bytes.NewBuffer(image), "jpg")
			if err != nil {
				logrus.Error("error on making tensor from image: ", err, ", ", fileName)
				continue
			}
			output, err := session.Run(
				map[tf.Output]*tf.Tensor{
					graph.Operation("input").Output(0): tensor,
				},
				[]tf.Output{
					graph.Operation("output").Output(0),
				},
				nil)
			if err != nil {
				logrus.Error("error in running session:", err, ", ", fileName)
				continue
			}
			computeTime := time.Since(tLoop)
			jb, err := json.Marshal(findBestLabels(token, output[0].Value().([][]float32)[0], fileSize, fileIOTime, computeTime))
			if err != nil {
				logrus.Error("error in json marshaling:", err, ", ", fileName)
				continue
			}
			if nn, err := bw0.Write(jb); err != nil {
				logrus.Error("error in writing to bw0:", err, ", ", fileName)
				continue
			} else {
				// NOTE(review): in this short-write branch err is nil, so the
				// logged error value is always <nil>; log nn/len(jb) instead.
				if nn != len(jb) {
					logrus.Error("error in writing to bw0:", err, ", ", fileName)
					continue
				}
			}
			bb := []byte("\n")
			if nn, err := bw0.Write(bb); err != nil {
				logrus.Error("could not write to bytes buffer", err)
				continue
			} else {
				if nn != len(bb) {
					logrus.Error("could not write to bytes buffer", err)
					continue
				}
			}
			// check server's response to client's heartbeat
			if err := heartBeat.Check(); err != nil {
				heartBeat.Close()
				return err
			}
		}
		logrus.Info("client looping over tokens took: ", time.Since(t), ", for jobID: ", *jobID, ", key: ", tokens.Key)
		// stop heartbeat-ing
		heartBeat.Close()
		// send done confirmation to server
		if ack, err := client.Done(ctx, &proto.JobID{Key: tokens.Key, ID: *jobID}); err != nil {
			return err
		} else {
			// Only merge the batch output once the server accepted the batch.
			if ack.Status {
				if err := bw0.Flush(); err != nil {
					return err
				}
				bb := b0.Bytes()
				if nn, err := bw.Write(bb); err != nil {
					logrus.Error("could not write to bytes buffer", err)
					continue
				} else {
					if nn != len(bb) {
						logrus.Error("could not write to bytes buffer", err)
						continue
					}
				}
			}
		}
	}
	// output
	// Timestamp is the current UnixNano rendered in base 16.
	timeStamp := strconv.FormatInt(time.Now().UnixNano(), 16)
	dirName = filepath.Join(*outDir, *jobID)
	fileName := filepath.Join(dirName, *jobID+"_"+timeStamp+".json")
	if err := os.MkdirAll(dirName, 0755); err != nil {
		return err
	}
	if err := bw.Flush(); err != nil {
		return err
	}
	jb := b.Bytes()
	// Skip writing an empty results file.
	if len(jb) > 0 {
		if err := ioutil.WriteFile(fileName, jb, 0644); err != nil {
			return err
		}
		logrus.Info("writing output: ", fileName)
	}
	// all done
	logrus.Info("all done: ", time.Since(t0))
	return nil
}
|
package scheduler
import (
"time"
)
// DurationWatcher exposes a schedule period: Duration returns the current
// period, and Watch returns a channel delivering period changes.
type DurationWatcher interface {
	Duration() time.Duration
	Watch() chan time.Duration
}

// Schedule runs f every dn.Duration() on a background goroutine. Whenever a
// new duration arrives on dn.Watch(), the ticker is stopped and replaced so
// subsequent invocations use the new period. The goroutine runs forever.
func Schedule(f func(), dn DurationWatcher) {
	changes := dn.Watch()
	go func() {
		ticker := time.NewTicker(dn.Duration())
		for {
			select {
			case next := <-changes:
				// Swap in a ticker with the updated period.
				ticker.Stop()
				ticker = time.NewTicker(next)
			case <-ticker.C:
				f()
			}
		}
	}()
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compute
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
)
// VpnTunnel is the unstructured-resource handler for the compute/beta
// VpnTunnel DCL resource; it carries no state of its own.
type VpnTunnel struct{}
// VpnTunnelToUnstructured converts a typed dclService.VpnTunnel into a
// loosely-typed unstructured.Resource. Nil pointer fields are omitted from
// the resulting object map; list fields are always set (possibly to nil).
func VpnTunnelToUnstructured(r *dclService.VpnTunnel) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "compute",
			Version: "beta",
			Type:    "VpnTunnel",
		},
		Object: make(map[string]interface{}),
	}
	if r.Description != nil {
		u.Object["description"] = *r.Description
	}
	if r.DetailedStatus != nil {
		u.Object["detailedStatus"] = *r.DetailedStatus
	}
	if r.Id != nil {
		u.Object["id"] = *r.Id
	}
	if r.IkeVersion != nil {
		u.Object["ikeVersion"] = *r.IkeVersion
	}
	if r.Labels != nil {
		rLabels := make(map[string]interface{})
		for k, v := range r.Labels {
			rLabels[k] = v
		}
		u.Object["labels"] = rLabels
	}
	var rLocalTrafficSelector []interface{}
	for _, rLocalTrafficSelectorVal := range r.LocalTrafficSelector {
		rLocalTrafficSelector = append(rLocalTrafficSelector, rLocalTrafficSelectorVal)
	}
	u.Object["localTrafficSelector"] = rLocalTrafficSelector
	if r.Location != nil {
		u.Object["location"] = *r.Location
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.PeerExternalGateway != nil {
		u.Object["peerExternalGateway"] = *r.PeerExternalGateway
	}
	if r.PeerExternalGatewayInterface != nil {
		u.Object["peerExternalGatewayInterface"] = *r.PeerExternalGatewayInterface
	}
	if r.PeerGcpGateway != nil {
		u.Object["peerGcpGateway"] = *r.PeerGcpGateway
	}
	if r.PeerIP != nil {
		u.Object["peerIP"] = *r.PeerIP
	}
	if r.Project != nil {
		u.Object["project"] = *r.Project
	}
	var rRemoteTrafficSelector []interface{}
	for _, rRemoteTrafficSelectorVal := range r.RemoteTrafficSelector {
		rRemoteTrafficSelector = append(rRemoteTrafficSelector, rRemoteTrafficSelectorVal)
	}
	u.Object["remoteTrafficSelector"] = rRemoteTrafficSelector
	if r.Router != nil {
		u.Object["router"] = *r.Router
	}
	if r.SelfLink != nil {
		u.Object["selfLink"] = *r.SelfLink
	}
	if r.SharedSecret != nil {
		u.Object["sharedSecret"] = *r.SharedSecret
	}
	if r.SharedSecretHash != nil {
		u.Object["sharedSecretHash"] = *r.SharedSecretHash
	}
	if r.Status != nil {
		// Enum values are stored as their string representation.
		u.Object["status"] = string(*r.Status)
	}
	if r.TargetVpnGateway != nil {
		u.Object["targetVpnGateway"] = *r.TargetVpnGateway
	}
	if r.VpnGateway != nil {
		u.Object["vpnGateway"] = *r.VpnGateway
	}
	if r.VpnGatewayInterface != nil {
		u.Object["vpnGatewayInterface"] = *r.VpnGatewayInterface
	}
	return u
}
// UnstructuredToVpnTunnel converts an unstructured.Resource back into a
// typed dclService.VpnTunnel, checking the dynamic type of every recognized
// key and returning an error on the first mismatch. Unrecognized keys are
// ignored; list elements of the wrong type are silently skipped.
func UnstructuredToVpnTunnel(u *unstructured.Resource) (*dclService.VpnTunnel, error) {
	r := &dclService.VpnTunnel{}
	if _, ok := u.Object["description"]; ok {
		if s, ok := u.Object["description"].(string); ok {
			r.Description = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Description: expected string")
		}
	}
	if _, ok := u.Object["detailedStatus"]; ok {
		if s, ok := u.Object["detailedStatus"].(string); ok {
			r.DetailedStatus = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.DetailedStatus: expected string")
		}
	}
	if _, ok := u.Object["id"]; ok {
		if i, ok := u.Object["id"].(int64); ok {
			r.Id = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.Id: expected int64")
		}
	}
	if _, ok := u.Object["ikeVersion"]; ok {
		if i, ok := u.Object["ikeVersion"].(int64); ok {
			r.IkeVersion = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.IkeVersion: expected int64")
		}
	}
	if _, ok := u.Object["labels"]; ok {
		if rLabels, ok := u.Object["labels"].(map[string]interface{}); ok {
			m := make(map[string]string)
			for k, v := range rLabels {
				// Non-string label values are dropped rather than rejected.
				if s, ok := v.(string); ok {
					m[k] = s
				}
			}
			r.Labels = m
		} else {
			return nil, fmt.Errorf("r.Labels: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["localTrafficSelector"]; ok {
		if s, ok := u.Object["localTrafficSelector"].([]interface{}); ok {
			for _, ss := range s {
				if strval, ok := ss.(string); ok {
					r.LocalTrafficSelector = append(r.LocalTrafficSelector, strval)
				}
			}
		} else {
			return nil, fmt.Errorf("r.LocalTrafficSelector: expected []interface{}")
		}
	}
	if _, ok := u.Object["location"]; ok {
		if s, ok := u.Object["location"].(string); ok {
			r.Location = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Location: expected string")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["peerExternalGateway"]; ok {
		if s, ok := u.Object["peerExternalGateway"].(string); ok {
			r.PeerExternalGateway = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.PeerExternalGateway: expected string")
		}
	}
	if _, ok := u.Object["peerExternalGatewayInterface"]; ok {
		if i, ok := u.Object["peerExternalGatewayInterface"].(int64); ok {
			r.PeerExternalGatewayInterface = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.PeerExternalGatewayInterface: expected int64")
		}
	}
	if _, ok := u.Object["peerGcpGateway"]; ok {
		if s, ok := u.Object["peerGcpGateway"].(string); ok {
			r.PeerGcpGateway = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.PeerGcpGateway: expected string")
		}
	}
	if _, ok := u.Object["peerIP"]; ok {
		if s, ok := u.Object["peerIP"].(string); ok {
			r.PeerIP = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.PeerIP: expected string")
		}
	}
	if _, ok := u.Object["project"]; ok {
		if s, ok := u.Object["project"].(string); ok {
			r.Project = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Project: expected string")
		}
	}
	if _, ok := u.Object["remoteTrafficSelector"]; ok {
		if s, ok := u.Object["remoteTrafficSelector"].([]interface{}); ok {
			for _, ss := range s {
				if strval, ok := ss.(string); ok {
					r.RemoteTrafficSelector = append(r.RemoteTrafficSelector, strval)
				}
			}
		} else {
			return nil, fmt.Errorf("r.RemoteTrafficSelector: expected []interface{}")
		}
	}
	if _, ok := u.Object["router"]; ok {
		if s, ok := u.Object["router"].(string); ok {
			r.Router = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Router: expected string")
		}
	}
	if _, ok := u.Object["selfLink"]; ok {
		if s, ok := u.Object["selfLink"].(string); ok {
			r.SelfLink = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.SelfLink: expected string")
		}
	}
	if _, ok := u.Object["sharedSecret"]; ok {
		if s, ok := u.Object["sharedSecret"].(string); ok {
			r.SharedSecret = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.SharedSecret: expected string")
		}
	}
	if _, ok := u.Object["sharedSecretHash"]; ok {
		if s, ok := u.Object["sharedSecretHash"].(string); ok {
			r.SharedSecretHash = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.SharedSecretHash: expected string")
		}
	}
	if _, ok := u.Object["status"]; ok {
		if s, ok := u.Object["status"].(string); ok {
			r.Status = dclService.VpnTunnelStatusEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.Status: expected string")
		}
	}
	if _, ok := u.Object["targetVpnGateway"]; ok {
		if s, ok := u.Object["targetVpnGateway"].(string); ok {
			r.TargetVpnGateway = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.TargetVpnGateway: expected string")
		}
	}
	if _, ok := u.Object["vpnGateway"]; ok {
		if s, ok := u.Object["vpnGateway"].(string); ok {
			r.VpnGateway = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.VpnGateway: expected string")
		}
	}
	if _, ok := u.Object["vpnGatewayInterface"]; ok {
		if i, ok := u.Object["vpnGatewayInterface"].(int64); ok {
			r.VpnGatewayInterface = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.VpnGatewayInterface: expected int64")
		}
	}
	return r, nil
}
// GetVpnTunnel fetches the current state of the tunnel described by u and
// returns it in unstructured form.
func GetVpnTunnel(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	typed, err := UnstructuredToVpnTunnel(u)
	if err != nil {
		return nil, err
	}
	fetched, err := c.GetVpnTunnel(ctx, typed)
	if err != nil {
		return nil, err
	}
	return VpnTunnelToUnstructured(fetched), nil
}
// ListVpnTunnel returns every tunnel in the given project/location, walking
// all result pages and converting each item to unstructured form.
func ListVpnTunnel(ctx context.Context, config *dcl.Config, project string, location string) ([]*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	page, err := c.ListVpnTunnel(ctx, project, location)
	if err != nil {
		return nil, err
	}
	var resources []*unstructured.Resource
	for {
		for _, item := range page.Items {
			resources = append(resources, VpnTunnelToUnstructured(item))
		}
		if !page.HasNext() {
			return resources, nil
		}
		if err := page.Next(ctx, c); err != nil {
			return nil, err
		}
	}
}
// ApplyVpnTunnel creates or updates the tunnel described by u, honoring any
// state hint carried in opts, and returns the applied state.
func ApplyVpnTunnel(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	desired, err := UnstructuredToVpnTunnel(u)
	if err != nil {
		return nil, err
	}
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		// Convert the unstructured hint to its typed form for the DCL.
		typedHint, err := UnstructuredToVpnTunnel(hint)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(typedHint))
	}
	applied, err := c.ApplyVpnTunnel(ctx, desired, opts...)
	if err != nil {
		return nil, err
	}
	return VpnTunnelToUnstructured(applied), nil
}
// VpnTunnelHasDiff reports whether applying u would change anything. It runs
// Apply with every lifecycle action blocked: a clean return means no diff,
// while an ApplyInfeasibleError means a change would have been required.
func VpnTunnelHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	c := dclService.NewClient(config)
	typed, err := UnstructuredToVpnTunnel(u)
	if err != nil {
		return false, err
	}
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		typedHint, err := UnstructuredToVpnTunnel(hint)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(typedHint))
	}
	opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification))
	if _, err := c.ApplyVpnTunnel(ctx, typed, opts...); err != nil {
		if _, infeasible := err.(dcl.ApplyInfeasibleError); infeasible {
			return true, nil
		}
		return false, err
	}
	return false, nil
}
// DeleteVpnTunnel removes the tunnel described by u.
func DeleteVpnTunnel(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	typed, err := UnstructuredToVpnTunnel(u)
	if err != nil {
		return err
	}
	return dclService.NewClient(config).DeleteVpnTunnel(ctx, typed)
}
// VpnTunnelID returns the canonical identifier of the tunnel described by u.
func VpnTunnelID(u *unstructured.Resource) (string, error) {
	typed, err := UnstructuredToVpnTunnel(u)
	if err != nil {
		return "", err
	}
	return typed.ID()
}
// STV returns the ServiceTypeVersion identifying this resource handler.
// NOTE(review): this is a positional composite literal; it assumes the
// unstructured.ServiceTypeVersion field order is (Service, Type, Version).
// Confirm it matches the named literal in VpnTunnelToUnstructured
// (Service: "compute", Version: "beta", Type: "VpnTunnel") — if the field
// order is actually (Service, Version, Type), Version and Type are swapped.
func (r *VpnTunnel) STV() unstructured.ServiceTypeVersion {
	return unstructured.ServiceTypeVersion{
		"compute",
		"VpnTunnel",
		"beta",
	}
}
// SetPolicyMember is not supported for VpnTunnel.
func (r *VpnTunnel) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

// GetPolicyMember is not supported for VpnTunnel.
func (r *VpnTunnel) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

// DeletePolicyMember is not supported for VpnTunnel.
func (r *VpnTunnel) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return unstructured.ErrNoSuchMethod
}

// SetPolicy is not supported for VpnTunnel.
func (r *VpnTunnel) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

// SetPolicyWithEtag is not supported for VpnTunnel.
func (r *VpnTunnel) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

// GetPolicy is not supported for VpnTunnel.
func (r *VpnTunnel) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// Get delegates to GetVpnTunnel.
func (r *VpnTunnel) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetVpnTunnel(ctx, config, resource)
}

// Apply delegates to ApplyVpnTunnel.
func (r *VpnTunnel) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyVpnTunnel(ctx, config, resource, opts...)
}

// HasDiff delegates to VpnTunnelHasDiff.
func (r *VpnTunnel) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return VpnTunnelHasDiff(ctx, config, resource, opts...)
}

// Delete delegates to DeleteVpnTunnel.
func (r *VpnTunnel) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteVpnTunnel(ctx, config, resource)
}

// ID delegates to VpnTunnelID.
func (r *VpnTunnel) ID(resource *unstructured.Resource) (string, error) {
	return VpnTunnelID(resource)
}
// init registers the VpnTunnel handler with the unstructured resource
// registry so it can be looked up by ServiceTypeVersion.
func init() {
	unstructured.Register(&VpnTunnel{})
}
|
package fakes
import "github.com/cloudfoundry-incubator/notifications/models"
// ClientsRepo is an in-memory test double for the clients repository.
type ClientsRepo struct {
	// Clients maps client ID to the stored record.
	Clients map[string]models.Client
	// UpsertError is returned by Upsert alongside the stored client.
	UpsertError error
	// FindError is returned by Find when the requested client exists.
	FindError error
}
// NewClientsRepo returns an empty in-memory fake clients repository.
func NewClientsRepo() *ClientsRepo {
	repo := ClientsRepo{Clients: map[string]models.Client{}}
	return &repo
}
// Create stores client unless a record with the same ID already exists, in
// which case it returns models.ErrDuplicateRecord.
func (fake *ClientsRepo) Create(conn models.ConnectionInterface, client models.Client) (models.Client, error) {
	_, exists := fake.Clients[client.ID]
	if exists {
		return client, models.ErrDuplicateRecord{}
	}
	fake.Clients[client.ID] = client
	return client, nil
}
// Update unconditionally overwrites the stored record for client.ID.
func (fake *ClientsRepo) Update(conn models.ConnectionInterface, client models.Client) (models.Client, error) {
	stored := client
	fake.Clients[stored.ID] = stored
	return stored, nil
}
// Upsert stores client and returns the configured UpsertError (nil unless
// the test has set one).
func (fake *ClientsRepo) Upsert(conn models.ConnectionInterface, client models.Client) (models.Client, error) {
	stored := client
	fake.Clients[stored.ID] = stored
	return stored, fake.UpsertError
}
// Find returns the stored client for id (together with the configured
// FindError), or models.ErrRecordNotFound if the id is absent.
func (fake *ClientsRepo) Find(conn models.ConnectionInterface, id string) (models.Client, error) {
	client, ok := fake.Clients[id]
	if !ok {
		return models.Client{}, models.ErrRecordNotFound{}
	}
	return client, fake.FindError
}
|
package compile
import(
"github.com/Evedel/fortify/src/dictionary"
)
// toClang recursively lowers a parsed token tree into C source text.
// Program nodes concatenate the translation of their children; otherwise the
// node's first child decides how the node is emitted.
func toClang(SyntaxTree dictionary.TokenNode) (Result string) {
	if SyntaxTree.This.Id == dictionary.Program {
		for _, child := range SyntaxTree.List {
			Result += toClang(child)
		}
		return
	}
	first := SyntaxTree.List[0]
	switch first.This.Id {
	case dictionary.CarriageReturn, dictionary.Space, dictionary.CommentAll, dictionary.DontCompileF90:
		// Whitespace and non-C fragments produce no output.
	case dictionary.DontCompileTex:
		for _, child := range first.List {
			Result += toClang(child)
		}
	case dictionary.Print:
		// Skip the first and last child tokens; every token in between
		// becomes its own printf statement.
		args := first.List
		for i := 1; i < len(args)-1; i++ {
			Result += "\t" + "printf(" + args[i].This.Value + ");\n"
		}
	}
	return
}
|
package resource
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
)
// DurationForPod estimates resource-usage durations for every container in
// the pod. Each container starts from default CPU (100m) and memory (100Mi)
// figures, which are then overridden by the container's limits and finally
// its requests; the last-known container state from the pod status is
// attached before the summaries are folded into a ResourcesDuration.
func DurationForPod(pod *corev1.Pod) wfv1.ResourcesDuration {
	summaries := Summaries{}
	// Build a fresh combined slice rather than append(pod.Spec.InitContainers,
	// pod.Spec.Containers...): appending to a slice we do not own can write
	// into its backing array and corrupt pod.Spec if spare capacity exists.
	containers := make([]corev1.Container, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
	containers = append(containers, pod.Spec.InitContainers...)
	containers = append(containers, pod.Spec.Containers...)
	for _, c := range containers {
		// Initialize summaries with default limits for CPU and memory.
		summaries[c.Name] = Summary{ResourceList: map[corev1.ResourceName]resource.Quantity{
			// https://medium.com/@betz.mark/understanding-resource-limits-in-kubernetes-cpu-time-9eff74d3161b
			corev1.ResourceCPU: resource.MustParse("100m"),
			// https://medium.com/@betz.mark/understanding-resource-limits-in-kubernetes-memory-6b41e9a955f9
			corev1.ResourceMemory: resource.MustParse("100Mi"),
		}}
		// Update with user-configured resources (falls back to limits as == requests, same as Kubernetes).
		for name, quantity := range c.Resources.Limits {
			summaries[c.Name].ResourceList[name] = quantity
		}
		for name, quantity := range c.Resources.Requests {
			summaries[c.Name].ResourceList[name] = quantity
		}
	}
	// Same ownership concern for the status slices.
	statuses := make([]corev1.ContainerStatus, 0, len(pod.Status.InitContainerStatuses)+len(pod.Status.ContainerStatuses))
	statuses = append(statuses, pod.Status.InitContainerStatuses...)
	statuses = append(statuses, pod.Status.ContainerStatuses...)
	for _, c := range statuses {
		summaries[c.Name] = Summary{ResourceList: summaries[c.Name].ResourceList, ContainerState: c.State}
	}
	return summaries.Duration()
}
|
package main
import (
"fmt"
"log"
"os"
"github.com/joho/godotenv"
"github.com/taglme/nfc-goclient/pkg/client"
)
// main loads credentials from the local .env file, builds a signed NFC API
// client, and prints the list of attached adapters.
func main() {
	err := godotenv.Load()
	if err != nil {
		log.Fatal("Error loading .env file")
	}
	privateRSAKey, err := client.PrivateRSAKeyFromB64String(os.Getenv("SECRET"))
	if err != nil {
		log.Fatal(err)
	}
	auth := client.NewSigner(os.Getenv("APP_ID"), privateRSAKey, os.Getenv("CERT"))
	// Named "api" so the variable does not shadow the imported client package.
	api := client.New("127.0.0.1:3011", auth)
	adapters, err := api.Adapters.GetAll()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(adapters)
}
|
package btrfs
import (
"bufio"
"errors"
"fmt"
"os"
"os/exec"
"path"
"strings"
"syscall"
)
// FsMagic holds a filesystem magic number as reported by statfs(2).
type FsMagic int64

const (
	// FsMagicBtrfs is the btrfs superblock magic number.
	FsMagicBtrfs = FsMagic(0x9123683E)
	// FsMagicBtrfs32Bit is the same value reinterpreted through a signed
	// 32-bit statfs type field (0x9123683E as int32 == -1859950530).
	FsMagicBtrfs32Bit = FsMagic(-1859950530)
)

var (
	// ErrPrerequisites is returned by Init when home is not on btrfs.
	ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
)
// Init verifies that home resides on a btrfs filesystem (accepting either
// the 64-bit or sign-extended 32-bit magic), creates the directory if
// necessary, and returns a Driver rooted there.
func Init(home string) (*Driver, error) {
	rootdir := path.Dir(home + "/")
	var buf syscall.Statfs_t
	if err := syscall.Statfs(rootdir, &buf); err != nil {
		return nil, err
	}
	magic := FsMagic(buf.Type)
	if magic != FsMagicBtrfs && magic != FsMagicBtrfs32Bit {
		return nil, ErrPrerequisites
	}
	if err := os.MkdirAll(home, 0700); err != nil {
		return nil, err
	}
	return &Driver{home: home}, nil
}
// Driver manages btrfs subvolumes and snapshots rooted at a single
// home directory.
type Driver struct {
	// home is the directory under which all managed subvolumes live.
	home string
}
// Snapshot creates a btrfs snapshot of subvolume `from` at `to` (read-only
// when readonly is set), then recursively snapshots every nested subvolume
// of `from` into the corresponding location under `to`.
func (d *Driver) Snapshot(from, to string, readonly bool) error {
	fromPath := fmt.Sprintf("%s/%s", d.home, from)
	toPath := fmt.Sprintf("%s/%s", d.home, to)
	if !d.Exists(from) {
		return fmt.Errorf("Volume does not exist: %s", fromPath)
	}
	if d.Exists(to) {
		return fmt.Errorf("Snapshot already exists: %s", toPath)
	}
	var cmd *exec.Cmd
	if readonly {
		cmd = raw("subvolume", "snapshot", "-r", fromPath, toPath)
	} else {
		cmd = raw("subvolume", "snapshot", fromPath, toPath)
	}
	if err := cmd.Run(); err != nil {
		return err
	}
	// create recursive snapshots.
	// NOTE: ListSubSubvolumes is defined elsewhere in this package.
	subvolumes, err := d.ListSubSubvolumes(from)
	if err != nil {
		return err
	}
	for _, subvol := range subvolumes {
		subvolFrom := fmt.Sprintf("%s/%s", from, subvol)
		subvolTo := fmt.Sprintf("%s/%s", to, subvol)
		// delete empty directory
		// (nested subvolumes show up as placeholder directories inside the
		// parent snapshot; remove them so the child snapshot can be created
		// at that path)
		if d.Exists(subvolTo) {
			if err := os.Remove(fmt.Sprintf("%s/%s", d.home, subvolTo)); err != nil {
				return err
			}
		}
		err = d.Snapshot(subvolFrom, subvolTo, readonly)
		if err != nil {
			return err
		}
	}
	return nil
}
// Subvolume creates a new btrfs subvolume named vol under the driver home.
// It fails if anything already exists at that path.
func (d *Driver) Subvolume(vol string) error {
	target := fmt.Sprintf("%s/%s", d.home, vol)
	_, statErr := os.Stat(target)
	if statErr == nil {
		return fmt.Errorf("Subvolume already exists: %s", target)
	}
	return raw("subvolume", "create", target).Run()
}
// Exists reports whether vol is present under the driver home.
//
// NOTE(review): any Stat error — including permission errors, not only
// os.IsNotExist — is treated as "does not exist", matching the original
// behavior; use os.IsNotExist if that distinction ever matters.
func (d *Driver) Exists(vol string) bool {
	_, err := os.Stat(fmt.Sprintf("%s/%s", d.home, vol))
	return err == nil
}
// GetSubvolumeDetail runs `btrfs subvolume show` on vol and returns the
// value of the first "Key: value" line whose key equals detail
// (case-insensitive).
func (d *Driver) GetSubvolumeDetail(vol, detail string) (string, error) {
	volPath := fmt.Sprintf("%s/%s", d.home, vol)
	// The command error was previously discarded, silently scanning empty
	// output; surface it instead.
	o, err := raw("subvolume", "show", volPath).Output()
	if err != nil {
		return "", fmt.Errorf("Can't read details of subvolume %s: %v", volPath, err)
	}
	for _, line := range strings.Split(string(o), "\n") {
		// SplitN keeps values that themselves contain ':' intact
		// (plain Split truncated them at the second colon).
		fields := strings.SplitN(line, ":", 2)
		if len(fields) != 2 {
			continue
		}
		key := strings.Trim(fields[0], " \t")
		val := strings.Trim(fields[1], " \t")
		if strings.EqualFold(key, detail) {
			return val, nil
		}
	}
	return "", fmt.Errorf("Subvolume detail %s not found", detail)
}
// GetSubvolumeParentUuid returns the "Parent uuid" detail reported by
// `btrfs subvolume show` for vol.
func (d *Driver) GetSubvolumeParentUuid(vol string) (string, error) {
	return d.GetSubvolumeDetail(vol, "Parent uuid")
}
// GetSubvolumeUuid returns the "uuid" detail reported by
// `btrfs subvolume show` for vol.
func (d *Driver) GetSubvolumeUuid(vol string) (string, error) {
	return d.GetSubvolumeDetail(vol, "uuid")
}
// ListSubvolumes returns the raw `btrfs subvolume list -o -u` output lines
// for the driver home, with any "__active/" prefix stripped.
func (d *Driver) ListSubvolumes() ([]string, error) {
	var volumes []string
	// find sub-subvolumes
	cmd := raw("subvolume", "list", "-o", d.home, "-u")
	output, err := cmd.StdoutPipe()
	if err != nil {
		return volumes, fmt.Errorf("Can't access subvolume list of %s: %v", d.home, err)
	}
	defer output.Close()
	if err := cmd.Start(); err != nil {
		return volumes, err
	}
	scanner := bufio.NewScanner(output)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "__active") {
			line = strings.Replace(line, "__active/", "", 1)
		}
		volumes = append(volumes, line)
	}
	// Previously the scanner error was never checked and the child process
	// was never reaped, leaking a zombie per call.
	if err := scanner.Err(); err != nil {
		return volumes, fmt.Errorf("Can't read subvolume list of %s: %v", d.home, err)
	}
	_ = cmd.Wait() // reap the child; exit status intentionally non-fatal here
	return volumes, nil
}
// GetLayerByUuid scans the subvolume listing for the entry whose uuid
// column matches uuid and returns its path with the home prefix stripped.
func (d *Driver) GetLayerByUuid(uuid string) (string, error) {
	layers, err := d.ListSubvolumes()
	if err != nil {
		return "", err
	}
	needle := fmt.Sprintf(" uuid %s ", uuid)
	homePrefix := strings.Replace(d.home, "/", "", 1) + "/"
	for _, layer := range layers {
		if !strings.Contains(layer, needle) {
			continue
		}
		fields := strings.Split(layer, " ")
		if len(fields) > 10 {
			// Field 10 of the listing holds the subvolume path.
			return strings.Replace(fields[10], homePrefix, "", 1), nil
		}
	}
	return "", fmt.Errorf("No layer found")
}
// ListSubSubvolumes returns the subvolumes nested directly or indirectly
// under vol, as paths relative to vol, by parsing
// `btrfs subvolume list -o`.
func (d *Driver) ListSubSubvolumes(vol string) ([]string, error) {
	var volumes []string
	volPath := fmt.Sprintf("%s/%s", d.home, vol)
	if !d.Exists(vol) {
		return volumes, fmt.Errorf("Volume does not exist: %s", volPath)
	}
	// find sub-subvolumes
	cmd := raw("subvolume", "list", "-o", volPath)
	output, err := cmd.StdoutPipe()
	if err != nil {
		return volumes, fmt.Errorf("Can't access subvolume list of %s: %v", volPath, err)
	}
	defer output.Close()
	err = cmd.Start()
	if err != nil {
		return volumes, err
	}
	scanner := bufio.NewScanner(output)
	for scanner.Scan() {
		// The listing is space-separated; the path starts at field 8
		// (presumably "ID x gen y top level z path <path>" — TODO confirm
		// against the btrfs-progs version in use).
		line := strings.Split(scanner.Text(), " ")
		if len(line) > 8 {
			subvol := strings.Join(line[8:], " ")
			// remove beginning of volume path - relative to conair home
			if strings.Contains(subvol, "__active") {
				subvol = strings.Replace(subvol, "__active/", "", 1)
			}
			// Paths may be reported absolute (volPath...) or relative
			// (vol...); normalize both forms to be relative to vol.
			if strings.HasPrefix(subvol, volPath) {
				volumes = append(volumes, strings.Replace(subvol, fmt.Sprintf("%s/", strings.Replace(volPath, "/", "", 1)), "", 1))
			}
			if strings.HasPrefix(subvol, vol) {
				volumes = append(volumes, strings.Replace(subvol, fmt.Sprintf("%s/", vol), "", 1))
			}
		}
	}
	err = scanner.Err()
	if err != nil {
		return volumes, fmt.Errorf("Can't read subvolume list of %s: %v", volPath, err)
	}
	return volumes, nil
}
// Remove deletes the subvolume vol, recursively removing any nested
// subvolumes first so the final delete of the parent succeeds.
func (d *Driver) Remove(vol string) error {
	volPath := fmt.Sprintf("%s/%s", d.home, vol)
	if !d.Exists(vol) {
		return fmt.Errorf("Volume does not exist: %s", volPath)
	}
	children, err := d.ListSubSubvolumes(vol)
	if err != nil {
		return err
	}
	for _, child := range children {
		if err := d.Remove(fmt.Sprintf("%s/%s", vol, child)); err != nil {
			return err
		}
	}
	return raw("subvolume", "delete", volPath).Run()
}
// raw builds (but does not start) a "btrfs" command with the given
// subcommand arguments.
func raw(args ...string) *exec.Cmd {
	cmd := exec.Command("btrfs", args...)
	return cmd
}
|
package main
import (
"fmt"
)
// User holds a person's first and last name.
type User struct {
	FirstName string
	LastName  string
}
// FullName returns the first and last name joined by a single space.
func (u *User) FullName() string {
	return u.FirstName + " " + u.LastName
}
// NewUser constructs a User with the given names.
func NewUser(firstName, lastName string) *User {
	u := User{FirstName: firstName, LastName: lastName}
	return &u
}
// Task is a todo item owned by a User; the embedded *User promotes the
// user's fields and methods onto Task.
type Task struct {
	ID     int
	Detail string
	done   bool // completion flag (unexported, set via NewTask)
	*User        // embed the owning User
}
// NewTask constructs an open (not done) Task owned by a freshly created
// User.
func NewTask(id int, detail, firstName, lastName string) *Task {
	t := &Task{
		ID:     id,
		Detail: detail,
		User:   NewUser(firstName, lastName),
	}
	t.done = false
	return t
}
// main demonstrates embedding: promoted fields and methods are reachable
// both directly on Task and through the embedded User.
func main() {
	t := NewTask(1, "buy the book", "tama", "nabe")
	for _, s := range []string{t.FirstName, t.LastName, t.User.FullName(), t.FullName()} {
		fmt.Println(s)
	}
}
|
// Package transmitter provides functionality for transmitting
// arbitrary webhook messages on Discord.
//
// Existing webhooks are used for messages sent, and if necessary,
// new webhooks are created to ensure messages in multiple popular channels
// don't cause messages to be registered as new users.
package transmitter
import (
"strings"
"github.com/hashicorp/go-multierror"
"github.com/bwmarrin/discordgo"
"github.com/pkg/errors"
)
// A Transmitter represents a message manager instance for a single guild.
type Transmitter struct {
	session *discordgo.Session
	guild   string             // guild ID this transmitter serves
	prefix  string             // name prefix identifying webhooks owned by us
	webhook *discordgo.Webhook // lazily created on first Message; nil until then
}
// New returns a new Transmitter given a Discord session, guild ID, and webhook prefix.
// Any pre-existing webhooks carrying the prefix are deleted so stale hooks
// from a previous run don't accumulate.
func New(session *discordgo.Session, guild string, prefix string, limit int) (*Transmitter, error) {
	// Get all existing webhooks
	hooks, err := session.GuildWebhooks(guild)
	// Check to make sure we have permissions
	if err != nil {
		// Comma-ok assertion: the original bare err.(*discordgo.RESTError)
		// panicked whenever the session returned any other error type
		// (e.g. a network error).
		if restErr, ok := err.(*discordgo.RESTError); ok && restErr.Message != nil && restErr.Message.Code == discordgo.ErrCodeMissingPermissions {
			return nil, errors.Wrap(err, "the 'Manage Webhooks' permission is required")
		}
		return nil, errors.Wrap(err, "could not get webhooks")
	}
	// Delete existing webhooks with the same prefix
	for _, wh := range hooks {
		if strings.HasPrefix(wh.Name, prefix) {
			if err := session.WebhookDelete(wh.ID); err != nil {
				return nil, errors.Wrapf(err, "could not remove hook %s", wh.ID)
			}
		}
	}
	t := &Transmitter{
		session: session,
		guild:   guild,
		prefix:  prefix,
		webhook: nil,
	}
	return t, nil
}
// Close immediately stops all active webhook timers and deletes the
// webhook owned by this Transmitter, if any.
func (t *Transmitter) Close() error {
	wh := t.webhook
	if wh == nil {
		return nil
	}
	if err := t.session.WebhookDelete(wh.ID); err != nil {
		var result error
		return multierror.Append(result, errors.Wrapf(err, "could not remove hook %s", wh.ID)).ErrorOrNil()
	}
	return nil
}
// Message transmits a message to the given channel with the given username, avatarURL, and content.
//
// Note that this function will wait until Discord responds with an answer.
func (t *Transmitter) Message(channel string, username string, avatarURL string, content string) (err error) {
// Create a webhook if there is no free webhook
if t.webhook == nil {
err = t.createWebhook(channel)
if err != nil {
return err // this error is already wrapped by us
}
}
params := discordgo.WebhookParams{
Username: username,
AvatarURL: avatarURL,
Content: content,
}
wh := t.webhook
_, err = t.session.WebhookEdit(wh.ID, "", "", channel)
if err != nil {
exists, checkErr := t.checkAndDeleteWebhook(channel)
// If there was error performing the check, compose the list
if checkErr != nil {
err = multierror.Append(err, checkErr).ErrorOrNil()
}
// If the webhook exists OR there was an error performing the check
// return the error to the caller
if exists || checkErr != nil {
return errors.Wrap(err, "could not edit existing webhook")
}
// Otherwise just try and send the message again
return t.Message(channel, username, avatarURL, content)
}
_, err = t.session.WebhookExecute(wh.ID, wh.Token, true, ¶ms)
if err != nil {
return errors.Wrap(err, "could not execute existing webhook")
}
return nil
}
// GetID returns the ID of the managed webhook, or "" when none exists yet.
func (t *Transmitter) GetID() string {
	if wh := t.webhook; wh != nil {
		return wh.ID
	}
	return ""
}
|
package role
import (
"errors"
"time"
"xorm.io/builder"
"yj-app/app/yjgframe/db"
"yj-app/app/yjgframe/utils/excel"
"yj-app/app/yjgframe/utils/page"
)
// EntityFlag is the golang structure for table sys_role, extended with a
// transient Flag column (selected as "false as flag" by SelectListAll).
// The comment('...') fragments inside the xorm tags are database column
// comments and must stay as-is.
type EntityFlag struct {
	RoleId     int64     `json:"role_id" xorm:"not null pk autoincr comment('角色ID') BIGINT(20)"`
	RoleName   string    `json:"role_name" xorm:"not null comment('角色名称') VARCHAR(30)"`
	RoleKey    string    `json:"role_key" xorm:"not null comment('角色权限字符串') VARCHAR(100)"`
	RoleSort   int       `json:"role_sort" xorm:"not null comment('显示顺序') INT(4)"`
	DataScope  string    `json:"data_scope" xorm:"default '1' comment('数据范围(1:全部数据权限 2:自定数据权限 3:本部门数据权限 4:本部门及以下数据权限)') CHAR(1)"`
	Status     string    `json:"status" xorm:"not null comment('角色状态(0正常 1停用)') CHAR(1)"`
	DelFlag    string    `json:"del_flag" xorm:"default '0' comment('删除标志(0代表存在 2代表删除)') CHAR(1)"`
	CreateBy   string    `json:"create_by" xorm:"default '' comment('创建者') VARCHAR(64)"`
	CreateTime time.Time `json:"create_time" xorm:"comment('创建时间') DATETIME"`
	UpdateBy   string    `json:"update_by" xorm:"default '' comment('更新者') VARCHAR(64)"`
	UpdateTime time.Time `json:"update_time" xorm:"comment('更新时间') DATETIME"`
	Remark     string    `json:"remark" xorm:"comment('备注') VARCHAR(500)"`
	Flag       bool      `json:"flag" xorm:"comment('标记') BOOL"`
}
// DataScopeReq carries the form parameters for saving a role's data-scope
// (row-level permission) settings.
type DataScopeReq struct {
	RoleId    int64  `form:"roleId" binding:"required"`
	RoleName  string `form:"roleName" binding:"required"`
	RoleKey   string `form:"roleKey" binding:"required"`
	DataScope string `form:"dataScope" binding:"required"`
	DeptIds   string `form:"deptIds"`
}
// CheckRoleNameReq carries the parameters for checking whether a role name
// is unique relative to an existing role id.
type CheckRoleNameReq struct {
	RoleId   int64  `form:"roleId" binding:"required"`
	RoleName string `form:"roleName" binding:"required"`
}
// CheckRoleKeyReq carries the parameters for checking whether a role
// permission key is unique relative to an existing role id.
type CheckRoleKeyReq struct {
	RoleId  int64  `form:"roleId" binding:"required"`
	RoleKey string `form:"roleKey" binding:"required"`
}
// CheckRoleNameALLReq carries the parameters for checking a role name
// against all roles (no excluded id).
type CheckRoleNameALLReq struct {
	RoleName string `form:"roleName" binding:"required"`
}
// CheckRoleKeyALLReq carries the parameters for checking a role permission
// key against all roles (no excluded id).
type CheckRoleKeyALLReq struct {
	RoleKey string `form:"roleKey" binding:"required"`
}
// SelectPageReq carries the filter, paging and ordering parameters for the
// paginated role listing.
type SelectPageReq struct {
	RoleName      string `form:"roleName"`      // role name (fuzzy match)
	Status        string `form:"status"`        // status (0 normal, 1 disabled)
	RoleKey       string `form:"roleKey"`       // role permission key (fuzzy match)
	DataScope     string `form:"dataScope"`     // data-scope code
	BeginTime     string `form:"beginTime"`     // creation time lower bound
	EndTime       string `form:"endTime"`       // creation time upper bound
	PageNum       int    `form:"pageNum"`       // current page number
	PageSize      int    `form:"pageSize"`      // rows per page
	OrderByColumn string `form:"orderByColumn"` // sort column
	IsAsc         string `form:"isAsc"`         // sort direction
}
// AddReq carries the form parameters for creating a role.
type AddReq struct {
	RoleName string `form:"roleName" binding:"required"`
	RoleKey  string `form:"roleKey" binding:"required"`
	RoleSort string `form:"roleSort" binding:"required"`
	Status   string `form:"status"`
	Remark   string `form:"remark"`
	// FIX: the tag previously read `form:"menuIds""` — a stray quote that
	// makes the struct tag malformed (flagged by go vet; tag parsers may
	// silently ignore the whole tag, breaking form binding).
	MenuIds string `form:"menuIds"`
}
// EditReq carries the form parameters for updating an existing role.
type EditReq struct {
	RoleId   int64  `form:"roleId" binding:"required"`
	RoleName string `form:"roleName" binding:"required"`
	RoleKey  string `form:"roleKey" binding:"required"`
	RoleSort string `form:"roleSort" binding:"required"`
	Status   string `form:"status"`
	Remark   string `form:"remark"`
	MenuIds  string `form:"menuIds"`
}
// SelectListPage queries non-deleted roles matching param with pagination,
// returning the page rows, the paging metadata, and any error.
func SelectListPage(param *SelectPageReq) ([]Entity, *page.Paging, error) {
	db := db.Instance().Engine()
	p := new(page.Paging)
	if db == nil {
		return nil, p, errors.New("获取数据库连接失败")
	}
	// Base filter: only rows not marked deleted.
	model := db.Table(TableName()).Alias("r").Where("r.del_flag = '0'")
	if param.RoleName != "" {
		model.Where("r.role_name like ?", "%"+param.RoleName+"%")
	}
	if param.Status != "" {
		model.Where("r.status = ?", param.Status)
	}
	if param.RoleKey != "" {
		model.Where("r.role_key like ?", "%"+param.RoleKey+"%")
	}
	if param.DataScope != "" {
		model.Where("r.data_scope = ?", param.DataScope)
	}
	if param.BeginTime != "" {
		model.Where("date_format(r.create_time,'%y%m%d') >= date_format(?,'%y%m%d') ", param.BeginTime)
	}
	if param.EndTime != "" {
		model.Where("date_format(r.create_time,'%y%m%d') <= date_format(?,'%y%m%d') ", param.EndTime)
	}
	// Clone before Count() so the count does not consume the session
	// used for the page query below.
	totalModel := model.Clone()
	total, err := totalModel.Count()
	if err != nil {
		return nil, p, errors.New("读取行数失败")
	}
	p = page.CreatePaging(param.PageNum, param.PageSize, int(total))
	if param.OrderByColumn != "" {
		// NOTE(review): column/direction are concatenated into the ORDER BY
		// clause; confirm callers whitelist these values (SQL injection risk).
		model.OrderBy(param.OrderByColumn + " " + param.IsAsc + " ")
	}
	model.Limit(p.Pagesize, p.StartNum)
	var result []Entity
	err = model.Find(&result)
	return result, p, err
}
// SelectListExport exports roles matching param to an excel file with
// header row head, selecting columns col, and returns the file path.
func SelectListExport(param *SelectPageReq, head, col []string) (string, error) {
	db := db.Instance().Engine()
	if db == nil {
		return "", errors.New("获取数据库连接失败")
	}
	build := builder.Select(col...).From(TableName(), "t")
	if param != nil {
		if param.RoleName != "" {
			build.Where(builder.Like{"t.role_name", param.RoleName})
		}
		if param.Status != "" {
			build.Where(builder.Eq{"t.status": param.Status})
		}
		if param.RoleKey != "" {
			build.Where(builder.Like{"t.role_key", param.RoleKey})
		}
		if param.DataScope != "" {
			build.Where(builder.Eq{"t.data_scope": param.DataScope})
		}
		if param.BeginTime != "" {
			build.Where(builder.Gte{"date_format(t.create_time,'%y%m%d')": "date_format('" + param.BeginTime + "','%y%m%d')"})
		}
		if param.EndTime != "" {
			build.Where(builder.Lte{"date_format(t.create_time,'%y%m%d')": "date_format('" + param.EndTime + "','%y%m%d')"})
		}
	}
	sqlStr, _, _ := build.ToSQL()
	arr, err := db.SQL(sqlStr).QuerySliceString()
	// FIX: the query error was previously discarded — it was immediately
	// overwritten by the excel call's error, exporting an empty sheet on
	// query failure.
	if err != nil {
		return "", err
	}
	return excel.DownlaodExcel(head, arr)
}
// SelectListAll returns all non-deleted roles matching param (which may be
// nil for "no filter"), each with a transient Flag column preset to false.
func SelectListAll(param *SelectPageReq) ([]EntityFlag, error) {
	db := db.Instance().Engine()
	if db == nil {
		return nil, errors.New("获取数据库连接失败")
	}
	model := db.Table(TableName()).Alias("r").Select("r.*,false as flag").Where("r.del_flag = '0'")
	if param != nil {
		if param.RoleName != "" {
			model.Where("r.role_name like ?", "%"+param.RoleName+"%")
		}
		if param.Status != "" {
			// FIX: the condition previously read "r.status = " with no ?
			// placeholder, producing invalid SQL whenever a status filter
			// was supplied.
			model.Where("r.status = ?", param.Status)
		}
		if param.RoleKey != "" {
			model.Where("r.role_key like ?", "%"+param.RoleKey+"%")
		}
		if param.DataScope != "" {
			// FIX: same missing ? placeholder as the status filter.
			model.Where("r.data_scope = ?", param.DataScope)
		}
		if param.BeginTime != "" {
			model.Where("date_format(r.create_time,'%y%m%d') >= date_format(?,'%y%m%d') ", param.BeginTime)
		}
		if param.EndTime != "" {
			model.Where("date_format(r.create_time,'%y%m%d') <= date_format(?,'%y%m%d') ", param.EndTime)
		}
	}
	var result []EntityFlag
	err := model.Find(&result)
	return result, err
}
// SelectRoleContactVo returns the distinct non-deleted roles assigned to
// the given user id, joining through sys_user_role.
func SelectRoleContactVo(userId int64) ([]Entity, error) {
	db := db.Instance().Engine()
	if db == nil {
		return nil, errors.New("获取数据库连接失败")
	}
	model := db.Table(TableName()).Alias("r")
	model.Join("Left", []string{"sys_user_role", "ur"}, "ur.role_id = r.role_id")
	model.Join("Left", []string{"sys_user", "u"}, "u.user_id = ur.user_id")
	// The dept join is not referenced in SELECT/WHERE; presumably kept for
	// parity with a sibling query — confirm before removing.
	model.Join("Left", []string{"sys_dept", "d"}, "u.dept_id = d.dept_id")
	model.Where("r.del_flag = '0'")
	model.Where("ur.user_id = ?", userId)
	model.Select("distinct r.role_id, r.role_name, r.role_key, r.role_sort, r.data_scope,r.status, r.del_flag, r.create_time, r.remark")
	var result []Entity
	err := model.Find(&result)
	return result, err
}
// CheckRoleNameUniqueAll looks up a role by name across all roles; it
// returns the matching entity (for uniqueness checks) or nil when none
// exists.
func CheckRoleNameUniqueAll(roleName string) (*Entity, error) {
	var entity Entity
	entity.RoleName = roleName
	// FIX: the original called FindOne twice, discarding the first result
	// and error and issuing a redundant database query.
	ok, err := entity.FindOne()
	if ok {
		return &entity, err
	}
	return nil, err
}
// CheckRoleKeyUniqueAll looks up a role by permission key across all
// roles; it returns the matching entity or nil when none exists.
func CheckRoleKeyUniqueAll(roleKey string) (*Entity, error) {
	entity := Entity{RoleKey: roleKey}
	ok, err := entity.FindOne()
	if !ok {
		return nil, err
	}
	return &entity, err
}
|
package config
import "github.com/dank/go-csgsi"
// GameSetup creates and returns a Game object.
// The 0 passed to csgsi.New is presumably a channel buffer size — confirm
// against the go-csgsi documentation.
func GameSetup() *csgsi.Game {
	return csgsi.New(0)
}
|
package service
import (
"enter-module/core/common"
"enter-module/core/config"
"enter-module/core/info"
"enter-module/core/util"
"database/sql"
)
var sqlConf = config.InitDBInfo()
// UserLogIn validates the supplied credentials against the stored user and
// returns a JSON result string (success payload or error payload).
func UserLogIn(userInfo info.LogInUserInfo) string {
	db, err := sql.Open(sqlConf.SqlDriverName, sqlConf.DataSourceName)
	if err != nil {
		print(err)
		return common.ErrorData("[UserLogin] db.Open error")
	}
	// FIX: defer ensures the handle is released on every path; the original
	// leaked it when returning early on unknown user / wrong password.
	// NOTE(review): this db handle is otherwise unused here — the actual
	// lookup goes through getUserByName, which opens its own connection.
	defer db.Close()
	var existsUser = getUserByName(userInfo.UserName)
	if existsUser.UserName == "" {
		return common.ErrorDataNotExists("用户还未注册")
	}
	if userInfo.Password != existsUser.Password {
		return common.ErrorDataNotExists("密码错误")
	}
	return common.SuccessData(userInfo)
}
// UserRegister inserts a new user row (rejecting duplicate user names) and
// returns a JSON result string (success payload or error payload).
func UserRegister(userInfo info.RegisterUserInfo) string {
	var user = getUserByName(userInfo.UserName)
	if user.UserName != "" {
		return common.ErrorDataExists("用户已存在")
	}
	var id = util.GetUUID() // UUIDs are 36 characters
	db, err := sql.Open(sqlConf.SqlDriverName, sqlConf.DataSourceName)
	if err != nil {
		return common.ErrorData("[UserRegister] db.Open error")
	}
	// FIX: the original leaked the db handle (and the prepared statement)
	// on every early-return path.
	defer db.Close()
	var sqlStatement = "INSERT user SET id=?,user_name=?,password=?,email=?,phone=?,address=?,create_time=?"
	stmt, err := db.Prepare(sqlStatement)
	if err != nil {
		return common.ErrorData("[UserRegister] db.Prepare error")
	}
	defer stmt.Close()
	res, err := stmt.Exec(id, userInfo.UserName, userInfo.Password, userInfo.Email, userInfo.Phone, userInfo.Address,
		util.GetNowTime())
	if err != nil {
		print(res)
		return common.ErrorData("[UserRegister] db.Exec error")
	}
	return common.SuccessData(userInfo)
}
// getUserByName fetches the stored name and password for userName.
// Unknown users (and any query/scan failure) yield a zero-value
// info.LogInUserInfo, matching the original error-ignoring behavior.
func getUserByName(userName string) info.LogInUserInfo {
	db, err := sql.Open(sqlConf.SqlDriverName, sqlConf.DataSourceName)
	if err != nil {
		print(err)
		return info.LogInUserInfo{"", ""}
	}
	// FIX: the handle is now closed on every path via defer.
	defer db.Close()
	row := db.QueryRow("select user_name, password from user where user_name=?", userName)
	var existsName string
	var password string
	// FIX: the Scan error was silently ignored; sql.ErrNoRows (unknown
	// user) and real failures both map to the empty result, as before,
	// but no longer pass through uninspected.
	if err := row.Scan(&existsName, &password); err != nil {
		return info.LogInUserInfo{UserName: "", Password: ""}
	}
	return info.LogInUserInfo{UserName: existsName, Password: password}
}
// isParamEmpty reports whether either credential field is blank.
func isParamEmpty(userName, password string) bool {
	return userName == "" || password == ""
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"github.com/golang/glog"
"github.com/rivo/uniseg"
)
// main reads stdin line by line, echoing each line followed by its
// grapheme clusters, one per line.
func main() {
	br := bufio.NewReader(os.Stdin)
	for {
		// err renamed from the confusing "c".
		line, err := br.ReadString('\n')
		if err != nil && err != io.EOF {
			glog.Fatal(err)
		}
		// FIX: ReadString returns io.EOF together with any unterminated
		// final line; the original broke out before processing it, silently
		// dropping input that lacked a trailing newline.
		if line != "" {
			fmt.Println(line)
			gr := uniseg.NewGraphemes(line)
			for gr.Next() {
				fmt.Println(gr.Str())
			}
		}
		if err == io.EOF {
			break
		}
	}
}
|
package trident
import (
"errors"
"time"
"trident.li/keyval"
pf "trident.li/pitchfork/lib"
)
// TriGroup extends the pitchfork PfGroup interface with Trident-specific
// vouching and attestation operations.
type TriGroup interface {
	pf.PfGroup
	Add_default_attestations(ctx pf.PfCtx) (err error)
	GetVouch_adminonly() bool
	GetAttestations() (output []TriGroupAttestation, err error)
	GetAttestationsKVS() (kvs keyval.KeyVals, err error)
}
// TriGroupS is the Trident implementation of a trust group, extending the
// embedded pitchfork PfGroup with vouching/vetting configuration. The
// label/hint tags are user-facing form metadata.
type TriGroupS struct {
	pf.PfGroup
	Please_vouch bool `label:"Please Vouch" pfset:"group_admin" hint:"Members must vouch before becoming active"`
	// FIX: hint previously read "Only adminstators may Vouvh" — two
	// user-visible typos.
	Vouch_adminonly bool   `label:"Vouch group admins only" pfset:"group_admin" hint:"Only administrators may Vouch"`
	Min_invouch     int    `label:"Minimum Inbound Vouches" pfset:"group_admin" hint:"Number of incoming vouches required to vett."`
	Min_outvouch    int    `label:"Minimum Outbound Vouches" pfset:"group_admin" hint:"Number of outgoing vouches required"`
	Target_invouch  int    `label:"Target Invouches" pfset:"group_admin"`
	Max_inactivity  string `label:"Maximum Inactivity" pfset:"group_admin" coalesce:"30 days"`
	Can_time_out    bool   `label:"Can Time Out" pfset:"group_admin"`
	Max_vouchdays   int    `label:"Maximum Vouch Days" pfset:"group_admin"`
	Idle_guard      string `label:"Idle Guard" pfset:"group_admin" coalesce:"30 days"`
	Nom_enabled     bool   `label:"Nominations Enabled" pfset:"group_admin"`
}
/* Don't call directly, use ctx.NewGroup() */
// NewTriGroup returns a fresh TriGroupS wrapping a newly created
// pitchfork group.
func NewTriGroup() pf.PfGroup {
	return &TriGroupS{PfGroup: pf.NewPfGroup()}
}
// fetch loads the trustgroup row named group_name into grp. When nook is
// true, pf.ErrNoRows is tolerated silently; any other failure is logged.
func (grp *TriGroupS) fetch(group_name string, nook bool) (err error) {
	/* Make sure the name is mostly sane */
	group_name, err = pf.Chk_ident("Group Name", group_name)
	if err != nil {
		return
	}
	cols := []string{"ident"}
	vals := []string{group_name}
	err = pf.StructFetchA(grp, "trustgroup", "", cols, vals, "", true)
	if err != nil && !(nook && err == pf.ErrNoRows) {
		pf.Log("Group:fetch() " + err.Error() + " '" + group_name + "'")
	}
	return
}
// Select fetches the named group into grp and then delegates permission
// selection to the embedded pitchfork group.
func (grp *TriGroupS) Select(ctx pf.PfCtx, group_name string, perms pf.Perm) (err error) {
	if err = grp.fetch(group_name, false); err != nil {
		// Failed to fetch; nothing further to select.
		return err
	}
	return grp.PfGroup.Select(ctx, group_name, perms)
}
// Exists reports whether a trustgroup named group_name is present.
// Note: as in the original, any error other than pf.ErrNoRows also counts
// as "exists".
func (grp *TriGroupS) Exists(group_name string) (exists bool) {
	return grp.fetch(group_name, true) != pf.ErrNoRows
}
// Refresh re-reads the currently loaded group's row from the database.
func (grp *TriGroupS) Refresh() (err error) {
	return grp.fetch(grp.GetGroupName(), false)
}
// GetVouch_adminonly reports whether only group admins may vouch.
func (grp *TriGroupS) GetVouch_adminonly() bool {
	return grp.Vouch_adminonly
}
// ListGroupMembers returns the members of the selected group, each
// augmented with four vouch counters (vouches for/by the member, and
// for/by relative to username). search optionally filters by ident (and,
// unless exact, by descr/affiliation); offset/max page the results;
// nominated/inclhidden control visibility filtering.
func (grp *TriGroupS) ListGroupMembers(search string, username string, offset int, max int, nominated bool, inclhidden bool, exact bool) (members []pf.PfGroupMember, err error) {
	var rows *pf.Rows
	grpname := grp.GetGroupName()
	members = nil
	ord := "ORDER BY m.descr"
	m := pf.NewPfGroupMember()
	// Four LEFT JOINed subqueries compute positive-vouch counts keyed on
	// the member ident: vouches received (for), given (by), and the same
	// two restricted to the viewing user ($2).
	q := m.SQL_Selects() + ", " +
		"COALESCE(for_vouches.num, 0) AS vouches_for, " +
		"COALESCE(for_me_vouches.num, 0) AS vouches_for_me, " +
		"COALESCE(by_vouches.num, 0) AS vouches_by, " +
		"COALESCE(by_me_vouches.num, 0) AS vouches_by_me " +
		m.SQL_Froms() + " " +
		"LEFT OUTER JOIN ( " +
		" SELECT 'for' AS dir, mv.vouchee AS member, COUNT(*) AS num " +
		" FROM member_vouch mv " +
		" WHERE mv.trustgroup = $1 " +
		" AND mv.positive " +
		" GROUP BY mv.vouchee " +
		") as for_vouches on (m.ident = for_vouches.member) " +
		"LEFT OUTER JOIN ( " +
		" SELECT 'by' AS dir, mv.vouchor AS member, COUNT(*) AS num " +
		" FROM member_vouch mv " +
		" WHERE mv.trustgroup = $1 " +
		" AND mv.positive " +
		" GROUP BY mv.vouchor " +
		") as by_vouches on (m.ident = by_vouches.member) " +
		"LEFT OUTER JOIN ( " +
		" SELECT 'for_me' AS dir, mv.vouchor AS member, COUNT(*) AS num " +
		" FROM member_vouch mv " +
		" WHERE ROW(mv.trustgroup, mv.vouchee) = ROW($1, $2) " +
		" AND mv.positive " +
		" GROUP BY mv.vouchor " +
		") as for_me_vouches on (m.ident = for_me_vouches.member) " +
		"LEFT OUTER JOIN ( " +
		" SELECT 'by_me' AS dir, mv.vouchee AS member, COUNT(*) AS num " +
		" FROM member_vouch mv " +
		" WHERE ROW(mv.trustgroup, mv.vouchor) = ROW($1, $2) " +
		" AND mv.positive " +
		" GROUP BY mv.vouchee " +
		") as by_me_vouches on (m.ident = by_me_vouches.member) " +
		"WHERE grp.ident = $1 " +
		"AND me.email = mt.email "
	// Visibility filtering: hidden states are excluded unless inclhidden,
	// and nominated narrows (or re-includes) the 'nominated' state.
	if inclhidden {
		if nominated {
			q += "AND ms.ident = 'nominated' "
		}
	} else {
		if nominated {
			q += "AND (NOT ms.hidden OR ms.ident = 'nominated') "
		} else {
			q += "AND NOT ms.hidden "
		}
	}
	// Four query shapes: with/without search term, with/without paging.
	if search == "" {
		if max != 0 {
			q += ord + " LIMIT $4 OFFSET $3"
			rows, err = pf.DB.Query(q, grpname, username, offset, max)
		} else {
			q += ord
			rows, err = pf.DB.Query(q, grpname, username)
		}
	} else {
		if exact {
			q += "AND (m.ident ~* $3) " +
				ord
		} else {
			q += "AND (m.ident ~* $3 " +
				"OR m.descr ~* $3 " +
				"OR m.affiliation ~* $3) " +
				ord
		}
		if max != 0 {
			q += " LIMIT $5 OFFSET $4"
			rows, err = pf.DB.Query(q, grpname, username, search, offset, max)
		} else {
			rows, err = pf.DB.Query(q, grpname, username, search)
		}
	}
	if err != nil {
		pf.Log("Query failed: " + err.Error())
		return
	}
	defer rows.Close()
	for rows.Next() {
		var fullname string
		var username string
		var affiliation string
		var groupname string
		var groupdesc string
		var groupadmin bool
		var groupstate string
		var groupcansee bool
		var email string
		var pgpkey_id string
		var entered string
		var activity string
		var tel string
		var sms string
		var airport string
		member := NewTriGroupMember().(*TriGroupMemberS)
		// Scan order must match SQL_Selects() plus the four counters above.
		err = rows.Scan(
			&username,
			&fullname,
			&affiliation,
			&groupname,
			&groupdesc,
			&groupadmin,
			&groupstate,
			&groupcansee,
			&email,
			&pgpkey_id,
			&entered,
			&activity,
			&tel,
			&sms,
			&airport,
			&member.VouchesFor,
			&member.VouchesForMe,
			&member.VouchesBy,
			&member.VouchesByMe)
		if err != nil {
			pf.Log("Error listing members: " + err.Error())
			return nil, err
		}
		member.Set(groupname, groupdesc, username, fullname, affiliation, groupadmin, groupstate, groupcansee, email, pgpkey_id, entered, activity, sms, tel, airport)
		members = append(members, member)
	}
	return members, nil
}
// Add_default_attestations inserts the standard set of attestations
// (met / trust / fate) for the current trust group.
func (grp *TriGroupS) Add_default_attestations(ctx pf.PfCtx) (err error) {
	att := map[string]string{
		"met":   "I have met them in person more than once",
		"trust": "I trust them to take action",
		"fate":  "I will share membership fate with them",
	}
	// The INSERT statement is loop-invariant; build it once instead of
	// re-concatenating it on every iteration.
	q := "INSERT INTO attestations " +
		"(ident, descr, trustgroup) " +
		"VALUES($1, $2, $3)"
	for a, descr := range att {
		err = pf.DB.Exec(ctx,
			"Added default attestation $1 to group $3",
			1, q,
			a, descr, grp.GetGroupName())
		if err != nil {
			return
		}
	}
	return
}
// Add_default_mailinglists installs the pitchfork default mailing lists
// and then adds Trident's extra "vetting" list.
func (grp *TriGroupS) Add_default_mailinglists(ctx pf.PfCtx) (err error) {
	if err = grp.PfGroup.Add_default_mailinglists(ctx); err != nil {
		return err
	}
	return pf.Ml_addv(ctx, grp.PfGroup, "vetting", "Vetting and Vouching", true, true, true)
}
// group_add creates a new trust group named args[0] with default settings,
// installs the default attestations and mailing lists, and adds the caller
// as an approved admin member.
func group_add(ctx pf.PfCtx, args []string) (err error) {
	var group_name string
	/* Make sure the name is mostly sane */
	group_name, err = pf.Chk_ident("Group Name", args[0])
	if err != nil {
		return
	}
	// Defaults: 180 days max inactivity, 7 days idle guard, stored as seconds.
	d_maxin := 180 * 24 * time.Hour
	i_maxin := d_maxin.Seconds()
	d_guard := 7 * 24 * time.Hour
	i_guard := d_guard.Seconds()
	grp := ctx.NewGroup().(TriGroup)
	exists := grp.Exists(group_name)
	if exists {
		err = errors.New("Group already exists")
		return
	}
	q := "INSERT INTO trustgroup " +
		"(ident, descr, shortname, min_invouch, pgp_required, " +
		" please_vouch, vouch_adminonly, min_outvouch, max_inactivity, can_time_out, " +
		" max_vouchdays, idle_guard, nom_enabled, target_invouch, has_wiki) " +
		"VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) "
	err = pf.DB.Exec(ctx,
		"Created group $1",
		1, q,
		group_name, group_name, group_name, 0, false,
		true, false, 0, i_maxin, false,
		0, i_guard, true, 0, false)
	if err != nil {
		err = errors.New("Group creation failed")
		return
	}
	err = ctx.SelectGroup(group_name, pf.PERM_SYS_ADMIN)
	if err != nil {
		err = errors.New("Group creation failed")
		return
	}
	/* Fetch our newly created group */
	tctx := TriGetCtx(ctx)
	grp = tctx.TriSelectedGroup()
	/* Select yourself */
	// FIX: the original discarded SelectMe's result, so the following
	// check tested a stale (nil) err and could never fail.
	// NOTE(review): confirm SelectMe returns error — the original's dead
	// err-check strongly implies it does.
	err = ctx.SelectMe()
	if err != nil {
		return
	}
	err = grp.Add_default_attestations(ctx)
	if err != nil {
		return
	}
	err = grp.Add_default_mailinglists(ctx)
	if err != nil {
		return
	}
	// FIX: Member_add returns an error (see group_member_nominate);
	// propagate a failed self-add instead of silently ignoring it.
	err = grp.Member_add(ctx)
	if err != nil {
		return
	}
	// NOTE(review): return values of Member_set_state/Member_set_admin
	// (if any) are still ignored, as in the original — confirm signatures.
	grp.Member_set_state(ctx, pf.GROUP_STATE_APPROVED)
	grp.Member_set_admin(ctx, true)
	/* All worked */
	ctx.OutLn("Creation of group %s complete", group_name)
	return
}
// group_member_nominate nominates the user named in args[1] into the
// currently selected group.
func group_member_nominate(ctx pf.PfCtx, args []string) (err error) {
	grp := ctx.SelectedGroup()
	if err = ctx.SelectUser(args[1], pf.PERM_USER_NOMINATE); err != nil {
		return errors.New("User selection failed")
	}
	return grp.Member_add(ctx)
}
// group_menu wires Trident's group commands into the pitchfork menu:
// it replaces the stock "add" handler with group_add and adds the vouch
// and nominate entries.
func group_menu(ctx pf.PfCtx, menu *pf.PfMenu) {
	menu.Replace("add", group_add)
	m := []pf.PfMEntry{
		{"vouch", vouch_menu, 0, -1, nil, pf.PERM_USER, "Vouch Commands"},
		{"nominate", group_member_nominate, 2, 2, []string{"group", "username"}, pf.PERM_GROUP_MEMBER, "Nominate a member for a group"},
	}
	menu.Add(m...)
}
|
// write to file
// read from file
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
// DIR resolves the "testdata" directory at package init time and panics
// if it is missing or is not a directory.
var DIR = func() string {
	p := filepath.Join("testdata")
	info, err := os.Lstat(p)
	switch {
	case err != nil:
		panic(err)
	case !info.IsDir():
		panic("is not directory " + p)
	}
	return p
}()
// readJSON loads testdata/read.json into a slice of Task values and prints
// it. JSON keys differing from field names only by case are matched even
// without struct tags.
func readJSON() {
	type Task struct {
		Name        string
		Description string
		Command     string
	}
	b, err := ioutil.ReadFile(filepath.Join(DIR, "read.json"))
	if err != nil {
		panic(err)
	}
	var ts []Task
	if err := json.Unmarshal(b, &ts); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", ts)
}
// writeJSON marshals a sample task list to JSON and prints it, first
// compact and then indented, followed by a note on write options.
func writeJSON() {
	// Without tags, the exported (capitalized) field names would be
	// emitted verbatim; tags give the conventional lowercase keys.
	type Task struct {
		Name        string `json:"name"`
		Description string `json:"description"`
		Command     string `json:"command"`
	}
	ts := []Task{
		{
			Name:        "write",
			Description: "short description",
			Command:     "write",
		},
	}
	// Compact form.
	out, err := json.Marshal(ts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
	// Indented form.
	out, err = json.MarshalIndent(ts, "", " ")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
	fmt.Println(`
Pick for write
ioutil.WriteFile("path/file", out, 0600)
fmt.Fprint(*os.File, out)
*os.File.Write(out)
etc...`)
}
// main runs the read demo followed by the write demo.
func main() {
	for _, demo := range []struct {
		label string
		run   func()
	}{
		{"readJSON:", readJSON},
		{"writeJSON:", writeJSON},
	} {
		fmt.Println(demo.label)
		demo.run()
		fmt.Println()
	}
}
|
//go:generate sh -c "protoc --go_out=plugins=grpc:. *.proto"
package proto
|
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flare
import (
"context"
"encoding/json"
"fmt"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/diegobernardes/flare/infra/worker"
)
// Worker act as a queue that receive the messages from other workers and as a dispatcher as they
// process the requests from the same queue and deliver it to other workers.
type Worker struct {
	Logger log.Logger
	Pusher worker.Pusher
	// tasks maps a task name to its processor; populated via Register,
	// initialized by Init.
	tasks map[string]worker.Processor
}
// Init validate if the worker has everything it needs to run and
// initializes the task registry.
func (w *Worker) Init() error {
	switch {
	case w.Logger == nil:
		return errors.New("missing logger")
	case w.Pusher == nil:
		return errors.New("missing pusher")
	}
	w.tasks = make(map[string]worker.Processor)
	return nil
}
// Process the message: decode the task envelope and dispatch the payload
// to the registered processor. Unknown tasks are logged and skipped.
func (w *Worker) Process(ctx context.Context, rawContent []byte) error {
	task, payload, err := w.unmarshal(rawContent)
	if err != nil {
		// FIX: the message previously said "marshal content to json",
		// but this path fails while UNmarshaling the incoming content.
		return errors.Wrap(err, "error during unmarshal content from json")
	}
	processor, ok := w.tasks[task]
	if !ok {
		level.Info(w.Logger).Log("message", "ignoring message as processor is not found", "task", task)
		return nil
	}
	if err := processor.Process(ctx, payload); err != nil {
		return errors.Wrap(err, "error during process")
	}
	return nil
}
// Enqueue the message to process it later: the payload is wrapped in a
// task envelope and pushed onto the queue.
func (w *Worker) Enqueue(ctx context.Context, rawContent []byte, task string) error {
	content, err := w.marshal(task, rawContent)
	if err != nil {
		return errors.Wrap(err, "error during marshal message to json")
	}
	// errors.Wrap returns nil when Push succeeds.
	return errors.Wrap(w.Pusher.Push(ctx, content), "error during push message")
}
// Register a processor for a given task; registering the same task twice
// is an error.
func (w *Worker) Register(task string, processor worker.Processor) error {
	if _, dup := w.tasks[task]; dup {
		return fmt.Errorf("already have processor associated with the task '%s'", task)
	}
	w.tasks[task] = processor
	return nil
}
// marshal wraps rawContent in the {"task","payload"} envelope used on the
// queue.
func (w *Worker) marshal(task string, rawContent []byte) ([]byte, error) {
	envelope := struct {
		Task    string          `json:"task"`
		Payload json.RawMessage `json:"payload"`
	}{Task: task, Payload: rawContent}
	return json.Marshal(&envelope)
}
// unmarshal splits a queue envelope back into its task name and raw
// payload bytes.
func (w *Worker) unmarshal(content []byte) (string, []byte, error) {
	var envelope struct {
		Task    string          `json:"task"`
		Payload json.RawMessage `json:"payload"`
	}
	if err := json.Unmarshal(content, &envelope); err != nil {
		return "", nil, err
	}
	return envelope.Task, envelope.Payload, nil
}
|
package main
// blue wraps str in the ANSI escape sequence for bold blue foreground
// text, resetting attributes afterwards.
func blue(str string) string {
	const (
		start = "\033[1;34m"
		reset = "\033[0m"
	)
	return start + str + reset
}
// yellowWithBlueBG wraps str in the ANSI escape sequence for bold yellow
// text on a blue background, resetting attributes afterwards.
func yellowWithBlueBG(str string) string {
	const (
		start = "\033[1;33;44m"
		reset = "\033[0m"
	)
	return start + str + reset
}
|
package main
import (
"fmt"
"net"
)
// addr is the host:port this UDP server binds to.
const addr = "localhost:8888"
// main runs a simple UDP rendezvous server: it records the source address
// of every datagram received and hands the address book to broadcast
// (defined elsewhere in this package) for async fan-out.
func main() {
	// connections is declared elsewhere; presumably {mu sync.Mutex,
	// addrs map[string]*net.UDPAddr} given the usage below — confirm.
	conns := &connections{
		addrs: make(map[string]*net.UDPAddr),
	}
	fmt.Printf("serving on %s\n", addr)
	// construct a udp addr (shadows the package-level const of the same name)
	addr, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		panic(err)
	}
	// listen on our specified addr
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}
	// cleanup
	defer conn.Close()
	// async send messages to all known clients
	go broadcast(conn, conns)
	msg := make([]byte, 1024)
	for {
		// receive a message to gather the ip address
		// and port to send back to; payload content is ignored
		_, retAddr, err := conn.ReadFromUDP(msg)
		if err != nil {
			continue
		}
		//store it in a map
		conns.mu.Lock()
		conns.addrs[retAddr.String()] = retAddr
		conns.mu.Unlock()
		fmt.Printf("%s connected\n", retAddr)
	}
}
|
package gortex
import (
"fmt"
"github.com/vseledkin/gortex/assembler"
)
// MultiplicativeNestedLSTM is a multiplicative LSTM cell whose cell state is
// itself managed by an inner multiplicative LSTM (see Step, where the gated
// candidate and forget-scaled previous cell are fed into innerMemory).
type MultiplicativeNestedLSTM struct {
	Wmx *Matrix // input -> multiplicative intermediate m
	Umh *Matrix // hidden -> multiplicative intermediate m
	Wf  *Matrix // input weights, forget gate
	Uf  *Matrix // m weights, forget gate
	Bf  *Matrix // forget gate bias (see ForgetGateTrick)
	Wi  *Matrix // input weights, input gate
	Ui  *Matrix // m weights, input gate
	Bi  *Matrix // input gate bias
	Wo  *Matrix // input weights, output gate
	Uo  *Matrix // m weights, output gate
	Bo  *Matrix // output gate bias
	Wc  *Matrix // input weights, cell candidate
	Uc  *Matrix // m weights, cell candidate
	Bc  *Matrix // cell candidate bias
	Who *Matrix // hidden -> output projection
	// innerMemory replaces the classic additive cell update; it receives the
	// gated candidate and the forget-scaled previous cell as its inputs.
	innerMemory *MultiplicativeLSTM
}
// ForgetGateTrick overwrites every element of the forget-gate bias with v —
// the common initialization trick of biasing the forget gate open. It is a
// no-op when the bias matrix has not been allocated.
func (lstm *MultiplicativeNestedLSTM) ForgetGateTrick(v float32) {
	if lstm.Bf == nil {
		return
	}
	assembler.Sset(v, lstm.Bf.W)
}
// MakeMultiplicativeNestedLSTM allocates a cell with Xavier-initialized
// weights for the given input (x_size), hidden (h_size) and output (out_size)
// dimensions. Bf starts as a plain Xavier matrix; call ForgetGateTrick to
// apply the forget-gate bias initialization trick.
func MakeMultiplicativeNestedLSTM(x_size, h_size, out_size int) *MultiplicativeNestedLSTM {
	// Composite-literal initializers are evaluated in source order, so the
	// random draws happen in the same sequence as field-by-field assignment.
	return &MultiplicativeNestedLSTM{
		Wmx:         RandXavierMat(h_size, x_size),
		Umh:         RandXavierMat(h_size, h_size),
		Wf:          RandXavierMat(h_size, x_size),
		Uf:          RandXavierMat(h_size, h_size),
		Bf:          RandXavierMat(h_size, 1),
		Wi:          RandXavierMat(h_size, x_size),
		Ui:          RandXavierMat(h_size, h_size),
		Bi:          RandXavierMat(h_size, 1),
		Wo:          RandXavierMat(h_size, x_size),
		Uo:          RandXavierMat(h_size, h_size),
		Bo:          RandXavierMat(h_size, 1),
		Wc:          RandXavierMat(h_size, x_size),
		Uc:          RandXavierMat(h_size, h_size),
		Bc:          RandXavierMat(h_size, 1),
		Who:         RandXavierMat(out_size, h_size),
		innerMemory: MakeMultiplicativeLSTM(h_size, h_size, h_size),
	}
}
// GetParameters returns all trainable matrices of this cell keyed by
// namespace-prefixed names, including the inner memory's parameters under the
// "_inner" sub-namespace.
func (rnn *MultiplicativeNestedLSTM) GetParameters(namespace string) map[string]*Matrix {
	prefix := namespace + "_"
	params := map[string]*Matrix{
		prefix + "Wmx": rnn.Wmx,
		prefix + "Umh": rnn.Umh,
		prefix + "Wf":  rnn.Wf,
		prefix + "Uf":  rnn.Uf,
		prefix + "Bf":  rnn.Bf,
		prefix + "Wi":  rnn.Wi,
		prefix + "Ui":  rnn.Ui,
		prefix + "Bi":  rnn.Bi,
		prefix + "Wo":  rnn.Wo,
		prefix + "Uo":  rnn.Uo,
		prefix + "Bo":  rnn.Bo,
		prefix + "Wc":  rnn.Wc,
		prefix + "Uc":  rnn.Uc,
		prefix + "Bc":  rnn.Bc,
		prefix + "Who": rnn.Who,
	}
	for name, m := range rnn.innerMemory.GetParameters(namespace + "_inner") {
		params[name] = m
	}
	return params
}
// SetParameters copies values from parameters into this cell's matrices
// (matched by the same names GetParameters produces). It returns an error if
// any expected parameter is missing, i.e. the geometries are incompatible.
//
// Fixes: the previous version unconditionally printed two debug lines per
// parameter to stdout, which is inappropriate for library code, and used a
// capitalized error string (Go convention: error strings are lowercase).
func (rnn *MultiplicativeNestedLSTM) SetParameters(namespace string, parameters map[string]*Matrix) error {
	for name, dst := range rnn.GetParameters(namespace) {
		src, ok := parameters[name]
		if !ok {
			return fmt.Errorf("model geometry is not compatible, parameter %s is unknown", name)
		}
		copy(dst.W, src.W)
	}
	return nil
}
// Step builds one time-step of the nested multiplicative LSTM in graph g and
// returns the new hidden state h, outer cell c, inner cell cin and output y.
//
// Inputs: x (current input), h_prev (previous hidden), c_prev (previous outer
// cell), c_previn (previous inner cell).
func (rnn *MultiplicativeNestedLSTM) Step(g *Graph, x, h_prev, c_prev, c_previn *Matrix) (h, c, cin, y *Matrix) {
	// make MultiplicativeLSTM computation graph at one time-step
	// m is the multiplicative intermediate: (Wmx·x) ⊙ (Umh·h_prev); all gates
	// are conditioned on m instead of h_prev directly.
	m := g.EMul(g.Mul(rnn.Wmx, x), g.Mul(rnn.Umh, h_prev))
	f := g.Sigmoid(g.Add(g.Add(g.Mul(rnn.Wf, x), g.Mul(rnn.Uf, m)), rnn.Bf))
	i := g.Sigmoid(g.Add(g.Add(g.Mul(rnn.Wi, x), g.Mul(rnn.Ui, m)), rnn.Bi))
	o := g.Sigmoid(g.Add(g.Add(g.Mul(rnn.Wo, x), g.Mul(rnn.Uo, m)), rnn.Bo))
	c = g.Tanh(g.Add(g.Add(g.Mul(rnn.Wc, x), g.Mul(rnn.Uc, m)), rnn.Bc))
	//c = g.Add(g.EMul(f, c_prev), g.EMul(i, c))
	// Nesting: instead of the classic additive update (kept above for
	// reference), the gated candidate i⊙c and forget-scaled previous cell
	// f⊙c_prev are fed through the inner LSTM to produce the new cell state.
	c_new, cin, _ := rnn.innerMemory.Step(g, g.EMul(i, c), g.EMul(f, c_prev), c_previn)
	h = g.EMul(o, g.Tanh(c_new))
	y = g.Mul(rnn.Who, h)
	return
}
|
package mock
import (
"context"
"testing"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/ipfs/go-cid"
"github.com/minio/blake2b-simd"
)
// RuntimeBuilder provides fluent initialization of a mock runtime: configure
// it with the With* methods, then call Build to obtain a Runtime for a test.
type RuntimeBuilder struct {
	rt *Runtime // prototype runtime mutated by the With* methods
}
// NewBuilder initializes a builder whose runtime prototype targets the given
// receiving actor address. All other fields start from neutral defaults
// (epoch 0, network version 0, zero balances, empty maps) and can be adjusted
// via the With* methods before Build.
func NewBuilder(ctx context.Context, receiver addr.Address) *RuntimeBuilder {
	m := &Runtime{
		ctx:                      ctx,
		epoch:                    0,
		networkVersion:           network.Version0,
		receiver:                 receiver,
		caller:                   addr.Address{},
		callerType:               cid.Undef,
		miner:                    addr.Address{},
		idAddresses:              make(map[addr.Address]addr.Address),
		circulatingSupply:        abi.NewTokenAmount(0),
		state:                    cid.Undef,
		store:                    make(map[cid.Cid][]byte),
		hashfunc:                 blake2b.Sum256, // default hasher; override with WithHasher
		balance:                  abi.NewTokenAmount(0),
		valueReceived:            abi.NewTokenAmount(0),
		actorCodeCIDs:            make(map[addr.Address]cid.Cid),
		newActorAddr:             addr.Undef,
		t:                        nil, // Initialized at Build()
		expectValidateCallerAny:  false,
		expectValidateCallerAddr: nil,
		expectValidateCallerType: nil,
		expectCreateActor:        nil,
		expectSends:              make([]*expectedMessage, 0),
		expectVerifySigs:         make([]*expectVerifySig, 0),
	}
	return &RuntimeBuilder{m}
}
// Build materializes a Runtime for a single test: it shallow-copies the
// configured prototype, deep-copies the store map so the test cannot mutate
// the builder's copy, and attaches the testing handle.
// NOTE(review): only store is deep-copied — idAddresses and actorCodeCIDs
// remain shared between the builder and every built Runtime; confirm that is
// intentional before relying on isolation between Build calls.
func (b *RuntimeBuilder) Build(t testing.TB) *Runtime {
	cpy := *b.rt
	// Deep copy the mutable values.
	cpy.store = make(map[cid.Cid][]byte)
	for k, v := range b.rt.store { //nolint:nomaprange
		cpy.store[k] = v
	}
	cpy.t = t
	return &cpy
}
// WithEpoch sets the chain epoch the built runtime reports.
func (b *RuntimeBuilder) WithEpoch(epoch abi.ChainEpoch) *RuntimeBuilder {
	b.rt.epoch = epoch
	return b
}
// WithNetworkVersion sets the network protocol version the built runtime reports.
func (b *RuntimeBuilder) WithNetworkVersion(nv network.Version) *RuntimeBuilder {
	b.rt.networkVersion = nv
	return b
}
// WithCaller sets the caller address and its actor code CID for the built runtime.
func (b *RuntimeBuilder) WithCaller(address addr.Address, code cid.Cid) *RuntimeBuilder {
	b.rt.caller = address
	b.rt.callerType = code
	return b
}
// WithMiner sets the miner address the built runtime reports.
func (b *RuntimeBuilder) WithMiner(miner addr.Address) *RuntimeBuilder {
	b.rt.miner = miner
	return b
}
// WithBalance sets the receiver's balance and the value received with the message.
func (b *RuntimeBuilder) WithBalance(balance, received abi.TokenAmount) *RuntimeBuilder {
	b.rt.balance = balance
	b.rt.valueReceived = received
	return b
}
// WithActorType registers the actor code CID for an address in the runtime's lookup table.
func (b *RuntimeBuilder) WithActorType(addr addr.Address, code cid.Cid) *RuntimeBuilder {
	b.rt.actorCodeCIDs[addr] = code
	return b
}
// WithHasher replaces the runtime's hash function (blake2b.Sum256 by default).
func (b *RuntimeBuilder) WithHasher(f func(data []byte) [32]byte) *RuntimeBuilder {
	b.rt.hashfunc = f
	return b
}
|
package src
import (
"gopkg.in/go-playground/validator.v8"
"reflect"
"regexp"
)
// TopicsValidate is a validator.v8 custom validation reporting whether the
// top-level struct is a *Topics whose declared TopicListSize matches the
// actual number of entries in TopicList.
func TopicsValidate(v *validator.Validate, topStruct reflect.Value, currentStructOrField reflect.Value,
	field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
	topics, ok := topStruct.Interface().(*Topics)
	return ok && topics.TopicListSize == len(topics.TopicList)
}
func TopicUrl(v *validator.Validate, topStruct reflect.Value, currentStructOrField reflect.Value,
field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
_,ok1:= topStruct.Interface().(*Topics)
_,ok2:= topStruct.Interface().(*Topic)
if ok1 || ok2{
if matched, _ := regexp.MatchString(`^\w{4,10}$`, field.String()); matched {
return true
}
}
return false
} |
package templates
import (
"database/sql/driver"
"embed"
"fmt"
"html/template"
"github.com/cswank/quimby/internal/schema"
)
var (
	// Static holds the embedded static assets, exported so the server can
	// serve them directly.
	//go:embed static/*
	Static embed.FS

	// tpls holds the raw template sources parsed by Init.
	//go:embed templates/*
	tpls embed.FS

	// templates maps a template file name to its parsed form; populated once
	// by Init and read by Get.
	templates map[string]tmpl

	// deviceFuncs are helper functions made available to device-rendering
	// templates.
	deviceFuncs = template.FuncMap{
		// format renders a numeric driver.Value with the given number of
		// decimals; nil values render as the empty string.
		"format": func(v driver.Value, decimals int) string {
			if v == nil {
				return ""
			}
			// Build e.g. "%.2f" from the decimals argument, then apply it.
			t := fmt.Sprintf("%%.%df", decimals)
			return fmt.Sprintf(t, v.(float64))
		},
		// command flattens the nested location->device->status map into a
		// single map keyed "location-device".
		"command": func(devices map[string]map[string]schema.Message) map[string]schema.Message {
			out := map[string]schema.Message{}
			for location, statuses := range devices {
				for dev, status := range statuses {
					out[fmt.Sprintf("%s-%s", location, dev)] = status
				}
			}
			return out
		},
	}
)
// tmpl bundles a parsed template with the auxiliary files and render options
// configured for it in Init.
type tmpl struct {
	template    *template.Template // fully parsed tree, populated by Init
	files       []string           // extra files parsed into the tree alongside the key file
	scripts     []string           // script paths handed back to callers via Get
	stylesheets []string           // stylesheet paths handed back to callers via Get
	funcs       template.FuncMap   // optional FuncMap installed before parsing
	bare        bool               // NOTE(review): not read anywhere in this chunk — confirm use
}
// Init reads every embedded template source, then parses each configured
// entry-point template together with its extra files and the shared base
// layout files. Must be called before Get.
func Init() error {
	// Load all template sources into memory, keyed by file name.
	data := map[string]string{}
	files, err := tpls.ReadDir("templates")
	if err != nil {
		return err
	}
	for _, f := range files {
		d, err := tpls.ReadFile(fmt.Sprintf("templates/%s", f.Name()))
		if err != nil {
			return err
		}
		data[f.Name()] = string(d)
	}
	// Entry-point templates and their per-template extras.
	templates = map[string]tmpl{
		"login.ghtml":       {},
		"logout.ghtml":      {},
		"gadgets.ghtml":     {},
		"edit-method.ghtml": {files: []string{"edit-method.js"}},
		"gadget.ghtml":      {funcs: deviceFuncs, files: []string{"device.ghtml", "method.ghtml", "gadgets.js", "method.js"}, stylesheets: []string{"/static/switch.css"}},
	}
	// Layout files parsed into every template.
	base := []string{"head.ghtml", "base.ghtml", "navbar.ghtml", "menu-item.ghtml"}
	for key, val := range templates {
		t := template.New(key)
		// Funcs must be installed before Parse so the sources can reference them.
		if val.funcs != nil {
			t = t.Funcs(val.funcs)
		}
		var err error
		// Parse the entry point first, then its extras, then the base layout;
		// each Parse call accumulates the file's defined templates into t.
		files := append([]string{key}, val.files...)
		files = append(files, base...)
		for _, f := range files {
			t, err = t.Parse(data[f])
			if err != nil {
				return err
			}
		}
		// Updating the value for an existing key during range is safe in Go.
		val.template = t
		templates[key] = val
	}
	return nil
}
// Get returns the parsed template registered under k along with its script
// and stylesheet paths. Unknown keys yield the zero tmpl (nil template).
func Get(k string) (*template.Template, []string, []string) {
	entry := templates[k]
	return entry.template, entry.scripts, entry.stylesheets
}
|
package main
import "fmt"
// User holds one worker's payroll input.
type User struct {
	name          string // worker's name
	monthlySalary int64  // pay per unit of time worked
	time          int64  // units of time worked
}

// userArr is the fixed roster of three users filled from stdin by main.
var userArr [3]User
func main() {
var name string
var money, time int64
// Initializing [3]Users
for i := range userArr {
fmt.Scan(&name)
userArr[i].name = name
fmt.Scan(&money)
userArr[i].monthlySalary = money
fmt.Scan(&time)
userArr[i].time = time
}
//fmt.Println(userArr)
// Output [3]Users
fmt.Println("№ | Name | Payment | Works | TotalPayment")
for i := range userArr {
//tmp := int64(userArr[i].monthlySalary*userArr[i].time)
//fmt.Printf("%T %d", tmp , tmp)
fmt.Printf("%d | %4s | %7d | %5d | %8d \n", i, userArr[i].name, userArr[i].monthlySalary, userArr[i].time,userArr[i].monthlySalary*userArr[i].time )
}
} |
package mysqldb
import (
"time"
)
// SemSendStatus is the delivery state of an email send attempt.
type SemSendStatus int32

const (
	// Pending — not yet attempted.
	Pending SemSendStatus = 0
	// Sending — delivery in progress.
	Sending SemSendStatus = 1
	// SendSucceed — delivered successfully.
	SendSucceed SemSendStatus = 2
	// SendFailed — delivery failed.
	SendFailed SemSendStatus = 3
)

// Language is the locale a message template is rendered in.
type Language string

const (
	// SimpleChinese — Simplified Chinese.
	SimpleChinese Language = "zh-Hans"
	// TraditionalChinese — Traditional Chinese.
	TraditionalChinese Language = "zh-Hant"
	// English — English.
	English Language = "en"
	// UndefinedLanguage — no recognized language.
	UndefinedLanguage Language = "undefined_language"
)
// SemRecord is one email send record (table sem_record, see TableName).
type SemRecord struct {
	SemID          int32         `gorm:"primary_key"`            // record ID
	ToAddress      string        `gorm:"column:to_address"`      // recipient email address
	SemStatus      SemSendStatus `gorm:"column:sem_status"`      // delivery status
	TemplateAction string        `gorm:"column:template_action"` // template action
	PlatformType   string        `gorm:"column:platform_type"`   // originating platform
	TemplateParam  string        `gorm:"column:template_param"`  // template fill-in values
	Language       Language      `gorm:"column:language"`        // message language
	SemErrorLog    string        `gorm:"column:sem_error_log"`   // error log on failure
	CreatedAt      time.Time     // creation time (maintained by gorm)
	UpdatedAt      time.Time     // last update time (maintained by gorm)
	DeletedAt      *time.Time    // soft-delete time (maintained by gorm)
}
// TemplateParamAndExpiredAt pairs a template's fill-in values with their
// expiry time.
//
// Fix: the TemplateParam tag previously read `gorm:"template_param"` — it
// lacked the "column:" key used by every other field in this file, so gorm
// would not honor it as a column mapping.
type TemplateParamAndExpiredAt struct {
	TemplateParam string    `gorm:"column:template_param"` // template fill-in values
	ExpiredAt     time.Time `gorm:"column:expired_at"`     // expiry time
}
// TableName returns the database table name gorm should use for SemRecord.
func (record SemRecord) TableName() string {
	return "sem_record"
}
// CreateSemRecord inserts a new email send record.
func (db *DbClient) CreateSemRecord(record *SemRecord) error {
	return db.Create(record).Error
}
// SearchSemRecordCountsIn1Minute returns how many send records were created
// for the given recipient email within the last minute (rate limiting).
//
// Fix: the previous condition was "SemRecord = ?and created_at > ..." — a
// missing space broke the AND keyword, and SemRecord is the struct name, not
// a column; the recipient email lives in to_address (see SemRecord.ToAddress).
func (db *DbClient) SearchSemRecordCountsIn1Minute(email string) (int, error) {
	var count int
	err := db.Model(&SemRecord{}).Where("to_address = ? AND created_at > DATE_SUB(NOW(), INTERVAL 1 MINUTE)", email).Count(&count).Error
	if err != nil {
		return 0, err
	}
	return count, nil
}
|
package affine_test
import (
"testing"
"github.com/mkamadeus/cipher/cipher/affine"
)
// TestEncrypt checks one known affine-cipher vector: "kripto" with key (7, 10)
// must encrypt to "CZOLNE".
func TestEncrypt(t *testing.T) {
	const (
		plain    = "kripto"
		expected = "CZOLNE"
	)
	encrypted, err := affine.Encrypt(plain, 7, 10)
	if err != nil || encrypted != expected {
		t.Fatalf("affine encryption failed, expected %v, found %v", expected, encrypted)
	}
}
// TestNotRelativePrime ensures Encrypt rejects a multiplier (2) that is not
// coprime with 26.
func TestNotRelativePrime(t *testing.T) {
	if _, err := affine.Encrypt("kripto", 2, 10); err == nil {
		t.Fatalf("affine encryption should fail, m is expected to be relative prime to 26")
	}
}
|
package seev
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document03300101 is the XML document wrapper for the seev.033.001.01
// CorporateActionInstruction message.
type Document03300101 struct {
	XMLName xml.Name                       `xml:"urn:iso:std:iso:20022:tech:xsd:seev.033.001.01 Document"`
	Message *CorporateActionInstructionV01 `xml:"CorpActnInstr"`
}

// AddMessage allocates the wrapped message and returns it for population.
func (d *Document03300101) AddMessage() *CorporateActionInstructionV01 {
	d.Message = new(CorporateActionInstructionV01)
	return d.Message
}
// Scope
// An account owner sends the CorporateActionInstruction message to an account servicer to instruct election on a corporate action event.
// This message is used to provide the custodian with instructions on how the account owner wishes to proceed with a corporate action event. Instructions include investment decisions regarding the exercise of rights issues, the election of stock or cash when the option is available, and decisions on the conversion or tendering of securities.
// Usage
// The message may also be used to:
// - re-send a message previously sent (the sub-function of the message is Duplicate),
// - provide a third party with a copy of a message for information (the sub-function of the message is Copy),
// - re-send to a third party a copy of a message for information (the sub-function of the message is Copy Duplicate).
// ISO 15022 - 20022 COEXISTENCE
// This ISO 20022 message is reversed engineered from ISO 15022. Both standards will coexist for a certain number of years. Until this coexistence period ends, the usage of certain data types is restricted to ensure interoperability between ISO 15022 and 20022 users. Compliance to these rules is mandatory in a coexistence environment. The coexistence restrictions are described in a Textual Rule linked to the Message Items they concern. These coexistence textual rules are clearly identified as follows: “CoexistenceXxxxRule”.
type CorporateActionInstructionV01 struct {
	// Information that unambiguously identifies a CorporateActionInstruction message as known by the account owner (or the instructing party acting on its behalf).
	Identification *iso20022.DocumentIdentification12 `xml:"Id"`
	// Identification of a previously sent cancelled instruction document.
	CancelledInstructionIdentification *iso20022.DocumentIdentification15 `xml:"CancInstrId,omitempty"`
	// Identification of a previously sent instruction cancellation request document.
	InstructionCancellationRequestIdentification *iso20022.DocumentIdentification15 `xml:"InstrCxlReqId,omitempty"`
	// Identification of other documents as well as the document number.
	OtherDocumentIdentification []*iso20022.DocumentIdentification13 `xml:"OthrDocId,omitempty"`
	// Identification of another corporate action event that needs to be closely linked to the processing of the event notified in this document.
	EventsLinkage []*iso20022.CorporateActionEventReference1 `xml:"EvtsLkg,omitempty"`
	// General information about the corporate action event.
	CorporateActionGeneralInformation *iso20022.CorporateActionGeneralInformation6 `xml:"CorpActnGnlInf"`
	// General information about the safekeeping account, owner and account balance.
	AccountDetails *iso20022.AccountAndBalance2 `xml:"AcctDtls"`
	// Provides information about the beneficial owner of the securities.
	BeneficialOwnerDetails []*iso20022.PartyIdentification33 `xml:"BnfclOwnrDtls,omitempty"`
	// Information about the corporate action instruction.
	CorporateActionInstruction *iso20022.CorporateActionOption5 `xml:"CorpActnInstr"`
	// Provides additional information.
	AdditionalInformation *iso20022.CorporateActionNarrative7 `xml:"AddtlInf,omitempty"`
	// Party that originated the message, if other than the sender.
	MessageOriginator *iso20022.PartyIdentification10Choice `xml:"MsgOrgtr,omitempty"`
	// Party that is the final destination of the message, if other than the receiver.
	MessageRecipient *iso20022.PartyIdentification10Choice `xml:"MsgRcpt,omitempty"`
	// Additional information that cannot be captured in the structured fields and/or any other specific block.
	Extension []*iso20022.Extension2 `xml:"Xtnsn,omitempty"`
}
// AddIdentification allocates the Identification field and returns it for population.
func (c *CorporateActionInstructionV01) AddIdentification() *iso20022.DocumentIdentification12 {
	c.Identification = new(iso20022.DocumentIdentification12)
	return c.Identification
}
// AddCancelledInstructionIdentification allocates the CancelledInstructionIdentification field and returns it for population.
func (c *CorporateActionInstructionV01) AddCancelledInstructionIdentification() *iso20022.DocumentIdentification15 {
	c.CancelledInstructionIdentification = new(iso20022.DocumentIdentification15)
	return c.CancelledInstructionIdentification
}
// AddInstructionCancellationRequestIdentification allocates the InstructionCancellationRequestIdentification field and returns it for population.
func (c *CorporateActionInstructionV01) AddInstructionCancellationRequestIdentification() *iso20022.DocumentIdentification15 {
	c.InstructionCancellationRequestIdentification = new(iso20022.DocumentIdentification15)
	return c.InstructionCancellationRequestIdentification
}
// AddOtherDocumentIdentification appends a new entry to OtherDocumentIdentification and returns it for population.
func (c *CorporateActionInstructionV01) AddOtherDocumentIdentification() *iso20022.DocumentIdentification13 {
	newValue := new(iso20022.DocumentIdentification13)
	c.OtherDocumentIdentification = append(c.OtherDocumentIdentification, newValue)
	return newValue
}
// AddEventsLinkage appends a new entry to EventsLinkage and returns it for population.
func (c *CorporateActionInstructionV01) AddEventsLinkage() *iso20022.CorporateActionEventReference1 {
	newValue := new(iso20022.CorporateActionEventReference1)
	c.EventsLinkage = append(c.EventsLinkage, newValue)
	return newValue
}
// AddCorporateActionGeneralInformation allocates the CorporateActionGeneralInformation field and returns it for population.
func (c *CorporateActionInstructionV01) AddCorporateActionGeneralInformation() *iso20022.CorporateActionGeneralInformation6 {
	c.CorporateActionGeneralInformation = new(iso20022.CorporateActionGeneralInformation6)
	return c.CorporateActionGeneralInformation
}
// AddAccountDetails allocates the AccountDetails field and returns it for population.
func (c *CorporateActionInstructionV01) AddAccountDetails() *iso20022.AccountAndBalance2 {
	c.AccountDetails = new(iso20022.AccountAndBalance2)
	return c.AccountDetails
}
// AddBeneficialOwnerDetails appends a new entry to BeneficialOwnerDetails and returns it for population.
func (c *CorporateActionInstructionV01) AddBeneficialOwnerDetails() *iso20022.PartyIdentification33 {
	newValue := new(iso20022.PartyIdentification33)
	c.BeneficialOwnerDetails = append(c.BeneficialOwnerDetails, newValue)
	return newValue
}
// AddCorporateActionInstruction allocates the CorporateActionInstruction field and returns it for population.
func (c *CorporateActionInstructionV01) AddCorporateActionInstruction() *iso20022.CorporateActionOption5 {
	c.CorporateActionInstruction = new(iso20022.CorporateActionOption5)
	return c.CorporateActionInstruction
}
// AddAdditionalInformation allocates the AdditionalInformation field and returns it for population.
func (c *CorporateActionInstructionV01) AddAdditionalInformation() *iso20022.CorporateActionNarrative7 {
	c.AdditionalInformation = new(iso20022.CorporateActionNarrative7)
	return c.AdditionalInformation
}
// AddMessageOriginator allocates the MessageOriginator field and returns it for population.
func (c *CorporateActionInstructionV01) AddMessageOriginator() *iso20022.PartyIdentification10Choice {
	c.MessageOriginator = new(iso20022.PartyIdentification10Choice)
	return c.MessageOriginator
}
// AddMessageRecipient allocates the MessageRecipient field and returns it for population.
func (c *CorporateActionInstructionV01) AddMessageRecipient() *iso20022.PartyIdentification10Choice {
	c.MessageRecipient = new(iso20022.PartyIdentification10Choice)
	return c.MessageRecipient
}
// AddExtension appends a new entry to Extension and returns it for population.
func (c *CorporateActionInstructionV01) AddExtension() *iso20022.Extension2 {
	newValue := new(iso20022.Extension2)
	c.Extension = append(c.Extension, newValue)
	return newValue
}
|
package main
import (
"fmt"
"github.com/gorilla/handlers"
"github.com/paddycakes/arranmore-api/internal/sensor"
transportHTTP "github.com/paddycakes/arranmore-api/internal/transport/http"
"log"
"net/http"
"os"
)
// App - the struct which contains things
// like pointers to database connections
// (currently empty; the database wiring in Run is commented out).
type App struct {}
// Run sets up the Arranmore REST API (sensor service, HTTP routes, CORS
// middleware) and serves it. http.ListenAndServe blocks for the lifetime of
// the server, so Run only returns on server failure.
//
// Fixes: the startup log line previously came AFTER ListenAndServe and was
// therefore unreachable — it now runs just before serving. Also corrects the
// "Arranore" typo in the setup message.
func (app *App) Run() error {
	fmt.Println("Setting up Arranmore REST API")
	//var err error
	//_, err = database.NewDatabase()
	//if err != nil {
	//	return err
	//}
	sensorService := sensor.NewService()
	handler := transportHTTP.NewHandler(sensorService)
	handler.SetupRoutes()
	// Where ORIGIN_ALLOWED is like `scheme://dns[:port]`, or `*` (insecure)
	headers := handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type"})
	methods := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "OPTIONS"})
	// Need to sort this out
	// origins := handlers.AllowedOrigins([]string{os.Getenv("ORIGIN_ALLOWED")})
	origins := handlers.AllowedOrigins([]string{"*"})
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	log.Println("Arranmore REST API starting, listening on port ", port)
	return http.ListenAndServe(":"+port, handlers.CORS(headers, methods, origins)(handler.Router))
}
// main constructs the App and reports any startup failure from Run.
func main() {
	var app App
	err := app.Run()
	if err != nil {
		fmt.Println("Error starting up Arranmore REST API")
		fmt.Println(err)
	}
}
|
package httpd
// xlattice_go/httpd/name2Hash.go
import (
xd "github.com/jddixon/xlOverlay_go/datakeyed"
"sync"
)
/**
 * Maintains data structures mapping path names to NodeIDs, which
 * are used to retrieve data from a MemCache, an in-memory cache of
 * byte slices.
 */
type Name2Hash struct { // must implement xo.NameKeyedI
	buildLists []string // should be specific object
	hashCache  *xd.MemCache // backing in-memory cache
	hashMap    map[string][]byte // path name -> NodeID bytes, per the type comment
	siteNames  []string // NOTE(review): presumably the site names served; confirm
	mx         sync.Mutex // guards the fields above; do not copy a Name2Hash
}
|
/**
* Copyright (C) 2019, Xiongfa Li.
* All right reserved.
* @author xiongfa.li
* @date 2019/2/22
* @time 10:42
* @version V1.0
* Description:
*/
package test
import (
"container/list"
"fmt"
"github.com/xfali/gomem/commonPool"
"testing"
"time"
)
// TestCommonPool exercises CommonPool under pressure: the main loop takes 30
// buffers from a pool capped at 20, while a background goroutine slowly
// returns up to 6 borrowed buffers, so some Get calls must block until a
// buffer is put back (bounded by WaitTimeout).
func TestCommonPool(t *testing.T) {
	pb := commonPool.CommonPool{
		MaxIdle: 10,
		MaxSize: 20,
		// New allocates a fresh 1000-byte buffer when the pool is empty.
		New: func() interface{} {
			fmt.Println("create!")
			i := make([]byte, 1000)
			return i
		},
		WaitTimeout: time.Second * 10,
	}
	pb.Init()
	defer pb.Close()
	l := list.New()
	// Returner goroutine: once a second, hand one borrowed buffer back to the
	// pool (at most 6 in total), unblocking waiting Get calls below.
	go func() {
		time.Sleep(time.Second)
		i := 0
		e := l.Front()
		for e != nil {
			pb.Put(e.Value)
			r := e
			e = e.Next()
			l.Remove(r)
			time.Sleep(time.Second)
			if i < 5 {
				i++
			} else {
				break
			}
		}
	}()
	// Borrow 30 buffers; after MaxSize is reached, Get blocks until the
	// goroutine above returns one (the printed wait time shows this).
	for i := 0; i < 30; i++ {
		now := time.Now()
		buf := pb.Get()
		fmt.Printf("use time :%d ms\n", time.Since(now)/time.Millisecond)
		l.PushBack(buf)
	}
}
|
package main
// ClosurePrint schedules three deferred closures that all capture the loop
// variable i by reference, demonstrating the classic closure-capture pitfall.
// NOTE(review): the "3 3 3" output documented below assumes pre-Go-1.22 loop
// semantics (one shared variable per loop); from Go 1.22 each iteration gets
// its own i and this prints "2 1 0" — confirm the module's go directive.
func ClosurePrint() {
	for i :=0; i<3; i++{
		defer func() {println(i)}()
	}
}
/**
* Output:
* 3
* 3
* 3
*/
// 解释: 因为是闭包,在for迭代语句中,每个defer语句延时执行的函数引用都是同一个i迭代变量,
// 在循环结束后这个变量的值为3,因此最终输出的结果都是3
/**
* Output:
* 2
* 1
* 0
*/
// 修复思路: 在每轮迭代中为每一个defer语句 的闭包函数生成独有的变量。可以用下面两种方式:
// ClosurePrintV2 fixes the capture problem by shadowing the loop variable,
// giving each deferred closure its own copy; prints 2, 1, 0.
func ClosurePrintV2() {
	for i:=0;i<3;i++ {
		i:=i // shadow with a per-iteration local so each closure captures its own i
		defer func() {println(i)}()
	}
}
// ClosurePrintV3 fixes the capture problem by passing i as an argument: defer
// evaluates its call arguments immediately, so each deferred call pins the
// iteration's value; prints 2, 1, 0.
func ClosurePrintV3() {
	for i:=0;i<3;i++ {
		defer func(i int ) { // pass by argument: defer evaluates call args immediately
			println(i)
		}(i)
	}
}
package ykoath
import (
"fmt"
)
// tv is a single tag/value pair decoded from a TLV buffer.
type tv struct {
	tag   byte
	value []byte
}

// tvs is an ordered sequence of decoded tag/value pairs.
type tvs []tv

// read decodes a sequence of tag-length-value records from buf. Values alias
// buf rather than being copied. Malformed input (a length byte running past
// the end of buf) panics with a slice-bounds error, exactly as before.
func read(buf []byte) (out tvs) {
	for idx := 0; idx < len(buf); {
		tag := buf[idx]
		length := int(buf[idx+1])
		value := buf[idx+2 : idx+2+length]
		idx += 2 + length
		out = append(out, tv{tag: tag, value: value})
	}
	return out
}
// write produces a tlv or lv packet (if the tag is 0): [tag][length][data],
// where the tag byte is omitted for tag 0x00 and the length byte is omitted
// for payloads of 0 or 1 bytes (e.g. the PUT instruction's "property" byte).
// nil values are skipped, which is useful for optional tlv segments.
//
// Fixes: the > 255 overflow check previously ran AFTER byte(length) had
// already been emitted (silently truncated); it now runs before any output is
// built. Also fixes the "too send" typo in the panic message.
func write(tag byte, values ...[]byte) []byte {
	var (
		buf    []byte
		length int
		data   []byte
	)
	for _, value := range values {
		// skip nil values (useful for optional tlv segments)
		if value == nil {
			continue
		}
		buf = append(buf, value...)
		length += len(value)
	}
	// a single length byte cannot represent more than 255 bytes of payload
	if length > 255 {
		panic(fmt.Sprintf("too much data to send (%d bytes)", length))
	}
	// write the tag unless we skip it (useful for reusing write for sending
	// the APDU)
	if tag != 0x00 {
		data = append(data, tag)
	}
	// write the length unless this is a zero/one byte value
	if length > 1 {
		data = append(data, byte(length))
	}
	return append(data, buf...)
}
|
/*
twitter@hector_gool
*/
package main
import (
"fmt"
"strings"
)
// main demonstrates the variadic PasarAMayusculas with two arguments, no
// arguments, one argument, and a slice expanded with "...".
func main() {
	const formato = "\n En Mayúsculas: %v \n"
	fmt.Printf(formato, PasarAMayusculas("pera", "uva"))
	fmt.Printf(formato, PasarAMayusculas())
	fmt.Printf(formato, PasarAMayusculas("manzana"))
	frutas := []string{"mango", "sandía", "platano"}
	fmt.Printf(formato, PasarAMayusculas(frutas...))
}
// PasarAMayusculas returns a new slice with every fruit name upper-cased.
// With no arguments it returns nil.
func PasarAMayusculas(frutas ...string) []string {
	var enMayusculas []string
	for i := range frutas {
		enMayusculas = append(enMayusculas, strings.ToUpper(frutas[i]))
	}
	return enMayusculas
}
package main
import (
"github.com/edaniels/golinters/deferfor"
"golang.org/x/tools/go/analysis/singlechecker"
)
// main runs the deferfor analyzer as a standalone single-checker binary
// (go vet-style command line).
func main() {
	singlechecker.Main(deferfor.Analyzer)
}
|
//+build srv1
package main
import (
"fmt"
"log"
"math/rand"
"net/http"
"time"
dd "github.com/gchaincl/dd-go-opentracing"
"github.com/gorilla/mux"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
)
// init installs the Datadog opentracing tracer as the process-wide global
// tracer, with debug logging enabled.
func init() {
	tracer := dd.NewTracer()
	tracer.(*dd.Tracer).DebugLoggingEnabled = true
	opentracing.SetGlobalTracer(tracer)
}
// trace tags span with this service's identity and injects the span context
// into req's headers so the downstream service can continue the trace.
// NOTE(review): the op parameter is unused; the component is hard-coded to
// "/auth/{id}" regardless of the route — confirm whether op was meant here.
func trace(op string, span opentracing.Span, req *http.Request) error {
	ext.Component.Set(span, "/auth/{id}")
	ext.PeerService.Set(span, "srv1")
	dd.EnvTag.Set(span, "test")
	return span.Tracer().Inject(
		span.Context(),
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(req.Header),
	)
}
// PostUser simulates work with a random sleep, then forwards a traced POST to
// the downstream auth service for the same {id}.
//
// Fixes: the http.DefaultClient.Do error was previously ignored (and " OK"
// printed regardless), and the response body was never closed, leaking the
// connection.
func PostUser(w http.ResponseWriter, req *http.Request) {
	span := opentracing.StartSpan("POST")
	defer span.Finish()
	sleep := rand.Intn(1000)
	fmt.Printf("%s %s (%dms) ...", req.Method, req.URL, sleep)
	time.Sleep(time.Duration(sleep) * time.Millisecond)
	// mux.Vars(req) is evaluated before req is reassigned.
	req, _ = http.NewRequest("POST", "http://localhost:8002/auth/"+mux.Vars(req)["id"], nil)
	if err := trace("POST", span, req); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	// Close the body so the transport can reuse the connection.
	defer resp.Body.Close()
	fmt.Println(" OK")
}
// main registers the traced POST /users/{id} route and serves on :8001 until
// the server fails.
func main() {
	const bind = ":8001"
	router := mux.NewRouter()
	router.HandleFunc("/users/{id}", PostUser).
		Methods("POST")
	log.Printf("bind = %+v\n", bind)
	log.Fatalln(
		http.ListenAndServe(bind, router),
	)
}
|
package main
import (
"strconv"
"strings"
)
// Ex004 takes a string of comma-separated numbers and returns a slice of int.
// Each element is trimmed of surrounding whitespace (TrimSpace generalizes the
// old Trim(" ") to tabs/newlines as well); tokens that do not parse become 0,
// matching the previous behavior of ignoring the Atoi error.
//
// Fix: the old header comment claimed a map was created — it is a slice.
func Ex004(input string) []int {
	parts := strings.Split(input, ",")
	nums := make([]int, len(parts))
	for i, p := range parts {
		n, _ := strconv.Atoi(strings.TrimSpace(p)) // malformed -> 0, as before
		nums[i] = n
	}
	return nums
}
|
package ecal
import (
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"os/user"
"strings"
"testing"
"time"
"github.com/cfsalguero/ecal/proto"
"gopkg.in/v1/yaml"
)
// APIServers holds the credentials for one eCal API host entry.
type APIServers struct {
	APIHostname string `yaml:"ecal-host"`   // API host name
	APIKey      string `yaml:"ecal-key"`    // API key
	APISecret   string `yaml:"ecal-secret"` // API secret
}

// Config is the top-level YAML configuration: a map of named host entries
// (the tests require a "testing" entry).
type Config struct {
	Hosts map[string]APIServers `yaml:"hosts,flow"`
}

// config is the parsed configuration, loaded by TestMain via loadConfig.
var config Config

// configFile is the config path; a leading "~/" is expanded by loadConfig.
var configFile = "~/.ecal/config.yaml"

// eCalAPI is the shared client used by the tests, built in TestMain.
var eCalAPI *API
// loadConfig reads the YAML config file (expanding a leading "~/" in
// configFile to the current user's home directory) into config.
//
// Fix: the error from user.Current() was previously ignored, so a failed
// lookup led to a nil-pointer dereference on curUser.HomeDir; the lookup is
// now done only when needed and its error is checked.
func loadConfig(config *Config) error {
	if strings.HasPrefix(configFile, "~/") {
		curUser, err := user.Current()
		if err != nil {
			fmt.Printf("Error while resolving current user: %+v\n", err)
			return err
		}
		configFile = strings.Replace(configFile, "~/", curUser.HomeDir+"/", 1)
	}
	log.Printf("Reading config from %s", configFile)
	buf, err := ioutil.ReadFile(configFile)
	if err != nil {
		fmt.Printf("Error while reading config: %+v\n", err)
		return err
	}
	err = yaml.Unmarshal(buf, config)
	if err != nil {
		fmt.Printf("Cannot parse yaml file: %v\n", err)
		return err
	}
	return nil
}
// TestMain loads the config, validates that a "testing" host entry exists,
// builds the shared eCalAPI client, and then runs the test suite. It exits
// the process directly on setup failure (deferred calls will not run).
func TestMain(m *testing.M) {
	flag.Parse()
	err := loadConfig(&config)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	if _, ok := config.Hosts["testing"]; !ok {
		fmt.Println("No 'testing' entry in hosts list")
		os.Exit(1)
	}
	eCalAPI = NewECalAPI(config.Hosts["testing"].APIHostname, config.Hosts["testing"].APIKey, config.Hosts["testing"].APISecret)
	os.Exit(m.Run())
}
// TestAPI runs a create / read / delete round trip against the configured
// "testing" eCal host.
//
// Fixes: the previous version used t.Error on a CreateCalendar failure and
// then dereferenced the (nil) createResponse anyway; likewise calendar.Data
// was read even when GetCalendar returned an error. Failures now stop before
// the dereference, while still attempting the delete for cleanup.
func TestAPI(t *testing.T) {
	c := proto.CreateCalendar{
		Name:          randomString(20),
		Type:          "event",
		Categories:    "test",
		Reference:     randomString(20),
		Logo:          "",
		Note:          "zapp",
		Tags:          []string{"zapp", "brannigan"},
		DraftCalendar: proto.CALENDAR_IS_VISIBLE,
	}
	createResponse, err := eCalAPI.CreateCalendar(c)
	if err != nil {
		// Fail fast: createResponse is unusable from here on.
		t.Fatal(err)
	}
	if createResponse.Status != "200" {
		t.Errorf("Error while creating a calendar. HTTP status %s", createResponse.Status)
	}
	fmt.Printf("Created calendar with ID: %s\n", createResponse.CalendarID)
	calendar, err := eCalAPI.GetCalendar(createResponse.CalendarID)
	if err != nil {
		t.Errorf("Error while reading calendar %s: %v\n", createResponse.CalendarID, err)
	} else if calendar.Data.Name != c.Name {
		t.Errorf("Invalid data while retrieving the calendar. Name field does not match")
	}
	// Always attempt cleanup of the created calendar.
	err = eCalAPI.DelCalendar(createResponse.CalendarID)
	if err != nil {
		t.Errorf("Error while deleting calendar %s: %v\n", createResponse.CalendarID, err)
	}
}
// rnd is seeded once at startup. The previous code called rand.Seed with the
// wall clock on every randomString call, which resets the sequence each time
// and yields identical strings for calls landing on the same clock tick.
var rnd = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))

// randomString returns a random string of length strlen drawn from [a-z0-9].
func randomString(strlen int) string {
	const chars = "abcdefghijklmnopqrstuvwxyz0123456789"
	result := make([]byte, strlen)
	for i := range result {
		result[i] = chars[rnd.Intn(len(chars))]
	}
	return string(result)
}
|
package bitbucket
import (
"bytes"
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"net/url"
"github.com/DaoCloud/go-bitbucket/oauth1"
)
var (
	// Returned if the specified resource does not exist.
	ErrNotFound = errors.New("Not Found")
	// Returned if the caller attempts to make a call or modify a resource
	// for which the caller is not authorized.
	//
	// The request was a valid request, the caller's authentication credentials
	// succeeded but those credentials do not grant the caller permission to
	// access the resource.
	ErrForbidden = errors.New("Forbidden")
	// Returned if the call requires authentication and either the credentials
	// provided failed or no credentials were provided.
	ErrNotAuthorized = errors.New("Unauthorized")
	// Returned if the caller submits a badly formed request. For example,
	// the caller can receive this return if you forget a required parameter.
	ErrBadRequest = errors.New("Bad Request")
)

// DefaultClient uses DefaultTransport, and is used internally to execute
// all http.Requests. This may be overridden for unit testing purposes.
//
// IMPORTANT: this is not thread safe and should not be touched with
// the exception of overriding for mock unit testing.
var DefaultClient = http.DefaultClient
// do executes an OAuth1-signed request against the Bitbucket 1.0 API.
// params become the query string, values the form body; when v is non-nil the
// JSON response is unmarshalled into it. Known error statuses are mapped to
// the package sentinel errors above.
func (c *Client) do(method string, path string, params url.Values, values url.Values, v interface{}) error {
	// if this is the guest client then we don't need
	// to sign the request ... we will execute just
	// a simple http request.
	if c == Guest {
		return c.guest(method, path, params, values, v)
	}
	// create the oauth consumer used to sign the request
	var client = oauth1.Consumer{
		ConsumerKey:    c.ConsumerKey,
		ConsumerSecret: c.ConsumerSecret,
	}
	// create the URI
	uri, err := url.Parse("https://api.bitbucket.org/1.0" + path)
	if err != nil {
		return err
	}
	if params != nil && len(params) > 0 {
		uri.RawQuery = params.Encode()
	}
	// create the access token
	token := oauth1.NewAccessToken(c.AccessToken, c.TokenSecret, nil)
	// create the request by hand; Close: true disables connection reuse
	req := &http.Request{
		URL:        uri,
		Method:     method,
		ProtoMajor: 1,
		ProtoMinor: 1,
		Close:      true,
	}
	if values != nil && len(values) > 0 {
		body := []byte(values.Encode())
		buf := bytes.NewBuffer(body)
		req.Body = ioutil.NopCloser(buf)
	}
	// add the Form data to the request
	// (we'll need this in order to sign the request); Form must be populated
	// BEFORE Sign so the form values enter the OAuth signature base string
	req.Form = values
	// sign the request
	if err := client.Sign(req, token); err != nil {
		return err
	}
	// make the request using the default http client
	resp, err := DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// Read the bytes from the body (make sure we defer close the body)
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// Check for an http error status (ie not 200 StatusOK) and map it to the
	// package sentinel errors
	switch resp.StatusCode {
	case 404:
		return ErrNotFound
	case 403:
		return ErrForbidden
	case 401:
		return ErrNotAuthorized
	case 400:
		return ErrBadRequest
	}
	// Unmarshall the JSON response
	if v != nil {
		return json.Unmarshal(body, v)
	}
	return nil
}
// guest executes an unsigned (anonymous) request against the Bitbucket 1.0
// API; otherwise identical to do: params become the query string, values the
// form body, and the JSON response is unmarshalled into v when non-nil.
func (c *Client) guest(method string, path string, params url.Values, values url.Values, v interface{}) error {
	// create the URI
	uri, err := url.Parse("https://api.bitbucket.org/1.0" + path)
	if err != nil {
		return err
	}
	if params != nil && len(params) > 0 {
		uri.RawQuery = params.Encode()
	}
	// create the request by hand; Close: true disables connection reuse
	req := &http.Request{
		URL:        uri,
		Method:     method,
		ProtoMajor: 1,
		ProtoMinor: 1,
		Close:      true,
	}
	// add the Form values to the body
	if values != nil && len(values) > 0 {
		body := []byte(values.Encode())
		buf := bytes.NewBuffer(body)
		req.Body = ioutil.NopCloser(buf)
	}
	// make the request using the default http client
	resp, err := DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// Read the bytes from the body (make sure we defer close the body)
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// Check for an http error status (ie not 200 StatusOK) and map it to the
	// package sentinel errors
	switch resp.StatusCode {
	case 404:
		return ErrNotFound
	case 403:
		return ErrForbidden
	case 401:
		return ErrNotAuthorized
	case 400:
		return ErrBadRequest
	}
	// Unmarshall the JSON response
	if v != nil {
		return json.Unmarshal(body, v)
	}
	return nil
}
|
package utils
// XY is a simple 2D integer coordinate pair.
type XY struct {
	X int // horizontal component
	Y int // vertical component
}
|
package lib_test
import (
"accountapi/lib"
"testing"
)
// TestErrors verifies that the lib error constructors produce values
// recognised by their matching predicates, and that ErrorAPI renders
// its message as "<code>:<message>".
func TestErrors(t *testing.T) {
	// NOTE: t.Error already marks the test as failed, so the redundant
	// t.Fail() calls from the original have been removed.
	e := lib.NewErrorInvalidEnum()
	if !lib.IsErrorInvalidEnum(e) {
		// FIX: message previously said "ErrorInvalidNum" (typo).
		t.Error("ErrorInvalidEnum not recognised.")
	}
	eAPI := lib.NewErrorAPI(429, "test_api_error")
	if !lib.IsErrorAPI(eAPI) {
		t.Error("ErrorAPI not recognised.")
	}
	if eAPI.Error() != "429:test_api_error" {
		t.Errorf("Expected ErrorAPI(test_api_error), got '%s'", eAPI.Error())
	}
}
|
package datasetapi
import (
"bytes"
"context"
"io/ioutil"
dstypes "github.com/lexis-project/lexis-backend-services-interface-datasets.git/client/data_set_management"
models "github.com/lexis-project/lexis-backend-services-api.git/models"
"github.com/lexis-project/lexis-backend-services-api.git/restapi/operations/data_set_management"
"github.com/go-openapi/runtime/middleware"
l "gitlab.com/cyclops-utilities/logging"
)
// Certificate proxies the Certificate call of the upstream dataset
// service and maps each upstream error response (401/500/502/503) onto
// the corresponding API responder. On success the certificate bytes,
// captured in an in-memory buffer, are returned to the caller.
func (p *DataSetAPI) Certificate(ctx context.Context,
	params data_set_management.CertificateParams) middleware.Responder {
	// The upstream endpoint takes no parameters beyond the writer sink.
	rparams := dstypes.NewCertificateParams()
	// In-memory sink for the certificate payload; NopCloser adapts it
	// to the io.ReadCloser the OK responder expects.
	// (See: https://stackoverflow.com/questions/40316052 and /23454940)
	var buffermemory bytes.Buffer
	buffer := ioutil.NopCloser(&buffermemory)
	_, err := p.getClient(params.HTTPRequest).DataSetManagement.Certificate(ctx, rparams, &buffermemory)
	if err != nil {
		l.Info.Printf("Error calling Certificate endpoint\n")
		// Bind the concrete error type once in the switch header instead
		// of re-asserting err.(*T) inside every case (idiomatic Go).
		switch v := err.(type) {
		case *dstypes.CertificateUnauthorized: // 401
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCertificateUnauthorized().WithPayload(&payload)
		case *dstypes.CertificateInternalServerError: // 500
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCertificateInternalServerError().WithPayload(&payload)
		case *dstypes.CertificateBadGateway: // 502
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCertificateBadGateway().WithPayload(&payload)
		case *dstypes.CertificateServiceUnavailable: // 503
			payload := models.ErrorResponse{
				ErrorString: v.Payload.ErrorString,
			}
			return data_set_management.NewCertificateServiceUnavailable().WithPayload(&payload)
		default:
			// Unknown upstream failure: report as service unavailable.
			payload := fillErrorResponse(err)
			return data_set_management.NewCertificateServiceUnavailable().WithPayload(payload)
		}
	}
	// Return the buffered certificate bytes as the 200 payload.
	return data_set_management.NewCertificateOK().WithPayload(buffer)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.