text stringlengths 11 4.05M |
|---|
package function
import (
"fmt"
"log"
"net/http"
"os"
)
// stdLogger writes to stdout with no prefix and zero flags, so each
// log line is exactly the printed message (easy to match in CI logs).
var (
stdLogger = log.New(os.Stdout, "", 0)
)
// TestDeployment is an HTTP handler used to verify that environment
// variables configured at deploy time are visible inside the function.
// It logs a few of them and echoes them back in the response body.
//
// NOTE(review): this deliberately prints API_KEY to the log and the
// response; only deploy with throwaway test credentials.
func TestDeployment(w http.ResponseWriter, r *http.Request) {
buildHash := os.Getenv("BUILD_HASH")
stdLogger.Printf("BuildHash: %s", buildHash)

regularEnvVar := os.Getenv("REGULAR_ENV_VAR")
stdLogger.Printf("RegularEnvVar: %s", regularEnvVar)

apiKey := os.Getenv("API_KEY")
stdLogger.Printf("ApiKey: %s", apiKey)

fmt.Fprintf(w,
"drone-gcf plugin, hash: [%s] api key: [%s] regular env var: [%s] other env vars: [%s] [%s] [%s]",
buildHash,
apiKey,
regularEnvVar,
os.Getenv("ENV_VAR_WITH_SPACE"),
os.Getenv("ENV_VAR_WITH_QUOTES"),
os.Getenv("ENV_VAR_WITH_COMMA"),
)
}
|
package main
import (
"fmt"
"unsafe"
)
// main simply invokes caller; the program exists to inspect the
// generated assembly for the call sequence (see comments at file end).
func main() {
caller()
}
// caller allocates locals of several kinds (zero-size struct, int,
// buffered channel) and prints their sizes/addresses so the compiled
// output can be inspected for stack layout and escape analysis.
func caller() {
var (
x int64 = 1
y int64 = 2
)
sum := callee(x, y)
fmt.Println(sum)

emptyStruct := struct{}{}
number := 12
signals := make(chan struct{}, 10)

fmt.Println(unsafe.Sizeof(emptyStruct))
fmt.Printf("%p\n", &emptyStruct)
fmt.Printf("%p\n", &number)
fmt.Println("chstruct size:", cap(signals))
fmt.Printf("%p\n", &signals)
}
// callee returns the sum of its operands; it is kept as a separate
// function so the call shows up in the disassembly.
func callee(a, b int64) int64 {
return a + b
}
//go tool objdump -s "main\.main" channeldeadlock.exe | findstr CALL // Windows: disassemble one function of an executable and keep only CALL instructions; on Linux pipe through `grep CALL` instead
//Useful for tracing how Go source turns into machine-level call sequences.
//go tool compile -N -l -S stackcall.go
|
/*
* Copyright (c) 2013 Landon Fuller <landonf@mac68k.info>
* All rights reserved.
*/
/* Select-based polling support for the pcap API */
package pcap
/*
#include <pcap/pcap.h>
#include <sys/select.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
// Indirection required to use Go callbacks
extern void mac68k_pcap_dispatchCallback (unsigned char *user, struct pcap_pkthdr *h, unsigned char *bytes);
static void pcap_dispatch_cb_handler (u_char *user, const struct pcap_pkthdr *h, const u_char *bytes) {
mac68k_pcap_dispatchCallback((unsigned char *) user, (struct pcap_pkthdr *) h, (unsigned char *) bytes);
}
// cgo gets upset when we use 'select'
static int my_select (int nfds, fd_set *readfds, fd_set *writefds, fd_set *errorfds, struct timeval *timeout) {
return select(nfds, readfds, writefds, errorfds, timeout);
}
// cgo refuses to resolve the FD_* macros.
static void MY_FD_ZERO (fd_set *fdset) {
FD_ZERO(fdset);
}
static void MY_FD_SET (int fd, fd_set *fdset) {
FD_SET(fd, fdset);
}
static int MY_FD_ISSET (int fd, fd_set *fdset) {
return FD_ISSET(fd, fdset);
}
*/
import "C"
import (
"errors"
"fmt"
"unsafe"
)
// Server that manages polling the backing file descriptor.
// waitfd/signalfd are the read/write ends of a pipe used purely as a
// shutdown signal for the selector goroutine.
type pollServer struct {
// Channel that may be used to read packets
packets chan []byte
// The pcap file descriptor
pcapfd C.int
// If readable, the poll server should stop. Can be made readable
// by writing to the signalFD
waitfd C.int
// Write to this file descriptor to make waitFD readable.
signalfd C.int
// Backing capture source
source *captureSource
}
// max returns the larger of two descriptors; used to compute the
// nfds argument (highest fd + 1) for select(2).
func max(x C.int, y C.int) C.int {
if y > x {
return y
}
return x
}
// mac68k_pcap_dispatchCallback is called from C (via
// pcap_dispatch_cb_handler) once per captured packet.
//
//export mac68k_pcap_dispatchCallback
func mac68k_pcap_dispatchCallback(user *C.uchar, h *C.struct_pcap_pkthdr, bytes *C.uchar) {
// TODO: deliver the packet (h/bytes) to the owning pollServer.
}
// selector runs the select(2) loop for the poll server. It blocks
// until either the pcap fd is readable (packets pending, dispatch
// them) or waitfd is readable (shutdown requested via signalfd).
func (server *pollServer) selector() {
/* Determine the maxfd (select wants highest fd + 1) */
var maxfd C.int
maxfd = 0
maxfd = max(server.pcapfd, maxfd)
maxfd = max(server.waitfd, maxfd)
maxfd += 1
/* Configure fd sets */
var master_readset C.fd_set
C.MY_FD_ZERO(&master_readset)
C.MY_FD_SET(server.pcapfd, &master_readset)
C.MY_FD_SET(server.waitfd, &master_readset)
for {
// select mutates the fd_set in place, so hand it a fresh copy
// of the master set on every iteration.
readset := master_readset
ret, err := C.my_select(maxfd, &readset, nil, nil, nil)
if ret == -1 {
// Shouldn't happen!
fmt.Println("Unexpected select error", err)
}
/* The select timed out */
if ret == 0 {
continue
}
/* Check for completion */
if C.MY_FD_ISSET(server.waitfd, &readset) != 0 {
fmt.Println("Cleaning up")
C.close(server.waitfd)
C.close(server.signalfd)
break
}
/* Check for pcap readability */
// BUG FIX: this branch previously re-tested server.waitfd, so a
// readable pcap fd never triggered a dispatch and captured
// packets were silently dropped. It must test server.pcapfd.
if C.MY_FD_ISSET(server.pcapfd, &readset) != 0 {
/* Dispatch a read */
C.pcap_dispatch(server.source.cptr, -1, unsafe.Pointer(C.pcap_dispatch_cb_handler), nil)
}
}
}
// Create a new poll server for the given capture source.
// The source is switched to non-blocking mode, a pipe is created for
// shutdown signaling, and the selector goroutine is started.
func newPollServer(source *captureSource) (*pollServer, error) {
server := new(pollServer)
server.source = source
/* Set up our error buffer */
errbuf := (*C.char)(C.calloc(C.PCAP_ERRBUF_SIZE, 1))
defer C.free(unsafe.Pointer(errbuf))
/* Mark source non-blocking */
if ret := C.pcap_setnonblock(source.cptr, 1, errbuf); ret != 0 {
return nil, errors.New(C.GoString(errbuf))
}
/* Configure the fd-based signaling mechanism */
var fds [2]C.int
if ret, err := C.pipe(&fds[0]); ret != 0 {
return nil, fmt.Errorf("Failed to create signal pipe: %v", err)
}
server.waitfd = fds[0]
server.signalfd = fds[1]
// NOTE(review): pcap_get_selectable_fd can return -1 for sources
// that are not selectable — TODO confirm callers never pass one.
server.pcapfd = C.pcap_get_selectable_fd(source.cptr)
/* Fire off the background handler */
go server.selector()
return server, nil
}
|
package flow
import (
"math"
"sync"
"testing"
"time"
)
// TestBasic exercises 100 independent meters concurrently: each marks
// 1000/tick for 100 ticks (expected rate 25000/s at 40ms ticks), then
// 200/tick for 200 ticks (expected rate 5000/s), and finally checks
// the grand total of 100*1000 + 200*200 = 140000.
func TestBasic(t *testing.T) {
var wg sync.WaitGroup
wg.Add(100)
for i := 0; i < 100; i++ {
go func() {
defer wg.Done()
ticker := time.NewTicker(40 * time.Millisecond)
defer ticker.Stop()
m := new(Meter)
for i := 0; i < 100; i++ {
m.Mark(1000)
<-ticker.C
}
actual := m.Snapshot()
if !approxEq(actual.Rate, 25000, 500) {
t.Errorf("expected rate 25000 (±500), got %f", actual.Rate)
}
for i := 0; i < 200; i++ {
m.Mark(200)
<-ticker.C
}
// Adjusts
actual = m.Snapshot()
if !approxEq(actual.Rate, 5000, 200) {
t.Errorf("expected rate 5000 (±200), got %f", actual.Rate)
}
// Let it settle.
time.Sleep(2 * time.Second)
// get the right total
actual = m.Snapshot()
if actual.Total != 140000 {
// BUG FIX: the message previously printed 120000 even though
// the assertion checks for 140000.
t.Errorf("expected total %d, got %d", 140000, actual.Total)
}
}()
}
wg.Wait()
}
// TestShared exercises 20 meters, each shared by 20 marking goroutines
// plus one checking goroutine (hence wg.Add(20 * 21)). Per meter:
// 20 writers * 50/tick = 1000 per 40ms tick => rate 25000/s, then
// 20 writers * 10/tick = 200 per tick => rate 5000/s, and a final
// total of 20 * (100*50 + 200*10) = 140000.
func TestShared(t *testing.T) {
var wg sync.WaitGroup
wg.Add(20 * 21)
for i := 0; i < 20; i++ {
m := new(Meter)
for j := 0; j < 20; j++ {
go func() {
defer wg.Done()
ticker := time.NewTicker(40 * time.Millisecond)
defer ticker.Stop()
for i := 0; i < 100; i++ {
m.Mark(50)
<-ticker.C
}
for i := 0; i < 200; i++ {
m.Mark(10)
<-ticker.C
}
}()
}
// Checker goroutine: sleeps for the duration of each marking phase
// and then samples the shared meter.
go func() {
defer wg.Done()
time.Sleep(40 * 100 * time.Millisecond)
actual := m.Snapshot()
if !approxEq(actual.Rate, 25000, 250) {
t.Errorf("expected rate 25000 (±250), got %f", actual.Rate)
}
time.Sleep(40 * 200 * time.Millisecond)
// Adjusts
actual = m.Snapshot()
if !approxEq(actual.Rate, 5000, 50) {
t.Errorf("expected rate 5000 (±50), got %f", actual.Rate)
}
// Let it settle.
time.Sleep(2 * time.Second)
// get the right total
actual = m.Snapshot()
if actual.Total != 140000 {
t.Errorf("expected total %d, got %d", 140000, actual.Total)
}
}()
}
wg.Wait()
}
// TestUnregister verifies that idle meters are unregistered from the
// global sweeper and re-registered once marking resumes. 100 meters
// mark 1/tick for 40 ticks (total 40, rate ~10/s at 100ms ticks),
// pause long enough to be swept, then mark 2/tick for 40 more ticks
// (total 120, rate ~20/s).
func TestUnregister(t *testing.T) {
var wg sync.WaitGroup
wg.Add(100 * 2)
pause := make(chan struct{})
for i := 0; i < 100; i++ {
m := new(Meter)
go func() {
defer wg.Done()
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for i := 0; i < 40; i++ {
m.Mark(1)
<-ticker.C
}
<-pause
time.Sleep(2 * time.Second)
for i := 0; i < 40; i++ {
m.Mark(2)
<-ticker.C
}
}()
go func() {
defer wg.Done()
time.Sleep(40 * 100 * time.Millisecond)
actual := m.Snapshot()
if !approxEq(actual.Rate, 10, 1) {
t.Errorf("expected rate 10 (±1), got %f", actual.Rate)
}
<-pause
actual = m.Snapshot()
if actual.Total != 40 {
// BUG FIX: the message previously said "expected total 4000"
// although the assertion checks for 40.
t.Errorf("expected total 40, got %d", actual.Total)
}
time.Sleep(2*time.Second + 40*100*time.Millisecond)
actual = m.Snapshot()
if !approxEq(actual.Rate, 20, 4) {
t.Errorf("expected rate 20 (±4), got %f", actual.Rate)
}
time.Sleep(2 * time.Second)
actual = m.Snapshot()
if actual.Total != 120 {
t.Errorf("expected total 120, got %d", actual.Total)
}
}()
}
// With all meters idle at the pause point, the sweeper should have
// dropped every one of them.
time.Sleep(60 * time.Second)
globalSweeper.mutex.Lock()
if len(globalSweeper.meters) != 0 {
t.Errorf("expected all sweepers to be unregistered: %d", len(globalSweeper.meters))
}
globalSweeper.mutex.Unlock()
close(pause)
wg.Wait()
// Marking resumed, so every meter should be registered again.
globalSweeper.mutex.Lock()
if len(globalSweeper.meters) != 100 {
t.Errorf("expected all sweepers to be registered: %d", len(globalSweeper.meters))
}
globalSweeper.mutex.Unlock()
}
// approxEq reports whether a and b differ by strictly less than err.
func approxEq(a, b, err float64) bool {
diff := a - b
if diff < 0 {
diff = -diff
}
return diff < err
}
|
package connect
import (
"encoding/binary"
"errors"
"io"
"github.com/xgheaven/localmap/logger"
)
// Block type identifiers exchanged on the wire.
// Names presumably mean: hello, hello-reply, request-connection,
// request-reply, close, heartbeat — TODO confirm against the peer
// implementation.
const (
HEL = iota
HELRLY
REQCON
REQRLY
CLOSE
HEART
)
// BLOCKREADSIZE is the chunk size used when streaming block payloads.
const (
BLOCKREADSIZE = 1024
)
type (
// BlockHeader is the 4-byte wire header: type, flag, then a
// little-endian uint16 payload length.
BlockHeader struct {
Type uint8
Flag uint8
Len uint16
}
// Block is a header plus its raw payload bytes.
Block struct {
*BlockHeader
Data []byte
}
// baseBlock appears unused by the constructors below —
// NOTE(review): candidate for removal, verify other files first.
baseBlock struct {
Len uint32
Raw []byte
Type byte
}
// HelloBlock carries the peer's version string.
HelloBlock struct {
*Block
Version string
}
// HelloReplyBlcok carries the server/client ports and version.
// (The "Blcok" typo is kept: renaming would break callers.)
HelloReplyBlcok struct {
*Block
Version string
Sport uint16
Cport uint16
}
// ReqConnBlock requests a new connection.
ReqConnBlock struct {
*Block
}
// CloseBlock signals connection teardown.
CloseBlock struct {
*Block
}
// HeartBlock is a keep-alive heartbeat.
HeartBlock struct {
*Block
}
)
// NewBlock reads one length-prefixed block from reader.
//
// Wire format (little-endian): byte 0 = type, byte 1 = flag,
// bytes 2-3 = uint16 payload length, followed by that many payload
// bytes.
func NewBlock(reader io.Reader) (*Block, error) {
rawHeader := make([]byte, 4)
// BUG FIX: a bare reader.Read may legally return fewer than 4 bytes
// on a healthy stream (the old code then failed with "wrong block
// header"); io.ReadFull retries until the buffer is full. The same
// applies to the payload loop below, which also checked the error
// only after consuming n — masking a short final read.
n, err := io.ReadFull(reader, rawHeader)
logger.Debug(n, err == nil)
if err != nil {
return nil, err
}
length := binary.LittleEndian.Uint16(rawHeader[2:4])
typ := rawHeader[0]
flag := rawHeader[1]
base := &BlockHeader{Len: length, Flag: flag, Type: typ}
data := make([]byte, length)
if _, err := io.ReadFull(reader, data); err != nil {
return nil, err
}
logger.Debug("Block: HEADER ", rawHeader, "BODY", data)
return &Block{BlockHeader: base, Data: data}, nil
}
// NewHelloBlock wraps a raw Block as a HelloBlock, interpreting the
// entire payload as the peer's version string.
func NewHelloBlock(block *Block) (helloBlock *HelloBlock, err error) {
helloBlock = &HelloBlock{Block: block, Version: string(block.Data)}
return helloBlock, nil
}
// NewHelloReplyBlock parses a hello-reply payload: bytes 0-1 are the
// server port, bytes 2-3 the client port (both little-endian), and
// the remainder is the peer's version string.
func NewHelloReplyBlock(block *Block) (helloReplyBlcok *HelloReplyBlcok, err error) {
// Guard: the indexing below would panic on a payload shorter than
// the two mandatory port fields.
if len(block.Data) < 4 {
return nil, errors.New("hello reply block too short")
}
helloReplyBlcok = &HelloReplyBlcok{Block: block}
helloReplyBlcok.Sport = binary.LittleEndian.Uint16(helloReplyBlcok.Data[:2])
helloReplyBlcok.Cport = binary.LittleEndian.Uint16(helloReplyBlcok.Data[2:4])
helloReplyBlcok.Version = string(block.Data[4:])
return
}
// NewReqConnBlock wraps block as a connection-request block.
func NewReqConnBlock(block *Block) (reqConnBlock *ReqConnBlock, err error) {
return &ReqConnBlock{Block: block}, nil
}
// NewCloseBlock wraps block as a close-connection block.
func NewCloseBlock(block *Block) (closeBlock *CloseBlock, err error) {
return &CloseBlock{Block: block}, nil
}
// NewHeartBlock wraps block as a heartbeat block.
func NewHeartBlock(block *Block) (heartBlock *HeartBlock, err error) {
return &HeartBlock{Block: block}, nil
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//68. Text Justification
//Given an array of words and a width maxWidth, format the text such that each line has exactly maxWidth characters and is fully (left and right) justified.
//You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces ' ' when necessary so that each line has exactly maxWidth characters.
//Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
//For the last line of text, it should be left justified and no extra space is inserted between words.
//Note:
//A word is defined as a character sequence consisting of non-space characters only.
//Each word's length is guaranteed to be greater than 0 and not exceed maxWidth.
//The input array words contains at least one word.
//Example 1:
//Input:
//words = ["This", "is", "an", "example", "of", "text", "justification."]
//maxWidth = 16
//Output:
//[
// "This is an",
// "example of text",
// "justification. "
//]
//Example 2:
//Input:
//words = ["What","must","be","acknowledgment","shall","be"]
//maxWidth = 16
//Output:
//[
// "What must be",
// "acknowledgment ",
// "shall be "
//]
//Explanation: Note that the last line is "shall be " instead of "shall be",
// because the last line must be left-justified instead of fully-justified.
// Note that the second line is also left-justified because it contains only one word.
//Example 3:
//Input:
//words = ["Science","is","what","we","understand","well","enough","to","explain",
// "to","a","computer.","Art","is","everything","else","we","do"]
//maxWidth = 20
//Output:
//[
// "Science is what we",
// "understand well",
// "enough to explain to",
// "a computer. Art is",
// "everything else we",
// "do "
//]
//func fullJustify(words []string, maxWidth int) []string {
//}
// Time Is Money |
// Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lockstore
// Iterator iterates the entries in the MemStore.
// key and val are reusable buffers holding a copy of the current
// entry; an empty key means the iterator is not positioned (see Valid).
type Iterator struct {
ls *MemStore
key []byte
val []byte
}
// NewIterator returns a new Iterator for the lock store. The iterator
// starts unpositioned; call one of the Seek* methods before use.
func (ls *MemStore) NewIterator() *Iterator {
it := &Iterator{ls: ls}
return it
}
// Valid reports whether the iterator is positioned at a valid node
// (the key buffer is non-empty only while positioned on one).
func (it *Iterator) Valid() bool {
return len(it.key) > 0
}
// Key returns the key at the current position. The slice is reused by
// the iterator; copy it if it must outlive the next move.
func (it *Iterator) Key() []byte { return it.key }
// Value returns the value at the current position. The slice is reused
// by the iterator; copy it if it must outlive the next move.
func (it *Iterator) Value() []byte { return it.val }
// Next moves the iterator to the first entry with a key strictly
// greater than the current one.
func (it *Iterator) Next() {
next, _ := it.ls.findGreater(it.key, false) // find >, no equality
it.setKeyValue(next)
}
// Prev moves the iterator to the last entry with a key strictly less
// than the current one.
func (it *Iterator) Prev() {
prev, _ := it.ls.findLess(it.key, false) // find <, no equality
it.setKeyValue(prev)
}
// Seek positions the iterator at the first entry with key >= seekKey.
func (it *Iterator) Seek(seekKey []byte) {
hit, _ := it.ls.findGreater(seekKey, true) // find >=
it.setKeyValue(hit)
}
// SeekForPrev positions the iterator at the last entry with
// key <= target.
func (it *Iterator) SeekForPrev(target []byte) {
hit, _ := it.ls.findLess(target, true) // find <=
it.setKeyValue(hit)
}
// SeekForExclusivePrev positions the iterator at the last entry with
// key strictly less than target.
func (it *Iterator) SeekForExclusivePrev(target []byte) {
hit, _ := it.ls.findLess(target, false) // find <, no equality
it.setKeyValue(hit)
}
// SeekToFirst positions the iterator at the first entry in the store.
func (it *Iterator) SeekToFirst() {
first := it.ls.getNext(it.ls.head, 0)
it.setKeyValue(first)
}
// SeekToLast positions the iterator at the last entry in the store.
func (it *Iterator) SeekToLast() {
last := it.ls.findLast()
it.setKeyValue(last)
}
// setKeyValue copies e's key and value into the iterator's reusable
// buffers (appending to [:0] keeps the backing capacity across moves;
// an empty entry key marks the iterator invalid).
func (it *Iterator) setKeyValue(e entry) {
it.key = append(it.key[:0], e.key...)
it.val = append(it.val[:0], e.getValue(it.ls.getArena())...)
}
|
package config
import (
"os"
"sync"
"github.com/DATA-DOG/go-sqlmock"
"github.com/jmoiron/sqlx"
"github.com/subosito/gotenv"
)
// BbbxConfig bundles the application's database handle with the
// sqlmock controller used by tests.
type BbbxConfig struct {
DB *sqlx.DB
Mock sqlmock.Sqlmock
}
// bbbxConfig is the lazily-created singleton; once guards its
// one-time initialization.
var (
bbbxConfig *BbbxConfig
once sync.Once
)
// GetInstanceWithEnv returns the singleton config, initializing it on
// first call: load the .env file, optionally override ENV, and open
// the MySQL/mock connection. On later calls env is ignored.
func GetInstanceWithEnv(env string) *BbbxConfig {
once.Do(func() {
// NOTE(review): gotenv.Load's error is discarded — presumably a
// best-effort load when .env is absent; confirm that's intended.
gotenv.Load(os.Getenv("GOPATH") + "/src/github.com/rbpermadi/bobobox/.env")
if env != "" {
os.Setenv("ENV", env)
}
db, mock := NewMySQL()
bbbxConfig = &BbbxConfig{DB: db, Mock: mock}
})
return bbbxConfig
}
// GetInstance returns the singleton config, selecting the environment
// from the ENV environment variable (possibly empty).
func GetInstance() *BbbxConfig {
return GetInstanceWithEnv(os.Getenv("ENV"))
}
|
package adapter
import "fmt"
// INonBattery is a non-rechargeable battery: it can only be used.
type INonBattery interface {
Use()
}

// IReBattery is a rechargeable battery: usable and chargeable.
type IReBattery interface {
Use()
Charge()
}
// NonA is a concrete non-rechargeable battery.
type NonA struct {
}

// Use reports that the battery is being used.
func (NonA) Use() {
fmt.Println("NonA using")
}
// AdapterNonToYes adapts a non-rechargeable battery to the
// rechargeable interface: Use is provided by the embedded INonBattery,
// and Charge is added here (object adapter pattern).
type AdapterNonToYes struct {
INonBattery
}

// Charge supplies the missing rechargeable capability.
func (AdapterNonToYes) Charge() {
fmt.Println("AdapterNonToYes Charging")
}
// ReBatteryAbstract is the interface-adapter variant: it provides
// default implementations of every IReBattery method so embedders can
// override only the ones they care about.
type ReBatteryAbstract struct {
}

// Use is the default Use implementation.
func (ReBatteryAbstract) Use() {
fmt.Println("ReBatteryAbstract using")
}

// Charge is the default Charge implementation.
func (ReBatteryAbstract) Charge() {
fmt.Println("ReBatteryAbstract Charging")
}
// NonReB embeds ReBatteryAbstract and overrides only Use; Charge is
// inherited from the abstract default.
type NonReB struct {
ReBatteryAbstract
}

// Use overrides the abstract default.
func (NonReB) Use() {
fmt.Println("NonReB using")
}
// AdapterTest demonstrates both adapter variants: wrapping a
// non-rechargeable battery in AdapterNonToYes, and overriding a
// single method of the ReBatteryAbstract defaults via NonReB.
func AdapterTest() {
var battery IReBattery = AdapterNonToYes{NonA{}}
battery.Use()
battery.Charge()

battery = NonReB{}
battery.Use()
battery.Charge()
}
// IPlayer is the common interface for basketball players: set the
// name, then attack or defend.
type IPlayer interface {
Player(name string)
Attack()
Defense()
}
// Fowards is a forward player. (Name typo — "Forwards" — kept because
// renaming would break callers.)
type Fowards struct {
name string
}

// Player sets the player's name.
func (f *Fowards) Player(name string) {
f.name = name
}

// Attack prints "forward <name> attacks" (Chinese).
func (f *Fowards) Attack() {
fmt.Printf("前锋%s进攻\n", f.name)
}

// Defense prints "forward <name> defends" (Chinese).
func (f *Fowards) Defense() {
fmt.Printf("前锋%s防守\n", f.name)
}
// Center is a center player.
type Center struct {
name string
}

// Player sets the player's name.
func (f *Center) Player(name string) {
f.name = name
}

// Attack prints "center <name> attacks" (Chinese).
func (f *Center) Attack() {
fmt.Printf("中锋%s进攻\n", f.name)
}

// Defense prints "center <name> defends" (Chinese).
func (f *Center) Defense() {
fmt.Printf("中锋%s防守\n", f.name)
}
// Guards is a guard player.
type Guards struct {
name string
}

// Player sets the player's name.
func (f *Guards) Player(name string) {
f.name = name
}

// Attack prints "guard <name> attacks" (Chinese).
func (f *Guards) Attack() {
fmt.Printf("后卫%s进攻\n", f.name)
}

// Defense prints "guard <name> defends" (Chinese).
func (f *Guards) Defense() {
fmt.Printf("后卫%s防守\n", f.name)
}
// ForCenter is a foreign center whose action methods are named in
// Chinese (进攻 = attack, 防守 = defend); Translator adapts it to
// IPlayer.
type ForCenter struct {
name string
}

// Player sets the player's name.
func (fc *ForCenter) Player(name string) {
fc.name = name
}

// 进攻 ("attack") prints "foreign center <name> attacks" (Chinese).
func (fc *ForCenter) 进攻() {
fmt.Printf("外籍中锋%s进攻\n", fc.name)
}

// 防守 ("defend") prints "foreign center <name> defends" (Chinese).
func (fc *ForCenter) 防守() {
fmt.Printf("外籍中锋%s防守\n", fc.name)
}
// Translator adapts ForCenter's Chinese-named methods to the IPlayer
// interface (class adapter via embedding).
type Translator struct{
ForCenter
}

// Player delegates to the embedded ForCenter.
func (t *Translator) Player(name string) {
t.ForCenter.Player(name)
}

// Attack forwards to 进攻 ("attack").
func (t *Translator) Attack() {
t.进攻()
}

// Defense forwards to 防守 ("defend").
func (t *Translator) Defense() {
t.防守()
}
|
package config
import (
"code.cloudfoundry.org/cli/plugin"
"errors"
"fmt"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/cfutil"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/httpclient"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/serviceutil"
)
// Refresher triggers a refresh of a named config server instance.
type Refresher interface {
Refresh(string) error
}

// refresher implements Refresher against the CF CLI connection, an
// authenticated HTTP client, and a service-instance URL resolver.
type refresher struct {
cliConnection plugin.CliConnection
authenticatedClient httpclient.AuthenticatedClient
serviceInstanceUrlResolver serviceutil.ServiceInstanceResolver
}
// Refresh obtains an access token, resolves the config server
// instance's URL, and POSTs to its actuator refreshmirrors endpoint.
// A non-200 response is reported as a failure.
func (r *refresher) Refresh(configServerInstanceName string) error {
accessToken, err := cfutil.GetToken(r.cliConnection)
if err != nil {
return err
}
serviceInstanceUrl, err := r.serviceInstanceUrlResolver.GetServiceInstanceUrl(configServerInstanceName, accessToken)
if err != nil {
return fmt.Errorf("error obtaining config server URL: %s", err)
}
// NOTE(review): the URL is built by plain concatenation, so this
// assumes serviceInstanceUrl ends with "/" — confirm the resolver
// guarantees a trailing slash.
_, status, e := r.authenticatedClient.DoAuthenticatedPost(fmt.Sprintf("%sactuator/refreshmirrors", serviceInstanceUrl), "application/json", "", accessToken)
if e != nil {
return e
}
if status != 200 {
return errors.New("failed to refresh mirror")
}
return nil
}
// NewRefresher builds a Refresher from its three collaborators.
func NewRefresher(connection plugin.CliConnection, client httpclient.AuthenticatedClient, resolver serviceutil.ServiceInstanceResolver) Refresher {
return &refresher{
cliConnection: connection,
authenticatedClient: client,
serviceInstanceUrlResolver: resolver,
}
}
|
package data
import (
"database/sql"
"fmt"
"log"
"os"
"reflect"
"strings"
_ "github.com/mattn/go-sqlite3"
)
const (
dbName = "./mediaWebServerDatabase.db"
driver = "sqlite3"
create = "CREATE TABLE IF NOT EXISTS"
asterisk = "*"
colon = ":"
blankSapace = " "
interrogation = "?"
comma = ","
parBeg = "("
parEnd = ")"
argumentForQuery = "%d"
movieCreateFields = "id integer primary key, title text, year text, runtime, text, country text, plot text, rating text, poster text, imdbid text, type text, filepath text"
userCreateFields = "id integer primary key, name text, password text"
actorCreateFields = "id integer primary key, name text"
directorCreateFields = "id integer primary key, name text"
episodeCreateFields = "id integer primary key, season_id integer, episode text, filepath text"
genreCreateFields = "id integer primary key, name text"
seasonCreateFields = "id integer primary key, movie_id integer, season text"
movieCastCreateFields = "id integer primary key, id_movie integer, id_actor integer"
movieDirCastCreateFields = "id integer primary key, id_movie integer, id_director integer"
movieGenresCreateFields = "id integer primary key, id_movie integer, id_genre integer"
movieFields = "title,year,runtime,country,plot,imdb_rating,poster,imdb_id,type,filepath"
seasonFields = "movie_id,season"
movieTableName = "movie"
userTableName = "user"
seasonTableName = "season"
actorTableName = "actor"
directorTableName = "director"
movieCastTableName = "movie_cast"
movieDirCastTableName = "movie_dir_cast"
genreTableName = "genre"
movieGenresTableName = "movie_genres"
episodeTableName = "episode"
nullString = "null"
integerString = "integer"
textString = "ext"
imdbIDFieldName = "imdb_id"
movieIDField = "movie_id"
seasonNumberField = "season"
seasonIDField = "season_id"
episodeNumberField = "episode"
movieCastFields = "id integer primary key, id_actor integer, id_movie integer, role text"
movieDirCastFields = "id integer primary key, id_director integer, id_movie integer"
movieGenresFields = "id integer primary key, id_genre integer, id_movie integer"
moviesDirectory = "movies_directory"
)
// DatabaseAPI is the (stateless) receiver type for database operations.
type DatabaseAPI struct {
}
// CheckDatabase creates the SQLite database file with the full schema
// when it does not exist yet; otherwise it does nothing.
func CheckDatabase() {
if _, err := os.Stat(dbName); os.IsNotExist(err) {
fmt.Println("The database does not exists, creating...")
database := OpenConnection()
createDatabaseStructure(&database)
database.Close()
}
}
// OpenConnection opens the SQLite database and aborts the program on
// failure.
// NOTE(review): this returns sql.DB by value, so every caller gets a
// copy of the handle struct — consider returning *sql.DB; left as-is
// because the signature is part of the package API.
func OpenConnection() sql.DB {
database, err := sql.Open(driver, dbName)
if err != nil {
log.Fatal("Error opening database:", err)
}
return *database
}
// getAppDirectory returns the process working directory, aborting the
// program if it cannot be determined.
func getAppDirectory() string {
dir, err := os.Getwd()
if err != nil {
log.Fatal("Cannot access the application directory:", err)
}
return dir
}
// createDatabaseStructure creates every table of the media schema and
// then closes the connection.
func createDatabaseStructure(database *sql.DB) {
fmt.Println("Creating database structure...")
// BUG FIX: the movie table was created with movieFields (the
// comma-separated SELECT/INSERT column list) instead of the DDL
// column definitions in movieCreateFields, producing typeless
// columns and no primary key.
createTable(database, movieTableName, movieCreateFields)
createTable(database, userTableName, userCreateFields)
createTable(database, seasonTableName, seasonCreateFields)
createTable(database, actorTableName, actorCreateFields)
createTable(database, directorTableName, directorCreateFields)
createTable(database, genreTableName, genreCreateFields)
createTable(database, episodeTableName, episodeCreateFields)
createTable(database, movieCastTableName, movieCastCreateFields)
createTable(database, movieDirCastTableName, movieDirCastCreateFields)
createTable(database, movieGenresTableName, movieGenresCreateFields)
fmt.Println("Database created, closing...")
// NOTE(review): CheckDatabase also closes this handle after the
// call; the double Close is harmless but redundant.
database.Close()
}
// createTable issues a CREATE TABLE IF NOT EXISTS for table with the
// given column definition list, aborting the program on failure.
func createTable(database *sql.DB, table string, fields string) {
stmt := create + blankSapace + table + parBeg + fields + parEnd
if _, err := database.Exec(stmt); err != nil {
log.Fatal("Error creating table", table, colon, err)
}
fmt.Println("Table", table, "created")
}
// getObjectFieldsForCreateTabble returns the struct's fields formatted
// as CREATE TABLE column definitions. ("Tabble" typo kept; renaming
// would break callers.)
func getObjectFieldsForCreateTabble(fields interface{}) []string {
return getObjectFields(fields, true)
}
// getObjectFields reflects over the struct pointed to by fields and
// returns its field names upper-cased; when isForCreate is true each
// name is suffixed with its SQLite type (and "primary key" for Id).
func getObjectFields(fields interface{}, isForCreate bool) []string {
var names []string
v := reflect.ValueOf(fields).Elem()
for i := 0; i < v.NumField(); i++ {
name := v.Type().Field(i).Name
if isForCreate {
name = addDataTypeToField(v, name, i)
}
names = append(names, strings.ToUpper(name))
}
return names
}
// addDataTypeToField formats struct field i as "<name> <sqlite-type>",
// appending "primary key" when the field is Id; it returns "" for Go
// types without a SQLite mapping.
func addDataTypeToField(val reflect.Value, name string, i int) string {
datatype := getDatatypeForSQLite(val.Type().Field(i).Type.String())
if datatype == blankSapace {
return ""
}
primary := ""
if name == "Id" {
primary = "primary key"
}
return name + blankSapace + datatype + blankSapace + primary
}
// getDatatypeForSQLite maps a Go type name to its SQLite column type
// constant; unsupported types map to blankSapace, which callers treat
// as "skip this field".
func getDatatypeForSQLite(goDatatype string) string {
switch goDatatype {
case "int":
return integerString
case "string":
return textString
}
return blankSapace
}
|
package main
import (
"flag"
"fmt"
"gopkg.in/headzoo/surf.v1"
)
// link is the target URL, overridable with -url on the command line.
var (
link = flag.String("url", "http://m.newsmth.net", "url to get")
)
// main fetches the page named by -url and prints its title and body.
func main() {
flag.Parse()

browser := surf.NewBrowser()
if err := browser.Open(*link); err != nil {
panic(err)
}
fmt.Println(browser.Title())
fmt.Println(browser.Body())
}
|
package main
import (
"github.com/gin-gonic/autotls"
"github.com/gin-gonic/gin"
)
// main serves /test over HTTPS on www.itpp.tk using automatic
// Let's Encrypt certificates.
//
// autotls drives the ACME flow: generate a local key, request a
// certificate from the CA, validate it, and cache it for reuse on
// later requests. Running this requires a real, publicly resolvable
// domain.
func main() {
r := gin.Default()
r.GET("/test", func(c *gin.Context) {
c.String(200, "hello test")
})
// BUG FIX: autotls.Run's error was silently discarded, so the
// process exited 0 even when the TLS listener failed to start.
if err := autotls.Run(r, "www.itpp.tk"); err != nil {
panic(err)
}
}
|
/**
* @author liangbo
* @email liangbogopher87@gmail.com
* @date 2017/10/22 18:21
*/
package model
import (
"time"
"pet/utils"
)
// 文章
type Article struct {
Id int64 `gorm:"primary_key"; sql:"AUTO_INCREMENT"`
Title string `sql:"type:varchar(128)"`
Content string `sql:"type:text"`
Type int `sql:"type:smallint(6)"` // 类型,1:展会动态
CreateTime time.Time `sql:"type:datetime"`
}
// TableName tells gorm which table Article rows live in.
func (article *Article) TableName() string { return "pet.article" }
// Create stamps CreateTime with the current time and inserts the
// article, wrapping any database error as an internal error.
func (article *Article) Create() error {
article.CreateTime = time.Now()
err := PET_DB.Table(article.TableName()).Create(article).Error
if nil != err {
err = utils.NewInternalError(utils.DbErrCode, err)
utils.Logger.Error("create article error: %v", err)
return err
}
return nil
}
// Save upserts the article, stamping CreateTime only for new rows
// (Id == 0); database errors are wrapped as internal errors.
func (article *Article) Save() error {
if article.Id == 0 {
article.CreateTime = time.Now()
}
err := PET_DB.Table(article.TableName()).Save(article).Error
if nil != err {
err = utils.NewInternalError(utils.DbErrCode, err)
utils.Logger.Error("save article error: %v", err)
return err
}
return nil
}
// GetArticleListByPage returns one page of articles ordered newest
// first, plus the total matching row count. article_type == 0 means
// no type filter; out-of-range paging inputs fall back to sane
// defaults (page size 10, offset 0).
func (article *Article) GetArticleListByPage(article_type int, page_num, page_size int) (article_list []Article, total_num int, err error) {
// BUG FIX: was `page_size < 0`, which let a zero page size through
// and produced LIMIT 0 — an always-empty page.
if page_size <= 0 {
page_size = 10
}
offset := (page_num - 1) * page_size
if offset < 0 {
offset = 0
}
query := PET_DB.Table(article.TableName())
if article_type != 0 {
query = query.Where("type = ?", article_type)
}
if err2 := query.Count(&total_num).Error; nil != err2 {
utils.Logger.Error("count article list err: %v", err2)
err = utils.NewInternalError(utils.DbErrCode, err2)
return
}
query = query.Order("create_time desc").Limit(page_size).Offset(offset)
err = query.Find(&article_list).Error
if nil != err {
utils.Logger.Error("get article list by page error :%s\n", err.Error())
err = utils.NewInternalError(utils.DbErrCode, err)
return
}
return
}
|
package rrdp
import (
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/ginserver"
"github.com/gin-gonic/gin"
model "rpstir2-model"
)
// start to rrdp from sync.
// RrdpRequest binds the sync URLs from the request body, kicks off
// the RRDP work in the background, and acknowledges immediately.
func RrdpRequest(c *gin.Context) {
belogs.Debug("RrdpRequest(): start")
syncUrls := model.SyncUrls{}
err := c.ShouldBindJSON(&syncUrls)
belogs.Info("RrdpRequest(): syncUrls:", syncUrls, err)
if err != nil {
belogs.Error("RrdpRequest(): ShouldBindJSON:", err)
ginserver.ResponseFail(c, err, nil)
return
}
// Fire-and-forget: the OK response does not wait for the sync.
// NOTE(review): failures inside rrdpRequest are not reported to the
// caller — confirm that is intended.
go rrdpRequest(&syncUrls)
ginserver.ResponseOk(c, nil)
}
|
package nv4
import (
"context"
"github.com/filecoin-project/go-state-types/big"
init0 "github.com/filecoin-project/specs-actors/actors/builtin/init"
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
)
// initMigrator migrates the init actor's state from the v0 (nv3) to
// the v2 (nv4) layout.
type initMigrator struct {
}
// MigrateState loads the v0 init actor state at head, migrates its
// address-resolution HAMT to the v2 layout, stores the equivalent v2
// state, and returns its new head CID (no balance transfer).
func (m initMigrator) MigrateState(ctx context.Context, store cbor.IpldStore, head cid.Cid, _ MigrationInfo) (*StateMigrationResult, error) {
var inState init0.State
if err := store.Get(ctx, head, &inState); err != nil {
return nil, err
}
// Migrate address resolution map
addrMapRoot, err := m.migrateAddrs(ctx, store, inState.AddressMap)
if err != nil {
return nil, xerrors.Errorf("migrate addrs: %w", err)
}
outState := init2.State{
AddressMap: addrMapRoot,
NextID: inState.NextID,
NetworkName: inState.NetworkName,
}
newHead, err := store.Put(ctx, &outState)
if err != nil {
// Previously a partially-built result was returned alongside the
// error; return nil so callers never see a half-valid result.
return nil, xerrors.Errorf("put init state: %w", err)
}
return &StateMigrationResult{
NewHead: newHead,
Transfer: big.Zero(),
}, nil
}
// migrateAddrs rewrites the address-resolution HAMT at root into the
// new HAMT layout.
func (m *initMigrator) migrateAddrs(ctx context.Context, store cbor.IpldStore, root cid.Cid) (cid.Cid, error) {
// The HAMT has changed, but the value type (Address) is identical.
return migrateHAMTRaw(ctx, store, root)
}
|
package chapter2
import "fmt"
// init demonstrates spreading a slice into a variadic parameter with
// the ... operator.
func init() {
fmt.Println("=== Function Variadic ===")
dealers := []string{"A1 Auto", "Discount AUto", "Riverside Automart"}
printDealers(dealers...)
}
// printDealers prints each dealer name on its own line.
// The dealers parameter is variadic: callers may pass individual
// strings or spread a slice with the ... operator.
func printDealers(dealers ...string) {
for _, name := range dealers {
fmt.Println(name)
}
}
package integration_test
import (
"fmt"
"os"
"os/exec"
"time"
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Integration tests for Dynatrace OneAgent injection by the staticfile
// buildpack. Each test creates one or more user-provided services pointing at
// a fake Dynatrace API app, binds them to a test app, restages it, and then
// asserts on the staging log output.
var _ = Describe("CF Staticfile Buildpack", func() {
	var (
		app             *cutlass.App // the app under test
		createdServices []string     // service names created per-test, cleaned up in AfterEach
		dynatraceAPI    *cutlass.App // fake Dynatrace API backend app
		dynatraceAPIURI string       // route of the fake API, embedded into service credentials
	)

	// Push the fake Dynatrace API first so its URL can be used in the
	// user-provided service credentials, then push the app under test.
	BeforeEach(func() {
		dynatraceAPI = cutlass.New(Fixtures("fake_dynatrace_api"))
		// TODO: remove this once go-buildpack runs on cflinuxfs4
		// This is done to have the dynatrace broker app written in go up and running
		if os.Getenv("CF_STACK") == "cflinuxfs4" {
			dynatraceAPI.Stack = "cflinuxfs3"
		}
		dynatraceAPI.SetEnv("BP_DEBUG", "true")
		Expect(dynatraceAPI.Push()).To(Succeed())
		Eventually(func() ([]string, error) { return dynatraceAPI.InstanceStates() }, 60*time.Second).Should(Equal([]string{"RUNNING"}))
		var err error
		dynatraceAPIURI, err = dynatraceAPI.GetUrl("")
		Expect(err).NotTo(HaveOccurred())
		app = cutlass.New(Fixtures("logenv"))
		app.SetEnv("BP_DEBUG", "true")
		PushAppAndConfirm(app)
		createdServices = make([]string, 0)
	})

	// Tear down both apps and delete every service created by the test body.
	AfterEach(func() {
		if app != nil {
			app.Destroy()
			app = nil
		}
		if dynatraceAPI != nil {
			dynatraceAPI.Destroy()
		}
		dynatraceAPI = nil
		for _, service := range createdServices {
			command := exec.Command("cf", "delete-service", "-f", service)
			_, err := command.Output()
			Expect(err).To(BeNil())
		}
	})

	// Happy path: a single Dynatrace credentials service triggers injection.
	Context("deploying a staticfile app with Dynatrace agent with single credentials service", func() {
		It("checks if Dynatrace injection was successful", func() {
			serviceName := "dynatrace-" + cutlass.RandStringRunes(20) + "-service"
			command := exec.Command("cf", "cups", serviceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s\",\"environmentid\":\"envid\"}'", dynatraceAPIURI))
			_, err := command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, serviceName)
			command = exec.Command("cf", "bind-service", app.Name, serviceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "restage", app.Name)
			_, err = command.Output()
			Expect(err).To(BeNil())
			Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace service credentials found. Setting up Dynatrace OneAgent."))
			Expect(app.Stdout.String()).To(ContainSubstring("Starting Dynatrace OneAgent installer"))
			Expect(app.Stdout.String()).To(ContainSubstring("Copy dynatrace-env.sh"))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent installed."))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent injection is set up."))
		})
	})

	// A "networkzone" credential should surface as DT_NETWORK_ZONE during staging.
	Context("deploying a staticfile app with Dynatrace agent with configured network zone", func() {
		It("checks if Dynatrace injection was successful", func() {
			serviceName := "dynatrace-" + cutlass.RandStringRunes(20) + "-service"
			command := exec.Command("cf", "cups", serviceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s\",\"environmentid\":\"envid\", \"networkzone\":\"testzone\"}'", dynatraceAPIURI))
			_, err := command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, serviceName)
			command = exec.Command("cf", "bind-service", app.Name, serviceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "restage", app.Name)
			_, err = command.Output()
			Expect(err).To(BeNil())
			Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace service credentials found. Setting up Dynatrace OneAgent."))
			Expect(app.Stdout.String()).To(ContainSubstring("Starting Dynatrace OneAgent installer"))
			Expect(app.Stdout.String()).To(ContainSubstring("Copy dynatrace-env.sh"))
			Expect(app.Stdout.String()).To(ContainSubstring("Setting DT_NETWORK_ZONE..."))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent installed."))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent injection is set up."))
		})
	})

	// Two credential-bearing services are ambiguous and must be rejected.
	Context("deploying a staticfile app with Dynatrace agent with two credentials services", func() {
		It("checks if detection of second service with credentials works", func() {
			credentialsServiceName := "dynatrace-" + cutlass.RandStringRunes(20) + "-service"
			command := exec.Command("cf", "cups", credentialsServiceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s\",\"environmentid\":\"envid\"}'", dynatraceAPIURI))
			_, err := command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, credentialsServiceName)
			duplicateCredentialsServiceName := "dynatrace-dupe-" + cutlass.RandStringRunes(20) + "-service"
			command = exec.Command("cf", "cups", duplicateCredentialsServiceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s\",\"environmentid\":\"envid\"}'", dynatraceAPIURI))
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, duplicateCredentialsServiceName)
			command = exec.Command("cf", "bind-service", app.Name, credentialsServiceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "bind-service", app.Name, duplicateCredentialsServiceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "restage", app.Name)
			_, err = command.Output()
			Expect(err).To(BeNil())
			Expect(app.Stdout.String()).To(ContainSubstring("More than one matching service found!"))
		})
	})

	// With "skiperrors" set, a failing agent download must not fail staging.
	Context("deploying a staticfile app with Dynatrace agent with failing agent download and ignoring errors", func() {
		It("checks if skipping download errors works", func() {
			credentialsServiceName := "dynatrace-" + cutlass.RandStringRunes(20) + "-service"
			command := exec.Command("cf", "cups", credentialsServiceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s/no-such-endpoint\",\"environmentid\":\"envid\",\"skiperrors\":\"true\"}'", dynatraceAPIURI))
			_, err := command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, credentialsServiceName)
			command = exec.Command("cf", "bind-service", app.Name, credentialsServiceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "restage", app.Name)
			_, err = command.Output()
			Expect(err).To(BeNil())
			Expect(app.Stdout.String()).To(ContainSubstring("Download returned with status 404"))
			Expect(app.Stdout.String()).To(ContainSubstring("Error during installer download, skipping installation"))
		})
	})

	// A tags-only Dynatrace service must not confuse credentials detection.
	Context("deploying a staticfile app with Dynatrace agent with two dynatrace services", func() {
		It("check if service detection isn't disturbed by a service with tags", func() {
			credentialsServiceName := "dynatrace-" + cutlass.RandStringRunes(20) + "-service"
			command := exec.Command("cf", "cups", credentialsServiceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s\",\"environmentid\":\"envid\"}'", dynatraceAPIURI))
			_, err := command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, credentialsServiceName)
			tagsServiceName := "dynatrace-tags-" + cutlass.RandStringRunes(20) + "-service"
			command = exec.Command("cf", "cups", tagsServiceName, "-p", "'{\"tag:dttest\":\"dynatrace_test\"}'")
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, tagsServiceName)
			command = exec.Command("cf", "bind-service", app.Name, credentialsServiceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "bind-service", app.Name, tagsServiceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "restage", app.Name)
			_, err = command.Output()
			Expect(err).To(BeNil())
			Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace service credentials found. Setting up Dynatrace OneAgent."))
			Expect(app.Stdout.String()).To(ContainSubstring("Starting Dynatrace OneAgent installer"))
			Expect(app.Stdout.String()).To(ContainSubstring("Copy dynatrace-env.sh"))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent installed."))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent injection is set up."))
		})
	})

	// NOTE(review): this test's setup is identical to the first "single
	// credentials service" Context; the "without manifest.json" aspect must be
	// provided by the fake API fixture — confirm the fixture actually omits it.
	Context("deploying a staticfile app with Dynatrace agent with single credentials service and without manifest.json", func() {
		It("checks if Dynatrace injection was successful", func() {
			serviceName := "dynatrace-" + cutlass.RandStringRunes(20) + "-service"
			command := exec.Command("cf", "cups", serviceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s\",\"environmentid\":\"envid\"}'", dynatraceAPIURI))
			_, err := command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, serviceName)
			command = exec.Command("cf", "bind-service", app.Name, serviceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "restage", app.Name)
			_, err = command.Output()
			Expect(err).To(BeNil())
			Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace service credentials found. Setting up Dynatrace OneAgent."))
			Expect(app.Stdout.String()).To(ContainSubstring("Starting Dynatrace OneAgent installer"))
			Expect(app.Stdout.String()).To(ContainSubstring("Copy dynatrace-env.sh"))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent installed."))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent injection is set up."))
		})
	})

	// Without "skiperrors", failed downloads are retried with increasing
	// backoff and ultimately fail the staging process.
	Context("deploying a staticfile app with Dynatrace agent with failing agent download and checking retry", func() {
		It("checks if retrying downloads works", func() {
			credentialsServiceName := "dynatrace-" + cutlass.RandStringRunes(20) + "-service"
			command := exec.Command("cf", "cups", credentialsServiceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s/no-such-endpoint\",\"environmentid\":\"envid\"}'", dynatraceAPIURI))
			_, err := command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, credentialsServiceName)
			command = exec.Command("cf", "bind-service", app.Name, credentialsServiceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "restage", app.Name)
			// The restage is expected to fail here, so err is deliberately
			// not asserted on; the log assertions below carry the test.
			_, err = command.CombinedOutput()
			Eventually(app.Stdout.String).Should(ContainSubstring("Error during installer download, retrying in 4s"))
			Eventually(app.Stdout.String).Should(ContainSubstring("Error during installer download, retrying in 5s"))
			Eventually(app.Stdout.String).Should(ContainSubstring("Error during installer download, retrying in 7s"))
			Eventually(app.Stdout.String).Should(ContainSubstring("Download returned with status 404"))
			Eventually(app.Stdout.String).Should(ContainSubstring("Failed to compile droplet"))
		})
	})

	// An unrelated (redis) service alongside the Dynatrace service must not
	// break credential detection or injection.
	Context("deploying a staticfile app with Dynatrace agent with single credentials service and a redis service", func() {
		It("checks if Dynatrace injection was successful", func() {
			serviceName := "dynatrace-" + cutlass.RandStringRunes(20) + "-service"
			command := exec.Command("cf", "cups", serviceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s\",\"environmentid\":\"envid\"}'", dynatraceAPIURI))
			_, err := command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, serviceName)
			command = exec.Command("cf", "bind-service", app.Name, serviceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			redisServiceName := "redis-" + cutlass.RandStringRunes(20) + "-service"
			command = exec.Command("cf", "cups", redisServiceName, "-p", "'{\"name\":\"redis\", \"credentials\":{\"db_type\":\"redis\", \"instance_administration_api\":{\"deployment_id\":\"12345asdf\", \"instance_id\":\"12345asdf\", \"root\":\"https://doesnotexi.st\"}}}'")
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, redisServiceName)
			command = exec.Command("cf", "bind-service", app.Name, redisServiceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "restage", app.Name)
			_, err = command.Output()
			Expect(err).To(BeNil())
			Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace service credentials found. Setting up Dynatrace OneAgent."))
			Expect(app.Stdout.String()).To(ContainSubstring("Starting Dynatrace OneAgent installer"))
			Expect(app.Stdout.String()).To(ContainSubstring("Copy dynatrace-env.sh"))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent installed."))
			Expect(app.Stdout.String()).To(ContainSubstring("Dynatrace OneAgent injection is set up."))
		})
	})

	// NOTE(review): this Context's description duplicates the previous one
	// ("... and a redis service") but no redis service is created here; the
	// assertions cover OneAgent config fetch/update instead. The description
	// looks like a copy-paste leftover — confirm and rename.
	Context("deploying a staticfile app with Dynatrace agent with single credentials service and a redis service", func() {
		It("checks if Dynatrace injection was successful", func() {
			serviceName := "dynatrace-" + cutlass.RandStringRunes(20) + "-service"
			command := exec.Command("cf", "cups", serviceName, "-p", fmt.Sprintf("'{\"apitoken\":\"secretpaastoken\",\"apiurl\":\"%s\",\"environmentid\":\"envid\"}'", dynatraceAPIURI))
			_, err := command.CombinedOutput()
			Expect(err).To(BeNil())
			createdServices = append(createdServices, serviceName)
			command = exec.Command("cf", "bind-service", app.Name, serviceName)
			_, err = command.CombinedOutput()
			Expect(err).To(BeNil())
			command = exec.Command("cf", "restage", app.Name)
			_, err = command.Output()
			Expect(err).To(BeNil())
			Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())
			Expect(app.Stdout.String()).To(ContainSubstring("Fetching updated OneAgent configuration from tenant..."))
			Expect(app.Stdout.String()).To(ContainSubstring("Finished writing updated OneAgent config back to"))
		})
	})
})
|
package bean
import (
"bytes"
"encoding/binary"
"errors"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/common"
)
// Ping is a heartbeat message carrying the sender's current block height.
type Ping struct {
	Height uint64
}

// Serialization encodes the message payload: the height as 8 bytes in
// little-endian order. It returns a descriptive error instead of the
// previous placeholder message ("123") if encoding fails.
func (msg Ping) Serialization() ([]byte, error) {
	buf := bytes.NewBuffer([]byte{})
	if err := binary.Write(buf, binary.LittleEndian, &msg.Height); err != nil {
		return nil, errors.New("serialize ping height: " + err.Error())
	}
	return buf.Bytes(), nil
}
// CmdType returns the protocol command identifier for ping messages.
func (this *Ping) CmdType() string {
	return common.PING_TYPE
}
// Deserialization decodes the message payload: a little-endian uint64 height.
// On failure it returns a descriptive error instead of the previous
// placeholder message ("456"), and leaves the receiver unmodified.
func (msg *Ping) Deserialization(p []byte) error {
	var height uint64
	if err := binary.Read(bytes.NewBuffer(p), binary.LittleEndian, &height); err != nil {
		return errors.New("deserialize ping height: " + err.Error())
	}
	msg.Height = height
	return nil
}
|
// Copyright 2019 GoAdmin Core Team. All rights reserved.
// Use of this source code is governed by a Apache-2.0 style
// license that can be found in the LICENSE file.
package dialect
// sqlite implements the dialect for SQLite databases, inheriting shared
// behavior from commonDialect.
type sqlite struct {
	commonDialect
}

// GetName returns the dialect's name.
func (sqlite) GetName() string {
	return "sqlite"
}

// ShowColumns returns the SQL that lists the columns of table.
// NOTE(review): the table name is concatenated directly into the statement
// (PRAGMA accepts no bind parameters); callers must pass only trusted
// identifiers — confirm upstream callers never forward user input here.
func (sqlite) ShowColumns(table string) string {
	return "PRAGMA table_info(" + table + ");"
}

// ShowTables returns the SQL that lists all user tables, aliased to the
// "tablename" column used by the other dialects.
func (sqlite) ShowTables() string {
	return "SELECT name as tablename FROM sqlite_master WHERE type ='table'"
}
|
package config
import (
"errors"
"fmt"
"github.com/spf13/viper"
"os"
"path/filepath"
"strings"
"gopkg.in/yaml.v2"
)
// ErrUnknownCfg is thrown when the provided config doesn't match anything known, be it in "kind" or the version of it.
var ErrUnknownCfg = errors.New("unknown framework configuration")

// Metadata describes job metadata.
type Metadata struct {
	Tags  []string `yaml:"tags" json:"tags,omitempty"`
	Build string   `yaml:"build" json:"build"`
}

// SauceConfig represents sauce labs related settings.
type SauceConfig struct {
	Region      string            `yaml:"region,omitempty" json:"region"`
	Metadata    Metadata          `yaml:"metadata,omitempty" json:"metadata"`
	Tunnel      Tunnel            `yaml:"tunnel,omitempty" json:"tunnel,omitempty"`
	Concurrency int               `yaml:"concurrency,omitempty" json:"concurrency,omitempty"`
	Sauceignore string            `yaml:"sauceignore,omitempty" json:"sauceignore,omitempty"`
	Experiments map[string]string `yaml:"experiments,omitempty" json:"experiments,omitempty"`
}

// DeviceOptions represents the devices capabilities required from a real device.
type DeviceOptions struct {
	CarrierConnectivity bool   `yaml:"carrierConnectivity,omitempty" json:"carrierConnectivity"`
	DeviceType          string `yaml:"deviceType,omitempty" json:"deviceType,omitempty"` // see SupportedDeviceTypes
	Private             bool   `yaml:"private,omitempty" json:"private,omitempty"`
}

// Device represents the RDC device configuration.
type Device struct {
	ID              string        `yaml:"id,omitempty" json:"id"`
	Name            string        `yaml:"name,omitempty" json:"name"`
	PlatformName    string        `yaml:"platformName,omitempty" json:"platformName"`
	PlatformVersion string        `yaml:"platformVersion,omitempty" json:"platformVersion"`
	Options         DeviceOptions `yaml:"options,omitempty" json:"options,omitempty"`
}

// Emulator represents the emulator configuration.
type Emulator struct {
	Name             string   `yaml:"name,omitempty" json:"name,omitempty"`
	PlatformName     string   `yaml:"platformName,omitempty" json:"platformName"`
	Orientation      string   `yaml:"orientation,omitempty" json:"orientation,omitempty"`
	PlatformVersions []string `yaml:"platformVersions,omitempty" json:"platformVersions,omitempty"`
}

// Simulator represents the simulator configuration; it shares the emulator's shape.
type Simulator Emulator

// When represents a conditional status for when artifacts should be downloaded.
type When string

// These conditions indicate when artifacts are to be downloaded.
const (
	WhenFail   When = "fail"
	WhenPass   When = "pass"
	WhenNever  When = "never"
	WhenAlways When = "always"
)

// ArtifactDownload represents the test artifacts download configuration.
type ArtifactDownload struct {
	Match     []string `yaml:"match,omitempty" json:"match"`
	When      When     `yaml:"when,omitempty" json:"when"`
	Directory string   `yaml:"directory,omitempty" json:"directory"`
}

// Artifacts represents the test artifacts configuration.
type Artifacts struct {
	Download ArtifactDownload `yaml:"download,omitempty" json:"download"`
}

// Tunnel represents a sauce labs tunnel.
type Tunnel struct {
	ID     string `yaml:"id,omitempty" json:"id"`
	Parent string `yaml:"parent,omitempty" json:"parent,omitempty"`
}

// TypeDef represents the type definition of the config (its apiVersion and kind).
type TypeDef struct {
	APIVersion string `yaml:"apiVersion,omitempty"`
	Kind       string `yaml:"kind,omitempty"`
}

// DockerFileMode represent the file providing method.
type DockerFileMode string

// DockerFile* represent the different modes.
const (
	DockerFileMount DockerFileMode = "mount"
	DockerFileCopy  DockerFileMode = "copy"
)

// Docker represents docker settings.
type Docker struct {
	FileTransfer DockerFileMode `yaml:"fileTransfer,omitempty" json:"fileTransfer"`
	Image        string         `yaml:"image,omitempty" json:"image"`
}

// Npm represents the npm settings.
type Npm struct {
	Registry  string            `yaml:"registry,omitempty" json:"registry,omitempty"`
	Packages  map[string]string `yaml:"packages,omitempty" json:"packages"`
	StrictSSL bool              `yaml:"strictSSL,omitempty" json:"strictSSL"`
}

// Defaults represents default suite settings.
type Defaults struct {
	Mode string `yaml:"mode,omitempty" json:"mode"`
}
// readYaml loads the raw bytes of the config file at cfgFilePath.
// A relative path is resolved against the current working directory; an empty
// path is an error.
func readYaml(cfgFilePath string) ([]byte, error) {
	if cfgFilePath == "" {
		return nil, errors.New("no config file was provided")
	}
	wd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	resolved := cfgFilePath
	if !filepath.IsAbs(resolved) {
		resolved = filepath.Join(wd, cfgFilePath)
	}
	return os.ReadFile(resolved)
}
// Describe returns the TypeDef (apiVersion and kind) of the config file at
// cfgPath. An empty cfgPath yields a zero TypeDef without error; the kind is
// lowercased for ease of comparison.
func Describe(cfgPath string) (TypeDef, error) {
	if cfgPath == "" {
		return TypeDef{}, nil
	}
	raw, err := readYaml(cfgPath)
	if err != nil {
		return TypeDef{}, fmt.Errorf("failed to locate project configuration: %v", err)
	}
	var def TypeDef
	if err := yaml.Unmarshal(raw, &def); err != nil {
		return TypeDef{}, fmt.Errorf("failed to parse project configuration: %v", err)
	}
	// Sanity check: a missing apiVersion means this is not a recognizable config.
	if def.APIVersion == "" {
		return TypeDef{}, errors.New("invalid sauce config, which is either malformed or corrupt, please refer to https://docs.saucelabs.com/testrunner-toolkit/configuration for creating a valid config")
	}
	// Normalize certain values for ease of use.
	def.Kind = strings.ToLower(def.Kind)
	return def, nil
}
// ExpandEnv expands environment variable references ($VAR / ${VAR}) inside the
// metadata's build string and tags, in place.
func (m *Metadata) ExpandEnv() {
	m.Build = os.ExpandEnv(m.Build)
	for i := range m.Tags {
		m.Tags[i] = os.ExpandEnv(m.Tags[i])
	}
}
// StandardizeVersionFormat removes the leading "v" in version (e.g. "v1.2.3"
// becomes "1.2.3") to ensure reliable comparisons. Strings without the prefix
// are returned unchanged. Uses strings.TrimPrefix instead of the hand-rolled
// HasPrefix + slice.
func StandardizeVersionFormat(version string) string {
	return strings.TrimPrefix(version, "v")
}
// SupportedDeviceTypes contains the list of supported device types.
var SupportedDeviceTypes = []string{"ANY", "PHONE", "TABLET"}

// IsSupportedDeviceType reports whether deviceType is one of
// SupportedDeviceTypes. The comparison is case-sensitive.
func IsSupportedDeviceType(deviceType string) bool {
	for i := range SupportedDeviceTypes {
		if SupportedDeviceTypes[i] == deviceType {
			return true
		}
	}
	return false
}
// Unmarshal parses the file cfgPath into the given project struct.
// When cfgPath is empty, only values already present in viper are applied.
func Unmarshal(cfgPath string, project interface{}) error {
	if cfgPath != "" {
		name := strings.TrimSuffix(filepath.Base(cfgPath), filepath.Ext(cfgPath)) // config name without extension
		viper.SetConfigName(name)
		viper.AddConfigPath(filepath.Dir(cfgPath))
		if err := viper.ReadInConfig(); err != nil {
			return fmt.Errorf("failed to locate project config: %v", err)
		}
	}
	// NOTE(review): this mutates global viper state, so repeated/concurrent
	// calls with different paths can interfere — confirm callers are
	// sequential. Also, &project is a pointer to an interface; viper appears
	// to unwrap it, but passing project directly would be more conventional —
	// verify before changing.
	return viper.Unmarshal(&project)
}
|
package main
import (
"log"
"net"
"github.com/iheanyi/grpc-phonebook/api"
"github.com/iheanyi/grpc-phonebook/server"
"google.golang.org/grpc"
)
// main starts the phonebook gRPC server listening on TCP port 50051 and
// blocks serving requests until the process exits.
func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	srv := grpc.NewServer()
	svc := server.New()
	api.RegisterPhoneBookServer(srv, svc)
	log.Print("Starting up the server")
	// Serve blocks; previously its error was silently discarded, hiding
	// listener/accept failures.
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package vips
import (
"fmt"
"github.com/sherifabdlnaby/bimg"
cfg "github.com/sherifabdlnaby/prism/pkg/config"
"github.com/sherifabdlnaby/prism/pkg/payload"
)
// flip is an image operation that mirrors the image horizontally and/or
// vertically based on a configurable direction.
type flip struct {
	Raw       flipRawConfig `mapstructure:",squash"`
	direction cfg.Selector // evaluated per payload to pick the flip direction
}

// flipRawConfig holds the raw, unevaluated configuration for the flip operation.
type flipRawConfig struct {
	// Direction selects the flip axis; Apply accepts "horizontal",
	// "vertical", "both", or "none".
	Direction string
}
// Init prepares the flip operation's direction selector. It returns false
// (operation disabled) when the raw configuration equals the defaults.
func (o *flip) Init() (bool, error) {
	if o.Raw == *flipDefaults() {
		return false, nil
	}
	var err error
	o.direction, err = cfg.NewSelector(o.Raw.Direction)
	return err == nil, err
}
// Apply evaluates the configured direction against the payload data and sets
// the corresponding flip/flop flags on the bimg options. Unknown directions
// yield an error.
func (o *flip) Apply(p *bimg.Options, data payload.Data) error {
	direction, err := o.direction.Evaluate(data)
	if err != nil {
		return err
	}

	switch direction {
	case "horizontal":
		p.Flip = true
	case "vertical":
		p.Flop = true
	case "both":
		p.Flip, p.Flop = true, true
	case "none":
		// explicitly a no-op
	default:
		return fmt.Errorf("invalid value for field [direction], got: %s", direction)
	}
	return nil
}
|
package controller
import (
"github.com/gin-gonic/gin"
"net/http"
"myzone/utils"
)
// GotoHome renders the home page for a logged-in user; without a session user
// it renders the login page with a prompt instead.
func GotoHome(c *gin.Context) {
	user := utils.GetSession(c, "user")
	if user != nil {
		c.HTML(http.StatusOK, "home.html", gin.H{
			"msg":  "Home",
			"user": user,
		})
		return
	}
	c.HTML(http.StatusOK, "login.html", gin.H{
		"msg": "请登录!",
	})
}
|
package main
import (
"bufio"
"log"
"os"
"sync"
"unsafe"
"github.com/go-redis/redis"
"github.com/google/flatbuffers/go"
cli "gopkg.in/urfave/cli.v1"
"github.com/transactional-cloud-serving-benchmark/tcsb/serialization_util"
"github.com/transactional-cloud-serving-benchmark/tcsb/serialized_messages"
)
// main parses CLI flags (redis address, worker count) and runs the benchmark
// bridge, exiting non-zero on failure.
func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "address", Value: "127.0.0.1:6379", Usage: "Address for redis server."},
		cli.Uint64Flag{Name: "workers", Value: 1, Usage: "Number of parallel workers to use when submitting requests."},
	}
	app.Action = func(c *cli.Context) error {
		address := c.String("address")
		log.Printf("Address: %v\n", address)
		// NOTE(review): "workers" is declared as a Uint64Flag but read with
		// c.Int — confirm cli.v1 coerces this as intended.
		nWorkers := c.Int("workers")
		log.Printf("Workers: %d\n", nWorkers)
		run(address, nWorkers)
		return nil
	}
	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}
// run drives the benchmark pipeline: a reader goroutine decodes framed
// requests from stdin, nWorkers worker goroutines execute them against redis
// and serialize replies, and the main goroutine writes the framed replies to
// stdout. Byte buffers are recycled through a sync.Pool to limit allocations.
//
// NOTE(review): with nWorkers > 1, replies may be written in a different
// order than requests were read — confirm the IPC driver tolerates this.
func run(address string, nWorkers int) {
	// Pool of reusable byte buffers (stored as *[]byte to avoid boxing copies).
	bufpPool := &sync.Pool{
		New: func() interface{} {
			x := make([]byte, 0, 4096)
			return &x
		},
	}

	in := bufio.NewReader(os.Stdin)
	out := bufio.NewWriter(os.Stdout)
	defer func() {
		out.Flush()
		os.Stdout.Close()
	}()

	redisClient := NewRedisClient(address)
	redisClient.Setup()
	defer redisClient.Teardown()

	clientPoolInputs := make(chan clientPoolInput, 1000)
	outputBufps := make(chan *[]byte, 1000)

	// Reader: decode requests from stdin until EOF/error, then close the
	// input channel so workers drain and exit.
	go func() {
		for {
			bufp := bufpPool.Get().(*[]byte)
			req, err := serialization_util.DecodeNextCommand(in, bufp)
			if err != nil {
				break
			}
			clientPoolInputs <- clientPoolInput{req, bufp}
		}
		close(clientPoolInputs)
	}()

	// Workers: execute each request and forward the serialized reply.
	wg := &sync.WaitGroup{}
	for i := 0; i < nWorkers; i++ {
		wg.Add(1)
		go func() {
			builder := flatbuffers.NewBuilder(4096)
			for cpi := range clientPoolInputs {
				builder.Reset()
				redisClient.HandleRequestResponse(builder, bufpPool, cpi.req)
				if len(builder.FinishedBytes()) == 0 {
					log.Fatal("bad reply serialization")
				}
				// Recycle the request's buffer now that the request has
				// been fully handled.
				*cpi.bufp = (*cpi.bufp)[:0]
				bufpPool.Put(cpi.bufp)
				cpi.bufp = nil

				// The Builder contains the output bytes, so
				// copy the data and send a new bufp along.
				bufp := bufpPool.Get().(*[]byte)
				(*bufp) = (*bufp)[:len(builder.FinishedBytes())]
				copy(*bufp, builder.FinishedBytes())
				outputBufps <- bufp
			}
			wg.Done()
		}()
	}

	// Close the output channel once every worker has finished.
	go func() {
		wg.Wait()
		close(outputBufps)
	}()

	// Writer: emit each framed reply and recycle its buffer.
	for outputBufp := range outputBufps {
		out.Write(*outputBufp)
		*outputBufp = (*outputBufp)[:0]
		bufpPool.Put(outputBufp)
	}
}
// RedisClient wraps a go-redis client used to execute benchmark requests.
type RedisClient struct {
	address string        // host:port of the redis server
	client  *redis.Client // nil until Setup is called
}

// NewRedisClient returns an unconnected client for the given address; call
// Setup before use.
func NewRedisClient(address string) *RedisClient {
	return &RedisClient{
		address: address,
	}
}

// Setup connects to redis and verifies the connection with a PING; it aborts
// the process on failure.
func (rc *RedisClient) Setup() {
	client := redis.NewClient(&redis.Options{
		Addr: rc.address,
	})
	_, err := client.Ping().Result()
	if err != nil {
		log.Fatal(err)
	}
	rc.client = client
}

// HandleRequestResponse executes a single decoded request (a read or a batch
// write) against redis and serializes the reply into builder. Scratch buffers
// are drawn from and returned to bufpPool. Unknown request types abort the
// process.
func (rc *RedisClient) HandleRequestResponse(builder *flatbuffers.Builder, bufpPool *sync.Pool, req serialized_messages.Request) {
	// Reset is also done by the calling worker loop; repeating it here keeps
	// this method safe to call on its own.
	builder.Reset()

	if req.RequestUnionType() == serialized_messages.RequestUnionReadRequest {
		// Decode read request
		t := flatbuffers.Table{}
		if ok := req.RequestUnion(&t); !ok {
			log.Fatal("logic error: bad RequestUnion decoding")
		}
		rr := serialized_messages.ReadRequest{}
		rr.Init(t.Bytes, t.Pos)

		if len(rr.KeyBytes()) == 0 {
			log.Fatal("missing keybytes")
		}

		valbufp := bufpPool.Get().(*[]byte)

		///////////////////////////////////
		// Begin Redis-specific read logic.
		///////////////////////////////////
		keyString := zeroAllocBytesToString(rr.KeyBytes())
		retval, err := rc.client.Get(keyString).Result()
		if err != nil {
			// This is okay, because we just show an empty
			// validation value to the user.
		}
		*valbufp = append(*valbufp, []byte(retval)...)
		/////////////////////////////////
		// End Redis-specific read logic.
		/////////////////////////////////

		// Encode the read reply information for the IPC driver.
		serialization_util.EncodeReadReplyWithFraming(builder, rr.KeyBytes(), *valbufp)

		// Reset and store the bufp.
		*valbufp = (*valbufp)[:0]
		bufpPool.Put(valbufp)
	} else if req.RequestUnionType() == serialized_messages.RequestUnionBatchWriteRequest {
		// Decode batch write request:
		t := flatbuffers.Table{}
		if ok := req.RequestUnion(&t); !ok {
			log.Fatal("logic error: bad RequestUnion decoding")
		}
		bwr := serialized_messages.BatchWriteRequest{}
		bwr.Init(t.Bytes, t.Pos)

		////////////////////////////////////
		// Begin Redis-specific write logic.
		////////////////////////////////////
		// Pipeline all SETs so the batch needs a single round trip.
		pipeline := rc.client.Pipeline()
		for i := 0; i < bwr.KeyValuePairsLength(); i++ {
			kvp := serialized_messages.KeyValuePair{}
			bwr.KeyValuePairs(&kvp, i)

			keyString := zeroAllocBytesToString(kvp.KeyBytes())
			pipeline.Set(keyString, kvp.ValueBytes(), 0)
		}
		if _, err := pipeline.Exec(); err != nil {
			log.Fatal("write batch: ", err)
		}
		//////////////////////////////////
		// End Redis-specific write logic.
		//////////////////////////////////

		// Encode the batch write reply information for the IPC driver.
		serialization_util.EncodeBatchWriteReplyWithFraming(builder, uint64(bwr.KeyValuePairsLength()))
	} else {
		log.Fatal("logic error: invalid request type")
	}
}

// Teardown closes the underlying redis connection.
func (rc *RedisClient) Teardown() {
	rc.client.Close()
}
// zeroAllocBytesToString reinterprets x as a string without copying.
// Unsafe: the returned string aliases x's backing array, so the caller must
// guarantee x is never mutated afterwards (the runtime assumes strings are
// immutable).
func zeroAllocBytesToString(x []byte) string {
	return *(*string)(unsafe.Pointer(&x))
}
// clientPoolInput pairs a decoded request with the pooled buffer backing its
// bytes, so the worker can return the buffer to the pool after handling it.
type clientPoolInput struct {
	req  serialized_messages.Request
	bufp *[]byte
}
|
package main
import (
"fmt"
"image/color"
"io"
"strconv"
"strings"
"time"
"fyne.io/fyne/v2"
"fyne.io/fyne/v2/app"
"fyne.io/fyne/v2/canvas"
"fyne.io/fyne/v2/container"
"fyne.io/fyne/v2/data/binding"
"fyne.io/fyne/v2/dialog"
"fyne.io/fyne/v2/theme"
"fyne.io/fyne/v2/widget"
"github.com/archon/backend"
"github.com/archon/gui"
)
// Application-wide constants.
const APP_NAME = "Archon"                       // application name, appended to window titles
const DEFAULT_SESSION_NAME = "Untitled Session" // presumably the fallback session name — not referenced in this chunk
const STARTING_WIDTH = 600                      // initial window width
const STARTING_HEIGHT = 400                     // initial window height
const MAX_WIN_TITLE_LENGTH = 50                 // titles longer than this are truncated with "..."
// MainInterfaceRenderer handles rendering for the main application window.
// Implements the fyne.WidgetRenderer interface.
// NOTE(review): the original comment said "NoteBoxes", but the fields and
// methods below render a MainInterface — the comment looked copy-pasted.
type MainInterfaceRenderer struct {
	cont *fyne.Container // the container holding all of the items of the main application window
	mi   *MainInterface  // the app window this is rendering
}

// MinSize returns the minimum size of the app window, delegating to the
// container. Necessary to implement the fyne.WidgetRenderer interface.
func (m *MainInterfaceRenderer) MinSize() fyne.Size {
	return m.cont.MinSize()
}

// Layout positions and resizes the items within the app window to fill size.
// Necessary to implement the fyne.WidgetRenderer interface.
func (m *MainInterfaceRenderer) Layout(size fyne.Size) {
	m.cont.Resize(size)
	m.cont.Layout.Layout(m.cont.Objects, m.cont.Size())
}

// ApplyTheme is required by the fyne.WidgetRenderer interface but is a no-op:
// fyne manages the main interface theme. Do not call it externally.
func (m *MainInterfaceRenderer) ApplyTheme() {
	// no-op, allow fyne to manage main interface theme
}

// Refresh redraws the container and each of its objects; triggered when the
// app window or theme changes. Necessary to implement the fyne.WidgetRenderer
// interface.
func (m *MainInterfaceRenderer) Refresh() {
	canvas.Refresh(m.cont)
	for _, object := range m.cont.Objects {
		canvas.Refresh(object)
	}
}

// Objects returns the list of canvas objects this renders (just the root
// container). Necessary to implement the fyne.WidgetRenderer interface.
func (m *MainInterfaceRenderer) Objects() []fyne.CanvasObject {
	return []fyne.CanvasObject{m.cont}
}

// Destroy is called when this renderer is no longer needed; there are no
// resources to release. Necessary to implement the fyne.WidgetRenderer
// interface.
func (m *MainInterfaceRenderer) Destroy() {
	// no-op, no resources to close
}
// MainInterface represents the main interface of the application window.
// Implements the widget.Widget interface.
type MainInterface struct {
	widget.BaseWidget
	session     *backend.Session       // The state of this application session
	entry       *gui.EnterEntry        // The entry field
	indicator   *gui.SavingIndicator   // an indicator that flashes when a save is initiated
	infoButton  *widget.Button         // a button containing info for the session
	boundTitle  binding.ExternalString // a binding for the session title (bound to session.SessionTitle)
	boundNumber binding.String         // a binding for the session number (detached; synced manually)
	window      fyne.Window            // the window this is rendered in
}
// BindSessionInfo (re)creates the data bindings exposing the session title and
// number to the UI. The title is bound directly to the session field, so UI
// edits flow straight through; the number is a detached string binding seeded
// from the session and synced back explicitly on confirm.
func (m *MainInterface) BindSessionInfo() {
	m.boundTitle = binding.BindString(&m.session.SessionTitle)
	m.boundNumber = binding.NewString()
	// NOTE(review): Set's error is ignored; a plain string binding should not
	// fail here, but confirm.
	m.boundNumber.Set(strconv.Itoa(m.session.SessionNumber))
}
// CreateRenderer builds the main window's widget tree (toolbar + info button
// on top, saving indicator below them, notes list in the center, entry field
// at the bottom) and returns a renderer for it. Necessary to implement the
// widget.Widget interface.
func (m *MainInterface) CreateRenderer() fyne.WidgetRenderer {
	// Notes list backed by the session via the list* callback methods.
	list := widget.NewList(
		m.listLength,
		m.listCreateItem,
		m.listUpdateItem,
	)
	toolbar := widget.NewToolbar(
		widget.NewToolbarAction(theme.DocumentSaveIcon(), m.Save),
		widget.NewToolbarAction(theme.FolderOpenIcon(), m.Load),
	)
	m.indicator = gui.NewSavingIndicator()
	m.BindSessionInfo()
	m.infoButton = widget.NewButton(
		m.getInfoButtonText(),
		m.HandleSessionInfoButton,
	)
	cont := container.NewBorder(
		container.NewVBox(container.NewHBox(toolbar, m.infoButton), m.indicator),
		m.entry,
		nil,
		nil,
		list,
	)
	return &MainInterfaceRenderer{
		cont: cont,
		mi:   m,
	}
}
// HandleSessionInfoButton shows a modal form for editing the session title and
// number, applying the edits to the session when the user confirms.
func (m *MainInterface) HandleSessionInfoButton() {
	titleEntry := widget.NewEntryWithData(m.boundTitle)
	numberEntry := widget.NewEntryWithData(m.boundNumber)
	numberEntry.Validator = backend.ValidateSessionNumber
	titleForm := widget.NewFormItem("Session title", titleEntry)
	numberForm := widget.NewFormItem("Session number", numberEntry)
	// Size the dialog relative to the current window.
	formSize := fyne.NewSize(m.window.Canvas().Size().Width*0.8, m.window.Canvas().Size().Height*0.5)
	callback := func(confirm bool) {
		if confirm {
			// The title is bound directly to the session field, so only the
			// number needs to be copied back explicitly.
			number, _ := m.boundNumber.Get()
			// Atoi error ignored: the entry is gated by ValidateSessionNumber.
			numAsInt, _ := strconv.Atoi(number)
			if numAsInt != m.session.SessionNumber {
				m.session.SessionNumber = numAsInt
			}
			m.infoButton.SetText(m.getInfoButtonText())
		}
	}
	// Named formDialog rather than "dialog" so the local no longer shadows
	// the imported fyne dialog package.
	formDialog := dialog.NewForm("", "Confirm", "Cancel", []*widget.FormItem{titleForm, numberForm}, callback, m.window)
	formDialog.Resize(formSize)
	formDialog.Show()
}
// Load opens an existing session file by showing a file-open dialog; the
// chosen file is handled by m.load.
func (m *MainInterface) Load() {
	dialog.ShowFileOpen(
		m.load,
		m.window,
	)
}
// Save saves the current session. If the session has never been saved (no
// path yet), a file-save dialog is shown and m.save handles the chosen
// location; otherwise the session is written to its existing path, the saving
// indicator is flashed, and the window title is refreshed.
func (m *MainInterface) Save() {
	// if the user has yet to save their work
	if m.session.Path == "" {
		dialog.ShowFileSave(
			m.save,
			m.window,
		)
	} else { // the user has already saved their work
		err := m.session.Save()
		if err != nil {
			dialog.ShowError(err, m.window)
		}
		// NOTE(review): the indicator flashes and the title refreshes even
		// when Save returned an error — confirm that is intended.
		m.animateIndicator()
		m.SetWindowTitle()
	}
}
// SetWindowTitle sets the window title based on the session title, session
// number, and path, truncating long titles and appending the app name.
func (m *MainInterface) SetWindowTitle() {
	windowTitle := ""
	if m.session.SessionTitle != "" {
		windowTitle = m.session.SessionTitle
	}
	if m.session.SessionTitle == "" && m.session.SessionNumber > backend.NO_SESSION_NUMBER {
		windowTitle = fmt.Sprintf("Session %d %s", m.session.SessionNumber, m.session.Path)
	}
	if len(windowTitle) > MAX_WIN_TITLE_LENGTH {
		// reserve 3 bytes for "..." so the result is exactly
		// MAX_WIN_TITLE_LENGTH bytes ([:n] already yields n bytes, so the
		// original's extra -1 was an off-by-one).
		// NOTE(review): byte slicing can split a multi-byte rune -- confirm
		// titles are expected to be ASCII.
		windowTitle = windowTitle[:MAX_WIN_TITLE_LENGTH-3] + "..."
	}
	// use an explicit %s verb for the title: the original used the title
	// itself as the format string, so a '%' typed by the user garbled it
	m.window.SetTitle(fmt.Sprintf("%s - %s", windowTitle, APP_NAME))
}
// animateIndicator flashes the saving indicator by animating its color
// from the disabled color to the foreground color and back again.
func (m *MainInterface) animateIndicator() {
	flash := canvas.NewColorRGBAAnimation(
		theme.DisabledColor(),
		theme.ForegroundColor(),
		canvas.DurationShort,
		func(c color.Color) {
			m.indicator.SetColor(c)
			m.indicator.Refresh()
		})
	flash.AutoReverse = true // animate back to the disabled color
	flash.Start()
}
// getInfoButtonText builds the info-button label: the session date, then
// "Session N", then ": <title>" when a title is present.
func (m *MainInterface) getInfoButtonText() string {
	buttonText := m.session.Date.Format("1/2/2006")
	number, _ := m.boundNumber.Get()
	title, _ := m.boundTitle.Get()
	numAsInt, _ := strconv.Atoi(number)
	if numAsInt > backend.NO_SESSION_NUMBER {
		// display the same bound value that was tested; the original showed
		// m.session.SessionNumber/SessionTitle, which can lag behind the
		// bindings until the form is confirmed
		buttonText += " Session " + strconv.Itoa(numAsInt)
		if title != "" {
			buttonText += ":"
		}
	}
	if title != "" {
		buttonText += " " + title
	}
	return buttonText
}
// listLength reports how many notes the list widget should display.
func (m *MainInterface) listLength() int {
	return len(m.session.Notes)
}
// listCreateItem returns an empty template NoteBox for the list widget;
// real content is filled in later by listUpdateItem.
func (m *MainInterface) listCreateItem() fyne.CanvasObject {
	return gui.NewNoteBox("", time.Time{})
}
// listUpdateItem populates a recycled template item with the note at
// index i when the list widget displays it.
func (m *MainInterface) listUpdateItem(i widget.ListItemID, o fyne.CanvasObject) {
	box := o.(*gui.NoteBox)
	note := m.session.Notes[i]
	box.SetContent(note.Content)
	box.SetTime(note.Time)
}
// load creates a session from the selected file. It is the callback for
// the file-open dialog; a nil reader means the user cancelled. Errors
// while opening, reading, or decoding are shown in an error dialog.
func (m *MainInterface) load(uc fyne.URIReadCloser, e error) {
	// the dialog failed to produce a reader
	if e != nil {
		dialog.ShowError(e, m.window)
		return
	}
	// the user pressed 'cancel'
	if uc == nil {
		return
	}
	defer uc.Close() // the original leaked the reader
	builder := new(strings.Builder)
	data := make([]byte, 1024)
	// read the file in chunks, appending only the bytes actually read.
	// The original wrote the raw buffer once AFTER the loop, which dropped
	// every chunk but the last and appended stale buffer bytes.
	for {
		n, err := uc.Read(data)
		if n > 0 {
			builder.Write(data[:n])
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			dialog.ShowError(err, m.window)
			return
		}
	}
	// construct session from string; keep the old session on decode failure
	session, err := backend.FromJSON(builder.String())
	if err != nil {
		dialog.ShowError(err, m.window)
		return
	}
	m.session = session
	m.entry.SetSession(m.session)
	m.session.Path = uc.URI().Path()
	m.BindSessionInfo()
	m.SetWindowTitle()
	m.infoButton.SetText(m.getInfoButtonText())
}
// save writes the current session to file; it is the callback for the
// file-save dialog. A nil writer means the user cancelled. Any error is
// shown in an error dialog.
func (m *MainInterface) save(uc fyne.URIWriteCloser, e error) {
	// the dialog itself failed; check before touching uc
	// (the original only checked e after writing)
	if e != nil {
		dialog.ShowError(e, m.window)
		return
	}
	// the user pressed 'cancel'
	if uc == nil {
		return
	}
	defer uc.Close() // the original never closed the writer
	reader := strings.NewReader(m.session.ToJSON())
	if _, err := reader.WriteTo(uc); err != nil {
		dialog.ShowError(err, m.window)
		return
	}
	m.session.Path = uc.URI().Path()
	m.SetWindowTitle()
}
// NewMainInterface creates the interface composing the entire window.
func NewMainInterface(window fyne.Window) *MainInterface {
	mi := &MainInterface{
		session: backend.NewSession(DEFAULT_SESSION_NAME, backend.NO_SESSION_NUMBER),
		window:  window,
	}
	mi.entry = gui.NewEnterEntry(mi.session)
	mi.ExtendBaseWidget(mi)
	return mi
}
// setUpWindow applies custom settings (title, content, size, focus) to the window.
func setUpWindow(window fyne.Window) {
	ui := NewMainInterface(window)
	window.SetTitle(ui.session.SessionTitle + " - " + APP_NAME)
	window.SetContent(ui)
	window.Resize(fyne.NewSize(STARTING_WIDTH, STARTING_HEIGHT))
	window.Canvas().Focus(ui.entry)
}
// main boots the fyne application, builds the window, and runs the event loop.
func main() {
	fyneApp := app.New()
	win := fyneApp.NewWindow(APP_NAME)
	setUpWindow(win)
	win.ShowAndRun()
}
|
package v1
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
api "github.com/motonary/Fortuna/api/v1"
"github.com/motonary/Fortuna/entity"
)
// Response mirrors the JSON body returned by the user API endpoints.
type Response struct {
	Status int          `json:"status"`          // status code echoed in the body
	User   *entity.User `json:"user,omitempty"`  // present on user-returning endpoints
	Token  string       `json:"token,omitempty"` // present on auth endpoints
}
// TestCreateUserHandler posts a new user and checks both the status code
// and the returned user's name.
func TestCreateUserHandler(t *testing.T) {
	w := httptest.NewRecorder()
	r := httptest.NewRequest("POST", "/users", bytes.NewBuffer(testBody))
	api.Router().ServeHTTP(w, r)
	rw := w.Result()
	defer rw.Body.Close()
	if rw.StatusCode != http.StatusOK {
		t.Fatalf("unexpected status code : %d\n\n", rw.StatusCode)
	}
	var response Response
	// named body rather than bytes: the original shadowed the bytes package
	body, err := ioutil.ReadAll(rw.Body)
	if err != nil {
		t.Fatalf("reading response body : %v\n\n", err)
	}
	if err := json.Unmarshal(body, &response); err != nil {
		t.Fatalf("decoding response : %v\n\n", err)
	}
	// guard against a nil User, which would panic on the field access below
	if response.User == nil {
		t.Fatalf("response contains no user : %+v\n\n", response)
	}
	if response.User.Name != testUser.Name {
		t.Fatalf("response data is unexpected : %v\n\n", response.User)
	}
}
// TestGetUserHandlerResponse checks that an authenticated GET /users/2 succeeds.
func TestGetUserHandlerResponse(t *testing.T) {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest("GET", "/users/2", nil)
	req.Header.Set("Authorization", "Bearer "+tokenString)
	api.Router().ServeHTTP(rec, req)
	res := rec.Result()
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		t.Fatalf("unexpected status code : %d\n\n", res.StatusCode)
	}
}
// Pending: the server/client interface for update is not yet decided.
// func TestUpdatetUserHandlerResponse(t *testing.T) {
// w := httptest.NewRecorder()
// r := httptest.NewRequest("PUT", "/users/2", nil)
// r.Header.Set("Authorization", "Bearer "+tokenString)
// api.Router().ServeHTTP(w, r)
// rw := w.Result()
// defer rw.Body.Close()
// if rw.StatusCode != http.StatusOK {
// t.Fatalf("unexpected status code : %d\n\n", rw.StatusCode)
// }
// }
// TestDeleteUserHandlerResponse checks that an authenticated DELETE /users/2 succeeds.
func TestDeleteUserHandlerResponse(t *testing.T) {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest("DELETE", "/users/2", nil)
	req.Header.Set("Authorization", "Bearer "+tokenString)
	api.Router().ServeHTTP(rec, req)
	res := rec.Result()
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		t.Fatalf("unexpected status code : %d\n\n", res.StatusCode)
	}
}
|
package main
import "fmt"
// Print the lowercase alphabet a-z, then the uppercase alphabet Z-A.
// main prints the lowercase letters a-z on one line, then the uppercase
// letters in reverse order Z-A on the next.
func main() {
	for c := 'a'; c <= 'z'; c++ {
		fmt.Printf("%c ", c)
	}
	fmt.Println()
	for c := 'Z'; c >= 'A'; c-- {
		fmt.Printf("%c ", c)
	}
}
|
// This file was generated for SObject LoginGeo, API Version v43.0 at 2018-07-30 03:47:32.50817275 -0400 EDT m=+18.851486547
package sobjects
import (
"fmt"
"strings"
)
// LoginGeo is the Salesforce LoginGeo SObject (API v43.0): the estimated
// geographic location of a login event.
type LoginGeo struct {
	BaseSObject
	City             string  `force:",omitempty"`
	Country          string  `force:",omitempty"`
	CountryIso       string  `force:",omitempty"`
	CreatedById      string  `force:",omitempty"`
	CreatedDate      string  `force:",omitempty"`
	Id               string  `force:",omitempty"`
	IsDeleted        bool    `force:",omitempty"`
	LastModifiedById string  `force:",omitempty"`
	LastModifiedDate string  `force:",omitempty"`
	Latitude         float64 `force:",omitempty"`
	LoginTime        string  `force:",omitempty"`
	Longitude        float64 `force:",omitempty"`
	PostalCode       string  `force:",omitempty"`
	Subdivision      string  `force:",omitempty"`
	SystemModstamp   string  `force:",omitempty"`
}
// ApiName returns the Salesforce API name for this SObject type.
func (t *LoginGeo) ApiName() string {
	return "LoginGeo"
}
// String renders the LoginGeo heading followed by each field, one per line.
// (t.Name is presumably provided by the embedded BaseSObject.)
func (t *LoginGeo) String() string {
	var sb strings.Builder
	sb.WriteString(fmt.Sprintf("LoginGeo #%s - %s\n", t.Id, t.Name))
	fields := []struct {
		label string
		value interface{}
	}{
		{"City", t.City},
		{"Country", t.Country},
		{"CountryIso", t.CountryIso},
		{"CreatedById", t.CreatedById},
		{"CreatedDate", t.CreatedDate},
		{"Id", t.Id},
		{"IsDeleted", t.IsDeleted},
		{"LastModifiedById", t.LastModifiedById},
		{"LastModifiedDate", t.LastModifiedDate},
		{"Latitude", t.Latitude},
		{"LoginTime", t.LoginTime},
		{"Longitude", t.Longitude},
		{"PostalCode", t.PostalCode},
		{"Subdivision", t.Subdivision},
		{"SystemModstamp", t.SystemModstamp},
	}
	for _, f := range fields {
		sb.WriteString(fmt.Sprintf("\t%s: %v\n", f.label, f.value))
	}
	return sb.String()
}
// LoginGeoQueryResponse is the SOQL query envelope whose records are LoginGeo rows.
type LoginGeoQueryResponse struct {
	BaseQuery
	Records []LoginGeo `json:"Records" force:"records"`
}
|
package model
import "time"
// Users maps a row of the users table (Laravel-style schema): gorm tags pin
// the column names and the JSON tags mirror the API payload.
type Users struct {
	Id              int       `json:"id" gorm:"column:id"`
	Name            string    `json:"name" gorm:"column:name"`
	Email           string    `json:"email" gorm:"column:email"`
	EmailVerified   time.Time `json:"email_verified_at" gorm:"column:email_verified_at"`
	Password        string    `json:"password" gorm:"column:password"`
	NomorIdentitas  string    `json:"nomor_identitas" gorm:"column:nomor_identitas"` // identity number
	NomorTelpKantor string    `json:"nomor_telp_kantor" gorm:"column:nomor_telp_kantor"` // office phone number
	IdUnit          int       `json:"id_unit" gorm:"column:id_unit"`
	NamaUnit        string    `json:"nama_unit"` // unit name; no column tag -- presumably filled by a join, confirm
	IdJabatan       int       `json:"id_jabatan" gorm:"column:id_jabatan"` // position id
	Token           string    `json:"remember_token" gorm:"column:remember_token"`
	CreatedAt       time.Time `json:"created_at" gorm:"column:created_at"`
	UpdatedAt       time.Time `json:"updated_at" gorm:"column:updated_at"`
}
|
package libRestful
import (
"github.com/bb-orz/gt/utils"
"io"
"text/template"
)
// NewFormatterGinEngine returns an empty Gin-engine code formatter.
func NewFormatterGinEngine() *FormatterGinEngine {
	return &FormatterGinEngine{}
}
// FormatterGinEngine generates Gin restful API scaffolding from a template.
type FormatterGinEngine struct {
	FormatterStruct
}
// Format fills in the template data (package, struct name, route group,
// and imports) for the given name and returns the formatter.
func (f *FormatterGinEngine) Format(name string) IFormatter {
	f.PackageName = "restful"
	f.StructName = utils.CamelString(name)
	f.RouteGroup = utils.SnakeString(name)
	f.ImportList = map[string]ImportItem{
		"sync": {Alias: "", Package: "sync"},
		"fmt":  {Alias: "", Package: "fmt"},
		"gin":  {Alias: "", Package: "github.com/gin-gonic/gin"},
		"xgin": {Alias: "", Package: "github.com/bb-orz/goinfras/XGin"},
	}
	return f
}
// WriteOut renders the Gin restful code template with this formatter's
// data into writer.
func (f *FormatterGinEngine) WriteOut(writer io.Writer) error {
	tmpl := template.Must(template.New("GinRestfulTemplate").Parse(GinRestfulCodeTemplate))
	return tmpl.Execute(writer, *f)
}
// GinRestfulCodeTemplate is the text/template rendered by WriteOut: it
// generates an init that registers the API with the Gin engine, the API
// struct, its route registration, and two stub handlers.
const GinRestfulCodeTemplate = `package {{ .PackageName }}
import (
{{- range .ImportList }}
{{ .Alias }} "{{ .Package }}"
{{- end}}
)
func init() {
var once sync.Once
once.Do(func() {
// 初始化时自动注册该API到Gin Engine
XGin.RegisterApi(new({{ .StructName }}Api))
})
}
type {{ .StructName }}Api struct {}
// SetRouter由Gin Engine 启动时调用
func (s *{{ .StructName }}Api) SetRoutes() {
engine := XGin.XEngine()
{{ .RouteGroup }}Group := engine.Group("/{{ .RouteGroup }}")
{{ .RouteGroup }}Group.GET("/foo", s.Foo)
{{ .RouteGroup }}Group.GET("/bar", s.Bar)
}
func (s *{{ .StructName }}Api) Foo(ctx *gin.Context) {
// TODO Call Service Or Domain Method
fmt.Println("{{ .StructName }}.Foo Restful API")
}
func (s *{{ .StructName }}Api) Bar(ctx *gin.Context) {
// TODO Call Service Or Domain Method
fmt.Println("{{ .StructName }}.Bar Restful API")
}
`
|
package testcase
import goplvalidator "github.com/go-playground/validator/v10"
// v is the shared validator instance returned by Options.Validator.
var v = goplvalidator.New()

// init registers the custom "adult" rule: an integer field is valid when
// it is at least 18. Registration only fails on programmer error, so any
// error is fatal.
func init() {
	if err := v.RegisterValidation("adult", func(fl goplvalidator.FieldLevel) bool {
		return fl.Field().Int() >= 18
	}); err != nil {
		panic(err)
	}
}
// Options is a test fixture with mandatory, validated option fields.
// NOTE(review): the fields are unexported while carrying `validate` tags;
// go-playground/validator cannot read unexported fields directly -- confirm
// the options library accesses them another way.
type Options struct {
	amount int `option:"mandatory"`
	age    int `option:"mandatory" validate:"adult"`
}
// Validator exposes the package-level validator (with the custom "adult"
// rule registered) for use by the options machinery.
func (Options) Validator() *goplvalidator.Validate {
	return v
}
|
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api_test
import (
"encoding/json"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"io/ioutil"
"net/http"
"net/url"
)
// Spec: GET /exp/vars must succeed and its expvar request counter must
// record exactly this one request.
var _ = Describe("API Service", func() {
	It("should return vars from /exp/vars with request counter", func() {
		uri, err := url.Parse(testServer.URL)
		Expect(err).NotTo(HaveOccurred())
		uri.Path = "/exp/vars"
		resp, err := http.Get(uri.String())
		Expect(err).ShouldNot(HaveOccurred())
		defer resp.Body.Close()
		Expect(resp.StatusCode).Should(Equal(http.StatusOK))
		body, err := ioutil.ReadAll(resp.Body)
		// the original never checked the ReadAll error before decoding
		Expect(err).ShouldNot(HaveOccurred())
		var m map[string]interface{}
		err = json.Unmarshal(body, &m)
		Expect(err).ShouldNot(HaveOccurred())
		// expvar counts this very request, so the counter must be exactly 1
		requests := m["requests"].(map[string]interface{})
		Expect(requests["/exp/vars"]).Should(Equal(float64(1)))
	})
})
|
package main
import (
	"bytes"
	"fmt"
	"net"
	"net/http"
	"os"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"./shared"
	"github.com/ant0ine/go-json-rest/rest"
	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/postgres"
)
// var payload = bytes.NewBuffer([]byte(`{"message":"Buy cheese and bread for breakfast."}`))
// Reminder is the benchmark's single persisted entity; gorm maps it to a
// table and the JSON tags shape the REST payload.
type Reminder struct {
	Id        int64     `json:"id"`
	Message   string    `sql:"size:1024" json:"message"`
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"updatedAt"`
}
// Impl bundles the database handle shared by the REST handlers.
type Impl struct {
	DB *gorm.DB
}
// StartServer connects to postgres, resets and migrates the reminders
// table, wires the REST routes, and serves them on shared.PORT from a
// background goroutine. Closing the returned listener stops the server.
func StartServer() net.Listener {
	i := Impl{}
	var err error
	i.DB, err = gorm.Open("postgres", "host=localhost user=postgres dbname=mydb sslmode=disable password=postgres")
	shared.CheckErr(err)
	// start each benchmark run from an empty table
	i.DB.Delete(Reminder{})
	i.DB.LogMode(false)
	i.DB.AutoMigrate(&Reminder{})
	api := rest.NewApi()
	api.Use(&rest.TimerMiddleware{},
		&rest.RecorderMiddleware{},
		&rest.PoweredByMiddleware{},
		&rest.RecoverMiddleware{})
	router, err := rest.MakeRouter(
		rest.Post("/reminders", i.PostReminder),
		rest.Get("/reminders/:id", i.GetReminder),
	)
	shared.CheckErr(err)
	api.SetApp(router)
	l, err := net.Listen("tcp", ":"+shared.PORT)
	shared.CheckErr(err)
	// serve in the background; the caller closing l makes Serve return
	go func() {
		http.Serve(l, api.MakeHandler())
		// shared.CheckErr(e)
	}()
	return l
}
// GetReminder looks up a reminder by the :id path parameter and writes it
// as JSON, replying 404 when no row matches.
func (i *Impl) GetReminder(w rest.ResponseWriter, r *rest.Request) {
	var reminder Reminder
	if err := i.DB.First(&reminder, r.PathParam("id")).Error; err != nil {
		rest.NotFound(w, r)
		return
	}
	w.WriteJson(&reminder)
}
// PostReminder decodes a reminder from the request body, persists it, and
// echoes the stored record (including its generated id) back as JSON.
func (i *Impl) PostReminder(w rest.ResponseWriter, r *rest.Request) {
	var reminder Reminder
	err := r.DecodeJsonPayload(&reminder)
	if err == nil {
		err = i.DB.Save(&reminder).Error
	}
	if err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteJson(&reminder)
}
// Query issues one timed POST against the web server and returns the
// start/end timing result.
func Query() *shared.Result {
	var req *http.Response
	var err error
	res := &shared.Result{}
	res.Start = shared.GetTime()
	req, err = http.Post(shared.WEBSERVER_URL, "application/json", bytes.NewBuffer([]byte(`{"message":"Buy cheese and bread for breakfast."}`)))
	res.End = shared.GetTime()
	// fmt.Printf("")
	// NOTE(review): if shared.CheckErr does not abort on error, req can be
	// nil here and Body.Close would panic -- confirm CheckErr's behavior
	shared.CheckErr(err)
	req.Body.Close()
	return res
}
// RunTest spawns `clients` goroutines that each issue timed requests in a
// loop for `seconds` seconds, then collects and returns every result on
// the returned (closed) channel.
func RunTest(clients int, seconds int) chan *shared.Result {
	// Effectively make the channel's buffer infinite -- don't want it to block
	results := make(chan *shared.Result, seconds*100000*clients)
	// stopped is written by this goroutine and read by every worker; the
	// original used a plain bool, which is a data race under -race. Use an
	// atomic flag instead.
	var stopped int32
	var wg sync.WaitGroup
	wg.Add(clients)
	for i := 0; i < clients; i++ {
		go func(j int) {
			var res []*shared.Result
			for {
				if r := Query(); r == nil {
					continue
				} else if atomic.LoadInt32(&stopped) == 1 {
					// Time to stop issuing requests: flush collected
					// results onto the channel and signal completion.
					for _, r := range res {
						results <- r
					}
					wg.Done()
					return
				} else {
					r.ClientNum = j
					res = append(res, r)
					time.Sleep(20 * time.Millisecond)
				}
			}
		}(i)
	}
	<-time.After(time.Duration(seconds) * time.Second)
	atomic.StoreInt32(&stopped, 1)
	wg.Wait()
	close(results)
	return results
}
// Output writes results as csv to "<fname><clients>c-<requests>r".
// Each line has the form: clientID,start,end,duration,type.
func Output(clients, requests int, res chan *shared.Result, fname string) {
	f, err := os.Create(fmt.Sprintf("%s%dc-%dr", fname, clients, requests))
	shared.CheckErr(err)
	defer f.Close() // close even if a write aborts
	for r := range res {
		// Fprintf writes straight to the file, avoiding the Sprintf+Write
		// round trip; the original also silently ignored the Write error
		_, err = fmt.Fprintf(f, "%d,%d,%d,%d,%s\n",
			r.ClientNum,
			r.Start,
			r.End,
			r.End-r.Start,
			r.CallType)
		shared.CheckErr(err)
	}
}
// main runs the benchmark: starts the server, drives CLIENTS concurrent
// clients for REQUESTS seconds, then writes csv results into OUTPUT_DIR.
// Usage: <prog> <clients> <seconds> <output-dir>
func main() {
	shared.LoadWindowsTimer()
	// fail with a usage message instead of panicking on a short arg list
	if len(os.Args) < 4 {
		fmt.Println("usage: webserver <clients> <seconds> <output-dir>")
		os.Exit(1)
	}
	CLIENTS, err := strconv.Atoi(os.Args[1])
	shared.CheckErr(err)
	REQUESTS, e := strconv.Atoi(os.Args[2]) // number of seconds to run, despite the name
	shared.CheckErr(e)
	OUTPUT_DIR := os.Args[3]
	fmt.Printf("Webserver %v clients %v seconds, output: %v\n", CLIENTS, REQUESTS, OUTPUT_DIR)
	server := StartServer()
	results := RunTest(CLIENTS, REQUESTS)
	server.Close()
	Output(CLIENTS, REQUESTS, results, OUTPUT_DIR)
	os.Exit(0)
}
|
package singleton
// hungryInstance is the single, eagerly created instance.
var hungryInstance = &HungrySingleton{}

// HungrySingleton demonstrates the singleton pattern with eager ("hungry")
// initialization: the instance is created at package load time rather than
// on first use.
type HungrySingleton struct {
}

// GetHungryInstance returns the process-wide HungrySingleton instance.
func GetHungryInstance() *HungrySingleton {
	return hungryInstance
}
|
package tsin
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01200101 is the XML document wrapper for the tsin.012.001.01
// PartyRegistrationAndGuaranteeAcknowledgement message.
type Document01200101 struct {
	XMLName xml.Name                                         `xml:"urn:iso:std:iso:20022:tech:xsd:tsin.012.001.01 Document"`
	Message *PartyRegistrationAndGuaranteeAcknowledgementV01 `xml:"PtyRegnAndGrntAck"`
}
// AddMessage allocates the message payload and returns it for population.
func (d *Document01200101) AddMessage() *PartyRegistrationAndGuaranteeAcknowledgementV01 {
	msg := &PartyRegistrationAndGuaranteeAcknowledgementV01{}
	d.Message = msg
	return msg
}
// The message PartyManagementPaymentAcknowledgement is sent from a trade partner to any partner requested through a PartyManagementPaymentAcknowledgemenNotification message to acknowledge the notified factoring service agreement. Depending on legal contexts, the acknowledgement may be required in order for the financial service agreement to become effective.
// The message references related messages and may include referenced data.
// The message can carry digital signatures if required by context.
// PartyRegistrationAndGuaranteeAcknowledgementV01 is the body of the
// tsin.012.001.01 message (see the message description above).
type PartyRegistrationAndGuaranteeAcknowledgementV01 struct {
	// Set of characteristics that unambiguously identify the acknowledgement, common parameters, documents and identifications.
	Header *iso20022.BusinessLetter1 `xml:"Hdr"`
	// List of party management acknowledgements.
	AcknowledgementList []*iso20022.FinancingAgreementList1 `xml:"AckList"`
	// Number of acknowledgement lists as control value.
	AcknowledgementCount *iso20022.Max15NumericText `xml:"AckCnt"`
	// Total number of individual items in all lists.
	ItemCount *iso20022.Max15NumericText `xml:"ItmCnt,omitempty"`
	// Total of all individual amounts included in all lists, irrespective of currencies or direction.
	ControlSum *iso20022.DecimalNumber `xml:"CtrlSum,omitempty"`
	// Referenced or related business message.
	AttachedMessage []*iso20022.EncapsulatedBusinessMessage1 `xml:"AttchdMsg,omitempty"`
}
// AddHeader allocates and returns the business letter header.
func (p *PartyRegistrationAndGuaranteeAcknowledgementV01) AddHeader() *iso20022.BusinessLetter1 {
	header := &iso20022.BusinessLetter1{}
	p.Header = header
	return header
}
// AddAcknowledgementList appends a fresh acknowledgement list entry and
// returns it for population.
func (p *PartyRegistrationAndGuaranteeAcknowledgementV01) AddAcknowledgementList() *iso20022.FinancingAgreementList1 {
	item := &iso20022.FinancingAgreementList1{}
	p.AcknowledgementList = append(p.AcknowledgementList, item)
	return item
}
// SetAcknowledgementCount records the number of acknowledgement lists.
func (p *PartyRegistrationAndGuaranteeAcknowledgementV01) SetAcknowledgementCount(value string) {
	count := iso20022.Max15NumericText(value)
	p.AcknowledgementCount = &count
}
// SetItemCount records the total number of individual items in all lists.
func (p *PartyRegistrationAndGuaranteeAcknowledgementV01) SetItemCount(value string) {
	count := iso20022.Max15NumericText(value)
	p.ItemCount = &count
}
// SetControlSum records the control total of all individual amounts.
func (p *PartyRegistrationAndGuaranteeAcknowledgementV01) SetControlSum(value string) {
	sum := iso20022.DecimalNumber(value)
	p.ControlSum = &sum
}
// AddAttachedMessage appends a fresh encapsulated business message and
// returns it for population.
func (p *PartyRegistrationAndGuaranteeAcknowledgementV01) AddAttachedMessage() *iso20022.EncapsulatedBusinessMessage1 {
	msg := &iso20022.EncapsulatedBusinessMessage1{}
	p.AttachedMessage = append(p.AttachedMessage, msg)
	return msg
}
|
package http
import (
"context"
"crypto/tls"
"database/sql"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/javiercbk/jayoak/files"
"github.com/javiercbk/jayoak/api/sound"
"github.com/gin-contrib/sessions"
"github.com/gin-contrib/sessions/redis"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
// imports the postgres sql driver
_ "github.com/lib/pq"
)
// cookieName is the name of the session cookie issued by the API routes.
const cookieName = "joss"

// Config contains the configuration for the server.
type Config struct {
	Address       string // HTTP listen address
	FilesFolder   string // root folder for the file repository
	RedisAddress  string // redis host:port for the session store
	RedisPassword string
	RedisSecret   string // secret authenticating session cookies
	DBName        string
	DBHost        string
	DBUser        string
	DBPass        string
}
// Serve sets up the session store, database, and routes, then serves HTTP
// connections until SIGINT/SIGTERM, shutting down gracefully with a
// 5 second timeout.
func Serve(cnf Config, logger *log.Logger) error {
	store, err := redis.NewStore(10, "tcp", cnf.RedisAddress, cnf.RedisPassword, []byte(cnf.RedisSecret))
	if err != nil {
		return err
	}
	postgresOpts := fmt.Sprintf("dbname=%s host=%s user=%s password=%s", cnf.DBName, cnf.DBHost, cnf.DBUser, cnf.DBPass)
	db, err := sql.Open("postgres", postgresOpts)
	if err != nil {
		return err
	}
	// release the connection pool on exit (the original leaked it)
	defer db.Close()
	if err = db.Ping(); err != nil {
		return err
	}
	repository := files.NewRepository(cnf.FilesFolder)
	// set new validator
	binding.Validator = new(defaultValidator)
	router := gin.Default()
	apiRouter := router.Group("/api")
	apiRouter.Use(sessions.Sessions(cookieName, store))
	{
		soundHandlers := sound.NewHandlers(logger, db, repository)
		soundHandlers.Routes(apiRouter)
	}
	srv := newServer(router, cnf.Address)
	go func() {
		// service connections
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatalf("listen: %s\n", err)
		}
	}()
	// Wait for an interrupt signal to gracefully shut down the server with
	// a timeout of 5 seconds.
	quit := make(chan os.Signal, 1)
	// kill (no param) sends syscall.SIGTERM; kill -2 is syscall.SIGINT;
	// kill -9 (SIGKILL) cannot be caught, so there is no need to add it
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	<-quit
	log.Println("Shutdown Server ...")
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Fatal("Server Shutdown:", err)
	}
	// the original also blocked on <-ctx.Done() here, which made every
	// shutdown wait the full 5 seconds even after Shutdown had returned
	log.Println("Server exiting")
	return nil
}
// newServer builds an http.Server with hardened TLS settings and sane
// timeouts for the given handler and listen address.
// NOTE(review): Serve starts this server with ListenAndServe, which does
// not use TLSConfig; confirm whether ListenAndServeTLS was intended.
func newServer(handler http.Handler, address string) *http.Server {
	// see https://blog.cloudflare.com/exposing-go-on-the-internet/
	tlsConfig := &tls.Config{
		// Causes servers to use Go's default ciphersuite preferences,
		// which are tuned to avoid attacks. Does nothing on clients.
		PreferServerCipherSuites: true,
		// Only use curves which have assembly implementations
		CurvePreferences: []tls.CurveID{
			tls.CurveP256,
			tls.X25519, // Go 1.8 only
		},
		MinVersion: tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, // Go 1.8 only
			tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,   // Go 1.8 only
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			// Best disabled, as they don't provide Forward Secrecy,
			// but might be necessary for some clients
			// tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
			// tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
		},
	}
	return &http.Server{
		Addr:         address,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 5 * time.Second,
		IdleTimeout:  120 * time.Second,
		TLSConfig:    tlsConfig,
		Handler:      handler,
	}
}
|
package main
import (
"fmt"
"math"
)
// sqrt returns the square root of n; the Go way to signal an error is to
// return it alongside the result.
func sqrt(n float64) (float64, error) {
	if n < 0 {
		// fixed typo ("og" -> "of") and lowercased per Go error-string
		// convention
		return 0.0, fmt.Errorf("sqrt of negative value (%f)", n)
	}
	return math.Sqrt(n), nil // nil means success (Go's "null")
}
// testMain demonstrates the check-the-error-first idiom around sqrt.
func testMain() {
	result, err := sqrt(2.0)
	if err != nil {
		fmt.Printf("ERROR: %s\n", err)
		return
	}
	fmt.Println(result)
}
|
package middleware
import (
"net/http"
)
type Adapter func(http.HandlerFunc) http.HandlerFunc
type GoMiddleware struct {
}
func InitMidleware() *GoMiddleware {
return &GoMiddleware{}
}
func (gm *GoMiddleware) Method(m string) Adapter {
return func(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if m != r.Method {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
f(w, r)
}
}
}
func (gm *GoMiddleware) ApplyMiddleware(f http.HandlerFunc, mw ...Adapter) http.HandlerFunc {
for _, m := range mw {
f = m(f)
}
return f
}
|
package pagination
import (
"math"
)
// Pagination is a general purpose pagination type, it knows how to calculate
// offset and number of pages. It also contains some utility functions
// that helps common tasks. One special utility is the PagesStream method
// that returns a channel to range over for presenting a list of all pages
// without adding them all to a slice.
type Pagination struct {
	itemsPerPage  int // maximum items shown on one page (>= 1)
	numberOfItems int // total items in the collection
	currentPage   int // normalized to [1, NumberOfPages()]
}

// New returns a new Pagination with the provided values.
// The current page is normalized to be inside the bounds of the available
// pages: anything below 1 becomes 1 (the original only handled exactly 0,
// letting negative pages through against its documented contract) and
// anything past the last page becomes the last page. itemsPerPage is
// raised to 1 when non-positive so the page math never divides by zero.
func New(numberOfItems, itemsPerPage, currentPage int) *Pagination {
	if itemsPerPage < 1 {
		itemsPerPage = 1
	}
	if currentPage < 1 {
		currentPage = 1
	}
	n := int(math.Ceil(float64(numberOfItems) / float64(itemsPerPage)))
	// n > 0 guard: an empty collection keeps currentPage at 1 instead of
	// being forced to 0 (which made Offset negative in the original)
	if n > 0 && currentPage > n {
		currentPage = n
	}
	return &Pagination{
		itemsPerPage:  itemsPerPage,
		numberOfItems: numberOfItems,
		currentPage:   currentPage,
	}
}

// PagesStream returns a channel yielding every page number from 1 to
// NumberOfPages(); useful to range over when building a list of pages
// without allocating a slice.
func (p *Pagination) PagesStream() chan int {
	stream := make(chan int)
	go func() {
		for i := 1; i <= p.NumberOfPages(); i++ {
			stream <- i
		}
		close(stream)
	}()
	return stream
}

// Offset calculates the offset into the collection the current page represents.
func (p *Pagination) Offset() int {
	return (p.CurrentPage() - 1) * p.ItemsPerPage()
}

// NumberOfPages calculates the number of pages needed based on the number
// of items and items per page.
func (p *Pagination) NumberOfPages() int {
	return int(math.Ceil(float64(p.NumberOfItems()) / float64(p.ItemsPerPage())))
}

// PreviousPage returns the page number before the current page, or 1 when
// the current page is the first.
func (p *Pagination) PreviousPage() int {
	if p.CurrentPage() <= 1 {
		return 1
	}
	return p.CurrentPage() - 1
}

// NextPage returns the page number after the current page, or the last
// page number when the current page is the last.
func (p *Pagination) NextPage() int {
	if p.CurrentPage() >= p.NumberOfPages() {
		return p.NumberOfPages()
	}
	return p.CurrentPage() + 1
}

// IsCurrentPage reports whether page matches the current page.
func (p *Pagination) IsCurrentPage(page int) bool {
	return p.CurrentPage() == page
}

// Pages returns a slice with all page numbers, e.g. [1 2 3 4 5].
func (p *Pagination) Pages() []int {
	s := make([]int, 0, p.NumberOfPages())
	for i := 1; i <= p.NumberOfPages(); i++ {
		s = append(s, i)
	}
	return s
}

// Show reports whether pagination is worth rendering, i.e. there is more
// than one page.
func (p *Pagination) Show() bool {
	return p.NumberOfPages() > 1
}

// CurrentPage returns the normalized current page.
func (p *Pagination) CurrentPage() int {
	return p.currentPage
}

// NumberOfItems returns the total number of items.
func (p *Pagination) NumberOfItems() int {
	return p.numberOfItems
}

// ItemsPerPage returns the number of items shown per page.
func (p *Pagination) ItemsPerPage() int {
	return p.itemsPerPage
}
|
package infrastructure
import (
"github.com/golobby/container"
"github.com/morteza-r/flexdb"
"github.com/morteza-r/flexdb-server/app/application"
)
// SetUp registers the DbService singleton in the IoC container; the
// flexdb database is created once and shared by every resolution.
func SetUp() {
	container.Singleton(func() application.DbService {
		return application.DbService{
			Db: flexdb.NewDb(),
		}
	})
}
|
package wallet
import (
"fmt"
"github.com/textileio/go-textile/keypair"
"github.com/tyler-smith/go-bip39"
)
// ErrInvalidWordCount is returned when a requested mnemonic length is not
// a valid BIP-39 phrase size.
var ErrInvalidWordCount = fmt.Errorf("invalid word count (must be 12, 15, 18, 21, or 24)")

// WordCount is the number of words in a BIP-39 recovery phrase.
type WordCount int

// The valid BIP-39 phrase lengths.
const (
	TwelveWords     WordCount = 12
	FifteenWords    WordCount = 15
	EighteenWords   WordCount = 18
	TwentyOneWords  WordCount = 21
	TwentyFourWords WordCount = 24
)
// NewWordCount converts a raw integer into a WordCount, returning
// ErrInvalidWordCount for anything that is not a valid BIP-39 length.
func NewWordCount(cnt int) (*WordCount, error) {
	valid := []WordCount{TwelveWords, FifteenWords, EighteenWords, TwentyOneWords, TwentyFourWords}
	for _, candidate := range valid {
		if cnt == int(candidate) {
			wc := candidate
			return &wc, nil
		}
	}
	return nil, ErrInvalidWordCount
}
// EntropySize returns the number of entropy bits that produce a mnemonic
// of this length, defaulting to 256 for unrecognized values.
func (w WordCount) EntropySize() int {
	sizes := map[WordCount]int{
		TwelveWords:     128,
		FifteenWords:    160,
		EighteenWords:   192,
		TwentyOneWords:  224,
		TwentyFourWords: 256,
	}
	if size, ok := sizes[w]; ok {
		return size
	}
	return 256
}
// Wallet is a BIP32 Hierarchical Deterministic Wallet based on stellar's
// implementation of https://github.com/satoshilabs/slips/blob/master/slip-0010.md,
// https://github.com/stellar/stellar-protocol/pull/63
type Wallet struct {
	// RecoveryPhrase is the BIP-39 mnemonic from which all keys derive.
	RecoveryPhrase string
}
// WalletFromWordCount creates a wallet with a fresh recovery phrase of the
// given length (12, 15, 18, 21, or 24 words).
func WalletFromWordCount(wordCount int) (*Wallet, error) {
	wc, err := NewWordCount(wordCount)
	if err != nil {
		return nil, err
	}
	return WalletFromEntropy(wc.EntropySize())
}
// WalletFromEntropy creates a wallet whose recovery phrase is generated
// from entropySize bits of fresh entropy.
func WalletFromEntropy(entropySize int) (*Wallet, error) {
	entropy, err := bip39.NewEntropy(entropySize)
	if err != nil {
		return nil, err
	}
	phrase, err := bip39.NewMnemonic(entropy)
	if err != nil {
		return nil, err
	}
	return &Wallet{RecoveryPhrase: phrase}, nil
}
// WalletFromMnemonic wraps an existing mnemonic phrase in a Wallet; the
// phrase is not validated here (AccountAt validates on use).
func WalletFromMnemonic(mnemonic string) *Wallet {
	return &Wallet{RecoveryPhrase: mnemonic}
}
// AccountAt derives the keypair for the account at the given index from
// the recovery phrase plus an optional passphrase.
// To understand how this works, refer to the living document:
// https://paper.dropbox.com/doc/Hierarchical-Deterministic-Wallets--Ae0TOjGObNq_zlyYFh7Ea0jNAQ-t7betWDTvXtK6qqD8HXKf
func (w *Wallet) AccountAt(index int, passphrase string) (*keypair.Full, error) {
	seed, err := bip39.NewSeedWithErrorChecking(w.RecoveryPhrase, passphrase)
	if err != nil {
		// surface the common failure with a friendlier message
		if err == bip39.ErrInvalidMnemonic {
			return nil, fmt.Errorf("invalid mnemonic phrase")
		}
		return nil, err
	}
	// derive the Textile master key, then the hardened child at index
	masterKey, err := DeriveForPath(TextileAccountPrefix, seed)
	if err != nil {
		return nil, err
	}
	key, err := masterKey.Derive(FirstHardenedIndex + uint32(index))
	if err != nil {
		return nil, err
	}
	return keypair.FromRawSeed(key.RawSeed())
}
|
package main
import "fmt"
// main demonstrates map literals: prints one member's presence flag, then
// every name/value pair (iteration order is random).
func main() {
	party := map[string]bool{
		"Calisca": true,
		"Heodan":  true,
	}
	fmt.Println(party["Calisca"])
	for name, present := range party {
		fmt.Println(name, present)
	}
}
|
package main
import "fmt"
// fruits holds the list of fruit names entered by the user.
type fruits struct {
	fruit []string
}
// input prompts for and appends num fruit names read from stdin.
func (f *fruits) input(num int) {
	var name string
	for i := 0; i < num; i++ {
		fmt.Print("fruit[", i, "]:")
		fmt.Scanln(&name)
		f.fruit = append(f.fruit, name)
	}
}
// print writes each stored fruit name on its own line.
func (f *fruits) print() {
	for i := range f.fruit {
		fmt.Println(f.fruit[i])
	}
}
// main reads a count, then that many fruit names, and echoes them back.
func main() {
	basket := &fruits{}
	var count int
	fmt.Print("fruit num:")
	fmt.Scanln(&count)
	basket.input(count)
	basket.print()
}
|
package sql
// Order pairs a column with a sort direction for an ORDER BY clause.
type Order struct {
	Column    Column
	Direction Direction
}

// Direction is a SQL sort direction keyword.
type Direction string

// Ascending sorts smallest-first (SQL ASC).
const Ascending Direction = "ASC"

// Descending sorts largest-first (SQL DESC).
const Descending Direction = "DESC"
|
package models
import (
"errors"
"labix.org/v2/mgo/bson"
)
/*
 * Find a single category by its categoryID.
 */
// FindByID returns the category whose categoryID matches id, or a zero
// Cate when there is no match. Panics on a query error.
func (d *CateDal) FindByID(id int) Cate {
	result := []Cate{}
	uc := d.session.DB(DbName).C(CateCollection)
	// Limit(1): only the first match is ever returned, so don't fetch the
	// entire matching set like the original did
	err := uc.Find(bson.M{"categoryID": id}).Limit(1).All(&result)
	if err != nil {
		panic(err)
	}
	if len(result) > 0 {
		return result[0]
	}
	return Cate{}
}
/*
 * Get the full category list.
 */
// List returns every category. It panics on a query error for consistency
// with FindByID (the original silently discarded the error, which could
// return an empty list on failure).
func (d *CateDal) List() []Cate {
	result := []Cate{}
	uc := d.session.DB(DbName).C(CateCollection)
	if err := uc.Find(nil).All(&result); err != nil {
		panic(err)
	}
	return result
}
/*
 * Add a new category.
 */
// Add inserts a new category, assigning it the next free categoryID.
// Returns an error when the name is already taken or a query/insert fails.
func (d *CateDal) Add(cate *Cate) error {
	uc := d.session.DB(DbName).C(CateCollection)
	// duplicate-name check; surface the Count error instead of silently
	// ignoring it as the original did
	i, err := uc.Find(bson.M{"categoryName": cate.CategoryName}).Count()
	if err != nil {
		return err
	}
	if i != 0 {
		return errors.New("名称已经被使用")
	}
	// next categoryID = current maximum + 1 (1 when the collection is empty)
	maxObj := Cate{}
	maxID := 0
	if err := uc.Find(nil).Sort("-categoryID").One(&maxObj); err == nil {
		maxID = maxObj.CategoryID
	}
	cate.Id = bson.NewObjectId()
	cate.CategoryID = maxID + 1
	// pass the *Cate directly rather than a **Cate as the original did
	return uc.Insert(cate)
}
/*
 * Update a category.
 */
func (d *CateDal) Edit(cate *Cate) error {
uc := d.session.DB(DbName).C(CateCollection)
i, _ := uc.Find(bson.M{"cateName": cate.CategoryName,"categoryID": bson.M{"$ne": cate.CategoryID}}).Count()
if i != 0 {
return errors.New("名称已经被使用")
}
colQuerier := bson.M{"categoryID": cate.CategoryID}
change := bson.M{"$set": bson.M{"categoryName": cate.CategoryName,"description": cate.Description}}
err := uc.Update(colQuerier, change)
return err
}
// Delete removes the category with the given categoryID.
func (d *CateDal) Delete(categoryID int) error {
	return d.session.DB(DbName).C(CateCollection).Remove(bson.M{"categoryID": categoryID})
}
|
package port
import (
"context"
"github.com/mirzaakhena/danarisan/domain/repository"
"github.com/mirzaakhena/danarisan/domain/service"
)
// KocokUndianOutport is the outbound port of the "kocok undian" (lottery
// draw) use case: all repository, transaction and ID-generation
// dependencies the interactor needs, plus the two helper calls below.
type KocokUndianOutport interface {
	repository.FindOneArisanRepo
	repository.FindOneUndianRepo
	repository.FindAllSlotNotWinYetRepo
	repository.FindAllSlotRepo
	repository.FindOnePesertaRepo
	repository.FindLastSaldoAkunRepo
	repository.SaveSlotRepo
	repository.SaveArisanRepo
	repository.SaveUndianRepo
	repository.SaveTagihanRepo
	repository.SaveJurnalRepo
	repository.SaveSaldoAkunRepo
	repository.SavePesertaRepo
	service.TransactionDB
	service.IDGenerator

	// GetRandomNumber draws a random number; Length presumably bounds the
	// draw range (e.g. number of candidates) — confirm with implementors.
	GetRandomNumber(ctx context.Context, req GetRandomNumberRequest) (*GetRandomNumberResponse, error)

	// TopupPeserta credits TotalTopup to the given participant.
	TopupPeserta(ctx context.Context, req TopupPesertaRequest) (*TopupPesertaResponse, error)
}
// GetRandomNumberRequest carries the input of GetRandomNumber.
type GetRandomNumberRequest struct {
	Length int // presumably the size of the draw range — TODO confirm
}

// GetRandomNumberResponse carries the drawn number.
type GetRandomNumberResponse struct {
	RandomNumber int
}

// TopupPesertaRequest carries the input of TopupPeserta.
type TopupPesertaRequest struct {
	PesertaID string // participant identifier
	TotalTopup float64 // amount to credit
}

// TopupPesertaResponse is the (currently empty) output of TopupPeserta.
type TopupPesertaResponse struct {
}
|
package camt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document04200104 is the XML document wrapper for the ISO 20022
// camt.042.001.04 message.
type Document04200104 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:camt.042.001.04 Document"`
	Message *FundDetailedEstimatedCashForecastReportV04 `xml:"FndDtldEstmtdCshFcstRpt"`
}
// AddMessage allocates the document's message payload and returns it so
// the caller can populate it.
func (d *Document04200104) AddMessage() *FundDetailedEstimatedCashForecastReportV04 {
	msg := &FundDetailedEstimatedCashForecastReportV04{}
	d.Message = msg
	return msg
}
// Scope
// A report provider, such as a transfer agent, sends the FundDetailedEstimatedCashForecastReport message to the report user, such as an investment manager or pricing agent, to report the estimated cash incomings and outgoings, sorted by country, institution name or other criteria defined by the user of one or more share classes of an investment fund on one or more trade dates.
// The cash movements may result from, for example, redemption, subscription, switch transactions or reinvestment of dividends.
// Usage
// The FundDetailedEstimatedCashForecastReport is used to provide estimated cash movements, that is, it is sent prior to the cut-off time and/or the price valuation of the fund. The message contains incoming and outgoing cash flows that are estimated, that is, the price has not been applied. If the price is definitive, then the FundDetailedConfirmedCashForecastReport message must be used.
// The message structure allows for the following uses:
// - to provide cash in and cash out amounts for a fund/sub fund and one or more share classes (a FundOrSubFundDetails sequence and one or more EstimatedFundCashForecastDetails sequences are used),
// - to provide cash in and cash out amounts for one or more share classes (one or more EstimatedFundCashForecastDetails sequences are used).
// If the report is to provide estimated cash in and cash out for a fund/sub fund only and not for one or more share classes, then the FundEstimatedCashForecastReport message must be used.
// The FundDetailedEstimatedCashForecastReport message is used to report cash movements in or out of a fund, organised by party, such as fund management company, country, currency or by some other criteria defined by the report provider. If the report is used to give the cash-in and cash-out for a party, then additional criteria, such as currency and country, can be specified.
// In addition, the underlying transaction type for the cash-in or cash-out movement can be specified, as well as information about the cash movement's underlying orders, such as commission and charges.
// FundDetailedEstimatedCashForecastReportV04 is the camt.042.001.04
// message body; see the Scope/Usage notes above. Fields tagged with
// omitempty are optional in the schema.
type FundDetailedEstimatedCashForecastReportV04 struct {
	// Identifies the message.
	MessageIdentification *iso20022.MessageIdentification1 `xml:"MsgId"`
	// Collective reference identifying a set of messages.
	PoolReference *iso20022.AdditionalReference3 `xml:"PoolRef,omitempty"`
	// Reference to a linked message that was previously sent.
	PreviousReference []*iso20022.AdditionalReference3 `xml:"PrvsRef,omitempty"`
	// Reference to a linked message that was previously received.
	RelatedReference []*iso20022.AdditionalReference3 `xml:"RltdRef,omitempty"`
	// Pagination of the message.
	MessagePagination *iso20022.Pagination `xml:"MsgPgntn"`
	// Information about the fund/sub fund when the report either specifies cash flow for the fund/sub fund or for a share class of the fund/sub fund.
	FundOrSubFundDetails *iso20022.Fund3 `xml:"FndOrSubFndDtls,omitempty"`
	// Information related to the estimated cash-in and cash-out flows for a specific trade date as a result of transactions in shares in an investment fund, for example, subscriptions, redemptions or switches. The information provided is sorted by pre-defined criteria such as country, institution, currency or user defined criteria.
	EstimatedFundCashForecastDetails []*iso20022.EstimatedFundCashForecast5 `xml:"EstmtdFndCshFcstDtls"`
	// Estimated net cash as a result of the cash-in and cash-out flows specified in the fund cash forecast details.
	ConsolidatedNetCashForecast *iso20022.NetCashForecast3 `xml:"CnsltdNetCshFcst,omitempty"`
	// Additional information that can not be captured in the structured fields and/or any other specific block.
	Extension []*iso20022.Extension1 `xml:"Xtnsn,omitempty"`
}
// AddMessageIdentification allocates the MsgId block and returns it for population.
func (f *FundDetailedEstimatedCashForecastReportV04) AddMessageIdentification() *iso20022.MessageIdentification1 {
	f.MessageIdentification = new(iso20022.MessageIdentification1)
	return f.MessageIdentification
}

// AddPoolReference allocates the optional PoolRef block and returns it.
func (f *FundDetailedEstimatedCashForecastReportV04) AddPoolReference() *iso20022.AdditionalReference3 {
	f.PoolReference = new(iso20022.AdditionalReference3)
	return f.PoolReference
}

// AddPreviousReference appends a new PrvsRef entry and returns it.
func (f *FundDetailedEstimatedCashForecastReportV04) AddPreviousReference() *iso20022.AdditionalReference3 {
	newValue := new(iso20022.AdditionalReference3)
	f.PreviousReference = append(f.PreviousReference, newValue)
	return newValue
}

// AddRelatedReference appends a new RltdRef entry and returns it.
func (f *FundDetailedEstimatedCashForecastReportV04) AddRelatedReference() *iso20022.AdditionalReference3 {
	newValue := new(iso20022.AdditionalReference3)
	f.RelatedReference = append(f.RelatedReference, newValue)
	return newValue
}

// AddMessagePagination allocates the MsgPgntn block and returns it.
func (f *FundDetailedEstimatedCashForecastReportV04) AddMessagePagination() *iso20022.Pagination {
	f.MessagePagination = new(iso20022.Pagination)
	return f.MessagePagination
}

// AddFundOrSubFundDetails allocates the optional FndOrSubFndDtls block and returns it.
func (f *FundDetailedEstimatedCashForecastReportV04) AddFundOrSubFundDetails() *iso20022.Fund3 {
	f.FundOrSubFundDetails = new(iso20022.Fund3)
	return f.FundOrSubFundDetails
}

// AddEstimatedFundCashForecastDetails appends a new EstmtdFndCshFcstDtls entry and returns it.
func (f *FundDetailedEstimatedCashForecastReportV04) AddEstimatedFundCashForecastDetails() *iso20022.EstimatedFundCashForecast5 {
	newValue := new(iso20022.EstimatedFundCashForecast5)
	f.EstimatedFundCashForecastDetails = append(f.EstimatedFundCashForecastDetails, newValue)
	return newValue
}

// AddConsolidatedNetCashForecast allocates the optional CnsltdNetCshFcst block and returns it.
func (f *FundDetailedEstimatedCashForecastReportV04) AddConsolidatedNetCashForecast() *iso20022.NetCashForecast3 {
	f.ConsolidatedNetCashForecast = new(iso20022.NetCashForecast3)
	return f.ConsolidatedNetCashForecast
}

// AddExtension appends a new Xtnsn entry and returns it.
func (f *FundDetailedEstimatedCashForecastReportV04) AddExtension() *iso20022.Extension1 {
	newValue := new(iso20022.Extension1)
	f.Extension = append(f.Extension, newValue)
	return newValue
}
|
package main
import (
"fmt"
"github.com/soniakeys/graph"
)
// node represents a node in a directed graph. It represents directed edges
// from the node with the handy DijkstraNeighbor type from the graph package.
type node struct {
	nbs []graph.DijkstraNeighbor // directed edges as DijkstraNeighbors
	name string // example application specific data
}

// edge is a simple number representing an edge length/distance/weight.
type edge float64

// Neighbors implements graph.DijkstraNode; the argument slice (offered by
// the library for reuse) is ignored and the stored edges are returned.
// node also implements fmt.Stringer.
func (n *node) Neighbors([]graph.DijkstraNeighbor) []graph.DijkstraNeighbor {
	return n.nbs
}

// String returns the node's display name.
func (n *node) String() string { return n.name }

// Distance implements graph.DijkstraEdge by returning the edge weight.
func (e edge) Distance() float64 { return float64(e) }

// edgeData struct for simple specification of example data:
// an edge from v1 to v2 with length l.
type edgeData struct {
	v1, v2 string
	l float64
}
// example data: the classic directed graph used in the Wikipedia
// Dijkstra illustration, plus the start/end vertices to search between.
var (
	exampleEdges = []edgeData{
		{"a", "b", 7},
		{"a", "c", 9},
		{"a", "f", 14},
		{"b", "c", 10},
		{"b", "d", 15},
		{"c", "d", 11},
		{"c", "f", 2},
		{"d", "e", 6},
		{"e", "f", 9},
	}
	exampleStart = "a"
	exampleEnd = "e"
)
// linkGraph builds the linked node representation of the example edge
// list and returns the node count plus the start and end nodes (nil when
// the requested name does not appear in the data).
func linkGraph(g []edgeData, start, end string) (allNodes int, startNode, endNode *node) {
	nodes := map[string]*node{}
	// First pass: create a node object for every vertex name seen.
	for _, e := range g {
		for _, name := range []string{e.v1, e.v2} {
			if _, ok := nodes[name]; !ok {
				nodes[name] = &node{name: name}
			}
		}
	}
	// Second pass: attach each edge to its source node's neighbor list.
	for _, e := range g {
		src := nodes[e.v1]
		src.nbs = append(src.nbs, graph.DijkstraNeighbor{edge(e.l), nodes[e.v2]})
	}
	return len(nodes), nodes[start], nodes[end]
}
// main links the example data, runs Dijkstra's shortest path from the
// start node to the end node, and prints the path and its length.
func main() {
	// construct linked representation of example data
	allNodes, startNode, endNode :=
		linkGraph(exampleEdges, exampleStart, exampleEnd)
	// echo initial conditions
	fmt.Printf("Directed graph with %d nodes, %d edges\n",
		allNodes, len(exampleEdges))
	// run Dijkstra's shortest path algorithm
	p, l := graph.DijkstraShortestPath(startNode, endNode)
	if p == nil {
		fmt.Println("No path from start node to end node")
		return
	}
	fmt.Println("Shortest path:", p)
	fmt.Println("Path length:", l)
}
|
package models
import (
"encoding/json"
"log"
)
// Post mirrors the JSONPlaceholder-style post payload used by the
// (Un)MarshalBinary helpers below.
type Post struct {
	Id int `json:"id"`
	UserId int `json:"userId"`
	Title string `json:"title"`
	Body string `json:"body"`
}
// MarshalBinary encodes the post as JSON; a nil receiver yields nil
// bytes and no error.
func (p *Post) MarshalBinary() ([]byte, error) {
	if p != nil {
		return json.Marshal(p)
	}
	return nil, nil
}
// UnmarshalBinary decodes JSON data into the receiver.
func (p *Post) UnmarshalBinary(data []byte) error {
	return json.Unmarshal(data, p)
}
// UnmarshalListBinary decodes a JSON array into a slice of posts; the
// error, if any, is logged before being returned.
func UnmarshalListBinary(data []byte) ([]Post, error) {
	// Fix: the parameter was previously named "byte", shadowing the
	// builtin byte type inside the function body.
	var posts []Post
	if err := json.Unmarshal(data, &posts); err != nil {
		log.Println("error occurred while unmarshal list of posts: ", err)
		return nil, err
	}
	return posts, nil
}
|
/*
This challenge has been divided into parts.
Your goal is to convert a sentence into a form of 'short-hand'
For Part 1 these are the rules
Take in 1 word
Remove all vowels, except the ones at the beginning and the end
If a letter is repeated more than once consecutively, reduce it to only one (e.g. Hello -> Hlo)
Create a function which converts the word into it's corresponding unicode characters based on above given rules, as given in the table
Letter Corresponding Unicode Character
A Λ
B L
C 𐍉
D Ꙅ
E ∟
F ⌽
G T
H |
I ⩗
J )
K <
L (
M ᨈ
N ᑎ
O ┼
P ⊥
Q ⩁
R \
S ⦵
T _
U ⋃
V ⌵
W ⌵ (same as V)
X X
Y ɥ
Z ᒣ
Test Cases
English Word Substituted Word
Quick ⩁𐍉<
Brown L\⌵ᑎ
Fox ⌽X
Jumped )ᨈ⊥Ꙅ
Over ┼⌵\
The _|∟
Lazy (ᒣɥ
Dog ꙄT
As this is code-golf, shortest answer wins!
To get an idea of what part 2 will have in store check out this sandbox link.
Part 2 coming soon
*/
package main
import (
"bytes"
"strings"
)
// main self-tests conv against the challenge's published test cases.
func main() {
	assert(conv("Quick") == "⩁𐍉<")
	assert(conv("Brown") == "L\\⌵ᑎ")
	assert(conv("Fox") == "⌽X")
	assert(conv("Jumped") == ")ᨈ⊥Ꙅ")
	assert(conv("Over") == "┼⌵\\")
	assert(conv("The") == "_|∟")
	assert(conv("Lazy") == "(ᒣɥ")
	assert(conv("Dog") == "ꙄT")
}
// assert panics when the supplied condition is false; it is the tiny
// self-test harness used by main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// conv converts a word to its short-hand unicode form:
//   - letters are upper-cased,
//   - vowels are dropped unless they are the first or last character,
//   - consecutive repeats of the same letter collapse to one,
//   - each surviving letter is substituted per the translation table.
func conv(s string) string {
	m := map[rune]rune{
		'A': 'Λ',
		'B': 'L',
		'C': '𐍉',
		'D': 'Ꙅ',
		'E': '∟',
		'F': '⌽',
		'G': 'T',
		'H': '|',
		'I': '⩗',
		'J': ')',
		'K': '<',
		'L': '(',
		'M': 'ᨈ',
		'N': 'ᑎ',
		'O': '┼',
		'P': '⊥',
		'Q': '⩁',
		'R': '\\',
		'S': '⦵',
		'T': '_',
		'U': '⋃',
		'V': '⌵',
		'W': '⌵',
		'X': 'X',
		'Y': 'ɥ',
		'Z': 'ᒣ',
	}
	w := new(bytes.Buffer)
	prev := rune(-1) // last letter emitted, BEFORE substitution
	for i, r := range s {
		if 'a' <= r && r <= 'z' {
			r = r - 'a' + 'A'
		}
		// Skip interior vowels and consecutive duplicate letters.
		// Bug fix: the duplicate check previously compared the raw
		// letter against the SUBSTITUTED previous rune, so repeats like
		// the "ll" in "Hello" were never collapsed unless the letter
		// mapped to itself. Track the pre-substitution rune instead.
		if r == prev || (i > 0 && i+1 < len(s) && strings.ContainsRune("AEIOU", r)) {
			continue
		}
		prev = r
		if t, ok := m[r]; ok {
			r = t
		}
		w.WriteRune(r)
	}
	return w.String()
}
// isvowel reports whether r is an upper-case English vowel.
func isvowel(r rune) bool {
	switch r {
	case 'A', 'E', 'I', 'O', 'U':
		return true
	}
	return false
}
|
package game
import (
"life/engine"
"os"
"github.com/hajimehoshi/ebiten/v2"
)
// Logical screen dimensions (one universe cell per pixel) and the window
// title.
const (
	screenWidth = 320
	screenHeight = 240
	title = "micuffaro's Game of life"
)
// newGame builds a game with a fresh universe, an RGBA pixel buffer
// (4 bytes per cell), and the simulation running.
func newGame() *game {
	return &game{
		universe: engine.NewUniverse(screenWidth, screenHeight),
		pixels:   make([]byte, 4*screenWidth*screenHeight),
		active:   true,
	}
}
// game implements ebiten.Game for Conway's Game of Life.
type game struct {
	universe *engine.Universe // cell grid and update rules
	pixels []byte // RGBA frame buffer, 4 bytes per cell
	active bool // true while the simulation is running (not paused)
}
// drawMouse sets the cell at the current mouse position to alive while
// the left button is held; used to paint cells when paused.
func (g *game) drawMouse() {
	if ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) {
		x, y := ebiten.CursorPosition()
		g.universe.Resurrect(x, y)
	}
}
// keys registers keypresses that control the game:
// Space resumes, S pauses, K pauses and clears,
// R pauses, clears and reseeds, Q quits the process.
func (g *game) keys() {
	switch {
	case ebiten.IsKeyPressed(ebiten.KeySpace):
		g.active = true
	case ebiten.IsKeyPressed(ebiten.KeyS):
		g.active = false
	case ebiten.IsKeyPressed(ebiten.KeyK):
		g.active = false
		g.universe.Nuke()
	case ebiten.IsKeyPressed(ebiten.KeyR):
		g.active = false
		g.universe.Nuke()
		// Bug fix: the seed count was screenWidth*screenWidth, but the
		// universe has screenWidth*screenHeight cells; seed a quarter of
		// the actual cell count.
		g.universe.Init((screenWidth * screenHeight) / 4)
	case ebiten.IsKeyPressed(ebiten.KeyQ):
		g.active = false
		os.Exit(0)
	}
}
// Update handles input, then either advances the simulation one step or
// (when paused) lets the user paint live cells with the mouse.
func (g *game) Update() error {
	g.keys()
	if !g.active {
		g.drawMouse()
		return nil
	}
	g.universe.Update()
	return nil
}
// Draw renders the universe into the pixel buffer — live cells black,
// dead cells white — and pushes it to the screen.
func (g *game) Draw(screen *ebiten.Image) {
	for i, alive := range g.universe.Cells() {
		var c byte = 0xff // dead: white
		if alive {
			c = 0 // alive: black
		}
		g.pixels[4*i] = c
		g.pixels[4*i+1] = c
		g.pixels[4*i+2] = c
		// Bug fix: live cells previously wrote alpha 0 as well, making
		// them fully transparent instead of opaque black.
		g.pixels[4*i+3] = 0xff
	}
	screen.ReplacePixels(g.pixels)
}
// Layout reports the fixed logical screen size, ignoring the outside
// (window) dimensions.
func (g *game) Layout(outsideWidth, outsideHeight int) (int, int) {
	return screenWidth, screenHeight
}
// Run seeds the universe, configures the window and starts the ebiten
// game loop, blocking until the window closes or an error occurs.
func Run() error {
	g := newGame()
	// Bug fix: the seed count was screenWidth*screenWidth, but the
	// universe has screenWidth*screenHeight cells; seed a quarter of the
	// actual cell count.
	g.universe.Init((screenWidth * screenHeight) / 4)
	ebiten.SetWindowSize(screenWidth*2, screenHeight*2)
	ebiten.SetWindowTitle(title)
	return ebiten.RunGame(g)
}
|
package datastore
import (
"errors"
"github.com/RicardoCampos/goauth/oauth2"
)
// inMemoryReferenceTokenRepository stores reference tokens in a plain map
// keyed by token ID. Note: plain map access is not safe for concurrent
// use — callers must serialize access if used from multiple goroutines.
type inMemoryReferenceTokenRepository struct {
	tokens map[string]oauth2.ReferenceToken
}
// AddToken stores the token keyed by its TokenID. Nil tokens are
// rejected, and an uninitialized backing map reports an error.
func (r inMemoryReferenceTokenRepository) AddToken(token oauth2.ReferenceToken) error {
	if token == nil {
		return errors.New("you cannot add an empty token")
	}
	if r.tokens == nil {
		return errors.New("not implemented")
	}
	r.tokens[token.TokenID()] = token
	return nil
}
// GetToken looks a token up by ID, reporting whether it was found; both
// an empty ID and a missing token yield a descriptive error.
func (r inMemoryReferenceTokenRepository) GetToken(tokenID string) (oauth2.ReferenceToken, bool, error) {
	if tokenID == "" {
		return nil, false, errors.New("please provide a valid tokenID")
	}
	token, found := r.tokens[tokenID]
	if !found {
		return nil, false, errors.New("could not find token")
	}
	return token, true, nil
}
// NewInMemoryReferenceTokenRepository returns a repository backed by an
// initialized in-memory map.
func NewInMemoryReferenceTokenRepository() oauth2.ReferenceTokenRepository {
	repo := inMemoryReferenceTokenRepository{
		tokens: map[string]oauth2.ReferenceToken{},
	}
	return repo
}
|
// Model
// The Model component corresponds to all the data-related
// logic that the user works with. This can represent either
// the data that is being transferred between the View and Controller
// components or any other business logic-related data.
// For example, a Customer object will retrieve the customer information
// from the database, manipulate it and update it data back to the
// database or use it to render data.
// View
// The View component is used for all the UI logic of the application.
// For example, the Customer view will include all the UI components such as text boxes,
// dropdowns, etc. that the final user interacts with.
// Controller
// Controllers act as an interface between Model and View components to process all the business logic
// and incoming requests, manipulate data using the Model component and interact with the Views to
// render the final output. For example, the Customer controller will handle all the interactions
// and inputs from the Customer View and update the database using the Customer Model.
// The same controller will be used to view the Customer data
package main
import "fmt"
// sum adds the elements of s and sends the total on c.
func sum(s []int, c chan int) {
	total := 0
	for i := range s {
		total += s[i]
	}
	c <- total
}
// main splits the slice in half, sums each half in its own goroutine,
// and prints both partial sums plus the total.
func main() {
	s := []int{7, 2, 8, -9, 4, 0}
	c := make(chan int)
	go sum(s[:len(s)/2], c)
	go sum(s[len(s)/2:], c)
	// The two partial sums may arrive in either order.
	x, y := <-c, <-c // receive from c
	fmt.Println(x, y, x+y)
}
|
package certgen
import (
"bytes"
"crypto/rand"
"encoding/base64"
"regexp"
"strings"
"text/template"
"path/filepath"
"os"
"io"
"github.com/Azure/acs-engine/pkg/certgen/templates"
"github.com/Azure/acs-engine/pkg/filesystem"
)
// PrepareMasterFiles populates the shared authentication and encryption
// secrets: each is 24 cryptographically random bytes, base64-encoded.
func (c *Config) PrepareMasterFiles() error {
	newSecret := func() (string, error) {
		b := make([]byte, 24)
		if _, err := rand.Read(b); err != nil {
			return "", err
		}
		return base64.StdEncoding.EncodeToString(b), nil
	}
	auth, err := newSecret()
	if err != nil {
		return err
	}
	c.AuthSecret = auth
	enc, err := newSecret()
	if err != nil {
		return err
	}
	c.EncSecret = enc
	return nil
}
// WriteMasterFiles renders every embedded asset under "master/" as a Go
// template (with a QuoteMeta helper available) against the config c, and
// writes the result to fs with the "master/" prefix stripped.
func (c *Config) WriteMasterFiles(fs filesystem.Filesystem) error {
	for _, name := range templates.AssetNames() {
		if !strings.HasPrefix(name, "master/") {
			continue
		}
		tb := templates.MustAsset(name)
		t, err := template.New("template").Funcs(template.FuncMap{
			"QuoteMeta": regexp.QuoteMeta,
		}).Parse(string(tb))
		if err != nil {
			return err
		}
		// Render the template with the full config as its data.
		b := &bytes.Buffer{}
		err = t.Execute(b, c)
		if err != nil {
			return err
		}
		err = fs.WriteFile(strings.TrimPrefix(name, "master/"), b.Bytes(), 0666)
		if err != nil {
			return err
		}
	}
	return nil
}
func FilePathWalkDir(root string) ([]string, error) {
var files []string
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if !info.IsDir() {
files = append(files, path)
}
return nil
})
return files, err
}
// WriteDynamicMasterFiles copies the run-time created master config files
// (assumed under _output/<deployment>/tmp/master/) into fs, stripping the
// directory prefix.
func (c *Config) WriteDynamicMasterFiles(fs filesystem.Filesystem) error {
	root := "_output/" + GetDeploymentName() + "/tmp"
	files, err := FilePathWalkDir(root)
	// Bug fix: the walk error was previously unchecked, so the loop ran
	// over a possibly partial (or empty) file list.
	if err != nil {
		return err
	}
	prefix := root + "/" + "master/"
	for _, name := range files {
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		// Bug fix: the os.Open and io.Copy errors were previously
		// discarded, silently writing empty or truncated files.
		f, err := os.Open(name)
		if err != nil {
			return err
		}
		b := bytes.NewBuffer(nil)
		_, err = io.Copy(b, f)
		f.Close()
		if err != nil {
			return err
		}
		if err := fs.WriteFile(strings.TrimPrefix(name, prefix), b.Bytes(), 0666); err != nil {
			return err
		}
	}
	return nil
}
// WriteNodeFiles renders every embedded asset under "node/" as a Go
// template and writes the result to fs with the "node/" prefix stripped.
// NOTE(review): unlike WriteMasterFiles, the template is executed with an
// empty struct rather than c — presumably node templates take no data;
// confirm this is intentional.
func (c *Config) WriteNodeFiles(fs filesystem.Filesystem) error {
	for _, name := range templates.AssetNames() {
		if !strings.HasPrefix(name, "node/") {
			continue
		}
		tb := templates.MustAsset(name)
		t, err := template.New("template").Funcs(template.FuncMap{
			"QuoteMeta": regexp.QuoteMeta,
		}).Parse(string(tb))
		if err != nil {
			return err
		}
		b := &bytes.Buffer{}
		err = t.Execute(b, struct{}{})
		if err != nil {
			return err
		}
		err = fs.WriteFile(strings.TrimPrefix(name, "node/"), b.Bytes(), 0666)
		if err != nil {
			return err
		}
	}
	return nil
}
|
/**
* Copyright 2019 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package dbretry contains structs that implement various db interfaces as
// well as consume them. They allow consumers to easily try to interact with
// the database a configurable number of times, with configurable backoff
// options and metrics.
package dbretry
import (
"time"
"github.com/cenkalti/backoff/v3"
"github.com/go-kit/kit/metrics/provider"
db "github.com/xmidt-org/codex-db"
)
// retryConfig holds the tunables shared by the retry services: the
// exponential backoff schedule and the metrics sink.
type retryConfig struct {
	backoffConfig backoff.ExponentialBackOff
	measures Measures
}

// Option is the function used to configure the retry object.
type Option func(r *retryConfig)
// WithBackoff sets the exponential backoff to use when retrying. If this
// isn't called, we use the backoff package's default ExponentialBackoff
// configuration. If any values are considered invalid, they are replaced with
// those defaults.
func WithBackoff(b backoff.ExponentialBackOff) Option {
	return func(r *retryConfig) {
		r.backoffConfig = b
		// NOTE(review): these guards only replace strictly negative
		// values (Multiplier < 1), so zero-valued intervals pass through
		// unchanged — confirm that zero is a valid setting here.
		if r.backoffConfig.InitialInterval < 0 {
			r.backoffConfig.InitialInterval = backoff.DefaultInitialInterval
		}
		if r.backoffConfig.RandomizationFactor < 0 {
			r.backoffConfig.RandomizationFactor = backoff.DefaultRandomizationFactor
		}
		if r.backoffConfig.Multiplier < 1 {
			r.backoffConfig.Multiplier = backoff.DefaultMultiplier
		}
		if r.backoffConfig.MaxInterval < 0 {
			r.backoffConfig.MaxInterval = backoff.DefaultMaxInterval
		}
		if r.backoffConfig.MaxElapsedTime < 0 {
			r.backoffConfig.MaxElapsedTime = backoff.DefaultMaxElapsedTime
		}
		if r.backoffConfig.Clock == nil {
			r.backoffConfig.Clock = backoff.SystemClock
		}
	}
}
// WithMeasures sets the metrics provider; a nil provider leaves the
// existing measures untouched.
func WithMeasures(p provider.Provider) Option {
	return func(r *retryConfig) {
		if p == nil {
			return
		}
		r.measures = NewMeasures(p)
	}
}
// RetryInsertService is a wrapper for a db.Inserter. If inserting fails, the
// retry service will continue to try until the configurable max elapsed time
// is reached. The retries will exponentially backoff in the manner configured.
// To read more about this, see the backoff package GoDoc:
// https://godoc.org/gopkg.in/cenkalti/backoff.v3
type RetryInsertService struct {
	inserter db.Inserter // the wrapped inserter actually doing the work
	config retryConfig // backoff schedule and metrics sink
}
// AddRetryMetric is a function to add to our metrics when we retry. The
// function is passed to the backoff package and is called when we are retrying;
// the error and delay arguments it receives are ignored.
func (ri RetryInsertService) AddRetryMetric(_ error, _ time.Duration) {
	ri.config.measures.SQLQueryRetryCount.With(db.TypeLabel, db.InsertType).Add(1.0)
}
// InsertRecords uses the inserter to insert the records and uses the
// ExponentialBackoff to try again if inserting fails. The end-count
// metric is bumped exactly once per call, whether or not it succeeded.
func (ri RetryInsertService) InsertRecords(records ...db.Record) error {
	insertFunc := func() error {
		return ri.inserter.InsertRecords(records...)
	}
	// with every insert, we have to make a copy of the ExponentialBackoff
	// struct, as it is not thread safe, and each thread needs its own clock.
	b := ri.config.backoffConfig
	err := backoff.RetryNotify(insertFunc, &b, ri.AddRetryMetric)
	ri.config.measures.SQLQueryEndCount.With(db.TypeLabel, db.InsertType).Add(1.0)
	return err
}
// CreateRetryInsertService wraps inserter in a RetryInsertService,
// starting from the backoff package's defaults and applying the supplied
// options in order.
func CreateRetryInsertService(inserter db.Inserter, options ...Option) RetryInsertService {
	cfg := retryConfig{
		backoffConfig: *backoff.NewExponentialBackOff(),
	}
	for _, opt := range options {
		opt(&cfg)
	}
	return RetryInsertService{
		inserter: inserter,
		config:   cfg,
	}
}
|
package version
import (
"database/sql"
"errors"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql"
_ "github.com/wangfmD/rvs/log"
"github.com/wangfmD/rvs/setting"
"github.com/wangfmD/rvs/sshv"
"log"
"net/http"
)
// QueryVersionById responds with the platform and media version maps for
// the version record named by the :versionid route parameter.
func QueryVersionById(c *gin.Context) {
	id := c.Param("versionid")
	pVersMap := sqlQueryVersionById(id)
	mVersMap := sqlQueryMediaVersionById(id)
	vers := make(map[string]map[string]string)
	vers["platform"] = pVersMap
	vers["media"] = mVersMap
	c.JSON(http.StatusOK, gin.H{
		"status": "success",
		"result": vers,
	})
}
// QueryServerAddrs responds with the addresses of every configured
// platform server.
func QueryServerAddrs(c *gin.Context) {
	platforms := setting.GetPlatformAddrs()
	addrs := make([]string, 0, len(platforms))
	for _, p := range platforms {
		addrs = append(addrs, p.Addr)
	}
	log.Println(addrs)
	c.JSON(http.StatusOK, gin.H{
		"addrs": addrs,
	})
}
// AddVersion collects component versions from the server named by the
// :addr route parameter (over SSH), stores them as the "last" version
// row, and reports the collected map. An unknown address yields a
// "failed" response.
func AddVersion(c *gin.Context) {
	addr := c.Param("addr")
	// TODO: validate addr
	var m map[string]string
	var err error
	// Look the address up in the configured platform servers to obtain
	// its SSH credentials.
	f := func() (*setting.ServerOs, error) {
		p := setting.GetPlatformAddrs()
		for _, v := range p {
			if v.Addr == addr {
				return v, nil
			}
		}
		return nil, errors.New("error address")
	}
	server, err := f()
	if err != nil {
		m = make(map[string]string)
		err = errors.New("查询服务器地址无效")
		m["msg"] = "查询服务器地址无效"
	} else {
		m, err = sshv.GetVersionMaps(server.Name, server.Pwd, addr)
	}
	if err != nil {
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"status": "failed",
			"msg": m,
			"err": err.Error(),
		})
	} else {
		// NOTE(review): the sqlAddversion error is logged but the
		// response still reports success — confirm this is intended.
		err = sqlAddversion(addr, m)
		log.Println(err)
		c.JSON(http.StatusOK, gin.H{
			"status": "success",
			"msg": m,
		})
	}
}
// QueryVersion responds with all stored "last" manager version rows.
func QueryVersion(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{
		"versions": sqlQueryVersion(),
	})
}
// sqlQueryVersion reads the latest ("last") manager-type version rows and
// returns each row as a column-name → value map. Any database error
// aborts the process via CheckErr.
func sqlQueryVersion() []map[string]string {
	db, err := sql.Open("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	CheckErr(err)
	defer db.Close()
	// Renamed from "sql": the variable previously shadowed the
	// database/sql package inside this function.
	query := `select id,
	serveraddr,
	agent,
	interact,
	interactbusiness,
	jycenter,
	jyresource,
	middlecas,
	middlecenter,
	middlecenterfile,
	middlecenterres,
	middleclient,
	middledriver,
	middleresource,
	middlewaremcu,
	mysql,
	nginx,
	openfire,
	redis,
	update_time from server_version where id='last' and type='mgr'`
	rows, err := db.Query(query)
	CheckErr(err)
	defer rows.Close()
	// Scan every column generically: each value lands in an interface{}
	// slot, converted to string when non-NULL.
	columns, _ := rows.Columns()
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	resarr := []map[string]string{}
	for rows.Next() {
		// Bug fix: the Scan error was previously ignored.
		CheckErr(rows.Scan(scanArgs...))
		record := make(map[string]string)
		for i, col := range values {
			if col != nil {
				record[columns[i]] = string(col.([]byte))
			}
		}
		resarr = append(resarr, record)
	}
	// Bug fix: iteration errors were previously undetected.
	CheckErr(rows.Err())
	return resarr
}
// CheckErr aborts the entire process (log.Fatal calls os.Exit) when err
// is non-nil; it is a no-op for nil errors.
func CheckErr(err error) {
	if err != nil {
		log.Fatal(err)
	}
}
// sqlAddversion replaces the "last" manager version row for addr with the
// freshly collected component versions in m.
// (Inserts version info into server_version when a test server is queried.)
func sqlAddversion(addr string, m map[string]string) error {
	db, err := sql.Open("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	if err != nil {
		// Bug fix: log.Fatal killed the whole process on any DB error
		// and made every "return err" below unreachable; log and
		// propagate instead so the caller's error handling runs.
		log.Println(err)
		return err
	}
	defer db.Close()
	// delete from server_version where id='last';
	delStmt := `delete from server_version where id='last' and serveraddr=?`
	if _, err = db.Exec(delStmt, addr); err != nil {
		log.Println(err)
		return err
	}
	insStmt := `
	INSERT INTO server_version
	(id,
	serveraddr,
	type,
	agent,
	interact,
	interactbusiness,
	jycenter,
	jyresource,
	middlecas,
	middlecenter,
	middlecenterfile,
	middlecenterres,
	middleclient,
	middledriver,
	middleresource,
	middlewaremcu,
	middledatabase,
	mysql,
	nginx,
	openfire,
	redis,
	update_time)
	VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,now());`
	rs, err := db.Exec(insStmt, "last", addr, "mgr", m["agent"], m["interact"], m["interactbusiness"], m["jycenter"], m["jyresource"], m["middlecas"], m["middlecenter"], m["middlecenterfile"], m["middlecenterres"], m["middleclient"], m["middledriver"], m["middleresource"], m["middleware-mcu"], m["middledatabase"], m["mysql"], m["nginx"], m["openfire"], m["redis"])
	if err != nil {
		log.Println(err)
		return err
	}
	i, err := rs.LastInsertId()
	log.Println(i)
	if err != nil {
		log.Println(err)
		return err
	}
	return nil
}
// SqlAddVersionByExecCase inserts a server_version row keyed by the given
// id when a test case is executed.
// NOTE(review): near-duplicate of sqlAddversion (minus the delete) —
// consider extracting a shared insert helper. Also note log.Fatal exits
// the process, making the "return err" statements below unreachable.
func SqlAddVersionByExecCase(id string, addr string, m map[string]string) error {
	db, err := sql.Open("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	if err != nil {
		log.Fatal(err)
		return err
	}
	sql := `
	INSERT INTO server_version
	(id,
	serveraddr,
	type,
	agent,
	interact,
	interactbusiness,
	jycenter,
	jyresource,
	middlecas,
	middlecenter,
	middlecenterfile,
	middlecenterres,
	middleclient,
	middledriver,
	middleresource,
	middlewaremcu,
	middledatabase,
	mysql,
	nginx,
	openfire,
	redis,
	update_time)
	VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,now());`
	rs, err := db.Exec(sql, id, addr, "mgr", m["agent"], m["interact"], m["interactbusiness"], m["jycenter"], m["jyresource"], m["middlecas"], m["middlecenter"], m["middlecenterfile"], m["middlecenterres"], m["middleclient"], m["middledriver"], m["middleresource"], m["middleware-mcu"], m["middledatabase"], m["mysql"], m["nginx"], m["openfire"], m["redis"])
	if err != nil {
		log.Fatal(err)
		return err
	}
	i, err := rs.LastInsertId()
	log.Println(i)
	if err != nil {
		log.Fatal(err)
		return err
	}
	err = db.Close()
	if err != nil {
		log.Fatal(err)
		return err
	}
	return nil
}
// uuid1 := uuid.NewV4()
// id = uuid1.String()
// sqlQueryVersionById reads the manager-type version row with the given
// id and returns it as a column-name → value map (empty string for NULL
// columns; empty map when no row matches). A DB error aborts the process
// via CheckErr.
// NOTE(review): the local variable "sql" shadows the database/sql
// package within this function.
func sqlQueryVersionById(id string) map[string]string {
	db, err := sql.Open("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	CheckErr(err)
	sql := `select id,
	serveraddr,
	agent,
	interact,
	interactbusiness,
	jycenter,
	jyresource,
	middlecas,
	middlecenter,
	middlecenterfile,
	middlecenterres,
	middleclient,
	middledriver,
	middleresource,
	middlewaremcu,
	mysql,
	nginx,
	openfire,
	redis,
	update_time from server_version where id=? and type='mgr'`
	rows, err := db.Query(sql, id)
	CheckErr(err)
	// Scan every column generically into interface{} slots.
	columns, _ := rows.Columns()
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	record := make(map[string]string)
	for rows.Next() {
		err = rows.Scan(scanArgs...)
		for i, col := range values {
			if col != nil {
				record[columns[i]] = string(col.([]byte))
			} else {
				record[columns[i]] = ""
			}
		}
	}
	db.Close()
	return record
}
// sqlQueryMediaVersionById reads the media-type version row with the
// given id and returns it as a column-name → value map (empty string for
// NULL columns; empty map when no row matches). A DB error aborts the
// process via CheckErr.
func sqlQueryMediaVersionById(id string) map[string]string {
	db, err := sql.Open("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	CheckErr(err)
	sql := `select id,
	serveraddr,
	filesrv,
	ftp,
	mbs,
	nginx,
	update_time from server_version where id =? and type='media'`
	rows, err := db.Query(sql, id)
	CheckErr(err)
	// Scan every column generically into interface{} slots.
	columns, _ := rows.Columns()
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	record := make(map[string]string)
	for rows.Next() {
		err = rows.Scan(scanArgs...)
		for i, col := range values {
			if col != nil {
				record[columns[i]] = string(col.([]byte))
			} else {
				record[columns[i]] = ""
			}
		}
	}
	db.Close()
	return record
}
|
package storage
import (
"fmt"
"time"
"github.com/gofrs/uuid"
"github.com/sasha-s/go-deadlock"
)
// CropEventStorage holds crop events behind a deadlock-detecting RWMutex.
type CropEventStorage struct {
	Lock *deadlock.RWMutex
	CropEvents []CropEvent
}

// CropReadStorage holds the crop read-model keyed by UID, guarded by a
// deadlock-detecting RWMutex.
type CropReadStorage struct {
	Lock *deadlock.RWMutex
	CropReadMap map[uuid.UUID]CropRead
}
// CreateCropReadStorage builds a CropReadStorage with an empty map and a
// deadlock-detecting RWMutex. Note it mutates the package-global
// deadlock.Opts (10s timeout and a logging callback).
func CreateCropReadStorage() *CropReadStorage {
	deadlock.Opts.DeadlockTimeout = 10 * time.Second
	deadlock.Opts.OnPotentialDeadlock = func() {
		fmt.Println("CROP READ STORAGE DEADLOCK!")
	}
	return &CropReadStorage{
		CropReadMap: map[uuid.UUID]CropRead{},
		Lock:        new(deadlock.RWMutex),
	}
}
// CropActivityStorage holds the crop activity list behind a
// deadlock-detecting RWMutex.
type CropActivityStorage struct {
	Lock *deadlock.RWMutex
	CropActivityMap []CropActivity
}
// CreateCropActivityStorage builds a CropActivityStorage with an empty
// activity list and a deadlock-detecting RWMutex. Note it mutates the
// package-global deadlock.Opts (10s timeout and a logging callback).
func CreateCropActivityStorage() *CropActivityStorage {
	deadlock.Opts.DeadlockTimeout = 10 * time.Second
	deadlock.Opts.OnPotentialDeadlock = func() {
		fmt.Println("CROP LIST STORAGE DEADLOCK!")
	}
	return &CropActivityStorage{
		CropActivityMap: []CropActivity{},
		Lock:            new(deadlock.RWMutex),
	}
}
|
package rados
import (
"io"
)
const (
	// objectReaderBufferSize is the chunk size for each backend read.
	objectReaderBufferSize = 16 * 1024
)

// objectReader is a buffered io.Reader over a fixed-size window of a
// Rados object.
type objectReader struct {
	ctx *Context // backend used for ReadAt calls
	key string // object key
	size uint64 // total bytes this reader will deliver
	read uint64 // bytes fetched from the backend so far (incl. buffered)
	buf []byte // read-ahead buffer
	fill int // valid bytes in buf
	ofs int // bytes of buf already delivered to the caller
}
// OpenWithSize opens a Rados object for reading at a specific offset
// with a given size.
//
// It does not test whether the object exists; if it does not, the first
// read will return os.ErrNotExist.
func (c *Context) OpenWithSize(key string, off, size uint64) io.Reader {
	r := &objectReader{
		ctx:  c,
		key:  key,
		read: off,
		size: size,
		buf:  make([]byte, objectReaderBufferSize),
	}
	return r
}
// Open opens a Rados object for reading at a specific offset, stat-ing
// the object once to learn its size. The size is not re-checked later;
// if the object grows, instantiate a new reader.
func (c *Context) Open(key string, off uint64) (io.Reader, error) {
	fi, err := c.Stat(key)
	if err != nil {
		return nil, err
	}
	return c.OpenWithSize(key, off, uint64(fi.Size())), nil
}
// Read implements io.Reader. Bytes are served from the staging buffer and the
// buffer is refilled from librados when drained.
//
// r.read is the absolute object offset already fetched; subtracting the bytes
// still unconsumed in the buffer (fill-ofs) yields the caller's logical
// position, so EOF fires exactly when that position reaches r.size.
func (r *objectReader) Read(buffer []byte) (n int, err error) {
	if r.read-uint64(r.fill-r.ofs) == r.size {
		return 0, io.EOF
	}
	for n == 0 {
		// Fill what we have in the buffer.
		if r.fill > r.ofs {
			n = r.fill - r.ofs
			if n > len(buffer) {
				n = len(buffer)
			}
			copy(buffer[:n], r.buf[r.ofs:r.ofs+n])
			r.ofs += n
			return
		}
		// Determine how much we can safely read as librados will otherwise
		// block forever if the object size does not grow and we are trying to
		// read beyond the end.
		rBuf := r.buf
		if len(r.buf) > int(r.size-r.read) {
			rBuf = r.buf[:r.size-r.read]
		}
		// Perform the read.
		var rn int
		if rn, err = r.ctx.ReadAt(r.key, rBuf, r.read); err != nil {
			return
		}
		// NOTE(review): if ReadAt ever returned 0 bytes without an error
		// before r.size is reached, this loop would spin; presumably librados
		// blocks until data is available — confirm against the binding.
		r.fill = rn
		r.ofs = 0
		r.read += uint64(rn)
	}
	return
}
|
package handler
import (
"encoding/json"
"errors"
"fmt"
"github.com/golang/protobuf/ptypes"
"github.com/jinmukeji/jiujiantang-services/analysis/aws"
"github.com/jinmukeji/jiujiantang-services/pkg/rpc"
"github.com/jinmukeji/jiujiantang-services/service/auth"
"github.com/jinmukeji/jiujiantang-services/service/mysqldb"
corepb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/core/v1"
context "golang.org/x/net/context"
)
// maxRecords is the upper bound on the number of measurement history records
// returned in one page.
const maxRecords = 100

// minRecords is the lower bound on the measurement history page size.
const minRecords = 1

// Answer is one answered questionnaire entry stored with a record: the
// question key plus the values the user selected.
type Answer struct {
	QuestionKey string `json:"question_key"`
	Values []string `json:"values"`
}
// SearchHistory looks up measurement history records.
//
// Two lookup paths exist: a WeChat access token resolves the user through the
// request OpenID and pages purely by user id, while every other token type is
// validated and pages by explicit user id, offset/size and start/end time
// range. Each record is then expanded (waveform download, questionnaire
// answers/tags, stress state, physical dialectics) into a
// corepb.RecordHistory on the response.
func (j *JinmuHealth) SearchHistory(ctx context.Context, req *corepb.SearchHistoryRequest, resp *corepb.SearchHistoryResponse) error {
	l := rpc.ContextLogger(ctx)
	accessTokenType, _ := auth.AccessTokenTypeFromContext(ctx)
	var records []mysqldb.Record
	// TODO: remove the duplication between the two lookup branches.
	if accessTokenType == AccessTokenTypeWeChatValue {
		wxUser, errFindWXUserByOpenID := j.datastore.FindWXUserByOpenID(ctx, req.OpenId)
		if errFindWXUserByOpenID != nil {
			return NewError(ErrDatabase, fmt.Errorf("failed to find wx user by openID %s: %s", req.OpenId, errFindWXUserByOpenID.Error()))
		}
		r, errFindValidPaginatedRecordsByUserID := j.datastore.FindValidPaginatedRecordsByUserID(ctx, int32(wxUser.UserID))
		if errFindValidPaginatedRecordsByUserID != nil {
			return NewError(ErrDatabase, fmt.Errorf("failed to find valid paginated records by user %d: %s", wxUser.UserID, errFindValidPaginatedRecordsByUserID.Error()))
		}
		records = r
	} else {
		_, err := validateSearchHistoryRequest(req)
		if err != nil {
			return NewError(ErrValidateSearchHistoryRequestFailure, err)
		}
		// Timestamps were already validated above, so the errors are ignored.
		start, _ := ptypes.Timestamp(req.StartTime)
		end, _ := ptypes.Timestamp(req.EndTime)
		r, errFindValidPaginatedRecordsByDateRange := j.datastore.FindValidPaginatedRecordsByDateRange(ctx, int(req.UserId), int(req.Offset), int(req.Size), start, end)
		if errFindValidPaginatedRecordsByDateRange != nil {
			return NewError(ErrDatabase, fmt.Errorf("failed to find valid paginated records by user %d: %s", req.UserId, errFindValidPaginatedRecordsByDateRange.Error()))
		}
		records = r
	}
	var recordsReply []*corepb.RecordHistory
	for _, record := range records {
		// TODO: add more fields to the reply as needed.
		createAt, _ := ptypes.TimestampProto(record.CreatedAt)
		// Waveform download is best-effort: on failure, log and fall back to
		// an empty wave rather than failing the whole request.
		waveData, errgetPulseTestDataIntArray := getPartialPulseTestDataIntArray(record.S3Key, j.awsClient)
		if errgetPulseTestDataIntArray != nil {
			cause := fmt.Sprintf("failed to getPulseTestDataIntArray, record_id: %d, data length is %d", record.RecordID, len(waveData))
			l.WithError(errgetPulseTestDataIntArray).Warn(cause)
			waveData = make([]int32, 0)
		}
		var answers []Answer
		// An empty record.Answers means there is nothing to parse; skip and
		// keep the zero-value defaults.
		if record.Answers != "" {
			errUnmarshal := json.Unmarshal([]byte(record.Answers), &answers)
			if errUnmarshal != nil {
				return NewError(ErrJSONUnmarshalFailure, fmt.Errorf("failed to unmarshal answers %s: %s", []byte(record.Answers), errUnmarshal.Error()))
			}
		}
		// Resolve each answered choice to its display tag, when one exists.
		tags := make([]*corepb.GeneralExplain, 0)
		for _, answer := range answers {
			for _, value := range answer.Values {
				choice := j.analysisEngine.QuestionDoc.GetQuestionChoice(answer.QuestionKey, value)
				if choice != nil && choice.Tag != nil {
					tags = append(tags, &corepb.GeneralExplain{
						Key:     choice.Tag.Key,
						Label:   choice.Tag.Label,
						Content: choice.Tag.Content,
					})
				}
			}
		}
		var stressState = make(map[string]bool)
		if record.StressState != "" {
			errUnmarshal := json.Unmarshal([]byte(record.StressState), &stressState)
			// NOTE(review): a malformed stress state silently drops the whole
			// record from the reply — confirm that is intended.
			if errUnmarshal != nil {
				continue
			}
		}
		body := record.AnalyzeBody
		var physicalDialectics []string
		if body != "" {
			var analysisReportRequestBody AnalysisReportRequestBody
			errUnmarshal := json.Unmarshal([]byte(body), &analysisReportRequestBody)
			// NOTE(review): same silent-drop behavior as stress state above.
			if errUnmarshal != nil {
				continue
			}
			physicalDialectics = make([]string, len(analysisReportRequestBody.PhysicalDialectics))
			for idx, pd := range analysisReportRequestBody.PhysicalDialectics {
				physicalDialectics[idx] = pd.Key
			}
		}
		protoFinger, errMapDBFingerToProto := mapDBFingerToProto(record.Finger)
		if errMapDBFingerToProto != nil {
			return NewError(ErrInvalidFinger, errMapDBFingerToProto)
		}
		recordsReply = append(recordsReply, &corepb.RecordHistory{
			C0: Int32ValBoundedBy10FromFloat(record.C0),
			C1: Int32ValBoundedBy10FromFloat(record.C1),
			C2: Int32ValBoundedBy10FromFloat(record.C2),
			C3: Int32ValBoundedBy10FromFloat(record.C3),
			C4: Int32ValBoundedBy10FromFloat(record.C4),
			C5: Int32ValBoundedBy10FromFloat(record.C5),
			C6: Int32ValBoundedBy10FromFloat(record.C6),
			C7: Int32ValBoundedBy10FromFloat(record.C7),
			G0: record.G0,
			G1: record.G1,
			G2: record.G2,
			G3: record.G3,
			G4: record.G4,
			G5: record.G5,
			G6: record.G6,
			G7: record.G7,
			RecordId:           int32(record.RecordID),
			Finger:             protoFinger,
			Info:               waveData,
			CreatedTime:        createAt,
			Hr:                 record.HeartRate,
			AppHr:              record.AppHeartRate,
			RecordType:         int32(record.RecordType),
			Remark:             record.Remark,
			IsSportOrDrunk:     corepb.Status(record.IsSportOrDrunk),
			Cold:               corepb.Status(record.Cold),
			MenstrualCycle:     corepb.Status(record.MenstrualCycle),
			OvipositPeriod:     corepb.Status(record.OvipositPeriod),
			Lactation:          corepb.Status(record.Lactation),
			Pregnancy:          corepb.Status(record.Pregnancy),
			CmStatusA:          corepb.Status(record.StatusA),
			CmStatusB:          corepb.Status(record.StatusB),
			CmStatusC:          corepb.Status(record.StatusC),
			CmStatusD:          corepb.Status(record.StatusD),
			CmStatusE:          corepb.Status(record.StatusE),
			CmStatusF:          corepb.Status(record.StatusF),
			Tags:               tags,
			HasPaid:            record.HasPaid,
			ShowFullReport:     record.ShowFullReport,
			StressStatus:       stressState,
			HasStressState:     record.HasStressState,
			PhysicalDialectics: physicalDialectics,
		})
	}
	resp.RecordHistories = recordsReply
	return nil
}
// GetMeasurementRecord loads a single measurement record into resp.
//
// Unless the access token is an L-type or WeChat token, the caller must
// belong to the same organization as the record's owner.
func (j *JinmuHealth) GetMeasurementRecord(ctx context.Context, req *corepb.GetMeasurementRecordRequest, resp *corepb.GetMeasurementRecordResponse) error {
	recordID := req.RecordId
	isExist, err := j.datastore.ExistRecordByRecordID(ctx, recordID)
	// BUG FIX: the original merged the error and not-found cases into one
	// branch and always called err.Error(), which panics with a nil err when
	// the record merely does not exist.
	if err != nil {
		return NewError(ErrGetRecordFailure, fmt.Errorf("failed to check record existence by recordID %d: %s", recordID, err.Error()))
	}
	if !isExist {
		return NewError(ErrGetRecordFailure, fmt.Errorf("record %d does not exist", recordID))
	}
	accessTokenType, _ := auth.AccessTokenTypeFromContext(ctx)
	// TODO: revisit this token-type exemption later.
	if accessTokenType != AccessTokenTypeLValue && accessTokenType != AccessTokenTypeWeChatValue {
		userID, errGetUserIDByRecordID := j.datastore.GetUserIDByRecordID(ctx, recordID)
		if errGetUserIDByRecordID != nil {
			return NewError(ErrDatabase, fmt.Errorf("failed to get userID by record %d: %s", recordID, errGetUserIDByRecordID.Error()))
		}
		ownerID, ok := auth.UserIDFromContext(ctx)
		if !ok {
			return NewError(ErrInvalidUser, errors.New("failed to get userID from context"))
		}
		// BUG FIX: the organization lookup errors were discarded; a failed
		// lookup would dereference a nil organization below.
		userOrganization, errFindUserOrg := j.datastore.FindOrganizationByUserID(ctx, int(userID))
		if errFindUserOrg != nil {
			return NewError(ErrDatabase, fmt.Errorf("failed to find organization by user %d: %s", userID, errFindUserOrg.Error()))
		}
		ownerOrganization, errFindOwnerOrg := j.datastore.FindOrganizationByUserID(ctx, int(ownerID))
		if errFindOwnerOrg != nil {
			return NewError(ErrDatabase, fmt.Errorf("failed to find organization by user %d: %s", ownerID, errFindOwnerOrg.Error()))
		}
		if ownerOrganization.OrganizationID != userOrganization.OrganizationID {
			return NewError(ErrInvalidUser, fmt.Errorf("this user_id %d cannot get measurement by record_id %d", userID, recordID))
		}
	}
	record, errGetRecordByRecordID := j.datastore.FindRecordByID(ctx, int(recordID))
	if errGetRecordByRecordID != nil {
		return NewError(ErrDatabase, fmt.Errorf("failed to find record by recordID %d: %s", recordID, errGetRecordByRecordID.Error()))
	}
	resp.AppHr = record.AppHeartRate
	resp.Hr = int32(record.HeartRate)
	resp.C0 = Int32ValBoundedBy10FromFloat(record.C0)
	resp.C1 = Int32ValBoundedBy10FromFloat(record.C1)
	resp.C2 = Int32ValBoundedBy10FromFloat(record.C2)
	resp.C3 = Int32ValBoundedBy10FromFloat(record.C3)
	resp.C4 = Int32ValBoundedBy10FromFloat(record.C4)
	resp.C5 = Int32ValBoundedBy10FromFloat(record.C5)
	resp.C6 = Int32ValBoundedBy10FromFloat(record.C6)
	resp.C7 = Int32ValBoundedBy10FromFloat(record.C7)
	protoFinger, errMapDBFingerToProto := mapDBFingerToProto(record.Finger)
	if errMapDBFingerToProto != nil {
		return NewError(ErrInvalidFinger, errMapDBFingerToProto)
	}
	resp.Finger = protoFinger
	// Waveform download is best-effort: on failure return an empty wave.
	dataArray, errgetPulseTestDataIntArray := getPartialPulseTestDataIntArray(record.S3Key, j.awsClient)
	if errgetPulseTestDataIntArray != nil {
		resp.Info = []int32{}
	} else {
		resp.Info = dataArray
	}
	resp.Answers = record.Answers
	return nil
}
// validateSearchHistoryRequest checks that the request carries parseable
// start/end timestamps and a page size within [minRecords, maxRecords];
// a size of -1 is accepted as-is.
func validateSearchHistoryRequest(req *corepb.SearchHistoryRequest) (bool, error) {
	if _, errStart := ptypes.Timestamp(req.StartTime); errStart != nil {
		return false, fmt.Errorf("failed to parse timestamp of start time %s: %s", req.StartTime, errStart.Error())
	}
	if _, errEnd := ptypes.Timestamp(req.EndTime); errEnd != nil {
		return false, fmt.Errorf("failed to parse timestamp of end time %s: %s", req.EndTime, errEnd.Error())
	}
	outOfRange := req.Size > maxRecords || req.Size < minRecords
	if req.Size != -1 && outOfRange {
		return false, fmt.Errorf("size %d exceeds the maximum or minimum limit", req.Size)
	}
	return true, nil
}
// getPartialPulseTestDataIntArray downloads the pulse-test raw data stored
// under s3Key and returns the partial waveform extracted from its payloads.
func getPartialPulseTestDataIntArray(s3Key string, client aws.PulseTestRawDataS3Client) ([]int32, error) {
	raw, downloadErr := client.Download(s3Key)
	if downloadErr != nil {
		return []int32{}, fmt.Errorf("failed to download raw data of s3key %s: %s", s3Key, downloadErr.Error())
	}
	return getPartialeWaveData(raw.Payloads)
}
// DeleteRecord deletes a measurement record after verifying that it belongs
// to the requesting user.
func (j *JinmuHealth) DeleteRecord(ctx context.Context, req *corepb.DeleteRecordRequest, resp *corepb.DeleteRecordResponse) error {
	// BUG FIX: the lookup error was previously discarded; on a database
	// failure userID would be zero, so the ownership check could both
	// misreport ownership and mask the real cause.
	userID, err := j.datastore.GetUserIDByRecordID(ctx, req.RecordId)
	if err != nil {
		return NewError(ErrDatabase, fmt.Errorf("failed to get userID by record %d: %s", req.RecordId, err.Error()))
	}
	if userID != req.UserId {
		return NewError(ErrRecordNotBelongToUser, fmt.Errorf("failed to get user by record %d", req.RecordId))
	}
	return j.datastore.DeleteRecord(ctx, req.RecordId)
}
|
/*
The MIT License (MIT)
Copyright (c) 2019 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package controllers
import (
"context"
"fmt"
"reflect"
databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1"
dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models"
)
// submit creates (or re-creates) the Databricks cluster backing the Dcluster
// resource and stores the resulting cluster info on the resource status.
func (r *DclusterReconciler) submit(instance *databricksv1alpha1.Dcluster) error {
	r.Log.Info(fmt.Sprintf("Create cluster %s", instance.GetName()))
	// The Databricks cluster is named after the Kubernetes resource.
	instance.Spec.ClusterName = instance.GetName()
	// If a cluster was already provisioned for this resource, tear it down
	// before creating its replacement.
	if instance.Status != nil && instance.Status.ClusterInfo != nil && instance.Status.ClusterInfo.ClusterID != "" {
		err := r.APIClient.Clusters().PermanentDelete(instance.Status.ClusterInfo.ClusterID)
		if err != nil {
			return err
		}
	}
	clusterInfo, err := r.createCluster(instance)
	if err != nil {
		return err
	}
	var info databricksv1alpha1.DclusterInfo
	instance.Status = &databricksv1alpha1.DclusterStatus{
		ClusterInfo: info.FromDataBricksClusterInfo(clusterInfo),
	}
	return r.Update(context.Background(), instance)
}
// refresh synchronizes the Dcluster status with the live cluster info from
// the Databricks API, issuing an Update only when something actually changed.
func (r *DclusterReconciler) refresh(instance *databricksv1alpha1.Dcluster) error {
	r.Log.Info(fmt.Sprintf("Refresh cluster %s", instance.GetName()))
	if instance.Status == nil || instance.Status.ClusterInfo == nil {
		return nil
	}
	clusterInfo, err := r.getCluster(instance.Status.ClusterInfo.ClusterID)
	if err != nil {
		return err
	}
	// BUG FIX: the original compared instance.Status.ClusterInfo (a
	// *databricksv1alpha1.DclusterInfo) against &clusterInfo (a
	// *dbmodels.ClusterInfo). reflect.DeepEqual is always false for values of
	// different types, so every reconcile triggered an Update. Convert first
	// and compare like with like.
	var info databricksv1alpha1.DclusterInfo
	converted := info.FromDataBricksClusterInfo(clusterInfo)
	if reflect.DeepEqual(instance.Status.ClusterInfo, converted) {
		return nil
	}
	instance.Status = &databricksv1alpha1.DclusterStatus{
		ClusterInfo: converted,
	}
	return r.Update(context.Background(), instance)
}
// delete permanently removes the backing Databricks cluster, recording the
// call as a "dclusters"/"delete" execution metric. A missing status means
// there is nothing to tear down.
func (r *DclusterReconciler) delete(instance *databricksv1alpha1.Dcluster) error {
	r.Log.Info(fmt.Sprintf("Deleting cluster %s", instance.GetName()))
	status := instance.Status
	if status == nil || status.ClusterInfo == nil {
		return nil
	}
	execution := NewExecution("dclusters", "delete")
	deleteErr := r.APIClient.Clusters().PermanentDelete(status.ClusterInfo.ClusterID)
	execution.Finish(deleteErr)
	return deleteErr
}
// getCluster fetches the cluster info for clusterID, recording the call as a
// "dclusters"/"get" execution metric.
func (r *DclusterReconciler) getCluster(clusterID string) (dbmodels.ClusterInfo, error) {
	execution := NewExecution("dclusters", "get")
	info, err := r.APIClient.Clusters().Get(clusterID)
	execution.Finish(err)
	return info, err
}
// createCluster provisions a Databricks cluster from the resource spec,
// recording the call as a "dclusters"/"create" execution metric.
func (r *DclusterReconciler) createCluster(instance *databricksv1alpha1.Dcluster) (dbmodels.ClusterInfo, error) {
	execution := NewExecution("dclusters", "create")
	info, err := r.APIClient.Clusters().Create(*instance.Spec)
	execution.Finish(err)
	return info, err
}
|
package main
import (
pb "github.com/ihippik/grpc-test/protocol"
"golang.org/x/net/context"
"net"
"fmt"
"google.golang.org/grpc"
)
// server implements the generated GetUser gRPC service.
type server struct{}

// Get returns a stub user: a fixed profile for Id == 1 and a default profile
// for every other id.
func (s *server) Get(ctx context.Context, in *pb.GetUserRequest) (*pb.User, error) {
	fmt.Println(in.Id)
	// FIX: removed the `break` statements after `return` (Go switch cases do
	// not fall through, so they were dead code) and the unreachable trailing
	// return after an exhaustive switch.
	switch in.Id {
	case 1:
		return &pb.User{Name: "Вася", Email: "loop@lop.lp"}, nil
	default:
		return &pb.User{Name: "Петя", Email: "test@test.ts"}, nil
	}
}
func main() {
l, err:= net.Listen("tcp", ":55555")
if err !=nil{
fmt.Println(err.Error())
}
s:= grpc.NewServer()
pb.RegisterGetUserServer(s,&server{})
s.Serve(l)
} |
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package deployment
import (
"context"
"github.com/gofrs/uuid"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"px.dev/pixie/src/cloud/vzmgr/vzerrors"
"px.dev/pixie/src/cloud/vzmgr/vzmgrpb"
"px.dev/pixie/src/utils"
)
// InfoFetcher fetches information about deployments using the key.
type InfoFetcher interface {
	// FetchOrgUserIDUsingDeploymentKey resolves a deployment key to
	// (orgID, userID, keyID) — see the usage in RegisterVizierDeployment.
	FetchOrgUserIDUsingDeploymentKey(context.Context, string) (uuid.UUID, uuid.UUID, uuid.UUID, error)
}

// VizierProvisioner provisions a new Vizier.
type VizierProvisioner interface {
	// ProvisionVizier creates the vizier, with specified org_id, user_id, cluster_uid. Returns
	// Cluster ID or error. If it already exists it will return the current cluster ID. Will return an error if the cluster is
	// currently active (ie. Not disconnected).
	ProvisionOrClaimVizier(context.Context, uuid.UUID, uuid.UUID, string, string) (uuid.UUID, string, error)
}

// Service is the deployment service.
type Service struct {
	deploymentInfoFetcher InfoFetcher // resolves deployment keys to org/user/key IDs
	vp                    VizierProvisioner
}
// New creates a deployment service backed by the given info fetcher and
// Vizier provisioner.
func New(dif InfoFetcher, vp VizierProvisioner) *Service {
	return &Service{
		deploymentInfoFetcher: dif,
		vp:                    vp,
	}
}
// RegisterVizierDeployment will use the deployment key to generate or fetch the vizier key.
func (s *Service) RegisterVizierDeployment(ctx context.Context, req *vzmgrpb.RegisterVizierDeploymentRequest) (*vzmgrpb.RegisterVizierDeploymentResponse, error) {
	if len(req.K8sClusterUID) == 0 {
		return nil, status.Error(codes.InvalidArgument, "empty cluster UID is not allowed")
	}
	// Fetch the orgID and userID based on the deployment key.
	// The underlying lookup error is deliberately not surfaced to the caller.
	orgID, userID, keyID, err := s.deploymentInfoFetcher.FetchOrgUserIDUsingDeploymentKey(ctx, req.DeploymentKey)
	if err != nil {
		return nil, status.Error(codes.Unauthenticated, "invalid/unknown deployment key")
	}
	// Now we know the org and user ID to use for deployment. The process is as follows:
	// 1. Try to fetch a cluster with either an empty UID or one where the UID matches the one in the protobuf.
	// 2. If the UID matches then return that cluster.
	// 3. Otherwise, pick a cluster with no UID specified and claim it.
	// 4. If no empty clusters exist then we create a new cluster.
	clusterID, clusterName, err := s.vp.ProvisionOrClaimVizier(ctx, orgID, userID, req.K8sClusterUID, req.K8sClusterName)
	if err != nil {
		return nil, vzerrors.ToGRPCError(err)
	}
	log.WithField("orgID", orgID).WithField("keyID", keyID).WithField("clusterID", clusterID).WithField("clusterName", clusterName).Info("Successfully registered Vizier deployment")
	return &vzmgrpb.RegisterVizierDeploymentResponse{
		VizierID:   utils.ProtoFromUUID(clusterID),
		VizierName: clusterName,
	}, nil
}
|
/*
It is well known that if the square root of a natural number is not an integer, then it is irrational. The decimal expansion of such square roots is infinite without any repeating pattern at all.
The square root of two is 1.41421356237309504880..., and the digital sum of the first one hundred decimal digits is 475.
For the first one hundred natural numbers, find the total of the digital sums of the first one hundred decimal digits for all the irrational square roots.
*/
package main
import (
"fmt"
"math"
"math/big"
"strings"
)
// main prints the answer for n = 2 (sanity check against the problem
// statement, expected 475) and then for the full problem at n = 100.
func main() {
	for _, limit := range []int{2, 100} {
		fmt.Println(solve(limit))
	}
}
// solve sums the digital sums of the first one hundred decimal digits of
// sqrt(i) for every non-square i in 1..n (Project Euler 80).
func solve(n int) int {
	total := 0
	for i := 1; i <= n; i++ {
		if issquare(i) {
			continue
		}
		total += digitsum(sqrtex(i, 100))
	}
	return total
}
// issquare reports whether x is a perfect square.
func issquare(x int) bool {
	root := int(math.Sqrt(float64(x)))
	return root*root == x
}
// sqrtex returns the leading decimal digits of sqrt(x) as a string: the
// expansion is formatted to prec+1 decimal places, the point removed, and the
// final two digits dropped (guarding against rounding in the last places).
func sqrtex(x int, prec int) string {
	v := big.NewFloat(float64(x))
	v.SetPrec(4 * uint(prec))
	v.Sqrt(v)
	digits := strings.Replace(v.Text('f', prec+1), ".", "", -1)
	return digits[:len(digits)-2]
}
// digitsum returns the sum of the decimal digit values of the (ASCII-digit)
// string s.
func digitsum(s string) int {
	total := 0
	for i := 0; i < len(s); i++ {
		total += int(s[i] - '0')
	}
	return total
}
|
package tasks
import (
"log"
"net/http"
db "github.com/AnthuanGarcia/RestApiGo/db"
model "github.com/AnthuanGarcia/RestApiGo/src/models"
"github.com/gin-gonic/gin"
)
// HandleGetTasks responds with every stored task, or 404 when loading fails.
func HandleGetTasks(c *gin.Context) {
	tasks, loadErr := db.GetAllTasks()
	if loadErr != nil {
		c.JSON(http.StatusNotFound, gin.H{"msg": loadErr})
		return
	}
	c.JSON(http.StatusOK, gin.H{"tasks": tasks})
}
// HandleGetTask responds with a single task looked up by the URI-bound id.
func HandleGetTask(c *gin.Context) {
	var task model.Task
	if err := c.BindUri(&task); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"msg": err})
		// BUG FIX: missing return — the handler previously fell through and
		// queried the database with a zero-valued task ID (and could write a
		// second response).
		return
	}
	loadedTask, err := db.GetTaskID(task.ID)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{"msg": err})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"ID":   loadedTask.ID,
		"Body": loadedTask.Body,
	})
}
// HandleCreateTask creates a task document from the JSON body and responds
// with the new document id.
func HandleCreateTask(c *gin.Context) {
	var task model.Task
	if bindErr := c.ShouldBindJSON(&task); bindErr != nil {
		log.Print(bindErr)
		c.JSON(http.StatusBadRequest, gin.H{"msg": bindErr})
		return
	}
	id, createErr := db.Create(&task)
	if createErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"msg": createErr})
		return
	}
	c.JSON(http.StatusOK, gin.H{"id": id})
}
// HandleUpdateTask updates a task document from the JSON body and responds
// with the saved task.
func HandleUpdateTask(c *gin.Context) {
	var task model.Task
	if bindErr := c.ShouldBindJSON(&task); bindErr != nil {
		log.Print(bindErr)
		c.JSON(http.StatusBadRequest, gin.H{"msg": bindErr})
		return
	}
	savedTask, updateErr := db.Update(&task)
	if updateErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"msg": updateErr})
		return
	}
	c.JSON(http.StatusOK, gin.H{"task": savedTask})
}
|
package models
import (
"fmt"
"os"
"path/filepath"
)
// FinderEntity is a JSON-serializable view of a filesystem entry.
//
// NOTE(review): the Name and Size fields shadow the embedded os.FileInfo's
// Name() and Size() methods for direct selector access — confirm callers
// intend the field values, not the FileInfo ones.
type FinderEntity struct {
	os.FileInfo       `json:"-"`
	Name              string `json:"name"`
	Size              int64  `json:"-"` // raw byte count; exposed via HumanReadableSize
	HumanReadableSize string `json:"humanReadableSize"`
	Path              string `json:"path"` // directory containing the entry
	LastModifiedAt    string `json:"lastModifiedAt"`
}
// SetHumanReadableEntitySize fills HumanReadableSize from the current Size.
func (entity *FinderEntity) SetHumanReadableEntitySize() {
	entity.HumanReadableSize = GetHumanReadableSize(entity.Size)
}
// SetActualSize recomputes Size for directories by walking the tree and
// summing regular-file sizes, then refreshes HumanReadableSize. For
// non-directories the existing Size is kept.
func (entity *FinderEntity) SetActualSize() error {
	//if it's directory we need to calculate actual size
	if entity.IsDir() {
		var size int64
		path := filepath.Join(entity.Path, entity.Name)
		err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			// Only files contribute bytes; directory entries are skipped.
			if !info.IsDir() {
				size += info.Size()
			}
			return err
		})
		if err != nil {
			return err
		}
		entity.Size = size
	}
	entity.SetHumanReadableEntitySize()
	return nil
}
// GetHumanReadableSize formats a byte count using decimal (SI, base-1000)
// units: "999 B", "1.0 kB", "2.3 MB", and so on up to EB.
func GetHumanReadableSize(size int64) string {
	const step = 1000
	if size < step {
		return fmt.Sprintf("%d B", size)
	}
	divisor := int64(step)
	prefixIdx := 0
	for remaining := size / step; remaining >= step; remaining /= step {
		divisor *= step
		prefixIdx++
	}
	return fmt.Sprintf("%.1f %cB", float64(size)/float64(divisor), "kMGTPE"[prefixIdx])
}
|
package services
import (
"strings"
"time"
"github.com/ne7ermore/gRBAC/common"
"github.com/ne7ermore/gRBAC/plugin"
)
// roleMap maps a role name to its resolved service-layer Role.
type roleMap map[string]*Role

// User is the service-layer view of an RBAC user with its resolved roles.
type User struct {
	Id         string    `json:"id"`
	UserId     string    `json:"user_id"`
	Roles      roleMap   `json:"roles"`
	CreateTime time.Time `json:"createTime"`
	UpdateTime time.Time `json:"updateTime"`
}
// NewUserFromModel converts a storage-layer user into a service-layer User,
// resolving each role name attached to the model through the role pool.
// Empty names, duplicates, and roles that fail to resolve are skipped.
func NewUserFromModel(m plugin.User, rp plugin.RolePools, pp plugin.PermissionPools) *User {
	roles := make(roleMap)
	for _, name := range strings.Split(m.GetRoles(), common.MongoRoleSep) {
		if name == "" {
			continue
		}
		if _, seen := roles[name]; seen {
			continue
		}
		role, err := rp.Get(name)
		if err != nil {
			// Unresolvable roles are silently dropped (best-effort).
			continue
		}
		roles[name] = NewRoleFromModel(role, pp)
	}
	return &User{
		Id:         m.Getid(),
		UserId:     m.GetUserId(),
		Roles:      roles,
		CreateTime: m.GetCreateTime(),
		UpdateTime: m.GetUpdateTime(),
	}
}
// CreateUser registers a new user identified by uid in the user pool and
// returns the freshly loaded service-layer User.
func CreateUser(uid string, up plugin.UserPools, rp plugin.RolePools, pp plugin.PermissionPools) (*User, error) {
	id, newErr := up.New(uid)
	if newErr != nil {
		return nil, newErr
	}
	user, getErr := GetUserById(id, up, rp, pp)
	if getErr != nil {
		return nil, getErr
	}
	// Mirror the new user into the shared in-memory RBAC state.
	common.Get().NewUser(id)
	return user, nil
}
// GetUserById loads a user by its internal id and converts it to the
// service-layer representation.
func GetUserById(id string, up plugin.UserPools, rp plugin.RolePools, pp plugin.PermissionPools) (*User, error) {
	stored, getErr := up.Get(id)
	if getErr != nil {
		return nil, getErr
	}
	return NewUserFromModel(stored, rp, pp), nil
}
// GetUserByUid loads a user by its external uid and converts it to the
// service-layer representation.
func GetUserByUid(uid string, up plugin.UserPools, rp plugin.RolePools, pp plugin.PermissionPools) (*User, error) {
	stored, getErr := up.GetByUid(uid)
	if getErr != nil {
		return nil, getErr
	}
	return NewUserFromModel(stored, rp, pp), nil
}
// UpdateUser applies the given field updates to the stored user and returns
// the reloaded service-layer User.
func UpdateUser(id string, update map[string]string, up plugin.UserPools, rp plugin.RolePools, pp plugin.PermissionPools) (*User, error) {
	updateErr := up.Update(id, update)
	if updateErr != nil {
		return nil, updateErr
	}
	return GetUserById(id, up, rp, pp)
}
// AddRole grants role rid to user uid in the shared in-memory RBAC state.
func AddRole(uid, rid string) error {
	return common.Get().AddRole(uid, rid)
}

// DelRole revokes role rid from user uid in the shared in-memory RBAC state.
func DelRole(uid, rid string) error {
	return common.Get().DelRole(uid, rid)
}
// GetUsers returns up to limit users starting at offset skip, ordered by
// field, each converted to the service-layer representation.
func GetUsers(skip, limit int, field string, up plugin.UserPools, rp plugin.RolePools, pp plugin.PermissionPools) ([]*User, error) {
	stored, getsErr := up.Gets(skip, limit, field)
	if getsErr != nil {
		return nil, getsErr
	}
	result := make([]*User, 0, limit)
	for _, m := range stored {
		result = append(result, NewUserFromModel(m, rp, pp))
	}
	return result, nil
}
// GetUsersCount returns the total number of users in the pool.
func GetUsersCount(up plugin.UserPools) int {
	return up.Counts()
}
|
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package walletdb provides a namespaced database interface for btcwallet.
Overview
A wallet essentially consists of a multitude of stored data such as private
and public keys, key derivation bits, pay-to-script-hash scripts, and various
metadata. One of the issues with many wallets is they are tightly integrated.
Designing a wallet with loosely coupled components that provide specific
functionality is ideal, however it presents a challenge in regards to data
storage since each component needs to store its own data without knowing the
internals of other components or breaking atomicity.
This package solves this issue by providing a pluggable driver, namespaced
database interface that is intended to be used by the main wallet daemon. This
allows the potential for any backend database type with a suitable driver. Each
component, which will typically be a package, can then implement various
functionality such as address management, voting pools, and colored coin
metadata in their own namespace without having to worry about conflicts with
other packages even though they are sharing the same database that is managed by
the wallet.
A quick overview of the features walletdb provides are as follows:
- Key/value store
- Namespace support
- Allows multiple packages to have their own area in the database without
worrying about conflicts
- Read-only and read-write transactions with both manual and managed modes
- Nested buckets
- Supports registration of backend databases
- Comprehensive test coverage
Database
The main entry point is the DB interface. It exposes functionality for
creating, retrieving, and removing namespaces. It is obtained via the Create
and Open functions which take a database type string that identifies the
specific database driver (backend) to use as well as arguments specific to the
specified driver.
Namespaces
The Namespace interface is an abstraction that provides facilities for obtaining
transactions (the Tx interface) that are the basis of all database reads and
writes. Unlike some database interfaces that support reading and writing
without transactions, this interface requires transactions even when only
reading or writing a single key.
The Begin function provides an unmanaged transaction while the View and Update
functions provide a managed transaction. These are described in more detail
below.
Transactions
The Tx interface provides facilities for rolling back or committing changes that
took place while the transaction was active. It also provides the root bucket
under which all keys, values, and nested buckets are stored. A transaction
can either be read-only or read-write and managed or unmanaged.
Managed versus Unmanaged Transactions
A managed transaction is one where the caller provides a function to execute
within the context of the transaction and the commit or rollback is handled
automatically depending on whether or not the provided function returns an
error. Attempting to manually call Rollback or Commit on the managed
transaction will result in a panic.
An unmanaged transaction, on the other hand, requires the caller to manually
call Commit or Rollback when they are finished with it. Leaving transactions
open for long periods of time can have several adverse effects, so it is
recommended that managed transactions are used instead.
Buckets
The Bucket interface provides the ability to manipulate key/value pairs and
nested buckets as well as iterate through them.
The Get, Put, and Delete functions work with key/value pairs, while the Bucket,
CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with
buckets. The ForEach function allows the caller to provide a function to be
called with each key/value pair and nested bucket in the current bucket.
Root Bucket
As discussed above, all of the functions which are used to manipulate key/value
pairs and nested buckets exist on the Bucket interface. The root bucket is the
upper-most bucket in a namespace under which data is stored and is created at
the same time as the namespace. Use the RootBucket function on the Tx interface
to retrieve it.
Nested Buckets
The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface
provide the ability to create an arbitrary number of nested buckets. It is
a good idea to avoid a lot of buckets with little data in them as it could lead
to poor page utilization depending on the specific driver in use.
*/
package walletdb
|
package j2rpc
import (
"context"
"reflect"
)
// callback describes one registered RPC method: the function to invoke plus
// the reflection metadata needed to marshal arguments and route errors.
type callback struct {
	server     *server
	methodName string
	//receiver object of method, set if fn is method
	rcv reflect.Value
	//the function
	fn reflect.Value
	//input argument types
	argTypes []reflect.Type
	//method's first argument is a context (not included in argTypes)
	hasCtx bool
	//err return idx, of -1 when method cannot return error
	errPos int
}
// makeArgTypes inspects c.fn's signature, validates its return shape, and
// populates argTypes, hasCtx and errPos. It returns false when the function
// returns more than two values or two values whose last is not the error.
//
// NOTE(review): when the function returns a single non-error value (or
// nothing), errPos is left untouched here — presumably initialized to -1 at
// the construction site; confirm there.
func (c *callback) makeArgTypes() bool {
	fnt := c.fn.Type()
	outs := make([]reflect.Type, fnt.NumOut())
	for i := 0; i < fnt.NumOut(); i++ {
		outs[i] = fnt.Out(i)
	}
	//A maximum of two values can be returned.
	if len(outs) > 2 {
		return false
	}
	//If an error is returned, it must be the last returned value.
	switch {
	case len(outs) == 1 && isErrorType(outs[0]):
		c.errPos = 0
	case len(outs) == 2:
		if isErrorType(outs[0]) || !isErrorType(outs[1]) {
			return false
		}
		c.errPos = 1
	}
	// Skip the receiver (for methods) and an optional leading context when
	// collecting the marshalable argument types.
	firstArg := 0
	if c.rcv.IsValid() {
		firstArg++
	}
	if fnt.NumIn() > firstArg && fnt.In(firstArg).Implements(contextType) {
		c.hasCtx = true
		firstArg++
	}
	//Add all remaining parameters.
	c.argTypes = make([]reflect.Type, fnt.NumIn()-firstArg)
	for i := firstArg; i < fnt.NumIn(); i++ {
		c.argTypes[i-firstArg] = fnt.In(i)
	}
	return true
}
// call invokes the callback with the given (already-reflected) arguments,
// prepending the receiver and context as required by the signature. A panic
// inside the callback is recovered and converted to an error via the
// server's stack handler.
func (c *callback) call(ctx context.Context, args []reflect.Value) (res interface{}, err error) {
	//Create the argument slice.
	fullArgs := make([]reflect.Value, 0, 2+len(args))
	if c.rcv.IsValid() {
		fullArgs = append(fullArgs, c.rcv)
	}
	if c.hasCtx {
		fullArgs = append(fullArgs, reflect.ValueOf(ctx))
	}
	fullArgs = append(fullArgs, args...)
	//Catch panic while running the callback.
	defer func() {
		if p := recover(); p != nil {
			err = c.server.stack(p, c.methodName)
			return
		}
	}()
	//Run the callback.
	results := c.fn.Call(fullArgs)
	if len(results) == 0 {
		return
	}
	// If the method declares an error return and it is non-nil, the error
	// takes precedence over any result value.
	if c.errPos >= 0 {
		err = value2err(results[c.errPos])
		if err != nil {
			return
		}
	}
	rv := results[0]
	if !rv.IsValid() {
		return
	}
	return rv.Interface(), err
}
// value2err converts a reflected return value into a concrete error. It
// returns nil for invalid values, non-error types, and nil (zero) errors.
func value2err(val reflect.Value) error {
	// BUG FIX: check IsValid before touching val.Type() — reflect.Value.Type
	// panics on the zero Value, so the original's ordering could panic
	// instead of returning nil for an invalid input.
	if !val.IsValid() {
		return nil
	}
	if !isErrorType(val.Type()) {
		return nil
	}
	if val.IsZero() {
		return nil
	}
	return val.Interface().(error)
}
|
package tmp
// HandlerMQTTTmp is a text/template body used by the code generator to emit
// an MQTT handler package. Template arguments: index 0 is the handler name,
// index 1 is the module import path. The string content is generated code,
// not code of this package, and must be kept verbatim.
const HandlerMQTTTmp = `package {{printf "%v_handler" (index . 0)}}
import (
"encoding/json"
"fmt"
"strings"
{{printf "\"%v/handlers/%v_handler/%v_helper\"" (index . 1) (index . 0) (index . 0)}}
{{printf "\"%v/helper\"" (index . 1)}}
{{printf "\"%v/hub/hub_helper\"" (index . 1)}}
mqtt "github.com/eclipse/paho.mqtt.golang"
)
type handler struct {
hub_helper.HelperForHandler
client mqtt.Client
err chan error
}
func InitHandler(hub hub_helper.HelperForHandler, conf *helper.HandlerConfig) (H {{print (index . 0)}}_helper.Handler, err error) {
h := &handler{HelperForHandler: hub}
H = h
client := mqtt.NewClient(mqtt.NewClientOptions().
AddBroker(fmt.Sprintf("tcp://%v:%v", conf.Host, conf.Port)).
SetUsername(conf.User).
SetPassword(conf.Password).
SetOnConnectHandler(h.onConn).
SetConnectionLostHandler(h.onLost).
SetConnectRetry(true).
SetResumeSubs(true))
h.client = client
if token := client.Connect(); token.Wait() && token.Error() != nil {
return nil, token.Error()
}
helper.Wg.Add(1)
go h.loop()
return
}
func (h *handler) haron(_ mqtt.Client, msg mqtt.Message) {
var topic, route string
str := strings.Split(msg.Topic(), "/")
if len(str) == 3 {
route = strings.TrimSpace(str[1])
topic = strings.TrimSpace(str[2])
} else {
return
}
switch route {
}
}
func (h *handler) onConn(client mqtt.Client) {
helper.Log.Service("mqtt is conected")
if token := client.Subscribe("back/#", 0, h.haron); token.Wait() && token.Error() != nil {
h.err <- token.Error()
}
}
func (h *handler) onLost(client mqtt.Client, err error) {
helper.Log.Servicef("mqtt is disconected: %v", err)
token := client.Unsubscribe("back/#")
token.Wait()
}
func (h *handler) loop() {
defer helper.Wg.Done()
for {
select {
case e := <-h.err:
helper.Log.Errorf("mqtt erorr: %v", e)
h.client.Disconnect(1000)
return
case <-helper.Ctx.Done():
helper.Log.Service("mqtt stoped")
h.client.Disconnect(1000)
return
}
}
}
func (h *handler) respOk(topic string, data interface{}) {
resp := &helper.ResponseModel{
Success: true,
Result: data,
}
buf, err := json.Marshal(resp)
if err != nil {
helper.Log.Warningf(helper.KeyErrorParse+": json: %v", err)
return
}
if token := h.client.Publish(topic, 0, false, buf); token.Wait() && token.Error() != nil {
helper.Log.Warningf(helper.KeyErrorSend+": mqtt: %v", token.Error())
}
}
func (h *handler) respError(topic string, code helper.ErrCode, msg string) {
resp := &helper.ResponseModel{
Success: false,
Result: &helper.ResponseError{
Code: code,
Msg: msg,
},
}
buf, _ := json.Marshal(resp)
if token := h.client.Publish(topic, 0, false, buf); token.Wait() && token.Error() != nil {
helper.Log.Warningf(helper.KeyErrorSend+": mqtt: %v", token.Error())
}
}`
|
package config
// Packet/transport configuration constants shared by client and server.
const (
	// BUFFERSIZE is the size of max packet size.
	BUFFERSIZE = 1024
	// PORT is the default port for communication.
	PORT = "4242"
	// SERVER_ADDR is the default server address, built from PORT.
	SERVER_ADDR = "100.0.0.1:" + PORT
)
package backservice
import (
"context"
"fmt"
calc "github.com/flexera/calc/back_service/gen/calc"
"github.com/flexera/calc/back_service/services/dynamo"
"github.com/flexera/micro/log"
)
// calc service example implementation.
// The example methods log the requests and return zero values.
type calcsrvc struct {
	// dynamoc persists each computed result; see SaveResult calls in the
	// arithmetic methods below.
	dynamoc dynamo.Client
}
// NewCalc returns the calc service implementation backed by the given
// DynamoDB client.
func NewCalc(dynamoc dynamo.Client) calc.Service {
	svc := &calcsrvc{dynamoc: dynamoc}
	return svc
}
// Add implements add. It returns p.A + p.B, refusing the trivial 1+1 case,
// and best-effort persists the result to DynamoDB (failures are only logged).
func (s *calcsrvc) Add(ctx context.Context, p *calc.AddPayload) (res int, err error) {
	log.Info(ctx, "msg", "adding two numbers", "a", p.A, "b", p.B)
	if p.A == 1 && p.B == 1 {
		return 0, fmt.Errorf("these are simple numbers; please add them in your head")
	}
	res = p.A + p.B
	if saveErr := s.dynamoc.SaveResult(ctx, p.A, p.B, float32(res)); saveErr != nil {
		// Persistence is best-effort: log and still return the sum.
		log.Error(ctx, "msg", "failed to save result to dynamodb", "err", saveErr)
	}
	return res, nil
}
// Sub implements sub. It returns p.A - p.B, refusing the trivial 1-1 case,
// and best-effort persists the result to DynamoDB (failures are only logged).
func (s *calcsrvc) Sub(ctx context.Context, p *calc.SubPayload) (res int, err error) {
	// Fix: log message previously said "adding two numbers" (copy-paste from Add).
	log.Info(ctx, "msg", "subtracting two numbers", "a", p.A, "b", p.B)
	if p.A == 1 && p.B == 1 {
		return 0, fmt.Errorf("these are simple numbers; please subtract them in your head")
	}
	res = p.A - p.B
	err2 := s.dynamoc.SaveResult(ctx, p.A, p.B, float32(res))
	if err2 != nil {
		log.Error(ctx, "msg", "failed to save result to dynamodb", "err", err2)
	}
	return
}
// Mul implements mul. It returns p.A * p.B, refusing the trivial 1*1 case,
// and best-effort persists the result to DynamoDB (failures are only logged).
func (s *calcsrvc) Mul(ctx context.Context, p *calc.MulPayload) (res int, err error) {
	// Fix: log message previously said "adding two numbers" (copy-paste from Add).
	log.Info(ctx, "msg", "multiplying two numbers", "a", p.A, "b", p.B)
	if p.A == 1 && p.B == 1 {
		return 0, fmt.Errorf("these are simple numbers; please multiply them in your head")
	}
	res = p.A * p.B
	err2 := s.dynamoc.SaveResult(ctx, p.A, p.B, float32(res))
	if err2 != nil {
		log.Error(ctx, "msg", "failed to save result to dynamodb", "err", err2)
	}
	return
}
// Div implements div.
func (s *calcsrvc) Div(ctx context.Context, p *calc.DivPayload) (res float32, err error) {
log.Info(ctx, "msg", "dividing two numbers", "a", p.A, "b", p.B)
if p.A == 1 && p.B == 1 {
return 0, fmt.Errorf("these are simple numbers; please add them in your head")
}
if p.B == 0 {
return 0, fmt.Errorf("Cannot divide a number by 0, didn't you know that?!!")
}
res = float32(p.A) / float32(p.B)
err2 := s.dynamoc.SaveResult(ctx, p.A, p.B, res)
if err2 != nil {
log.Error(ctx, "msg", "failed to save result to dynamodb", "err", err2)
}
return
} |
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"context"
"sort"
"time"
"unsafe"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/fastrand"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tipb/go-tipb"
"github.com/twmb/murmur3"
)
// SampleItem is an item of sampled column value.
type SampleItem struct {
	// Value is the sampled column value.
	Value types.Datum
	// Handle is the handle of the sample in its key.
	// This property is used to calculate Ordinal in fast analyze.
	Handle kv.Handle
	// Ordinal is original position of this item in SampleCollector before sorting. This
	// is used for computing correlation.
	Ordinal int
}
// EmptySampleItemSize is the size of empty SampleItem, 96 = 72 (datum) + 8 (int) + 16.
// Used for memory accounting of sample collections; computed once at init via unsafe.Sizeof.
const EmptySampleItemSize = int64(unsafe.Sizeof(SampleItem{}))
// CopySampleItems returns a deep copy of SampleItem slice: each element is a
// freshly allocated SampleItem holding a shallow copy of the original struct.
func CopySampleItems(items []*SampleItem) []*SampleItem {
	copied := make([]*SampleItem, len(items))
	for i := range items {
		item := *items[i]
		copied[i] = &item
	}
	return copied
}
// SortSampleItems shallow copies and sorts a slice of SampleItem.
// The input slice is left untouched; any comparison error encountered during
// sorting is returned alongside the (possibly partially ordered) copy.
func SortSampleItems(sc *stmtctx.StatementContext, items []*SampleItem) ([]*SampleItem, error) {
	sorted := append(make([]*SampleItem, 0, len(items)), items...)
	sorter := sampleItemSorter{items: sorted, sc: sc}
	sort.Stable(&sorter)
	return sorted, sorter.err
}
// sampleItemSorter implements sort.Interface over a slice of SampleItem,
// comparing by Value with the binary collator. The first comparison error is
// remembered in err so SortSampleItems can surface it.
type sampleItemSorter struct {
	err   error
	sc    *stmtctx.StatementContext
	items []*SampleItem
}
// Len implements sort.Interface.
func (s *sampleItemSorter) Len() int {
	return len(s.items)
}
// Less implements sort.Interface. On comparison failure it records the error
// and returns true so the sort terminates quickly; callers must check s.err.
func (s *sampleItemSorter) Less(i, j int) bool {
	var cmp int
	cmp, s.err = s.items[i].Value.Compare(s.sc, &s.items[j].Value, collate.GetBinaryCollator())
	if s.err != nil {
		return true
	}
	return cmp < 0
}
// Swap implements sort.Interface.
func (s *sampleItemSorter) Swap(i, j int) {
	s.items[i], s.items[j] = s.items[j], s.items[i]
}
// SampleCollector will collect Samples and calculate the count and ndv of an attribute.
type SampleCollector struct {
	FMSketch *FMSketch
	CMSketch *CMSketch
	TopN     *TopN
	// Samples is the reservoir of sampled values, capped at MaxSampleSize.
	Samples    []*SampleItem
	seenValues int64 // seenValues is the current seen values.
	NullCount  int64
	Count      int64 // Count is the number of non-null rows.
	MaxSampleSize int64
	TotalSize  int64 // TotalSize is the total size of column.
	MemSize    int64 // major memory size of this sample collector.
	// IsMerger marks a collector that merges other collectors' samples; such a
	// collector maintains only the reservoir, not counts/sketches (see collect).
	IsMerger bool
}
// MergeSampleCollector merges two sample collectors.
// Counts, sizes and the FM sketch are merged directly; rc's samples are fed
// through collect so the reservoir-sampling invariants are preserved.
// Merge errors from the CM sketch / collect are logged, not returned.
func (c *SampleCollector) MergeSampleCollector(sc *stmtctx.StatementContext, rc *SampleCollector) {
	c.NullCount += rc.NullCount
	c.Count += rc.Count
	c.TotalSize += rc.TotalSize
	c.FMSketch.MergeFMSketch(rc.FMSketch)
	if rc.CMSketch != nil {
		err := c.CMSketch.MergeCMSketch(rc.CMSketch)
		terror.Log(errors.Trace(err))
	}
	for _, item := range rc.Samples {
		err := c.collect(sc, item.Value)
		terror.Log(errors.Trace(err))
	}
}
// SampleCollectorToProto converts SampleCollector to its protobuf representation.
// The CM sketch is included only when present; TopN and FM-sketch internals are
// converted by their own helpers.
func SampleCollectorToProto(c *SampleCollector) *tipb.SampleCollector {
	proto := &tipb.SampleCollector{
		NullCount: c.NullCount,
		Count:     c.Count,
		FmSketch:  FMSketchToProto(c.FMSketch),
		TotalSize: &c.TotalSize,
	}
	if c.CMSketch != nil {
		proto.CmSketch = CMSketchToProto(c.CMSketch, nil)
	}
	for _, item := range c.Samples {
		proto.Samples = append(proto.Samples, item.Value.GetBytes())
	}
	return proto
}
// MaxSampleValueLength defines the max length of the useful samples. If one sample value exceeds the max length, we drop it before building the stats.
// Derived from the MySQL VARCHAR field length limit.
const MaxSampleValueLength = mysql.MaxFieldVarCharLength / 2
// SampleCollectorFromProto converts SampleCollector from its protobuf
// representation. Samples longer than MaxSampleValueLength are dropped, since
// over-long values must not reach histogram bucket boundaries stored in kv.
func SampleCollectorFromProto(collector *tipb.SampleCollector) *SampleCollector {
	result := &SampleCollector{
		NullCount: collector.NullCount,
		Count:     collector.Count,
		FMSketch:  FMSketchFromProto(collector.FmSketch),
	}
	if ts := collector.TotalSize; ts != nil {
		result.TotalSize = *ts
	}
	result.CMSketch, result.TopN = CMSketchAndTopNFromProto(collector.CmSketch)
	for _, val := range collector.Samples {
		// When store the histogram bucket boundaries to kv, we need to limit the length of the value.
		if len(val) > MaxSampleValueLength {
			continue
		}
		result.Samples = append(result.Samples, &SampleItem{Value: types.NewBytesDatum(val)})
	}
	return result
}
// collect feeds one value into the collector using reservoir sampling.
// For a non-merger collector it also maintains the NULL/row counts, the
// FM/CM sketches, and the total column size; a merger maintains only the
// sample reservoir (the other stats are merged wholesale elsewhere).
func (c *SampleCollector) collect(sc *stmtctx.StatementContext, d types.Datum) error {
	if !c.IsMerger {
		if d.IsNull() {
			c.NullCount++
			return nil
		}
		c.Count++
		if err := c.FMSketch.InsertValue(sc, d); err != nil {
			return errors.Trace(err)
		}
		if c.CMSketch != nil {
			c.CMSketch.InsertBytes(d.GetBytes())
		}
		// Minus one is to remove the flag byte.
		c.TotalSize += int64(len(d.GetBytes()) - 1)
	}
	c.seenValues++
	// The following code use types.CloneDatum(d) because d may have a deep reference
	// to the underlying slice, GC can't free them which lead to memory leak eventually.
	// TODO: Refactor the proto to avoid copying here.
	if len(c.Samples) < int(c.MaxSampleSize) {
		// Reservoir not full yet: always keep the value.
		newItem := &SampleItem{}
		d.Copy(&newItem.Value)
		c.Samples = append(c.Samples, newItem)
	} else {
		// Reservoir full: keep the new value with probability
		// MaxSampleSize / seenValues, evicting a uniformly random slot.
		shouldAdd := int64(fastrand.Uint64N(uint64(c.seenValues))) < c.MaxSampleSize
		if shouldAdd {
			idx := int(fastrand.Uint32N(uint32(c.MaxSampleSize)))
			newItem := &SampleItem{}
			d.Copy(&newItem.Value)
			// To keep the order of the elements, we use delete and append, not direct replacement.
			c.Samples = append(c.Samples[:idx], c.Samples[idx+1:]...)
			c.Samples = append(c.Samples, newItem)
		}
	}
	return nil
}
// CalcTotalSize is to calculate total size based on samples, overwriting any
// previously accumulated TotalSize.
func (c *SampleCollector) CalcTotalSize() {
	var total int64
	for i := range c.Samples {
		total += int64(len(c.Samples[i].Value.GetBytes()))
	}
	c.TotalSize = total
}
// SampleBuilder is used to build samples for columns.
// Also, if primary key is handle, it will directly build histogram for it.
type SampleBuilder struct {
	RecordSet sqlexec.RecordSet
	Sc        *stmtctx.StatementContext
	// PkBuilder, when non-nil, receives the first column of each row (the PK
	// handle) and builds its histogram directly.
	PkBuilder *SortedBuilder
	Collators []collate.Collator
	ColsFieldType []*types.FieldType
	ColLen int // ColLen is the number of columns need to be sampled.
	MaxBucketSize int64
	MaxSampleSize int64
	MaxFMSketchSize int64
	// CMSketchDepth/Width: CM sketches are built only when both are > 0.
	CMSketchDepth int32
	CMSketchWidth int32
}
// CollectColumnStats collects sample from the result set using Reservoir Sampling algorithm,
// and estimates NDVs using FM Sketch during the collecting process.
// It returns the sample collectors which contain total count, null count, distinct values count and CM Sketch.
// It also returns the statistic builder for PK which contains the histogram.
// See https://en.wikipedia.org/wiki/Reservoir_sampling
func (s SampleBuilder) CollectColumnStats() ([]*SampleCollector, *SortedBuilder, error) {
	// One collector per sampled column, each with its own FM sketch.
	collectors := make([]*SampleCollector, s.ColLen)
	for i := range collectors {
		collectors[i] = &SampleCollector{
			MaxSampleSize: s.MaxSampleSize,
			FMSketch:      NewFMSketch(int(s.MaxFMSketchSize)),
		}
	}
	if s.CMSketchDepth > 0 && s.CMSketchWidth > 0 {
		for i := range collectors {
			collectors[i].CMSketch = NewCMSketch(s.CMSketchDepth, s.CMSketchWidth)
		}
	}
	ctx := context.TODO()
	req := s.RecordSet.NewChunk(nil)
	it := chunk.NewIterator4Chunk(req)
	for {
		err := s.RecordSet.Next(ctx, req)
		if err != nil {
			return nil, nil, errors.Trace(err)
		}
		// An empty chunk marks the end of the record set.
		if req.NumRows() == 0 {
			return collectors, s.PkBuilder, nil
		}
		if len(s.RecordSet.Fields()) == 0 {
			return nil, nil, errors.Errorf("collect column stats failed: record set has 0 field")
		}
		for row := it.Begin(); row != it.End(); row = it.Next() {
			datums := RowToDatums(row, s.RecordSet.Fields())
			if s.PkBuilder != nil {
				// First column is the PK handle: feed it to the sorted
				// histogram builder instead of a sample collector.
				err = s.PkBuilder.Iterate(datums[0])
				if err != nil {
					return nil, nil, errors.Trace(err)
				}
				datums = datums[1:]
			}
			for i, val := range datums {
				if s.Collators[i] != nil && !val.IsNull() {
					// Collated column: re-encode the value by its sort key so
					// the collected stats respect the collation.
					decodedVal, err := tablecodec.DecodeColumnValue(val.GetBytes(), s.ColsFieldType[i], s.Sc.TimeZone)
					if err != nil {
						return nil, nil, err
					}
					decodedVal.SetBytesAsString(s.Collators[i].Key(decodedVal.GetString()), decodedVal.Collation(), uint32(decodedVal.Length()))
					encodedKey, err := tablecodec.EncodeValue(s.Sc, nil, decodedVal)
					if err != nil {
						return nil, nil, err
					}
					val.SetBytes(encodedKey)
				}
				err = collectors[i].collect(s.Sc, val)
				if err != nil {
					return nil, nil, errors.Trace(err)
				}
			}
		}
	}
}
// RowToDatums converts row to datum slice, one datum per result field, typed
// by the field's column type.
func RowToDatums(row chunk.Row, fields []*ast.ResultField) []types.Datum {
	datums := make([]types.Datum, 0, len(fields))
	for i, field := range fields {
		datums = append(datums, row.GetDatum(i, &field.Column.FieldType))
	}
	return datums
}
// ExtractTopN extracts the topn from the CM Sketch.
// The most frequent sampled values are moved out of the CM sketch (their
// counts subtracted) and stored in c.TopN, re-encoded with the current value
// encoding.
func (c *SampleCollector) ExtractTopN(numTop uint32, sc *stmtctx.StatementContext, tp *types.FieldType, timeZone *time.Location) error {
	if numTop == 0 {
		return nil
	}
	values := make([][]byte, 0, len(c.Samples))
	for _, sample := range c.Samples {
		values = append(values, sample.Value.GetBytes())
	}
	helper := newTopNHelper(values, numTop)
	cms := c.CMSketch
	c.TopN = NewTopN(int(helper.actualNumTop))
	// Process them decreasingly so we can handle most frequent values first and reduce the probability of hash collision
	// by small values.
	for i := uint32(0); i < helper.actualNumTop; i++ {
		h1, h2 := murmur3.Sum128(helper.sorted[i].data)
		realCnt := cms.queryHashValue(nil, h1, h2)
		// Because the encode of topn is the new encode type. But analyze proto returns the old encode type for a sample datum,
		// we should decode it and re-encode it to get the correct bytes.
		d, err := tablecodec.DecodeColumnValue(helper.sorted[i].data, tp, timeZone)
		if err != nil {
			return err
		}
		data, err := tablecodec.EncodeValue(sc, nil, d)
		if err != nil {
			return err
		}
		// Move the value's count from the CM sketch into the TopN.
		cms.SubValue(h1, h2, realCnt)
		c.TopN.AppendTopN(data, realCnt)
	}
	c.TopN.Sort()
	return nil
}
|
package handlers_test
import (
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"github.com/cloudfoundry-incubator/notifications/models"
"github.com/cloudfoundry-incubator/notifications/postal"
"github.com/cloudfoundry-incubator/notifications/web/handlers"
"github.com/cloudfoundry-incubator/notifications/web/params"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// unmarshalErrors decodes the JSON body recorded by the handler into a generic
// map. It panics on malformed JSON so a broken response fails the spec
// immediately. Extracted because every spec below repeated this boilerplate.
func unmarshalErrors(recorder *httptest.ResponseRecorder) map[string]interface{} {
	body := make(map[string]interface{})
	if err := json.Unmarshal(recorder.Body.Bytes(), &body); err != nil {
		panic(err)
	}
	return body
}

// ErrorWriter specs: each spec hands a known error type to Write and asserts
// the HTTP status code and the "errors" element of the JSON body.
var _ = Describe("ErrorWriter", func() {
	var writer handlers.ErrorWriter
	var recorder *httptest.ResponseRecorder

	BeforeEach(func() {
		writer = handlers.NewErrorWriter()
		recorder = httptest.NewRecorder()
	})

	It("returns a 422 when a client tries to register a critical notification without critical_notifications.write scope", func() {
		writer.Write(recorder, postal.UAAScopesError("UAA Scopes Error: Client does not have authority to register critical notifications."))
		unprocessableEntity := 422
		Expect(recorder.Code).To(Equal(unprocessableEntity))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("UAA Scopes Error: Client does not have authority to register critical notifications."))
	})

	It("returns a 502 when CloudController fails to respond", func() {
		writer.Write(recorder, postal.CCDownError("Bad things happened!"))
		Expect(recorder.Code).To(Equal(http.StatusBadGateway))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("Bad things happened!"))
	})

	It("returns a 502 when UAA fails to respond", func() {
		writer.Write(recorder, postal.UAADownError("Whoops!"))
		Expect(recorder.Code).To(Equal(http.StatusBadGateway))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("Whoops!"))
	})

	It("returns a 502 when UAA fails for unknown reasons", func() {
		writer.Write(recorder, postal.UAAGenericError("UAA Unknown Error: BOOM!"))
		Expect(recorder.Code).To(Equal(http.StatusBadGateway))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("UAA Unknown Error: BOOM!"))
	})

	It("returns a 500 when there is a template loading error", func() {
		writer.Write(recorder, postal.TemplateLoadError("BOOM!"))
		Expect(recorder.Code).To(Equal(http.StatusInternalServerError))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("An email template could not be loaded"))
	})

	It("returns a 500 when there is a template update error", func() {
		writer.Write(recorder, params.TemplateUpdateError{})
		Expect(recorder.Code).To(Equal(http.StatusInternalServerError))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("failed to update Template in the database"))
	})

	It("returns a 404 when the space cannot be found", func() {
		writer.Write(recorder, postal.CCNotFoundError("Organization could not be found"))
		Expect(recorder.Code).To(Equal(http.StatusNotFound))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("CloudController Error: Organization could not be found"))
	})

	It("returns a 400 when the params cannot be parsed due to syntatically invalid JSON", func() {
		writer.Write(recorder, params.ParseError{})
		Expect(recorder.Code).To(Equal(400))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("Request body could not be parsed"))
	})

	It("returns a 422 when the params are not valid due to semantically invalid JSON", func() {
		writer.Write(recorder, params.ValidationError([]string{"something", "another"}))
		Expect(recorder.Code).To(Equal(422))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("something"))
		Expect(body["errors"]).To(ContainElement("another"))
	})

	It("returns a 422 when trying to send a critical notification without correct scope", func() {
		writer.Write(recorder, postal.NewCriticalNotificationError("raptors"))
		Expect(recorder.Code).To(Equal(422))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("Insufficient privileges to send notification raptors"))
	})

	It("returns a 409 when there is a duplicate record", func() {
		writer.Write(recorder, models.ErrDuplicateRecord{})
		Expect(recorder.Code).To(Equal(409))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("Duplicate Record"))
	})

	It("returns a 404 when a record cannot be found", func() {
		writer.Write(recorder, models.ErrRecordNotFound{})
		Expect(recorder.Code).To(Equal(404))
		body := unmarshalErrors(recorder)
		Expect(body["errors"]).To(ContainElement("Record Not Found"))
	})

	It("panics for unknown errors", func() {
		Expect(func() {
			writer.Write(recorder, errors.New("BOOM!"))
		}).To(Panic())
	})
})
|
package sys
import (
"easyctl/constant"
"easyctl/util"
"fmt"
"log"
"os"
)
// User-facing status messages (Chinese) printed by the YUM mirror setup
// helpers below. They are runtime output strings, kept verbatim.
const (
	aliBaseEL7WriteErrMsg    = "阿里云base镜像源配置失败..." // writing the Aliyun base repo failed
	localWriteErrMsg         = "local.repo文件写失败..."      // writing local.repo failed
	nginxRepoFileWriteErrMsg = "nginx.repo文件写失败..."      // writing nginx.repo failed
	aliEpelEL7WriteErrMsg    = "阿里云epel镜像源配置失败..." // writing the Aliyun epel repo failed
	setAliMirrorSuccessful   = "阿里云镜像源配置成功..."       // Aliyun mirror configured successfully
	setLocalMirrorSuccessful = "本地镜像源配置成功..."         // local mirror configured successfully
	setNginxMirrorSuccessful = "nginx镜像源配置成功..."        // nginx mirror configured successfully
)
// SetDNS validates dnsAddress and rewrites /etc/resolv.conf so it contains
// exactly one "nameserver <dnsAddress>" entry (any duplicate line is deleted
// first). Returns the validation or shell-execution error plus output.
func SetDNS(dnsAddress string) (err error, result string) {
	cmd := "sed -i \"/nameserver " + dnsAddress + "/d\" /etc/resolv.conf;" +
		"echo \"nameserver " + dnsAddress + "\" >> /etc/resolv.conf\n"
	fmt.Printf("[check] 检测dns地址:%s合法性...\n", dnsAddress)
	if err, result = util.CheckIP(dnsAddress); err != nil {
		return err, result
	}
	return util.ExecuteCmd(cmd)
}
// SetAliYUM switches the host's YUM configuration to the Aliyun mirrors:
// backs up existing repo files, writes base-ali.repo and epel-ali.repo, then
// clears the YUM cache. Errors are reported on stdout; the function is
// best-effort and never aborts.
//
// Fixes: the open-error check now happens BEFORE using/deferring the file
// handle (previously `defer f.Close()` ran before the error check and the
// write was attempted even when OpenFile failed), and O_TRUNC is added so a
// shorter new config cannot leave stale trailing bytes in an existing file.
func SetAliYUM() {
	// Back up any existing repo files into a dated directory.
	cmd := "mkdir -p /etc/yum.repos.d/`date +%Y%m%d`" + ";" +
		"mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/`date +%Y%m%d` -f"
	fmt.Printf("[bakup] 备份历史repo文件...\n")
	util.ExecuteCmd(cmd)
	// Write the Aliyun base repo file.
	fmt.Printf("[create] 创建base-ali.repo文件...\n")
	baseRepoFile, err := os.OpenFile("/etc/yum.repos.d/base-ali.repo", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		fmt.Println(err.Error())
		fmt.Println("[failed] " + aliBaseEL7WriteErrMsg)
	} else {
		if _, baseWriteErr := baseRepoFile.Write([]byte(constant.CentOSAliBaseYUMContent)); baseWriteErr != nil {
			fmt.Println(baseWriteErr.Error())
			fmt.Println("[failed] " + aliBaseEL7WriteErrMsg)
		}
		baseRepoFile.Close()
	}
	// Write the Aliyun epel repo file.
	fmt.Printf("[create] 创建epel-ali.repo文件...\n")
	epelRepoFile, err := os.OpenFile("/etc/yum.repos.d/epel-ali.repo", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		fmt.Println(err.Error())
		fmt.Println("[failed] " + aliEpelEL7WriteErrMsg)
	} else {
		if _, epelWriteErr := epelRepoFile.Write([]byte(constant.CentOSAliEpelYUMContent)); epelWriteErr != nil {
			fmt.Println(epelWriteErr.Error())
			fmt.Println("[failed] " + aliEpelEL7WriteErrMsg)
		}
		epelRepoFile.Close()
	}
	cleanYUMCacheCmd := "yum clean all"
	fmt.Printf("[clean] 清除yum缓存...\n")
	util.ExecuteCmd(cleanYUMCacheCmd)
	fmt.Println("[successful] " + setAliMirrorSuccessful)
}
// SetLocalYUM switches the host's YUM configuration to a local mirror: backs
// up existing repo files, writes local.repo, then clears the YUM cache.
// Errors are reported on stdout; the function is best-effort.
//
// Fixes: open-error is checked before using/deferring the handle (previously
// the write ran even when OpenFile failed), and O_TRUNC prevents stale bytes
// from a longer pre-existing file.
func SetLocalYUM() {
	// Back up any existing repo files into a dated directory.
	cmd := "mkdir -p /etc/yum.repos.d/`date +%Y%m%d`" + ";" +
		"mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/`date +%Y%m%d` -f"
	fmt.Printf("[bakup] 备份历史repo文件...\n")
	util.ExecuteCmd(cmd)
	// Write local.repo.
	fmt.Printf("[create] 创建local.repo文件...\n")
	localRepoFile, err := os.OpenFile("/etc/yum.repos.d/local.repo", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		fmt.Println(err.Error())
		fmt.Println("[failed] " + localWriteErrMsg)
	} else {
		if _, localWriteErr := localRepoFile.Write([]byte(constant.CentOSLocalYUMContent)); localWriteErr != nil {
			fmt.Println(localWriteErr.Error())
			fmt.Println("[failed] " + localWriteErrMsg)
		}
		localRepoFile.Close()
	}
	cleanYUMCacheCmd := "yum clean all"
	fmt.Printf("[clean] 清除yum缓存...\n")
	util.ExecuteCmd(cleanYUMCacheCmd)
	fmt.Println("[successful] " + setLocalMirrorSuccessful)
}
func SetNginxMirror() {
// 写nginx.repo文件
fmt.Printf("[yum] 创建nginx.repo文件...\n")
nginxRepoFile, err := os.OpenFile("/etc/yum.repos.d/nginx.repo", os.O_WRONLY|os.O_CREATE, 0666)
defer nginxRepoFile.Close()
if err != nil {
fmt.Println(err.Error())
}
_, nginxRepoFileWriteErr := nginxRepoFile.Write([]byte(constant.CentOSNginxMirrorContent))
if nginxRepoFileWriteErr != nil {
fmt.Println(nginxRepoFileWriteErr.Error())
fmt.Println("[failed] " + nginxRepoFileWriteErrMsg)
}
cleanYUMCacheCmd := "yum clean all"
fmt.Printf("[clean] 清除yum缓存...\n")
util.ExecuteCmd(cleanYUMCacheCmd)
fmt.Println("[successful] " + setNginxMirrorSuccessful)
}
// SetHostname sets the system hostname: it rewrites the HOSTNAME entry in
// /etc/sysconfig/network, applies it via sysctl, and appends a 127.0.0.1
// entry for the name to /etc/hosts. Failures are printed, not returned.
func SetHostname(name string) {
	// TODO validate the hostname format before applying it.
	fmt.Println("[hostname]配置hostname...")
	cmd := fmt.Sprintf("sed -i '/HOSTNAME/d' /etc/sysconfig/network;"+
		"echo \"HOSTNAME=%s\" >> /etc/sysconfig/network;"+
		"sysctl kernel.hostname=%s", name, name)
	err, _ := util.ExecuteCmd(cmd)
	if err != nil {
		log.Println(err.Error())
		util.PrintFailureMsg("[failed] 配置hostname失败...")
	} else {
		util.PrintSuccessfulMsg("[success] 配置hostname成功...")
	}
	// Add a loopback hosts entry so the new hostname resolves locally.
	fmt.Println("[host]配置host解析...")
	util.ExecuteCmd(fmt.Sprintf("echo \"127.0.0.1 %s\" >> /etc/hosts", name))
}
// SetTimeZone sets the system time zone to Asia/Shanghai by copying the
// zoneinfo file over /etc/localtime.
func SetTimeZone() {
	fmt.Println("[timezone]配置时区为上海...")
	util.ExecuteCmd("\\cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime -R")
}
|
package azure
import (
"github.com/openshift/installer/pkg/terraform"
"github.com/openshift/installer/pkg/terraform/providers"
"github.com/openshift/installer/pkg/terraform/stages"
typesazure "github.com/openshift/installer/pkg/types/azure"
)
// PlatformStages are the stages to run to provision the infrastructure in Azure.
// Order matters: vnet first, then bootstrap (torn down after install via
// WithNormalBootstrapDestroy), then the cluster itself.
var PlatformStages = []terraform.Stage{
	stages.NewStage(
		typesazure.Name,
		"vnet",
		[]providers.Provider{providers.AzureRM},
	),
	stages.NewStage(
		typesazure.Name,
		"bootstrap",
		[]providers.Provider{providers.AzureRM, providers.Ignition, providers.Local},
		stages.WithNormalBootstrapDestroy(),
	),
	stages.NewStage(
		typesazure.Name,
		"cluster",
		[]providers.Provider{providers.AzureRM, providers.Time},
	),
}
// StackPlatformStages are the stages to run to provision the infrastructure in Azure Stack.
// Mirrors PlatformStages but uses the AzureStack provider (and no Time provider
// for the cluster stage).
var StackPlatformStages = []terraform.Stage{
	stages.NewStage(
		typesazure.StackTerraformName,
		"vnet",
		[]providers.Provider{providers.AzureStack},
	),
	stages.NewStage(
		typesazure.StackTerraformName,
		"bootstrap",
		[]providers.Provider{providers.AzureStack, providers.Ignition, providers.Local},
		stages.WithNormalBootstrapDestroy(),
	),
	stages.NewStage(
		typesazure.StackTerraformName,
		"cluster",
		[]providers.Provider{providers.AzureStack},
	),
}
|
package filestore
import (
"context"
"io"
"strings"
"time"
"github.com/google/uuid"
)
// RevisionTags is a comma separated string of tags that refers to the revision.
type RevisionTags string

// sanitizeTag normalizes a caller-supplied tag: strips surrounding commas,
// keeps only the part before the first remaining comma, and trims whitespace.
func sanitizeTag(tag string) string {
	tag = strings.Trim(tag, ",")
	if i := strings.Index(tag, ","); i >= 0 {
		tag = tag[:i]
	}
	return strings.TrimSpace(tag)
}

// canonical returns the tag list with surrounding whitespace and commas removed.
func (tags RevisionTags) canonical() string {
	return strings.Trim(strings.TrimSpace(string(tags)), ",")
}

// AddTag adds a new tag to the tags string. Empty or duplicate tags leave the
// receiver's value unchanged.
func (tags RevisionTags) AddTag(tag string) RevisionTags {
	clean := sanitizeTag(tag)
	if clean == "" {
		return tags
	}
	existing := tags.canonical()
	if strings.Contains(","+existing+",", ","+clean+",") {
		return tags
	}
	return RevisionTags(strings.Trim(existing+","+clean, ","))
}

// RemoveTag removes tag from the tags string; unknown tags are a no-op.
func (tags RevisionTags) RemoveTag(tag string) RevisionTags {
	clean := sanitizeTag(tag)
	if clean == "" {
		return tags
	}
	existing := tags.canonical()
	joined := strings.Replace(","+existing+",", ","+clean+",", ",", 1)
	return RevisionTags(strings.Trim(joined, ","))
}

// List splits the tags string into a slice; an empty string yields an empty
// (non-nil) slice.
func (tags RevisionTags) List() []string {
	if len(tags) == 0 {
		return []string{}
	}
	return strings.Split(string(tags), ",")
}
// Revision is a snapshot of a file in the filestore, every file has at least one revision which is the current
// revision. File revisions is not applicable to directory file type.
type Revision struct {
	ID uuid.UUID
	// Tags is a comma separated string of tags that refer for a revision.
	Tags RevisionTags
	// IsCurrent flags if a revision is a current file revision.
	IsCurrent bool
	// Data is the revision's content; Checksum presumably verifies it —
	// confirm the checksum algorithm with the storage layer.
	Data     []byte
	Checksum string
	// FileID references the owning file.
	FileID    uuid.UUID
	CreatedAt time.Time
	UpdatedAt time.Time
}
// RevisionQuery performs different queries associated to a file revision.
type RevisionQuery interface {
	// GetData gets data of a revision.
	GetData(ctx context.Context) (io.ReadCloser, error)
	// SetCurrent sets a revision to be the current one.
	SetCurrent(ctx context.Context) (*Revision, error)
	// SetTags set tags of a revision.
	SetTags(ctx context.Context, tags RevisionTags) error
	// Delete deletes file revision.
	Delete(ctx context.Context) error
}
|
package client
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"golang.org/x/net/context/ctxhttp"
"github.com/terra-money/terra.go/msg"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/rest"
feeutils "github.com/terra-money/core/custom/auth/client/utils"
)
// EstimateFeeResWrapper - wrapper for estimate fee query.
// Mirrors the LCD /txs/estimate_fee response envelope: block height plus the
// actual estimation result.
type EstimateFeeResWrapper struct {
	Height msg.Int                  `json:"height"`
	Result feeutils.EstimateFeeResp `json:"result"`
}
// EstimateFee simulates gas and fee for a transaction by POSTing an
// estimate-fee request (Amino-encoded JSON) to the LCD's /txs/estimate_fee
// endpoint and decoding the wrapped response.
func (lcd LCDClient) EstimateFee(ctx context.Context, options CreateTxOptions) (res *feeutils.EstimateFeeResp, err error) {
	// Gas is set to "auto" so the node simulates the tx and derives the gas,
	// scaled by the client's configured gas adjustment.
	estimateReq := feeutils.EstimateFeeReq{
		BaseReq: rest.BaseReq{
			From:          msg.AccAddress(lcd.PrivKey.PubKey().Address()).String(),
			Memo:          options.Memo,
			ChainID:       lcd.ChainID,
			AccountNumber: options.AccountNumber,
			Sequence:      options.Sequence,
			TimeoutHeight: options.TimeoutHeight,
			Fees:          options.FeeAmount,
			GasPrices:     msg.NewDecCoins(lcd.GasPrice),
			Gas:           "auto",
			GasAdjustment: lcd.GasAdjustment.String(),
		},
		Msgs: options.Msgs,
	}
	reqBytes, err := lcd.EncodingConfig.Amino.MarshalJSON(estimateReq)
	if err != nil {
		return nil, sdkerrors.Wrap(err, "failed to marshal")
	}
	resp, err := ctxhttp.Post(ctx, lcd.c, lcd.URL+"/txs/estimate_fee", "application/json", bytes.NewBuffer(reqBytes))
	if err != nil {
		return nil, sdkerrors.Wrap(err, "failed to estimate")
	}
	defer resp.Body.Close()
	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, sdkerrors.Wrap(err, "failed to read response")
	}
	// Read the body before checking the status so the error can include it.
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("non-200 response code %d: %s", resp.StatusCode, string(out))
	}
	var response EstimateFeeResWrapper
	err = lcd.EncodingConfig.Amino.UnmarshalJSON(out, &response)
	if err != nil {
		return nil, sdkerrors.Wrap(err, "failed to unmarshal response")
	}
	return &response.Result, nil
}
|
package solr
const (
	// VERSION is the version string of this solr package.
	VERSION = "0.5"
)
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package format
import (
"gollum/core"
)
// Double formatter plugin
//
// Double is a formatter that duplicates a message and applies two different
// sets of formatters to both sides. After both messages have been processed,
// the value of the field defined as "source" by the double formatter will be
// copied from both copies and merged into the "target" field of the original
// message using a given separator.
//
// Parameters
//
// - Separator: This value sets the separator string placed between both parts.
// This parameter is set to ":" by default.
//
// - UseLeftStreamID: When set to "true", use the stream id of the left side
// (after formatting) as the streamID for the resulting message.
// This parameter is set to "false" by default.
//
// - Left: An optional list of formatters. The first copy of the message (left
// of the delimiter) is passed through these filters.
// This parameter is set to an empty list by default.
//
// - Right: An optional list of formatters. The second copy of the mssage (right
// of the delimiter) is passed through these filters.
// This parameter is set to an empty list by default.
//
// Examples
//
// This example creates a message of the form "<orig>|<hash>", where <orig> is
// the original console input and <hash> its hash.
//
// exampleConsumer:
// Type: consumer.Console
// Streams: "*"
// Modulators:
// - format.Double:
// Separator: "|"
// Right:
// - format.Identifier:
// Generator: hash
// Double duplicates a message, runs the two copies through the left/right
// formatter chains, and joins the results with separator (see the package
// comment above for the full contract). Tagged fields are presumably
// populated from plugin config by the gollum framework via their config
// tags — confirm against core.SimpleFormatter/PluginConfigReader.
type Double struct {
	core.SimpleFormatter `gollumdoc:"embed_type"`
	separator            []byte              `config:"Separator" default:":"`
	leftStreamID         bool                `config:"UseLeftStreamID" default:"false"`
	left                 core.FormatterArray `config:"Left"`
	right                core.FormatterArray `config:"Right"`
	// Target is the field receiving the merged result; read in Configure.
	Target string
}
// init registers the Double formatter with gollum's type registry so it can
// be instantiated by name from configuration.
func init() {
	core.TypeRegistry.Register(Double{})
}
// Configure initializes this formatter with values from a plugin config.
// Only Target is read explicitly here; the tagged fields (separator, stream
// flag, sub-formatter arrays) appear to be filled by the framework from their
// config tags — confirm with core.PluginConfigReader.
func (format *Double) Configure(conf core.PluginConfigReader) {
	format.Target = conf.GetString("Target", "")
}
// ApplyFormatter update message payload: it clones the message twice, runs
// each clone through the left/right formatter chains, merges their source
// data (separated by the configured separator) into the target field of the
// original message, and picks the resulting stream ID from the configured
// side. Returns the first sub-formatter error, leaving msg unmodified.
func (format *Double) ApplyFormatter(msg *core.Message) error {
	leftMsg := msg.Clone()
	rightMsg := msg.Clone()
	// apply sub-formatter
	if err := format.left.ApplyFormatter(leftMsg); err != nil {
		return err
	}
	if err := format.right.ApplyFormatter(rightMsg); err != nil {
		return err
	}
	// update content
	leftData := format.GetSourceDataAsBytes(leftMsg)
	rightData := format.GetSourceDataAsBytes(rightMsg)
	format.SetTargetData(msg, format.mergeData(leftData, rightData))
	// handle streamID
	if format.leftStreamID {
		msg.SetStreamID(leftMsg.GetStreamID())
	} else {
		msg.SetStreamID(rightMsg.GetStreamID())
	}
	// fin
	return nil
}
// mergeData concatenates leftContent, the configured separator and
// rightContent into one freshly allocated byte slice.
func (format *Double) mergeData(leftContent []byte, rightContent []byte) []byte {
	merged := make([]byte, 0, len(leftContent)+len(format.separator)+len(rightContent))
	merged = append(merged, leftContent...)
	merged = append(merged, format.separator...)
	return append(merged, rightContent...)
}
|
package main
import (
"fmt"
)
// main demonstrates defer semantics: deferred calls run LIFO at function
// return, closures capture variables by reference, and deferred calls with
// explicit arguments evaluate those arguments at the defer statement.
func main() {
	defer fmt.Println("...1...")
	fmt.Println("...2...")
	defer fmt.Println("...3...")
	a := 10
	b := 20
	// Parameterless anonymous function: captures a and b by reference, so
	// it observes their final values (100, 200).
	defer func() {
		fmt.Printf("func(): a = %d, b = %d\n", a, b)
	}()
	// Parameterized anonymous function: a and b are copied at this point,
	// so it observes the values at the time of the defer statement (10, 20).
	defer func(a, b int) {
		fmt.Printf("func(a, b): a = %d, b = %d\n", a, b)
	}(a, b)
	a = 100
	b = 200
	fmt.Printf("main: a = %d, b = %d\n", a, b)
	// Output:
	// ...2...
	// main: a = 100, b = 200
	// func(a, b): a = 10, b = 20
	// func(): a = 100, b = 200
	// ...3...
	// ...1...
	// Conclusions:
	// 1. Deferred calls run just before the function returns.
	// 2. Multiple defers execute in last-in-first-out order.
	// 3. Closures see the final values of captured outer variables.
	// 4. Deferred calls with parameters bind their arguments immediately.
}
|
package main
import (
"fmt"
"time"
)
// Address holds a single postal address.
type Address struct {
	HouseNumber      uint32
	Street           string
	HouseNumberAddOn string
	POBox            string
	ZipCode          string
	City             string
	Country          string
}
// VCard is a simple contact card with zero or more named addresses.
// NOTE(review): BirtDate looks like a typo for BirthDate; renaming would
// change the exported API, so it is left as is.
type VCard struct {
	BirtDate  time.Time
	FirstName string
	LastName  string
	NickName  string
	Photo     string
	// Addresses is keyed by a label such as "youth" or "now" (see main).
	Addresses map[string]*Address
}
// main builds a sample VCard with two labeled addresses and prints both the
// card and the individual addresses.
func main() {
	youth := &Address{12, "Elfenstraat", "", "", "2600", "Mechelen", "Belgie"}
	now := &Address{28, "Heideland", "", "", "2640", "Mortsel", "Belgie"}
	addressBook := map[string]*Address{
		"youth": youth,
		"now":   now,
	}
	born := time.Date(1956, 1, 17, 15, 4, 5, 0, time.Local)
	card := &VCard{born, "Ivo", "BalBaert", "", "example.png", addressBook}
	fmt.Printf("Here is the full VCard: %v\n", card)
	fmt.Printf("My Addresses are:\n%v\n%v\n", youth, now)
}
|
package main
import (
"log"
"math/rand"
"testing"
)
// TestDoTransactionSuccess transfers the full balance (10) from user 1 to
// user 2 and expects final balances of 0 and 20.
func TestDoTransactionSuccess(t *testing.T) {
	system := NewSystem()
	users := []*User{
		{
			ID:   1,
			Name: "Tom",
			Cash: 10,
		},
		{
			ID:   2,
			Name: "Jerry",
			Cash: 10,
		},
	}
	transcation := &Transcation{
		TranscationID: 1,
		FromID:        1,
		ToID:          2,
		Cash:          10,
	}
	// Expected balances after the transfer, in users order.
	finalstate := []int{0, 20}
	for _, user := range users {
		if err := system.AddUser(user); err != nil {
			log.Printf("add user failed %v", err)
		}
	}
	if err := system.DoTransaction(transcation); err != nil {
		log.Printf("do transcation %d failed %v", transcation.TranscationID, err)
	}
	for i := range users {
		if users[i].Cash != finalstate[i] {
			t.Fatalf("user %d expected cash %v got %v", users[i].ID, finalstate[i], users[i].Cash)
		}
	}
}
// TestDoTransactionFail attempts to transfer 15 from a user holding only
// 10; the transaction is expected to fail and leave both balances at 10.
func TestDoTransactionFail(t *testing.T) {
	system := NewSystem()
	users := []*User{
		{
			ID:   1,
			Name: "Tom",
			Cash: 10,
		},
		{
			ID:   2,
			Name: "Jerry",
			Cash: 10,
		},
	}
	transcation := &Transcation{
		TranscationID: 1,
		FromID:        1,
		ToID:          2,
		Cash:          15,
	}
	// Balances must be unchanged after the failed transfer.
	finalstate := []int{10, 10}
	for _, user := range users {
		if err := system.AddUser(user); err != nil {
			log.Printf("add user failed %v", err)
		}
	}
	if err := system.DoTransaction(transcation); err != nil {
		log.Printf("do transcation %d failed %v", transcation.TranscationID, err)
	}
	for i := range users {
		if users[i].Cash != finalstate[i] {
			t.Fatalf("user %d expected cash %v got %v", users[i].ID, finalstate[i], users[i].Cash)
		}
	}
}
// TestUndoTransaction seeds a hand-written undo log with interleaved
// records of transactions 1 and 2 and then undoes transaction 1. Judging
// by the expected balances, UndoTranscation replays the log backwards,
// restoring the Cash stored in each UPDATE record, until it reaches the
// START record of the requested transaction — so the later records of
// transaction 2 are rolled back as well. Confirm against UndoTranscation.
func TestUndoTransaction(t *testing.T) {
	system := NewSystem()
	users := []*User{
		{
			ID:   1,
			Name: "Tom",
			Cash: 10,
		},
		{
			ID:   2,
			Name: "Jerry",
			Cash: 10,
		},
		{
			ID:   3,
			Name: "Spike",
			Cash: 15,
		},
		{
			ID:   4,
			Name: "Bob",
			Cash: 20,
		},
	}
	undolog := []*Record{
		{
			Op:            START,
			TranscationId: 1,
			UserId:        0,
			Cash:          0,
		},
		{
			Op:            UPDATE,
			TranscationId: 1,
			UserId:        1,
			Cash:          15,
		},
		{
			Op:            START,
			TranscationId: 2,
			UserId:        0,
			Cash:          0,
		},
		{
			Op:            UPDATE,
			TranscationId: 2,
			UserId:        3,
			Cash:          30,
		},
		{
			Op:            UPDATE,
			TranscationId: 1,
			UserId:        2,
			Cash:          15,
		},
		{
			Op:            UPDATE,
			TranscationId: 2,
			UserId:        4,
			Cash:          35,
		},
	}
	// Expected balances after undoing transaction 1 (users order 1..4).
	finalstate := []int{15, 15, 30, 35}
	for _, user := range users {
		if err := system.AddUser(user); err != nil {
			log.Printf("add user failed %v", err)
		}
	}
	system.Undolog = undolog
	if err := system.UndoTranscation(1); err != nil {
		log.Printf("undo transcation %d failed", 1)
	}
	for i := range users {
		if users[i].Cash != finalstate[i] {
			t.Fatalf("user %d expected cash %v got %v", users[i].ID, finalstate[i], users[i].Cash)
		}
	}
}
// TestTransactionMany fires 20 random transfers concurrently, verifies that
// the total amount of cash in the system is conserved, then undoes
// transaction 1 and expects every user to return to the original balance
// (consistent with the replay-backwards undo semantics exercised above).
func TestTransactionMany(t *testing.T) {
	system := NewSystem()
	users := []*User{
		{
			ID:   1,
			Name: "Tom",
			Cash: 100,
		},
		{
			ID:   2,
			Name: "Jerry",
			Cash: 100,
		},
		{
			ID:   3,
			Name: "Spike",
			Cash: 150,
		},
		{
			ID:   4,
			Name: "Bob",
			Cash: 200,
		},
		{
			ID:   5,
			Name: "Alice",
			Cash: 200,
		},
	}
	originalstate := []int{100, 100, 150, 200, 200}
	transcations := make([]*Transcation, 0, 20)
	maxUserID := 5
	// Build 20 random transfers with distinct from/to users.
	for i := 1; i <= 20; i++ {
		FromID := rand.Intn(maxUserID) + 1
		ToID := rand.Intn(maxUserID) + 1
		for FromID == ToID {
			ToID = rand.Intn(maxUserID) + 1
		}
		transcations = append(transcations, &Transcation{
			TranscationID: i,
			FromID:        FromID,
			ToID:          ToID,
			Cash:          rand.Intn(20) + 1,
		})
	}
	originalSum := 0
	for _, user := range users {
		originalSum += user.Cash
		if err := system.AddUser(user); err != nil {
			log.Printf("add user failed %v", err)
		}
	}
	ch := make(chan int)
	// TODO: do transcation parallel
	// NOTE(review): the closure parameter t shadows the *testing.T in the
	// enclosing scope — harmless here but easy to misread.
	for _, transcation := range transcations {
		go func(t *Transcation) {
			if err := system.DoTransaction(t); err != nil {
				log.Printf("do transcation %d failed %v", t.TranscationID, err)
			}
			ch <- 1
		}(transcation)
	}
	// wait for transactions complete
	for i := 0; i < len(transcations); i++ {
		<-ch
	}
	// Cash must be conserved regardless of which transfers succeeded.
	newSum := 0
	for _, user := range users {
		newSum += user.Cash
	}
	if originalSum != newSum {
		t.Fatalf("system in non-consistent state after many transactions")
	}
	if err := system.UndoTranscation(1); err != nil {
		log.Printf("undo transcation failed %v", err)
	}
	for i := range users {
		if users[i].Cash != originalstate[i] {
			t.Fatalf("user %d expected cash %v got %v", users[i].ID, originalstate[i], users[i].Cash)
		}
	}
}
// TestGcUndoLog builds an undo log whose first six records predate a
// completed checkpoint (STARTCHECKPOINT ... ENDCHECKPOINT) and expects
// gcUndoLog to shrink the 14 records to 10.
// NOTE(review): the exact retention rule (why 10 rather than the 8 records
// from STARTCHECKPOINT on) is defined by gcUndoLog, which is not visible
// here — confirm against its implementation.
func TestGcUndoLog(t *testing.T) {
	system := NewSystem()
	undolog := []*Record{
		{
			Op:            START,
			TranscationId: 1,
			UserId:        0,
			Cash:          0,
		},
		{
			Op:            UPDATE,
			TranscationId: 1,
			UserId:        1,
			Cash:          15,
		},
		{
			Op:            START,
			TranscationId: 2,
			UserId:        0,
			Cash:          0,
		},
		{
			Op:            UPDATE,
			TranscationId: 2,
			UserId:        3,
			Cash:          30,
		},
		{
			Op:            UPDATE,
			TranscationId: 1,
			UserId:        2,
			Cash:          15,
		},
		{
			Op:            UPDATE,
			TranscationId: 2,
			UserId:        4,
			Cash:          35,
		},
		// Checkpoint opens here; everything above is collectable once the
		// checkpoint completes.
		{
			Op:            STARTCHECKPOINT,
			TranscationId: 0,
			UserId:        0,
			Cash:          0,
		},
		{
			Op:            START,
			TranscationId: 3,
			UserId:        0,
			Cash:          0,
		},
		{
			Op:            UPDATE,
			TranscationId: 3,
			UserId:        1,
			Cash:          15,
		},
		{
			Op:            START,
			TranscationId: 4,
			UserId:        0,
			Cash:          0,
		},
		{
			Op:            UPDATE,
			TranscationId: 4,
			UserId:        3,
			Cash:          30,
		},
		{
			Op:            UPDATE,
			TranscationId: 3,
			UserId:        2,
			Cash:          15,
		},
		{
			Op:            UPDATE,
			TranscationId: 4,
			UserId:        4,
			Cash:          35,
		},
		{
			Op:            ENDCHECKPOINT,
			TranscationId: 0,
			UserId:        0,
			Cash:          0,
		},
	}
	system.Undolog = undolog
	system.gcUndoLog()
	expectedLen := 10
	if len(system.Undolog) != expectedLen {
		t.Fatalf("gcUndoLog not work expected len %d current len %d", expectedLen, len(system.Undolog))
	}
}
|
package core
import (
"er"
"fwb"
"sgs"
)
// prtData holds the per-phase state of the Round Turns phase.
type prtData struct {
	hotIndex int // index into me.turnOrder of the currently active player
	timer    int // handle of the pending turn timeout timer; -1 when none
	turn     int // 1-based count of turns played so far this phase
}
// prtInit enters the Round Turns phase: it resets the phase state, wires
// the ACTION command to prtOnAction and starts the first turn. hotIndex
// starts at the last player so the first nextTurn advances to index 0.
func prtInit(me *gameImp) *er.Err {
	me.lg.Dbg("Enter Round Turns phase")
	pd := &prtData{
		hotIndex: len(me.app.GetPlayers()) - 1,
		timer:    -1,
		turn:     0,
	}
	me.pd = pd
	me.setDCE(fwb.CMD_ACTION, prtOnAction)
	return nextTurn(me)
}
// findNextPlayer advances hotIndex to the next player in turn order that
// still has pawns. It returns the new hotIndex, or -1 when a full cycle
// found no player with pawns (hotIndex remains modified in that case).
func findNextPlayer(me *gameImp) int {
	pd := me.pd.(*prtData)
	pn := len(me.app.GetPlayers())
	pd.hotIndex = (pd.hotIndex + 1) % pn
	pid := me.turnOrder[pd.hotIndex]
	pindex := me.gd.GetPDIndex(pid)
	// Remember the first candidate so a full wrap-around can be detected.
	trivialHotIndex := pd.hotIndex
	for me.gd.PData[pindex][fwb.PD_PAWNS] <= 0 {
		pd.hotIndex = (pd.hotIndex + 1) % pn
		if pd.hotIndex == trivialHotIndex {
			return -1
		}
		pid = me.turnOrder[pd.hotIndex]
		pindex = me.gd.GetPDIndex(pid)
	}
	return pd.hotIndex
}
// nextTurn selects the next player with pawns and announces the new turn to
// all players. When no eligible player remains, the game moves on to the
// Rounds Settlement phase. Any previously running turn timer is replaced
// by a fresh timeout of 60000 (presumably milliseconds — confirm against
// setTimer).
func nextTurn(me *gameImp) *er.Err {
	pd := me.pd.(*prtData)
	nextpi := findNextPlayer(me)
	if nextpi < 0 {
		return me.gotoPhase(_P_ROUNDS_SETTLEMENT)
	}
	pd.turn++
	me.alg.Inf("Turn %v", pd.turn)
	if pd.timer >= 0 {
		me.unsetTimer(pd.timer)
	}
	pd.timer = me.setTimer(60000, prtOnTimeOut)
	return me.app.SendAllPlayers(sgs.Command{
		ID:      fwb.CMD_START_TURN,
		Who:     fwb.CMD_WHO_APP,
		Payload: me.turnOrder[pd.hotIndex],
	})
}
// prtOnTimeOut fires when the current player's turn timer expires. It sends
// a START_TURN command to the mock player behind the current seat —
// presumably letting an automated player act in the human's stead; confirm
// against SendToMockPlayer. The send result is deliberately discarded.
func prtOnTimeOut(me *gameImp, command sgs.Command) *er.Err {
	pd := me.pd.(*prtData)
	me.app.SendToMockPlayer(me.turnOrder[pd.hotIndex], sgs.Command{
		ID:      fwb.CMD_START_TURN,
		Who:     fwb.CMD_WHO_APP,
		Payload: me.turnOrder[pd.hotIndex],
	})
	return nil
}
// prtOnAction handles an ACTION command from the currently active player:
// it verifies the sender, parses and validates the action, applies it to
// the game data, broadcasts the committed action and starts the next turn.
// Invalid or failed actions are rejected back to the sender only.
func prtOnAction(me *gameImp, command sgs.Command) *er.Err {
	pd := me.pd.(*prtData)
	// Only the player whose turn it is may act.
	if command.Who != me.turnOrder[pd.hotIndex] {
		return er.Throw(fwb.E_CMD_INVALID_CLIENT, er.EInfo{
			"details": "Command source is not a valid client ID, or the client is not the currently enabled player",
			"ID": command.Who,
			"current player": me.turnOrder[pd.hotIndex],
		}).To(me.lg)
	}
	action, err := me.ap.Parse(command)
	if err.Importance() >= er.IMPT_THREAT || action == nil {
		return err
	}
	// Static validation failure: reject, but keep the current turn running.
	if !action.ValidateAgainst(&me.gd) {
		return err.Push(me.app.SendToPlayer(command.Who, sgs.Command{
			ID:      fwb.CMD_ACTION_REJECTED,
			Who:     fwb.CMD_WHO_APP,
			Payload: command.Payload,
		}))
	}
	err = err.Push(action.Do(&me.gd))
	printAction(me, action)
	// Application failure: reject as well.
	if err.Importance() >= er.IMPT_THREAT {
		return err.Push(me.app.SendToPlayer(command.Who, sgs.Command{
			ID:      fwb.CMD_ACTION_REJECTED,
			Who:     fwb.CMD_WHO_APP,
			Payload: command.Payload,
		}))
	}
	printTurnInfo(me)
	// NOTE(review): Who is set from command.ID here while the rejection
	// paths use command.Who — confirm this difference is intentional.
	err = err.Push(me.app.SendAllPlayers(sgs.Command{
		ID:      fwb.CMD_ACTION_COMMITTED,
		Who:     command.ID,
		Payload: me.gd,
	}))
	return err.Push(nextTurn(me))
}
// printAction logs a committed action at info level.
// NOTE(review): action.String() is passed as the format argument — if Inf
// is printf-style, a literal '%' in the action text would be misrendered;
// consider me.alg.Inf("%s", action.String()).
func printAction(me *gameImp, action fwb.Action) {
	me.alg.Inf(action.String())
}
// printTurnInfo logs each player's data row (minus the leading client ID
// element) at info level.
func printTurnInfo(me *gameImp) {
	me.alg.Inf("Player Data")
	for _, p := range me.gd.PData {
		me.alg.Inf("  Player %v: %v", me.app.GetPlayer(p[fwb.PD_CLIENT_ID]).Name(), p[fwb.PD_CLIENT_ID+1:])
	}
}
|
// Copyright 2018 Andreas Pannewitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package core
import (
"time"
)
// testanyType returns a test fixture: the fixture name "any" and a mixed
// list of anyType values covering an int, a Cardinal, time.Weekday values,
// a plain string and an ID.
// NOTE(review): time.Monday appears twice — presumably deliberate, to
// cover duplicate entries; confirm.
func testanyType() (string, []anyType) {
	name := "any"
	data := []anyType{
		int(4),
		Cardinal(7),
		time.Monday,
		time.Monday,
		"Thursday",
		ID("Friday"),
	}
	return name, data
}
|
package main
import (
"context"
"flag"
"fmt"
"log"
"net"
"path/filepath"
pb "nekonenene/hello/pb"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/reflection"
)
// Command line flags controlling transport security of the gRPC server.
var (
	tls      *bool   = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP")
	certFile *string = flag.String("cert_file", "", "The TLS cert file")
	keyFile  *string = flag.String("key_file", "", "The TLS key file")
)
const (
	// port is the TCP port the gRPC server listens on.
	port = 50051
)
// server is used to implement hello.GreeterServer. It carries no state; all
// data comes from the incoming request.
type server struct{}
// SayHello implements hello.GreeterServer: it logs the incoming name and
// age and replies with a one-line summary string.
func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
	log.Printf("Received: %v, %v", in.Name, in.Age)
	return &pb.HelloReply{Message: fmt.Sprintf("%s is %d years old.", in.Name, in.Age)}, nil
}
// main starts the Greeter gRPC server on the fixed port, optionally with
// TLS when the -tls flag (plus -cert_file and -key_file) is given, and
// serves until the process exits.
func main() {
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	var opts []grpc.ServerOption
	// NOTE(review): flags are parsed after the listener is created; this is
	// harmless because the port is a constant, but a future flag-driven
	// address would silently be ignored.
	flag.Parse()
	if *tls {
		if *certFile == "" || *keyFile == "" {
			log.Fatalf("Please specify cert_file and key_file.")
		}
		*certFile, _ = filepath.Abs(*certFile)
		*keyFile, _ = filepath.Abs(*keyFile)
		creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
		if err != nil {
			log.Fatalf("Failed to generate credentials %v", err)
		}
		log.Println("Generated credentials")
		opts = []grpc.ServerOption{grpc.Creds(creds)}
	}
	grpcServer := grpc.NewServer(opts...)
	pb.RegisterGreeterServer(grpcServer, &server{})
	log.Printf("Server started at localhost:%d\n", port)
	// Register reflection service on gRPC server (enables grpcurl etc.).
	reflection.Register(grpcServer)
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
// Package fftypes contains core types used throughout the functions framework
// (ff) implementation.
package fftypes
import (
"cloud.google.com/go/functions/metadata"
)
// BackgroundEvent is the incoming payload to functions framework to trigger
// Background Functions (https://cloud.google.com/functions/docs/writing/background).
// These fields are converted into parameters passed to the user function.
type BackgroundEvent struct {
	// Data is the event payload; its shape depends on the trigger type.
	Data interface{} `json:"data"`
	// Metadata is the event metadata decoded from the "context" field.
	Metadata *metadata.Metadata `json:"context"`
}
|
package raft
import (
"log"
"net"
"net/http"
"net/rpc"
"sync"
)
// ClientEnd is a lazily connected RPC endpoint for a single peer. The
// embedded mutex guards rpcClient.
type ClientEnd struct {
	addr      string      // peer address to dial
	rpcClient *rpc.Client // cached connection; nil until first use
	sync.Mutex
}
// call invokes the named RPC method on the peer, dialing the HTTP/RPC
// connection on first use and caching it for later calls.
//
// NOTE(review): two goroutines racing past the nil check may both dial; the
// later one overwrites the cached client and the other connection leaks. A
// cached client that fails is also never reset for redial. Confirm whether
// callers tolerate this.
func (end *ClientEnd) call(method string, args interface{}, reply interface{}) error {
	var client *rpc.Client
	var err error
	end.Lock()
	client = end.rpcClient
	end.Unlock()
	if client == nil {
		client, err = rpc.DialHTTP("tcp", end.addr)
		if err != nil {
			return err
		}
		end.Lock()
		end.rpcClient = client
		end.Unlock()
	}
	if err := client.Call(method, args, reply); err != nil {
		return err
	}
	return nil
}
// startRPCServer registers this Raft instance on a fresh net/rpc server and
// serves it over HTTP on the peer's configured address. The call blocks
// forever; listen or serve failures are fatal.
// NOTE(review): the error from server.Register is discarded — confirm
// registration cannot fail for this type.
func (rf *Raft) startRPCServer() {
	addr := rf.peers[rf.name].addr
	server := rpc.NewServer()
	server.Register(rf)
	var err error
	var listener net.Listener
	if listener, err = net.Listen("tcp", addr); err != nil {
		log.Fatal(err)
	}
	if err = http.Serve(listener, server); err != nil {
		log.Fatal(err)
	}
}
// RequestVote handles the Raft RequestVote RPC (§5.2, §5.4): it rejects
// stale-term candidates, steps down on a newer term, and grants the vote
// when it has not yet voted for someone else this term and the candidate's
// log is at least as up to date as the local log.
func (rf *Raft) RequestVote(req *RequestVoteReq, resp *RequestVoteResp) (err error) {
	rf.Lock()
	defer rf.Unlock()
	resp.Term = rf.currentTerm
	resp.VoteGranted = true
	if req.GetTerm() < rf.currentTerm {
		// Reply false if term < currentTerm (§5.2).
		resp.VoteGranted = false
		return
	}
	if req.GetTerm() > rf.currentTerm {
		// If an RPC request or response contains term T > currentTerm,
		// set currentTerm = T and convert to follower (§5.1).
		rf.beFollower(req.Term)
	}
	// Grant the vote if votedFor is null or candidateId and the candidate's
	// log is at least as up to date as ours (§5.2, §5.4). Up-to-dateness is
	// decided by comparing the last entries' terms; on equal terms the
	// longer log wins.
	lastLogTerm := rf.getLastLogTerm()
	lastLogIndex := rf.getLastLogIndex()
	if (rf.votedFor == NULL || rf.votedFor == req.GetCandidateName()) &&
		(req.GetLastLogTerm() > lastLogTerm ||
			(req.GetLastLogTerm() == lastLogTerm && req.GetLastLogIndex() >= lastLogIndex)) {
		rf.votedFor = req.CandidateName
		sendCh(rf.voteCh)
	} else {
		resp.VoteGranted = false
	}
	return
}
// AppendEntries handles the Raft AppendEntries RPC: heartbeat, log
// consistency check and log replication. On a log mismatch it fills
// ConflictIndex/ConflictTerm so the leader can back up nextIndex quickly.
func (rf *Raft) AppendEntries(req *AppendEntriesReq, resp *AppendEntriesResp) (err error) {
	rf.Lock()
	defer rf.Unlock()
	dlog("raft %s AppendEntries %v", rf.name, req)
	resp.Term = rf.currentTerm
	resp.Success = true
	resp.ConflictIndex = -1
	resp.ConflictTerm = -1
	if req.GetTerm() < rf.currentTerm {
		// Reply false if term < currentTerm (§5.1).
		resp.Success = false
		return
	}
	if req.GetTerm() > rf.currentTerm {
		// A newer term forces us to become a follower (§5.1).
		rf.beFollower(req.GetTerm())
	}
	if rf.state != Follower {
		rf.beFollower(req.GetTerm())
	}
	sendCh(rf.appendLogCh)
	if req.GetPrevLogIndex() < rf.lastIncludedIndex {
		// The sync point lies inside our snapshot; ask the leader to resync
		// from index 0 (i.e. send the snapshot over).
		resp.Success = false
		resp.ConflictIndex = 0
		return
	} else if req.GetPrevLogIndex() == rf.lastIncludedIndex {
		if req.GetPrevLogTerm() != rf.lastIncludedTerm {
			// Term conflict at the snapshot boundary; resync from 0.
			resp.Success = false
			resp.ConflictIndex = 0
			return
		}
	} else {
		// Our log does not reach prevLogIndex at all.
		if rf.getLastLogIndex() < req.GetPrevLogIndex() {
			resp.Success = false
			resp.ConflictIndex = int64(rf.getLastLogIndex())
			return
		}
		// The entry at prevLogIndex disagrees with prevLogTerm.
		if rf.getLog(req.GetPrevLogIndex()).Term != req.GetPrevLogTerm() {
			resp.Success = false
			resp.ConflictTerm = int64(rf.getLog(req.GetPrevLogIndex()).Term)
			// Find the first index carrying the conflicting term; the worst
			// case is prevLogIndex itself.
			// Fix: the scan previously inspected the entry at prevLogIndex
			// on every iteration instead of the entry at index, so the loop
			// condition never varied and ConflictIndex was always the first
			// scanned position.
			for index := rf.lastIncludedIndex + 1; index <= req.GetPrevLogIndex(); index++ {
				if int64(rf.getLog(index).Term) == resp.ConflictTerm {
					resp.ConflictIndex = int64(index)
					break
				}
			}
			return
		}
	}
	// If an existing entry conflicts with a new one (same index, different
	// term), delete it and all that follow (§5.3). This implementation is
	// blunt: it always truncates at prevLogIndex+1 and appends the leader's
	// entries.
	if len(req.GetEntries()) > 0 {
		index := req.GetPrevLogIndex() + 1
		logPos := rf.index2LogPos(index)
		rf.log = rf.log[:logPos]
		rf.log = append(rf.log, req.GetEntries()...)
		rf.saveStateToDisk()
	}
	if req.GetLeaderCommit() > rf.commitIndex {
		// commitIndex = min(leaderCommit, index of last new entry).
		rf.commitIndex = min(req.GetLeaderCommit(), rf.getLastLogIndex())
		sendCh(rf.commitNotifyCh)
	}
	return
}
// InstallSnapshot handles the Raft InstallSnapshot RPC: it replaces the
// covered log prefix (or the whole log) with the leader's snapshot,
// persists state and snapshot, advances commit/apply indices and hands the
// snapshot to the state machine via applyMsgCh.
func (rf *Raft) InstallSnapshot(req *InstallSnapshotReq, resp *InstallSnapshotResp) (err error) {
	rf.Lock()
	defer rf.Unlock()
	resp.Term = rf.currentTerm
	if req.GetTerm() < rf.currentTerm {
		// Reply immediately if term < currentTerm.
		return
	}
	if req.GetTerm() > rf.currentTerm {
		// A newer term forces us to become a follower (§5.1).
		rf.beFollower(req.Term)
	}
	sendCh(rf.appendLogCh)
	// Ignore snapshots that do not extend past our current snapshot.
	if req.GetLastIncludedIndex() <= rf.lastIncludedIndex {
		return
	}
	if req.GetLastIncludedIndex() >= rf.getLastLogIndex() {
		// The snapshot covers the whole local log: discard it entirely.
		rf.log = make([]*LogEntry, 0)
	} else {
		// Entries exist beyond the snapshot: keep them only if the entry at
		// the snapshot boundary agrees on term.
		curLog := rf.getLog(req.GetLastIncludedIndex())
		if curLog.Term != req.GetLastIncludedTerm() {
			// Term conflict: discard the whole log.
			rf.log = make([]*LogEntry, 0)
		} else {
			// Retain the entries following the snapshot boundary.
			logPos := rf.index2LogPos(req.GetLastIncludedIndex())
			rf.log = rf.log[logPos+1:]
			//logList := make([]*LogEntry, 0)
			//logList = append(logList, rf.log[logPos:]...)
			//rf.log = logList
			dlog("snapshot pos %d logs %v", logPos, rf.log)
		}
	}
	rf.lastIncludedIndex, rf.lastIncludedTerm = req.GetLastIncludedIndex(), req.GetLastIncludedTerm()
	rf.saveStateToDisk()
	rf.saveSnapshotToDisk(req.GetData())
	rf.commitIndex = max(rf.commitIndex, rf.lastIncludedIndex)
	rf.lastApplied = max(rf.lastApplied, rf.lastIncludedIndex)
	// Nothing to apply if the state machine is already at/past the snapshot.
	if rf.lastApplied > rf.lastIncludedIndex {
		return
	}
	rf.applyMsgCh <- ApplyMsg{
		CommandValid: false,
		Snapshot:     req.GetData(),
	}
	return
}
// min returns the smaller of two uint64 values.
func min(x, y uint64) uint64 {
	if y < x {
		return y
	}
	return x
}
// max returns the larger of two uint64 values.
func max(x, y uint64) uint64 {
	if y > x {
		return y
	}
	return x
}
|
package spotty
import (
"os"
)
// Spotify API credentials and target identifiers, read once from the
// environment at package load time. Each is empty if the variable is unset.
var SPOTIFY_ID = os.Getenv("SPOTIFY_ID")
var SPOTIFY_SECRET = os.Getenv("SPOTIFY_SECRET")
var SPOTIFY_USER_ID = os.Getenv("SPOTIFY_USER_ID")
var SPOTIFY_PLAYLIST_ID = os.Getenv("SPOTIFY_PLAYLIST_ID")
|
package admin
import (
"cwengo.com/models"
/*"fmt"*/
/*"reflect"*/
/*"encoding/base64"*/
"github.com/astaxie/beego"
"strconv"
"strings"
)
// TopicController handles the admin blog-topic pages: create, edit and
// delete.
type TopicController struct {
	beego.Controller
}
// Prepare runs before every action and redirects anonymous visitors to the
// admin login page.
// NOTE(review): the bare return exits Prepare only — confirm whether beego
// needs this.StopRun() here to prevent the action from still running.
func (this *TopicController) Prepare() {
	userid := this.GetSession("userid")
	username := this.GetSession("username")
	if userid == nil || username == nil {
		this.Ctx.Redirect(302, "/admin/login")
		return
	}
}
// Get renders the "add topic" form, populated with all categories and
// labels for the selection widgets. Model errors are logged and the page is
// rendered regardless.
func (this *TopicController) Get() {
	this.Layout = "admin/layout.html"
	this.TplNames = "admin/Tpl/T.addTopic.tpl"
	this.Data["isAddTopic"] = true
	this.Data["Username"] = this.GetSession("username")
	var err error
	this.Data["Categories"], err = models.GetAllCategories()
	if err != nil {
		beego.Error(err)
	}
	this.Data["Labels"], err = models.GetAllLabels()
	if err != nil {
		beego.Error(err)
	}
}
// Post creates a new topic from the submitted form fields and redirects
// back to the topic list. Errors are only logged; the redirect happens
// either way.
func (this *TopicController) Post() {
	title := this.Input().Get("title")
	category := this.Input().Get("category")
	labels := this.GetStrings("label[]")
	/*content, _ := base64.StdEncoding.DecodeString(this.GetString("content"))*/
	content := this.Input().Get("content")
	summery := this.Input().Get("summery")
	/*fmt.Println(content, summery)*/
	/*return*/
	err := models.AddTopic(title, category, summery, content, labels)
	if err != nil {
		beego.Error(err)
	}
	this.Redirect("/admin/topic", 302)
}
// DelTopic deletes the topic identified by the "topic_id" query parameter
// and replies with a small JSON status object: status 1 on success, -1 when
// the id is missing or the model reports an error.
func (this *TopicController) DelTopic() {
	topic_id := this.Input().Get("topic_id")
	var data map[string]interface{}
	data = make(map[string]interface{})
	if topic_id == "" {
		data["status"] = -1
		data["msg"] = "博客Id不能为空"
		this.Ctx.Output.Json(data, true, true)
		return
	}
	err := models.DelTopic(topic_id)
	if err != nil {
		beego.Error(err)
		data["status"] = -1
		data["msg"] = "删除出现错误"
		this.Ctx.Output.Json(data, true, true)
		return
	}
	data["status"] = 1
	data["msg"] = "删除成功"
	this.Ctx.Output.Json(data, true, true)
	return
}
// EditTopicShow renders the edit form for the topic named by the "tid"
// query parameter. Categories and labels are flagged IsSelected according
// to the topic's current values; an empty tid renders the error template.
func (this *TopicController) EditTopicShow() {
	tid := this.Input().Get("tid")
	this.Data["Username"] = this.GetSession("username")
	this.Data["toplicList"] = true
	if tid == "" {
		this.Layout = "admin/layout.html"
		this.TplNames = "admin/Tpl/T.error.tpl"
	} else {
		this.Layout = "admin/layout.html"
		this.TplNames = "admin/Tpl/T.editTopic.tpl"
		topic, err := models.GetTopic(tid)
		if err != nil {
			beego.Error(err)
			this.Redirect("/admin/home", 302)
			return
		}
		this.Data["Topic"] = topic
		Categories, err := models.GetAllCategories()
		if err != nil {
			beego.Error(err)
		}
		// Pre-select the topic's current category.
		for _, v := range Categories {
			cate, _ := strconv.Atoi(topic.Category)
			if v.Id == int64(cate) {
				v.IsSelected = true
			} else {
				v.IsSelected = false
			}
		}
		this.Data["Categories"] = Categories
		// Pre-select the topic's labels; topic.Lables stores the label ids
		// space-separated.
		AllLabels, err := models.GetAllLabels()
		Lables := strings.Split(topic.Lables, " ")
		if len(Lables) > 0 {
			for _, v := range AllLabels {
				v.IsSelected = false
				for _, s := range Lables {
					lab, _ := strconv.Atoi(s)
					if v.Id == int64(lab) {
						v.IsSelected = true
						break
					}
				}
			}
		}
		this.Data["Labels"] = AllLabels
		// NOTE(review): this error is checked only after AllLabels has
		// already been used above; consider checking it right after
		// GetAllLabels returns.
		if err != nil {
			beego.Error(err)
		}
	}
}
// EditTopicAct applies the submitted edit form to the topic identified by
// "tid" and redirects to the admin home page. Model errors are only logged.
func (this *TopicController) EditTopicAct() {
	tid := this.Input().Get("tid")
	title := this.Input().Get("title")
	category := this.Input().Get("category")
	labels := this.GetStrings("label[]")
	summery := this.Input().Get("summery")
	content := this.Input().Get("content")
	err := models.EditTopic(tid, title, category, summery, content, labels)
	if err != nil {
		beego.Error(err)
	}
	this.Redirect("/admin/home", 302)
}
|
package main
import (
"fmt"
"os"
"os/signal"
"strings"
"syscall"
"exchanges"
"github.com/bwmarrin/discordgo"
)
// prefix is the chat command that triggers a price lookup.
const prefix = "!price"
// token is the Discord bot token; secret is populated in init but not used
// in this file.
var token, secret string
// init loads the bot credentials from the environment.
func init() {
	token = os.Getenv("APP_TOKEN")
	secret = os.Getenv("SECRET")
}
// main connects the Discord bot, installs the message handler and blocks
// until a termination signal arrives.
func main() {
	dg, err := discordgo.New("Bot " + token)
	if err != nil {
		fmt.Println("Discord connection error:", err)
		return
	}
	fmt.Println("Discord connected")
	dg.AddHandler(messageCreated)
	err = dg.Open()
	if err != nil {
		fmt.Println("Discord connection error:", err)
		return
	}
	// Waiting for termination signal.
	// NOTE(review): os.Kill (SIGKILL) cannot be caught; listing it here is
	// harmless but has no effect.
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
	<-sc
	fmt.Println("Bye bye")
	dg.Close()
}
// messageCreated handles incoming Discord messages. Messages starting with
// the "!price" prefix are parsed as "!price <exchange> <pair>" and answered
// with the latest price from the requested exchange. Messages sent by the
// bot itself are ignored.
func messageCreated(s *discordgo.Session, m *discordgo.MessageCreate) {
	if m.Author.ID == s.State.User.ID {
		return
	}
	inMsg := strings.ToLower(m.Content)
	if strings.HasPrefix(inMsg, prefix) {
		suffix := strings.Trim(inMsg[len(prefix):], " ")
		// Split on the first space. The original code sliced with
		// strings.Index and panicked on messages like "!price btc" that
		// contain no space after the exchange name; now the pair is simply
		// empty in that case.
		parts := strings.SplitN(suffix, " ", 2)
		exchange := parts[0]
		pair := ""
		if len(parts) == 2 {
			pair = strings.Trim(parts[1], " ")
		}
		s.ChannelMessageSend(m.ChannelID, exchanges.GetLastestPrice(exchange, pair))
	}
}
|
package tdt
import (
"fmt"
"testing"
)
// TestWithNoAttachedFunc iterates a table of cases and prints each one.
// The original body called fmt.Errorf, which only constructs an error value
// and discards it — nothing was ever printed or reported (go vet flags
// this). fmt.Printf actually emits the output.
func TestWithNoAttachedFunc(t *testing.T) {
	testCases := []struct {
		name string
		arg  string
		want string
	}{
		{"name1", "arg1", "want1"},
	}
	for _, tc := range testCases {
		fmt.Printf("Name: %v\nArg: %v\nWant: %v\n", tc.name, tc.arg, tc.want)
	}
}
|
package main
import (
"github/mariomang/catrouter"
"io/mariomang/github/consts"
"io/mariomang/github/controller"
)
// main is the SnowFlake program entry point: it builds the catrouter app,
// registers the primary-key apply endpoint and starts listening on the
// configured address.
func main() {
	app := catrouter.NewDefaultApp(consts.AppName, consts.Author, consts.Version, consts.Email)
	app.RegistController(catrouter.POST, "/primary/apply", controller.GenrateIDController)
	app.Run(consts.ListenIP, consts.ListenPort)
}
|
package apigen
// API describes one generated API: the model it operates on and its method
// configuration.
type API struct {
	ModelName string `json:"model_name,omitempty"`
	Methods   Method `json:"methods,omitempty"`
}
// Method wraps the detail configuration of a single API method.
type Method struct {
	Detail Detail `json:"detail,omitempty"`
}
// Detail is the full configuration of one method: its identity, the file
// names to generate, loopback and data-API configuration, plus pre/post
// processing hook names.
type Detail struct {
	Name          string        `json:"name,omitempty"`
	Type          string        `json:"type,omitempty"`
	FileName      FileName      `json:"file_name,omitempty"`
	LbConfig      LbConfig      `json:"lb_config,omitempty"`
	DataAPIConfig DataAPIConfig `json:"data_api_config,omitempty"`
	PostProcess   []string      `json:"post_process,omitempty"`
	PreProcess    []string      `json:"pre_process,omitempty"`
}
// FileName groups the file names used when generating the method's
// artifacts (JSON, model, library and constants files).
type FileName struct {
	JSONName  string `json:"json_name,omitempty"`
	ModelName string `json:"model_name,omitempty"`
	LibName   string `json:"lib_name,omitempty"`
	ConstName string `json:"const_name,omitempty"`
}
// LbConfig is the loopback configuration of a method: accepted arguments,
// return description and HTTP mapping.
type LbConfig struct {
	Accepts []Accept   `json:"accepts,omitempty"`
	Returns Returns    `json:"returns,omitempty"`
	HTTP    HTTPConfig `json:"http,omitempty"`
}
// Returns describes the return value of a method: argument name, type and
// root element.
type Returns struct {
	Arg  string `json:"arg,omitempty"`
	Type string `json:"type,omitempty"`
	Root string `json:"root,omitempty"`
}
// HTTPConfig maps a method onto an HTTP verb and path.
type HTTPConfig struct {
	Verb string `json:"verb,omitempty"`
	Path string `json:"path,omitempty"`
}
// Accept describes one accepted argument: its name, type, and whether it is
// required.
type Accept struct {
	Arg      string `json:"arg,omitempty"`
	Type     string `json:"type,omitempty"`
	Required bool   `json:"required,omitempty"`
}
// DataAPIConfig is the data API configuration of a method: target data API,
// accepted arguments and the backing table's primary key and name.
type DataAPIConfig struct {
	DataAPIName string   `json:"data_api_name,omitempty"`
	Accepts     []Accept `json:"accepts,omitempty"`
	PrimaryKey  string   `json:"primary_key,omitempty"`
	TableName   string   `json:"table_name,omitempty"`
}
|
package middlewares
import (
"fmt"
"github.com/owenliang/myf-go-cat/cat"
cat2 "github.com/owenliang/myf-go/client/cat"
"github.com/owenliang/myf-go/conf"
cronContext "github.com/owenliang/myf-go/cron/context"
)
// Cat returns a cron middleware that reports each job run to CAT as a
// "JOB" transaction named "<job name>(<spec>)". The CAT client is
// initialized once, when the middleware is built, if enabled in the
// configuration.
func Cat() cronContext.HandleFunc {
	// Initialize CAT.
	if conf.MyfConf.CatConfig.IsOpen {
		if conf.MyfConf.Debug != 0 {
			cat.DebugOn()
		}
		cat.Init(conf.MyfConf.Domain, cat.Config{CatServerVersion: "V2"})
	}
	return func(context *cronContext.Context) {
		myfCat := cat2.NewMyfCat()
		// Open the job transaction and expose it to downstream handlers.
		root := cat.NewTransaction("JOB", fmt.Sprintf("%s(%s)", context.Name, context.Spec))
		myfCat.Append(root)
		context.Set("cat", myfCat)
		// Invoke the next middleware in the chain.
		context.Next()
		myfCat.FinishAll()
	}
}
|
package main
import (
"database/sql"
DbModel "gowork/model/db"
Config "gowork/utils"
"log"
"time"
"github.com/go-xorm/xorm"
"github.com/kataras/iris"
_ "github.com/lib/pq"
)
// Home registers the demo HTTP routes on the iris application:
//
//	GET  /test - returns a static JSON user (load-test target)
//	GET  /sql1 - raw database/sql query listing member usernames
//	GET  /list - xorm query returning all members as JSON
//	POST /add  - inserts a member posted as JSON
func Home(app *iris.Application, engine *xorm.Engine) {
	//./bombardier-linux-amd64 -c 100 -n 100 lfoteam.ddns.net:9001/test
	app.Get("/test", func(ctx iris.Context) {
		p1 := User{
			Name: "name1",
			Age:  12,
		}
		ctx.JSON(p1)
	})
	app.Get("/sql1", func(ctx iris.Context) {
		var (
			id       int
			username string
		)
		config := Config.ReadConfig("config.ini")
		connStr := config["connStr"]
		db, err := sql.Open("postgres", connStr)
		log.Print("conted")
		if err != nil {
			log.Print(err)
			ctx.Text("err1" + err.Error())
			return
		}
		defer db.Close()
		rows, err := db.Query("select id,username from member;")
		// Fix: the original tested `rows == nil` here, which both violates
		// the database/sql error contract (check err, not rows) and would
		// have dereferenced a nil err in err.Error() if rows were nil
		// without an error.
		if err != nil {
			log.Print(err)
			ctx.Text("err2" + err.Error())
			return
		}
		// Close rows on every exit path; the original deferred the Close
		// only after the rows.Err() early return, leaking rows on that path.
		defer rows.Close()
		err = rows.Err()
		if err != nil {
			log.Print(err)
			ctx.Text("t1err:" + err.Error())
			return
		}
		strs := ""
		for rows.Next() {
			err := rows.Scan(&id, &username)
			if err != nil {
				log.Print(err)
			}
			log.Println(id, username)
			strs += username
		}
		ctx.Text("t1:" + strs)
	})
	app.Get("/list", func(ctx iris.Context) {
		//xorm reverse postgres "user=lfo dbname=lfo password=lfo sslmode=disable host=lfoteam.ddns.net" $GOPATH/src/github.com/go-xorm/cmd/xorm/templates/goxorm ./db
		mlist := make([]DbModel.Member, 0)
		err := engine.Find(&mlist)
		if err != nil {
			log.Print(err)
			ctx.Text("err:" + err.Error())
			return
		}
		// strs := "xorm"
		// for _, entity := range mlist {
		// strs += entity.Username
		// log.Print(strs)
		// }
		//ab -n 1000 -n 200 -s 5 http://lfoteam.ddns.net:9001/list
		ctx.JSON(mlist)
	})
	app.Post("/add", func(ctx iris.Context) {
		var member DbModel.Member
		ctx.ReadJSON(&member)
		member.Time = time.Now()
		affected, err := engine.Insert(&member)
		log.Print(affected)
		if err != nil {
			log.Print(err)
			res := BaseResponse{}
			res.Sus = false
			ctx.JSON(res)
			return
		}
		res := BaseResponse{}
		res.Sus = true
		ctx.JSON(res)
	})
}
|
package pie
import (
"encoding/json"
"golang.org/x/exp/constraints"
)
// JSONStringIndent returns the JSON-encoded form of ss as a string, with
// the given prefix and indent applied to each line.
//
// A nil slice is rendered as "[]" rather than "null", so the result is
// always a JSON array. See json.MarshalIndent for details.
func JSONStringIndent[T constraints.Ordered](ss []T, prefix, indent string) string {
	if ss == nil {
		return "[]"
	}
	// Marshaling a slice of an ordered (primitive) type cannot fail.
	encoded, _ := json.MarshalIndent(ss, prefix, indent)
	return string(encoded)
}
|
package pathfileops
// DirTreeOp - Contains data fields used to store directory information
// generated by type DirMgr.
type DirTreeOp struct {
	CallingFunc        string
	FileOps            []FileOperationCode
	FileSelectCriteria FileSelectionCriteria
	SourceBaseDir      DirMgr
	TargetBaseDir      DirMgr
	// ErrReturns accumulates errors; New pre-allocates capacity for 100.
	ErrReturns []error
}
// New - Creates, initializes and returns a new DirTreeOp instance with an
// empty, pre-allocated ErrReturns slice.
func (dTreeOp DirTreeOp) New() DirTreeOp {
	return DirTreeOp{
		ErrReturns: make([]error, 0, 100),
	}
}
|
package domain
import . "meli/cmd/data"
// GetLocation trilaterates a point from the distances to three fixed
// reference positions (Kenobi, Skywalker and Sato; their coordinates come
// from the dot-imported data package). Exactly three distances must be
// given, in the order kenobi, skywalker, sato; otherwise (0, 0) is
// returned.
// NOTE(review): (0, 0) is also a legitimate location, so callers cannot
// distinguish "bad input" from a real origin fix.
func GetLocation(distances ...float32) (x, y float32) {
	if len(distances) != 3 {
		return 0, 0
	}
	radiusKenobi := distances[0]
	radiusSkywalker := distances[1]
	rediusSato := distances[2]
	// Standard trilateration: subtracting pairs of circle equations yields
	// two linear equations in x and y, solved as a 2x2 system below.
	S := (square(SATO_X) - square(SKYWALKER_X) + square(SATO_Y) - square(SKYWALKER_Y) + square(radiusSkywalker) - square(rediusSato)) / 2.0
	T := (square(KENOBI_X) - square(SKYWALKER_X) + square(KENOBI_Y) - square(SKYWALKER_Y) + square(radiusSkywalker) - square(radiusKenobi)) / 2.0
	y = ((T * (SKYWALKER_X - SATO_X)) - (S * (SKYWALKER_X - KENOBI_X))) / (((KENOBI_Y - SKYWALKER_Y) * (SKYWALKER_X - SATO_X)) - ((SATO_Y - SKYWALKER_Y) * (SKYWALKER_X - KENOBI_X)))
	x = ((y * (KENOBI_Y - SKYWALKER_Y)) - T) / (SKYWALKER_X - KENOBI_X)
	return x, y
}
// square returns n multiplied by itself.
func square(n float32) float32 {
	squared := n * n
	return squared
}
|
// Package hybrid provides a hybrid FSDB implementation.
//
// A hybrid FSDB is backed by a local FSDB and a remote bucket.
// All data are written locally first, then a background thread will upload them
// to the remote bucket and delete the local data.
// Read operations will check local FSDB first,
// and fetch from the bucket if it is not present locally.
// When remote read happens,
// the data will be saved locally until the next upload loop.
//
// Data stored on the remote bucket will be gzipped using best compression
// level.
//
// Concurrency
//
// If you turn off the optional row lock (default is on),
// there are two possible cases where we might lose data due to race conditions,
// but they are very unlikely.
//
// The first case is remote read. The read process is:
// 1. Check local FSDB.
// 2. Read fully from remote bucket.
// 3. Check local FSDB again to prevent using stale remote data to overwrite local data.
// 4. If there's still no local data in Step 3, write remote data locally.
// 5. Return local data.
// If another write happens between Step 3 and 4,
// then it might be overwritten by stale remote data.
//
// The other case is during upload. The upload process for each key is:
// 1. Read local data, calculate crc32c.
// 2. Gzip local data, upload to remote bucket.
// 3. Calculate local data crc32c again.
// 4. If the crc32c from Step 1 and Step 3 matches, delete local data.
// If another write happens between Step 3 and 4,
// then it might be deleted on Step 4 so we only have stale data in the system.
//
// Turning on the optional row lock will make sure the discussed data loss
// scenarios won't happen, but it also degrades the performance slightly.
// The lock is only used partially inside the operations
// (whole local write operation, remote read from Step 3, upload from Step 3).
//
// There are no other locks used in the code,
// except a few atomic numbers in upload loop for logging purpose.
package hybrid
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.