text stringlengths 11 4.05M |
|---|
package main
import (
"container/list"
"encoding/json"
"github.com/gorilla/websocket"
"log"
"sync"
"time"
)
//Constant
const (
	FIELD_SIZE = 50 // edge length of one map field, in pixels (see pixToArr)
	STEP_SIZE  = 3  // base pixel distance moved per input message
	CANVAS_SIZE = 1000 // client canvas edge length in pixels
	STANDARD_BOMB_RADIUS = 2
	STANDARD_BOMB_TIME = 3 // presumably seconds until detonation — TODO confirm
	STANDARD_STEP_MULTIPLICATOR = 1
	DEATH_STEP_MULTIPLICATOR = 0.5 // presumably slows dead players — confirm in Bomberman.stepMult usage
	SUDDEN_DEATH_START_TIME = 60 // seconds after round start (used with time.Second)
	MAP_SIZE = CANVAS_SIZE / FIELD_SIZE // fields per map edge (currently 20)
	/*
	10 is equal to full map, 10 is MAX!!!
	*/
	SUDDEN_DEATH_MAX_AREA = 7
	/*
	In seconds, higher number means more time between the increase of the area
	*/
	SUDDEN_INCREASE_TIME = 5
)

var GameMap Map                       // the authoritative game field grid
var Connections map[uint64]*Session   // active sessions keyed by user ID
var ticker *time.Ticker               // drives the periodic client updates
var spawnPositions [][]int            // fixed spawn points in array coordinates
//var incomingTicker = time.NewTicker(1 * time.Millisecond)
var sessionRunning bool               // true while a round is in progress
var suddenDeathRunning bool           // true once sudden death has started
/*Things send to the clients*/
var bombermanArray []Bomberman             // per-tick snapshot of all players
var abstractGameMap chan [][][]FieldObject // freshly built abstract maps from the builder goroutine
var clientPackageAsJson []byte             // last marshalled ClientPackage broadcast to clients
//Called before any connection is possible
// initGame initialises all package-level game state and starts the two
// background goroutines (client updater and abstract-map builder). It must
// run exactly once before any websocket client connects, because the
// handlers below read these globals.
func initGame() {
	//Global variables
	GameMap = NewMap(MAP_SIZE)
	Connections = make(map[uint64]*Session, 0)
	// ~60 client updates per second.
	ticker = time.NewTicker(16 * time.Millisecond)
	// Eight spawn points in array coordinates: the corners and edge midpoints.
	spawnPositions = [][]int{{0, 0}, {0, 10}, {0, 19}, {10, 0}, {10, 19}, {19, 0}, {19, 10}, {19, 19}}
	sessionRunning = false
	suddenDeathRunning = false
	bombermanArray = make([]Bomberman, 0)
	//abstractGameMap = make([][][]FieldObject,0)
	// Unbuffered: the builder goroutine below blocks until sendDataToClients
	// receives the freshly built map, so builds are paced by consumption.
	abstractGameMap = make(chan [][][]FieldObject)
	clientPackageAsJson = make([]byte, 0)
	//Routines
	go UpdateClients()
	// Continuously rebuild the abstract game map and hand it to the sender.
	go func() {
		for {
			abstractGameMap <- BuildAbstractGameMap()
		}
	}()
}
/*
Wrapper Function to Build the Map new.
*/
//func //MapChanged() {
// go BuildAbstractGameMap()
//}
/*
Represents the Position of a Player.
*/
/*
Position holds a player's location in pixel coordinates.
*/
type Position struct {
	x int
	y int
}

/*
newPosition builds a Position at the given x/y pixel coordinates.
*/
func newPosition(x int, y int) Position {
	p := Position{}
	p.x = x
	p.y = y
	return p
}

// updatePosition shifts the position in place by the given x and y offsets.
func (p *Position) updatePosition(xOffset int, yOffset int) {
	p.x = p.x + xOffset
	p.y = p.y + yOffset
}
/*
Converts a Pixelposition to an Arrayposition.
*/
/*
pixToArr converts a pixel coordinate to its map-array index.
Adding half a field before dividing rounds to the nearest field centre.
*/
func pixToArr(pixel int) int {
	half := FIELD_SIZE / 2
	return (pixel + half) / FIELD_SIZE
}
/*
Prints a list to the log.
*/
/*
printList logs every Bomberman stored in the given list, or a notice when
the list is empty.

Bug fixed: the previous version logged the front element twice and never
logged the last element, because it printed the current element before
advancing inside the loop.
*/
func printList(l *list.List) {
	if l.Front() == nil {
		log.Println("List is null!")
		return
	}
	log.Println("List started: ")
	// Canonical container/list traversal: print each element exactly once.
	for element := l.Front(); element != nil; element = element.Next() {
		log.Println(element.Value.(*Bomberman))
	}
	log.Println("List ended...")
}
/*
Represents the Keys pressed by the Player.
*/
/*
Represents the Keys pressed by the Player.
The JSON tags match the raw key characters sent by the client; note the
spacebar arrives under the literal key " ".
*/
type KeyInput struct {
	Wpressed     bool `json:"w"`
	Spressed     bool `json:"s"`
	Apressed     bool `json:"a"`
	Dpressed     bool `json:"d"`
	SpacePressed bool `json:" "`
}
/*
Information which the Client needs.
This things will be send to Client.
*/
/*
Information which the Client needs.
ClientPackage is the full state snapshot marshalled to JSON and broadcast
to every connected client on each tick (see sendDataToClients).
*/
type ClientPackage struct {
	Players        []Bomberman       // current state of all connected players
	GameMap        [][][]FieldObject // abstract representation of the map
	SessionRunning bool              // whether a round is currently active
}
/*
Wrapper for the user
*/
/*
Wrapper for the user.
Session ties together everything known about one connected player.
*/
type Session struct {
	User              *User           //Connected user
	Bomber            *Bomberman      //Bomber of the connected user
	Connection        *websocket.Conn //Websocket connection
	ConnectionStarted time.Time       //point when player joined
}
// NewSession bundles a user, their bomberman, the websocket connection and
// the join timestamp into a freshly allocated Session.
func NewSession(user *User, character *Bomberman, connection *websocket.Conn, connectionStarted time.Time) *Session {
	session := &Session{
		User:              user,
		Bomber:            character,
		Connection:        connection,
		ConnectionStarted: connectionStarted,
	}
	return session
}
/*
Returns the string representation of the connection
*/
/*
String returns a human-readable representation of the session: user,
bomber, remote address and connection start time, pipe-separated.
*/
func (r *Session) String() string {
	result := "Session: { "
	result += r.User.String()
	result += "|"
	result += r.Bomber.String()
	result += "|"
	result += r.Connection.RemoteAddr().String()
	result += "|"
	result += r.ConnectionStarted.String()
	result += "}"
	return result
}
/*
Prints every active connection
*/
/*
AllConnectionsAsString renders every active session (from the global
Connections map) into one newline-separated string for logging/debugging.
*/
func AllConnectionsAsString() string {
	result := "Active Connections:"
	for _, session := range Connections {
		result = result + session.String() + "\n"
	}
	return result
}
/*
Starts the interaction loop.
*/
/*
Starts the interaction loop.
Registers the session, blocks inside the websocket read loop until the
client disconnects, then removes the player from the map field he last
occupied and from the connection registry.
*/
func StartPlayerLoop(session *Session) {
	//Add the infos to the connection map
	Connections[session.User.UserID] = session
	// Blocks until the websocket read loop ends (disconnect or read error).
	playerWebsocketLoop(session)
	// Remove player from the list at his last array position. Consistency
	// fix: use the shared pixToArr helper instead of duplicating the
	// pixel-to-array conversion inline.
	x := pixToArr(session.Bomber.PositionX)
	y := pixToArr(session.Bomber.PositionY)
	removePlayerFromList(GameMap.Fields[x][y].Player, session.Bomber)
	//Remove from the connection map
	delete(Connections, session.User.UserID)
}
/*
Interaction loop.
The user input is received and the Player-Position is updated accordingly / Bombs get placed.
*/
/*
Interaction loop.
The user input is received and the Player-Position is updated accordingly / Bombs get placed.
Runs until ReadMessage fails (client disconnect); the caller then cleans
the session up. Exactly one direction is honoured per message, with
priority W > S > A > D; the spacebar is evaluated independently and places
a bomb asynchronously.
*/
func playerWebsocketLoop(session *Session) {
	for {
		// Reset each tick; set again below only if a movement key is held.
		session.Bomber.IsMoving = false
		_, p, err := session.Connection.ReadMessage()
		if err != nil {
			log.Println(err)
			return
		}
		var keys KeyInput
		if err := json.Unmarshal(p, &keys); err != nil {
			// Malformed client payload: skip this message, keep the loop alive.
			log.Println(err)
			continue
		}
		/*
		Checks which Key got pressed and performs an Action accordingly. If a movement key was pressed, the Collision and "legalness" of the Movement
		will be checked before updating the Player-Position.
		*/
		// Effective step length honours the bomber's current speed multiplier.
		realStepSize := STEP_SIZE * session.Bomber.stepMult
		//W
		if keys.Wpressed {
			session.Bomber.DirUp, session.Bomber.DirDown, session.Bomber.DirLeft, session.Bomber.DirRight = true, false, false, false
			session.Bomber.IsMoving = true
			if session.Bomber.collisionWithSurroundings(0, -int(realStepSize)) {
				if session.Bomber.moveIfLegal(session.Bomber.PositionX, session.Bomber.PositionY-int(realStepSize)) {
					// Shift all four hitbox corners together with the position.
					session.Bomber.topRightPos.updatePosition(0, -int(realStepSize))
					session.Bomber.topLeftPos.updatePosition(0, -int(realStepSize))
					session.Bomber.bottomRightPos.updatePosition(0, -int(realStepSize))
					session.Bomber.bottomLeftPos.updatePosition(0, -int(realStepSize))
					session.Bomber.PositionY -= int(realStepSize)
				}
			}
		} else
		//S
		if keys.Spressed {
			session.Bomber.DirUp, session.Bomber.DirDown, session.Bomber.DirLeft, session.Bomber.DirRight = false, true, false, false
			session.Bomber.IsMoving = true
			if session.Bomber.collisionWithSurroundings(0, int(realStepSize)) {
				if session.Bomber.moveIfLegal(session.Bomber.PositionX, session.Bomber.PositionY+int(realStepSize)) {
					session.Bomber.topRightPos.updatePosition(0, int(realStepSize))
					session.Bomber.topLeftPos.updatePosition(0, int(realStepSize))
					session.Bomber.bottomRightPos.updatePosition(0, int(realStepSize))
					session.Bomber.bottomLeftPos.updatePosition(0, int(realStepSize))
					session.Bomber.PositionY += int(realStepSize)
				}
			}
		} else
		//A
		if keys.Apressed {
			session.Bomber.DirUp, session.Bomber.DirDown, session.Bomber.DirLeft, session.Bomber.DirRight = false, false, true, false
			session.Bomber.IsMoving = true
			if session.Bomber.collisionWithSurroundings(-int(realStepSize), 0) {
				if session.Bomber.moveIfLegal(session.Bomber.PositionX-int(realStepSize), session.Bomber.PositionY) {
					session.Bomber.topRightPos.updatePosition(-int(realStepSize), 0)
					session.Bomber.topLeftPos.updatePosition(-int(realStepSize), 0)
					session.Bomber.bottomRightPos.updatePosition(-int(realStepSize), 0)
					session.Bomber.bottomLeftPos.updatePosition(-int(realStepSize), 0)
					session.Bomber.PositionX -= int(realStepSize)
				}
			}
		} else
		//D
		if keys.Dpressed {
			session.Bomber.DirUp, session.Bomber.DirDown, session.Bomber.DirLeft, session.Bomber.DirRight = false, false, false, true
			session.Bomber.IsMoving = true
			if session.Bomber.collisionWithSurroundings(int(realStepSize), 0) {
				if session.Bomber.moveIfLegal(session.Bomber.PositionX+int(realStepSize), session.Bomber.PositionY) {
					session.Bomber.topRightPos.updatePosition(int(realStepSize), 0)
					session.Bomber.topLeftPos.updatePosition(int(realStepSize), 0)
					session.Bomber.bottomRightPos.updatePosition(int(realStepSize), 0)
					session.Bomber.bottomLeftPos.updatePosition(int(realStepSize), 0)
					session.Bomber.PositionX += int(realStepSize)
				}
			}
		}
		//Spacebar
		// Bomb placement is independent of movement; only living players may
		// place, and placement runs asynchronously so the read loop stays responsive.
		if keys.SpacePressed {
			if session.Bomber.IsAlive {
				go session.Bomber.placeBomb()
			}
		}
	}
}
/*
Updates the Client in an Interval.
*/
/*
Updates the Client in an Interval.
Runs on its own goroutine: every ticker tick the full game state is pushed
to all clients. The loop — and client updating — stops permanently once
sendDataToClients reports an error.
*/
func UpdateClients() {
	// `for range` instead of `for _ = range`: the tick value is unused
	// (idiom; `go vet`/gofmt flag the old form).
	for range ticker.C {
		if err := sendDataToClients(); err != nil {
			log.Println(err)
			break
		}
	}
	log.Println("Updating Clients stopped.")
}
/*
Sends the Data needed by the Client to the Client.
*/
/*
Sends the Data needed by the Client to the Client.
Snapshots all bombermen, marshals them together with the current abstract
map and session flag, and broadcasts the JSON to every connection.
NOTE(review): this runs on the ticker goroutine while the player loops
mutate the same Bomberman structs concurrently without synchronization —
confirm whether that race is acceptable. Also, one goroutine per
connection just to copy a struct adds overhead without parallel benefit.
*/
func sendDataToClients() error {
	//Create array from all connected Bombermen
	connectionLength := len(Connections)
	bombermanArray = make([]Bomberman, connectionLength)
	count := 0
	wg := &sync.WaitGroup{}
	wg.Add(connectionLength)
	for _, v := range Connections {
		// Capture loop variable and slot index per goroutine.
		session := v
		go func(count int) {
			bombermanArray[count] = *session.Bomber
			wg.Done()
		}(count)
		count++
	}
	wg.Wait()
	var err error
	// GameMap receive blocks until the builder goroutine delivers a fresh map.
	clientPackageAsJson, err = json.Marshal(ClientPackage{
		Players:        bombermanArray,
		GameMap:        <-abstractGameMap,
		SessionRunning: sessionRunning,
	})
	if err != nil {
		log.Println(err)
		return err
	}
	// Broadcast the identical payload to every client; the first write
	// error aborts the whole update cycle.
	for _, v := range Connections {
		if err := v.Connection.WriteMessage(websocket.TextMessage, clientPackageAsJson); err != nil {
			return err
		}
	}
	return nil
}
// isLesserThan reports whether session a's user ID is strictly smaller
// than session b's. Both arguments must hold *Session values; anything
// else panics on the type assertion.
func isLesserThan(a interface{}, b interface{}) bool {
	left := a.(*Session)
	right := b.(*Session)
	return left.User.UserID < right.User.UserID
}
/*
Starts the a Game-Session, if more then one Player is connected and all are ready.
*/
/*
Starts a game session if more than one player is connected and all of them
are ready. Resets the map, marks the session as running, clears every
ready flag and schedules sudden death after SUDDEN_DEATH_START_TIME seconds.
*/
func StartGameIfPlayersReady() {
	// A round needs at least two players.
	if len(Connections) < 2 {
		return
	}
	// All connected players must have flagged ready.
	for _, v := range Connections {
		if !v.Bomber.PlayerReady {
			return
		}
	}
	resetGame("images/testMap.png")
	sessionRunning = true
	// Consume the ready flags so the next round needs fresh confirmation.
	for _, v := range Connections {
		v.Bomber.PlayerReady = false
	}
	time.AfterFunc(time.Second*SUDDEN_DEATH_START_TIME, startSuddenDeath)
}
/*
Starts the Suddendeath and Poison spreading.
*/
/*
Starts the Suddendeath and Poison spreading.
Ring by ring (from the border inwards, one ring every SUDDEN_INCREASE_TIME
seconds) every border field is poisoned and all players standing on it are
killed, until SUDDEN_DEATH_MAX_AREA rings are filled or the round ends.
*/
func startSuddenDeath() {
	suddenDeathRunning = true
	p := newPoison()
	for t := 0; t < SUDDEN_DEATH_MAX_AREA; t++ {
		// A finished round (resetGame) stops the spreading early.
		if !suddenDeathRunning {
			break
		}
		for i := 0; i < len(GameMap.Fields); i++ {
			for j := 0; j < len(GameMap.Fields[i]); j++ {
				// Only the ring at distance t from the border is affected.
				// MAP_SIZE-1 replaces the former hard-coded 19, so the loop
				// stays correct if the map dimensions ever change.
				if (i == t) || (j == t) || (i == MAP_SIZE-1-t) || (j == MAP_SIZE-1-t) {
					// Skip fields that already contain an object of type 13
					// (presumably the poison type — TODO confirm).
					if GameMap.Fields[i][j].Contains[0] != nil {
						if GameMap.Fields[i][j].Contains[0].getType() == 13 {
							continue
						}
					}
					if GameMap.Fields[i][j].Contains[1] != nil {
						if GameMap.Fields[i][j].Contains[1].getType() == 13 {
							continue
						}
					}
					GameMap.Fields[i][j].addPoison(&p)
					killAllPlayersOnField(GameMap.Fields[i][j].Player)
				}
			}
		}
		findWinner()
		time.Sleep(time.Second * SUDDEN_INCREASE_TIME)
	}
}
/*
Inefficient! todo: Change!
While Sudden Death is running, constantly loops to all Fields and, if a Poison-Field is found, kills all player on the Field.
*/
//func checkForPoison() {
// for suddenDeathRunning {
// for i := 0; i < len(GameMap.Fields); i++ {
// for j := 0; j < len(GameMap.Fields[i]); j++ {
// if GameMap.Fields[i][j].Contains[0] != nil || GameMap.Fields[i][j].Contains[1] != nil{
// if GameMap.Fields[i][j].Contains[0].getType() == 13 || GameMap.Fields[i][j].Contains[1].getType() == 13 {
// if GameMap.Fields[i][j].Player != nil {
// //TO DO: Dont insta kill
// killAllPlayersOnField(GameMap.Fields[i][j].Player)
// findWinner()
// }
// }
// }
// }
// }
// }
//}
/*
Resets the Game.
*/
/*
resetGame restores a fresh round: stops sudden death, clears the death
flag, rebuilds the map from the given image file and resets every
connected bomberman onto one of the eight spawn points (wrapping around
when more than eight players are connected).
*/
func resetGame(s string) {
	suddenDeathRunning = false
	playerDied = false
	GameMap.clear()
	if err := CreateMapFromImage(GameMap, s); err != nil {
		log.Fatal(err)
	}
	spawnIndex := 0
	for _, session := range Connections {
		if spawnIndex > 7 {
			spawnIndex = 0
		}
		spawn := spawnPositions[spawnIndex]
		session.Bomber.Reset(spawn[0], spawn[1])
		spawnIndex++
	}
}
/*
Kills all Players on a Field.
*/
/*
killAllPlayersOnField kills every bomberman in the given field list and
flags that at least one player died. An empty list is a no-op.
*/
func killAllPlayersOnField(list *list.List) {
	element := list.Front()
	if element == nil {
		return
	}
	element.Value.(*Bomberman).Kill()
	playerDied = true
	for element = element.Next(); element != nil; element = element.Next() {
		element.Value.(*Bomberman).Kill()
	}
}
/*
Checks if only one Player is alive and acts accordingly.
*/
/*
Checks if at most one player is still alive and, if so, ends the round:
logs the result, credits the winner's statistics in the database, resets
the game and clears the session flag. With two or more players alive it
does nothing.

Fixes: the win message was missing a space ("Xhas Won"), and a failed
getUserByID lookup previously fell through into a nil-pointer dereference
when incrementing user.GamesWon.
*/
func findWinner() {
	counter := 0
	var lastBomberAlive *Bomberman
	for _, v := range Connections {
		if v.Bomber.IsAlive {
			lastBomberAlive = v.Bomber
			counter++
		}
	}
	// Round still in progress.
	if counter > 1 {
		return
	}
	if counter == 0 {
		log.Println("Draw")
	} else {
		log.Println(lastBomberAlive.Name + " has Won")
		user, err := getUserByID(db, lastBomberAlive.UserID)
		if err != nil {
			// Lookup failed: log and skip the stats update instead of
			// dereferencing a nil user.
			log.Println(err)
		} else {
			user.GamesWon = user.GamesWon + 1
			if err := updatePlayerStats(db, *user); err != nil {
				log.Println(err)
			}
		}
	}
	//todo send message
	resetGame("images/testMap.png")
	sessionRunning = false
}
|
// Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pb
import (
"flag"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/osrg/earthquake/earthquake/signal"
logutil "github.com/osrg/earthquake/earthquake/util/log"
"github.com/osrg/earthquake/earthquake/util/mockorchestrator"
pbutil "github.com/osrg/earthquake/earthquake/util/pb"
"github.com/stretchr/testify/assert"
"net"
"os"
"sync"
"testing"
"time"
)
// TestMain boots the singleton PB endpoint plus a mock orchestrator that
// every test in this package shares, runs the tests, then shuts the
// orchestrator down.
//
// Bug fixed: the previous version used `defer mockOrc.Shutdown()` before
// os.Exit — os.Exit skips deferred calls, so Shutdown never ran. The
// shutdown is now performed explicitly before exiting.
func TestMain(m *testing.M) {
	flag.Parse()
	logutil.InitLog("", true)
	signal.RegisterKnownSignals()
	actionCh := make(chan signal.Action)
	eventCh := SingletonPBEndpoint.Start(0, actionCh)
	mockOrc := mockorchestrator.NewMockOrchestrator(eventCh, actionCh)
	mockOrc.Start()
	code := m.Run()
	mockOrc.Shutdown()
	os.Exit(code)
}
// newPBMessage builds a FUNC_CALL event request for the given entity.
// `value` doubles as the message ID and as part of the dummy function
// name, making each message distinguishable in the logs.
func newPBMessage(t *testing.T, entityID string, value int) proto.Message {
	msgType := pbutil.InspectorMsgReq_EVENT
	eventType := pbutil.InspectorMsgReq_Event_FUNC_CALL
	funcCall := &pbutil.InspectorMsgReq_Event_FuncCall{
		Name: proto.String(fmt.Sprintf("func-%d", value)),
	}
	return &pbutil.InspectorMsgReq{
		Type:     &msgType,
		EntityId: proto.String(entityID),
		Pid:      proto.Int32(42),
		Tid:      proto.Int32(42),
		MsgId:    proto.Int32(int32(value)),
		Event: &pbutil.InspectorMsgReq_Event{
			Type:     &eventType,
			FuncCall: funcCall,
		},
		HasJavaSpecificFields: proto.Int32(0),
	}
}
// dial opens a TCP connection to the port the singleton PB endpoint is
// actually listening on and fails the test if the dial errors.
func dial(t *testing.T) net.Conn {
	addr := fmt.Sprintf(":%d", SingletonPBEndpoint.ActualPort)
	t.Logf("Dialing to %s", addr)
	conn, err := net.Dial("tcp", addr)
	assert.NoError(t, err)
	return conn
}
// The following tests exercise the PB endpoint with different
// message-count (first argument) / entity-count (second argument)
// combinations. The ShouldNotBlock variant additionally runs sender and
// receiver concurrently to verify the endpoint does not deadlock.
func TestPBEndpoint_1_1(t *testing.T) {
	testPBEndpoint(t, 1, 1, false)
}
func TestPBEndpoint_1_2(t *testing.T) {
	testPBEndpoint(t, 1, 2, false)
}
func TestPBEndpoint_2_1(t *testing.T) {
	testPBEndpoint(t, 2, 1, false)
}
func TestPBEndpoint_2_2(t *testing.T) {
	testPBEndpoint(t, 2, 2, false)
}
func TestPBEndpoint_10_10(t *testing.T) {
	testPBEndpoint(t, 10, 10, false)
}
func TestPBEndpointShouldNotBlock_10_10(t *testing.T) {
	testPBEndpoint(t, 10, 10, true)
}
// testPBEndpoint sends n messages round-robin across `entities`
// connections and receives n responses, either sequentially or (when
// concurrent is set) with sender and receiver running in parallel.
//
// Fix: the connections are now closed when the test finishes; previously
// every test case leaked its sockets.
func testPBEndpoint(t *testing.T, n, entities int, concurrent bool) {
	conns := make([]net.Conn, entities)
	for i := 0; i < entities; i++ {
		conns[i] = dial(t)
	}
	defer func() {
		for _, c := range conns {
			_ = c.Close()
		}
	}()
	sender := func() {
		for i := 0; i < n; i++ {
			// FIXME: entity id string cannot be shared between different net.Conns (i.e. not retrievable)
			// so we append time.Now() here at the moment
			entityID := fmt.Sprintf("entity-%d-%s", i%entities, time.Now())
			req := newPBMessage(t, entityID, i)
			t.Logf("Test %d: Sending %s", i, req)
			err := pbutil.SendMsg(conns[i%entities], req)
			assert.NoError(t, err)
			t.Logf("Test %d: Sent %s", i, req)
		}
	}
	receiver := func() {
		for i := 0; i < n; i++ {
			t.Logf("Test %d: Receiving", i)
			rsp := pbutil.InspectorMsgRsp{}
			err := pbutil.RecvMsg(conns[i%entities], &rsp)
			assert.NoError(t, err)
			t.Logf("Test %d: Received %v", i, rsp)
		}
	}
	if concurrent {
		var wg sync.WaitGroup
		wg.Add(2)
		go func() {
			defer wg.Done()
			sender()
		}()
		go func() {
			defer wg.Done()
			receiver()
		}()
		wg.Wait()
	} else {
		sender()
		receiver()
	}
}
|
package egressipam
import (
"encoding/binary"
"errors"
"net"
"sort"
"strings"
"github.com/jpillora/ipmath"
"github.com/redhat-cop/egressip-ipam-operator/controllers/egressipam/reconcilecontext"
"github.com/scylladb/go-set/strset"
"github.com/scylladb/go-set/u32set"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Assigns ips to unassigned namespaces and updates them
// Assigns ips to unassigned namespaces and updates them.
// For every CIDR the set of unavailable IPs is accumulated (already
// assigned IPs, network/broadcast addresses, reserved IPs, node IPs and
// cloud-infrastructure IPs); each unassigned namespace then receives the
// next free IP per CIDR and is updated with the association annotation.
//
// Bug fixed: a namespace created without any annotations has a nil
// Annotations map; writing the association into it panicked. The map is
// now initialized first.
func (r *EgressIPAMReconciler) assignIPsToNamespaces(rc *reconcilecontext.ReconcileContext) ([]corev1.Namespace, error) {
	IPsByCIDR, err := r.sortIPsByCIDR(rc)
	if err != nil {
		r.Log.Error(err, "unable to sort assignedIPs by CIDR")
		return []corev1.Namespace{}, err
	}
	r.Log.V(1).Info("currently assigned ", "IPs by CIDR", IPsByCIDR)
	//in all cases we need to add the base network and the broadcast address
	for cidr := range IPsByCIDR {
		base, cidrt, err := net.ParseCIDR(cidr)
		if err != nil {
			r.Log.Error(err, "unable to parse", "cidr", cidr)
			return []corev1.Namespace{}, err
		}
		// broadcast = network base OR inverted netmask (IPv4 arithmetic).
		broadcastip := ipmath.FromUInt32((^binary.BigEndian.Uint32([]byte(cidrt.Mask))) | binary.BigEndian.Uint32([]byte(base.To4())))
		IPsByCIDR[cidr] = append(IPsByCIDR[cidr], base, broadcastip)
	}
	r.Log.V(1).Info("adding always excluded network IPs ", "IPs by CIDR", IPsByCIDR)
	// add reserved ips
	for cidr := range IPsByCIDR {
		IPsByCIDR[cidr] = append(IPsByCIDR[cidr], rc.ReservedIPsByCIDR[cidr]...)
	}
	r.Log.V(1).Info("adding reserved IPs ", "IPs by CIDR", IPsByCIDR)
	// if nodes' IPs are in the CIDR, they should count as assigned.
	nodesIPsByCIDR, err := r.getNodesIPsByCIDR(rc)
	if err != nil {
		r.Log.Error(err, "unable to get nodesIPs by CIDR")
		return []corev1.Namespace{}, err
	}
	for cidr := range IPsByCIDR {
		IPsByCIDR[cidr] = append(IPsByCIDR[cidr], nodesIPsByCIDR[cidr]...)
	}
	r.Log.V(1).Info("adding nodes IPs (if in the same CIDR) ", "IPs by CIDR", IPsByCIDR)
	//adding cloud provider IPs by CIDR
	infraIPsByCIDR, err := rc.Infra.GetUsedIPsByCIDR(rc)
	if err != nil {
		r.Log.Error(err, "unable to get cloud infra used IPs by CIDR")
		return []corev1.Namespace{}, err
	}
	for cidr := range IPsByCIDR {
		IPsByCIDR[cidr] = append(IPsByCIDR[cidr], infraIPsByCIDR[cidr]...)
	}
	r.Log.V(1).Info("adding cloud infrastructure reserved IPs ", "IPs by CIDR", IPsByCIDR)
	IPsByCIDR = removeDuplicates(IPsByCIDR)
	r.Log.V(1).Info("final ", "IPs by CIDR", IPsByCIDR)
	// getNextAvailableIP relies on the per-CIDR lists being sorted ascending.
	for cidr := range IPsByCIDR {
		IPsByCIDR[cidr] = sortIPs(IPsByCIDR[cidr])
	}
	r.Log.V(1).Info("sorted reserved IPs ", "IPs by CIDR", IPsByCIDR)
	newlyAssignedNamespaces := []corev1.Namespace{}
	for _, unamespace := range rc.UnAssignedNamespaces {
		namespace := unamespace.DeepCopy()
		newIPsByCIDRs, err := r.getNextAvailableIPs(IPsByCIDR)
		if err != nil {
			r.Log.Error(err, "unable to assign new IPs for ", "namespace", namespace.GetName())
			return []corev1.Namespace{}, err
		}
		ipstrings := []string{}
		for _, cidr := range rc.CIDRs {
			ipstrings = append(ipstrings, newIPsByCIDRs[cidr].String())
		}
		r.Log.Info("ips assigned to", "namespace", namespace.GetName(), "ips", ipstrings)
		// Guard against a nil annotations map before writing into it.
		if namespace.ObjectMeta.Annotations == nil {
			namespace.ObjectMeta.Annotations = map[string]string{}
		}
		namespace.ObjectMeta.Annotations[NamespaceAssociationAnnotation] = strings.Join(ipstrings, ",")
		err = r.GetClient().Update(rc.Context, namespace, &client.UpdateOptions{})
		if err != nil {
			r.Log.Error(err, "unable to update", "namespace", namespace.GetName())
			return []corev1.Namespace{}, err
		}
		newlyAssignedNamespaces = append(newlyAssignedNamespaces, *namespace)
	}
	return newlyAssignedNamespaces, nil
}
func removeDuplicates(IPsByCIDR map[string][]net.IP) map[string][]net.IP {
result := map[string][]net.IP{}
for cidr, IPs := range IPsByCIDR {
ipSet := strset.New()
for _, IP := range IPs {
ipSet.Add(IP.String())
}
netIPs := []net.IP{}
for _, ip := range ipSet.List() {
netIPs = append(netIPs, net.ParseIP(ip))
}
result[cidr] = netIPs
}
return result
}
// returns a set of IPs. These IPs are the next available IP per CIDR.
// The map of CIDR is passed by reference and updated with the new IPs, so this function can be used in a loop.
// getNextAvailableIPs picks, for every CIDR, the next free IP and returns
// them keyed by CIDR. IPsByCIDR is passed by reference and is updated with
// the newly taken IPs, so this function can be called repeatedly in a loop
// to hand out successive addresses.
func (r *EgressIPAMReconciler) getNextAvailableIPs(IPsByCIDR map[string][]net.IP) (map[string]net.IP, error) {
	r.Log.V(1).Info("Assigning new IPs from", "IPs by CIDR", IPsByCIDR)
	assigned := make(map[string]net.IP, len(IPsByCIDR))
	for cidr, ips := range IPsByCIDR {
		nextIP, updated, err := r.getNextAvailableIP(cidr, ips)
		if err != nil {
			r.Log.Error(err, "unable to assign get next ip for", "cidr", cidr)
			return map[string]net.IP{}, err
		}
		assigned[cidr] = nextIP
		IPsByCIDR[cidr] = updated
	}
	r.Log.V(1).Info("Assigned", "new IPs from", assigned, " new IPs by CIDR", IPsByCIDR)
	return assigned, nil
}
// getNextAvailableIP returns the lowest free IP in the given CIDR, plus the
// assignedIPs slice with that IP inserted in sorted position. assignedIPs
// must be sorted ascending (see sortIPs); the first index whose IP differs
// from base+index marks a gap, and base+index is the lowest free address.
//
// Bug fixed: when every assigned IP sat contiguously at the start of the
// CIDR (no gap) but free addresses remained, the previous version fell
// through to a "we should never get here" error. It now hands out
// base+len(assignedIPs) in that case.
func (r *EgressIPAMReconciler) getNextAvailableIP(cidrs string, assignedIPs []net.IP) (net.IP, []net.IP, error) {
	r.Log.V(1).Info("Assigning new IP from", "CIDR", cidrs, "with already assigned IPs", assignedIPs)
	_, cidr, err := net.ParseCIDR(cidrs)
	if err != nil {
		r.Log.Error(err, "unable to parse", "cidr", cidrs)
		return net.IP{}, []net.IP{}, err
	}
	if uint32(len(assignedIPs)) == ipmath.NetworkSize(cidr) {
		return net.IP{}, []net.IP{}, errors.New("no more available IPs in this CIDR: " + cidr.String())
	}
	for i := range assignedIPs {
		if !assignedIPs[i].Equal(ipmath.DeltaIP(cidr.IP, i)) {
			assignedIP := ipmath.DeltaIP(cidr.IP, i)
			assignedIPs = append(assignedIPs[:i], append([]net.IP{assignedIP}, assignedIPs[i:]...)...)
			r.Log.V(1).Info("Assigned ", "IP", assignedIP, "new assigned IPs", assignedIPs)
			return assignedIP, assignedIPs, nil
		}
	}
	// No gap: the next free address follows the contiguous assigned prefix.
	assignedIP := ipmath.DeltaIP(cidr.IP, len(assignedIPs))
	assignedIPs = append(assignedIPs, assignedIP)
	r.Log.V(1).Info("Assigned ", "IP", assignedIP, "new assigned IPs", assignedIPs)
	return assignedIP, assignedIPs, nil
}
func sortIPs(ips []net.IP) []net.IP {
ipstrs := []uint32{}
for _, ip := range ips {
ipstrs = append(ipstrs, ipmath.ToUInt32(ip))
}
//shake off eventual duplicates
ipstrs = u32set.New(ipstrs...).List()
sort.Slice(ipstrs, func(i, j int) bool { return ipstrs[i] < ipstrs[j] })
ips = []net.IP{}
for _, ipstr := range ipstrs {
ips = append(ips, ipmath.FromUInt32(ipstr))
}
// ips2 := make([]net.IP, len(ips))
// copy(ips2, ips)
// sort.Slice(ips2, func(i, j int) bool {
// return bytes.Compare(ips2[i], ips2[j]) < 0
// })
return ips
}
// returns a map with nodes and egress IPs that have been assigned to them. This should preserve IPs that are already assigned.
func (r *EgressIPAMReconciler) assignIPsToNodes(rc *reconcilecontext.ReconcileContext) (map[string][]string, error) {
// 1. get assignedIPsToNodesByCIDR
// 2. get assignedIPsToNamespacesByCIDR
// 3. calculate toBeAssignedIPsByCIDR
// 4. recalculate assignedIPsToNodesByCIDR
// 5 recalculate assignedIPsByNode
// 6. get NodesByCIDR
// 7. calculate NodesBy#AssignedIPByCIDR
// 8. assign IPs to the least assigned nodes, update map, by CIDR
assignedIPsToNodesByCIDR := map[string][]string{}
assignedIPsToNamespaceByCIDR := map[string][]string{}
toBeAssignedToNodesIPsByCIDR := map[string][]string{}
for _, cidr := range rc.CIDRs {
assignedIPsToNodesByCIDR[cidr] = []string{}
assignedIPsToNamespaceByCIDR[cidr] = []string{}
toBeAssignedToNodesIPsByCIDR[cidr] = []string{}
}
// 1. get assignedIPsToNodesByCIDR
for _, ipsbn := range rc.InitiallyAssignedIPsByNode {
for cidrstr := range assignedIPsToNodesByCIDR {
_, cidr, err := net.ParseCIDR(cidrstr)
if err != nil {
r.Log.Error(err, "unable to conver to cidr ", "string", cidrstr)
return map[string][]string{}, err
}
for _, ip := range ipsbn {
if cidr.Contains(net.ParseIP(ip)) {
assignedIPsToNodesByCIDR[cidrstr] = append(assignedIPsToNodesByCIDR[cidrstr], ip)
}
}
}
}
r.Log.V(1).Info("", "assignedIPsToNodesByCIDR: ", assignedIPsToNodesByCIDR)
// 2. get assignedIPsToNamespacesByCIDR
for _, namespace := range rc.FinallyAssignedNamespaces {
ipsstr, ok := namespace.GetAnnotations()[NamespaceAssociationAnnotation]
if !ok {
return map[string][]string{}, errors.New("unable to find ips in namespace" + namespace.GetName())
}
ipsbn := strings.Split(ipsstr, ",")
for cidrstr := range assignedIPsToNamespaceByCIDR {
_, cidr, err := net.ParseCIDR(cidrstr)
if err != nil {
r.Log.Error(err, "unable to conver to cidr ", "string", cidrstr)
return map[string][]string{}, err
}
for _, ipstr := range ipsbn {
ip := net.ParseIP(ipstr)
if cidr.Contains(ip) {
assignedIPsToNamespaceByCIDR[cidrstr] = append(assignedIPsToNamespaceByCIDR[cidrstr], ip.String())
}
}
}
}
r.Log.V(1).Info("", "assignedIPsToNamespaceByCIDR: ", assignedIPsToNamespaceByCIDR)
// 3. calculate toBeAssignedIPsByCIDR
for cidr := range assignedIPsToNamespaceByCIDR {
toBeAssignedToNodesIPsByCIDR[cidr] = strset.Difference(strset.New(assignedIPsToNamespaceByCIDR[cidr]...), strset.New(assignedIPsToNodesByCIDR[cidr]...)).List()
}
r.Log.V(1).Info("", "toBeAssignedToNodesIPsByCIDR: ", toBeAssignedToNodesIPsByCIDR)
// 4. recalculate assignedIPsToNodesByCIDR
for cidr := range assignedIPsToNamespaceByCIDR {
assignedIPsToNodesByCIDR[cidr] = strset.Intersection(strset.New(assignedIPsToNodesByCIDR[cidr]...), strset.New(assignedIPsToNamespaceByCIDR[cidr]...)).List()
}
r.Log.V(1).Info("new", "assignedIPsToNodesByCIDR: ", assignedIPsToNodesByCIDR)
// 5 recalculate assignedIPsByNode
newAssignedIPsByNode := map[string][]string{}
for _, assignedIPs := range assignedIPsToNodesByCIDR {
for _, assignedIP := range assignedIPs {
for node, initiallyAssignedToNodeIPs := range rc.InitiallyAssignedIPsByNode {
for _, initiallyAssignedToNodeIP := range initiallyAssignedToNodeIPs {
if assignedIP == initiallyAssignedToNodeIP {
newAssignedIPsByNode[node] = append(newAssignedIPsByNode[node], assignedIP)
}
}
}
}
}
r.Log.V(1).Info("new", "assignedIPsByNode: ", newAssignedIPsByNode)
// 6. get NodesByCIDR
nodesByCIDR := rc.SelectedNodesByCIDR
r.Log.V(1).Info("", "nodesByCIDR: ", nodesByCIDR)
// 7. calculate NodesByNumberOfAssignedIPByCIDR
nodesByNumberOfAssignedIPsByCIDR := map[string]map[int][]string{}
for cidr := range nodesByCIDR {
nodesByNumberOfAssignedIPsByCIDR[cidr] = map[int][]string{}
for _, node := range nodesByCIDR[cidr] {
if _, ok := rc.AllNodes[node]; ok && isCondition(rc.AllNodes[node].Status.Conditions, corev1.NodeReady, corev1.ConditionTrue) {
nodesByNumberOfAssignedIPsByCIDR[cidr][len(newAssignedIPsByNode[node])] = append(nodesByNumberOfAssignedIPsByCIDR[cidr][len(newAssignedIPsByNode[node])], node)
}
}
}
r.Log.V(1).Info("", "nodesByNumberOfAssignedIPsByCIDR: ", nodesByNumberOfAssignedIPsByCIDR)
// 8. assign IPs to the least assigned nodes, update map, by CIDR
for cidr, ips := range toBeAssignedToNodesIPsByCIDR {
for _, ip := range ips {
//pick the first node with the least IPs in this CIDR
r.Log.V(1).Info("", "nodesByNumberOfAssignedIPsByCIDR: ", nodesByNumberOfAssignedIPsByCIDR)
minIPsPerNode := getMinKey(nodesByNumberOfAssignedIPsByCIDR[cidr])
if minIPsPerNode == -1 {
err := errors.New("Unable to find nodes for CIDR" + cidr)
r.Log.Error(err, "", "cidr", cidr, "nodes", nodesByNumberOfAssignedIPsByCIDR[cidr])
return map[string][]string{}, err
}
r.Log.V(1).Info("", "minIPsPerNode: ", minIPsPerNode, "for cidr", cidr)
node := nodesByNumberOfAssignedIPsByCIDR[cidr][minIPsPerNode][0]
r.Log.Info("assigning", "IP", ip, "to node", node)
// add the node to the assignedIP per node map
newAssignedIPsByNode[node] = append(newAssignedIPsByNode[node], ip)
// remove the node from the minIPsPerNode map
nodesByNumberOfAssignedIPsByCIDR[cidr][minIPsPerNode] = nodesByNumberOfAssignedIPsByCIDR[cidr][minIPsPerNode][1:]
// add the node to the minIPsPerNode+1 map
nodesByNumberOfAssignedIPsByCIDR[cidr][minIPsPerNode+1] = append(nodesByNumberOfAssignedIPsByCIDR[cidr][minIPsPerNode+1], node)
}
}
//we could be in a situation where only some nodes got some IPs, we need to make sure that all of the nodes are in the final map, possibbly with zero IPs.
for _, nodeByNumberOfIPs := range nodesByNumberOfAssignedIPsByCIDR {
for _, node := range nodeByNumberOfIPs[0] {
newAssignedIPsByNode[node] = []string{}
}
}
//at this point we need to re-add the nodes that were removed because not ready
for nodeName, node := range rc.SelectedNodes {
if !isCondition(node.Status.Conditions, corev1.NodeReady, corev1.ConditionTrue) {
newAssignedIPsByNode[nodeName] = []string{}
}
}
return newAssignedIPsByNode, nil
}
// getMinKey returns the smallest key in nodemap whose node slice is
// non-empty, or -1 when the map is empty or every slice is empty.
// A single pass replaces the previous collect-then-sort approach (the
// full ordering was never needed, only the minimum).
func getMinKey(nodemap map[int][]string) int {
	min := -1
	for n, nodes := range nodemap {
		if len(nodes) == 0 {
			continue
		}
		if min == -1 || n < min {
			min = n
		}
	}
	return min
}
|
package proxy
import (
"crypto/tls"
"fmt"
"github.com/kataras/iris/v12"
"github.com/kataras/iris/v12/context"
"net/http"
"net/http/httputil"
"net/url"
)
// KubernetesClientProxy reverse-proxies the incoming request to the API
// server of the cluster named in the route parameters, injecting the
// cluster's stored token as the authorization header and rewriting the
// request path to the captured proxy path.
func KubernetesClientProxy(ctx context.Context) {
	clusterName := ctx.Params().Get("cluster_name")
	proxyPath := ctx.Params().Get("p")
	endpoint, err := clusterService.GetApiServerEndpoint(clusterName)
	if err != nil {
		// NOTE(review): ctx.JSON(iris.StatusInternalServerError) writes the
		// numeric status code as the response body rather than setting the
		// HTTP status; ctx.StatusCode(...) may have been intended — confirm
		// against the iris API (same pattern below).
		_, _ = ctx.JSON(iris.StatusInternalServerError)
		return
	}
	u, err := url.Parse(fmt.Sprintf("https://%s:%d", endpoint.Address, endpoint.Port))
	if err != nil {
		_, _ = ctx.JSON(iris.StatusInternalServerError)
		return
	}
	proxy := httputil.NewSingleHostReverseProxy(u)
	// SECURITY: TLS certificate verification to the API server is disabled;
	// acceptable only if the endpoint is trusted/internal — review.
	proxy.Transport = &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	secret, err := clusterService.GetSecrets(clusterName)
	if err != nil {
		_, _ = ctx.JSON(iris.StatusInternalServerError)
		return
	}
	// Token is formatted as "<keyPrefix> <token>" (presumably "Bearer ..." —
	// keyPrefix is defined elsewhere, confirm).
	token := fmt.Sprintf("%s %s", keyPrefix, secret.KubernetesToken)
	ctx.Request().Header.Add(AuthorizationHeader, token)
	ctx.Request().URL.Path = proxyPath
	proxy.ServeHTTP(ctx.ResponseWriter(), ctx.Request())
}
|
//
// Copyright (C) 2019-2021 vdaas.org vald team <vald@vdaas.org>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package grpc provides grpc server logic
package grpc
import (
"context"
"fmt"
"strconv"
agent "github.com/vdaas/vald/apis/grpc/agent/core"
"github.com/vdaas/vald/apis/grpc/payload"
"github.com/vdaas/vald/internal/errors"
"github.com/vdaas/vald/internal/info"
"github.com/vdaas/vald/internal/log"
"github.com/vdaas/vald/internal/net/grpc"
"github.com/vdaas/vald/internal/net/grpc/status"
"github.com/vdaas/vald/internal/observability/trace"
"github.com/vdaas/vald/pkg/agent/core/ngt/model"
"github.com/vdaas/vald/pkg/agent/core/ngt/service"
)
// Server is the public interface of the agent gRPC server; it re-exports
// the generated agent.AgentServer interface.
type Server agent.AgentServer

// server implements Server on top of an NGT index service.
type server struct {
	ngt               service.NGT // NGT index backing all operations
	streamConcurrency int         // concurrency limit passed to grpc.BidirectionalStream
}
// New builds a Server, applying the package default options first and the
// caller-supplied options on top (later options win).
func New(opts ...Option) Server {
	srv := new(server)
	for _, apply := range defaultOpts {
		apply(srv)
	}
	for _, apply := range opts {
		apply(srv)
	}
	return srv
}
// Exists checks whether the given object ID is present in the NGT index
// and returns the matching internal object ID as a string. An unknown
// uuid is logged and surfaced as a gRPC NotFound error; a trace span is
// recorded for the call when tracing is enabled.
func (s *server) Exists(ctx context.Context, uid *payload.Object_ID) (res *payload.Object_ID, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Exists")
	defer func() {
		if span != nil {
			span.End()
		}
	}()
	uuid := uid.GetId()
	oid, ok := s.ngt.Exists(uuid)
	if !ok {
		err = errors.ErrObjectIDNotFound(uuid)
		log.Warn("[Exists] an error occurred:", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeNotFound(err.Error()))
		}
		return nil, status.WrapWithNotFound(fmt.Sprintf("Exists API uuid %s's oid not found", uuid), err, info.Get())
	}
	res = new(payload.Object_ID)
	res.Id = strconv.Itoa(int(oid))
	return res, nil
}
// Search runs a vector similarity search against the NGT index using the
// parameters from the request config, recording a trace span for the call.
func (s *server) Search(ctx context.Context, req *payload.Search_Request) (*payload.Search_Response, error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Search")
	defer func() {
		if span != nil {
			span.End()
		}
	}()
	cfg := req.GetConfig()
	dists, err := s.ngt.Search(req.GetVector(), cfg.GetNum(), cfg.GetEpsilon(), cfg.GetRadius())
	return toSearchResponse(dists, err)
}
// SearchByID runs a similarity search seeded by an already-indexed object
// ID, using the parameters from the request config, and records a trace
// span for the call.
func (s *server) SearchByID(ctx context.Context, req *payload.Search_IDRequest) (*payload.Search_Response, error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.SearchByID")
	defer func() {
		if span != nil {
			span.End()
		}
	}()
	cfg := req.GetConfig()
	dists, err := s.ngt.SearchByID(req.GetId(), cfg.GetNum(), cfg.GetEpsilon(), cfg.GetRadius())
	return toSearchResponse(dists, err)
}
// toSearchResponse converts NGT distances into a gRPC search response.
// A non-nil err is logged, wrapped as an Internal status error, and
// returned together with the (possibly empty) response.
func toSearchResponse(dists []model.Distance, err error) (res *payload.Search_Response, rerr error) {
	if err != nil {
		log.Errorf("[toSearchResponse]\tUnknown error\t%+v", err)
		err = status.WrapWithInternal("Search API error occurred", err, info.Get())
	}
	results := make([]*payload.Object_Distance, 0, len(dists))
	for _, d := range dists {
		results = append(results, &payload.Object_Distance{
			Id:       d.ID,
			Distance: d.Distance,
		})
	}
	res = new(payload.Search_Response)
	res.Results = results
	return res, err
}
// StreamSearch handles a bidirectional stream of search requests,
// answering each incoming request via the unary Search handler with
// up to s.streamConcurrency requests in flight.
func (s *server) StreamSearch(stream agent.Agent_StreamSearchServer) error {
	ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamSearch")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency,
		func() interface{} { return new(payload.Search_Request) },
		func(ctx context.Context, data interface{}) (interface{}, error) {
			return s.Search(ctx, data.(*payload.Search_Request))
		})
}
// StreamSearchByID handles a bidirectional stream of search-by-id
// requests, answering each via the unary SearchByID handler with up to
// s.streamConcurrency requests in flight.
func (s *server) StreamSearchByID(stream agent.Agent_StreamSearchByIDServer) error {
	ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamSearchByID")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency,
		func() interface{} { return new(payload.Search_IDRequest) },
		func(ctx context.Context, data interface{}) (interface{}, error) {
			return s.SearchByID(ctx, data.(*payload.Search_IDRequest))
		})
}
// Insert adds a single vector to the NGT index under the request's id.
// Failures are logged, recorded on the span and returned as a gRPC
// Internal error.
func (s *server) Insert(ctx context.Context, vec *payload.Object_Vector) (res *payload.Empty, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Insert")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	err = s.ngt.Insert(vec.GetId(), vec.GetVector())
	if err != nil {
		log.Errorf("[Insert]\tUnknown error\t%+v", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeInternal(err.Error()))
		}
		return nil, status.WrapWithInternal(fmt.Sprintf("Insert API failed to insert %#v", vec), err, info.Get())
	}
	return new(payload.Empty), nil
}
// StreamInsert handles a bidirectional stream of insert requests,
// delegating each to the unary Insert handler with up to
// s.streamConcurrency requests in flight.
func (s *server) StreamInsert(stream agent.Agent_StreamInsertServer) error {
	ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamInsert")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency,
		func() interface{} { return new(payload.Object_Vector) },
		func(ctx context.Context, data interface{}) (interface{}, error) {
			return s.Insert(ctx, data.(*payload.Object_Vector))
		})
}
// MultiInsert adds a batch of vectors to the NGT index in one call.
// The request is flattened into an id->vector map before delegating to
// InsertMultiple. Note: duplicate ids in the request collapse to the
// last occurrence because of the map.
func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) (res *payload.Empty, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.MultiInsert")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	vmap := make(map[string][]float32, len(vecs.GetVectors()))
	for _, vec := range vecs.GetVectors() {
		vmap[vec.GetId()] = vec.GetVector()
	}
	err = s.ngt.InsertMultiple(vmap)
	if err != nil {
		log.Errorf("[MultiInsert]\tUnknown error\t%+v", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeInternal(err.Error()))
		}
		return nil, status.WrapWithInternal(fmt.Sprintf("MultiInsert API failed insert %#v", vmap), err, info.Get())
	}
	return new(payload.Empty), nil
}
// Update replaces the vector stored under the request's id.
// Failures are logged, recorded on the span and returned as a gRPC
// Internal error.
func (s *server) Update(ctx context.Context, vec *payload.Object_Vector) (res *payload.Empty, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Update")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	res = new(payload.Empty)
	err = s.ngt.Update(vec.GetId(), vec.GetVector())
	if err != nil {
		log.Errorf("[Update]\tUnknown error\t%+v", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeInternal(err.Error()))
		}
		return nil, status.WrapWithInternal(fmt.Sprintf("Update API failed to update %#v", vec), err, info.Get())
	}
	return res, nil
}
// StreamUpdate handles a bidirectional stream of update requests,
// delegating each to the unary Update handler with up to
// s.streamConcurrency requests in flight.
func (s *server) StreamUpdate(stream agent.Agent_StreamUpdateServer) error {
	ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamUpdate")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency,
		func() interface{} { return new(payload.Object_Vector) },
		func(ctx context.Context, data interface{}) (interface{}, error) {
			return s.Update(ctx, data.(*payload.Object_Vector))
		})
}
// MultiUpdate replaces a batch of vectors in one call. The request is
// flattened into an id->vector map before delegating to UpdateMultiple,
// so duplicate ids collapse to the last occurrence.
func (s *server) MultiUpdate(ctx context.Context, vecs *payload.Object_Vectors) (res *payload.Empty, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.MultiUpdate")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	vectors := vecs.GetVectors()
	vmap := make(map[string][]float32, len(vectors))
	for _, v := range vectors {
		vmap[v.GetId()] = v.GetVector()
	}
	if err = s.ngt.UpdateMultiple(vmap); err != nil {
		log.Errorf("[MultiUpdate]\tUnknown error\t%+v", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeInternal(err.Error()))
		}
		return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed to update %#v", vmap), err, info.Get())
	}
	return new(payload.Empty), nil
}
// Remove deletes the vector stored under the given uuid from the index.
// Failures are logged, recorded on the span and returned as a gRPC
// Internal error.
func (s *server) Remove(ctx context.Context, id *payload.Object_ID) (res *payload.Empty, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Remove")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	res = new(payload.Empty)
	uuid := id.GetId()
	err = s.ngt.Delete(uuid)
	if err != nil {
		log.Errorf("[Remove]\tUnknown error\t%+v", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeInternal(err.Error()))
		}
		return nil, status.WrapWithInternal(fmt.Sprintf("Remove API failed to delete uuid %s", uuid), err, info.Get())
	}
	return res, nil
}
// StreamRemove handles a bidirectional stream of remove requests,
// delegating each to the unary Remove handler with up to
// s.streamConcurrency requests in flight.
func (s *server) StreamRemove(stream agent.Agent_StreamRemoveServer) error {
	ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamRemove")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency,
		func() interface{} { return new(payload.Object_ID) },
		func(ctx context.Context, data interface{}) (interface{}, error) {
			return s.Remove(ctx, data.(*payload.Object_ID))
		})
}
// MultiRemove deletes a batch of vectors, identified by their uuids,
// from the index in a single call.
//
// Fix: the wrapped error message previously said "MultiUpdate API";
// it now correctly names the MultiRemove API.
func (s *server) MultiRemove(ctx context.Context, ids *payload.Object_IDs) (res *payload.Empty, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.MultiRemove")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	res = new(payload.Empty)
	uuids := ids.GetIds()
	err = s.ngt.DeleteMultiple(uuids...)
	if err != nil {
		log.Errorf("[MultiRemove]\tUnknown error\t%+v", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeInternal(err.Error()))
		}
		return nil, status.WrapWithInternal(fmt.Sprintf("MultiRemove API failed to delete %#v", uuids), err, info.Get())
	}
	return res, nil
}
// GetObject returns the stored vector for the given uuid, or a gRPC
// NotFound error if the uuid is not indexed.
func (s *server) GetObject(ctx context.Context, id *payload.Object_ID) (res *payload.Object_Vector, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.GetObject")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	uuid := id.GetId()
	vec, err := s.ngt.GetObject(uuid)
	if err != nil {
		log.Warnf("[GetObject]\tUUID not found\t%v", uuid)
		if span != nil {
			span.SetStatus(trace.StatusCodeNotFound(err.Error()))
		}
		return nil, status.WrapWithNotFound(fmt.Sprintf("GetObject API uuid %s Object not found", uuid), err, info.Get())
	}
	return &payload.Object_Vector{
		Id:     uuid,
		Vector: vec,
	}, nil
}
// StreamGetObject handles a bidirectional stream of object-id requests,
// delegating each to the unary GetObject handler with up to
// s.streamConcurrency requests in flight.
func (s *server) StreamGetObject(stream agent.Agent_StreamGetObjectServer) error {
	ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamGetObject")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency,
		func() interface{} { return new(payload.Object_ID) },
		func(ctx context.Context, data interface{}) (interface{}, error) {
			return s.GetObject(ctx, data.(*payload.Object_ID))
		})
}
// CreateIndex builds the NGT index from uncommitted inserts using the
// requested pool size. If there is nothing to commit it returns a
// FailedPrecondition status; any other failure maps to Internal.
func (s *server) CreateIndex(ctx context.Context, c *payload.Control_CreateIndexRequest) (res *payload.Empty, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.CreateIndex")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	res = new(payload.Empty)
	err = s.ngt.CreateIndex(ctx, c.GetPoolSize())
	if err != nil {
		// "no uncommitted index" is an expected condition, not a fault.
		if err == errors.ErrUncommittedIndexNotFound {
			log.Warnf("[CreateIndex]\tfailed precondition error\t%s", err.Error())
			if span != nil {
				span.SetStatus(trace.StatusCodeFailedPrecondition(err.Error()))
			}
			return nil, status.WrapWithFailedPrecondition(fmt.Sprintf("CreateIndex API failed: %s", err), err)
		}
		log.Errorf("[CreateIndex]\tUnknown error\t%+v", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeInternal(err.Error()))
		}
		return nil, status.WrapWithInternal(fmt.Sprintf("CreateIndex API failed to create indexes pool_size = %d", c.GetPoolSize()), err, info.Get())
	}
	return res, nil
}
// SaveIndex persists the in-memory NGT index to storage. Failures are
// logged, recorded on the span and returned as a gRPC Internal error.
func (s *server) SaveIndex(ctx context.Context, _ *payload.Empty) (res *payload.Empty, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.SaveIndex")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	res = new(payload.Empty)
	err = s.ngt.SaveIndex(ctx)
	if err != nil {
		log.Errorf("[SaveIndex]\tUnknown error\t%+v", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeInternal(err.Error()))
		}
		return nil, status.WrapWithInternal("SaveIndex API failed to save indexes ", err, info.Get())
	}
	return res, nil
}
// CreateAndSaveIndex commits uncommitted inserts into the index and
// then persists it, as a single combined operation.
func (s *server) CreateAndSaveIndex(ctx context.Context, c *payload.Control_CreateIndexRequest) (res *payload.Empty, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.CreateAndSaveIndex")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	res = new(payload.Empty)
	err = s.ngt.CreateAndSaveIndex(ctx, c.GetPoolSize())
	if err != nil {
		log.Errorf("[CreateAndSaveIndex]\tUnknown error\t%+v", err)
		if span != nil {
			span.SetStatus(trace.StatusCodeInternal(err.Error()))
		}
		return nil, status.WrapWithInternal(fmt.Sprintf("CreateAndSaveIndex API failed to create and save indexes pool_size = %d", c.GetPoolSize()), err, info.Get())
	}
	return res, nil
}
// IndexInfo reports index statistics: the number of stored vectors,
// the number of uncommitted (cached) inserts, and whether an indexing
// run is currently in progress.
func (s *server) IndexInfo(ctx context.Context, _ *payload.Empty) (res *payload.Info_Index_Count, err error) {
	ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.IndexInfo")
	defer func() {
		// StartSpan may yield a nil span when tracing is disabled.
		if span != nil {
			span.End()
		}
	}()
	return &payload.Info_Index_Count{
		Stored:      uint32(s.ngt.Len()),
		Uncommitted: uint32(s.ngt.InsertVCacheLen()),
		Indexing:    s.ngt.IsIndexing(),
	}, nil
}
|
package signup
import (
"fmt"
"github.com/rs/zerolog/log"
"github.com/saucelabs/saucectl/internal/sentry"
"github.com/spf13/cobra"
"os"
)
// Static metadata for the signup cobra command.
var (
	runUse     = "signup"
	runShort   = "Signup for Sauce Labs"
	runLong    = "Provide a web link for free trial signup"
	runExample = "saucectl signup"
	// NOTE(review): "Fir" looks like a typo for "Dir"; the variable also
	// appears unused in this file — confirm usage elsewhere in the
	// package before renaming or removing.
	defaultLogFir = "<cwd>/logs"
)
// Command creates the `run` command
func Command() *cobra.Command {
	return &cobra.Command{
		Use:     runUse,
		Short:   runShort,
		Long:    runLong,
		Example: runExample,
		Run: func(cmd *cobra.Command, args []string) {
			log.Info().Msg("Start Signup Command")
			// A failed run is reported to Sentry before exiting non-zero.
			if err := Run(); err != nil {
				log.Err(err).Msg("failed to execute run command")
				sentry.CaptureError(err, sentry.Scope{})
				os.Exit(1)
			}
		},
	}
}
// Run runs the command.
// It prints a static ASCII-art banner with the signup link to stdout
// and always returns nil.
func Run() error {
	// The banner below is user-facing output; keep it byte-for-byte.
	saucebotSignup := `
                          (‾)
                          ||  Puppeteer,
     ##################   /(  Playwright,
    ##                ##,..%(  TestCafe,
   (#                 ##....%(  Cypress!
   (##                ##((((...%(
   (##               ##   ####
     ,##################     ##   %##
                      ###     /###
      /################\   /##
     (#####/    sSSSs   \##########)
      /######(  sSSSSSs  )#####
     ##/ ######\  sSSSs /######
   ####   #####################
  ##  ##     (####     #####
 ##   ##      #####     #####
              #####     #####
 Achieve digital confidence with the Sauce Labs Testrunner Toolkit

 View and analyze test results online with a free Sauce Labs account:
 https://bit.ly/saucectl-signup`
	fmt.Println(saucebotSignup)
	return nil
}
|
package sqlite
import (
"context"
"database/sql"
"log"
"github.com/hezbymuhammad/payment-gateway/domain"
)
// sqliteMerchantRepo is a SQLite-backed implementation of
// domain.MerchantRepository.
type sqliteMerchantRepo struct {
	// DB is the shared database handle; it is owned by the caller.
	DB *sql.DB
}
// NewMerchantRepository wraps the given database handle in a
// domain.MerchantRepository implementation.
func NewMerchantRepository(db *sql.DB) domain.MerchantRepository {
	return &sqliteMerchantRepo{
		DB: db,
	}
}
// IsAuthorizedParent reports whether a parent/child merchant relation
// exists in merchant_groups.
//
// Fixes: the query now honours ctx (QueryRowContext instead of Query),
// and the Scan error is checked instead of being silently discarded —
// previously a failed scan was indistinguishable from "authorized".
func (mr *sqliteMerchantRepo) IsAuthorizedParent(ctx context.Context, mg *domain.MerchantGroup) (bool, error) {
	query := "SELECT EXISTS (SELECT 1 FROM merchant_groups WHERE parent_merchant_id=? AND child_merchant_id=? LIMIT 1) as authorized"
	// SQLite returns the EXISTS result as "0"/"1"; scan it as a string.
	var isExists string
	err := mr.DB.QueryRowContext(ctx, query, mg.ParentMerchantID, mg.ChildMerchantID).Scan(&isExists)
	if err != nil {
		log.Println(query)
		log.Println(err)
		return false, err
	}
	return isExists != "0", nil
}
// Store inserts a new merchant row and writes the generated primary
// key back into m.ID.
//
// Fix: the prepared statement is now closed via defer, so the
// statement handle is no longer leaked on every call.
func (mr *sqliteMerchantRepo) Store(ctx context.Context, m *domain.Merchant) error {
	query := "INSERT INTO merchants(name) VALUES(?)"
	stmt, err := mr.DB.PrepareContext(ctx, query)
	if err != nil {
		log.Println(query)
		log.Println(err)
		return err
	}
	defer stmt.Close()
	res, err := stmt.ExecContext(ctx, m.Name)
	if err != nil {
		log.Println(query)
		log.Println(err)
		return err
	}
	lastID, err := res.LastInsertId()
	if err != nil {
		log.Println(query)
		log.Println(err)
		return err
	}
	m.ID = lastID
	return nil
}
// InitSetting creates the default settings row for a newly created
// merchant (color RED, payment type CARD, payment name VISA).
//
// Fix: the prepared statement is now closed via defer, so the
// statement handle is no longer leaked on every call.
func (mr *sqliteMerchantRepo) InitSetting(ctx context.Context, m *domain.Merchant) error {
	query := "INSERT INTO settings(merchant_id, color, payment_type, payment_name) VALUES(?, ?, ?, ?)"
	stmt, err := mr.DB.PrepareContext(ctx, query)
	if err != nil {
		log.Println(query)
		log.Println(err)
		return err
	}
	defer stmt.Close()
	_, err = stmt.ExecContext(ctx, m.ID, "RED", "CARD", "VISA")
	if err != nil {
		log.Println(query)
		log.Println(err)
		return err
	}
	return nil
}
// SetChild records a parent/child relation between two merchants in
// merchant_groups.
//
// Fix: the prepared statement is now closed via defer, so the
// statement handle is no longer leaked on every call.
func (mr *sqliteMerchantRepo) SetChild(ctx context.Context, mg *domain.MerchantGroup) error {
	query := "INSERT INTO merchant_groups(parent_merchant_id, child_merchant_id) VALUES(?, ?)"
	stmt, err := mr.DB.PrepareContext(ctx, query)
	if err != nil {
		log.Println(query)
		log.Println(err)
		return err
	}
	defer stmt.Close()
	_, err = stmt.ExecContext(ctx, mg.ParentMerchantID, mg.ChildMerchantID)
	if err != nil {
		log.Println(query)
		log.Println(err)
		return err
	}
	return nil
}
|
package service
import (
"bytes"
"github.com/gorilla/mux"
"github.com/koind/shortener-servis/mystats"
"go.uber.org/zap"
"net/http"
)
// Shortener implements business logic for the shortener service.
type Shortener interface {
	// Shorten returns the short token for the given original URL.
	Shorten(url string) string
	// Resolve returns the original URL for the given short token.
	Resolve(url string) string
}
// ShortenerService exposes the Shortener business logic over HTTP,
// recording per-URL request statistics as it goes.
type ShortenerService struct {
	Shortener                          // embedded business logic
	stats     mystats.StatsInterface   // per-path request counters
	logger    *zap.Logger              // structured logger
	address   string                   // public base address used to build short URLs
}
// NewShortenerService creates a new shortener service
func NewShortenerService(
	shortener Shortener,
	stats mystats.StatsInterface,
	logger *zap.Logger,
	address string,
) *ShortenerService {
	svc := &ShortenerService{
		Shortener: shortener,
		stats:     stats,
		logger:    logger,
		address:   address,
	}
	return svc
}
// ResolverHandle serves both directions of the shortener:
// GET redirects a short token to its original URL; POST reads a URL
// from the body and responds with the newly built short URL.
//
// Fixes: the error from reading the POST body is no longer ignored,
// the 404 log no longer prints the always-empty shadowed `url`
// variable (it logs the requested path instead), and the numeric 404
// is replaced with http.StatusNotFound.
func (ss *ShortenerService) ResolverHandle(w http.ResponseWriter, r *http.Request) {
	ss.stats.Add(r.URL.String())
	switch r.Method {
	case http.MethodGet:
		vars := mux.Vars(r)
		if url, ok := vars["shortened"]; ok {
			shortUrl := ss.Shortener.Resolve(string(url))
			ss.logger.Info(
				"Url was found",
				zap.String("url", url),
				zap.String("shortUrl", shortUrl),
			)
			http.Redirect(w, r, shortUrl, http.StatusSeeOther)
		} else {
			ss.logger.Warn("Url not found", zap.String("url", r.URL.String()))
			w.WriteHeader(http.StatusNotFound)
		}
	case http.MethodPost:
		buf := new(bytes.Buffer)
		if _, err := buf.ReadFrom(r.Body); err != nil {
			ss.logger.Warn("Failed to read request body", zap.Error(err))
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		shortened := ss.address + "/" + ss.Shortener.Shorten(buf.String())
		ss.logger.Info("Created new short url", zap.String("shortUrl", shortened))
		w.Write([]byte(shortened))
	}
}
// StatsHandle writes the accumulated request statistics to the client.
func (ss *ShortenerService) StatsHandle(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte(ss.stats.GetAll()))
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentallocationstate"
)
// TrainedModelDeploymentAllocationStatus type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/ml/_types/TrainedModel.ts#L387-L394
type TrainedModelDeploymentAllocationStatus struct {
	// AllocationCount The current number of nodes where the model is allocated.
	AllocationCount int `json:"allocation_count"`
	// State The detailed allocation state related to the nodes.
	State deploymentallocationstate.DeploymentAllocationState `json:"state"`
	// TargetAllocationCount The desired number of nodes for model allocation.
	TargetAllocationCount int `json:"target_allocation_count"`
}
// UnmarshalJSON decodes the object token-by-token, accepting the count
// fields either as JSON numbers or as numeric strings (as Elasticsearch
// may emit either form).
//
// Fix: errors from dec.Decode on the count fields are no longer
// silently discarded — malformed input now surfaces as an error
// instead of leaving the field at its zero value.
// NOTE(review): this file is marked "Code generated ... DO NOT EDIT";
// the same fix should ideally land in the generator.
func (s *TrainedModelDeploymentAllocationStatus) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		switch t {
		case "allocation_count":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.AllocationCount = value
			case float64:
				f := int(v)
				s.AllocationCount = f
			}
		case "state":
			if err := dec.Decode(&s.State); err != nil {
				return err
			}
		case "target_allocation_count":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.TargetAllocationCount = value
			case float64:
				f := int(v)
				s.TargetAllocationCount = f
			}
		}
	}
	return nil
}
// NewTrainedModelDeploymentAllocationStatus returns a TrainedModelDeploymentAllocationStatus.
func NewTrainedModelDeploymentAllocationStatus() *TrainedModelDeploymentAllocationStatus {
	// All fields start at their zero values; UnmarshalJSON fills them in.
	return &TrainedModelDeploymentAllocationStatus{}
}
|
package cbor
import (
"testing"
"github.com/polydawn/refmt/tok/fixtures"
)
// testComposite checks round-trip encode/decode of nested composite
// CBOR structures (maps in arrays, arrays in maps, deep nesting, and
// null at various depths), in both definite- and indefinite-length
// encodings. Byte-value conventions used throughout (per RFC 8949):
// 0xbf begins an indefinite-length map, 0x9f an indefinite-length
// array, 0xff is the "break" terminator, 0xa0+n / 0x80+n are a map /
// array of n entries, 0x60+n a text string of n bytes, 0xf6 is null.
// Note: the lowercase name means `go test` does not run this directly;
// presumably a driver test calls it — confirm before renaming.
func testComposite(t *testing.T) {
	t.Run("array nested in map as non-first and final entry, all indefinite length", func(t *testing.T) {
		seq := fixtures.SequenceMap["array nested in map as non-first and final entry"].SansLengthInfo()
		canon := bcat(b(0xbf),
			b(0x60+2), []byte(`k1`), b(0x60+2), []byte(`v1`),
			b(0x60+2), []byte(`ke`), bcat(b(0x9f),
				b(0x60+2), []byte(`oh`),
				b(0x60+4), []byte(`whee`),
				b(0x60+3), []byte(`wow`),
				b(0xff),
			),
			b(0xff),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("array nested in map as first and non-final entry, all indefinite length", func(t *testing.T) {
		seq := fixtures.SequenceMap["array nested in map as first and non-final entry"].SansLengthInfo()
		canon := bcat(b(0xbf),
			b(0x60+2), []byte(`ke`), bcat(b(0x9f),
				b(0x60+2), []byte(`oh`),
				b(0x60+4), []byte(`whee`),
				b(0x60+3), []byte(`wow`),
				b(0xff),
			),
			b(0x60+2), []byte(`k1`), b(0x60+2), []byte(`v1`),
			b(0xff),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("maps nested in array", func(t *testing.T) {
		seq := fixtures.SequenceMap["maps nested in array"]
		canon := bcat(b(0x80+3),
			bcat(b(0xa0+1),
				b(0x60+1), []byte(`k`), b(0x60+1), []byte(`v`),
			),
			b(0x60+4), []byte(`whee`),
			bcat(b(0xa0+1),
				b(0x60+2), []byte(`k1`), b(0x60+2), []byte(`v1`),
			),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("maps nested in array, all indefinite length", func(t *testing.T) {
		// Same fixture as above, with length prefixes stripped so the
		// encoder must use indefinite-length forms.
		seq := fixtures.SequenceMap["maps nested in array"].SansLengthInfo()
		canon := bcat(b(0x9f),
			bcat(b(0xbf),
				b(0x60+1), []byte(`k`), b(0x60+1), []byte(`v`),
				b(0xff),
			),
			b(0x60+4), []byte(`whee`),
			bcat(b(0xbf),
				b(0x60+2), []byte(`k1`), b(0x60+2), []byte(`v1`),
				b(0xff),
			),
			b(0xff),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("arrays in arrays in arrays", func(t *testing.T) {
		seq := fixtures.SequenceMap["arrays in arrays in arrays"]
		canon := bcat(b(0x80+1), b(0x80+1), b(0x80+0))
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("arrays in arrays in arrays, all indefinite length", func(t *testing.T) {
		seq := fixtures.SequenceMap["arrays in arrays in arrays"].SansLengthInfo()
		canon := bcat(b(0x9f), b(0x9f), b(0x9f), b(0xff), b(0xff), b(0xff))
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("maps nested in maps", func(t *testing.T) {
		seq := fixtures.SequenceMap["maps nested in maps"]
		canon := bcat(b(0xa0+1),
			b(0x60+1), []byte(`k`), bcat(b(0xa0+1),
				b(0x60+2), []byte(`k2`), b(0x60+2), []byte(`v2`),
			),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("maps nested in maps, all indefinite length", func(t *testing.T) {
		seq := fixtures.SequenceMap["maps nested in maps"].SansLengthInfo()
		canon := bcat(b(0xbf),
			b(0x60+1), []byte(`k`), bcat(b(0xbf),
				b(0x60+2), []byte(`k2`), b(0x60+2), []byte(`v2`),
				b(0xff),
			),
			b(0xff),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	// Empty and null and null-at-depth...
	t.Run("empty", func(t *testing.T) {
		t.Skip("works, but awkward edge cases in test helpers")
		//seq := fixtures.SequenceMap["empty"]
		//canon := []byte(nil)
		//t.Run("encode canonical", func(t *testing.T) {
		//	checkEncoding(t, seq, canon, nil)
		//})
		//t.Run("decode canonical", func(t *testing.T) {
		//	checkDecoding(t, seq, canon, io.EOF)
		//})
	})
	t.Run("null", func(t *testing.T) {
		seq := fixtures.SequenceMap["null"]
		canon := b(0xf6)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("null in array", func(t *testing.T) {
		seq := fixtures.SequenceMap["null in array"]
		canon := bcat(b(0x80+1),
			b(0xf6),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("null in array, indefinite length", func(t *testing.T) {
		seq := fixtures.SequenceMap["null in array"].SansLengthInfo()
		canon := bcat(b(0x9f),
			b(0xf6),
			b(0xff),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("null in map", func(t *testing.T) {
		seq := fixtures.SequenceMap["null in map"]
		canon := bcat(b(0xa0+1),
			b(0x60+1), []byte(`k`), b(0xf6),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("null in map, indefinite length", func(t *testing.T) {
		seq := fixtures.SequenceMap["null in map"].SansLengthInfo()
		canon := bcat(b(0xbf),
			b(0x60+1), []byte(`k`), b(0xf6),
			b(0xff),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
	})
	t.Run("null in array in array", func(t *testing.T) {
		seq := fixtures.SequenceMap["null in array in array"]
		canon := bcat(b(0x80+1),
			b(0x80+1),
			b(0xf6),
		)
		t.Run("encode canonical", func(t *testing.T) {
			checkEncoding(t, seq, canon, nil)
		})
		t.Run("decode canonical", func(t *testing.T) {
			checkDecoding(t, seq, canon, nil)
		})
		t.Run("indefinite length", func(t *testing.T) {
			seq := seq.SansLengthInfo()
			canon := bcat(b(0x9f),
				b(0x9f),
				b(0xf6),
				b(0xff),
				b(0xff),
			)
			t.Run("encode", func(t *testing.T) {
				checkEncoding(t, seq, canon, nil)
			})
			t.Run("decode", func(t *testing.T) {
				checkDecoding(t, seq, canon, nil)
			})
		})
	})
}
|
package max
import (
"github.com/maprost/application/example/max/jobposition/bunny"
"github.com/maprost/application/example/max/jobposition/santa"
"github.com/maprost/application/example/max/profile"
"github.com/maprost/application/generator/genmodel"
)
// Application assembles the application model for the given company:
// the profile is always attached, and the job position is selected by
// company name ("santa" or "bunny"); unknown companies get no position.
func Application(company string) genmodel.Application {
	app := genmodel.Application{
		Profile: profile.New(),
	}
	switch company {
	case "santa":
		app.JobPosition = santa.New()
	case "bunny":
		app.JobPosition = bunny.New()
	}
	return app
}
|
package handlers
import (
"coffeebeans-people-backend/auth"
"coffeebeans-people-backend/models"
"coffeebeans-people-backend/utility"
"context"
"encoding/json"
"net/http"
)
// Login returns an HTTP handler that authenticates a user from the
// JSON request body and responds with a LoginResponse containing a
// fresh token.
//
// Fix: the error returned by svc.LoginUser was previously discarded
// (immediately overwritten by GenerateToken's error), so a failed
// login proceeded to token generation with a zero-value user. The
// error is now checked and answered with 401 before any token work.
func Login(svc models.ApiSvc, authSvc auth.AuthSvc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var body models.User
		err := json.NewDecoder(r.Body).Decode(&body)
		if err != nil {
			utility.NewJSONWriter(w).Write(models.Response{
				Error:   err.Error(),
				Message: "Error decoding request body",
			}, http.StatusBadRequest)
			return
		}
		user, isProfileComplete, err := svc.LoginUser(context.TODO(), body.Email, body.Password)
		if err != nil {
			utility.NewJSONWriter(w).Write(models.Response{
				Error:   err.Error(),
				Message: "Invalid Credentials",
			}, http.StatusUnauthorized)
			return
		}
		tokenId, err := authSvc.GenerateToken(&user)
		if err != nil {
			utility.NewJSONWriter(w).Write(models.Response{
				Error:   err.Error(),
				Message: "Error generating token id",
			}, http.StatusBadRequest)
			return
		}
		loginResponse := models.LoginResponse{
			Email:             user.Email,
			Name:              user.Name,
			EmployeeId:        user.EmployeeId,
			Role:              user.Role,
			TokenId:           tokenId,
			IsProfileComplete: isProfileComplete,
		}
		// Defensive: an empty email after a "successful" login still
		// means the credentials did not match a real user.
		if len(loginResponse.Email) == 0 {
			utility.NewJSONWriter(w).Write(models.Response{
				Error:   "Unauthorized",
				Message: "Invalid Credentials",
			}, http.StatusUnauthorized)
			return
		}
		utility.NewJSONWriter(w).Write(loginResponse, http.StatusOK)
	}
}
|
package game_map
import (
"fmt"
"github.com/faiface/pixel"
"github.com/faiface/pixel/imdraw"
"github.com/faiface/pixel/pixelgl"
"github.com/faiface/pixel/text"
"github.com/sirupsen/logrus"
"github.com/steelx/go-rpg-cgm/combat"
"github.com/steelx/go-rpg-cgm/gui"
"github.com/steelx/go-rpg-cgm/state_machine"
"github.com/steelx/go-rpg-cgm/utilz"
"github.com/steelx/go-rpg-cgm/world"
"image/color"
"math"
"reflect"
)
//CS -> CombatState
//CS -> CombatState
// State-machine identifiers for combat character states. The string
// values are used as state keys; do not change them without auditing
// every comparison site.
const (
	csNpcStand = "cs_npc_stand"
	csEnemyDie = "cs_enemy_die"
	csStandby  = "cs_standby" // The character is waiting to be told what action to do by the player or AI
	csProne    = "cs_prone"   // The character is waiting and ready to perform a given combat action
	csAttack   = "cs_attack"  // The character will run an attack animation and attack an enemy
	csSpecial  = "cs_cast"    // The character will run a cast-spell animation and a special effect will play
	csUse      = "cs_use"     // The character uses some item with a use-item animation
	// NOTE(review): "cd_hurt" looks like a typo for "cs_hurt" — confirm
	// against every lookup before changing the string value.
	csHurt    = "cd_hurt"     // The character takes some damage. Animation and numbers
	csDie     = "cs_die"      // The character dies and the sprite is changed to the death sprite
	csDeath   = "cs_death"
	csMove    = "cs_move"     // The character moves toward or away from the enemy, in order to perform an action
	csVictory = "cs_victory"  // The character dances around and combat ends
	csRunanim = "cs_run_anim" // plays common animations states
	csRetreat = "cs_retreat"
	csSteal   = "cs_steal"
)
// CombatState is the top-level game state for a battle: it owns the
// combatants for both sides, the screen layout and panels, progress
// bars, visual effects, and the event queue that sequences turns.
type CombatState struct {
	GameState        *gui.StateStack // the global stack this combat state lives on
	InternalStack    *gui.StateStack // sub-states (menus, effects) local to the combat screen
	win              *pixelgl.Window
	Background       *pixel.Sprite
	BackgroundBounds pixel.Rect
	Pos              pixel.Vec
	Layout           gui.Layout
	LayoutMap        map[string][][]pixel.Vec // spawn positions keyed by side, indexed by party size
	Actors           map[string][]*combat.Actor
	Characters       map[string][]*Character
	DeathList        []*Character // characters playing their death animation
	ActorCharMap     map[*combat.Actor]*Character
	SelectedActor    *combat.Actor
	EffectList       []EffectState
	Loot             []combat.ActorDropItem
	Panels           []gui.Panel
	TipPanel,
	NoticePanel gui.Panel
	tipPanelText, noticePanelText string
	showTipPanel, showNoticePanel bool
	PanelTitles                   []PanelTitle
	PartyList,
	StatsList *gui.SelectionMenu
	//HP and MP columns in the bottom right panel
	StatsYCol,
	marginLeft,
	marginTop float64
	Bars       map[*combat.Actor]BarStats //actor ID = BarStats
	imd        *imdraw.IMDraw
	EventQueue *EventQueue
	IsFinishing,
	Fled,
	CanFlee bool
	OnDieCallback, OnWinCallback func()
}
// PanelTitle is a piece of static text drawn at a fixed screen position
// above a combat panel column.
type PanelTitle struct {
	text string
	x, y float64
}
// BarStats pairs the HP and MP progress bars rendered for one actor.
type BarStats struct {
	HP, MP gui.ProgressBarIMD
}
// CombatStateCreate builds a fully initialised CombatState from the
// given definition: it loads the background, carves the screen into
// layout panels, spawns combat characters for both sides, and wires up
// the party/stats selection menus and HP/MP bars.
func CombatStateCreate(state *gui.StateStack, win *pixelgl.Window, def CombatDef) *CombatState {
	backgroundImg, err := utilz.LoadPicture(def.Background)
	utilz.PanicIfErr(err)
	// Setup layout panel
	// Split order matters: each split consumes a named region produced
	// by the previous one.
	pos := pixel.V(0, 0)
	layout := gui.LayoutCreate(pos.X, pos.Y, win)
	layout.SplitHorz("screen", "top", "bottom", 0.72, 0)
	layout.SplitHorz("top", "notice", "top", 0.25, 0)
	layout.Contract("notice", 75, 25)
	layout.SplitHorz("bottom", "tip", "bottom", 0.24, 0)
	layout.SplitVert("bottom", "left", "right", 0.67, 0)
	bottomH := layout.Top("left")
	screenW := win.Bounds().W()
	screenH := win.Bounds().H()
	// The battle background fills everything above the bottom panels.
	bgBounds := pixel.R(0, bottomH, screenW, screenH)
	c := &CombatState{
		win:              win,
		GameState:        state,
		InternalStack:    gui.StateStackCreate(win),
		BackgroundBounds: bgBounds,
		Background:       pixel.NewSprite(backgroundImg, bgBounds),
		Pos:              pos,
		Actors: map[string][]*combat.Actor{
			party:   def.Actors.Party,
			enemies: def.Actors.Enemies,
		},
		Characters:    make(map[string][]*Character),
		ActorCharMap:  make(map[*combat.Actor]*Character),
		StatsYCol:     208,
		marginLeft:    18,
		marginTop:     20,
		imd:           imdraw.New(nil),
		EventQueue:    EventsQueueCreate(),
		Layout:        layout,
		CanFlee:       def.CanFlee,
		OnWinCallback: def.OnWin,
		OnDieCallback: def.OnDie,
	}
	c.LayoutMap = combatLayout
	c.CreateCombatCharacters(party)
	c.CreateCombatCharacters(enemies)
	c.Panels = []gui.Panel{
		layout.CreatePanel("left"),
		layout.CreatePanel("right"),
	}
	c.showTipPanel = false
	c.showNoticePanel = false
	c.TipPanel = layout.CreatePanel("tip")
	c.NoticePanel = layout.CreatePanel("notice")
	//Set up player list
	partyListMenu := gui.SelectionMenuCreate(19, 0, 100,
		c.Actors[party],
		false,
		pixel.ZV,
		c.OnPartyMemberSelect,
		c.RenderPartyNames,
	)
	c.PartyList = &partyListMenu
	//title
	x := layout.Left("left")
	y := layout.Top("left")
	marginTop := c.marginTop
	marginLeft := c.marginLeft
	// Column headers for the bottom-left (names) and bottom-right
	// (HP/MP) panels.
	c.PanelTitles = []PanelTitle{
		{"NAME", x + marginLeft, y - marginTop + 2},
		{"HP", layout.Left("right") + marginLeft, y - marginTop + 2},
		{"MP", layout.Left("right") + marginLeft + c.StatsYCol, y - marginTop + 2},
	}
	y = y - 35 // - margin top
	c.PartyList.SetPosition(x+marginLeft, y)
	c.PartyList.HideCursor()
	c.Bars = make(map[*combat.Actor]BarStats)
	for _, p := range c.Actors[party] {
		c.BuildBars(p)
	}
	for _, e := range c.Actors[enemies] {
		c.BuildBars(e)
	}
	statsListMenu := gui.SelectionMenuCreate(19, 0, 100,
		c.Actors[party],
		false,
		pixel.ZV,
		c.OnPartyMemberSelect,
		c.RenderPartyStats,
	)
	c.StatsList = &statsListMenu
	x = layout.Left("right") - 8
	c.StatsList.SetPosition(x, y)
	c.StatsList.HideCursor()
	return c
}
// BuildBars creates the HP and MP progress bars for one actor from its
// current/max stats and registers them in c.Bars. Positions are left
// at (0,0); they are placed at render time.
func (c *CombatState) BuildBars(actor *combat.Actor) {
	hpBar := gui.ProgressBarIMDCreate(
		0, 0,
		actor.Stats.Get("HpNow"),
		actor.Stats.Get("HpMax"),
		"#dc3545", //red
		"#15FF00", //green
		3, 100,
		c.imd,
	)
	mpBar := gui.ProgressBarIMDCreate(
		0, 0,
		actor.Stats.Get("MpNow"),
		actor.Stats.Get("MpMax"),
		"#7f7575",
		"#00f1ff",
		3, 100,
		c.imd,
	)
	c.Bars[actor] = BarStats{
		HP: hpBar,
		MP: mpBar,
	}
}
// Enter satisfies the state-stack interface; combat needs no
// additional setup on entry.
func (c *CombatState) Enter() {
}
// Exit satisfies the state-stack interface; combat needs no
// additional teardown on exit.
func (c *CombatState) Exit() {
}
// Update advances one frame of combat: it ticks every character's
// state machine, prunes finished death animations and effects, lets
// any internal sub-state (menus etc.) consume the frame, and otherwise
// drives the event queue and win/lose checks. It returns true when an
// internal sub-state handled the frame.
func (c *CombatState) Update(dt float64) bool {
	for _, v := range c.Characters[party] {
		v.Controller.Update(dt)
	}
	for _, v := range c.Characters[enemies] {
		v.Controller.Update(dt)
	}
	// Iterate backwards so removal by index stays valid.
	for i := len(c.DeathList) - 1; i >= 0; i-- {
		char := c.DeathList[i]
		char.Controller.Update(dt)
		state := char.Controller.Current
		if state.IsFinished() {
			c.DeathList = c.removeCharAtIndex(c.DeathList, i)
		}
	}
	for i := len(c.EffectList) - 1; i >= 0; i-- {
		fx := c.EffectList[i]
		if fx.IsFinished() {
			c.EffectList = c.removeFxAtIndex(c.EffectList, i)
		}
		fx.Update(dt)
	}
	// An active internal state (e.g. a menu) takes over the frame.
	if len(c.InternalStack.States) != 0 && c.InternalStack.Top() != nil {
		c.InternalStack.Update(dt)
		return true
	}
	if !c.IsFinishing {
		c.EventQueue.Update()
		c.AddTurns(c.Actors[party])
		c.AddTurns(c.Actors[enemies])
		if c.PartyWins() || c.HasPartyFled() {
			c.EventQueue.Clear()
			c.OnWin()
		} else if c.EnemyWins() {
			c.EventQueue.Clear()
			c.OnLose()
		}
	}
	return false
}
// Render draws one frame of the combat screen: background, all live
// and dying characters (with HP bars at enemies' feet), effects,
// panels, optional tip/notice text, panel titles, the menus, and the
// event queue — then recentres the window camera.
// NOTE(review): this is the only method on CombatState with a value
// receiver, which copies the whole struct per call — confirm whether a
// pointer receiver was intended.
func (c CombatState) Render(renderer *pixelgl.Window) {
	c.Background.Draw(renderer, pixel.IM.Moved(c.Pos))
	//for _, v := range c.Characters[party] {
	//	pos := pixel.V(v.Entity.X, v.Entity.Y)
	//	v.Entity.Render(nil, renderer, pos)
	//}
	//for _, v := range c.Characters[enemies] {
	//	pos := pixel.V(v.Entity.X, v.Entity.Y)
	//	v.Entity.Render(nil, renderer, pos)
	//}
	for a, char := range c.ActorCharMap {
		pos := pixel.V(char.Entity.X, char.Entity.Y)
		char.Entity.Render(nil, renderer, pos)
		// Only enemies get the floating HP bar; party HP is shown in
		// the bottom panel instead.
		if !a.IsPlayer() {
			c.DrawHpBarAtFeet(renderer, char.Entity.X, char.Entity.Y, a)
		}
	}
	for _, v := range c.DeathList {
		pos := pixel.V(v.Entity.X, v.Entity.Y)
		v.Entity.Render(nil, renderer, pos)
	}
	for i := len(c.EffectList) - 1; i >= 0; i-- {
		v := c.EffectList[i]
		v.Render(renderer)
	}
	for _, v := range c.Panels {
		v.Draw(renderer)
	}
	if c.showTipPanel {
		x := c.Layout.MidX("tip") - 10
		y := c.Layout.MidY("tip")
		c.TipPanel.Draw(renderer)
		textBase := text.New(pixel.V(x, y), gui.BasicAtlasAscii)
		fmt.Fprintln(textBase, c.tipPanelText)
		textBase.Draw(renderer, pixel.IM)
	}
	if c.showNoticePanel {
		x := c.Layout.MidX("notice") - 10
		y := c.Layout.MidY("notice")
		c.NoticePanel.Draw(renderer)
		textBase := text.New(pixel.V(x, y), gui.BasicAtlasAscii)
		fmt.Fprintln(textBase, c.noticePanelText)
		textBase.Draw(renderer, pixel.IM)
	}
	textBase := text.New(pixel.V(0, 0), gui.BasicAtlas12)
	//textBase.Color = txtColor
	for _, v := range c.PanelTitles {
		textBase.Clear()
		fmt.Fprintln(textBase, v.text)
		textBase.Draw(renderer, pixel.IM.Moved(pixel.V(v.x, v.y)))
	}
	c.PartyList.Render(renderer)
	c.StatsList.Render(renderer)
	c.InternalStack.Render(renderer)
	c.EventQueue.Render(renderer)
	camera := pixel.IM.Scaled(c.Pos, 1.0).Moved(c.win.Bounds().Center().Sub(c.Pos))
	c.win.SetMatrix(camera)
}
// HandleInput is part of the state interface; combat input is driven by the
// internal menus and event queue rather than handled here directly.
func (c *CombatState) HandleInput(win *pixelgl.Window) {
}
// CreateCombatCharacters builds a combat Character for each actor on the
// given side ("party" or "enemies"), positions it according to the layout
// table for that group size, and registers it in ActorCharMap/Characters.
// Panics if an actor's Id has no entry in CharacterDefinitions.
func (c *CombatState) CreateCombatCharacters(key string) {
	actorsList := c.Actors[key]
	// Layout rows are indexed by (group size - 1): one row per party size.
	layout := c.LayoutMap[key][len(actorsList)-1]
	for k, v := range actorsList {
		charDef, ok := CharacterDefinitions[v.Id]
		if !ok {
			panic(fmt.Sprintf("Id '%s' Not found in CharacterDefinitions", v.Id))
		}
		// Prefer a dedicated combat sprite sheet when one is defined.
		if charDef.CombatEntityDef.Texture != "" {
			charDef.EntityDef = charDef.CombatEntityDef
		}
		// char is declared before CharacterCreate so the state-factory
		// closures below can capture it; they only run after the
		// assignment completes, when char is non-nil.
		var char *Character
		char = CharacterCreate(
			charDef,
			map[string]func() state_machine.State{
				csStandby: func() state_machine.State {
					return CSStandByCreate(char, c)
				},
				csNpcStand: func() state_machine.State {
					return NPCStandCombatStateCreate(char, c)
				},
				csRunanim: func() state_machine.State {
					return CSRunAnimCreate(char, c)
				},
				csHurt: func() state_machine.State {
					return CSHurtCreate(char, c)
				},
				csMove: func() state_machine.State {
					return CSMoveCreate(char, c)
				},
				csEnemyDie: func() state_machine.State {
					return CSEnemyDieCreate(char, c)
				},
			},
		)
		c.ActorCharMap[v] = char
		pos := layout[k]
		// Combat positions are 0 - 1
		// Need scaling to the screen size.
		x := pos.X * c.win.Bounds().W()
		y := pos.Y * c.win.Bounds().H()
		char.Entity.X = x
		char.Entity.Y = y
		// Change to standby because it's combat time
		animName := csStandby
		char.Controller.Change(csStandby, animName)
		c.Characters[key] = append(c.Characters[key], char)
	}
}
// OnPartyMemberSelect is the selection callback for the party menu.
// Currently it only logs the chosen index and item.
func (c *CombatState) OnPartyMemberSelect(index int, str interface{}) {
	logrus.Info(index, str)
}
// RenderPartyNames draws one party member's name for the selection menu.
// Expected args: renderer pixel.Target, x float64, y float64, item *combat.Actor.
func (c *CombatState) RenderPartyNames(args ...interface{}) {
	renderer := reflect.ValueOf(args[0]).Interface().(pixel.Target)
	x := reflect.ValueOf(args[1]).Interface().(float64)
	y := reflect.ValueOf(args[2]).Interface().(float64)
	actor := reflect.ValueOf(args[3]).Interface().(*combat.Actor)

	// The currently selected actor is highlighted in yellow; others are white.
	nameColor := utilz.HexToColor("#FFFFFF") //white
	if c.SelectedActor == actor {
		nameColor = utilz.HexToColor("#ffdc00") //yellow
	}

	cursorWidth := 16.0 + c.marginLeft
	label := text.New(pixel.V(x-cursorWidth, y), gui.BasicAtlasAscii)
	label.Color = nameColor
	fmt.Fprintln(label, actor.Name)
	label.Draw(renderer, pixel.IM)
}
// RenderPartyStats draws one party member's HP and MP (numbers plus bars)
// for the stats menu. Expected args: renderer pixel.Target, x, y float64,
// item *combat.Actor.
func (c *CombatState) RenderPartyStats(args ...interface{}) {
	renderer := reflect.ValueOf(args[0]).Interface().(pixel.Target)
	x := reflect.ValueOf(args[1]).Interface().(float64)
	y := reflect.ValueOf(args[2]).Interface().(float64)
	actor := reflect.ValueOf(args[3]).Interface().(*combat.Actor)

	const cursorWidth = 22.0
	const barOffset = 70.0
	x = x + c.marginLeft - cursorWidth

	stats := actor.Stats
	bars := c.Bars[actor]

	// HP: bar then numeric readout.
	bars.HP.SetPosition(x+barOffset, y)
	bars.HP.SetValue(stats.Get("HpNow"))
	bars.HP.Render(renderer)
	c.DrawHP(renderer, x, y, actor)

	// MP: shifted one stats column to the right.
	x += c.StatsYCol
	c.DrawMP(renderer, x, y, actor)
	bars.MP.SetPosition(x+barOffset*0.7, y)
	bars.MP.SetValue(stats.Get("MpNow"))
	bars.MP.Render(renderer)
}
// DrawHP renders the actor's health as "current/max" text at (x, y).
// The text turns light yellow below 50% health and red below 25%.
func (c *CombatState) DrawHP(renderer pixel.Target, x, y float64, actor *combat.Actor) {
	hp, max := actor.Stats.Get("HpNow"), actor.Stats.Get("HpMax")
	percentHealth := hp / max
	txtColor := utilz.HexToColor("#ffffff")
	if percentHealth < 0.25 {
		txtColor = utilz.HexToColor("#ff2727") //red
	} else if percentHealth < 0.50 {
		txtColor = utilz.HexToColor("#ffffa2") //light yellow
	}
	textBase := text.New(pixel.V(x, y), gui.BasicAtlasAscii)
	textBase.Color = txtColor
	// Fix: pass the verbs straight to Fprintf instead of routing a
	// pre-rendered string through it as the format argument — a non-constant
	// format string is a `go vet` printf defect and would misbehave if the
	// rendered text ever contained '%'.
	fmt.Fprintf(textBase, "%v/%v", hp, max)
	textBase.Draw(renderer, pixel.IM)
}
// DrawMP renders the actor's current MP as plain white text at (x, y).
func (c *CombatState) DrawMP(renderer pixel.Target, x, y float64, actor *combat.Actor) {
	label := text.New(pixel.V(x, y), gui.BasicAtlasAscii)
	fmt.Fprintln(label, fmt.Sprintf("%v", actor.Stats.Get("MpNow")))
	label.Draw(renderer, pixel.IM)
}
// DrawHpBarAtFeet draws a narrow HP bar below an enemy sprite centered
// around its (x, y) entity position.
func (c *CombatState) DrawHpBarAtFeet(renderer pixel.Target, x, y float64, actor *combat.Actor) {
	const entityWidth, entityHeight = 64.0, 64.0
	bar := c.Bars[actor]
	bar.HP.Width = 32
	bar.HP.SetPosition(x-entityWidth/3, y-entityHeight/2)
	bar.HP.SetValue(actor.Stats.Get("HpNow"))
	bar.HP.Render(renderer)
}
// HandleDeath checks both sides for actors whose HP has reached zero and
// transitions them into their death handling.
func (c *CombatState) HandleDeath() {
	c.HandlePartyDeath()
	c.HandleEnemyDeath()
}
// HandlePartyDeath plays the death animation for party members whose HP has
// dropped to zero. Dead party members are NOT removed from the combat lists,
// because they can be revived.
// Panics if a member's controller is in a state with no known animation id.
func (c *CombatState) HandlePartyDeath() {
	for _, member := range c.Actors[party] {
		char := c.ActorCharMap[member]
		// Determine the animation the member is currently playing.
		var animId string
		switch s := char.Controller.Current.(type) {
		case *CSStandBy:
			animId = s.AnimId
		case *CSRunAnim:
			animId = s.AnimId
		case *CSHurt:
			animId = s.AnimId
		case *CSMove:
			animId = s.AnimId
		default:
			panic(fmt.Sprintf("animId not found with %v", s))
		}
		if animId == csDeath {
			continue // already playing the death animation
		}
		if member.Stats.Get("HpNow") > 0 {
			continue // still alive
		}
		// Newly dead: run the death animation and drop any pending events.
		char.Controller.Change(csRunanim, csDeath, false)
		c.EventQueue.RemoveEventsOwnedBy(member)
	}
}
// HandleEnemyDeath removes enemies whose HP has reached zero from the combat
// lists, starts their death animation, collects their loot and moves the
// character to the DeathList so the corpse keeps rendering until finished.
func (c *CombatState) HandleEnemyDeath() {
	// Iterate backwards so index-based removal stays valid.
	for i := len(c.Actors[enemies]) - 1; i >= 0; i-- {
		foe := c.Actors[enemies][i]
		if foe.Stats.Get("HpNow") > 0 {
			continue
		}
		char := c.ActorCharMap[foe]
		// Drop every live reference to the dead enemy.
		c.Actors[enemies] = removeActorAtIndex(c.Actors[enemies], i)
		c.Characters[enemies] = c.removeCharAtIndex(c.Characters[enemies], i)
		delete(c.ActorCharMap, foe)
		char.Controller.Change(csEnemyDie)
		c.EventQueue.RemoveEventsOwnedBy(foe)
		// Record its drop and let the death animation play out.
		c.Loot = append(c.Loot, foe.Drop)
		c.DeathList = append(c.DeathList, char)
	}
}
// AddEffect inserts fx into the effect list, which is kept ordered by
// descending priority: the new effect goes before the first entry with a
// strictly lower priority, or at the end otherwise.
func (c *CombatState) AddEffect(fx EffectState) {
	for i, existing := range c.EffectList {
		if fx.Priority() > existing.Priority() {
			c.insertFxAtIndex(i, fx)
			return
		}
	}
	// Lowest priority so far: append.
	c.EffectList = append(c.EffectList, fx)
}
// ApplyDamage subtracts damage from the target's HP (clamped at zero), puts
// the character into the hurt state, floats a damage number above it, and
// triggers death handling for both sides.
func (c *CombatState) ApplyDamage(target *combat.Actor, damage float64, isCritical bool) {
	stats := target.Stats
	stats.Set("HpNow", math.Max(0, stats.Get("HpNow")-damage))
	logrus.Info(target.Name, " HP now ", stats.Get("HpNow"))

	char := c.ActorCharMap[target]
	if damage > 0 {
		// Flinch unless the character is already in the hurt state.
		current := char.Controller.Current
		if _, hurting := current.(*CSHurt); !hurting {
			char.Controller.Change(csHurt, current)
		}
	}

	// Floating damage number; criticals get a deeper red.
	fxColor := "#ff9054" //light red
	if isCritical {
		fxColor = "#ff2727" //red
	}
	c.AddEffect(JumpingNumbersFXCreate(char.Entity.X, char.Entity.Y, damage, fxColor))
	c.HandleDeath()
}
// OnFlee marks the party as having escaped; Update ends combat on this flag.
func (c *CombatState) OnFlee() {
	c.Fled = true
}

// HasPartyFled reports whether the party has escaped from combat.
func (c *CombatState) HasPartyFled() bool {
	return c.Fled
}
// OnWin runs the victory sequence: surviving party members play the victory
// animation, combat rewards are calculated, and a storyboard fades to black
// and replaces this state with the XP summary screen.
func (c *CombatState) OnWin() {
	//Tell all living party members to dance.
	for _, v := range c.Actors[party] {
		char := c.ActorCharMap[v]
		alive := v.Stats.Get("HpNow") > 0
		if alive {
			char.Controller.Change(csRunanim, csVictory, false)
		}
	}
	//Create the storyboard and add the stats.
	combatData := c.CalcCombatData()
	// The shared world object lives in the game-state globals map.
	world_ := reflect.ValueOf(c.GameState.Globals["world"]).Interface().(*combat.WorldExtended)
	xpSummaryState := XPSummaryStateCreate(c.GameState, c.win, *world_.Party, combatData, c.OnWinCallback)
	// Event order matters: keep updating combat briefly, fade out, then swap
	// this state for the summary screen.
	storyboardEvents := []interface{}{
		UpdateState(c, 1.0),
		BlackScreen("blackscreen"),
		Wait(1),
		KillState("blackscreen"),
		ReplaceState(c, xpSummaryState),
		Wait(0.3),
	}
	storyboard := StoryboardCreate(c.GameState, c.win, storyboardEvents, false)
	c.GameState.Push(storyboard)
	c.IsFinishing = true
}
// OnLose runs the defeat sequence: fade to black, then either invoke the
// caller-supplied OnDieCallback (scripted defeats) or replace this state
// with the game-over screen.
func (c *CombatState) OnLose() {
	c.IsFinishing = true
	var storyboardEvents []interface{}
	if c.OnDieCallback != nil {
		// Scripted defeat: hand control back to the callback after the fade.
		storyboardEvents = []interface{}{
			UpdateState(c, 1.5),
			BlackScreen("blackscreen"),
			Wait(1),
			KillState("blackscreen"),
			RemoveState(c),
			RunFunction(c.OnDieCallback),
			Wait(2),
		}
	} else {
		// Default defeat: show the game-over screen.
		gameOverState := GameOverStateCreate(c.GameState)
		storyboardEvents = []interface{}{
			UpdateState(c, 1.5),
			BlackScreen("blackscreen"),
			Wait(1),
			KillState("blackscreen"),
			ReplaceState(c, gameOverState),
			Wait(2),
		}
	}
	storyboard := StoryboardCreate(c.GameState, c.GameState.Win, storyboardEvents, false)
	c.GameState.Push(storyboard)
	//c.GameState.Pop()
	//gameOverState := GameOverStateCreate(c.GameState)
	//c.GameState.Push(gameOverState)
}
// CalcCombatData totals up the rewards from every defeated enemy: XP, gold,
// the guaranteed drops, plus one roll on each enemy's chance table. Items
// are deduplicated into a single ItemIndex entry per item id.
func (c *CombatState) CalcCombatData() CombatData {
	summary := CombatData{
		XP:   0,
		Gold: 0,
		Loot: make([]world.ItemIndex, 0),
	}
	counts := make(map[int]int) // item id -> total quantity dropped
	for _, dropped := range c.Loot {
		summary.XP += dropped.XP
		summary.Gold += dropped.Gold
		// Guaranteed drops count once each.
		for _, id := range dropped.Always {
			counts[id]++
		}
		// One roll on the chance table; Id == -1 means "nothing".
		if rolled := dropped.Chance.Pick(); rolled.Id != -1 {
			counts[rolled.Id] += rolled.Count
		}
	}
	for id, n := range counts {
		summary.Loot = append(summary.Loot, world.ItemIndex{
			Id:    id,
			Count: n,
		})
	}
	return summary
}
// ApplyDodge plays the flinch animation (unless already hurting) and floats
// a "DODGE" text effect above the target.
func (c *CombatState) ApplyDodge(target *combat.Actor) {
	char := c.ActorCharMap[target]
	current := char.Controller.Current
	if _, hurting := current.(*CSHurt); !hurting {
		char.Controller.Change(csHurt, current)
	}
	c.AddTextEffect(target, "DODGE", 2)
}
// ApplyMiss floats a "MISS" text effect above the target.
func (c *CombatState) ApplyMiss(target *combat.Actor) {
	c.AddTextEffect(target, "MISS", 2)
}
// AddTextEffect floats the given white text above the actor's character at
// its selection position, with the given effect priority.
func (c *CombatState) AddTextEffect(actor *combat.Actor, txt string, priority int) {
	pos := c.ActorCharMap[actor].Entity.GetSelectPosition()
	c.AddEffect(CombatTextFXCreate(pos.X, pos.Y, txt, "#FFFFFF", priority))
}
// ApplyCounter makes target immediately strike back at owner, provided the
// target is still alive, and floats a "COUNTER" text effect above it.
func (c *CombatState) ApplyCounter(target, owner *combat.Actor) {
	// Dead actors cannot counter.
	if alive := target.Stats.Get("HpNow") > 0; !alive {
		return
	}
	// Queue the counter-attack at time point -1, i.e. immediately.
	attack := CEAttackCreate(c, target, []*combat.Actor{owner}, AttackOptions{
		Counter: true,
	})
	c.EventQueue.Add(attack, -1)
	c.AddTextEffect(target, "COUNTER", 3)
}
// ShowTip displays txt in the tip panel until HideTip is called.
func (c *CombatState) ShowTip(txt string) {
	c.showTipPanel = true
	c.tipPanelText = txt
}

// ShowNotice displays txt in the notice panel until HideNotice is called.
func (c *CombatState) ShowNotice(txt string) {
	c.showNoticePanel = true
	c.noticePanelText = txt
}

// HideTip hides the tip panel.
func (c *CombatState) HideTip() {
	c.showTipPanel = false
}

// HideNotice hides the notice panel.
func (c *CombatState) HideNotice() {
	c.showNoticePanel = false
}
|
package ratelimiter_test
import (
"crypto/rand"
"encoding/hex"
"errors"
"sort"
"sync"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/teambition/ratelimiter-go"
"gopkg.in/redis.v5"
)
// redisClient adapts a plain *redis.Client to the ratelimiter.RedisClient
// interface used by the limiter.
type redisClient struct {
	*redis.Client
}

// RateDel deletes the rate-limit key.
func (c *redisClient) RateDel(key string) error {
	return c.Del(key).Err()
}

// RateEvalSha runs the preloaded limiter script by its SHA1 digest.
func (c *redisClient) RateEvalSha(sha1 string, keys []string, args ...interface{}) (interface{}, error) {
	return c.EvalSha(sha1, keys, args...).Result()
}

// RateScriptLoad loads the limiter script and returns its SHA1 digest.
func (c *redisClient) RateScriptLoad(script string) (string, error) {
	return c.ScriptLoad(script).Result()
}
// clusterClient adapts a *redis.ClusterClient to the ratelimiter.RedisClient
// interface used by the limiter.
type clusterClient struct {
	*redis.ClusterClient
}

// RateDel deletes the rate-limit key.
func (c *clusterClient) RateDel(key string) error {
	return c.Del(key).Err()
}

// RateEvalSha runs the preloaded limiter script by its SHA1 digest.
func (c *clusterClient) RateEvalSha(sha1 string, keys []string, args ...interface{}) (interface{}, error) {
	return c.EvalSha(sha1, keys, args...).Result()
}

// RateScriptLoad loads the limiter script on every master node so EvalSha
// works regardless of which node serves a key; the last loaded SHA1 is
// returned (all nodes yield the same digest for the same script).
func (c *clusterClient) RateScriptLoad(script string) (string, error) {
	var sha1 string
	err := c.ForEachMaster(func(client *redis.Client) error {
		res, err := client.ScriptLoad(script).Result()
		if err == nil {
			sha1 = res
		}
		return err
	})
	return sha1, err
}
// ringClient adapts a *redis.Ring to the ratelimiter.RedisClient interface
// used by the limiter.
type ringClient struct {
	*redis.Ring
}

// RateDel deletes the rate-limit key.
func (c *ringClient) RateDel(key string) error {
	return c.Del(key).Err()
}

// RateEvalSha runs the preloaded limiter script by its SHA1 digest.
func (c *ringClient) RateEvalSha(sha1 string, keys []string, args ...interface{}) (interface{}, error) {
	return c.EvalSha(sha1, keys, args...).Result()
}

// RateScriptLoad loads the limiter script on every shard so EvalSha works
// regardless of which shard serves a key; the last loaded SHA1 is returned
// (identical scripts hash identically on every shard).
func (c *ringClient) RateScriptLoad(script string) (string, error) {
	var sha1 string
	err := c.ForEachShard(func(client *redis.Client) error {
		res, err := client.ScriptLoad(script).Result()
		if err == nil {
			sha1 = res
		}
		return err
	})
	return sha1, err
}
// TestRatelimiterGo is the single go-test entry point that hands control to
// the ginkgo spec runner for this suite.
func TestRatelimiterGo(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "RatelimiterGo Suite")
}
// client is the shared Redis connection used by all specs (localhost:6379).
var client *redis.Client

// NOTE(review): this package-level limiter appears unused — each Describe
// declares its own shadowing `limiter`; confirm and consider removing.
var limiter *ratelimiter.Limiter

// Connect to the local Redis before any spec runs and fail fast if it is
// unreachable.
var _ = BeforeSuite(func() {
	client = redis.NewClient(&redis.Options{
		Addr: "localhost:6379",
	})
	pong, err := client.Ping().Result()
	Expect(pong).To(Equal("PONG"))
	Expect(err).ToNot(HaveOccurred())
})

// Close the shared connection after the whole suite has finished.
var _ = AfterSuite(func() {
	err := client.Close()
	Expect(err).ShouldNot(HaveOccurred())
})
// Integration specs for the limiter. They require a live Redis on
// localhost:6379; the ring and cluster chaos specs additionally require
// multi-node setups and are skipped (can't run in travis).
var _ = Describe("RatelimiterGo", func() {
	// Default options: Max 100 requests per 60s window.
	var _ = Describe("ratelimiter.New, With default options", func() {
		var limiter *ratelimiter.Limiter
		var id string = genID()
		It("ratelimiter.New", func() {
			res, err := ratelimiter.New(&redisClient{client}, ratelimiter.Options{})
			Expect(err).ToNot(HaveOccurred())
			limiter = res
		})
		It("limiter.Get", func() {
			// Each Get consumes one unit from the window.
			res, err := limiter.Get(id)
			Expect(err).ToNot(HaveOccurred())
			Expect(res.Total).To(Equal(100))
			Expect(res.Remaining).To(Equal(99))
			Expect(res.Duration).To(Equal(time.Duration(60 * 1e9)))
			Expect(res.Reset.UnixNano() > time.Now().UnixNano()).To(Equal(true))
			res2, err2 := limiter.Get(id)
			Expect(err2).ToNot(HaveOccurred())
			Expect(res2.Total).To(Equal(100))
			Expect(res2.Remaining).To(Equal(98))
		})
		It("limiter.Remove", func() {
			// Removing resets the window; a second Remove is a no-op.
			err := limiter.Remove(id)
			Expect(err).ToNot(HaveOccurred())
			err2 := limiter.Remove(id)
			Expect(err2).ToNot(HaveOccurred())
			res3, err3 := limiter.Get(id)
			Expect(err3).ToNot(HaveOccurred())
			Expect(res3.Total).To(Equal(100))
			Expect(res3.Remaining).To(Equal(99))
		})
		It("limiter.Get with invalid args", func() {
			// Policy args must be positive (max, duration) pairs.
			_, err := limiter.Get(id, 10)
			Expect(err.Error()).To(Equal("ratelimiter: must be paired values"))
			_, err2 := limiter.Get(id, -1, 10)
			Expect(err2.Error()).To(Equal("ratelimiter: must be positive integer"))
			_, err3 := limiter.Get(id, 10, 0)
			Expect(err3.Error()).To(Equal("ratelimiter: must be positive integer"))
		})
	})
	// Custom options: Max 3 requests per second.
	var _ = Describe("ratelimiter.New, With options", func() {
		var limiter *ratelimiter.Limiter
		var id string = genID()
		It("ratelimiter.New", func() {
			res, err := ratelimiter.New(&redisClient{client}, ratelimiter.Options{
				Max:      3,
				Duration: time.Second,
			})
			Expect(err).ToNot(HaveOccurred())
			limiter = res
		})
		It("limiter.Get", func() {
			// Remaining goes 2, 1, 0, then stays -1 once exhausted.
			res, err := limiter.Get(id)
			Expect(err).ToNot(HaveOccurred())
			Expect(res.Total).To(Equal(3))
			Expect(res.Remaining).To(Equal(2))
			Expect(res.Duration).To(Equal(time.Second))
			Expect(res.Reset.UnixNano() > time.Now().UnixNano()).To(Equal(true))
			Expect(res.Reset.UnixNano() <= time.Now().Add(time.Second).UnixNano()).To(Equal(true))
			res2, _ := limiter.Get(id)
			Expect(res2.Remaining).To(Equal(1))
			res3, _ := limiter.Get(id)
			Expect(res3.Remaining).To(Equal(0))
			res4, _ := limiter.Get(id)
			Expect(res4.Remaining).To(Equal(-1))
			res5, _ := limiter.Get(id)
			Expect(res5.Remaining).To(Equal(-1))
		})
		It("limiter.Remove", func() {
			err := limiter.Remove(id)
			Expect(err).ToNot(HaveOccurred())
			res2, err2 := limiter.Get(id)
			Expect(err2).ToNot(HaveOccurred())
			Expect(res2.Remaining).To(Equal(2))
		})
		It("limiter.Get with multi-policy", func() {
			id := genID()
			// Escalating policy: 2/500ms, then 2/1s, then 1/1s on repeat abuse.
			policy := []int{2, 500, 2, 1000, 1, 1000}
			res, err := limiter.Get(id, policy...)
			Expect(err).ToNot(HaveOccurred())
			Expect(res.Total).To(Equal(2))
			Expect(res.Remaining).To(Equal(1))
			Expect(res.Duration).To(Equal(time.Millisecond * 500))
			res2, err2 := limiter.Get(id, policy...)
			Expect(err2).ToNot(HaveOccurred())
			Expect(res2.Remaining).To(Equal(0))
			res3, err3 := limiter.Get(id, policy...)
			Expect(err3).ToNot(HaveOccurred())
			Expect(res3.Remaining).To(Equal(-1))
			// After the first window expires, the second policy tier applies.
			time.Sleep(res3.Duration + time.Millisecond)
			res4, err4 := limiter.Get(id, policy...)
			Expect(err4).ToNot(HaveOccurred())
			Expect(res4.Total).To(Equal(2))
			Expect(res4.Remaining).To(Equal(1))
			Expect(res4.Duration).To(Equal(time.Second))
			res5, err5 := limiter.Get(id, policy...)
			Expect(err5).ToNot(HaveOccurred())
			Expect(res5.Remaining).To(Equal(0))
			res6, err6 := limiter.Get(id, policy...)
			Expect(err6).ToNot(HaveOccurred())
			Expect(res6.Remaining).To(Equal(-1))
			// Third tier after the second window expires.
			time.Sleep(res6.Duration + time.Millisecond)
			res7, err7 := limiter.Get(id, policy...)
			Expect(err7).ToNot(HaveOccurred())
			Expect(res7.Total).To(Equal(1))
			Expect(res7.Remaining).To(Equal(0))
			Expect(res7.Duration).To(Equal(time.Second))
			res8, err8 := limiter.Get(id, policy...)
			Expect(err8).ToNot(HaveOccurred())
			Expect(res8.Remaining).To(Equal(-1))
			// restore after double Duration
			time.Sleep(res8.Duration*2 + time.Millisecond)
			res9, err9 := limiter.Get(id, policy...)
			Expect(err9).ToNot(HaveOccurred())
			Expect(res9.Total).To(Equal(2))
			Expect(res9.Remaining).To(Equal(1))
			Expect(res9.Duration).To(Equal(time.Millisecond * 500))
		})
	})
	// Chaos test: 10 independent limiters hammer one id concurrently; the
	// sorted Remaining values must form an exact, gapless sequence, proving
	// the Redis script is atomic.
	var _ = Describe("ratelimiter.New, Chaos", func() {
		It("10 limiters work for one id", func() {
			var wg sync.WaitGroup
			var id string = genID()
			var result = NewResult(make([]int, 10000))
			var redisOptions = redis.Options{Addr: "localhost:6379"}
			var limiterOptions = ratelimiter.Options{Max: 9998}
			var worker = func(c *redis.Client, l *ratelimiter.Limiter) {
				defer wg.Done()
				defer c.Close()
				for i := 0; i < 1000; i++ {
					res, err := l.Get(id)
					Expect(err).ToNot(HaveOccurred())
					result.Push(res.Remaining)
				}
			}
			wg.Add(10)
			for i := 0; i < 10; i++ {
				client := redis.NewClient(&redisOptions)
				limiter, err := ratelimiter.New(&redisClient{client}, limiterOptions)
				Expect(err).ToNot(HaveOccurred())
				go worker(client, limiter)
			}
			wg.Wait()
			s := result.Value()
			sort.Ints(s) // [-1 -1 0 1 2 3 ... 9997 9997]
			Expect(s[0]).To(Equal(-1))
			for i := 1; i < 10000; i++ {
				Expect(s[i]).To(Equal(i - 2))
			}
		})
	})
	// Same chaos test against a two-node ring; needs local multi-instance
	// Redis, so it is skipped on CI.
	var _ = Describe("ratelimiter.New with redis ring, Chaos", func() {
		It("10 limiters work for one id", func() {
			Skip("Can't run in travis")
			var wg sync.WaitGroup
			var id string = genID()
			var result = NewResult(make([]int, 10000))
			var redisOptions = redis.RingOptions{Addrs: map[string]string{
				"a": "localhost:6379",
				"b": "localhost:6380",
			}}
			var limiterOptions = ratelimiter.Options{Max: 9998}
			var worker = func(c *redis.Ring, l *ratelimiter.Limiter) {
				defer wg.Done()
				defer c.Close()
				for i := 0; i < 1000; i++ {
					res, err := l.Get(id)
					Expect(err).ToNot(HaveOccurred())
					result.Push(res.Remaining)
				}
			}
			wg.Add(10)
			for i := 0; i < 10; i++ {
				client := redis.NewRing(&redisOptions)
				limiter, err := ratelimiter.New(&ringClient{client}, limiterOptions)
				Expect(err).ToNot(HaveOccurred())
				go worker(client, limiter)
			}
			wg.Wait()
			s := result.Value()
			sort.Ints(s) // [-1 -1 0 1 2 3 ... 9997 9997]
			Expect(s[0]).To(Equal(-1))
			for i := 1; i < 10000; i++ {
				Expect(s[i]).To(Equal(i - 2))
			}
		})
	})
	// Same chaos test against a six-node cluster; also skipped on CI.
	var _ = Describe("ratelimiter.New with redis cluster, Chaos", func() {
		It("10 limiters work for one id", func() {
			Skip("Can't run in travis")
			var wg sync.WaitGroup
			var id string = genID()
			var result = NewResult(make([]int, 10000))
			var redisOptions = redis.ClusterOptions{Addrs: []string{
				"localhost:7000",
				"localhost:7001",
				"localhost:7002",
				"localhost:7003",
				"localhost:7004",
				"localhost:7005",
			}}
			var limiterOptions = ratelimiter.Options{Max: 9998}
			var worker = func(c *redis.ClusterClient, l *ratelimiter.Limiter) {
				defer wg.Done()
				defer c.Close()
				for i := 0; i < 1000; i++ {
					res, err := l.Get(id)
					Expect(err).ToNot(HaveOccurred())
					result.Push(res.Remaining)
				}
			}
			wg.Add(10)
			for i := 0; i < 10; i++ {
				client := redis.NewClusterClient(&redisOptions)
				limiter, err := ratelimiter.New(&clusterClient{client}, limiterOptions)
				Expect(err).ToNot(HaveOccurred())
				go worker(client, limiter)
			}
			wg.Wait()
			s := result.Value()
			sort.Ints(s) // [-1 -1 0 1 2 3 ... 9997 9997]
			Expect(s[0]).To(Equal(-1))
			for i := 1; i < 10000; i++ {
				Expect(s[i]).To(Equal(i - 2))
			}
		})
	})
})
// genID returns a fresh 24-character hex string (12 random bytes) used as a
// unique rate-limiter key per spec. Panics if the system RNG fails.
func genID() string {
	id := make([]byte, 12)
	if _, err := rand.Read(id); err != nil {
		panic(err)
	}
	return hex.EncodeToString(id)
}
// Result is a fixed-capacity, mutex-guarded collector that the chaos specs
// use to gather Remaining values from concurrent workers.
type Result struct {
	i   int        // next write index
	len int        // capacity of s
	s   []int      // collected values
	m   sync.Mutex // guards i and s
}

// NewResult wraps s in a Result sized to its length.
// NOTE(review): the Result (which embeds a sync.Mutex) is returned by value
// and copied on assignment; this is safe only because the mutex is still in
// its zero state here — never copy a Result after first use.
func NewResult(s []int) Result {
	return Result{s: s, len: len(s)}
}
// Push appends val to the result buffer; safe for concurrent use.
// It panics when the preallocated buffer is already full.
func (r *Result) Push(val int) {
	r.m.Lock()
	// Fix: unlock via defer so that the overflow panic below does not leave
	// the mutex held — previously a panicking goroutine kept the lock and
	// deadlocked every other worker while the panic unwound.
	defer r.m.Unlock()
	if r.i == r.len {
		panic(errors.New("Result overflow"))
	}
	r.s[r.i] = val
	r.i++
}
// Value returns the underlying slice without copying or locking; callers
// must only use it after all Push-ing goroutines have finished (the specs
// call it after wg.Wait()).
func (r *Result) Value() []int {
	return r.s
}
|
package game
// FarseerOperation collects a vote among all players and returns the first
// (i.e. winning) vote target — presumably the player whose role the farseer
// inspects.
// NOTE(review): result.target[0] panics if the vote produced no target —
// confirm GetVoteResult guarantees at least one entry.
func FarseerOperation(players []Player) Player {
	votes := CollectVote(players, players)
	result := GetVoteResult(votes)
	return result.target[0]
}
|
package main
/*
读取config.yaml 文件
对yaml里面的site进行http检查
huangmingyou@gmail.com
2021.07
*/
import (
"github.com/robfig/cron/v3"
"crypto/tls"
"flag"
"fmt"
"gopkg.in/yaml.v2"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptrace"
"os"
"regexp"
"strconv"
"strings"
"time"
)
// cfgfile stores the path of the loaded configuration file.
var cfgfile string

// metrics caches the most recent probe output for the web /metrics handler.
var metrics string

// U describes one check target from the YAML config.
type U struct {
	Name    string        `yaml:"name"`   // metric label
	Url     string        `yaml:"url"`    // target URL
	Method  string        `yaml:"method"` // "GET" or "POST"
	Respons string        `yaml:"respons"` // regexp the response body must match (yaml key is spelled "respons")
	Query   string        `yaml:"query"`   // POST body (form-encoded)
	Timeout time.Duration `yaml:"timeout"` // dial timeout; scaled by time.Second in timeGet
}
// C is the root of the YAML configuration.
type C struct {
	Thread     int    `yaml:"thread"`     // declared concurrency (currently unused in this chunk)
	Updatecron string `yaml:"updatecron"` // cron spec for periodic re-checks
	Targets    []U    `yaml:",flow"`      // list of check targets
}

// yc holds the parsed configuration, populated once in main.
var yc C
// ValidateConfigPath reports an error when path does not exist or refers to
// a directory rather than a regular file.
func ValidateConfigPath(path string) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	if info.IsDir() {
		return fmt.Errorf("'%s' is a directory, not a normal file", path)
	}
	return nil
}
// ParseFlags reads the -config and -mode command-line flags, validates that
// the config path points at a regular file, and returns (configPath, mode).
func ParseFlags() (string, string, error) {
	var (
		configPath string
		mode       string
	)
	flag.StringVar(&configPath, "config", "./config.yml", "path to config file")
	flag.StringVar(&mode, "mode", "cli", "run mode, cli or web")
	flag.Parse()
	if err := ValidateConfigPath(configPath); err != nil {
		return "", "", err
	}
	return configPath, mode, nil
}
// timeGet probes one target and sends its metrics (Prometheus text lines:
// DNS, TLS handshake, connect, first-byte and total times, plus a content
// match flag where 0 = matched) to channel c. Exactly one string is always
// sent — even on failure — so callers can receive once per goroutine.
//
// Fixes: the original called log.Fatal on any request/read error, which (a)
// killed the entire exporter because one target was down and (b) never sent
// on c, wedging the collector. regexp.MustCompile on a config-supplied
// pattern could likewise panic the process.
func timeGet(t U, c chan string) {
	var res_str string

	// fail logs the problem, emits a "no match" marker and always sends,
	// keeping the one-message-per-goroutine contract.
	fail := func(err error) {
		log.Printf("check %q (%s) failed: %v", t.Name, t.Url, err)
		res_str += fmt.Sprintf("http_content_match{name=\"%s\"} \t%d\n", t.Name, 1)
		c <- res_str
	}

	req, err := http.NewRequest(t.Method, t.Url, nil)
	if "POST" == t.Method {
		req, err = http.NewRequest(t.Method, t.Url, strings.NewReader(t.Query))
		if err == nil {
			req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
			req.Header.Add("Content-Length", strconv.Itoa(len(t.Query)))
		}
	}
	if err != nil {
		fail(err)
		return
	}

	var start, connect, dns, tlsHandshake time.Time
	tr := &http.Transport{
		DialContext: (&net.Dialer{
			// Config timeouts are plain numbers (N nanoseconds after YAML
			// decoding) and are scaled to N seconds here.
			Timeout:   t.Timeout * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		ForceAttemptHTTP2:   true,
		MaxIdleConns:        10,
		IdleConnTimeout:     10 * time.Second,
		TLSHandshakeTimeout: 10 * time.Second,
		//ExpectContinueTimeout: 10 * time.Second,
	}
	trace := &httptrace.ClientTrace{
		DNSStart: func(dsi httptrace.DNSStartInfo) { dns = time.Now() },
		DNSDone: func(ddi httptrace.DNSDoneInfo) {
			res_str = fmt.Sprintf("http_dns_time{name=\"%s\"} \t\t%d\n", t.Name, time.Since(dns))
		},
		TLSHandshakeStart: func() { tlsHandshake = time.Now() },
		TLSHandshakeDone: func(cs tls.ConnectionState, err error) {
			res_str += fmt.Sprintf("http_tls_handshake_time{name=\"%s\"} \t%d\n", t.Name, time.Since(tlsHandshake))
		},
		ConnectStart: func(network, addr string) { connect = time.Now() },
		ConnectDone: func(network, addr string, err error) {
			res_str += fmt.Sprintf("http_connect_time{name=\"%s\"} \t\t%d\n", t.Name, time.Since(connect))
		},
		GotFirstResponseByte: func() {
			res_str += fmt.Sprintf("http_firstbyte_time{name=\"%s\"} \t%d\n", t.Name, time.Since(start))
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	start = time.Now()
	resp, err := tr.RoundTrip(req)
	if err != nil {
		fail(err)
		return
	}
	defer resp.Body.Close()
	res_str += fmt.Sprintf("http_total_time{name=\"%s\"} \t\t%d\n", t.Name, time.Since(start))
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fail(err)
		return
	}
	// Compile the configured pattern; a bad regexp is a config error, not a
	// reason to crash the exporter.
	validID, err := regexp.Compile(t.Respons)
	if err != nil {
		fail(err)
		return
	}
	matchv1 := 1 // 0 = body matched the configured regexp
	if validID.MatchString(string(body)) {
		matchv1 = 0
	}
	res_str += fmt.Sprintf("http_content_match{name=\"%s\"} \t%d\n", t.Name, matchv1)
	c <- res_str
}
// Exporter is an HTTP handler that probes every configured target (one at a
// time: each goroutine is awaited before the next is started, preserving
// output order) and writes the accumulated metrics to the response.
func Exporter(w http.ResponseWriter, r *http.Request) {
	ch1 := make(chan string)
	res2 := ""
	for i := 0; i < len(yc.Targets); i++ {
		go timeGet(yc.Targets[i], ch1)
		res2 += <-ch1
	}
	// Fix: use Fprint, not Fprintf — the metrics text is data, and any '%'
	// in it would previously have been interpreted as formatting verbs
	// (go vet "printf" defect).
	fmt.Fprint(w, res2)
}
// runcli probes all configured targets once (sequentially: each goroutine is
// awaited before the next starts), caches the output in the metrics global
// for the web handler, and prints it to stdout.
func runcli() {
	results := make(chan string)
	var collected string
	for _, target := range yc.Targets {
		go timeGet(target, results)
		collected += <-results
	}
	metrics = collected
	fmt.Println(collected)
}
// main loads the YAML config, schedules periodic probes via cron, then
// either serves /metrics over HTTP (mode "web") or runs a single CLI pass.
func main() {
	cfgPath, runmode, err := ParseFlags()
	if err != nil {
		log.Fatal(err)
	}
	content, err := ioutil.ReadFile(cfgPath)
	if err != nil {
		// Fix: this error was previously ignored, so a vanished/unreadable
		// config surfaced only as a confusing YAML error on empty input.
		log.Fatal(err)
	}
	if err := yaml.Unmarshal(content, &yc); err != nil {
		log.Fatalf("error: %v", err)
	}
	cfgfile = cfgPath
	// Schedule the periodic metrics refresh.
	cjob := cron.New()
	if _, err := cjob.AddFunc(yc.Updatecron, runcli); err != nil {
		// Fix: a bad cron spec previously failed silently and the metrics
		// cache simply never refreshed.
		log.Fatalf("invalid updatecron spec %q: %v", yc.Updatecron, err)
	}
	cjob.Start()
	if runmode == "web" {
		// Prime the cache so the first scrape has data.
		runcli()
		http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
			// Fix: Fprint, not Fprintf — metrics text is data, not a
			// format string.
			fmt.Fprint(w, metrics)
		})
		log.Fatal(http.ListenAndServe(":8080", nil))
	} else {
		runcli()
	}
}
|
// +build small
/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2016 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
cadv "github.com/google/cadvisor/info/v1"
"github.com/intelsdi-x/snap-plugin-publisher-heapster/exchange"
"github.com/intelsdi-x/snap-plugin-publisher-heapster/mox"
. "github.com/smartystreets/goconvey/convey"
)
// mockHTTPDriver is a call-recording stand-in for the HTTPDriver used by the
// server; behavior is scripted through mox interceptors.
type mockHTTPDriver struct {
	mox.CallMock
}

// AddRoute records the call and returns whatever error the interceptor set.
func (m *mockHTTPDriver) AddRoute(methods []string, path string, handler http.HandlerFunc) error {
	res := m.Called("AddRoute", 1, methods, path, handler)
	return res.Error(0)
}

// ListenAndServe records the call and returns the scripted error, if any.
func (m *mockHTTPDriver) ListenAndServe(serverAddr string) error {
	res := m.Called("ListenAndServe", 1, serverAddr)
	return res.Error(0)
}
// nilReader is a reader for testing that returns errors for all ops
type nilReader struct{}

// Read always fails with io.ErrUnexpectedEOF.
func (n *nilReader) Read(p []byte) (int, error) {
	return 0, io.ErrUnexpectedEOF
}

// Close always fails with io.ErrUnexpectedEOF.
func (n *nilReader) Close() error {
	return io.ErrUnexpectedEOF
}
// mockJSONCodec is a call-recording stand-in for the server's JSON codec;
// behavior is scripted through mox interceptors.
// NOTE(review): methods use value receivers while the other mocks use
// pointer receivers — confirm CallMock tolerates being copied.
type mockJSONCodec struct {
	mox.CallMock
}

// Unmarshal records the call and returns the scripted error, if any.
func (c mockJSONCodec) Unmarshal(raw []byte, dest interface{}) error {
	res := c.Called("Unmarshal", 1, raw, dest)
	return res.Error(0)
}

// Encode records the call and returns the scripted error, if any.
func (c mockJSONCodec) Encode(writer io.Writer, obj interface{}) error {
	res := c.Called("Encode", 1, writer, obj)
	return res.Error(0)
}
// mockResponseWriter is a call-recording http.ResponseWriter whose results
// are scripted through mox interceptors.
type mockResponseWriter struct {
	mox.CallMock
}

// Header returns the scripted header map (nil result yields an empty header).
func (w *mockResponseWriter) Header() http.Header {
	res := w.Called("Header", 1)
	var header http.Header
	if res[0] != nil {
		header = res[0].(http.Header)
	}
	return header
}

// Write records the payload and returns the scripted (count, error) pair.
func (w *mockResponseWriter) Write(buf []byte) (int, error) {
	res := w.Called("Write", 2, buf)
	return res.Int(0), res.Error(1)
}

// WriteHeader records the status code; it returns nothing.
func (w *mockResponseWriter) WriteHeader(code int) {
	_ = w.Called("WriteHeader", 0, code)
}
// TestNewDefaultContext verifies that constructing the default server
// context neither panics nor leaves its config/memory fields nil.
func TestNewDefaultContext(t *testing.T) {
	Convey("Working with server package", t, func() {
		Convey("creating new server context should not fail", func() {
			So(func() {
				newDefaultContext(exchange.NewSystemConfig(), exchange.NewMetricMemory())
			}, ShouldNotPanic)
			Convey("but created object should have all fields initialized", func() {
				ctx := newDefaultContext(exchange.NewSystemConfig(), exchange.NewMetricMemory())
				So(ctx, ShouldNotBeNil)
				So(ctx.Config(), ShouldNotBeNil)
				So(ctx.Memory(), ShouldNotBeNil)
			})
		})
	})
}
// TestNewHTTPDriver verifies the HTTP driver constructor neither panics nor
// returns nil.
func TestNewHTTPDriver(t *testing.T) {
	Convey("Working with server package", t, func() {
		Convey("creating new HTTP driver should not fail", func() {
			So(func() {
				newHTTPDriver()
			}, ShouldNotPanic)
			Convey("but should deliver a non-nil instance of driver", func() {
				driver := newHTTPDriver()
				So(driver, ShouldNotBeNil)
			})
		})
	})
}
// TestNewServer exercises NewServer with the HTTP driver constructor swapped
// for a mock: construction succeeds normally and fails when route setup
// (AddRoute) reports an error.
func TestNewServer(t *testing.T) {
	config := exchange.NewSystemConfig()
	memory := exchange.NewMetricMemory()
	Convey("While configuring server subsystem", t, func() {
		mockDriver := &mockHTTPDriver{}
		// Swap the package-level constructor for the mock; restored in Reset.
		oldHTTPDriverCtor := newHTTPDriver
		newHTTPDriver = func() HTTPDriver {
			return mockDriver
		}
		Convey("building new server instance should not fail", func() {
			server, err := NewServer(config, memory)
			So(err, ShouldBeNil)
			So(server, ShouldNotBeNil)
		})
		Convey("building new instance should fail if server setup terminates with error", func() {
			mockDriver.AddInterceptor(func(funcName string, _ []interface{}, result *mox.Results) bool {
				if funcName == "AddRoute" {
					(*result)[0] = errors.New("AddRoute failed")
					return true
				}
				return false
			})
			_, err := NewServer(config, memory)
			So(err, ShouldNotBeNil)
		})
		Reset(func() {
			newHTTPDriver = oldHTTPDriverCtor
		})
	})
}
// TestDefaultContext_AddStatusPublisher verifies that registering a status
// publisher creates a "/_status/<name>" route, and that a failing AddRoute
// is propagated to the caller.
func TestDefaultContext_AddStatusPublisher(t *testing.T) {
	config := exchange.NewSystemConfig()
	memory := exchange.NewMetricMemory()
	Convey("While configuring the server subsystem", t, func() {
		mockDriver := &mockHTTPDriver{}
		// Swap the package-level constructor for the mock; restored in Reset.
		oldHTTPDriverCtor := newHTTPDriver
		newHTTPDriver = func() HTTPDriver {
			return mockDriver
		}
		Convey("adding status publisher should not fail", func() {
			// Record every route path the server registers.
			var registeredRoutes []string
			mockDriver.AddInterceptor(func(funcName string, args []interface{}, result *mox.Results) bool {
				if funcName == "AddRoute" {
					registeredRoutes = append(registeredRoutes, args[2].(string))
					return true
				}
				return false
			})
			Convey("when adding a simple route", func() {
				server, _ := NewServer(config, memory)
				err := server.AddStatusPublisher("test", func() interface{} {
					return map[string]string{"status": "ok"}
				})
				So(err, ShouldBeNil)
				Convey("and new route should be created in the server", func() {
					So(registeredRoutes, ShouldContain, "/_status/test")
				})
			})
		})
		Convey("and adding a route that fails", func() {
			// Fail only the ".../test" route so server construction succeeds.
			mockDriver.AddInterceptor(func(funcName string, args []interface{}, result *mox.Results) bool {
				if funcName == "AddRoute" && strings.HasSuffix(args[2].(string), "/test") {
					(*result)[0] = fmt.Errorf("AddRoute failed: %s", args[2].(string))
					return true
				}
				return false
			})
			Convey("adding status publisher should fail", func() {
				server, _ := NewServer(config, memory)
				err := server.AddStatusPublisher("test", func() interface{} {
					return map[string]string{"status": "ok"}
				})
				So(err, ShouldNotBeNil)
				So(err.Error(), ShouldContainSubstring, "AddRoute failed")
				So(err.Error(), ShouldContainSubstring, "/test")
			})
		})
		Reset(func() {
			newHTTPDriver = oldHTTPDriverCtor
		})
	})
}
// TestDefaultContext_Start verifies that Start launches the HTTP driver's
// main loop (ListenAndServe) asynchronously; a channel-based interceptor is
// used to wait for the async call.
func TestDefaultContext_Start(t *testing.T) {
	config := exchange.NewSystemConfig()
	memory := exchange.NewMetricMemory()
	Convey("While configuring the server subsystem", t, func() {
		mockDriver := &mockHTTPDriver{}
		// Swap the package-level constructor for the mock; restored in Reset.
		oldHTTPDriverCtor := newHTTPDriver
		newHTTPDriver = func() HTTPDriver {
			return mockDriver
		}
		// ListenAndServe is launched async, so add notifying-interceptor
		done := make(chan bool)
		mockDriver.AddInterceptor(func(funcName string, _ []interface{}, _ *mox.Results) bool {
			if funcName == "ListenAndServe" {
				done <- true
				return true
			}
			return false
		})
		Convey("and using default parameters", func() {
			server, _ := NewServer(config, memory)
			Convey("starting the server should not fail", func() {
				err := server.Start()
				<-done
				So(err, ShouldBeNil)
				Convey("and HTTP driver should launch server main loop", func() {
					So(mockDriver.GetAllCalled(), ShouldContain, "ListenAndServe")
				})
			})
		})
		Reset(func() {
			newHTTPDriver = oldHTTPDriverCtor
		})
	})
}
// TestDefaultContext_listen verifies that an error returned by the driver's
// ListenAndServe is propagated out of the (synchronous) listen method.
func TestDefaultContext_listen(t *testing.T) {
	config := exchange.NewSystemConfig()
	memory := exchange.NewMetricMemory()
	Convey("While configuring the server subsystem", t, func() {
		mockDriver := &mockHTTPDriver{}
		oldHTTPDriverCtor := newHTTPDriver
		newHTTPDriver = func() HTTPDriver {
			return mockDriver
		}
		Convey("and using HTTP driver setup that fails", func() {
			tmp := newDefaultContext(config, memory)
			server := &tmp
			server.setup()
			mockDriver.AddInterceptor(func(funcName string, args []interface{}, result *mox.Results) bool {
				if funcName == "ListenAndServe" {
					(*result)[0] = errors.New("ListenAndServe failed")
					// NOTE(review): other interceptors in this file return true
					// after setting a result; the assertions below pass with
					// false — confirm against mox interceptor semantics.
					return false
				}
				return false
			})
			Convey("starting the server listening routine should also fail", func() {
				err := server.listen()
				So(err, ShouldNotBeNil)
				So(err.Error(), ShouldEqual, "ListenAndServe failed")
			})
		})
		Reset(func() {
			newHTTPDriver = oldHTTPDriverCtor
		})
	})
}
// TestDefaultContext_containerStats drives the containerStats HTTP handler
// through its success path and through every failure mode: request-body read,
// JSON decode, JSON encode, and response write.
func TestDefaultContext_containerStats(t *testing.T) {
	config := exchange.NewSystemConfig()
	memory := exchange.NewMetricMemory()
	Convey("While using the server subsystem", t, func() {
		w := httptest.NewRecorder()
		r, _ := http.NewRequest("POST", "./stats/container", strings.NewReader(`{"num_stats": 1}`))
		mockDriver := &mockHTTPDriver{}
		oldHTTPDriverCtor := newHTTPDriver
		newHTTPDriver = func() HTTPDriver {
			return mockDriver
		}
		tmp := newDefaultContext(config, memory)
		server := &tmp
		server.setup()
		// The JSON codec is swapped per sub-case to inject decode/encode
		// failures; it is restored in the inner Reset.
		oldJSONCodec := serverJSONCodec
		myMockJSONCodec := mockJSONCodec{}
		Convey("and issuing request for container stats", func() {
			Convey("the handler should not fail", func() {
				server.containerStats(w, r)
				w.Flush()
				So(w.Code, ShouldEqual, http.StatusOK)
				Convey("the response should represent valid JSON", func() {
					var stats map[string]interface{}
					err := json.Unmarshal(w.Body.Bytes(), &stats)
					So(err, ShouldBeNil)
				})
			})
			Convey("when there are some container entries in the metric memory", func() {
				// Note: "/foo" deliberately carries id/name "f00" so the
				// response fields can be told apart from the map key.
				server.memory.ContainerMap["/"] = makeDummyContainerInfo("/")
				server.memory.ContainerMap["/foo"] = makeDummyContainerInfo("f00")
				Convey("the handler should not fail", func() {
					server.containerStats(w, r)
					w.Flush()
					So(w.Code, ShouldEqual, http.StatusOK)
					Convey("the response should reflect entries from memory", func() {
						var stats map[string]map[string]interface{}
						err := json.Unmarshal(w.Body.Bytes(), &stats)
						So(err, ShouldBeNil)
						So(stats, ShouldContainKey, "/")
						So(stats, ShouldContainKey, "/foo")
						So(stats["/foo"]["id"], ShouldEqual, "f00")
						So(stats["/foo"]["name"], ShouldEqual, "f00")
					})
				})
			})
			Convey("when reading request fails", func() {
				// nilReader simulates a body that cannot be read.
				r.Body = &nilReader{}
				Convey("the handler should fail too", func() {
					server.containerStats(w, r)
					w.Flush()
					So(w.Code, ShouldEqual, http.StatusInternalServerError)
				})
			})
			Convey("when decoding request fails", func() {
				serverJSONCodec = &myMockJSONCodec
				myMockJSONCodec.AddInterceptor(func(funcName string, _ []interface{}, result *mox.Results) bool {
					if funcName == "Unmarshal" {
						(*result)[0] = errors.New("decoding failed")
						return true
					}
					return false
				})
				Convey("the handler should also fail", func() {
					server.containerStats(w, r)
					w.Flush()
					So(w.Code, ShouldEqual, http.StatusInternalServerError)
				})
			})
			Convey("when encoding response fails", func() {
				serverJSONCodec = &myMockJSONCodec
				myMockJSONCodec.AddInterceptor(func(funcName string, _ []interface{}, result *mox.Results) bool {
					if funcName == "Encode" {
						(*result)[0] = errors.New("encoding failed")
						return true
					}
					return false
				})
				Convey("the handler should also fail", func() {
					server.containerStats(w, r)
					w.Flush()
					So(w.Code, ShouldEqual, http.StatusInternalServerError)
				})
			})
			Convey("when storing response fails", func() {
				statusCode := 0
				headers := http.Header{}
				mockWriter := &mockResponseWriter{}
				// NOTE(review): WriteHeader's status is read from args[1]
				// while AddRoute's path is read from args[2] above — confirm
				// the mox argument layout is intentional for both mocks.
				mockWriter.AddInterceptor(func(funcName string, args []interface{}, res *mox.Results) bool {
					if funcName == "Write" {
						(*res)[1] = errors.New("Write failed")
						return true
					}
					if funcName == "Header" {
						(*res)[0] = headers
						return true
					}
					if funcName == "WriteHeader" {
						statusCode = args[1].(int)
						return true
					}
					return false
				})
				Convey("the handler should fail too", func() {
					server.containerStats(mockWriter, r)
					So(statusCode, ShouldEqual, http.StatusInternalServerError)
				})
			})
			Reset(func() {
				serverJSONCodec = oldJSONCodec
			})
		})
		Reset(func() {
			newHTTPDriver = oldHTTPDriverCtor
		})
	})
}
// TestDefaultContext_serveStatusWrapper checks that serveStatusWrapper renders
// a publisher's value as JSON, and answers 500 when that value cannot be
// marshalled.
func TestDefaultContext_serveStatusWrapper(t *testing.T) {
	config := exchange.NewSystemConfig()
	memory := exchange.NewMetricMemory()
	Convey("While using the server subsystem", t, func() {
		w := httptest.NewRecorder()
		r, _ := http.NewRequest("GET", "./stats/container", strings.NewReader(`{"num_stats": 1}`))
		mockDriver := &mockHTTPDriver{}
		oldHTTPDriverCtor := newHTTPDriver
		newHTTPDriver = func() HTTPDriver {
			return mockDriver
		}
		tmp := newDefaultContext(config, memory)
		server := &tmp
		server.setup()
		Convey("and issuing request for status publisher", func() {
			Convey("the handler should not fail", func() {
				server.serveStatusWrapper("foo", func() interface{} {
					return map[string]string{"bar": "bonk"}
				}, w, r)
				w.Flush()
				So(w.Code, ShouldEqual, http.StatusOK)
				Convey("the response should represent valid JSON", func() {
					var output map[string]interface{}
					err := json.Unmarshal(w.Body.Bytes(), &output)
					So(err, ShouldBeNil)
					Convey("and the output should represent the published status", func() {
						So(output, ShouldContainKey, "bar")
					})
				})
			})
			Convey("while published object is not valid for JSON", func() {
				// map[float64]int has no JSON representation, which forces
				// the encode step to fail.
				server.serveStatusWrapper("foo", func() interface{} {
					return map[float64]int{3.5: 2}
				}, w, r)
				w.Flush()
				So(w.Code, ShouldEqual, http.StatusInternalServerError)
			})
		})
		Reset(func() {
			newHTTPDriver = oldHTTPDriverCtor
		})
	})
}
// TestDefaultContext_buildStatsResponse checks the Start/End time filtering
// and the NumStats cap of buildStatsResponse, using stats dated 1st-5th July.
// The assertions below indicate that results come back newest-first.
func TestDefaultContext_buildStatsResponse(t *testing.T) {
	// parseDate parses "dd.mm.yy"; parse errors are ignored because all
	// inputs are literals.
	parseDate := func(dateStr string) time.Time {
		res, _ := time.Parse("02.01.06", dateStr)
		return res
	}
	// extractDays collects the day-of-month of every stat, so orderings can
	// be asserted compactly.
	extractDays := func(c *cadv.ContainerInfo) (res []int) {
		for _, s := range c.Stats {
			res = append(res, s.Timestamp.Day())
		}
		return res
	}
	config := exchange.NewSystemConfig()
	memory := exchange.NewMetricMemory()
	Convey("While using the server subsystem", t, func() {
		mockDriver := &mockHTTPDriver{}
		oldHTTPDriverCtor := newHTTPDriver
		newHTTPDriver = func() HTTPDriver {
			return mockDriver
		}
		tmp := newDefaultContext(config, memory)
		server := &tmp
		server.setup()
		Convey("with some stats in memory between 1st and 5th July", func() {
			container := makeDummyContainerInfo("/")
			for d := 1; d <= 5; d++ {
				stats := cadv.ContainerStats{
					Timestamp: parseDate(fmt.Sprintf("%02d.%02d.%02d", d, 7, 9))}
				container.Stats = append(container.Stats, &stats)
			}
			server.memory.ContainerMap["/"] = container
			Convey("when stats are requested >= 2nd July", func() {
				request := &exchange.StatsRequest{
					Start: parseDate("02.07.09"),
					End:   parseDate("11.12.13"),
				}
				response := server.buildStatsResponse(request).(map[string]*cadv.ContainerInfo)
				Convey("correct list of dates should be present in response", func() {
					days := extractDays(response["/"])
					So(days, ShouldResemble, []int{5, 4, 3, 2})
				})
			})
			Convey("when stats are requested >= 2 and <= 4th July", func() {
				request := &exchange.StatsRequest{
					Start: parseDate("02.07.09"),
					End:   parseDate("04.07.09"),
				}
				response := server.buildStatsResponse(request).(map[string]*cadv.ContainerInfo)
				Convey("correct list of dates should be present in response", func() {
					days := extractDays(response["/"])
					So(days, ShouldResemble, []int{4, 3, 2})
				})
			})
			Convey("when 3 stats elements are requested", func() {
				request := &exchange.StatsRequest{
					Start:    parseDate("01.01.01"),
					End:      parseDate("11.12.13"),
					NumStats: 3,
				}
				response := server.buildStatsResponse(request).(map[string]*cadv.ContainerInfo)
				Convey("correct number and list of dates should be present in response", func() {
					days := extractDays(response["/"])
					So(days, ShouldResemble, []int{5, 4, 3})
				})
			})
		})
		Reset(func() {
			newHTTPDriver = oldHTTPDriverCtor
		})
	})
}
// makeDummyContainerInfo builds a minimal ContainerInfo whose Id and Name are
// both set to the supplied id.
func makeDummyContainerInfo(id string) *cadv.ContainerInfo {
	return &cadv.ContainerInfo{
		ContainerReference: cadv.ContainerReference{
			Id:   id,
			Name: id,
		},
	}
}
|
package myheap
import (
"fmt"
)
// IndexMaxHeap is a max-heap addressed by element index: heap operations
// rearrange the index tables rather than the data itself, so an element can
// be read or updated later via its original index.
type IndexMaxHeap struct {
	data []int // payload values of the heap; the element type could be changed
	indexes []int // indexes[x] = i means data index i sits at heap position x
	reverse []int // reverse[i] = x means data index i is at heap position x (0 = absent)
	count int // current number of elements held by the heap
	capacity int // maximum capacity, fixed at construction
}
// NewIndexMaxHeap constructs an empty index heap able to hold cap elements.
// Internal slices are 1-based: slot 0 is unused and reverse[i] == 0 means
// "index i is not in the heap".
func NewIndexMaxHeap(cap int) *IndexMaxHeap {
	heap := new(IndexMaxHeap)
	// BUG FIX: capacity must be assigned before it is used; the original
	// zeroing loop iterated over heap.capacity while it was still 0, so it
	// never ran. It was harmless only because make() already zeroes slices,
	// which is why no explicit loop is needed at all.
	heap.capacity = cap
	heap.data = make([]int, cap+1)    // skip slot 0, heap starts at 1
	heap.indexes = make([]int, cap+1) // zeroed by make
	heap.reverse = make([]int, cap+1) // zeroed by make: 0 == absent
	heap.count = 0
	return heap
}
// NewIndexMaxHeapHeapify builds an index heap from arr in O(n) (heapify):
// the array is laid out as a complete binary tree, every leaf is already a
// max-heap, and shiftDown is applied from the last non-leaf node upwards.
func NewIndexMaxHeapHeapify(arr []int) *IndexMaxHeap {
	n := len(arr)
	heap := new(IndexMaxHeap)
	heap.data = make([]int, n+1) // skip slot 0, heap starts at 1
	heap.indexes = make([]int, n+1)
	heap.reverse = make([]int, n+1)
	heap.capacity = n
	copy(heap.data[1:], arr)
	// BUG FIX: indexes and reverse must map every element before shiftDown
	// runs; previously both were left zeroed, so every heap position pointed
	// at data[0] and the heapify produced a broken heap.
	for i := 1; i <= n; i++ {
		heap.indexes[i] = i
		heap.reverse[i] = i
	}
	heap.count = n
	for i := heap.count / 2; i >= 1; i-- { // start at the last non-leaf node
		heap.shiftDown(i)
	}
	return heap
}
// Size reports how many elements are currently stored in the heap.
func (h IndexMaxHeap) Size() int {
	return h.count
}
// IsEmpty reports whether the heap currently holds no elements.
func (h IndexMaxHeap) IsEmpty() bool {
	return h.count == 0
}
// Contain reports whether the element with external (0-based) index i is
// currently present in the heap. Being inside the capacity range does not
// imply presence, so the reverse table is consulted (0 marks "absent").
func (h IndexMaxHeap) Contain(i int) bool {
	pos := i + 1 // shift to internal 1-based indexing
	if pos < 1 || pos > h.capacity {
		return false
	}
	return h.reverse[pos] != 0
}
// Insert stores item under external index i (0-based for callers; shifted to
// 1-based internally). The call is silently ignored when the heap is full,
// i is out of range, or index i is already occupied.
func (h *IndexMaxHeap) Insert(i int, item int) {
	if h.count >= h.capacity {
		return // heap full
	}
	if i+1 < 1 || i+1 > h.capacity {
		return // index out of range
	}
	if h.Contain(i) {
		return // slot already occupied
	}
	pos := i + 1
	h.data[pos] = item
	h.count++
	h.indexes[h.count] = pos
	h.reverse[pos] = h.count
	h.shiftUp(h.count)
}
// ExtractMax removes and returns the largest value stored in the heap.
// Returns 0 (the int zero value) when the heap is empty.
func (h *IndexMaxHeap) ExtractMax() int {
	if h.count == 0 {
		return 0
	}
	max := h.data[h.indexes[1]]
	// Move the last heap entry to the root, unregister the removed index in
	// the reverse table, then restore heap order from the top.
	h.indexes[1], h.indexes[h.count] = h.indexes[h.count], h.indexes[1]
	h.reverse[h.indexes[1]] = 1
	h.reverse[h.indexes[h.count]] = 0 // 0 marks the index as removed
	h.count--
	h.shiftDown(1)
	return max
}
// ExtractMaxIndex removes the top entry and returns the external (0-based)
// index of the largest value. Returns 0 when the heap is empty.
func (h *IndexMaxHeap) ExtractMaxIndex() int {
	if h.count == 0 {
		return 0
	}
	maxIdx := h.indexes[1] - 1 // convert internal 1-based back to 0-based
	// Same removal dance as ExtractMax: last entry to the root, unregister
	// the removed index, re-heapify downwards.
	h.indexes[1], h.indexes[h.count] = h.indexes[h.count], h.indexes[1]
	h.reverse[h.indexes[1]] = 1
	h.reverse[h.indexes[h.count]] = 0 // 0 marks the index as removed
	h.count--
	h.shiftDown(1)
	return maxIdx
}
// GetItem returns the value stored under external index i, or 0 (after
// printing a diagnostic) when that index is not present in the heap.
func (h *IndexMaxHeap) GetItem(i int) int {
	if !h.Contain(i) {
		fmt.Println("索引越界")
		return 0
	}
	return h.data[i+1]
}
// getMax returns the current maximum value without removing it
// (0 when the heap is empty).
func (h IndexMaxHeap) getMax() int {
	if h.count == 0 {
		return 0
	}
	return h.data[h.indexes[1]]
}
// getMaxIndex returns the external (0-based) index of the current maximum
// value without removing it (0 when the heap is empty).
func (h IndexMaxHeap) getMaxIndex() int {
	if h.count == 0 {
		return 0
	}
	return h.indexes[1] - 1
}
// Update replaces the value stored under external index i with newItem and
// restores the heap invariant. The reverse table locates the element's heap
// position in O(1) — without it, a linear scan over indexes would be needed,
// pushing the operation from O(log n) to O(n). Prints a diagnostic and does
// nothing when index i is not in the heap.
func (h *IndexMaxHeap) Update(i int, newItem int) {
	// Presence check: being inside the capacity range does not imply the
	// index is actually in the heap.
	if !h.Contain(i) {
		fmt.Println("索引越界")
		return
	}
	pos := i + 1 // internal 1-based slot
	h.data[pos] = newItem
	// reverse[pos] gives the heap position of this element; the new value
	// may be larger or smaller, so fix the heap in both directions.
	at := h.reverse[pos]
	h.shiftUp(at)   // O(log n)
	h.shiftDown(at) // O(log n)
}
// shiftUp bubbles the entry at heap position k toward the root while it is
// larger than its parent. Only indexes/reverse are swapped, never the data.
func (h *IndexMaxHeap) shiftUp(k int) {
	for k > 1 {
		parent := k / 2
		if h.data[h.indexes[parent]] >= h.data[h.indexes[k]] {
			break // parent already at least as large: heap order restored
		}
		h.indexes[k], h.indexes[parent] = h.indexes[parent], h.indexes[k]
		h.reverse[h.indexes[parent]] = parent
		h.reverse[h.indexes[k]] = k
		k = parent
	}
}
// shiftDown sinks the entry at heap position k, repeatedly swapping with its
// larger child until neither child is larger. Only index tables are swapped.
func (h *IndexMaxHeap) shiftDown(k int) {
	for {
		child := 2 * k
		if child > h.count {
			break // k is a leaf
		}
		// Pick the larger of the (one or two) children.
		if child+1 <= h.count && h.data[h.indexes[child+1]] > h.data[h.indexes[child]] {
			child++
		}
		if h.data[h.indexes[k]] >= h.data[h.indexes[child]] {
			break // heap order restored
		}
		h.indexes[k], h.indexes[child] = h.indexes[child], h.indexes[k]
		h.reverse[h.indexes[k]] = k
		h.reverse[h.indexes[child]] = child
		k = child
	}
}
|
//go:build mage
// +build mage

package main
import (
"errors"
"fmt"
"image"
"image/jpeg"
"os"
"path"
"github.com/nfnt/resize"
"github.com/magefile/mage/mg"
)
// Optimize is the umbrella mage target: it runs every image-optimization
// dependency (currently only OptimizeImages).
func Optimize() error {
	mg.Deps(OptimizeImages)
	return nil
}
// loadImage opens the file at path and decodes it into an in-memory image.
// The decoder is picked from the registered formats (JPEG is registered via
// this file's image/jpeg import).
func loadImage(path string) (image.Image, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("open file: %w", err)
	}
	defer f.Close()
	// Renamed from "image" — the original shadowed the image package.
	img, _, err := image.Decode(f)
	if err != nil {
		return nil, fmt.Errorf("image decode: %w", err)
	}
	return img, nil
}
// Target dimensions (in pixels) for web-optimized images.
const (
	width = 150
	height = 150
)
// optimizedImagesDestPath is the output directory for resized images,
// e.g. "static/150x150".
var optimizedImagesDestPath = fmt.Sprintf("static/%dx%d", width, height)
// OptimizeImages resizes static/me.jpg to width x height (Lanczos3) and
// writes the result under optimizedImagesDestPath, creating the directory
// if it does not exist yet.
func OptimizeImages() error {
	fmt.Printf("Optimizing images to %s...\n", optimizedImagesDestPath)
	// Renamed from "image" — the original shadowed the image package.
	src, err := loadImage("static/me.jpg")
	if err != nil {
		return err
	}
	if err := os.Mkdir(optimizedImagesDestPath, 0755); err != nil && !errors.Is(err, os.ErrExist) {
		return fmt.Errorf("mkdir: %w", err)
	}
	resized := resize.Resize(width, height, src, resize.Lanczos3)
	// BUG FIX: os.Create truncates an existing file. The previous
	// O_RDWR|O_CREATE open (without O_TRUNC) left stale trailing bytes when
	// the new encoding was smaller than a previously written file.
	f, err := os.Create(path.Join(optimizedImagesDestPath, "me.jpg"))
	if err != nil {
		return fmt.Errorf("open file: %w", err)
	}
	defer f.Close()
	return jpeg.Encode(f, resized, nil)
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"context"
"fmt"
"math"
"testing"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/keyspace"
"github.com/pingcap/tidb/store/gcworker"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/util/gcutil"
"github.com/stretchr/testify/require"
tikvutil "github.com/tikv/client-go/v2/util"
)
// MockGC is used to make GC work in the test environment. It disables the
// emulator GC (so dropped tables are not purged immediately), clears the GC
// variables in mysql.tidb, and returns:
//   - a timestamp 48h in the past (a safe point that predates any drop),
//   - a timestamp 48h in the future,
//   - a SQL template for installing a GC safe point, and
//   - a reset function restoring the previous emulator GC state.
func MockGC(tk *testkit.TestKit) (string, string, string, func()) {
	originGC := util.IsEmulatorGCEnable()
	resetGC := func() {
		if originGC {
			util.EmulatorGCEnable()
		} else {
			util.EmulatorGCDisable()
		}
	}
	// disable emulator GC.
	// Otherwise emulator GC will delete table record as soon as possible after execute drop table ddl.
	util.EmulatorGCDisable()
	timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(tikvutil.GCTimeFormat)
	timeAfterDrop := time.Now().Add(48 * 60 * 60 * time.Second).Format(tikvutil.GCTimeFormat)
	safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
			       ON DUPLICATE KEY
			       UPDATE variable_value = '%[1]s'`
	// clear GC variables first.
	tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
	return timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC
}
// TestAlterTableAttributes checks that ALTER TABLE ... ATTRIBUTES parses the
// merge_option/key=value syntax in its normal, space-padded, and equals-free
// (ATTRIBUTES "...") forms.
func TestAlterTableAttributes(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table alter_t (c int);`)
	// normal cases
	tk.MustExec(`alter table alter_t attributes="merge_option=allow";`)
	tk.MustExec(`alter table alter_t attributes="merge_option=allow,key=value";`)
	// space cases
	tk.MustExec(`alter table alter_t attributes=" merge_option=allow ";`)
	tk.MustExec(`alter table alter_t attributes=" merge_option = allow , key = value ";`)
	// without equal
	tk.MustExec(`alter table alter_t attributes " merge_option=allow ";`)
	tk.MustExec(`alter table alter_t attributes " merge_option=allow , key=value ";`)
	tk.MustExec("drop table alter_t")
}
// TestAlterTablePartitionAttributes checks partition-level ATTRIBUTES syntax
// and that adding/dropping/truncating a partition updates the table-level
// attribute's key ranges (observed through column 3 of the attributes view).
func TestAlterTablePartitionAttributes(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table alter_p (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11),
	PARTITION p2 VALUES LESS THAN (16),
	PARTITION p3 VALUES LESS THAN (21)
);`)
	// normal cases
	tk.MustExec(`alter table alter_p partition p0 attributes="merge_option=allow";`)
	tk.MustExec(`alter table alter_p partition p1 attributes="merge_option=allow,key=value";`)
	// space cases
	tk.MustExec(`alter table alter_p partition p2 attributes=" merge_option=allow ";`)
	tk.MustExec(`alter table alter_p partition p3 attributes=" merge_option = allow , key = value ";`)
	// without equal
	tk.MustExec(`alter table alter_p partition p1 attributes " merge_option=allow ";`)
	tk.MustExec(`alter table alter_p partition p1 attributes " merge_option=allow , key=value ";`)
	// reset all
	tk.MustExec(`alter table alter_p partition p0 attributes default;`)
	tk.MustExec(`alter table alter_p partition p1 attributes default;`)
	tk.MustExec(`alter table alter_p partition p2 attributes default;`)
	tk.MustExec(`alter table alter_p partition p3 attributes default;`)
	// add table level attribute
	tk.MustExec(`alter table alter_p attributes="merge_option=deny";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 1)
	// add a new partition p4
	tk.MustExec(`alter table alter_p add partition (PARTITION p4 VALUES LESS THAN (60));`)
	rows1 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows1, 1)
	// adding a partition changes the table attribute's key range
	require.NotEqual(t, rows[0][3], rows1[0][3])
	// drop the new partition p4
	tk.MustExec(`alter table alter_p drop partition p4;`)
	rows2 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows2, 1)
	// dropping p4 restores the original key range
	require.Equal(t, rows[0][3], rows2[0][3])
	// add a new partition p5
	tk.MustExec(`alter table alter_p add partition (PARTITION p5 VALUES LESS THAN (80));`)
	rows3 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows3, 1)
	require.NotEqual(t, rows[0][3], rows3[0][3])
	// truncate the new partition p5
	tk.MustExec(`alter table alter_p truncate partition p5;`)
	rows4 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows4, 1)
	// truncation assigns a new partition ID, so the range changes again
	require.NotEqual(t, rows3[0][3], rows4[0][3])
	require.NotEqual(t, rows[0][3], rows4[0][3])
	tk.MustExec("drop table alter_p")
}
// TestTruncateTable checks that table- and partition-level attributes survive
// TRUNCATE TABLE with their values intact while their key ranges (column 3)
// change, because truncation assigns new table/partition IDs.
func TestTruncateTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table truncate_t (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	// add attributes
	tk.MustExec(`alter table truncate_t attributes="key=value";`)
	tk.MustExec(`alter table truncate_t partition p0 attributes="key1=value1";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 2)
	// truncate table
	tk.MustExec(`truncate table truncate_t;`)
	rows1 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows1, 2)
	// check table truncate_t's attribute
	require.Equal(t, "schema/test/truncate_t", rows1[0][0])
	require.Equal(t, `"key=value"`, rows1[0][2])
	require.NotEqual(t, rows[0][3], rows1[0][3]) // new table ID, new range
	// check partition p0's attribute
	require.Equal(t, "schema/test/truncate_t/p0", rows1[1][0])
	require.Equal(t, `"key1=value1"`, rows1[1][2])
	require.NotEqual(t, rows[1][3], rows1[1][3]) // new partition ID, new range
	// test only table
	tk.MustExec(`create table truncate_ot (c int);`)
	// add attribute
	tk.MustExec(`alter table truncate_ot attributes="key=value";`)
	rows2 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows2, 3)
	// truncate table
	tk.MustExec(`truncate table truncate_ot;`)
	rows3 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows3, 3)
	// check table truncate_ot's attribute
	require.Equal(t, "schema/test/truncate_ot", rows3[0][0])
	require.Equal(t, `"key=value"`, rows3[0][2])
	require.NotEqual(t, rows2[0][3], rows3[0][3])
}
// TestRenameTable checks that RENAME TABLE re-keys the attribute entries to
// the new table name while leaving the key ranges (column 3) untouched,
// since renaming does not change table or partition IDs.
func TestRenameTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table rename_t (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	// add attributes
	tk.MustExec(`alter table rename_t attributes="key=value";`)
	tk.MustExec(`alter table rename_t partition p0 attributes="key1=value1";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 2)
	// rename table
	tk.MustExec(`rename table rename_t to rename_t1;`)
	rows1 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows1, 2)
	// check table rename_t1's attribute
	require.Equal(t, "schema/test/rename_t1", rows1[0][0])
	require.Equal(t, `"key=value"`, rows1[0][2])
	require.Equal(t, rows[0][3], rows1[0][3]) // same IDs, same range
	// check partition p0's attribute
	require.Equal(t, "schema/test/rename_t1/p0", rows1[1][0])
	require.Equal(t, `"key1=value1"`, rows1[1][2])
	require.Equal(t, rows[1][3], rows1[1][3])
	// test only table
	tk.MustExec(`create table rename_ot (c int);`)
	// add attribute
	tk.MustExec(`alter table rename_ot attributes="key=value";`)
	rows2 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows2, 3)
	// rename table
	tk.MustExec(`rename table rename_ot to rename_ot1;`)
	rows3 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows3, 3)
	// check table rename_ot1's attribute
	require.Equal(t, "schema/test/rename_ot1", rows3[0][0])
	require.Equal(t, `"key=value"`, rows3[0][2])
	require.Equal(t, rows2[0][3], rows3[0][3])
}
// TestRecoverTable checks that table- and partition-level attributes are
// fully restored by RECOVER TABLE after a DROP, including the original key
// ranges, since recovery keeps the original table/partition IDs.
func TestRecoverTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table recover_t (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	// GC must be mocked so the dropped table is still recoverable.
	timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
	defer resetGC()
	// Set GC safe point
	tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
	// Set GC enable.
	require.NoError(t, gcutil.EnableGC(tk.Session()))
	// add attributes
	tk.MustExec(`alter table recover_t attributes="key=value";`)
	tk.MustExec(`alter table recover_t partition p0 attributes="key1=value1";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 2)
	// drop table
	tk.MustExec(`drop table recover_t;`)
	// recover table
	tk.MustExec(`recover table recover_t;`)
	rows1 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows1, 2)
	// check table recover_t's attribute
	require.Equal(t, "schema/test/recover_t", rows1[0][0])
	require.Equal(t, `"key=value"`, rows1[0][2])
	require.Equal(t, rows[0][3], rows1[0][3]) // IDs preserved, range unchanged
	// check partition p0's attribute
	require.Equal(t, "schema/test/recover_t/p0", rows1[1][0])
	require.Equal(t, `"key1=value1"`, rows1[1][2])
	require.Equal(t, rows[1][3], rows1[1][3])
}
// TestFlashbackTable checks that table- and partition-level attributes follow
// the table through FLASHBACK TABLE after both DROP TABLE and TRUNCATE TABLE,
// with key ranges preserved (flashback keeps the original IDs).
func TestFlashbackTable(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	_, err := infosync.GlobalInfoSyncerInit(context.Background(), dom.DDL().GetID(), dom.ServerID, dom.GetEtcdClient(), dom.GetEtcdClient(), dom.GetPDClient(), keyspace.CodecV1, true)
	require.NoError(t, err)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table flash_t (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	// GC must be mocked so the dropped table stays recoverable.
	timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
	defer resetGC()
	// Set GC safe point
	tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
	// Set GC enable.
	err = gcutil.EnableGC(tk.Session())
	require.NoError(t, err)
	// add attributes
	tk.MustExec(`alter table flash_t attributes="key=value";`)
	tk.MustExec(`alter table flash_t partition p0 attributes="key1=value1";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 2)
	// drop table
	tk.MustExec(`drop table flash_t;`)
	// flashback table
	tk.MustExec(`flashback table flash_t to flash_t1;`)
	rows1 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows1, 2)
	// check table flash_t1's attribute
	require.Equal(t, "schema/test/flash_t1", rows1[0][0])
	require.Equal(t, `"key=value"`, rows1[0][2])
	require.Equal(t, rows[0][3], rows1[0][3])
	// check partition p0's attribute
	require.Equal(t, "schema/test/flash_t1/p0", rows1[1][0])
	require.Equal(t, `"key1=value1"`, rows1[1][2])
	require.Equal(t, rows[1][3], rows1[1][3])
	// truncate table
	tk.MustExec(`truncate table flash_t1;`)
	// flashback table
	tk.MustExec(`flashback table flash_t1 to flash_t2;`)
	rows2 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	// BUG FIX: the original re-asserted len(rows1) here; rows2 is the result
	// actually inspected below.
	require.Len(t, rows2, 2)
	// check table flash_t2's attribute
	require.Equal(t, "schema/test/flash_t2", rows2[0][0])
	require.Equal(t, `"key=value"`, rows2[0][2])
	require.Equal(t, rows[0][3], rows2[0][3])
	// check partition p0's attribute
	require.Equal(t, "schema/test/flash_t2/p0", rows2[1][0])
	require.Equal(t, `"key1=value1"`, rows2[1][2])
	require.Equal(t, rows[1][3], rows2[1][3])
}
// TestDropTable checks that a table's attribute entries disappear after DROP
// TABLE once the GC worker has processed the delete ranges, and that creating
// a table with the same name afterwards does not resurrect old attributes.
func TestDropTable(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	_, err := infosync.GlobalInfoSyncerInit(context.Background(), dom.DDL().GetID(), dom.ServerID, dom.GetEtcdClient(), dom.GetEtcdClient(), dom.GetPDClient(), keyspace.CodecV1, true)
	require.NoError(t, err)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table drop_t (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	// BUG FIX: the failpoint.Enable error was silently ignored; if enabling
	// fails, the rest of the test runs under wrong assumptions.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/gcworker/ignoreDeleteRangeFailed", `return`))
	defer func() {
		failpoint.Disable("github.com/pingcap/tidb/store/gcworker/ignoreDeleteRangeFailed")
	}()
	// GC must be mocked so delete ranges are processed predictably.
	timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
	defer resetGC()
	// Set GC safe point
	tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
	// Set GC enable.
	err = gcutil.EnableGC(tk.Session())
	require.NoError(t, err)
	gcWorker, err := gcworker.NewMockGCWorker(store)
	require.NoError(t, err)
	// add attributes
	tk.MustExec(`alter table drop_t attributes="key=value";`)
	tk.MustExec(`alter table drop_t partition p0 attributes="key1=value1";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 2)
	// drop table
	tk.MustExec(`drop table drop_t;`)
	err = gcWorker.DeleteRanges(context.Background(), uint64(math.MaxInt64))
	require.NoError(t, err)
	rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 0)
	tk.MustExec("use test")
	tk.MustExec(`create table drop_t (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	// the fresh table must start without any attributes
	rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 0)
}
// TestCreateWithSameName checks that recreating a dropped table under the
// same name and attaching new attributes works, and that the GC worker's
// delete-range processing removes only the dropped table's attributes.
func TestCreateWithSameName(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	_, err := infosync.GlobalInfoSyncerInit(context.Background(), dom.DDL().GetID(), dom.ServerID, dom.GetEtcdClient(), dom.GetEtcdClient(), dom.GetPDClient(), keyspace.CodecV1, true)
	require.NoError(t, err)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table recreate_t (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	// NOTE(review): the failpoint.Enable error is ignored here (TestDropTable
	// has the same pattern) — consider checking it with require.NoError.
	failpoint.Enable("github.com/pingcap/tidb/store/gcworker/ignoreDeleteRangeFailed", `return`)
	defer func() {
		failpoint.Disable("github.com/pingcap/tidb/store/gcworker/ignoreDeleteRangeFailed")
	}()
	// GC must be mocked so delete ranges are processed predictably.
	timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
	defer resetGC()
	// Set GC safe point
	tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
	// Set GC enable.
	err = gcutil.EnableGC(tk.Session())
	require.NoError(t, err)
	gcWorker, err := gcworker.NewMockGCWorker(store)
	require.NoError(t, err)
	// add attributes
	tk.MustExec(`alter table recreate_t attributes="key=value";`)
	tk.MustExec(`alter table recreate_t partition p0 attributes="key1=value1";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 2)
	// drop table
	tk.MustExec(`drop table recreate_t;`)
	rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 0)
	tk.MustExec(`create table recreate_t (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	// add attributes
	tk.MustExec(`alter table recreate_t attributes="key=value";`)
	tk.MustExec(`alter table recreate_t partition p1 attributes="key1=value1";`)
	rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 2)
	// GC of the old table's ranges must not touch the new table's attributes
	err = gcWorker.DeleteRanges(context.Background(), uint64(math.MaxInt64))
	require.NoError(t, err)
	rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 2)
	// drop table
	tk.MustExec(`drop table recreate_t;`)
	err = gcWorker.DeleteRanges(context.Background(), uint64(math.MaxInt64))
	require.NoError(t, err)
	rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 0)
}
// TestPartition checks attribute behavior across partition DDL: DROP
// PARTITION removes the partition's attribute, TRUNCATE PARTITION rewrites
// its key range, and EXCHANGE PARTITION moves the attribute to the other
// table while preserving the range.
func TestPartition(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	_, err := infosync.GlobalInfoSyncerInit(context.Background(), dom.DDL().GetID(), dom.ServerID, dom.GetEtcdClient(), dom.GetEtcdClient(), dom.GetPDClient(), keyspace.CodecV1, true)
	require.NoError(t, err)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table part (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11),
	PARTITION p2 VALUES LESS THAN (20)
);`)
	tk.MustExec(`create table part1 (c int);`)
	// add attributes
	tk.MustExec(`alter table part attributes="key=value";`)
	tk.MustExec(`alter table part partition p0 attributes="key1=value1";`)
	tk.MustExec(`alter table part partition p1 attributes="key2=value2";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows, 3)
	// drop partition
	// partition p0's attribute will be deleted
	tk.MustExec(`alter table part drop partition p0;`)
	rows1 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows1, 2)
	require.Equal(t, "schema/test/part", rows1[0][0])
	require.Equal(t, `"key=value"`, rows1[0][2])
	// table attribute only contains three ranges now
	require.NotEqual(t, rows[0][3], rows1[0][3])
	require.Equal(t, "schema/test/part/p1", rows1[1][0])
	require.Equal(t, `"key2=value2"`, rows1[1][2])
	require.Equal(t, rows[2][3], rows1[1][3])
	// truncate partition
	// partition p1's key range will be updated
	tk.MustExec(`alter table part truncate partition p1;`)
	rows2 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows2, 2)
	require.Equal(t, "schema/test/part", rows2[0][0])
	require.Equal(t, `"key=value"`, rows2[0][2])
	require.NotEqual(t, rows1[0][3], rows2[0][3])
	require.Equal(t, "schema/test/part/p1", rows2[1][0])
	require.Equal(t, `"key2=value2"`, rows2[1][2])
	require.NotEqual(t, rows1[1][3], rows2[1][3])
	// exchange partition
	// partition p1's attribute will be exchanged to table part1
	tk.MustExec(`set @@tidb_enable_exchange_partition=1;`)
	tk.MustExec(`alter table part exchange partition p1 with table part1;`)
	rows3 := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows()
	require.Len(t, rows3, 2)
	require.Equal(t, "schema/test/part", rows3[0][0])
	require.Equal(t, `"key=value"`, rows3[0][2])
	require.Equal(t, rows2[0][3], rows3[0][3])
	require.Equal(t, "schema/test/part1", rows3[1][0])
	require.Equal(t, `"key2=value2"`, rows3[1][2])
	require.Equal(t, rows2[1][3], rows3[1][3])
}
// TestDropSchema verifies that dropping a database removes every attribute
// row belonging to its tables and partitions from
// information_schema.attributes.
func TestDropSchema(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	_, err := infosync.GlobalInfoSyncerInit(context.Background(), dom.DDL().GetID(), dom.ServerID, dom.GetEtcdClient(), dom.GetEtcdClient(), dom.GetPDClient(), keyspace.CodecV1, true)
	require.NoError(t, err)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table drop_s1 (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	tk.MustExec(`create table drop_s2 (c int);`)
	// add attributes
	tk.MustExec(`alter table drop_s1 attributes="key=value";`)
	tk.MustExec(`alter table drop_s1 partition p0 attributes="key1=value1";`)
	tk.MustExec(`alter table drop_s2 attributes="key=value";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Rows()
	require.Len(t, rows, 3)
	// drop database
	tk.MustExec(`drop database test`)
	rows = tk.MustQuery(`select * from information_schema.attributes;`).Rows()
	require.Len(t, rows, 0)
}
// TestDefaultKeyword verifies that `attributes=default` clears a previously
// set attribute, both at partition level and at table level, removing the
// corresponding row from information_schema.attributes.
func TestDefaultKeyword(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	_, err := infosync.GlobalInfoSyncerInit(context.Background(), dom.DDL().GetID(), dom.ServerID, dom.GetEtcdClient(), dom.GetEtcdClient(), dom.GetPDClient(), keyspace.CodecV1, true)
	require.NoError(t, err)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table def (c int)
PARTITION BY RANGE (c) (
	PARTITION p0 VALUES LESS THAN (6),
	PARTITION p1 VALUES LESS THAN (11)
);`)
	// add attributes
	tk.MustExec(`alter table def attributes="key=value";`)
	tk.MustExec(`alter table def partition p0 attributes="key1=value1";`)
	rows := tk.MustQuery(`select * from information_schema.attributes;`).Rows()
	require.Len(t, rows, 2)
	// reset the partition p0's attribute
	tk.MustExec(`alter table def partition p0 attributes=default;`)
	rows = tk.MustQuery(`select * from information_schema.attributes;`).Rows()
	require.Len(t, rows, 1)
	// reset the table def's attribute
	tk.MustExec(`alter table def attributes=default;`)
	rows = tk.MustQuery(`select * from information_schema.attributes;`).Rows()
	require.Len(t, rows, 0)
}
|
package main
import (
"fmt"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/widgets"
)
// containers builds one demo window per Qt container widget (group box,
// scroll area, tool box, tab widget, stacked widget, frame, plain widget,
// MDI area, dock widgets) and registers each with addWidget. The windows
// are shown in the order they are added.
func containers() {
	//Group Box
	groupBox := widgets.NewQGroupBox2("Group Box", nil)
	groupBox.SetWindowTitle("Group Box")
	groupBoxLayout := widgets.NewQVBoxLayout2(groupBox)
	for i := 0; i < 3; i++ {
		groupBoxLayout.AddWidget(widgets.NewQPushButton2(fmt.Sprintf("PushButton: %v", i), nil), 0, 0)
	}
	addWidget(groupBox)
	//Scroll Area
	scrollArea := widgets.NewQScrollArea(nil)
	scrollArea.SetWindowTitle("Scroll Area")
	scrollAreaWidget := widgets.NewQWidget(nil, 0)
	scrollAreaWidgetLayout := widgets.NewQGridLayout(scrollAreaWidget)
	// 25x25 grid of labels, big enough to force scrollbars.
	for row := 0; row < 25; row++ {
		for column := 0; column < 25; column++ {
			scrollAreaWidgetLayout.AddWidget2(widgets.NewQLabel2(fmt.Sprintf("[%v:%v]", row, column), nil, 0), row, column, core.Qt__AlignCenter)
		}
	}
	scrollArea.SetWidget(scrollAreaWidget)
	addWidget(scrollArea)
	//Tool Box
	toolBox := widgets.NewQToolBox(nil, 0)
	toolBox.SetWindowTitle("Tool Box")
	toolBox.AddItem2(widgets.NewQLabel2("First Widget", nil, 0), "First")
	toolBox.AddItem2(widgets.NewQPushButton2("Second Widget", nil), "Second")
	toolBox.AddItem2(widgets.NewQGroupBox2("Third Widget", nil), "Third")
	addWidget(toolBox)
	//Tab Widget
	tabWidget := widgets.NewQTabWidget(nil)
	tabWidget.SetWindowTitle("Tab Widget")
	tabWidget.AddTab(widgets.NewQLabel2("First Widget", nil, 0), "First")
	tabWidget.AddTab(widgets.NewQPushButton2("Second Widget", nil), "Second")
	tabWidget.AddTab(widgets.NewQGroupBox2("Third Widget", nil), "Third")
	addWidget(tabWidget)
	//Stacked Widget - one page visible at a time; a button cycles pages.
	centralWidget := widgets.NewQWidget(nil, 0)
	centralWidget.SetWindowTitle("Stacked Widget")
	centralWidgetLayout := widgets.NewQVBoxLayout2(centralWidget)
	stackedWidget := widgets.NewQStackedWidget(nil)
	stackedWidget.AddWidget(widgets.NewQLabel2("First Widget", nil, 0))
	stackedWidget.AddWidget(widgets.NewQPushButton2("Second Widget", nil))
	stackedWidget.AddWidget(widgets.NewQGroupBox2("Third Widget", nil))
	centralWidgetLayout.AddWidget(stackedWidget, 0, 0)
	changeStackButton := widgets.NewQPushButton2("Show Next In Stack", nil)
	changeStackButton.ConnectClicked(func(checked bool) {
		// Advance to the next page, wrapping back to page 0 at the end.
		nextIndex := stackedWidget.CurrentIndex() + 1
		if nextIndex >= stackedWidget.Count() {
			stackedWidget.SetCurrentIndex(0)
		} else {
			stackedWidget.SetCurrentIndex(nextIndex)
		}
	})
	centralWidgetLayout.AddWidget(changeStackButton, 0, 0)
	addWidget(centralWidget)
	//Frame - demonstrates the three raised frame styles.
	frame := widgets.NewQFrame(nil, 0)
	frame.SetWindowTitle("Frame")
	frameLayout := widgets.NewQVBoxLayout2(frame)
	for i := 0; i < 3; i++ {
		someFrame := widgets.NewQFrame(nil, 0)
		switch i {
		case 0:
			someFrame.SetFrameStyle(int(widgets.QFrame__Box) | int(widgets.QFrame__Raised))
		case 1:
			someFrame.SetFrameStyle(int(widgets.QFrame__Panel) | int(widgets.QFrame__Raised))
		case 2:
			someFrame.SetFrameStyle(int(widgets.QFrame__StyledPanel) | int(widgets.QFrame__Raised))
		}
		someFrameLayout := widgets.NewQVBoxLayout2(someFrame)
		someFrameLayout.AddWidget(widgets.NewQPushButton2(fmt.Sprintf("PushButton: %v", i), nil), 0, 0)
		frameLayout.AddWidget(someFrame, 0, 0)
	}
	addWidget(frame)
	//Widget
	widget := widgets.NewQWidget(nil, 0)
	widget.SetWindowTitle("Widget")
	widgetLayout := widgets.NewQVBoxLayout2(widget)
	for i := 0; i < 3; i++ {
		someWidget := widgets.NewQWidget(nil, 0)
		someWidgetLayout := widgets.NewQVBoxLayout2(someWidget)
		someWidgetLayout.AddWidget(widgets.NewQPushButton2(fmt.Sprintf("PushButton: %v", i), nil), 0, 0)
		widgetLayout.AddWidget(someWidget, 0, 0)
	}
	addWidget(widget)
	//MDI Area - hosts a single subwindow with a label and a button.
	mdiArea := widgets.NewQMdiArea(nil)
	mdiArea.SetWindowTitle("MDI Area")
	subWindow := widgets.NewQMdiSubWindow(nil, 0)
	sWCentralWidget := widgets.NewQWidget(nil, 0)
	sWCentralWidgetLayout := widgets.NewQVBoxLayout2(sWCentralWidget)
	sWCentralWidgetLayout.AddWidget(widgets.NewQLabel2("Label", nil, 0), 0, 0)
	sWCentralWidgetLayout.AddWidget(widgets.NewQPushButton2("PushButton", nil), 0, 0)
	subWindow.SetWidget(sWCentralWidget)
	mdiArea.AddSubWindow(subWindow, 0)
	mdiArea.Resize2(300, 300)
	addWidget(mdiArea)
	//Dock Widget - two floating docks attached to a main window.
	mainWindow := widgets.NewQMainWindow(nil, 0)
	mainWindow.SetWindowTitle("Dock Widget")
	topDockWidget := widgets.NewQDockWidget("Top Dock Widget", nil, 0)
	topDockWidget.SetAllowedAreas(core.Qt__AllDockWidgetAreas)
	topDockWidget.SetFloating(true)
	topDockWidget.SetWidget(widgets.NewQPushButton2("PushButton", nil))
	mainWindow.AddDockWidget(core.Qt__TopDockWidgetArea, topDockWidget)
	bottomDockWidget := widgets.NewQDockWidget("Bottom Dock Widget", nil, 0)
	bottomDockWidget.SetAllowedAreas(core.Qt__AllDockWidgetAreas)
	bottomDockWidget.SetFloating(true)
	bottomDockWidget.SetWidget(widgets.NewQPushButton2("PushButton", nil))
	mainWindow.AddDockWidget(core.Qt__BottomDockWidgetArea, bottomDockWidget)
	addWidget(mainWindow)
}
|
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"time"
"os"
"strings"
"strconv"
gpumanager "pkg/gpu/nvidia"
"github.com/golang/glog"
)
const (
	// Device plugin settings.
	kubeletEndpoint      = "kubelet.sock" // kubelet's device-plugin registration socket
	pluginEndpointPrefix = "nvidiaGPU"
	devDirectory         = "/dev" // directory scanned for Nvidia device nodes
	// Environment variables that configure the advertised extended resource.
	envExtendedResourceName           = "DP_EXTENDED_RESOURCE_NAME"
	envExtendedResourceValuePerDevice = "DP_EXTENDED_RESOURCE_VALUE_PER_DEVICE"
)
// getExtendedResourceValuePerDevice reads DP_EXTENDED_RESOURCE_VALUE_PER_DEVICE
// and returns how many extended resources to advertise per physical device.
// Defaults to 1 when the variable is unset.
//
// Unparsable or non-positive values terminate the process: the messages have
// always said "Fatal:", but the previous glog.Errorf calls kept running and
// then cast an invalid (possibly negative) int to uint, wrapping around to a
// huge value.
func getExtendedResourceValuePerDevice() (extendedResourceValue uint) {
	extendedResourceValue = 1 // default value
	strNum, present := os.LookupEnv(envExtendedResourceValuePerDevice)
	if !present {
		return
	}
	rawExtendedResourceValue, err := strconv.Atoi(strNum)
	if err != nil {
		glog.Fatalf("Fatal: Could not parse %s environment variable: %v\n", envExtendedResourceValuePerDevice, err)
	}
	if rawExtendedResourceValue < 1 {
		glog.Fatalf("Fatal: invalid %s environment variable value: %v\n", envExtendedResourceValuePerDevice, rawExtendedResourceValue)
	}
	extendedResourceValue = uint(rawExtendedResourceValue)
	return
}
// Command-line flags. Note: the default for -gpu-duplication-factor is
// computed once at package init from the DP_EXTENDED_RESOURCE_VALUE_PER_DEVICE
// environment variable (see getExtendedResourceValuePerDevice).
var (
	hostPathPrefix = flag.String("host-path", "/home/kubernetes/bin/nvidia", "Path on the host that contains nvidia libraries. This will be mounted inside the container as '-container-path'")
	containerPathPrefix = flag.String("container-path", "/usr/local/nvidia", "Path on the container that mounts '-host-path'")
	hostVulkanICDPathPrefix = flag.String("host-vulkan-icd-path", "/home/kubernetes/bin/nvidia/vulkan/icd.d", "Path on the host that contains the Nvidia Vulkan installable client driver. This will be mounted inside the container as '-container-vulkan-icd-path'")
	containerVulkanICDPathPrefix = flag.String("container-vulkan-icd-path", "/etc/vulkan/icd.d", "Path on the container that mounts '-host-vulkan-icd-path'")
	pluginMountPath = flag.String("plugin-directory", "/device-plugin", "The directory path to create plugin socket")
	gpuDuplicationFactor = flag.Uint("gpu-duplication-factor", getExtendedResourceValuePerDevice(), "The number of fake GPU device declared per real GPU device")
)
// main starts the shared Nvidia GPU device plugin: mounts the host driver and
// Vulkan ICD paths into containers, retries manager startup until the Nvidia
// drivers are present, then serves the kubelet device-plugin API on a socket
// named after DP_EXTENDED_RESOURCE_NAME.
func main() {
	flag.Parse()
	glog.Infoln("device-plugin started")
	mountPaths := []gpumanager.MountPath{
		{HostPath: *hostPathPrefix, ContainerPath: *containerPathPrefix},
		{HostPath: *hostVulkanICDPathPrefix, ContainerPath: *containerVulkanICDPathPrefix}}
	ngm := gpumanager.NewSharedNvidiaGPUManager(devDirectory, mountPaths, *gpuDuplicationFactor)
	// Keep on trying until success. This is required
	// because Nvidia drivers may not be installed initially.
	for {
		err := ngm.Start()
		if err == nil {
			break
		}
		// Use non-default level to avoid log spam.
		glog.V(3).Infof("nvidiaGPUManager.Start() failed: %v", err)
		time.Sleep(5 * time.Second)
	}
	// Socket name: "<resource name>-<unix timestamp>.sock"; "/" in the
	// resource name is replaced with "." to keep it filename-safe.
	ngm.Serve(*pluginMountPath, kubeletEndpoint, fmt.Sprintf("%s-%d.sock", strings.Replace(os.Getenv(envExtendedResourceName), "/", ".", -1), time.Now().Unix()))
}
|
package main
import (
"io"
"log"
"net"
"time"
)
// main listens on TCP port 8080 and hands every accepted connection to
// handleConnection on its own goroutine. Accept failures are logged and
// skipped; only a failure to bind the listener is fatal.
func main() {
	listener, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatalf("Something went wrong while creating to Listener : %v", err)
	}
	for {
		conn, acceptErr := listener.Accept()
		if acceptErr != nil {
			log.Printf("Something went wrong while accepting to conn : %v", acceptErr)
			continue
		}
		go handleConnection(conn)
	}
}
func handleConnection(c net.Conn) {
defer c.Close()
for {
_, err := io.WriteString(c, "Got Response from Server \n")
if err != nil {
log.Printf("Something went wrong while responding to client : %v", err)
}
time.Sleep(time.Second)
}
}
|
package resolvers
import (
"context"
"github.com/syncromatics/kafmesh/internal/graph/generated"
"github.com/syncromatics/kafmesh/internal/graph/model"
"github.com/pkg/errors"
)
//go:generate mockgen -source=./query.go -destination=./query_mock_test.go -package=resolvers_test
// QueryLoader is the loader for queries. It abstracts the data source the
// QueryResolver methods delegate to; a mock implementation is generated for
// tests via the go:generate directive above.
type QueryLoader interface {
	GetAllServices() ([]*model.Service, error)
	GetAllPods() ([]*model.Pod, error)
	GetAllTopics() ([]*model.Topic, error)
	ServiceByID(int) (*model.Service, error)
	ComponentByID(int) (*model.Component, error)
}
// Compile-time check that QueryResolver satisfies the generated interface.
var _ generated.QueryResolver = &QueryResolver{}

// QueryResolver resolves querys. It embeds the root Resolver to reach the
// per-request DataLoaders.
type QueryResolver struct {
	*Resolver
}
// Services gets all the services known to the query loader.
func (r *QueryResolver) Services(ctx context.Context) ([]*model.Service, error) {
	loader := r.DataLoaders.QueryLoader(ctx)
	services, err := loader.GetAllServices()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get services from loader")
	}
	return services, nil
}
// Pods gets all the pods known to the query loader.
func (r *QueryResolver) Pods(ctx context.Context) ([]*model.Pod, error) {
	loader := r.DataLoaders.QueryLoader(ctx)
	pods, err := loader.GetAllPods()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get pods from loader")
	}
	return pods, nil
}
// Topics gets all the topics known to the query loader.
func (r *QueryResolver) Topics(ctx context.Context) ([]*model.Topic, error) {
	loader := r.DataLoaders.QueryLoader(ctx)
	topics, err := loader.GetAllTopics()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get topics from loader")
	}
	return topics, nil
}
// ServiceByID gets the service with the given id from the query loader.
func (r *QueryResolver) ServiceByID(ctx context.Context, id int) (*model.Service, error) {
	loader := r.DataLoaders.QueryLoader(ctx)
	service, err := loader.ServiceByID(id)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get service by id from loader")
	}
	return service, nil
}
// ComponentByID gets the component with the given id from the query loader.
func (r *QueryResolver) ComponentByID(ctx context.Context, id int) (*model.Component, error) {
	loader := r.DataLoaders.QueryLoader(ctx)
	component, err := loader.ComponentByID(id)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get component by id from loader")
	}
	return component, nil
}
|
package day1
import "testing"
var testInput = []int{1721,979,366, 299, 675, 1456}
// Test_calc verifies that calcTwoNum finds the pair of entries summing to
// 2020 and returns their product (1721 * 299 = 514579).
func Test_calc(t *testing.T) {
	type args struct {
		lines *[]int
	}
	tests := []struct {
		name    string
		args    args
		want    int
		wantErr bool
	}{
		{"Expect 514579", args{&testInput}, 514579, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := calcTwoNum(tt.args.lines)
			if (err != nil) != tt.wantErr {
				// Name the function actually under test; the old messages
				// said "calc()", which does not exist.
				t.Errorf("calcTwoNum() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("calcTwoNum() got = %v, want %v", got, tt.want)
			}
		})
	}
}
func Test_calcThreeNum(t *testing.T) {
type args struct {
lines *[]int
}
tests := []struct {
name string
args args
want int
wantErr bool
}{
{"Expect 241861950", args{&testInput}, 241861950, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := calcThreeNum(tt.args.lines)
if (err != nil) != tt.wantErr {
t.Errorf("calcThreeNum() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("calcThreeNum() got = %v, want %v", got, tt.want)
}
})
}
} |
// Copyright © 2017 Heptio
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envoy
import (
"sort"
v2 "github.com/envoyproxy/go-control-plane/api"
)
// clusterCache is a thread safe, atomic, copy on write cache of *v2.Cluster objects.
//
// The cache is a buffered channel of capacity one whose single element is the
// current contents: receiving the element acquires the lock, sending it back
// releases it. The receive/send pairing in each method is therefore the
// synchronization and must not be reordered.
type clusterCache chan []*v2.Cluster

// init must be called before clusterCache is used for the first time.
func (cc *clusterCache) init() {
	*cc = make(clusterCache, 1)
	*cc <- nil // prime cache
}

// Values returns a copy of the contents of the cache.
func (cc clusterCache) Values() []*v2.Cluster {
	v := <-cc
	r := make([]*v2.Cluster, len(v))
	copy(r, v)
	cc <- v
	return r
}

// with executes f with the value of the stored in the cache.
// the value returned from f replaces the contents in the cache.
func (cc clusterCache) with(f func([]*v2.Cluster) []*v2.Cluster) {
	v := <-cc
	v = f(v)
	// TODO(dfc) Add and Remove do not (currently) affect the sort order
	// so it might be possible to avoid always sorting.
	sort.Sort(clusterByName(v))
	cc <- v
}

// Add adds an entry to the cache. If a Cluster with the same
// name exists, it is replaced.
// TODO(dfc) make Add variadic to support atomic addition of several clusters
// also niladic Add can be used as a no-op notify for watchers.
func (cc clusterCache) Add(c *v2.Cluster) {
	cc.with(func(in []*v2.Cluster) []*v2.Cluster {
		// Defensive re-sort so the binary search below is valid.
		sort.Sort(clusterByName(in))
		i := sort.Search(len(in), func(i int) bool { return in[i].Name >= c.Name })
		if i < len(in) && in[i].Name == c.Name {
			// c is already present, replace
			in[i] = c
			return in
		}
		// c is not present, append and sort
		in = append(in, c)
		sort.Sort(clusterByName(in))
		return in
	})
}

// Remove removes the named entry from the cache. If the entry
// is not present in the cache, the operation is a no-op.
func (cc clusterCache) Remove(name string) {
	cc.with(func(in []*v2.Cluster) []*v2.Cluster {
		sort.Sort(clusterByName(in))
		i := sort.Search(len(in), func(i int) bool { return in[i].Name >= name })
		if i < len(in) && in[i].Name == name {
			// c is present, remove
			in = append(in[:i], in[i+1:]...)
		}
		return in
	})
}

// clusterByName sorts clusters lexically by Name.
type clusterByName []*v2.Cluster

func (c clusterByName) Len() int           { return len(c) }
func (c clusterByName) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
func (c clusterByName) Less(i, j int) bool { return c[i].Name < c[j].Name }
// clusterLoadAssignmentCache is a thread safe, atomic, copy on write cache of v2.ClusterLoadAssignment objects.
// It uses the same capacity-one channel-as-mutex pattern as clusterCache,
// keyed and sorted by ClusterName.
type clusterLoadAssignmentCache chan []*v2.ClusterLoadAssignment

// init must be called before clusterLoadAssignmentCache is used for the first time.
func (c *clusterLoadAssignmentCache) init() {
	*c = make(clusterLoadAssignmentCache, 1)
	*c <- nil // prime cache
}

// Values returns a copy of the contents of the cache.
func (c clusterLoadAssignmentCache) Values() []*v2.ClusterLoadAssignment {
	v := <-c
	r := make([]*v2.ClusterLoadAssignment, len(v))
	copy(r, v)
	c <- v
	return r
}

// with executes f with the value of the stored in the cache.
// the value returned from f replaces the contents in the cache.
func (c clusterLoadAssignmentCache) with(f func([]*v2.ClusterLoadAssignment) []*v2.ClusterLoadAssignment) {
	v := <-c
	v = f(v)
	// TODO(dfc) Add and Remove do not (currently) affect the sort order
	// so it might be possible to avoid always sorting.
	sort.Sort(clusterLoadAssignmentsByName(v))
	c <- v
}

// Add adds an entry to the cache. If a ClusterLoadAssignment with the same
// name exists, it is replaced.
// TODO(dfc) make Add variadic to support atomic addition of several clusterLoadAssignments
// also niladic Add can be used as a no-op notify for watchers.
func (c clusterLoadAssignmentCache) Add(e *v2.ClusterLoadAssignment) {
	c.with(func(in []*v2.ClusterLoadAssignment) []*v2.ClusterLoadAssignment {
		sort.Sort(clusterLoadAssignmentsByName(in))
		i := sort.Search(len(in), func(i int) bool { return in[i].ClusterName >= e.ClusterName })
		if i < len(in) && in[i].ClusterName == e.ClusterName {
			// e is already present, replace
			in[i] = e
			return in
		}
		// e is not present, append and sort
		in = append(in, e)
		sort.Sort(clusterLoadAssignmentsByName(in))
		return in
	})
}

// Remove removes the named entry from the cache. If the entry
// is not present in the cache, the operation is a no-op.
func (c clusterLoadAssignmentCache) Remove(name string) {
	c.with(func(in []*v2.ClusterLoadAssignment) []*v2.ClusterLoadAssignment {
		sort.Sort(clusterLoadAssignmentsByName(in))
		i := sort.Search(len(in), func(i int) bool { return in[i].ClusterName >= name })
		if i < len(in) && in[i].ClusterName == name {
			// the entry is present, remove
			in = append(in[:i], in[i+1:]...)
		}
		return in
	})
}

// clusterLoadAssignmentsByName sorts assignments lexically by ClusterName.
type clusterLoadAssignmentsByName []*v2.ClusterLoadAssignment

func (c clusterLoadAssignmentsByName) Len() int           { return len(c) }
func (c clusterLoadAssignmentsByName) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
func (c clusterLoadAssignmentsByName) Less(i, j int) bool { return c[i].ClusterName < c[j].ClusterName }
// listenerCache is a thread safe, atomic, copy on write cache of v2.Listener objects.
// It uses the same capacity-one channel-as-mutex pattern as clusterCache,
// keyed and sorted by Name.
type listenerCache chan []*v2.Listener

// init must be called before listenerCache is used for the first time.
func (lc *listenerCache) init() {
	*lc = make(listenerCache, 1)
	*lc <- nil // prime cache
}

// Values returns a copy of the contents of the cache.
func (lc listenerCache) Values() []*v2.Listener {
	v := <-lc
	r := make([]*v2.Listener, len(v))
	copy(r, v)
	lc <- v
	return r
}

// with executes f with the value of the stored in the cache.
// the value returned from f replaces the contents in the cache.
func (lc listenerCache) with(f func([]*v2.Listener) []*v2.Listener) {
	l := <-lc
	l = f(l)
	// TODO(dfc) Add and Remove do not (currently) affect the sort order
	// so it might be possible to avoid always sorting.
	sort.Sort(listenersByName(l))
	lc <- l
}

// Add adds an entry to the cache. If a Listener with the same
// name exists, it is replaced.
// TODO(dfc) make Add variadic to support atomic addition of several listeners
// also niladic Add can be used as a no-op notify for watchers.
func (lc listenerCache) Add(r *v2.Listener) {
	lc.with(func(in []*v2.Listener) []*v2.Listener {
		sort.Sort(listenersByName(in))
		i := sort.Search(len(in), func(i int) bool { return in[i].Name >= r.Name })
		if i < len(in) && in[i].Name == r.Name {
			// r is already present, replace
			in[i] = r
			return in
		}
		// r is not present, append and sort
		in = append(in, r)
		sort.Sort(listenersByName(in))
		return in
	})
}

// Remove removes the named entry from the cache. If the entry
// is not present in the cache, the operation is a no-op.
func (lc listenerCache) Remove(name string) {
	lc.with(func(in []*v2.Listener) []*v2.Listener {
		sort.Sort(listenersByName(in))
		i := sort.Search(len(in), func(i int) bool { return in[i].Name >= name })
		if i < len(in) && in[i].Name == name {
			// the entry is present, remove
			in = append(in[:i], in[i+1:]...)
		}
		return in
	})
}

// listenersByName sorts listeners lexically by Name.
type listenersByName []*v2.Listener

func (l listenersByName) Len() int           { return len(l) }
func (l listenersByName) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
func (l listenersByName) Less(i, j int) bool { return l[i].Name < l[j].Name }
// virtualHostCache is a thread safe, atomic, copy on write cache of v2.VirtualHost objects.
// (A stray copy-pasted clusterLoadAssignmentCache comment previously sat here.)
// It uses the same capacity-one channel-as-mutex pattern as clusterCache,
// keyed and sorted by Name.
type virtualHostCache chan []*v2.VirtualHost

// init must be called before virtualHostCache is used for the first time.
func (vc *virtualHostCache) init() {
	*vc = make(virtualHostCache, 1)
	*vc <- nil // prime cache
}

// Values returns a copy of the contents of the cache.
func (vc virtualHostCache) Values() []*v2.VirtualHost {
	v := <-vc
	r := make([]*v2.VirtualHost, len(v))
	copy(r, v)
	vc <- v
	return r
}

// with executes f with the value of the stored in the cache.
// the value returned from f replaces the contents in the cache.
func (vc virtualHostCache) with(f func([]*v2.VirtualHost) []*v2.VirtualHost) {
	v := <-vc
	v = f(v)
	// TODO(dfc) Add and Remove do not (currently) affect the sort order
	// so it might be possible to avoid always sorting.
	sort.Sort(virtualHostsByName(v))
	vc <- v
}

// Add adds an entry to the cache. If a VirtualHost with the same
// name exists, it is replaced.
// TODO(dfc) make Add variadic to support atomic addition of several clusters
// also niladic Add can be used as a no-op notify for watchers.
func (vc virtualHostCache) Add(r *v2.VirtualHost) {
	vc.with(func(in []*v2.VirtualHost) []*v2.VirtualHost {
		sort.Sort(virtualHostsByName(in))
		i := sort.Search(len(in), func(i int) bool { return in[i].Name >= r.Name })
		if i < len(in) && in[i].Name == r.Name {
			// r is already present, replace
			in[i] = r
			return in
		}
		// r is not present, append and sort
		in = append(in, r)
		sort.Sort(virtualHostsByName(in))
		return in
	})
}

// Remove removes the named entry from the cache. If the entry
// is not present in the cache, the operation is a no-op.
func (vc virtualHostCache) Remove(name string) {
	vc.with(func(in []*v2.VirtualHost) []*v2.VirtualHost {
		sort.Sort(virtualHostsByName(in))
		i := sort.Search(len(in), func(i int) bool { return in[i].Name >= name })
		if i < len(in) && in[i].Name == name {
			// the entry is present, remove
			in = append(in[:i], in[i+1:]...)
		}
		return in
	})
}

// virtualHostsByName sorts virtual hosts lexically by Name.
type virtualHostsByName []*v2.VirtualHost

func (v virtualHostsByName) Len() int           { return len(v) }
func (v virtualHostsByName) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
func (v virtualHostsByName) Less(i, j int) bool { return v[i].Name < v[j].Name }
|
// Package driver is an sqlite driver.
package driver
import (
"database/sql"
"embed"
"encoding/base64"
"fmt"
"io/fs"
"strings"
_ "modernc.org/sqlite"
"github.com/volatiletech/sqlboiler/v4/drivers"
"github.com/volatiletech/sqlboiler/v4/importers"
)
// templates holds the template overrides bundled into the driver binary.
//go:embed override
var templates embed.FS
// init registers this driver with sqlboiler under the name "sqlite3" so it
// can be selected from generator configuration.
func init() {
	drivers.RegisterFromInit("sqlite3", &SQLiteDriver{})
}
// Assemble the db info using a fresh SQLiteDriver instance; package-level
// convenience wrapper around SQLiteDriver.Assemble.
func Assemble(config drivers.Config) (dbinfo *drivers.DBInfo, err error) {
	driver := &SQLiteDriver{}
	return driver.Assemble(config)
}
// SQLiteDriver holds the database connection string and a handle
// to the database connection.
type SQLiteDriver struct {
	connStr           string             // connection string built by SQLiteBuildQueryString
	dbConn            *sql.DB            // open handle, set during Assemble/Open
	configForeignKeys []drivers.ForeignKey // foreign keys supplied via configuration
}
// Templates returns the template overrides that should be added/overridden,
// keyed by path relative to the override directory, with base64-encoded
// contents.
//
// The fs.WalkDir error was previously discarded, which could silently yield
// an incomplete template set; it is now returned to the caller.
func (s SQLiteDriver) Templates() (map[string]string, error) {
	tpls := make(map[string]string)
	err := fs.WalkDir(templates, "override", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		b, err := fs.ReadFile(templates, path)
		if err != nil {
			return err
		}
		tpls[strings.Replace(path, "override/", "", 1)] = base64.StdEncoding.EncodeToString(b)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return tpls, nil
}
// Assemble the db info: connects to the configured sqlite database, inspects
// its tables concurrently, and returns the DBInfo used for code generation.
//
// A deferred recover converts panics raised during assembly (e.g. from the
// config Must* accessors) into returned errors. Previously the blanket
// r.(error) assertion itself panicked whenever the recovered value was not
// an error; non-error panic values are now wrapped with fmt.Errorf.
func (s SQLiteDriver) Assemble(config drivers.Config) (dbinfo *drivers.DBInfo, err error) {
	defer func() {
		if r := recover(); r != nil && err == nil {
			dbinfo = nil
			if e, ok := r.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("%v", r)
			}
		}
	}()
	dbname := config.MustString(drivers.ConfigDBName)
	whitelist, _ := config.StringSlice(drivers.ConfigWhitelist)
	blacklist, _ := config.StringSlice(drivers.ConfigBlacklist)
	concurrency := config.DefaultInt(drivers.ConfigConcurrency, drivers.DefaultConcurrency)
	s.connStr = SQLiteBuildQueryString(dbname)
	s.configForeignKeys = config.MustForeignKeys(drivers.ConfigForeignKeys)
	// "sqlite" is the driver name registered by modernc.org/sqlite.
	s.dbConn, err = sql.Open("sqlite", s.connStr)
	if err != nil {
		return nil, fmt.Errorf("sqlboiler-sqlite failed to connect to database: %w", err)
	}
	// Close the handle when assembly finishes; a close failure invalidates
	// the result.
	defer func() {
		if e := s.dbConn.Close(); e != nil {
			dbinfo = nil
			err = e
		}
	}()
	dbinfo = &drivers.DBInfo{
		Dialect: drivers.Dialect{
			LQ:                '"',
			RQ:                '"',
			UseSchema:         false,
			UseDefaultKeyword: true,
			UseLastInsertID:   false,
		},
	}
	dbinfo.Tables, err = drivers.TablesConcurrently(s, "", whitelist, blacklist, concurrency)
	if err != nil {
		return nil, err
	}
	return dbinfo, err
}
// SQLiteBuildQueryString builds a connection string for the given SQLite
// database file (read-only mode, UTC location).
func SQLiteBuildQueryString(file string) string {
	return fmt.Sprintf("file:%s?_loc=UTC&mode=ro", file)
}
// Open opens the database connection using the connection string.
//
// The driver name must be "sqlite" — the name registered by the imported
// modernc.org/sqlite package and the one Assemble uses; the previous
// "sqlite3" is never registered here, so Open always failed with
// "sql: unknown driver".
//
// NOTE(review): the value receiver means the opened handle is assigned to a
// copy of the driver and lost to the caller — confirm whether this method
// should use a pointer receiver.
func (s SQLiteDriver) Open() error {
	var err error
	s.dbConn, err = sql.Open("sqlite", s.connStr)
	if err != nil {
		return err
	}
	return nil
}
// Close closes the database connection.
// NOTE(review): the close error is discarded — confirm this is intentional.
func (s SQLiteDriver) Close() {
	s.dbConn.Close()
}
// TableNames connects to the sqlite database and retrieves all table names
// from sqlite_master, filtered by the whitelist/blacklist (entries may be
// table names or table.column specs; drivers.TablesFromList extracts the
// table parts). The internal bookkeeping table sqlite_sequence is always
// excluded.
//
// rows.Err() is now checked after iteration; previously a mid-iteration
// failure silently truncated the result.
func (s SQLiteDriver) TableNames(schema string, whitelist, blacklist []string) ([]string, error) {
	query := `SELECT name FROM sqlite_master WHERE type='table'`
	args := []interface{}{}
	if len(whitelist) > 0 {
		tables := drivers.TablesFromList(whitelist)
		if len(tables) > 0 {
			// strings.Repeat produces ",?,?,..."; [1:] drops the leading comma.
			query += fmt.Sprintf(" and tbl_name in (%s)", strings.Repeat(",?", len(tables))[1:])
			for _, w := range tables {
				args = append(args, w)
			}
		}
	}
	if len(blacklist) > 0 {
		tables := drivers.TablesFromList(blacklist)
		if len(tables) > 0 {
			query += fmt.Sprintf(" and tbl_name not in (%s)", strings.Repeat(",?", len(tables))[1:])
			for _, b := range tables {
				args = append(args, b)
			}
		}
	}
	rows, err := s.dbConn.Query(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		if name != "sqlite_sequence" {
			names = append(names, name)
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return names, nil
}
// ViewNames connects to the sqlite database and retrieves all view names
// from sqlite_master, filtered by the whitelist/blacklist the same way as
// TableNames.
//
// rows.Err() is now checked after iteration; previously a mid-iteration
// failure silently truncated the result.
func (s SQLiteDriver) ViewNames(schema string, whitelist, blacklist []string) ([]string, error) {
	query := `SELECT name FROM sqlite_master WHERE type='view'`
	args := []interface{}{}
	if len(whitelist) > 0 {
		views := drivers.TablesFromList(whitelist)
		if len(views) > 0 {
			// strings.Repeat produces ",?,?,..."; [1:] drops the leading comma.
			query += fmt.Sprintf(" and tbl_name in (%s)", strings.Repeat(",?", len(views))[1:])
			for _, w := range views {
				args = append(args, w)
			}
		}
	}
	if len(blacklist) > 0 {
		views := drivers.TablesFromList(blacklist)
		if len(views) > 0 {
			query += fmt.Sprintf(" and tbl_name not in (%s)", strings.Repeat(",?", len(views))[1:])
			for _, b := range views {
				args = append(args, b)
			}
		}
	}
	rows, err := s.dbConn.Query(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		if name != "sqlite_sequence" {
			names = append(names, name)
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return names, nil
}
// ViewCapabilities return what actions are allowed for a view.
func (s SQLiteDriver) ViewCapabilities(schema, name string) (drivers.ViewCapabilities, error) {
	// Inserts may be allowed with the presence of an INSTEAD OF TRIGGER
	// but it is not yet implemented.
	// See: https://www.sqlite.org/lang_createview.html
	return drivers.ViewCapabilities{
		CanInsert: false,
		CanUpsert: false,
	}, nil
}
// ViewColumns retrieves column information for a view. Views share the same
// pragma-based introspection as tables, so this delegates to Columns.
func (s SQLiteDriver) ViewColumns(schema, tableName string, whitelist, blacklist []string) ([]drivers.Column, error) {
	return s.Columns(schema, tableName, whitelist, blacklist)
}
// sqliteIndex mirrors one row of `PRAGMA index_list`, plus the covered
// column names gathered separately via `PRAGMA index_info`.
type sqliteIndex struct {
	SeqNum  int      // index ordinal within the listing
	Unique  int      // nonzero if the index is UNIQUE
	Partial int      // nonzero if the index has a WHERE clause
	Name    string   // index name
	Origin  string   // how the index was created (per index_list output)
	Columns []string // names of the indexed columns, in index order
}
// sqliteTableInfo mirrors one row of `PRAGMA table_xinfo` (one per column).
type sqliteTableInfo struct {
	Cid          string  // column id
	Name         string  // column name
	Type         string  // declared SQL type
	NotNull      bool    // NOT NULL constraint present
	DefaultValue *string // default value expression, nil when absent
	Pk           int     // >0 when part of the primary key (1-based position)
	Hidden       int     // nonzero for hidden/generated columns (xinfo only)
}
// tableInfo returns one entry per column of tableName, as reported by
// PRAGMA table_xinfo (which, unlike table_info, also lists hidden columns).
// PRAGMA arguments cannot be bound as parameters, hence the Sprintf;
// tableName is assumed to come from the schema, not user input — confirm
// at call sites.
//
// rows.Err() is now checked after iteration; previously a mid-iteration
// failure silently truncated the result.
func (s SQLiteDriver) tableInfo(tableName string) ([]*sqliteTableInfo, error) {
	var ret []*sqliteTableInfo
	rows, err := s.dbConn.Query(fmt.Sprintf("PRAGMA table_xinfo('%s')", tableName))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		tinfo := &sqliteTableInfo{}
		if err := rows.Scan(&tinfo.Cid, &tinfo.Name, &tinfo.Type, &tinfo.NotNull, &tinfo.DefaultValue, &tinfo.Pk, &tinfo.Hidden); err != nil {
			return nil, fmt.Errorf("unable to scan for table %s: %w", tableName, err)
		}
		ret = append(ret, tinfo)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return ret, nil
}
// indexes returns the indexes defined on tableName (via PRAGMA index_list),
// each populated with the columns it covers (via PRAGMA index_info).
//
// rows.Err() is now checked, and the per-index column query is split into
// indexColumns so its result set is closed on every path; previously a Scan
// error inside the inner loop returned without closing rowsColumns, leaking
// the statement.
func (s SQLiteDriver) indexes(tableName string) ([]*sqliteIndex, error) {
	var ret []*sqliteIndex
	rows, err := s.dbConn.Query(fmt.Sprintf("PRAGMA index_list('%s')", tableName))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var idx = &sqliteIndex{}
		if err := rows.Scan(&idx.SeqNum, &idx.Name, &idx.Unique, &idx.Origin, &idx.Partial); err != nil {
			return nil, err
		}
		// get all columns stored within the index
		columns, err := s.indexColumns(idx.Name)
		if err != nil {
			return nil, err
		}
		idx.Columns = columns
		ret = append(ret, idx)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return ret, nil
}

// indexColumns returns the column names covered by the named index, in
// index order. Extracted from indexes so defer reliably closes the result
// set even when a scan fails.
func (s SQLiteDriver) indexColumns(indexName string) ([]string, error) {
	rowsColumns, err := s.dbConn.Query(fmt.Sprintf("PRAGMA index_info('%s')", indexName))
	if err != nil {
		return nil, err
	}
	defer rowsColumns.Close()
	var columns []string
	for rowsColumns.Next() {
		var rankIndex, rankTable int
		var colName string
		if err := rowsColumns.Scan(&rankIndex, &rankTable, &colName); err != nil {
			return nil, fmt.Errorf("unable to scan for index %s: %w", indexName, err)
		}
		columns = append(columns, colName)
	}
	if err := rowsColumns.Err(); err != nil {
		return nil, err
	}
	return columns, nil
}
// Columns takes a table name and attempts to retrieve the table information
// from the database. It retrieves the column names
// and column types and returns those as a []Column after TranslateColumnType()
// converts the SQL types to Go types, for example: "varchar" to "string"
func (s SQLiteDriver) Columns(schema, tableName string, whitelist, blacklist []string) ([]drivers.Column, error) {
	var columns []drivers.Column

	// get all indexes
	idxs, err := s.indexes(tableName)
	if err != nil {
		return nil, err
	}

	// finally get the remaining information about the columns
	tinfo, err := s.tableInfo(tableName)
	if err != nil {
		return nil, err
	}

	// Detect an explicit AUTOINCREMENT keyword in the table's DDL.
	query := "SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = ? AND sql LIKE '%AUTOINCREMENT%'"
	result, err := s.dbConn.Query(query, tableName)
	if err != nil {
		return nil, err
	}
	tableHasAutoIncr := result.Next()
	// Bug fix: Next() returning false may signal a query error rather than
	// an empty result set; check Err() before treating it as "no match".
	if err := result.Err(); err != nil {
		return nil, err
	}
	if err := result.Close(); err != nil {
		return nil, err
	}

	var whiteColumns, blackColumns []string
	if len(whitelist) != 0 {
		whiteColumns = drivers.ColumnsFromList(whitelist, tableName)
	}
	if len(blacklist) != 0 {
		blackColumns = drivers.ColumnsFromList(blacklist, tableName)
	}

	// Count primary-key columns: a lone INTEGER PRIMARY KEY is an implicit
	// rowid alias and therefore auto-incrementing (see below).
	nPkeys := 0
	for _, column := range tinfo {
		if column.Pk != 0 {
			nPkeys++
		}
	}

ColumnLoop:
	for _, column := range tinfo {
		// Apply the whitelist when present; otherwise the blacklist.
		if len(whiteColumns) != 0 {
			found := false
			for _, white := range whiteColumns {
				if white == column.Name {
					found = true
					break
				}
			}
			if !found {
				continue
			}
		} else if len(blackColumns) != 0 {
			for _, black := range blackColumns {
				if black == column.Name {
					continue ColumnLoop
				}
			}
		}

		bColumn := drivers.Column{
			Name:       column.Name,
			FullDBType: strings.ToUpper(column.Type),
			DBType:     strings.ToUpper(column.Type),
			Nullable:   !column.NotNull,
		}

		// also get a correct information for Unique
		for _, idx := range idxs {
			// A unique index with multiple columns does not make
			// the individual column unique
			if len(idx.Columns) > 1 {
				continue
			}
			for _, name := range idx.Columns {
				if name == column.Name {
					// A column is unique if it has a unique non-partial index
					bColumn.Unique = idx.Unique > 0 && idx.Partial == 0
				}
			}
		}

		isPrimaryKeyInteger := column.Pk == 1 && bColumn.FullDBType == "INTEGER"
		// This is special behavior noted in the sqlite documentation.
		// An integer primary key becomes synonymous with the internal ROWID
		// and acts as an auto incrementing value. Although there's important
		// differences between using the keyword AUTOINCREMENT and this inferred
		// version, they don't matter here so just masquerade as the same thing as
		// above.
		autoIncr := isPrimaryKeyInteger && (tableHasAutoIncr || nPkeys == 1)

		// See: https://github.com/sqlite/sqlite/blob/91f621531dc1cb9ba5f6a47eb51b1de9ed8bdd07/src/pragma.c#L1165
		bColumn.AutoGenerated = autoIncr || column.Hidden == 2 || column.Hidden == 3

		if column.DefaultValue != nil {
			bColumn.Default = *column.DefaultValue
		} else if autoIncr {
			bColumn.Default = "auto_increment"
		} else if bColumn.AutoGenerated {
			bColumn.Default = "auto_generated"
		}

		if bColumn.Nullable && bColumn.Default == "" {
			bColumn.Default = "NULL"
		}

		columns = append(columns, bColumn)
	}

	return columns, nil
}
// PrimaryKeyInfo looks up the primary key for a table. It returns nil (and
// no error) when the table has no declared primary key.
func (s SQLiteDriver) PrimaryKeyInfo(schema, tableName string) (*drivers.PrimaryKey, error) {
	// PK membership is part of the per-column table info (Pk > 0).
	tinfo, err := s.tableInfo(tableName)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, col := range tinfo {
		if col.Pk > 0 {
			names = append(names, col.Name)
		}
	}
	if len(names) == 0 {
		return nil, nil
	}
	return &drivers.PrimaryKey{Columns: names}, nil
}
// ForeignKeyInfo retrieves the foreign keys for a given table name,
// merging keys discovered in the database with those from configuration.
func (s SQLiteDriver) ForeignKeyInfo(schema, tableName string) ([]drivers.ForeignKey, error) {
	fromDB, err := s.foreignKeyInfoFromDB(schema, tableName)
	if err != nil {
		return nil, fmt.Errorf("read foreign keys info from db: %w", err)
	}
	combined := drivers.CombineConfigAndDBForeignKeys(s.configForeignKeys, tableName, fromDB)
	return combined, nil
}
// foreignKeyInfoFromDB reads tableName's foreign keys via PRAGMA
// foreign_key_list. Key names are synthesized ("FK_<id>") because SQLite
// does not expose constraint names through this pragma.
func (s SQLiteDriver) foreignKeyInfoFromDB(schema, tableName string) ([]drivers.ForeignKey, error) {
	var fkeys []drivers.ForeignKey
	query := fmt.Sprintf("PRAGMA foreign_key_list('%s')", tableName)
	// Bug fix: the table name is already interpolated into the pragma text
	// and PRAGMA statements take no bind parameters, so the original call
	// passing tableName as a query argument supplied an argument with no
	// placeholder to bind it to.
	rows, err := s.dbConn.Query(query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var fkey drivers.ForeignKey
		var onu, ond, match string
		var id, seq int
		fkey.Table = tableName
		err = rows.Scan(&id, &seq, &fkey.ForeignTable, &fkey.Column, &fkey.ForeignColumn, &onu, &ond, &match)
		if err != nil {
			return nil, err
		}
		fkey.Name = fmt.Sprintf("FK_%d", id)
		fkeys = append(fkeys, fkey)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return fkeys, nil
}
// TranslateColumnType converts sqlite database types to Go types, for example
// "varchar" to "string" and "bigint" to "int64". It returns this parsed data
// as a Column object.
// https://sqlite.org/datatype3.html
func (SQLiteDriver) TranslateColumnType(c drivers.Column) drivers.Column {
	// Resolve both variants once, then pick based on nullability; the base
	// type is everything before any "(size)" suffix.
	var nullType, plainType string
	switch strings.Split(c.DBType, "(")[0] {
	case "INT", "INTEGER", "BIGINT":
		nullType, plainType = "null.Int64", "int64"
	case "TINYINT", "INT8":
		nullType, plainType = "null.Int8", "int8"
	case "SMALLINT", "INT2":
		nullType, plainType = "null.Int16", "int16"
	case "MEDIUMINT":
		nullType, plainType = "null.Int32", "int32"
	case "UNSIGNED BIG INT":
		nullType, plainType = "null.Uint64", "uint64"
	case "CHARACTER", "VARCHAR", "VARYING CHARACTER", "NCHAR",
		"NATIVE CHARACTER", "NVARCHAR", "TEXT", "CLOB":
		nullType, plainType = "null.String", "string"
	case "BLOB":
		nullType, plainType = "null.Bytes", "[]byte"
	case "FLOAT":
		nullType, plainType = "null.Float32", "float32"
	case "REAL", "DOUBLE", "DOUBLE PRECISION":
		nullType, plainType = "null.Float64", "float64"
	case "NUMERIC", "DECIMAL":
		nullType, plainType = "types.NullDecimal", "types.Decimal"
	case "BOOLEAN":
		nullType, plainType = "null.Bool", "bool"
	case "DATE", "DATETIME":
		nullType, plainType = "null.Time", "time.Time"
	case "JSON":
		nullType, plainType = "null.JSON", "types.JSON"
	default:
		// Unknown affinities fall back to text, matching SQLite's
		// permissive typing.
		nullType, plainType = "null.String", "string"
	}
	if c.Nullable {
		c.Type = nullType
	} else {
		c.Type = plainType
	}
	return c
}
// Imports returns important imports for the driver
func (SQLiteDriver) Imports() (col importers.Collection, err error) {
	// Imports required by every generated model file.
	col.All = importers.Set{
		Standard: importers.List{
			`"strconv"`,
		},
	}
	// Files generated once per output package.
	col.Singleton = importers.Map{
		"sqlite_upsert": {
			Standard: importers.List{
				`"fmt"`,
				`"strings"`,
			},
			ThirdParty: importers.List{
				`"github.com/volatiletech/strmangle"`,
				`"github.com/volatiletech/sqlboiler/v4/drivers"`,
			},
		},
	}
	// Test files generated once per output package.
	col.TestSingleton = importers.Map{
		"sqlite3_suites_test": {
			Standard: importers.List{
				`"testing"`,
			},
		},
		"sqlite3_main_test": {
			Standard: importers.List{
				`"database/sql"`,
				`"fmt"`,
				`"io"`,
				`"math/rand"`,
				`"os"`,
				`"os/exec"`,
				`"path/filepath"`,
				`"regexp"`,
			},
			ThirdParty: importers.List{
				`"github.com/pkg/errors"`,
				`"github.com/spf13/viper"`,
				`_ "modernc.org/sqlite"`,
			},
		},
	}
	// Imports keyed by the Go type a column translates to (see
	// TranslateColumnType); only the types actually used get imported.
	col.BasedOnType = importers.Map{
		"null.Float32": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Float64": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Int": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Int8": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Int16": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Int32": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Int64": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Uint": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Uint8": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Uint16": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Uint32": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Uint64": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.String": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Bool": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Time": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"null.Bytes": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
		"time.Time": {
			Standard: importers.List{`"time"`},
		},
		"types.Decimal": {
			ThirdParty: importers.List{`"github.com/volatiletech/sqlboiler/v4/types"`},
		},
		"types.NullDecimal": {
			ThirdParty: importers.List{`"github.com/volatiletech/sqlboiler/v4/types"`},
		},
		"types.JSON": {
			ThirdParty: importers.List{`"github.com/volatiletech/sqlboiler/v4/types"`},
		},
		"null.JSON": {
			ThirdParty: importers.List{`"github.com/volatiletech/null/v8"`},
		},
	}
	return col, err
}
|
package config
import (
"context"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestFileWatcherSource verifies that the file watcher fires OnConfigChange
// when the CA file changes, when a policy's kubernetes token file changes,
// and when the underlying source itself is updated.
//
// NOTE(review): the callback closes ch exactly once (sync.Once), so after
// the first event fires, the second and third select statements below read
// from an already-closed channel and succeed immediately — they do not
// actually verify that their events fired. Consider sending one token per
// callback on a buffered channel instead; confirm intended semantics.
func TestFileWatcherSource(t *testing.T) {
	ctx := context.Background()
	tmpdir := t.TempDir()

	// Seed both watched files before the watcher is created.
	err := os.WriteFile(filepath.Join(tmpdir, "example.txt"), []byte{1, 2, 3, 4}, 0o600)
	if !assert.NoError(t, err) {
		return
	}
	err = os.WriteFile(filepath.Join(tmpdir, "kubernetes-example.txt"), []byte{1, 2, 3, 4}, 0o600)
	if !assert.NoError(t, err) {
		return
	}
	ssrc := NewStaticSource(&Config{
		Options: &Options{
			CAFile: filepath.Join(tmpdir, "example.txt"),
			Policies: []Policy{{
				KubernetesServiceAccountTokenFile: filepath.Join(tmpdir, "kubernetes-example.txt"),
			}},
		},
	})
	src := NewFileWatcherSource(ssrc)
	var closeOnce sync.Once
	ch := make(chan struct{})
	src.OnConfigChange(context.Background(), func(ctx context.Context, cfg *Config) {
		closeOnce.Do(func() {
			close(ch)
		})
	})
	// Touch the CA file and expect a change notification.
	err = os.WriteFile(filepath.Join(tmpdir, "example.txt"), []byte{5, 6, 7, 8}, 0o600)
	if !assert.NoError(t, err) {
		return
	}
	select {
	case <-ch:
	case <-time.After(time.Second):
		t.Error("expected OnConfigChange to be fired after modifying a file")
	}
	// Touch the policy token file and expect a change notification.
	err = os.WriteFile(filepath.Join(tmpdir, "kubernetes-example.txt"), []byte{5, 6, 7, 8}, 0o600)
	if !assert.NoError(t, err) {
		return
	}
	select {
	case <-ch:
	case <-time.After(time.Second):
		t.Error("expected OnConfigChange to be fired after modifying a policy file")
	}
	// Replace the config in the underlying static source.
	ssrc.SetConfig(ctx, &Config{
		Options: &Options{
			CAFile: filepath.Join(tmpdir, "example.txt"),
		},
	})
	select {
	case <-ch:
	case <-time.After(time.Second):
		t.Error("expected OnConfigChange to be fired after triggering a change to the underlying source")
	}
}
|
package factories
import (
"bytes"
"context"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"syscall"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/giventocode/azure-blob-md5/internal"
)
// azUtil bundles the Azure Blob service and container clients together with
// the shared-key credential used to build them.
type azUtil struct {
	serviceURL   *azblob.ServiceURL
	containerURL *azblob.ContainerURL
	creds        *azblob.SharedKeyCredential
}
// newAzUtil builds an azUtil for the given storage account and container.
// baseBlobURL overrides the default "blob.core.windows.net" endpoint suffix;
// pass "" for the public Azure cloud.
func newAzUtil(accountName string, accountKey string, container string, baseBlobURL string) (*azUtil, error) {
	creds, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return nil, err
	}
	// Named pl so the imported "pipeline" package is not shadowed by a local
	// variable (the original reused the package name).
	pl := newPipeline(creds, azblob.PipelineOptions{
		Retry: azblob.RetryOptions{
			Policy:        azblob.RetryPolicyFixed,
			TryTimeout:    30 * time.Second,
			MaxTries:      500,
			RetryDelay:    100 * time.Millisecond,
			MaxRetryDelay: 2 * time.Second,
		},
	})
	baseURL, err := parseBaseURL(accountName, baseBlobURL)
	if err != nil {
		return nil, err
	}
	surl := azblob.NewServiceURL(*baseURL, pl)
	curl := surl.NewContainerURL(container)
	return &azUtil{
		serviceURL:   &surl,
		containerURL: &curl,
		creds:        creds,
	}, nil
}
// downloadRange fetches count bytes of blobName starting at offset and
// returns exactly those bytes, erroring if the copied length differs.
func (p *azUtil) downloadRange(blobName string, offset int64, count int64) ([]byte, error) {
	bburl := p.containerURL.NewBlockBlobURL(blobName)
	ctx := context.Background()
	res, err := bburl.Download(ctx, offset, count, azblob.BlobAccessConditions{}, false)
	if err != nil {
		return nil, err
	}
	reader := res.Body(azblob.RetryReaderOptions{MaxRetryRequests: 30})
	defer reader.Close()
	// Pre-size the buffer to the expected length. The original seeded the
	// buffer with `count` zero bytes and sliced them back off afterwards,
	// allocating (and copying) the payload twice.
	wr := bytes.NewBuffer(make([]byte, 0, count))
	tmp := make([]byte, count)
	n, err := io.CopyBuffer(wr, reader, tmp)
	if err != nil {
		return nil, err
	}
	if n != count {
		return nil, fmt.Errorf(" received data len is different than expected. Expected:%d Received:%d ", count, n)
	}
	return wr.Bytes(), nil
}
// blobItemInfo carries either a listed blob or the error that terminated the
// listing; consumers of iterateBlobList must check err on every item.
type blobItemInfo struct {
	blob azblob.BlobItem
	err  error
}
// setMD5 stores hash as the Content-MD5 HTTP header of the named blob.
func (p *azUtil) setMD5(blobName string, hash []byte) error {
	blobURL := p.containerURL.NewBlobURL(blobName)
	headers := azblob.BlobHTTPHeaders{ContentMD5: hash}
	resp, err := blobURL.SetHTTPHeaders(context.Background(), headers, azblob.BlobAccessConditions{})
	if err != nil {
		return err
	}
	// Drain nothing; just release the response body.
	return resp.Response().Body.Close()
}
// iterateBlobList lists every blob under prefix (including metadata) and
// streams the items on the returned channel, buffered to chanDepth. A
// listing failure is delivered as an item with err set, after which the
// channel is closed; consumers must check err on each received item.
func (p *azUtil) iterateBlobList(prefix string, chanDepth int) <-chan blobItemInfo {
	blobs := make(chan blobItemInfo, chanDepth)
	var marker azblob.Marker
	options := azblob.ListBlobsSegmentOptions{
		Details: azblob.BlobListingDetails{
			Metadata: true},
		Prefix: prefix}
	// The goroutine owns the channel and is the only sender, so it closes it.
	go func() {
		defer close(blobs)
		for {
			ctx := context.Background()
			response, err := p.containerURL.ListBlobsFlatSegment(ctx, marker, options)
			if err != nil {
				blobs <- blobItemInfo{err: err}
				return
			}
			for _, blob := range response.Segment.BlobItems {
				blobs <- blobItemInfo{blob: blob}
			}
			// Keep paging while the service reports another segment.
			if response.NextMarker.NotDone() {
				marker = response.NextMarker
				continue
			}
			break
		}
	}()
	return blobs
}
// parseBaseURL builds the blob service URL for accountName. An empty baseURL
// targets the public Azure cloud (blob.core.windows.net); otherwise the
// result is "https://<account>.<baseURL>".
func parseBaseURL(accountName string, baseURL string) (*url.URL, error) {
	// Bug fix: the original declared `var url *url.URL`, shadowing the url
	// package, so subsequent url.Parse calls resolved to the (*url.URL).Parse
	// method on a nil receiver and panicked in ResolveReference.
	if baseURL == "" {
		baseURL = "blob.core.windows.net"
	}
	u, err := url.Parse(fmt.Sprintf("https://%s.%s", accountName, baseURL))
	if err != nil {
		return nil, err
	}
	return u, nil
}
// newPipeline assembles the HTTP pipeline used for all blob requests.
// Factories are ordered closest-to-API first, closest-to-wire last.
func newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
	if c == nil {
		panic("c can't be nil")
	}
	factories := []pipeline.Factory{
		azblob.NewTelemetryPolicyFactory(o.Telemetry),
		azblob.NewUniqueRequestIDPolicyFactory(),
		azblob.NewRetryPolicyFactory(o.Retry),
		c,
		pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
		azblob.NewRequestLogPolicyFactory(o.RequestLog),
	}
	opts := pipeline.Options{HTTPSender: newHTTPClientFactory(), Log: o.Log}
	return pipeline.NewPipeline(factories, opts)
}
// newHTTPClientFactory returns the factory producing the terminal policy
// that performs the actual HTTP round trip.
func newHTTPClientFactory() pipeline.Factory {
	return new(clientPolicyFactory)
}
// clientPolicyFactory creates the clientPolicy instances that sit at the end
// of the request pipeline.
type clientPolicyFactory struct {
}

// New returns a clientPolicy for the given policy options. (The previous
// comment described this as a logging policy, which it is not.)
func (f *clientPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
	return &clientPolicy{po: po}
}
// clientPolicy is the terminal pipeline policy: it executes requests with
// the shared pipelineHTTPClient.
type clientPolicy struct {
	po *pipeline.PolicyOptions
}
// winWSAETIMEDOUT is the Windows winsock error code for a connection timeout.
const winWSAETIMEDOUT syscall.Errno = 10060

// isWinsockTimeOutError reports whether err wraps a Windows "connectex"
// syscall failure whose cause is a winsock timeout (WSAETIMEDOUT) or a
// temporary errno; such errors are wrapped as retriable. It returns nil for
// every other error shape.
func isWinsockTimeOutError(err error) net.Error {
	if uerr, ok := err.(*url.Error); ok {
		if derr, ok := uerr.Err.(*net.OpError); ok {
			if serr, ok := derr.Err.(*os.SyscallError); ok && serr.Syscall == "connectex" {
				if winerr, ok := serr.Err.(syscall.Errno); ok && (winerr == winWSAETIMEDOUT || winerr.Temporary()) {
					return &retriableError{error: err}
				}
			}
		}
	}
	return nil
}
// isDialConnectError reports whether err is a failed "connect" syscall
// wrapped in a url.Error; such dial failures are wrapped as retriable.
// A nil result means the error does not have that shape.
func isDialConnectError(err error) net.Error {
	uerr, ok := err.(*url.Error)
	if !ok {
		return nil
	}
	operr, ok := uerr.Err.(*net.OpError)
	if !ok {
		return nil
	}
	scerr, ok := operr.Err.(*os.SyscallError)
	if !ok || scerr.Syscall != "connect" {
		return nil
	}
	return &retriableError{error: err}
}
// isRetriableDialError classifies err as a retriable dial failure, checking
// the Windows winsock-timeout shape first and plain connect failures second.
func isRetriableDialError(err error) net.Error {
	if werr := isWinsockTimeOutError(err); werr != nil {
		return werr
	}
	return isDialConnectError(err)
}
// retriableError wraps an error so it satisfies net.Error with
// Temporary() == true, signalling the retry policy to try again.
type retriableError struct {
	error
}

// Timeout reports false: these errors are retried as temporary, not timeouts.
func (*retriableError) Timeout() bool {
	return false
}

// Temporary always reports true so the wrapped error is retried.
func (*retriableError) Temporary() bool {
	return true
}
// tcpKeepOpenMinLength is the request body size (8 MiB) at or above which the
// connection is left open for reuse; smaller requests set Close below.
const tcpKeepOpenMinLength = 8 * int64(internal.MB)

// Do executes the request with the shared pipelineHTTPClient, closing the
// connection for small requests and converting retriable dial failures so
// the retry policy can act on them.
func (p *clientPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
	req := request.WithContext(ctx)
	if req.ContentLength < tcpKeepOpenMinLength {
		req.Close = true
	}
	r, err := pipelineHTTPClient.Do(req)
	// NOTE(review): the response wrapper is built before the error check, so
	// on failure r may be nil — presumably pipeline.NewHTTPResponse tolerates
	// a nil *http.Response; confirm.
	pipresp := pipeline.NewHTTPResponse(r)
	if err != nil {
		if derr := isRetriableDialError(err); derr != nil {
			return pipresp, derr
		}
		err = pipeline.NewError(err, "HTTP request failed")
	}
	return pipresp, err
}
var pipelineHTTPClient = newpipelineHTTPClient()
func newpipelineHTTPClient() *http.Client {
return &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).Dial,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
IdleConnTimeout: 60 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
DisableKeepAlives: false,
DisableCompression: false,
MaxResponseHeaderBytes: 0}}
}
|
// DO NOT EDIT. This file was generated by "github.com/frk/gosql".
package testdata
import (
"github.com/frk/gosql"
"github.com/frk/gosql/internal/testdata/common"
)
// Exec runs the generated INSERT ... RETURNING statement for a single user
// row, scans the returned columns into q.result, and routes any scan error
// through the query's error handler. (Generated code — regenerate rather
// than editing the query by hand.)
func (q *InsertResultErrorHandlerSingleQuery) Exec(c gosql.Conn) error {
	const queryString = `INSERT INTO "test_user" AS u (
	"id"
	, "email"
	, "full_name"
	, "created_at"
) VALUES (
	$1
	, $2
	, $3
	, $4
)
RETURNING
u."id"
, u."email"
, u."full_name"
, u."created_at"` // `
	row := c.QueryRow(queryString,
		q.User.Id,
		q.User.Email,
		q.User.FullName,
		q.User.CreatedAt,
	)
	q.result = new(common.User)
	return q.erh.HandleError(row.Scan(
		&q.result.Id,
		&q.result.Email,
		&q.result.FullName,
		&q.result.CreatedAt,
	))
}
|
package repository
import (
"database/sql"
"github.com/bot/myteambot/app/models"
"github.com/volatiletech/sqlboiler/boil"
"github.com/volatiletech/sqlboiler/queries/qm"
)
// InsertGroup creates a new group row with the given chat ID and name.
// It panics on database errors, consistent with the other helpers in this
// package (the original silently discarded the insert error).
func InsertGroup(chatID int64, name string) {
	group := models.Group{ChatID: chatID, Name: name}
	if err := group.InsertG(boil.Infer()); err != nil {
		panic(err)
	}
}
// FindGroup returns the group with the given primary key, or nil when no
// such row exists. Any other database error panics.
func FindGroup(ID uint) *models.Group {
	group, err := models.Groups(qm.Where("id = ?", ID)).OneG()
	switch {
	case err == nil, err == sql.ErrNoRows:
		return group
	default:
		panic(err)
	}
}
// GetAllGroups returns every group row (nil when the table is empty).
// Unexpected database errors panic.
func GetAllGroups() []*models.Group {
	groups, err := models.Groups().AllG()
	switch {
	case err == nil, err == sql.ErrNoRows:
		return groups
	default:
		panic(err)
	}
}
// FindGroupByChatID returns the group registered for the given chat ID, or
// nil when none exists. Any other database error panics.
func FindGroupByChatID(chatID int64) *models.Group {
	group, err := models.Groups(qm.Where("chat_id = ?", chatID)).OneG()
	switch {
	case err == nil, err == sql.ErrNoRows:
		return group
	default:
		panic(err)
	}
}
// UpdateGroup renames the group with the given ID. It is a no-op when the
// group does not exist (the original dereferenced the nil result of
// FindGroup and panicked with a nil-pointer error).
func UpdateGroup(ID uint, name string) {
	group := FindGroup(ID)
	if group == nil {
		return
	}
	group.Name = name
	// NOTE(review): the UpdateG result is still unchecked, matching the
	// original; confirm whether update failures should panic like the
	// other helpers here.
	group.UpdateG(boil.Infer())
}
// UpsertGroup updates the name of the group registered for chatID, creating
// the group first when it does not exist yet.
func UpsertGroup(chatID int64, name string) {
	existing := FindGroupByChatID(chatID)
	if existing != nil {
		UpdateGroup(existing.ID, name)
		return
	}
	InsertGroup(chatID, name)
}
|
package main
import "fmt"
// MAX is the number of elements used by the pointer-array demonstration.
const MAX int = 3

// main demonstrates an array of pointers, a plain pointer, and a pointer to
// a pointer, printing the values reachable through each. (Printing a pointer
// with %d shows its address in decimal — intentional for this demo.)
func main() {
	a := []int{12, 24, 36}
	var ptr [MAX]*int
	for i := 0; i < MAX; i++ {
		ptr[i] = &a[i]
	}
	for i := 0; i < MAX; i++ {
		fmt.Printf("values of a[%d] =%d\n", i, *ptr[i])
	}
	m := 100
	m1 := &m
	m2 := &m1
	fmt.Printf("the value is %d \n", m)
	fmt.Printf("the value is %d \n", m1)
	fmt.Printf("the value is %d \n", *m1)
	fmt.Printf("the value is %d \n", m2)
	fmt.Printf("the value is %d \n", *m2)
	fmt.Printf("the value is %d\n ", **m2)
}
|
package main
import (
"net/http"
"ratelimit/rate"
)
func myHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("DO SOME API STUFF!"))
}
// main serves the rate-limited demo handler on :2705.
func main() {
	mux := http.NewServeMux()
	mux.Handle("/bla", rate.RateHandler(http.HandlerFunc(myHandler)))
	// ListenAndServe always returns a non-nil error; the original discarded
	// it, hiding failures such as the port already being in use.
	if err := http.ListenAndServe(":2705", mux); err != nil {
		panic(err)
	}
}
|
package node
import (
"context"
"errors"
logging "github.com/ipfs/go-log"
"github.com/multiformats/go-multiaddr"
"github.com/shepf/star-tools/node/impl"
"github.com/shepf/star-tools/node/modules"
"github.com/shepf/star-tools/node/modules/helpers"
"github.com/shepf/star-tools/node/repo"
"github.com/shepf/star-tools/node/types"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/shepf/star-tools/api"
"github.com/shepf/star-tools/node/config"
"github.com/shepf/star-tools/node/modules/dtypes"
)
// log is the package-level logger for the node builder.
var log = logging.Logger("builder")

// special is a type used to give keys to modules which
// can't really be identified by the returned type
type special struct{ id int }

// invoke indexes the Settings.invokes slice; each constant below is a slot.
type invoke int
//nolint:golint
const (
	// libp2p invocation slots
	PstoreAddSelfKeysKey = invoke(iota)
	StartListeningKey
	BootstrapKey

	// daemon invocation slots
	ExtractApiKey
	HeadMetricsKey
	RunPeerTaggerKey
	SetApiEndpointKey

	_nInvokes // keep this last; it sizes the invokes slice in New
)
// Settings accumulates the dependency-injection configuration assembled by
// Option functions before the fx application is constructed in New.
type Settings struct {
	// modules is a map of constructors for DI
	//
	// In most cases the index will be a reflect. Type of element returned by
	// the constructor, but for some 'constructors' it's hard to specify what's
	// the return type should be (or the constructor returns fx group)
	modules map[interface{}]fx.Option

	// invokes are separate from modules as they can't be referenced by return
	// type, and must be applied in correct order
	invokes []fx.Option

	// nodeType selects which repo flavor (e.g. FullNode) this node runs as.
	nodeType repo.RepoType

	Online bool // Online option applied
	Config bool // Config option applied
}
// defaults returns the options applied to every node before user options.
func defaults() []Option {
	opts := []Option{
		Override(new(helpers.MetricsCtx), context.Background),
		Override(new(dtypes.ShutdownChan), make(chan struct{})),
		// Filecoin modules
	}
	return opts
}
// isType returns a predicate matching settings whose repo type equals t.
func isType(t repo.RepoType) func(s *Settings) bool {
	return func(s *Settings) bool {
		return s.nodeType == t
	}
}
// Online sets up basic libp2p node
func Online() Option {
	return Options(
		// make sure that online is applied before Config.
		// This is important because Config overrides some of Online units
		func(s *Settings) error { s.Online = true; return nil },
		ApplyIf(func(s *Settings) bool { return s.Config },
			Error(errors.New("the Online option must be set before Config option")),
		),
		//libp2p(),

		// common

		// Full node
		ApplyIf(isType(repo.FullNode)), // TODO: Fix offline mode
	)
}
// ConfigCommon wires the settings shared by every node type: it marks the
// Config option as applied, resolves the API listen multiaddr from cfg, and
// records that endpoint in the locked repo.
func ConfigCommon(cfg *config.Common) Option {
	return Options(
		func(s *Settings) error { s.Config = true; return nil },
		Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) {
			return multiaddr.NewMultiaddr(cfg.API.ListenAddress)
		}),
		Override(SetApiEndpointKey, func(lr repo.LockedRepo, e dtypes.APIEndpoint) error {
			return lr.SetAPIEndpoint(e)
		}),
	)
}
// ConfigFullNode applies the common configuration from a *config.FullNode,
// erroring when the repo handed over a config of an unexpected type.
func ConfigFullNode(c interface{}) Option {
	if cfg, ok := c.(*config.FullNode); ok {
		return Options(ConfigCommon(&cfg.Common))
	}
	return Error(xerrors.Errorf("invalid config from repo, got: %T", c))
}
// Repo locks the given repo for this node's type and wires the locked repo,
// its keystore, and the API secret into the DI graph; full-node repos also
// get their configuration applied.
func Repo(r repo.Repo) Option {
	return func(settings *Settings) error {
		lr, err := r.Lock(settings.nodeType)
		if err != nil {
			return err
		}
		c, err := lr.Config()
		if err != nil {
			return err
		}
		return Options(
			Override(new(repo.LockedRepo), modules.LockedRepo(lr)), // module handles closing
			Override(new(types.KeyStore), modules.KeyStore),
			Override(new(*dtypes.APIAlg), modules.APISecret),

			ApplyIf(isType(repo.FullNode), ConfigFullNode(c)),
		)(settings)
	}
}
// FullAPI registers an invoke that extracts the constructed FullNodeAPI from
// the fx application into *out.
func FullAPI(out *api.FullNode) Option {
	return func(s *Settings) error {
		resAPI := &impl.FullNodeAPI{}
		s.invokes[ExtractApiKey] = fx.Extract(resAPI)
		*out = resAPI
		return nil
	}
}
// StopFunc tears down a running node; it is returned by New.
type StopFunc func(context.Context) error

// New builds and starts new Star node
func New(ctx context.Context, opts ...Option) (StopFunc, error) {
	settings := Settings{
		modules:  map[interface{}]fx.Option{},
		invokes:  make([]fx.Option, _nInvokes),
		nodeType: repo.FullNode,
	}

	// apply module options in the right order
	if err := Options(Options(defaults()...), Options(opts...))(&settings); err != nil {
		return nil, xerrors.Errorf("applying node options failed: %w", err)
	}

	// gather constructors for fx.Options
	ctors := make([]fx.Option, 0, len(settings.modules))
	for _, opt := range settings.modules {
		ctors = append(ctors, opt)
	}

	// fill holes in invokes for use in fx.Options
	for i, opt := range settings.invokes {
		if opt == nil {
			settings.invokes[i] = fx.Options()
		}
	}

	app := fx.New(
		fx.Options(ctors...),
		fx.Options(settings.invokes...),
		fx.NopLogger,
	)

	// TODO: we probably should have a 'firewall' for Closing signal
	// on this context, and implement closing logic through lifecycles
	// correctly
	if err := app.Start(ctx); err != nil {
		// comment fx.NopLogger few lines above for easier debugging
		return nil, xerrors.Errorf("starting node: %w", err)
	}

	return app.Stop, nil
}
|
package main
import (
"fmt"
"github.com/Cloud-Foundations/Dominator/imageunpacker/client"
"github.com/Cloud-Foundations/Dominator/lib/log"
)
// exportImageSubcommand exports an image via the unpacker client. It expects
// three positional arguments, forwarded verbatim to client.ExportImage; with
// fewer than three the original indexed past the end of args and panicked.
func exportImageSubcommand(args []string, logger log.DebugLogger) error {
	if len(args) < 3 {
		return fmt.Errorf("expected 3 arguments, got %d", len(args))
	}
	// %w keeps the underlying error available to errors.Is / errors.As.
	if err := client.ExportImage(getClient(), args[0], args[1], args[2]); err != nil {
		return fmt.Errorf("error exporting image: %w", err)
	}
	return nil
}
|
package csvparse
import (
"encoding/csv"
"os"
)
// LoadCSV loads the passed file name as a record.
func LoadCSV(s string) ([][]string, error) {
f, err := os.Open(s)
defer f.Close()
if err != nil {
return nil, err
}
r := csv.NewReader(f)
records, err := r.ReadAll()
if err != nil {
return nil, err
}
return records, nil
}
|
package cache
import (
"log"
"github.com/rapidclock/align-bot-stats-cache/models"
)
// GetGenderRatio reads both gender-ratio values from the cache. Failures
// terminate the process via log.Fatal.
func GetGenderRatio() *models.GenderRatio {
	curRatio := new(models.GenderRatio)
	maleRatio, err := Get(GenderRatioMale)
	if err != nil {
		// log.Fatal exits the process; the panic(err) that followed it in
		// the original could never execute and has been removed.
		log.Fatal(err)
	}
	femaleRatio, err := Get(GenderRatioFemale)
	if err != nil {
		log.Fatal(err)
	}
	curRatio.Male = maleRatio
	curRatio.Female = femaleRatio
	return curRatio
}
// GetStudentCounts returns the cached student count for the given location
// key ("seattle", "boston", "charlotte", "siliconValley"); any other value
// returns the counts for every campus.
func GetStudentCounts(location string) *models.StudentCount {
	counts := make(models.StudentCount)
	switch location {
	case "seattle":
		counts["Seattle"] = getSeattleCount()
	case "boston":
		counts["Boston"] = getBostonCount()
	case "charlotte":
		counts["Charlotte"] = getCharlotteCount()
	case "siliconValley":
		counts["SiliconValley"] = getSiliconValleyCount()
	default:
		// Unknown location: return all campuses.
		counts["Seattle"] = getSeattleCount()
		counts["Boston"] = getBostonCount()
		counts["Charlotte"] = getCharlotteCount()
		counts["SiliconValley"] = getSiliconValleyCount()
	}
	return &counts
}
// getSeattleCount returns the cached Seattle student count.
func getSeattleCount() string {
	count, err := Get(StudentCountSeattle)
	handleError(err)
	return count
}

// getBostonCount returns the cached Boston student count.
func getBostonCount() string {
	count, err := Get(StudentCountBoston)
	handleError(err)
	return count
}

// getCharlotteCount returns the cached Charlotte student count.
func getCharlotteCount() string {
	count, err := Get(StudentCountCharlotte)
	handleError(err)
	return count
}

// getSiliconValleyCount returns the cached Silicon Valley student count.
func getSiliconValleyCount() string {
	count, err := Get(StudentCountSiliconValley)
	handleError(err)
	return count
}
// GetGraduatesCount returns the cached total number of graduates.
func GetGraduatesCount() *models.GraduatesCount {
	gradNum := new(models.GraduatesCount)
	totNum, err := Get(NumberOfGraduatesTotal)
	handleError(err)
	gradNum.TotalGraduates = totNum
	return gradNum
}

// GetTotalCost returns the cached total program cost.
func GetTotalCost() *models.TotalProgramCost {
	progCost := new(models.TotalProgramCost)
	cost, err := Get(ProgramCostTotal)
	handleError(err)
	progCost.TotalCost = cost
	return progCost
}

// GetPerCreditCost returns the cached per-credit program cost.
func GetPerCreditCost() *models.PerCreditProgramCost {
	perCredCost := new(models.PerCreditProgramCost)
	cost, err := Get(ProgramCostPerCredit)
	handleError(err)
	perCredCost.PerCreditCost = cost
	return perCredCost
}
// GetTopEmployers returns up to k employers from the cached unordered set.
// k values outside the available range are clamped; the original sliced
// [:k] unconditionally and panicked for k > len(data) or k < 0.
func GetTopEmployers(k int) *models.TopEmployers {
	topEmpData := GetFromUnorderedSet(TopEmployers)
	if k < 0 {
		k = 0
	}
	if k > len(topEmpData) {
		k = len(topEmpData)
	}
	topEmp := new(models.TopEmployers)
	topEmp.Employers = topEmpData[:k]
	return topEmp
}
// GetTopBackgrounds returns up to k backgrounds from the cached unordered
// set. k values outside the available range are clamped; the original sliced
// [:k] unconditionally and panicked for k > len(data) or k < 0.
func GetTopBackgrounds(k int) *models.TopBackgrounds {
	topBgData := GetFromUnorderedSet(TopBackgrounds)
	if k < 0 {
		k = 0
	}
	if k > len(topBgData) {
		k = len(topBgData)
	}
	topBg := new(models.TopBackgrounds)
	topBg.Backgrounds = topBgData[:k]
	return topBg
}
// GetDropOutRate returns the cached drop-out rate.
func GetDropOutRate() *models.DropOutRate {
	doRate := new(models.DropOutRate)
	rate, err := Get(DropOutRate)
	handleError(err)
	doRate.Rate = rate
	return doRate
}

// GetEmploymentRate returns the cached employment rate.
func GetEmploymentRate() *models.EmploymentRate {
	emplRate := new(models.EmploymentRate)
	rate, err := Get(EmploymentRate)
	handleError(err)
	emplRate.Rate = rate
	return emplRate
}

// GetAverageSalary returns the cached average salary.
func GetAverageSalary() *models.AverageSalary {
	avgSal := new(models.AverageSalary)
	sal, err := Get(SalaryAvg)
	handleError(err)
	avgSal.AvgSalary = sal
	return avgSal
}

// GetAcceptanceRate returns the cached acceptance rate.
func GetAcceptanceRate() *models.AcceptanceRate {
	accRate := new(models.AcceptanceRate)
	rate, err := Get(AcceptanceRates)
	handleError(err)
	accRate.Rate = rate
	return accRate
}
|
package config
import (
"github.com/joho/godotenv"
"os"
"strconv"
)
// Config holds the message-queue connection settings loaded from MQ_*
// environment variables (see LoadConfig).
type Config struct {
	Protocol string `json:"protocol"`
	Host     string `json:"host"`
	VHost    string `json:"vhost"`
	Port     int    `json:"port"`
	Username string `json:"username"`
	Password string `json:"password"`
}
// LoadConfig populates a Config from MQ_* environment variables, first
// loading a .env file when present (a missing .env file is ignored).
// An unparsable MQ_PORT is the only error condition.
func LoadConfig() (*Config, error) {
	_ = godotenv.Load(".env")

	port, err := strconv.Atoi(os.Getenv("MQ_PORT"))
	if err != nil {
		return nil, err
	}

	cfg := Config{
		Protocol: os.Getenv("MQ_PROTOCOL"),
		Host:     os.Getenv("MQ_HOST"),
		VHost:    os.Getenv("MQ_VHOST"),
		Port:     port,
		Username: os.Getenv("MQ_USER"),
		Password: os.Getenv("MQ_PWD"),
	}
	return &cfg, nil
}
|
package main
import (
"fmt"
)
// The d4() function contains a for loop whose defer statement is likewise
// executed three times. However, this time, the defer keyword is applied to an
// anonymous function instead of a single fmt.Print() call.
// d4 defers an anonymous function (closing over i) on each of the three loop
// iterations. All deferred calls run only when d4 returns, after the loop has
// finished — this is the closure-capture pitfall described in the comment
// below the function.
// NOTE(review): with Go 1.22+ loop-variable semantics each iteration gets its
// own i, changing the printed output; confirm which toolchain this example
// targets.
func d4() {
	for i := 3; i > 0; i-- {
		defer func() {
			fmt.Print(i, " ")
		}()
	}
	fmt.Println()
}
// d10 is a thin wrapper that simply invokes d4.
func d10() {
	d4()
}
// After the for loop has ended, the value of i is 0 , because it is that value of i that made the
// for loop terminate. However, the tricky point here is that the deferred anonymous function
// is evaluated after the for loop ends because it has no parameters, which means that it is
// evaluated three times for an i value of 0 , hence the generated output. This kind of
// confusing code is what might lead to the creation of nasty bugs in your projects, so try to
// avoid it.
|
package main
import (
"fmt"
"github.com/blang/semver"
"github.com/magiconair/properties/assert"
"github.com/spf13/viper"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"io/ioutil"
"testing"
"time"
)
// TestGetRepository checks that opening the current directory as a git
// repository succeeds and that a nonexistent path fails.
func TestGetRepository(t *testing.T) {
	_, err := getRepository(".")
	if err != nil {
		t.Fail()
	}
	_, err = getRepository("some-path")
	if err == nil {
		t.Fail()
	}
}
// mockCommitIter is a test double for a commit iterator, backed by an
// in-memory slice of commits and a cursor.
type mockCommitIter struct {
	commits      []object.Commit
	currentIndex int
}
// Next returns the commit at the current cursor and advances it, or
// (nil, nil) once all commits have been consumed.
//
// Bug fix: the original incremented currentIndex before indexing, which both
// skipped commits[0] and read one element past the end of the slice on the
// final iteration.
// NOTE(review): the value receiver means the advanced cursor is not
// persisted across calls on the same iterator value — confirm whether a
// pointer receiver is intended.
func (m mockCommitIter) Next() (*object.Commit, error) {
	if m.currentIndex >= len(m.commits) {
		return nil, nil
	}
	commit := &m.commits[m.currentIndex]
	m.currentIndex++
	return commit, nil
}
// ForEach invokes fn on every commit and returns the error of the final
// invocation; earlier errors do not stop the iteration.
func (m mockCommitIter) ForEach(fn func(*object.Commit) error) error {
	var lastErr error
	for i := range m.commits {
		lastErr = fn(&m.commits[i])
	}
	return lastErr
}
// Close resets the cursor to the beginning.
// NOTE(review): the value receiver means this mutates only a copy, so the
// reset is never observable by the caller — confirm whether a pointer
// receiver was intended.
func (m mockCommitIter) Close() {
	m.currentIndex = 0
}
// TestLoadTemplate verifies that loadTemplate returns the file contents when
// the template config key points at a file, and falls back to the built-in
// ungroupedTemplate when the key is empty.
// NOTE(review): the temp file is never removed and the f.Write error is
// ignored — consider defer os.Remove(f.Name()) and checking the write.
func TestLoadTemplate(t *testing.T) {
	tests := []struct {
		template       string
		initConfig     func(string) error
		expectedResult string
	}{
		{
			template: "My template",
			initConfig: func(path string) error {
				viper.Set(templateKey, path)
				return nil
			},
			expectedResult: "My template",
		},
		{
			template: "",
			initConfig: func(path string) error {
				viper.Set(templateKey, path)
				return nil
			},
			expectedResult: ungroupedTemplate,
		},
	}
	for _, test := range tests {
		if test.template != "" {
			// Write the template to a temp file and point the config at it.
			f, _ := ioutil.TempFile("", "tmp")
			f.Write([]byte(test.template))
			_ = test.initConfig(f.Name())
			result, err := loadTemplate()
			assert.Equal(t, err, nil)
			assert.Equal(t, result, test.expectedResult)
			f.Close()
		} else {
			// Empty template path: expect the built-in fallback.
			_ = test.initConfig("")
			result, err := loadTemplate()
			assert.Equal(t, err, nil)
			assert.Equal(t, result, test.expectedResult)
		}
	}
}
// TestRender renders two versions' worth of grouped changes through the
// ungrouped template, once with references hidden and once with them shown,
// and compares the result against the expected markdown.
func TestRender(t *testing.T) {
	version012 := semver.MustParse("0.1.2")
	version011 := semver.MustParse("0.1.1")
	now := time.Now()
	yesterday := now.AddDate(0, 0, -1)
	lastWeek := now.AddDate(0, 0, -7)
	// Fixture: 0.1.2 with Added/Removed changes, 0.1.1 with one Changed
	// entry that has no reference.
	testGroups := []ChangeGroup{
		{
			Version: version012,
			TaggedChanges: []TaggedChanges{
				{
					Tag: "Added",
					Changes: []Change{
						{
							Description: "Commit with version, tag, and reference",
							Version:     version012,
							Reference:   "XYZ-123",
							Tag:         "Added",
							When:        now,
						},
						{
							Description: "Another commit with version, tag, and reference",
							Version:     version012,
							Reference:   "XYZ-123",
							Tag:         "Added",
							When:        yesterday,
						},
					},
				},
				{
					Tag: "Removed",
					Changes: []Change{
						{
							Description: "One more commit with version, tag, and reference",
							Version:     version012,
							Reference:   "XYZ-123",
							Tag:         "Removed",
							When:        lastWeek,
						},
					},
				},
			},
			When: now,
		},
		{
			Version: version011,
			TaggedChanges: []TaggedChanges{
				{
					Tag: "Changed",
					Changes: []Change{
						{
							Description: "Commit with version, tag, but no reference",
							Version:     version011,
							Reference:   "",
							Tag:         "Changed",
							When:        lastWeek,
						},
					},
				},
			},
			When: lastWeek,
		},
	}
	tests := []struct {
		template       string
		groups         []ChangeGroup
		initConfig     func() error
		expectedResult string
	}{
		{
			template: ungroupedTemplate,
			groups:   testGroups,
			initConfig: func() error {
				viper.Set(nameKey, "Test")
				viper.Set(showReferenceKey, false)
				return nil
			},
			expectedResult: fmt.Sprintf(`
# Test Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## [0.1.2] - %s
### Added
- Commit with version, tag, and reference
- Another commit with version, tag, and reference
### Removed
- One more commit with version, tag, and reference
***
## [0.1.1] - %s
### Changed
- Commit with version, tag, but no reference
***
`, testGroups[0].Date(), testGroups[1].Date()),
		},
		{
			template: ungroupedTemplate,
			groups:   testGroups,
			initConfig: func() error {
				viper.Set(nameKey, "Test")
				viper.Set(showReferenceKey, true)
				return nil
			},
			expectedResult: fmt.Sprintf(`
# Test Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## [0.1.2] - %s
### Added
- XYZ-123 Commit with version, tag, and reference
- XYZ-123 Another commit with version, tag, and reference
### Removed
- XYZ-123 One more commit with version, tag, and reference
***
## [0.1.1] - %s
### Changed
- Commit with version, tag, but no reference
***
`, testGroups[0].Date(), testGroups[1].Date()),
		},
	}
	for _, test := range tests {
		_ = test.initConfig()
		result := render(test.template, &test.groups)
		assert.Equal(t, result, test.expectedResult)
	}
}
// TestGroupChanges verifies that groupChanges buckets a flat list of
// changes first by version and then by tag, and that an empty input
// produces an empty result.
func TestGroupChanges(t *testing.T) {
	version012 := semver.MustParse("0.1.2")
	version011 := semver.MustParse("0.1.1")
	now := time.Now()
	yesterday := now.AddDate(0, 0, -1)
	lastWeek := now.AddDate(0, 0, -7)
	tests := []struct {
		changes         []Change
		expectedResults []ChangeGroup
	}{
		// Four changes across two versions and three tags should collapse
		// into two groups with per-tag sub-lists.
		{
			changes: []Change{
				{
					Description: "Commit with version, tag, and reference",
					Version:     version012,
					Reference:   "XYZ-123",
					Tag:         "Added",
					When:        now,
				},
				{
					Description: "Another commit with version, tag, and reference",
					Version:     version012,
					Reference:   "XYZ-123",
					Tag:         "Added",
					When:        yesterday,
				},
				{
					Description: "One more commit with version, tag, and reference",
					Version:     version012,
					Reference:   "XYZ-123",
					Tag:         "Removed",
					When:        lastWeek,
				},
				{
					Description: "Commit with version, tag, but no reference",
					Version:     version011,
					Reference:   "",
					Tag:         "Changed",
					When:        lastWeek,
				},
			},
			expectedResults: []ChangeGroup{
				{
					Version: version012,
					TaggedChanges: []TaggedChanges{
						{
							Tag: "Added",
							Changes: []Change{
								{
									Description: "Commit with version, tag, and reference",
									Version:     version012,
									Reference:   "XYZ-123",
									Tag:         "Added",
									When:        now,
								},
								{
									Description: "Another commit with version, tag, and reference",
									Version:     version012,
									Reference:   "XYZ-123",
									Tag:         "Added",
									When:        yesterday,
								},
							},
						},
						{
							Tag: "Removed",
							Changes: []Change{
								{
									Description: "One more commit with version, tag, and reference",
									Version:     version012,
									Reference:   "XYZ-123",
									Tag:         "Removed",
									When:        lastWeek,
								},
							},
						},
					},
					When: now,
				},
				{
					Version: version011,
					TaggedChanges: []TaggedChanges{
						{
							Tag: "Changed",
							Changes: []Change{
								{
									Description: "Commit with version, tag, but no reference",
									Version:     version011,
									Reference:   "",
									Tag:         "Changed",
									When:        lastWeek,
								},
							},
						},
					},
					When: lastWeek,
				},
			},
		},
		// Degenerate case: no changes in, no groups out.
		{
			changes:         []Change{},
			expectedResults: []ChangeGroup{},
		},
	}
	for _, test := range tests {
		results := groupChanges(&test.changes)
		if len(test.expectedResults) > 0 {
			assert.Equal(t, test.expectedResults, results)
		} else {
			assert.Equal(t, 0, len(results))
		}
	}
}
// TestBuildChanges verifies that buildChanges parses version/tag/reference
// trailers out of commit messages, skips commits that lack a version or a
// tag, and honors the starting version (commits at or below it are cut off).
func TestBuildChanges(t *testing.T) {
	version000 := semver.MustParse("0.0.0")
	version012 := semver.MustParse("0.1.2")
	version011 := semver.MustParse("0.1.1")
	testAuthor := object.Signature{
		Name:  "Testy Testerson",
		Email: "test@test.com",
		When:  time.Now(),
	}
	tests := []struct {
		start           semver.Version
		commitItr       object.CommitIter
		expectedResults []Change
	}{
		// From 0.0.0: both fully-annotated commits become changes; the
		// tag-less and trailer-less commits are ignored.
		{
			start: version000,
			commitItr: mockCommitIter{
				commits: []object.Commit{
					{
						Message: "Commit with version, tag, and reference\n" +
							"\n" +
							"version: 0.1.2\n" +
							"tag: Added\n" +
							"reference: XYZ-123",
						Author: testAuthor,
					},
					{
						Message: "Commit with version, tag, but no reference\n" +
							"\n" +
							"version: 0.1.1\n" +
							"tag: Changed",
						Author: testAuthor,
					},
					{
						Message: "Commit with version, but no tag or reference\n" +
							"\n" +
							"version: 0.1.0",
						Author: testAuthor,
					},
					{
						Message: "Commit with no version, tag or reference\n" +
							"\n",
						Author: testAuthor,
					},
				},
				currentIndex: 0,
			},
			expectedResults: []Change{
				{
					Description: "Commit with version, tag, and reference",
					Version:     version012,
					Reference:   "XYZ-123",
					Tag:         "Added",
					When:        testAuthor.When,
				},
				{
					Description: "Commit with version, tag, but no reference",
					Version:     version011,
					Reference:   "",
					Tag:         "Changed",
					When:        testAuthor.When,
				},
			},
		},
		// Starting at 0.1.2: only the 0.1.2 commit itself is included.
		{
			start: version012,
			commitItr: mockCommitIter{
				commits: []object.Commit{
					{
						Message: "Commit with version, tag, and reference\n" +
							"\n" +
							"version: 0.1.2\n" +
							"tag: Added\n" +
							"reference: XYZ-123",
						Author: testAuthor,
					},
					{
						Message: "Commit with version, tag, but no reference\n" +
							"\n" +
							"version: 0.1.1\n" +
							"tag: Changed",
						Author: testAuthor,
					},
					{
						Message: "Commit with version, but no tag or reference\n" +
							"\n" +
							"version: 0.1.0",
						Author: testAuthor,
					},
					{
						Message: "Commit with no version, tag or reference\n" +
							"\n",
						Author: testAuthor,
					},
				},
				currentIndex: 0,
			},
			expectedResults: []Change{
				{
					Description: "Commit with version, tag, and reference",
					Version:     version012,
					Reference:   "XYZ-123",
					Tag:         "Added",
					When:        testAuthor.When,
				},
			},
		},
		// A lone unannotated commit yields no changes.
		{
			start: version000,
			commitItr: mockCommitIter{
				commits: []object.Commit{
					{
						Message: "Commit with no version, tag or reference\n" +
							"\n",
					},
				},
				currentIndex: 0,
			},
			expectedResults: []Change{},
		},
		// An empty history yields no changes.
		{
			start: version000,
			commitItr: mockCommitIter{
				commits:      []object.Commit{},
				currentIndex: 0,
			},
			expectedResults: []Change{},
		},
	}
	for _, test := range tests {
		results := buildChanges(test.start, test.commitItr)
		if len(test.expectedResults) > 0 {
			assert.Equal(t, test.expectedResults, results)
		} else {
			assert.Equal(t, 0, len(results))
		}
	}
}
|
package installconfig
import (
"os"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"sigs.k8s.io/yaml"
"github.com/openshift/installer/pkg/asset"
"github.com/openshift/installer/pkg/types"
"github.com/openshift/installer/pkg/types/conversion"
"github.com/openshift/installer/pkg/types/defaults"
)
// AssetBase is the base structure for the separate InstallConfig assets used
// in the agent-based and IPI/UPI installation methods.
type AssetBase struct {
	// Config holds the parsed install-config; nil until loaded or generated.
	Config *types.InstallConfig `json:"config"`
	// File is the on-disk representation produced by RecordFile; nil until then.
	File *asset.File `json:"file"`
}
// Files returns the files generated by the asset. The result is always a
// non-nil slice: empty when no file has been recorded yet.
func (a *AssetBase) Files() []*asset.File {
	files := []*asset.File{}
	if a.File != nil {
		files = append(files, a.File)
	}
	return files
}
// Name returns the human-friendly name of the asset.
func (a *AssetBase) Name() string {
	const assetName = "Install Config"
	return assetName
}
// LoadFromFile returns the installconfig from disk. It reports found=false
// with a nil error when the file simply does not exist. If strict
// unmarshaling fails only because of unknown fields, it warns and retries
// leniently (ignoring unknown keys); any other parse error is fatal.
func (a *AssetBase) LoadFromFile(f asset.FileFetcher) (found bool, err error) {
	file, err := f.FetchByName(installConfigFilename)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, errors.Wrap(err, asset.InstallConfigError)
	}
	config := &types.InstallConfig{}
	if err := yaml.UnmarshalStrict(file.Data, config, yaml.DisallowUnknownFields); err != nil {
		err = errors.Wrapf(err, "failed to unmarshal %s", installConfigFilename)
		// Only an "unknown field" error is recoverable via a lenient retry.
		if !strings.Contains(err.Error(), "unknown field") {
			return false, errors.Wrap(err, asset.InstallConfigError)
		}
		err = errors.Wrapf(err, "failed to parse first occurrence of unknown field")
		// Fix: the original called logrus.Warnf(err.Error()), which treats
		// the error text as a printf format string — any '%' in the YAML
		// error would be misinterpreted (go vet printf). Warn logs it verbatim.
		logrus.Warn(err.Error())
		logrus.Info("Attempting to unmarshal while ignoring unknown keys because strict unmarshaling failed")
		if err = yaml.Unmarshal(file.Data, config); err != nil {
			err = errors.Wrapf(err, "failed to unmarshal %s", installConfigFilename)
			return false, errors.Wrap(err, asset.InstallConfigError)
		}
	}
	a.Config = config
	// Upconvert any deprecated fields
	if err := conversion.ConvertInstallConfig(a.Config); err != nil {
		return false, errors.Wrap(errors.Wrap(err, "failed to upconvert install config"), asset.InstallConfigError)
	}
	defaults.SetInstallConfigDefaults(a.Config)
	return true, nil
}
// RecordFile generates the asset manifest file from the config CR by
// marshaling a.Config to YAML and storing it under installConfigFilename.
func (a *AssetBase) RecordFile() error {
	marshaled, err := yaml.Marshal(a.Config)
	if err != nil {
		return errors.Wrap(err, "failed to Marshal InstallConfig")
	}
	file := &asset.File{
		Filename: installConfigFilename,
		Data:     marshaled,
	}
	a.File = file
	return nil
}
|
package main
import (
"bytes"
"fmt"
"io"
"os"
)
// main demonstrates three ways of moving data through a bytes.Buffer:
// a direct Write, fmt writing into it as an io.Writer, and io.Copy
// draining it to standard output.
func main() {
	var buf bytes.Buffer
	// Seed the buffer with an initial byte slice built from a string.
	buf.Write([]byte("Hello"))
	// Append more text by treating the buffer as an io.Writer.
	fmt.Fprint(&buf, " World!")
	// Stream the accumulated contents to standard output.
	io.Copy(os.Stdout, &buf)
}
|
package heap
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
// TestMaxHeapSort pushes each input slice into a max-heap and pops
// everything back out, expecting descending order. It also checks Size
// after every push/pop and the empty-heap sentinels (Pop false, Top -1).
func TestMaxHeapSort(t *testing.T) {
	tests := []struct {
		arr  []int
		want []int
	}{
		{
			arr:  []int{4, 2, 5, 1, 3},
			want: []int{5, 4, 3, 2, 1},
		},
		{
			arr:  []int{5, 9, 1, 6, 8, 14, 6, 49, 25, 4, 6, 3},
			want: []int{49, 25, 14, 9, 8, 6, 6, 6, 5, 4, 3, 1},
		},
	}
	for i, tt := range tests {
		t.Run(fmt.Sprintf("case#%d", i), func(t *testing.T) {
			assert := require.New(t)
			h := NewMaxHeap(len(tt.arr))
			for idx, n := range tt.arr {
				h.Push(n)
				assert.Equal(idx+1, h.Size())
			}
			var got []int
			var cnt = len(tt.arr)
			for !h.IsEmpty() {
				got = append(got, h.Top())
				h.Pop()
				cnt--
				assert.Equal(cnt, h.Size())
			}
			assert.Equal(tt.want, got)
			// Popping an empty heap must fail and Top must report -1.
			assert.False(h.Pop())
			assert.Equal(-1, h.Top())
		})
	}
}
// TestMinHeapSort pushes each input slice into a min-heap and pops
// everything back out, expecting ascending order, then checks the
// empty-heap sentinels (Pop returns false, Top returns -1).
func TestMinHeapSort(t *testing.T) {
	cases := []struct {
		arr  []int
		want []int
	}{
		{arr: []int{4, 2, 5, 1, 3}, want: []int{1, 2, 3, 4, 5}},
		{arr: []int{5, 9, 1, 6, 8, 14, 6, 49, 25, 4, 6, 3}, want: []int{1, 3, 4, 5, 6, 6, 6, 8, 9, 14, 25, 49}},
	}
	for idx, tc := range cases {
		tc := tc
		t.Run(fmt.Sprintf("case#%d", idx), func(t *testing.T) {
			assert := require.New(t)
			h := NewMinHeap(len(tc.arr))
			for _, v := range tc.arr {
				h.Push(v)
			}
			var popped []int
			for !h.IsEmpty() {
				popped = append(popped, h.Top())
				h.Pop()
			}
			assert.Equal(tc.want, popped)
			assert.False(h.Pop())
			assert.Equal(-1, h.Top())
		})
	}
}
|
package jobs
import (
"encoding/json"
"fmt"
"net/url"
"os"
"strconv"
"strings"
"github.com/spf13/cobra"
"github.com/makkes/gitlab-cli/api"
"github.com/makkes/gitlab-cli/cmd/get/output"
"github.com/makkes/gitlab-cli/table"
)
// NewCommand creates the "jobs" sub-command, which lists the jobs of a
// pipeline identified as PROJECT_ID:PIPELINE_ID (via --pipeline) and prints
// them in the output format selected by *format.
func NewCommand(client api.Client, format *string) *cobra.Command {
	var pipeline *string
	cmd := &cobra.Command{
		Use:   "jobs",
		Short: "List jobs of a pipeline",
		RunE: func(cmd *cobra.Command, args []string) error {
			if pipeline == nil || *pipeline == "" {
				return fmt.Errorf("please provide a pipeline scope")
			}
			ids := strings.Split(*pipeline, ":")
			if len(ids) < 2 || ids[0] == "" || ids[1] == "" {
				return fmt.Errorf("ID must be of the form PROJECT_ID:PIPELINE_ID")
			}
			resp, _, err := client.Get(fmt.Sprintf("/projects/%s/pipelines/%s/jobs",
				url.PathEscape(ids[0]),
				url.PathEscape(ids[1])))
			if err != nil {
				return fmt.Errorf("error retrieving jobs: %w", err)
			}
			// Fix: the original also unmarshaled resp into an unused
			// []map[string]interface{} (respSlice) before this decode;
			// that dead second parse has been removed.
			var jobs api.Jobs
			err = json.Unmarshal(resp, &jobs)
			if err != nil {
				return err
			}
			if len(jobs) == 0 {
				return nil
			}
			projectID, err := strconv.Atoi(ids[0])
			if err != nil {
				return fmt.Errorf("error converting project ID '%s' to integer: %w", ids[0], err)
			}
			// The API payload does not carry the project ID; stamp it on
			// each job so downstream printers can build full references.
			for idx := range jobs {
				jobs[idx].ProjectID = projectID
			}
			return output.NewPrinter(os.Stdout).Print(*format, func() error {
				table.PrintJobs(jobs)
				return nil
			}, func() error {
				for _, j := range jobs {
					fmt.Printf("%d\n", j.ID)
				}
				return nil
			}, jobs)
		},
	}
	pipeline = cmd.PersistentFlags().StringP("pipeline", "p", "", "The pipeline to show jobs from")
	return cmd
}
|
package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"testing"
"github.com/kpacha/load-test/db"
"github.com/kpacha/load-test/requester"
)
// TestNewExecutor_Run_contextCanceled runs the executor repeatedly with an
// already-canceled context and expects every run to fail on the first step
// with the exact wrapped "context canceled" error.
func TestNewExecutor_Run_contextCanceled(t *testing.T) {
	store := db.NewInMemory()
	exec := NewExecutor(store)
	p := Plan{
		Min:      1,
		Max:      10,
		Steps:    1,
		Duration: 1,
	}
	// Repeat to shake out any nondeterminism in cancellation handling.
	for i := 0; i < 100; i++ {
		ctx, cancel := context.WithCancel(context.Background())
		cancel()
		_, err := exec.Run(ctx, p)
		if err == nil {
			t.Error("error expected")
			return
		}
		if err.Error() != "executing the plan: executing the step #1 of the plan: context canceled" {
			t.Errorf("unexpected error: %s", err.Error())
		}
	}
}
// Test_executor_Run_wrongReportFormat feeds the executor a requester whose
// report body is a JSON array ("[]") where a single Report object is
// expected, and asserts the resulting decode error.
func Test_executor_Run_wrongReportFormat(t *testing.T) {
	store := db.NewInMemory()
	totalCalls := 0
	exec := executor{
		DB: store,
		RequesterFactory: func(req *http.Request) requester.Requester {
			totalCalls++
			return dummyRequester(func(ctx context.Context, c int) io.Reader {
				// Each requester should be invoked exactly once, in order.
				if totalCalls != c {
					t.Errorf("unexpected number of calls. have %d want %d", totalCalls, c)
				}
				return bytes.NewBufferString("[]")
			})
		},
	}
	p := Plan{
		Min:      1,
		Max:      10,
		Steps:    1,
		Duration: 1,
	}
	_, err := exec.Run(context.Background(), p)
	if err == nil {
		t.Error("error expected")
		return
	}
	if err.Error() != "executing the plan: decoding the results: json: cannot unmarshal array into Go value of type requester.Report" {
		t.Errorf("unexpected error: %s", err.Error())
	}
}
// Test_executor_Run_koStore runs the executor against a store that always
// fails (erroredStore, defined elsewhere in this package) and asserts the
// wrapped storage error surfaces from Run.
func Test_executor_Run_koStore(t *testing.T) {
	expectedErr := errors.New("you should expect me")
	store := erroredStore{expectedErr}
	totalCalls := 0
	exec := executor{
		DB: store,
		RequesterFactory: func(req *http.Request) requester.Requester {
			totalCalls++
			return dummyRequester(func(ctx context.Context, c int) io.Reader {
				// Each requester should be invoked exactly once, in order.
				if totalCalls != c {
					t.Errorf("unexpected number of calls. have %d want %d", totalCalls, c)
				}
				return bytes.NewBufferString("{}")
			})
		},
	}
	req, err := http.NewRequest("GET", "/", nil)
	if err != nil {
		t.Errorf("building the request: %s", err.Error())
		return
	}
	p := Plan{
		Min:      1,
		Max:      10,
		Steps:    1,
		Duration: 1,
		Request:  req,
	}
	_, err = exec.Run(context.Background(), p)
	if err == nil {
		t.Error("error expected")
		return
	}
	if err.Error() != "storing the results: you should expect me" {
		t.Errorf("unexpected error: %s", err.Error())
	}
}
// Test_executor_Run_ok runs a full plan against the in-memory store and
// checks that the stored result set contains one report per step
// (Min=1..Max=10 stepping by 1 → 9 reports).
func Test_executor_Run_ok(t *testing.T) {
	store := db.NewInMemory()
	name := "some-name"
	totalCalls := 0
	exec := executor{
		DB: store,
		RequesterFactory: func(req *http.Request) requester.Requester {
			totalCalls++
			return dummyRequester(func(ctx context.Context, c int) io.Reader {
				// Each requester should be invoked exactly once, in order.
				if totalCalls != c {
					t.Errorf("unexpected number of calls. have %d want %d", totalCalls, c)
				}
				return bytes.NewBufferString("{}")
			})
		},
	}
	req, err := http.NewRequest("GET", "/", nil)
	if err != nil {
		t.Errorf("building the request: %s", err.Error())
		return
	}
	p := Plan{
		Min:      1,
		Max:      10,
		Steps:    1,
		Duration: 1,
		Request:  req,
		Name:     name,
	}
	if _, err = exec.Run(context.Background(), p); err != nil {
		t.Errorf("unexpected error: %s", err.Error())
		return
	}
	// The results should have been persisted under the plan's name.
	r, err := store.Get(name)
	if err != nil {
		t.Errorf("accessing the store: %s", err.Error())
		return
	}
	results := []requester.Report{}
	if err = json.NewDecoder(r).Decode(&results); err != nil {
		t.Error(err)
		return
	}
	if len(results) != 9 {
		t.Errorf("unexpected result size: %d", len(results))
	}
}
// dummyRequester adapts a bare function into a requester.Requester so tests
// can stub out report generation.
type dummyRequester func(ctx context.Context, c int) io.Reader

// Run satisfies requester.Requester by delegating to the wrapped function.
func (d dummyRequester) Run(ctx context.Context, c int) io.Reader {
	return d(ctx, c)
}
|
// Copyright 2016-2021, Pulumi Corporation.
package schema
import pschema "github.com/pulumi/pulumi/pkg/v3/codegen/schema"
// CloudAPIMetadata is a collection of all resources and types in the AWS Cloud Control API.
type CloudAPIMetadata struct {
	// Resources maps resource tokens to their metadata.
	Resources map[string]CloudAPIResource `json:"resources"`
	// Types maps type tokens to auxiliary type definitions.
	Types map[string]CloudAPIType `json:"types"`
	// Functions maps function tokens to invokable function metadata.
	Functions map[string]CloudAPIFunction `json:"functions"`
}
// CloudAPIResource contains metadata for a single AWS Resource.
type CloudAPIResource struct {
	// CfType is the CloudFormation type name (e.g. "AWS::S3::Bucket").
	CfType string `json:"cf"`
	// Inputs and Outputs describe the resource's input and output properties.
	Inputs  map[string]pschema.PropertySpec `json:"inputs"`
	Outputs map[string]pschema.PropertySpec `json:"outputs"`
	// AutoNamingSpec, when present, describes how names can be auto-generated.
	AutoNamingSpec *AutoNamingSpec `json:"autoNamingSpec,omitempty"`
	// Required lists properties that must be set; CreateOnly and WriteOnly
	// list properties that are immutable after create or never read back.
	Required   []string `json:"required,omitempty"`
	CreateOnly []string `json:"createOnly,omitempty"`
	WriteOnly  []string `json:"writeOnly,omitempty"`
	// IrreversibleNames maps SDK property names to Cloud Control names when
	// the conversion cannot be derived mechanically.
	IrreversibleNames map[string]string `json:"irreversibleNames,omitempty"`
}
// AutoNamingSpec describes the auto-naming constraints of a resource's
// name property: which SDK property carries the name and its length bounds.
type AutoNamingSpec struct {
	SdkName string `json:"sdkName"`
	// MinLength/MaxLength bound generated names; zero means unspecified.
	MinLength int `json:"minLength,omitempty"`
	MaxLength int `json:"maxLength,omitempty"`
	// TriviaSpec carries extra naming rules, when any apply.
	TriviaSpec *NamingTriviaSpec `json:"namingTriviaSpec,omitempty"`
}
// CloudAPIType contains metadata for an auxiliary type.
type CloudAPIType struct {
	Type string `json:"type"`
	// Properties describes the type's fields, keyed by property name.
	Properties map[string]pschema.PropertySpec `json:"properties,omitempty"`
	// IrreversibleNames maps SDK property names to Cloud Control names when
	// the conversion cannot be derived mechanically.
	IrreversibleNames map[string]string `json:"irreversibleNames,omitempty"`
}
// CloudAPIFunction contains metadata for an invokable function: its
// CloudFormation type and the identifier properties it accepts.
type CloudAPIFunction struct {
	CfType      string   `json:"cf"`
	Identifiers []string `json:"ids"`
}
// ExtensionResourceToken is a Pulumi token for the resource to deploy
// custom third-party CloudFormation types.
const ExtensionResourceToken = "aws-native:index:ExtensionResource"
|
package main
import (
"flag"
"github.com/golangcollege/sessions"
"github.com/jackc/pgx"
"html/template"
"log"
"module1/pkg/postgre"
"net/http"
"os"
"time"
)
// application bundles the dependencies shared by all HTTP handlers:
// loggers, session manager, the snippet data layer, and parsed templates.
type application struct {
	errorLog      *log.Logger
	infoLog       *log.Logger
	session       *sessions.Session
	snippets      *postgre.SnippetModel
	templateCache map[string]*template.Template
}
// main wires up logging, the Postgres connection pool, the template cache
// and the session manager, then starts the HTTP server on -addr.
func main() {
	addr := flag.String("addr", ":4000", "HTTP network address")
	// SECURITY(review): the session secret default is committed to source;
	// it should come from the environment or a secrets store.
	secret := flag.String("secret", "s6Ndh+pPbnzHbS*+9Pk8qGWhTzbpa@ge", "Secret key")
	flag.Parse()
	infoLog := log.New(os.Stdout, "INFO\t", log.Ldate|log.Ltime)
	errorLog := log.New(os.Stderr, "ERROR\t", log.Ldate|log.Ltime|log.Lshortfile)
	// SECURITY(review): database credentials are hardcoded here; move them
	// to configuration before deploying anywhere shared.
	config := pgx.ConnConfig{
		Host:     "postgresdb",
		Port:     5432,
		Database: "GoDB",
		User:     "postgres",
		Password: "beks300900",
	}
	poolConfig := pgx.ConnPoolConfig{
		ConnConfig:     config,
		MaxConnections: 10,
		AfterConnect:   nil,
		AcquireTimeout: 0,
	}
	conn, err := pgx.NewConnPool(poolConfig)
	if err != nil {
		errorLog.Fatal(err)
	}
	defer conn.Close()
	templateCache, err := newTemplateCache("./ui/html/")
	if err != nil {
		errorLog.Fatal(err)
	}
	session := sessions.New([]byte(*secret))
	session.Lifetime = 12 * time.Hour
	app := &application{
		errorLog:      errorLog,
		infoLog:       infoLog,
		session:       session,
		snippets:      &postgre.SnippetModel{Conn: conn},
		templateCache: templateCache,
	}
	server := &http.Server{
		Addr:     *addr,
		ErrorLog: errorLog,
		Handler:  app.routes(),
	}
	infoLog.Printf("Starting server on %s", *addr)
	// ListenAndServe blocks; any returned error is fatal.
	err = server.ListenAndServe()
	errorLog.Fatal(err)
}
|
/*
* @lc app=leetcode.cn id=1700 lang=golang
*
* [1700] 无法吃午餐的学生数量
*/
// @lc code=start
package main
// countStudents returns how many students end up unable to eat
// (LeetCode 1700). Because students rotate until someone takes the top
// sandwich, only the counts of each preference matter: serve sandwiches
// from the top while anyone still wants that type; the first unwanted
// sandwich blocks everyone remaining.
func countStudents(students []int, sandwiches []int) int {
	// remaining[p] = number of students still preferring sandwich type p.
	var remaining [2]int
	for _, pref := range students {
		remaining[pref]++
	}
	for _, s := range sandwiches {
		if remaining[s] == 0 {
			// Nobody wants the top sandwich: the line is stuck.
			break
		}
		remaining[s]--
	}
	return remaining[0] + remaining[1]
}
// @lc code=end
|
package notify
import (
pb "biohouse/api"
"git.huoys.com/middle-end/library/pkg/net/comet"
"github.com/gogo/protobuf/proto"
)
// Notify pushes protobuf messages to a client session and can close it.
type Notify interface {
	// AsyncPush enqueues the message for delivery without blocking on the
	// underlying server (non-blocking push).
	AsyncPush(sessionID string, ops pb.GameCommand, msg proto.Message)
	// SyncPush delivers the message through the comet server directly
	// (blocking push).
	SyncPush(sessionID string, ops pb.GameCommand, msg proto.Message)
	// Close requests that the given session be closed.
	Close(sessionID string)
}
// notify is the channel-backed implementation of Notify: async pushes and
// close requests go through channels, sync pushes go straight to the server.
type notify struct {
	pushChan  chan *comet.PushData
	closeChan chan string
	s         *comet.Server
}
func New(pc chan *comet.PushData, cc chan string, cs *comet.Server) (n Notify) {
n = ¬ify{
pushChan: pc,
closeChan: cc,
s: cs,
}
return
}
// Close forwards the session ID to the close channel. This blocks until a
// consumer receives it (the channel is supplied by the caller of New, so
// buffering is not guaranteed).
func (n *notify) Close(sessionID string) {
	n.closeChan <- sessionID
}
// AsyncPush marshals msg and enqueues it on the push channel.
// NOTE(review): marshal failures are silently dropped — the message is
// simply not sent and the caller is never informed.
func (n *notify) AsyncPush(sessionID string, ops pb.GameCommand, msg proto.Message) {
	data, err := proto.Marshal(msg)
	if err != nil {
		return
	}
	p := &comet.PushData{
		Mid:  sessionID,
		Data: data,
		Ops:  int32(ops),
	}
	n.pushChan <- p
}
// SyncPush marshals msg and delivers it through the comet server directly.
// NOTE(review): like AsyncPush, marshal failures are silently dropped.
func (n *notify) SyncPush(sessionID string, ops pb.GameCommand, msg proto.Message) {
	data, err := proto.Marshal(msg)
	if err != nil {
		return
	}
	n.s.PushByChannel(sessionID, int32(ops), data)
}
|
package requests
// LoginUser carries the credentials submitted by a login request.
type LoginUser struct {
	Username string
	Password string
}

// RegistUser carries the credentials submitted by a registration request.
type RegistUser struct {
	Username string
	Password string
}

// CheckUsername carries the username whose availability is being checked.
type CheckUsername struct {
	Username string
}
// Valid runs the package-level validator against the login payload.
func (c *LoginUser) Valid() error {
	return validate.Struct(c)
}

// Valid runs the package-level validator against the registration payload.
func (c *RegistUser) Valid() error {
	return validate.Struct(c)
}

// Valid runs the package-level validator against the username payload.
func (c *CheckUsername) Valid() error {
	return validate.Struct(c)
}
|
package main
import (
	"fmt"
	"log"
	"net/http"
	"path"
	"runtime"
	"strings"
)
// main serves static files from the repository's public/ directory on
// port 8000, resolving the directory relative to this source file so it
// works both under `go run` and as a built binary.
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// http.ServeFile rejects request paths containing "..", which
		// guards against simple path traversal here.
		http.ServeFile(w, r, fmt.Sprintf("%s/public%s", parentFilePathHelper(), r.URL.Path))
	})
	// Fix: the original discarded ListenAndServe's error, so a failed bind
	// (e.g. port in use) exited silently with status 0.
	log.Fatal(http.ListenAndServe(":8000", nil))
}
// parentFilePathHelper returns the directory containing this source file
// with the first "src/main" segment removed, so asset paths resolve the
// same whether the program runs via `go run` or as a built executable.
func parentFilePathHelper() string {
	_, sourceFile, _, ok := runtime.Caller(0)
	if !ok {
		panic("No caller information")
	}
	dir := path.Dir(sourceFile)
	return strings.Replace(dir, "src/main", "", 1)
}
|
package main
import (
"fmt"
"os"
"bufio"
"strings"
)
// doesReact reports whether two single-character units annihilate each
// other: the same letter in opposite cases (e.g. "a" and "A").
func doesReact(a, b string) bool {
	capital := strings.ToUpper(a)
	lower := strings.ToLower(a)
	// Fix: characters with no case distinction (digits, '.', etc.) have
	// identical upper and lower forms, so the original wrongly reported
	// that a pair like "."/"." reacts. Such characters never react.
	if capital == lower {
		return false
	}
	return (a == capital && b == lower) || (a == lower && b == capital)
}
// main fully reduces the polymer read from input.txt and prints the length
// of the residue (Advent of Code 2018, day 5, part 1).
func main() {
	f, err := os.Open("input.txt")
	if err != nil {
		// Fix: the original ignored this error and went on to print 0.
		fmt.Println(err)
		return
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	var polymer string
	for scanner.Scan() {
		polymer += scanner.Text()
	}
	if err := scanner.Err(); err != nil {
		fmt.Println(err)
		return
	}
	// Single-pass stack reduction replaces the original's repeated
	// quadratic passes (and its "." sentinel hack). Polymer reduction is
	// confluent, so the final length is identical: push each unit, and
	// whenever it reacts with the unit on top of the stack, pop instead.
	var stack []byte
	for i := 0; i < len(polymer); i++ {
		if len(stack) > 0 && doesReact(string(stack[len(stack)-1]), string(polymer[i])) {
			stack = stack[:len(stack)-1]
			continue
		}
		stack = append(stack, polymer[i])
	}
	fmt.Println(len(stack))
}
|
// +build linux darwin freebsd netbsd openbsd
package cmd
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"log"
"net/http"
// Import pprof handlers into http.DefaultServeMux
_ "net/http/pprof"
"os"
"os/signal"
"syscall"
"time"
docker "github.com/docker/engine-api/client"
"github.com/gorilla/mux"
"github.com/spf13/cobra"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"github.com/dollarshaveclub/furan/lib/builder"
"github.com/dollarshaveclub/furan/lib/config"
"github.com/dollarshaveclub/furan/lib/consul"
"github.com/dollarshaveclub/furan/lib/gc"
githubfetch "github.com/dollarshaveclub/furan/lib/github_fetch"
"github.com/dollarshaveclub/furan/lib/grpc"
"github.com/dollarshaveclub/furan/lib/httphandlers"
flogger "github.com/dollarshaveclub/furan/lib/logger"
"github.com/dollarshaveclub/furan/lib/metrics"
"github.com/dollarshaveclub/furan/lib/s3"
"github.com/dollarshaveclub/furan/lib/squasher"
"github.com/dollarshaveclub/furan/lib/tagcheck"
"github.com/dollarshaveclub/furan/lib/vault"
)
// serverConfig holds all server flag values; populated in init below.
var serverConfig config.Serverconfig

// serverCmd is the `furan server` command. Its PreRun validates that the
// S3 error-log bucket and region are set whenever S3 error logging is on.
var serverCmd = &cobra.Command{
	Use:   "server",
	Short: "Run Furan server",
	Long:  `Furan API server (see docs)`,
	PreRun: func(cmd *cobra.Command, args []string) {
		if serverConfig.S3ErrorLogs {
			if serverConfig.S3ErrorLogBucket == "" {
				clierr("S3 error log bucket must be defined")
			}
			if serverConfig.S3ErrorLogRegion == "" {
				clierr("S3 error log region must be defined")
			}
		}
	},
	Run: server,
}
// init registers all server command-line flags (ports/addresses, build
// concurrency and queueing, TLS and SumoLogic Vault paths, S3 error-log
// settings, GC interval, Consul and ECR options) and attaches serverCmd
// to the root command.
func init() {
	serverCmd.PersistentFlags().UintVar(&serverConfig.HTTPSPort, "https-port", 4000, "REST HTTPS TCP port")
	serverCmd.PersistentFlags().UintVar(&serverConfig.GRPCPort, "grpc-port", 4001, "gRPC TCP port")
	serverCmd.PersistentFlags().StringVar(&serverConfig.HealthcheckAddr, "healthcheck-addr", "0.0.0.0", "HTTP healthcheck listen address")
	serverCmd.PersistentFlags().UintVar(&serverConfig.HealthcheckHTTPport, "healthcheck-port", 4002, "Healthcheck HTTP port (listens on localhost only)")
	serverCmd.PersistentFlags().UintVar(&serverConfig.PPROFPort, "pprof-port", 4003, "Port for serving pprof profiles")
	serverCmd.PersistentFlags().StringVar(&serverConfig.HTTPSAddr, "https-addr", "0.0.0.0", "REST HTTPS listen address")
	serverCmd.PersistentFlags().StringVar(&serverConfig.GRPCAddr, "grpc-addr", "0.0.0.0", "gRPC listen address")
	serverCmd.PersistentFlags().UintVar(&serverConfig.Concurrency, "concurrency", 10, "Max concurrent builds")
	serverCmd.PersistentFlags().UintVar(&serverConfig.Queuesize, "queue", 100, "Max queue size for buffered build requests")
	serverCmd.PersistentFlags().StringVar(&serverConfig.VaultTLSCertPath, "tls-cert-path", "/tls/cert", "Vault path to TLS certificate")
	serverCmd.PersistentFlags().StringVar(&serverConfig.VaultTLSKeyPath, "tls-key-path", "/tls/key", "Vault path to TLS private key")
	serverCmd.PersistentFlags().BoolVar(&serverConfig.LogToSumo, "log-to-sumo", true, "Send log entries to SumoLogic HTTPS collector")
	serverCmd.PersistentFlags().StringVar(&serverConfig.VaultSumoURLPath, "sumo-collector-path", "/sumologic/url", "Vault path SumoLogic collector URL")
	serverCmd.PersistentFlags().BoolVar(&serverConfig.S3ErrorLogs, "s3-error-logs", false, "Upload failed build logs to S3 (region and bucket must be specified)")
	serverCmd.PersistentFlags().StringVar(&serverConfig.S3ErrorLogRegion, "s3-error-log-region", "us-west-2", "Region for S3 error log upload")
	serverCmd.PersistentFlags().StringVar(&serverConfig.S3ErrorLogBucket, "s3-error-log-bucket", "", "Bucket for S3 error log upload")
	serverCmd.PersistentFlags().UintVar(&serverConfig.S3PresignTTL, "s3-error-log-presign-ttl", 60*24, "Presigned error log URL TTL in minutes (0 to disable)")
	serverCmd.PersistentFlags().UintVar(&serverConfig.GCIntervalSecs, "gc-interval", 3600, "GC (garbage collection) interval in seconds")
	serverCmd.PersistentFlags().StringVar(&serverConfig.DockerDiskPath, "docker-storage-path", "/var/lib/docker", "Path to Docker storage for monitoring free space (optional)")
	serverCmd.PersistentFlags().StringVar(&consulConfig.Addr, "consul-addr", "127.0.0.1:8500", "Consul address (IP:port)")
	serverCmd.PersistentFlags().StringVar(&consulConfig.KVPrefix, "consul-kv-prefix", "furan", "Consul KV prefix")
	serverCmd.PersistentFlags().BoolVar(&serverConfig.DisableMetrics, "disable-metrics", false, "Disable Datadog metrics collection")
	serverCmd.PersistentFlags().BoolVar(&awsConfig.EnableECR, "ecr", false, "Enable AWS ECR support")
	serverCmd.PersistentFlags().StringSliceVar(&awsConfig.ECRRegistryHosts, "ecr-registry-hosts", []string{}, "ECR registry hosts (ex: 123456789.dkr.ecr.us-west-2.amazonaws.com) to authorize for base images")
	RootCmd.AddCommand(serverCmd)
}
// setupServerLogger initializes the package-level logger, prefixing entries
// with the hostname and optionally tee-ing them to a SumoLogic collector
// (url stays empty when LogToSumo is disabled).
func setupServerLogger() {
	var url string
	if serverConfig.LogToSumo {
		url = serverConfig.SumoURL
	}
	hn, err := os.Hostname()
	if err != nil {
		log.Fatalf("error getting hostname: %v", err)
	}
	stdlog := flogger.NewStandardLogger(os.Stderr, url)
	logger = log.New(stdlog, fmt.Sprintf("%v: ", hn), log.LstdFlags)
}
// Separate server because it's HTTP on localhost only
// (simplifies Consul health check)
//
// healthcheck serves GET /health on the healthcheck address/port and
// blocks until the server fails; run it in its own goroutine.
func healthcheck(ha *httphandlers.HTTPAdapter) {
	r := mux.NewRouter()
	r.HandleFunc("/health", ha.HealthHandler).Methods("GET")
	addr := fmt.Sprintf("%v:%v", serverConfig.HealthcheckAddr, serverConfig.HealthcheckHTTPport)
	server := &http.Server{Addr: addr, Handler: r}
	logger.Printf("HTTP healthcheck listening on: %v", addr)
	logger.Println(server.ListenAndServe())
}
// pprof serves the profiling handlers that the blank net/http/pprof import
// installed into http.DefaultServeMux, on localhost only. It blocks until
// the server fails; run it in its own goroutine.
func pprof() {
	// Fix: log the port BEFORE ListenAndServe. The original logged it
	// afterwards, but ListenAndServe blocks until the server fails, so the
	// "listening" line was never emitted during normal operation.
	logger.Printf("pprof listening on port: %v", serverConfig.PPROFPort)
	logger.Println(http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", serverConfig.PPROFPort), nil))
}
// startGC launches a background goroutine that runs Docker image garbage
// collection every interval seconds for the life of the process.
func startGC(dc builder.ImageBuildClient, mc metrics.MetricsCollector, log *log.Logger, interval uint) {
	igc := gc.NewDockerImageGC(log, dc, mc, serverConfig.DockerDiskPath)
	ticker := time.NewTicker(time.Duration(interval) * time.Second)
	go func() {
		// Ranging over the ticker channel replaces the original's
		// single-case select loop (staticcheck S1000). The ticker is
		// deliberately never stopped: GC runs until process exit.
		for range ticker.C {
			igc.GC()
		}
	}()
}
// server is the entry point for `furan server`. It wires together, in
// order: Vault secrets, logging, the database, metrics/tracing, Kafka,
// TLS material, the Docker build pipeline, Consul coordination, the gRPC
// server, and finally the HTTPS REST API. The function blocks on the REST
// server; a SIGTERM/SIGINT triggers a graceful shutdown.
func server(cmd *cobra.Command, args []string) {
	var err error
	vault.SetupVault(&vaultConfig, &awsConfig, &dockerConfig, &gitConfig, &serverConfig, awscredsprefix)
	if serverConfig.LogToSumo {
		vault.GetSumoURL(&vaultConfig, &serverConfig)
	}
	setupServerLogger()
	setupDB(initializeDB)
	// Metrics: a no-op collector when disabled, Datadog (plus tracer) otherwise.
	var mc metrics.MetricsCollector
	if serverConfig.DisableMetrics {
		mc = &metrics.FakeCollector{}
	} else {
		mc, err = newDatadogCollector()
		if err != nil {
			log.Fatalf("error creating Datadog collector: %v", err)
		}
		startDatadogTracer()
	}
	setupKafka(mc)
	// TLS cert/key are materialized from Vault as temp files for the REST listener.
	certPath, keyPath := vault.WriteTLSCert(&vaultConfig, &serverConfig)
	defer vault.RmTempFiles(certPath, keyPath)
	err = getDockercfg()
	if err != nil {
		logger.Fatalf("error reading dockercfg: %v", err)
	}
	dc, err := docker.NewEnvClient()
	if err != nil {
		log.Fatalf("error creating Docker client: %v", err)
	}
	gf := githubfetch.NewGitHubFetcher(gitConfig.Token)
	osm := s3.NewS3StorageManager(awsConfig, mc, logger)
	is := squasher.NewDockerImageSquasher(logger)
	itc := tagcheck.NewRegistryTagChecker(&dockerConfig, logger.Printf)
	s3errcfg := builder.S3ErrorLogConfig{
		PushToS3:          serverConfig.S3ErrorLogs,
		Region:            serverConfig.S3ErrorLogRegion,
		Bucket:            serverConfig.S3ErrorLogBucket,
		PresignTTLMinutes: serverConfig.S3PresignTTL,
	}
	imageBuilder, err := builder.NewImageBuilder(kafkaConfig.Manager, dbConfig.Datalayer, gf, dc, mc, osm, is, itc, dockerConfig.DockercfgContents, s3errcfg, logger)
	if err != nil {
		log.Fatalf("error creating image builder: %v", err)
	}
	if awsConfig.EnableECR {
		imageBuilder.SetECRConfig(awsConfig.AccessKeyID, awsConfig.SecretAccessKey, awsConfig.ECRRegistryHosts)
	}
	kvo, err := consul.NewConsulKVOrchestrator(&consulConfig)
	if err != nil {
		log.Fatalf("error creating key value orchestrator: %v", err)
	}
	datadogGrpcServiceName := datadogServiceName + ".grpc"
	grpcSvr := grpc.NewGRPCServer(imageBuilder, dbConfig.Datalayer, kafkaConfig.Manager, kafkaConfig.Manager, mc, kvo, serverConfig.Queuesize, serverConfig.Concurrency, logger, datadogGrpcServiceName)
	go grpcSvr.ListenRPC(serverConfig.GRPCAddr, serverConfig.GRPCPort)
	ha := httphandlers.NewHTTPAdapter(grpcSvr)
	stop := make(chan os.Signal, 10)
	signal.Notify(stop, syscall.SIGTERM) //non-portable outside of POSIX systems
	signal.Notify(stop, os.Interrupt)
	startGC(dc, mc, logger, serverConfig.GCIntervalSecs)
	go healthcheck(ha)
	go pprof()
	r := mux.NewRouter()
	r.HandleFunc("/", versionHandler).Methods("GET")
	r.HandleFunc("/build", ha.BuildRequestHandler).Methods("POST")
	r.HandleFunc("/build/{id}", ha.BuildStatusHandler).Methods("GET")
	r.HandleFunc("/build/{id}", ha.BuildCancelHandler).Methods("DELETE")
	tlsconfig := &tls.Config{MinVersion: tls.VersionTLS12}
	addr := fmt.Sprintf("%v:%v", serverConfig.HTTPSAddr, serverConfig.HTTPSPort)
	server := &http.Server{Addr: addr, Handler: r, TLSConfig: tlsconfig}
	// On signal, shut down the REST server with a 10s drain window; the
	// ListenAndServeTLS call below then returns and cleanup proceeds.
	go func() {
		_ = <-stop
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		server.Shutdown(ctx)
		cancel()
	}()
	logger.Printf("HTTPS REST listening on: %v", addr)
	logger.Println(server.ListenAndServeTLS(certPath, keyPath))
	logger.Printf("shutting down GRPC and aborting builds...")
	tracer.Stop()
	grpcSvr.Shutdown()
	close(stop)
	logger.Printf("done, exiting")
}
// version and description are loaded at startup by setupVersion and served
// by versionHandler; both stay empty if the files cannot be read.
var version, description string

// setupVersion reads VERSION.txt (up to 20 bytes) and DESCRIPTION.txt (up
// to 2048 bytes) from the working directory. Any failure leaves the
// corresponding package-level variable empty; errors are deliberately
// swallowed since version info is best-effort.
func setupVersion() {
	bv := make([]byte, 20)
	bd := make([]byte, 2048)
	fv, err := os.Open("VERSION.txt")
	if err != nil {
		return
	}
	defer fv.Close()
	sv, err := fv.Read(bv)
	if err != nil {
		return
	}
	fd, err := os.Open("DESCRIPTION.txt")
	if err != nil {
		return
	}
	defer fd.Close()
	sd, err := fd.Read(bd)
	if err != nil {
		return
	}
	// Trim each buffer to the byte count actually read.
	version = string(bv[:sv])
	description = string(bd[:sd])
}
// versionHandler serves the service name, version and description as JSON
// on GET /.
func versionHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Content-Type", "application/json")
	// The local struct intentionally shadows the package-level `version`
	// string it embeds (the RHS of := still sees the package variable).
	version := struct {
		Name        string `json:"name"`
		Version     string `json:"version"`
		Description string `json:"description"`
	}{
		Name:        "furan",
		Version:     version,
		Description: description,
	}
	vb, err := json.Marshal(version)
	if err != nil {
		// Fix: set the status code BEFORE writing the body. The original
		// called WriteHeader after Write, which is a no-op (the first Write
		// implicitly sends 200), so marshal failures returned 200 OK.
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(fmt.Sprintf(`{"error": "error marshalling version: %v"}`, err)))
		return
	}
	w.Write(vb)
}
|
package operators
import (
"github.com/onsi/ginkgo"
"github.com/openshift/osde2e/pkg/common/alert"
"github.com/openshift/osde2e/pkg/common/helper"
)
// Suite names for the two configure-alertmanager-operator test groups; the
// strings double as Ginkgo Describe labels and alert-routing keys.
var configureAlertManagerOperators string = "[Suite: operators] [OSD] Configure AlertManager Operator"
var configureAlertManagerInforming string = "[Suite: informing] [OSD] Configure AlertManager Operator"

// init registers alert routing (team, owner, channel, email, priority) for
// both suites so CI failures page the right people.
func init() {
	alert.RegisterGinkgoAlert(configureAlertManagerOperators, "SD-SREP", "Christopher Collins", "sd-cicd-alerts", "sd-cicd@redhat.com", 4)
	alert.RegisterGinkgoAlert(configureAlertManagerInforming, "SD-SREP", "Matt Bargenquast", "sd-cicd-alerts", "sd-cicd@redhat.com", 4)
}
// Acceptance checks for configure-alertmanager-operator in the
// openshift-monitoring namespace: CSV, lockfile configmap, deployment
// replica count, and RBAC objects.
var _ = ginkgo.Describe(configureAlertManagerOperators, func() {
	const (
		operatorName      = "configure-alertmanager-operator"
		operatorNamespace = "openshift-monitoring"
		operatorLockFile  = "configure-alertmanager-operator-lock"
	)
	var defaultDesiredReplicas int32 = 1
	// NOTE: CAM clusterRoles have random-ish names like:
	// configure-alertmanager-operator.v0.1.80-03136c1-l589
	// so asserting on them would need a regex-style check; they are
	// intentionally not verified here.
	clusterRoleBindings := []string{}
	roleBindings := []string{operatorName}
	roles := []string{operatorName}
	h := helper.New()
	checkClusterServiceVersion(h, operatorNamespace, operatorName)
	checkConfigMapLockfile(h, operatorNamespace, operatorLockFile)
	checkDeployment(h, operatorNamespace, operatorName, defaultDesiredReplicas)
	checkClusterRoleBindings(h, clusterRoleBindings)
	checkRole(h, operatorNamespace, roles)
	checkRoleBindings(h, operatorNamespace, roleBindings)
})
// Informing-suite check that the operator can upgrade from a pinned CSV.
var _ = ginkgo.Describe(configureAlertManagerInforming, func() {
	h := helper.New()
	checkUpgrade(h, "openshift-monitoring", "configure-alertmanager-operator",
		"configure-alertmanager-operator.v0.1.171-dba3c73",
	)
})
|
package main
import (
"github.com/g-xianhui/op/server/pb"
"github.com/golang/protobuf/proto"
)
// echo answers an MQEcho request by sending the same payload back to the
// agent in an MREcho reply.
func echo(agent *Agent, p proto.Message) {
	in := p.(*pb.MQEcho)
	out := &pb.MREcho{
		Data: proto.String(in.GetData()),
	}
	replyMsg(agent, pb.MECHO, out)
}
// init registers echo as the handler for MECHO messages, with MQEcho as the
// prototype used to decode incoming payloads.
func init() {
	registerHandler(pb.MECHO, &pb.MQEcho{}, echo)
}
|
package tls
import (
"crypto/x509"
"crypto/x509/pkix"
"github.com/openshift/installer/pkg/asset"
)
// KubeletCSRSignerCertKey is a key/cert pair that signs the kubelet client certs.
type KubeletCSRSignerCertKey struct {
	SelfSignedCertKey
}
var _ asset.WritableAsset = (*KubeletCSRSignerCertKey)(nil)
// Dependencies returns the dependencies of the kubelet signer, which are
// empty because the CA is self-signed.
func (c *KubeletCSRSignerCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{}
}
// Generate generates the kubelet-signer CA key and cert pair.
// NOTE(review): the lifetime is ValidityOneDay — presumably because this
// signer is only needed during initial bootstrap; confirm before changing.
func (c *KubeletCSRSignerCertKey) Generate(parents asset.Parents) error {
	cfg := &CertCfg{
		Subject:   pkix.Name{CommonName: "kubelet-signer", OrganizationalUnit: []string{"openshift"}},
		KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		Validity:  ValidityOneDay,
		IsCA:      true,
	}
	return c.SelfSignedCertKey.Generate(cfg, "kubelet-signer")
}
// Name returns the human-friendly name of the asset.
func (c *KubeletCSRSignerCertKey) Name() string {
	return "Certificate (kubelet-signer)"
}
// KubeletClientCABundle is the asset that generates the kubelet-client-ca-bundle,
// which contains all the individual client CAs.
type KubeletClientCABundle struct {
	CertBundle
}
var _ asset.Asset = (*KubeletClientCABundle)(nil)
// Dependencies returns the dependency of the cert bundle: the kubelet CSR signer.
func (a *KubeletClientCABundle) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeletCSRSignerCertKey{},
	}
}
// Generate generates the cert bundle based on its dependencies.
func (a *KubeletClientCABundle) Generate(deps asset.Parents) error {
	var certs []CertInterface
	// NOTE(review): the loop variable shadows the imported "asset" package;
	// harmless here since the package is not referenced inside the loop, but
	// worth renaming for clarity.
	for _, asset := range a.Dependencies() {
		deps.Get(asset)
		certs = append(certs, asset.(CertInterface))
	}
	return a.CertBundle.Generate("kubelet-client-ca-bundle", certs...)
}
// Name returns the human-friendly name of the asset.
func (a *KubeletClientCABundle) Name() string {
	return "Certificate (kubelet-client-ca-bundle)"
}
// KubeletServingCABundle is the asset that generates the kubelet-serving-ca-bundle,
// built from the kubelet CSR signer's CA cert.
type KubeletServingCABundle struct {
	CertBundle
}
var _ asset.Asset = (*KubeletServingCABundle)(nil)
// Dependencies returns the dependency of the cert bundle: the kubelet CSR signer.
func (a *KubeletServingCABundle) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeletCSRSignerCertKey{},
	}
}
// Generate generates the cert bundle based on its dependencies.
func (a *KubeletServingCABundle) Generate(deps asset.Parents) error {
	var certs []CertInterface
	for _, asset := range a.Dependencies() {
		deps.Get(asset)
		certs = append(certs, asset.(CertInterface))
	}
	return a.CertBundle.Generate("kubelet-serving-ca-bundle", certs...)
}
// Name returns the human-friendly name of the asset.
func (a *KubeletServingCABundle) Name() string {
	return "Certificate (kubelet-serving-ca-bundle)"
}
// KubeletBootstrapCertSigner is a key/cert pair that signs the kubelet bootstrap kubeconfig client certs that the kubelet
// uses to create CSRs for its real certificates.
type KubeletBootstrapCertSigner struct {
	SelfSignedCertKey
}
var _ asset.WritableAsset = (*KubeletBootstrapCertSigner)(nil)
// Dependencies returns the dependencies of the bootstrap signer, which are
// empty because the CA is self-signed.
func (c *KubeletBootstrapCertSigner) Dependencies() []asset.Asset {
	return []asset.Asset{}
}
// Generate generates the kubelet-bootstrap-kubeconfig-signer CA key and cert
// pair, valid for ten years.
func (c *KubeletBootstrapCertSigner) Generate(parents asset.Parents) error {
	cfg := &CertCfg{
		Subject:   pkix.Name{CommonName: "kubelet-bootstrap-kubeconfig-signer", OrganizationalUnit: []string{"openshift"}},
		KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		Validity:  ValidityTenYears,
		IsCA:      true,
	}
	return c.SelfSignedCertKey.Generate(cfg, "kubelet-bootstrap-kubeconfig-signer")
}
// Name returns the human-friendly name of the asset.
func (c *KubeletBootstrapCertSigner) Name() string {
	return "Certificate (kubelet-bootstrap-kubeconfig-signer)"
}
// KubeletBootstrapCABundle is the asset that generates the
// kubelet-bootstrap-kubeconfig-ca-bundle (the original comment incorrectly
// said admin-kubeconfig-ca-bundle), built from the bootstrap cert signer.
type KubeletBootstrapCABundle struct {
	CertBundle
}
var _ asset.Asset = (*KubeletBootstrapCABundle)(nil)
// Dependencies returns the dependency of the cert bundle: the bootstrap signer.
func (a *KubeletBootstrapCABundle) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeletBootstrapCertSigner{},
	}
}
// Generate generates the cert bundle based on its dependencies.
func (a *KubeletBootstrapCABundle) Generate(deps asset.Parents) error {
	var certs []CertInterface
	for _, asset := range a.Dependencies() {
		deps.Get(asset)
		certs = append(certs, asset.(CertInterface))
	}
	return a.CertBundle.Generate("kubelet-bootstrap-kubeconfig-ca-bundle", certs...)
}
// Name returns the human-friendly name of the asset.
func (a *KubeletBootstrapCABundle) Name() string {
	return "Certificate (kubelet-bootstrap-kubeconfig-ca-bundle)"
}
// KubeletClientCertKey is the asset that generates the key/cert pair for kubelet client to apiserver.
// This credential can be revoked by deleting the configmap containing its signer.
type KubeletClientCertKey struct {
	SignedCertKey
}
var _ asset.Asset = (*KubeletClientCertKey)(nil)
// Dependencies returns the dependency of the cert/key pair, which includes
// the parent CA, and install config if it depends on the install config for
// DNS names, etc.
func (a *KubeletClientCertKey) Dependencies() []asset.Asset {
	return []asset.Asset{
		&KubeletBootstrapCertSigner{},
	}
}
// Generate generates the cert/key pair based on its dependencies.
// The subject identifies the node-bootstrapper service account; the pair is
// signed by KubeletBootstrapCertSigner without appending the parent cert.
func (a *KubeletClientCertKey) Generate(dependencies asset.Parents) error {
	ca := &KubeletBootstrapCertSigner{}
	dependencies.Get(ca)
	cfg := &CertCfg{
		Subject:      pkix.Name{CommonName: "system:serviceaccount:openshift-machine-config-operator:node-bootstrapper", Organization: []string{"system:serviceaccounts:openshift-machine-config-operator", "system:serviceaccounts"}},
		KeyUsages:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		Validity:     ValidityTenYears,
	}
	return a.SignedCertKey.Generate(cfg, ca, "kubelet-client", DoNotAppendParent)
}
// Name returns the human-friendly name of the asset.
func (a *KubeletClientCertKey) Name() string {
	return "Certificate (kubelet-client)"
}
|
package modules
import (
"fmt"
"io/ioutil"
"regexp"
"strings"
"github.com/wx13/genesis"
)
// LineInFile lets the user insert lines of text into a file.
type LineInFile struct {
	// Required
	File string   // path to the file (passed through genesis.ExpandHome before use)
	Line []string // line(s) to insert
	// Optional
	Pattern []string // regex line(s) to replace
	Success []string // pattern to check for success (defaults to Pattern — the original comment said Line, but see find())
	Before  []string // insert line before this pattern
	After   []string // insert line after this pattern
	Absent  bool     // ensure line is absent from file
}
// ID returns a human-readable identifier built from this module's
// configuration. It also doubles as the key under which patches are stored
// (see Install/Remove), so its format must remain stable.
//
// NOTE(review): lif.Absent is a bool formatted with %s, which renders as
// "%!s(bool=false)" — go vet flags this, and there is also a missing comma
// between success and absent. It should be %t, but changing the format would
// orphan patches already saved under the old ID string; fix only alongside a
// stored-patch migration.
func (lif LineInFile) ID() string {
	short := fmt.Sprintf("LineInFile: file=%s, line=%s, pattern=%s", lif.File, lif.Line, lif.Pattern)
	long := fmt.Sprintf("before=%s, after=%s, success=%s absent=%s", lif.Before, lif.After, lif.Success, lif.Absent)
	return short + "\n" + long
}
// Files reports the set of files this module touches: just File.
func (lif LineInFile) Files() []string {
	return []string{lif.File}
}
// Remove reverts this module's change by applying the patch that Install
// stored under this file/ID pair.
func (lif LineInFile) Remove() (string, error) {
	lif.File = genesis.ExpandHome(lif.File)
	if err := genesis.Store.ApplyPatch(lif.File, lif.ID()); err != nil {
		return "Could not apply patch.", err
	}
	return "Patch applied", nil
}
// Status reports whether the file currently satisfies this module: pass when
// the line's presence matches the Absent flag, fail otherwise.
func (lif LineInFile) Status() (genesis.Status, string, error) {
	lif.File = genesis.ExpandHome(lif.File)
	lines, err := lif.readFile()
	if err != nil {
		return genesis.StatusFail, "Could not read file.", err
	}
	// Only the region between the After/Before markers is inspected.
	_, region, _ := lif.split(lines, lif.After, lif.Before)
	found, _, _ := lif.find(region)
	switch {
	case found && lif.Absent:
		return genesis.StatusFail, "Line is in file.", nil
	case found:
		return genesis.StatusPass, "Line is in file.", nil
	case lif.Absent:
		return genesis.StatusPass, "Line is absent from file.", nil
	default:
		return genesis.StatusFail, "Line is absent from file.", nil
	}
}
// Install ensures the configured line(s) appear in the file, inserting or
// replacing within the region bounded by the After/Before patterns, and
// saves a patch so the change can be reverted later by Remove.
func (lif LineInFile) Install() (string, error) {
	lif.File = genesis.ExpandHome(lif.File)
	// Read errors are deliberately ignored: a missing file yields an empty
	// line set and the file is created on write.
	lines, _ := lif.readFile()
	origLines := strings.Join(lines, "\n")
	beg, mid, end := lif.split(lines, lif.After, lif.Before)
	mid = lif.replace(mid)
	// Reassemble the three regions. The appends alias the original backing
	// array, which is safe here because the pieces are written back in order.
	lines = append(beg, append(mid, end...)...)
	err := lif.writeFile(lines)
	if err != nil {
		return "Unable to write file.", err
	}
	// Record a reversible diff keyed by ID() so Remove can undo this change.
	genesis.Store.SavePatch(lif.File, origLines, strings.Join(lines, "\n"), lif.ID())
	return "Wrote line to file", nil
}
// replace either replaces pattern line with line, or inserts
// the line at the end.
//
// NOTE(review): the appends below write into the backing array shared with
// the caller's slice; that is safe for how Install uses the result, but the
// aliasing is subtle — confirm before reusing this helper elsewhere.
func (lif *LineInFile) replace(lines []string) []string {
	present, start, stop := lif.find(lines)
	// No match: append the new line(s) at the end of the region.
	if !present {
		return append(lines, lif.Line...)
	}
	// Match extends to the last line: truncate at the match and substitute.
	if stop == len(lines)-1 {
		return append(lines[:start], lif.Line...)
	}
	// Match in the middle: splice Line in place of lines[start..stop].
	return append(lines[:start], append(lif.Line, lines[stop+1:]...)...)
}
// readFile reads the target file and returns its contents split into lines.
// On error an empty slice is returned along with the error.
func (lif LineInFile) readFile() ([]string, error) {
	raw, err := ioutil.ReadFile(lif.File)
	if err != nil {
		return []string{}, err
	}
	return strings.Split(string(raw), "\n"), nil
}
// writeFile joins the lines with newlines (adding a trailing newline) and
// writes them to the target file with mode 0644.
func (lif LineInFile) writeFile(lines []string) error {
	return ioutil.WriteFile(lif.File, []byte(strings.Join(lines, "\n")+"\n"), 0644)
}
// find looks for lif.Pattern — or lif.Success when set — in lines.
// Returns true if it finds it, plus the start/stop slice indices.
// (The original comment described findPattern, not this function.)
func (lif LineInFile) find(lines []string) (bool, int, int) {
	var pattern []string
	if len(lif.Success) == 0 {
		pattern = lif.Pattern
	} else {
		pattern = lif.Success
	}
	return lif.findPattern(lines, pattern)
}
// findPattern scans lines for the pattern sequence: each element of pattern
// must match, in order, on some line, with arbitrary non-matching lines
// allowed in between. It returns whether the whole sequence matched, plus
// the indices of the first and last matching lines (-1, -1 when not found).
func (lif LineInFile) findPattern(lines []string, pattern []string) (bool, int, int) {
	if len(lines) == 0 || len(pattern) == 0 {
		return false, -1, -1
	}
	// Compile each pattern once instead of recompiling pattern[idx] on every
	// line (the original called regexp.MatchString per line, which compiles
	// each time). An invalid regex is left nil and treated as never
	// matching, preserving the old error-ignoring behavior.
	res := make([]*regexp.Regexp, len(pattern))
	for i, p := range pattern {
		res[i], _ = regexp.Compile(p)
	}
	idx := 0
	start := -1
	for k, line := range lines {
		if res[idx] == nil || !res[idx].MatchString(line) {
			continue
		}
		if start < 0 {
			start = k
		}
		idx++
		if idx >= len(pattern) {
			return true, start, k
		}
	}
	return false, -1, -1
}
// split partitions lines into three regions: beg (everything up to and
// including the first match of the start pattern sPtrn), mid (the working
// region), and end (from the first match of the end pattern ePtrn within the
// remainder, onward). Either pattern may be empty, in which case the
// corresponding boundary is the start/end of the input.
func (lif LineInFile) split(lines []string, sPtrn, ePtrn []string) (beg, mid, end []string) {
	stop := -1 // -1 means "no start match": mid begins at lines[0]
	found := false
	start := 0
	// Assign to 'beg' everything up through the start pattern match (inclusive).
	if len(sPtrn) > 0 {
		found, _, stop = lif.findPattern(lines, sPtrn)
		if found {
			beg = lines[:stop+1]
		}
	}
	// If there are no lines left, we are done.
	if stop >= len(lines)-1 {
		return beg, mid, end
	}
	// If there is no end pattern, we are done.
	if len(ePtrn) == 0 {
		return beg, lines[stop+1:], end
	}
	// Find the end pattern in the remaining text.
	mid = lines[stop+1:]
	found, start, _ = lif.findPattern(mid, ePtrn)
	if found {
		// Indices from findPattern are relative to mid, not to lines.
		end = mid[start:]
		mid = mid[:start]
	}
	return beg, mid, end
}
|
package game
// Position is a 2D coordinate on the playing field.
type Position struct {
	x int // horizontal coordinate
	y int // vertical coordinate
}
// Projectiles represents a single projectile in flight, tagged with the
// player that fired it.
type Projectiles struct {
	owner string // identifier of the firing player
	pos Position // current location of the projectile
}
// Frame is a snapshot of the game state: both player positions and all
// live projectiles.
type Frame struct {
	player1Pos Position
	player2Pos Position
	projectiles []Projectiles
}
|
package graphql_test
import (
"context"
"encoding/json"
"errors"
"fmt"
"reflect"
"testing"
"time"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/gqlerrors"
"github.com/graphql-go/graphql/language/location"
"github.com/graphql-go/graphql/testutil"
)
// TestExecutesArbitraryCode is the executor's broad smoke test: scalar
// fields, resolver funcs, field arguments, aliases, named and inline
// fragments, and mutually-recursive object types, all in one query.
func TestExecutesArbitraryCode(t *testing.T) {
	// deepData is populated after data because the two maps reference each
	// other (data["deep"] -> deepData, deepData["deeper"] -> data).
	deepData := map[string]interface{}{}
	data := map[string]interface{}{
		"a": func() interface{} { return "Apple" },
		"b": func() interface{} { return "Banana" },
		"c": func() interface{} { return "Cookie" },
		"d": func() interface{} { return "Donut" },
		"e": func() interface{} { return "Egg" },
		"f": "Fish",
		"pic": func(size int) string {
			return fmt.Sprintf("Pic of size: %v", size)
		},
		"deep": func() interface{} { return deepData },
	}
	data["promise"] = func() interface{} {
		return data
	}
	deepData = map[string]interface{}{
		"a":      func() interface{} { return "Already Been Done" },
		"b":      func() interface{} { return "Boring" },
		"c":      func() interface{} { return []string{"Contrived", "", "Confusing"} },
		"deeper": func() interface{} { return []interface{}{data, nil, data} },
	}
	query := `
      query Example($size: Int) {
        a,
        b,
        x: c
        ...c
        f
        ...on DataType {
          pic(size: $size)
          promise {
            a
          }
        }
        deep {
          a
          b
          c
          deeper {
            a
            b
          }
        }
      }
      fragment c on DataType {
        d
        e
      }
    `
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"b": "Banana",
			"x": "Cookie",
			"d": "Donut",
			"e": "Egg",
			"promise": map[string]interface{}{
				"a": "Apple",
			},
			"a": "Apple",
			"deep": map[string]interface{}{
				"a": "Already Been Done",
				"b": "Boring",
				"c": []interface{}{
					"Contrived",
					"",
					"Confusing",
				},
				"deeper": []interface{}{
					map[string]interface{}{
						"a": "Apple",
						"b": "Banana",
					},
					nil,
					map[string]interface{}{
						"a": "Apple",
						"b": "Banana",
					},
				},
			},
			"f":   "Fish",
			"pic": "Pic of size: 100",
		},
	}
	// Schema Definitions
	picResolverFn := func(p graphql.ResolveParams) (interface{}, error) {
		// get and type assert ResolveFn for this field
		picResolver, ok := p.Source.(map[string]interface{})["pic"].(func(size int) string)
		if !ok {
			return nil, nil
		}
		// get and type assert argument
		sizeArg, ok := p.Args["size"].(int)
		if !ok {
			return nil, nil
		}
		return picResolver(sizeArg), nil
	}
	dataType := graphql.NewObject(graphql.ObjectConfig{
		Name: "DataType",
		Fields: graphql.Fields{
			"a": &graphql.Field{
				Type: graphql.String,
			},
			"b": &graphql.Field{
				Type: graphql.String,
			},
			"c": &graphql.Field{
				Type: graphql.String,
			},
			"d": &graphql.Field{
				Type: graphql.String,
			},
			"e": &graphql.Field{
				Type: graphql.String,
			},
			"f": &graphql.Field{
				Type: graphql.String,
			},
			"pic": &graphql.Field{
				Args: graphql.FieldConfigArgument{
					"size": &graphql.ArgumentConfig{
						Type: graphql.Int,
					},
				},
				Type:    graphql.String,
				Resolve: picResolverFn,
			},
		},
	})
	deepDataType := graphql.NewObject(graphql.ObjectConfig{
		Name: "DeepDataType",
		Fields: graphql.Fields{
			"a": &graphql.Field{
				Type: graphql.String,
			},
			"b": &graphql.Field{
				Type: graphql.String,
			},
			"c": &graphql.Field{
				Type: graphql.NewList(graphql.String),
			},
			"deeper": &graphql.Field{
				Type: graphql.NewList(dataType),
			},
		},
	})
	// Exploring a way to have a Object within itself
	// in this case DataType has DeepDataType has DataType
	dataType.AddFieldConfig("deep", &graphql.Field{
		Type: deepDataType,
	})
	// in this case DataType has DataType
	dataType.AddFieldConfig("promise", &graphql.Field{
		Type: dataType,
	})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: dataType,
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	astDoc := testutil.TestParse(t, query)
	// execute
	args := map[string]interface{}{
		"size": 100,
	}
	operationName := "Example"
	ep := graphql.ExecuteParams{
		Schema:        schema,
		Root:          data,
		AST:           astDoc,
		OperationName: operationName,
		Args:          args,
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestMergesParallelFragments checks that fields selected by multiple
// fragments on the same type — including overlapping "deep" selections —
// are merged into a single result object.
func TestMergesParallelFragments(t *testing.T) {
	query := `
      { a, ...FragOne, ...FragTwo }
      fragment FragOne on Type {
        b
        deep { b, deeper: deep { b } }
      }
      fragment FragTwo on Type {
        c
        deep { c, deeper: deep { c } }
      }
    `
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"a": "Apple",
			"b": "Banana",
			"deep": map[string]interface{}{
				"c": "Cherry",
				"b": "Banana",
				"deeper": map[string]interface{}{
					"b": "Banana",
					"c": "Cherry",
				},
			},
			"c": "Cherry",
		},
	}
	typeObjectType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Type",
		Fields: graphql.Fields{
			"a": &graphql.Field{
				Type: graphql.String,
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return "Apple", nil
				},
			},
			"b": &graphql.Field{
				Type: graphql.String,
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return "Banana", nil
				},
			},
			"c": &graphql.Field{
				Type: graphql.String,
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return "Cherry", nil
				},
			},
		},
	})
	// "deep" resolves to its own source, making the type recursive.
	deepTypeFieldConfig := &graphql.Field{
		Type: typeObjectType,
		Resolve: func(p graphql.ResolveParams) (interface{}, error) {
			return p.Source, nil
		},
	}
	typeObjectType.AddFieldConfig("deep", deepTypeFieldConfig)
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: typeObjectType,
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, query)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// CustomMap is a named map type used by TestCustomMapType to verify the
// executor resolves fields from user-defined map types, not just plain maps.
type CustomMap map[string]interface{}
// TestCustomMapType checks that a resolver may return a named map type
// (CustomMap) and that only the requested field ("a") appears in the result.
func TestCustomMapType(t *testing.T) {
	query := `
		query Example { data { a } }
	`
	data := CustomMap{
		"a": "1",
		"b": "2",
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "RootQuery",
			Fields: graphql.Fields{
				"data": &graphql.Field{
					Type: graphql.NewObject(graphql.ObjectConfig{
						Name: "Data",
						Fields: graphql.Fields{
							"a": &graphql.Field{
								Type: graphql.String,
							},
							"b": &graphql.Field{
								Type: graphql.String,
							},
						},
					}),
					Resolve: func(p graphql.ResolveParams) (interface{}, error) {
						return data, nil
					},
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	result := testutil.TestExecute(t, graphql.ExecuteParams{
		Schema: schema,
		Root:   data,
		AST:    testutil.TestParse(t, query),
	})
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	expected := map[string]interface{}{
		"data": map[string]interface{}{
			"a": "1",
		},
	}
	if !reflect.DeepEqual(result.Data, expected) {
		t.Fatalf("Expected context.key to equal %v, got %v", expected, result.Data)
	}
}
// TestThreadsSourceCorrectly checks that the root value supplied in
// ExecuteParams.Root is delivered to resolvers as p.Source.
func TestThreadsSourceCorrectly(t *testing.T) {
	query := `
      query Example { a }
    `
	data := map[string]interface{}{
		"key": "value",
	}
	// Captured by the resolver below for the assertion at the end.
	var resolvedSource map[string]interface{}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
					Resolve: func(p graphql.ResolveParams) (interface{}, error) {
						resolvedSource = p.Source.(map[string]interface{})
						return resolvedSource, nil
					},
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, query)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		Root:   data,
		AST:    ast,
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	expected := "value"
	if resolvedSource["key"] != expected {
		t.Fatalf("Expected context.key to equal %v, got %v", expected, resolvedSource["key"])
	}
}
// TestCorrectlyThreadsArguments checks that field arguments from the query
// are delivered to the resolver via ResolveParams.Args.
func TestCorrectlyThreadsArguments(t *testing.T) {
	query := `
      query Example {
        b(numArg: 123, stringArg: "foo")
      }
    `
	// Captured by the resolver below for the assertions at the end.
	var resolvedArgs map[string]interface{}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"b": &graphql.Field{
					Args: graphql.FieldConfigArgument{
						"numArg": &graphql.ArgumentConfig{
							Type: graphql.Int,
						},
						"stringArg": &graphql.ArgumentConfig{
							Type: graphql.String,
						},
					},
					Type: graphql.String,
					Resolve: func(p graphql.ResolveParams) (interface{}, error) {
						resolvedArgs = p.Args
						return resolvedArgs, nil
					},
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, query)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	expectedNum := 123
	expectedString := "foo"
	if resolvedArgs["numArg"] != expectedNum {
		t.Fatalf("Expected args.numArg to equal `%v`, got `%v`", expectedNum, resolvedArgs["numArg"])
	}
	if resolvedArgs["stringArg"] != expectedString {
		// Fixed: this failure message previously interpolated expectedNum
		// instead of expectedString.
		t.Fatalf("Expected args.stringArg to equal `%v`, got `%v`", expectedString, resolvedArgs["stringArg"])
	}
}
// TestThreadsRootValueContextCorrectly checks that resolvers can read the
// root value via p.Info.RootValue.
func TestThreadsRootValueContextCorrectly(t *testing.T) {
	query := `
      query Example { a }
    `
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
					Resolve: func(p graphql.ResolveParams) (interface{}, error) {
						val, _ := p.Info.RootValue.(map[string]interface{})["stringKey"].(string)
						return val, nil
					},
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, query)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
		Root: map[string]interface{}{
			"stringKey": "stringValue",
		},
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"a": "stringValue",
		},
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestThreadsContextCorrectly checks that the context passed in
// ExecuteParams reaches resolvers as p.Context.
// NOTE(review): a plain string context key triggers go vet's collision
// warning in production code; acceptable inside a test.
func TestThreadsContextCorrectly(t *testing.T) {
	query := `
      query Example { a }
    `
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
					Resolve: func(p graphql.ResolveParams) (interface{}, error) {
						return p.Context.Value("foo"), nil
					},
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, query)
	// execute
	ep := graphql.ExecuteParams{
		Schema:  schema,
		AST:     ast,
		Context: context.WithValue(context.Background(), "foo", "bar"),
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"a": "bar",
		},
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestNullsOutErrorSubtrees checks that a panicking resolver nulls only its
// own field and surfaces a located, pathed error, while sibling fields still
// resolve normally.
func TestNullsOutErrorSubtrees(t *testing.T) {
	// TODO: TestNullsOutErrorSubtrees test for go-routines if implemented
	query := `{
      sync,
      syncError,
    }`
	expectedData := map[string]interface{}{
		"sync":      "sync",
		"syncError": nil,
	}
	expectedErrors := []gqlerrors.FormattedError{{
		Message: "Error getting syncError",
		Locations: []location.SourceLocation{
			{
				Line: 3, Column: 7,
			},
		},
		Path: []interface{}{
			"syncError",
		},
	},
	}
	data := map[string]interface{}{
		"sync": func() interface{} {
			return "sync"
		},
		// The panic is recovered by the executor and converted into the
		// field error asserted above.
		"syncError": func() interface{} {
			panic("Error getting syncError")
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"sync": &graphql.Field{
					Type: graphql.String,
				},
				"syncError": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, query)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
		Root:   data,
	}
	result := testutil.TestExecute(t, ep)
	if !reflect.DeepEqual(expectedData, result.Data) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expectedData, result.Data))
	}
	if !testutil.EqualFormattedErrors(expectedErrors, result.Errors) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expectedErrors, result.Errors))
	}
}
// TestUsesTheInlineOperationIfNoOperationNameIsProvided checks that an
// anonymous (shorthand) operation is executed when no operation name is given.
func TestUsesTheInlineOperationIfNoOperationNameIsProvided(t *testing.T) {
	doc := `{ a }`
	data := map[string]interface{}{
		"a": "b",
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"a": "b",
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
		Root:   data,
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestUsesTheOnlyOperationIfNoOperationNameIsProvided checks that a single
// named operation is executed even when no operation name is supplied.
func TestUsesTheOnlyOperationIfNoOperationNameIsProvided(t *testing.T) {
	doc := `query Example { a }`
	data := map[string]interface{}{
		"a": "b",
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"a": "b",
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
		Root:   data,
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestUsesTheNamedOperationIfOperationNameIsProvided checks that, given two
// operations, the one matching OperationName ("OtherExample") is executed.
func TestUsesTheNamedOperationIfOperationNameIsProvided(t *testing.T) {
	doc := `query Example { first: a } query OtherExample { second: a }`
	data := map[string]interface{}{
		"a": "b",
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"second": "b",
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema:        schema,
		AST:           ast,
		Root:          data,
		OperationName: "OtherExample",
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestThrowsIfNoOperationIsProvided checks that a document containing only a
// fragment (no operation) produces the "Must provide an operation." error.
func TestThrowsIfNoOperationIsProvided(t *testing.T) {
	doc := `fragment Example on Type { a }`
	data := map[string]interface{}{
		"a": "b",
	}
	expectedErrors := []gqlerrors.FormattedError{
		{
			Message:   "Must provide an operation.",
			Locations: []location.SourceLocation{},
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
		Root:   data,
	}
	result := testutil.TestExecute(t, ep)
	if result.Data != nil {
		t.Fatalf("wrong result, expected nil result.Data, got %v", result.Data)
	}
	if !testutil.EqualFormattedErrors(expectedErrors, result.Errors) {
		t.Fatalf("unexpected result, Diff: %v", testutil.Diff(expectedErrors, result.Errors))
	}
}
// TestThrowsIfNoOperationNameIsProvidedWithMultipleOperations checks that a
// multi-operation document without an OperationName is rejected.
func TestThrowsIfNoOperationNameIsProvidedWithMultipleOperations(t *testing.T) {
	doc := `query Example { a } query OtherExample { a }`
	data := map[string]interface{}{
		"a": "b",
	}
	expectedErrors := []gqlerrors.FormattedError{
		{
			Message:   "Must provide operation name if query contains multiple operations.",
			Locations: []location.SourceLocation{},
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
		Root:   data,
	}
	result := testutil.TestExecute(t, ep)
	if result.Data != nil {
		t.Fatalf("wrong result, expected nil result.Data, got %v", result.Data)
	}
	if !testutil.EqualFormattedErrors(expectedErrors, result.Errors) {
		t.Fatalf("unexpected result, Diff: %v", testutil.Diff(expectedErrors, result.Errors))
	}
}
// TestThrowsIfUnknownOperationNameIsProvided checks that asking for an
// operation name not present in the document is rejected.
func TestThrowsIfUnknownOperationNameIsProvided(t *testing.T) {
	doc := `query Example { a } query OtherExample { a }`
	data := map[string]interface{}{
		"a": "b",
	}
	expectedErrors := []gqlerrors.FormattedError{
		{
			Message:   `Unknown operation named "UnknownExample".`,
			Locations: []location.SourceLocation{},
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema:        schema,
		AST:           ast,
		Root:          data,
		OperationName: "UnknownExample",
	}
	result := testutil.TestExecute(t, ep)
	if result.Data != nil {
		t.Fatalf("wrong result, expected nil result.Data, got %v", result.Data)
	}
	if !testutil.EqualFormattedErrors(expectedErrors, result.Errors) {
		t.Fatalf("unexpected result, Diff: %v", testutil.Diff(expectedErrors, result.Errors))
	}
}
// TestThrowsIfOperationTypeIsUnsupported checks that running a mutation or a
// subscription against a query-only schema yields the appropriate
// "not configured" error for each operation type.
func TestThrowsIfOperationTypeIsUnsupported(t *testing.T) {
	query := `mutation Mut { a } subscription Sub { a }`
	operations := []string{"Mut", "Sub"}
	expectedErrors := [][]gqlerrors.FormattedError{
		{{
			Message:   `Schema is not configured for mutations`,
			Locations: []location.SourceLocation{{Line: 1, Column: 1}},
		}},
		{{
			Message:   `Schema is not configured for subscriptions`,
			Locations: []location.SourceLocation{{Line: 1, Column: 20}},
		}},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Query",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, query)
	for opIndex, operation := range operations {
		// Intentional shadowing: the inner expectedErrors is the
		// per-operation entry of the outer slice.
		expectedErrors := expectedErrors[opIndex]
		// execute
		ep := graphql.ExecuteParams{
			Schema:        schema,
			AST:           ast,
			OperationName: operation,
		}
		result := testutil.TestExecute(t, ep)
		if result.Data != nil {
			t.Fatalf("wrong result, expected nil result.Data, got %v", result.Data)
		}
		if !testutil.EqualFormattedErrors(expectedErrors, result.Errors) {
			t.Fatalf("unexpected result, Diff: %v", testutil.Diff(expectedErrors, result.Errors))
		}
	}
}
// TestUsesTheQuerySchemaForQueries checks that selecting operation "Q"
// routes execution through the schema's Query root (not Mutation or
// Subscription).
func TestUsesTheQuerySchemaForQueries(t *testing.T) {
	doc := `query Q { a } mutation M { c } subscription S { a }`
	data := map[string]interface{}{
		"a": "b",
		"c": "d",
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"a": "b",
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Q",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
		Mutation: graphql.NewObject(graphql.ObjectConfig{
			Name: "M",
			Fields: graphql.Fields{
				"c": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
		Subscription: graphql.NewObject(graphql.ObjectConfig{
			Name: "S",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema:        schema,
		AST:           ast,
		Root:          data,
		OperationName: "Q",
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestUsesTheMutationSchemaForMutations executes the named operation "M" and
// expects only the mutation root's field in the result, even though the
// document also defines a query.
func TestUsesTheMutationSchemaForMutations(t *testing.T) {
	doc := `query Q { a } mutation M { c }`
	data := map[string]interface{}{
		"a": "b",
		"c": "d",
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"c": "d",
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Q",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
		Mutation: graphql.NewObject(graphql.ObjectConfig{
			Name: "M",
			Fields: graphql.Fields{
				"c": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema:        schema,
		AST:           ast,
		Root:          data,
		OperationName: "M",
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestUsesTheSubscriptionSchemaForSubscriptions executes the named operation
// "S" and expects the subscription root's field in the result.
func TestUsesTheSubscriptionSchemaForSubscriptions(t *testing.T) {
	doc := `query Q { a } subscription S { a }`
	data := map[string]interface{}{
		"a": "b",
		"c": "d",
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"a": "b",
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Q",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
		Subscription: graphql.NewObject(graphql.ObjectConfig{
			Name: "S",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema:        schema,
		AST:           ast,
		Root:          data,
		OperationName: "S",
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestCorrectFieldOrderingDespiteExecutionOrder resolves five fields requested
// out of alphabetical order and checks the combined result values. The
// key-ordering assertion itself is skipped (see the TODO at the bottom):
// Go maps cannot express insertion order.
func TestCorrectFieldOrderingDespiteExecutionOrder(t *testing.T) {
	doc := `
	{
	  b,
	  a,
	  c,
	  d,
	  e
	}
	`
	// Each field resolves through a function so execution order is driven by
	// the executor, not the data literal.
	data := map[string]interface{}{
		"a": func() interface{} { return "a" },
		"b": func() interface{} { return "b" },
		"c": func() interface{} { return "c" },
		"d": func() interface{} { return "d" },
		"e": func() interface{} { return "e" },
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"a": "a",
			"b": "b",
			"c": "c",
			"d": "d",
			"e": "e",
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
				"b": &graphql.Field{
					Type: graphql.String,
				},
				"c": &graphql.Field{
					Type: graphql.String,
				},
				"d": &graphql.Field{
					Type: graphql.String,
				},
				"e": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
		Root:   data,
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
	// TODO: test to ensure key ordering
	// The following does not work
	// - iterating over result.Data map
	//   Note that golang's map iteration order is randomized
	//   So, iterating over result.Data won't do it for a test
	// - Marshal the result.Data to json string and assert it
	//   json.Marshal seems to re-sort the keys automatically
	//
	t.Skipf("TODO: Ensure key ordering")
}
// TestAvoidsRecursion executes a query whose fragment spreads itself (and is
// spread twice), and expects a single resolved "a" field — i.e. the executor
// deduplicates fields and does not loop on the recursive fragment.
func TestAvoidsRecursion(t *testing.T) {
	doc := `
	query Q {
	  a
	  ...Frag
	  ...Frag
	}
	fragment Frag on Type {
	  a,
	  ...Frag
	}
	`
	data := map[string]interface{}{
		"a": "b",
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"a": "b",
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema:        schema,
		AST:           ast,
		Root:          data,
		OperationName: "Q",
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestDoesNotIncludeIllegalFieldsInOutput requests a field that does not exist
// on the mutation root and expects it to be silently omitted from the result
// data (empty map, no errors).
func TestDoesNotIncludeIllegalFieldsInOutput(t *testing.T) {
	doc := `mutation M {
	  thisIsIllegalDontIncludeMe
	}`
	expected := &graphql.Result{
		Data: map[string]interface{}{},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Q",
			Fields: graphql.Fields{
				"a": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
		Mutation: graphql.NewObject(graphql.ObjectConfig{
			Name: "M",
			Fields: graphql.Fields{
				"c": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) != 0 {
		t.Fatalf("wrong result, expected len(%v) errors, got len(%v)", len(expected.Errors), len(result.Errors))
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestDoesNotIncludeArgumentsThatWereNotSet declares five arguments but passes
// only three in the query; the resolver echoes p.Args as JSON so the test can
// assert that unset arguments ("b", "d") are absent from the map.
func TestDoesNotIncludeArgumentsThatWereNotSet(t *testing.T) {
	doc := `{ field(a: true, c: false, e: 0) }`
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"field": `{"a":true,"c":false,"e":0}`,
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Type",
			Fields: graphql.Fields{
				"field": &graphql.Field{
					Type: graphql.String,
					Args: graphql.FieldConfigArgument{
						"a": &graphql.ArgumentConfig{
							Type: graphql.Boolean,
						},
						"b": &graphql.ArgumentConfig{
							Type: graphql.Boolean,
						},
						"c": &graphql.ArgumentConfig{
							Type: graphql.Boolean,
						},
						"d": &graphql.ArgumentConfig{
							Type: graphql.Int,
						},
						"e": &graphql.ArgumentConfig{
							Type: graphql.Int,
						},
					},
					// Serialize the received args so the assertion can inspect
					// exactly which keys were populated.
					Resolve: func(p graphql.ResolveParams) (interface{}, error) {
						args, _ := json.Marshal(p.Args)
						return string(args), nil
					},
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, doc)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
	}
	result := testutil.TestExecute(t, ep)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// testSpecialType is the value accepted by SpecialType's IsTypeOf check in
// TestFailsWhenAnIsTypeOfCheckIsNotMet.
type testSpecialType struct {
	Value string
}

// testNotSpecialType has the same shape as testSpecialType but is rejected by
// the IsTypeOf check, triggering the expected runtime type error.
type testNotSpecialType struct {
	Value string
}
// TestFailsWhenAnIsTypeOfCheckIsNotMet resolves a list containing one value
// that fails SpecialType's IsTypeOf check: the failing element becomes nil in
// the data and a typed error with location and path is reported.
func TestFailsWhenAnIsTypeOfCheckIsNotMet(t *testing.T) {
	query := `{ specials { value } }`
	data := map[string]interface{}{
		"specials": []interface{}{
			testSpecialType{"foo"},
			testNotSpecialType{"bar"},
		},
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"specials": []interface{}{
				map[string]interface{}{
					"value": "foo",
				},
				nil,
			},
		},
		Errors: []gqlerrors.FormattedError{{
			Message: `Expected value of type "SpecialType" but got: graphql_test.testNotSpecialType.`,
			Locations: []location.SourceLocation{
				{
					Line:   1,
					Column: 3,
				},
			},
			// Path pinpoints the failing list element.
			Path: []interface{}{
				"specials",
				1,
			},
		},
		},
	}
	specialType := graphql.NewObject(graphql.ObjectConfig{
		Name: "SpecialType",
		// Only testSpecialType values are considered instances of SpecialType.
		IsTypeOf: func(p graphql.IsTypeOfParams) bool {
			if _, ok := p.Value.(testSpecialType); ok {
				return true
			}
			return false
		},
		Fields: graphql.Fields{
			"value": &graphql.Field{
				Type: graphql.String,
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return p.Source.(testSpecialType).Value, nil
				},
			},
		},
	})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Query",
			Fields: graphql.Fields{
				"specials": &graphql.Field{
					Type: graphql.NewList(specialType),
					Resolve: func(p graphql.ResolveParams) (interface{}, error) {
						return p.Source.(map[string]interface{})["specials"], nil
					},
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, query)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
		Root:   data,
	}
	result := testutil.TestExecute(t, ep)
	if !testutil.EqualResults(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestFailsToExecuteQueryContainingATypeDefinition executes a request that
// embeds a type definition alongside a query and expects execution to be
// rejected with nil data and an "cannot execute a request containing a
// ObjectDefinition" error.
func TestFailsToExecuteQueryContainingATypeDefinition(t *testing.T) {
	query := `
	{ foo }
	type Query { foo: String }
	`
	expected := &graphql.Result{
		Data: nil,
		Errors: []gqlerrors.FormattedError{
			{
				Message:   "GraphQL cannot execute a request containing a ObjectDefinition",
				Locations: []location.SourceLocation{},
			},
		},
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Query",
			Fields: graphql.Fields{
				"foo": &graphql.Field{
					Type: graphql.String,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	// parse query
	ast := testutil.TestParse(t, query)
	// execute
	ep := graphql.ExecuteParams{
		Schema: schema,
		AST:    ast,
	}
	result := testutil.TestExecute(t, ep)
	if !testutil.EqualResults(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestQuery_ExecutionAddsErrorsFromFieldResolveFn queries a field whose
// resolver returns an error and expects that error to surface in
// result.Errors with the resolver's message.
func TestQuery_ExecutionAddsErrorsFromFieldResolveFn(t *testing.T) {
	qError := errors.New("queryError")
	q := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			// "a" always fails; "b" always succeeds.
			"a": &graphql.Field{
				Type: graphql.String,
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return nil, qError
				},
			},
			"b": &graphql.Field{
				Type: graphql.String,
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return "ok", nil
				},
			},
		},
	})
	blogSchema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: q,
	})
	if err != nil {
		t.Fatalf("unexpected error, got: %v", err)
	}
	query := "{ a }"
	result := graphql.Do(graphql.Params{
		Schema:        blogSchema,
		RequestString: query,
	})
	if len(result.Errors) == 0 {
		t.Fatal("wrong result, expected errors, got no errors")
	}
	if result.Errors[0].Error() != qError.Error() {
		t.Fatalf("wrong result, unexpected error, got: %v, expected: %v", result.Errors[0], qError)
	}
}
// TestQuery_ExecutionDoesNotAddErrorsFromFieldResolveFn queries only the
// succeeding field "b" and expects no errors, even though the sibling field
// "a" has a failing resolver that is never invoked.
func TestQuery_ExecutionDoesNotAddErrorsFromFieldResolveFn(t *testing.T) {
	qError := errors.New("queryError")
	q := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			"a": &graphql.Field{
				Type: graphql.String,
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return nil, qError
				},
			},
			"b": &graphql.Field{
				Type: graphql.String,
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return "ok", nil
				},
			},
		},
	})
	blogSchema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: q,
	})
	if err != nil {
		t.Fatalf("unexpected error, got: %v", err)
	}
	query := "{ b }"
	result := graphql.Do(graphql.Params{
		Schema:        blogSchema,
		RequestString: query,
	})
	if len(result.Errors) != 0 {
		t.Fatalf("wrong result, unexpected errors: %+v", result.Errors)
	}
}
// TestQuery_InputObjectUsesFieldDefaultValueFn passes an empty input object
// and expects the input field's DefaultValue ("bar") to be filled in; the
// resolver errors if the default is missing or has the wrong value.
func TestQuery_InputObjectUsesFieldDefaultValueFn(t *testing.T) {
	inputType := graphql.NewInputObject(graphql.InputObjectConfig{
		Name: "Input",
		Fields: graphql.InputObjectConfigFieldMap{
			"default": &graphql.InputObjectFieldConfig{
				Type:         graphql.String,
				DefaultValue: "bar",
			},
		},
	})
	q := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			"a": &graphql.Field{
				Type: graphql.String,
				Args: graphql.FieldConfigArgument{
					"foo": &graphql.ArgumentConfig{
						Type: graphql.NewNonNull(inputType),
					},
				},
				// Validate inside the resolver that the default was applied.
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					val := p.Args["foo"].(map[string]interface{})
					def, ok := val["default"]
					if !ok || def == nil {
						return nil, errors.New("queryError: No 'default' param")
					}
					if def.(string) != "bar" {
						return nil, errors.New("queryError: 'default' param has wrong value")
					}
					return "ok", nil
				},
			},
		},
	})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: q,
	})
	if err != nil {
		t.Fatalf("unexpected error, got: %v", err)
	}
	query := `{ a(foo: {}) }`
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	if len(result.Errors) != 0 {
		t.Fatalf("wrong result, unexpected errors: %+v", result.Errors)
	}
}
// TestMutation_ExecutionAddsErrorsFromFieldResolveFn runs a mutation whose
// resolver fails and expects that error (by message) in result.Errors.
func TestMutation_ExecutionAddsErrorsFromFieldResolveFn(t *testing.T) {
	mError := errors.New("mutationError")
	q := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			"a": &graphql.Field{
				Type: graphql.String,
			},
		},
	})
	m := graphql.NewObject(graphql.ObjectConfig{
		Name: "Mutation",
		Fields: graphql.Fields{
			// "foo" always fails; "bar" always succeeds.
			"foo": &graphql.Field{
				Type: graphql.String,
				Args: graphql.FieldConfigArgument{
					"f": &graphql.ArgumentConfig{
						Type: graphql.String,
					},
				},
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return nil, mError
				},
			},
			"bar": &graphql.Field{
				Type: graphql.String,
				Args: graphql.FieldConfigArgument{
					"b": &graphql.ArgumentConfig{
						Type: graphql.String,
					},
				},
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return "ok", nil
				},
			},
		},
	})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query:    q,
		Mutation: m,
	})
	if err != nil {
		t.Fatalf("unexpected error, got: %v", err)
	}
	query := "mutation _ { newFoo: foo(f:\"title\") }"
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	if len(result.Errors) == 0 {
		t.Fatal("wrong result, expected errors, got no errors")
	}
	if result.Errors[0].Error() != mError.Error() {
		t.Fatalf("wrong result, unexpected error, got: %v, expected: %v", result.Errors[0], mError)
	}
}
// TestMutation_ExecutionDoesNotAddErrorsFromFieldResolveFn runs only the
// succeeding mutation "bar" and expects no errors, even though the sibling
// mutation "foo" has a failing resolver that is never invoked.
func TestMutation_ExecutionDoesNotAddErrorsFromFieldResolveFn(t *testing.T) {
	mError := errors.New("mutationError")
	q := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			"a": &graphql.Field{
				Type: graphql.String,
			},
		},
	})
	m := graphql.NewObject(graphql.ObjectConfig{
		Name: "Mutation",
		Fields: graphql.Fields{
			"foo": &graphql.Field{
				Type: graphql.String,
				Args: graphql.FieldConfigArgument{
					"f": &graphql.ArgumentConfig{
						Type: graphql.String,
					},
				},
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return nil, mError
				},
			},
			"bar": &graphql.Field{
				Type: graphql.String,
				Args: graphql.FieldConfigArgument{
					"b": &graphql.ArgumentConfig{
						Type: graphql.String,
					},
				},
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					return "ok", nil
				},
			},
		},
	})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query:    q,
		Mutation: m,
	})
	if err != nil {
		t.Fatalf("unexpected error, got: %v", err)
	}
	query := "mutation _ { newBar: bar(b:\"title\") }"
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	if len(result.Errors) != 0 {
		t.Fatalf("wrong result, unexpected errors: %+v", result.Errors)
	}
}
// TestGraphqlTag verifies that a struct field tagged `graphql:"fooBar"` is
// resolved for the GraphQL field "fooBar" by the default field resolver.
func TestGraphqlTag(t *testing.T) {
	typeObjectType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Type",
		Fields: graphql.Fields{
			"fooBar": &graphql.Field{Type: graphql.String},
		},
	})
	var baz = &graphql.Field{
		Type:        typeObjectType,
		Description: "typeObjectType",
		// Return an anonymous struct whose only field maps to "fooBar" via tag.
		Resolve: func(p graphql.ResolveParams) (interface{}, error) {
			t := struct {
				FooBar string `graphql:"fooBar"`
			}{"foo bar value"}
			return t, nil
		},
	}
	q := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			"baz": baz,
		},
	})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: q,
	})
	if err != nil {
		t.Fatalf("unexpected error, got: %v", err)
	}
	query := "{ baz { fooBar } }"
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	if len(result.Errors) != 0 {
		t.Fatalf("wrong result, unexpected errors: %+v", result.Errors)
	}
	expectedData := map[string]interface{}{
		"baz": map[string]interface{}{
			"fooBar": "foo bar value",
		},
	}
	if !reflect.DeepEqual(result.Data, expectedData) {
		// Fix: arguments were previously swapped relative to the
		// "got: ..., expected: ..." message (expectedData was printed as "got").
		t.Fatalf("unexpected result, got: %+v, expected: %+v", result.Data, expectedData)
	}
}
// TestFieldResolver verifies that a source value implementing the custom
// resolver interface (testCustomResolver, below) is used to resolve child
// fields, both when returned by value and by pointer.
func TestFieldResolver(t *testing.T) {
	typeObjectType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Type",
		Fields: graphql.Fields{
			"fooBar": &graphql.Field{Type: graphql.String},
		},
	})
	var baz = &graphql.Field{
		Type:        typeObjectType,
		Description: "typeObjectType",
		Resolve: func(p graphql.ResolveParams) (interface{}, error) {
			return testCustomResolver{}, nil
		},
	}
	var bazPtr = &graphql.Field{
		Type:        typeObjectType,
		Description: "typeObjectType",
		Resolve: func(p graphql.ResolveParams) (interface{}, error) {
			return &testCustomResolver{}, nil
		},
	}
	q := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			"baz":    baz,
			"bazPtr": bazPtr,
		},
	})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: q,
	})
	if err != nil {
		t.Fatalf("unexpected error, got: %v", err)
	}
	query := "{ baz { fooBar }, bazPtr { fooBar } }"
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	if len(result.Errors) != 0 {
		t.Fatalf("wrong result, unexpected errors: %+v", result.Errors)
	}
	expectedData := map[string]interface{}{
		"baz": map[string]interface{}{
			"fooBar": "foo bar value",
		},
		"bazPtr": map[string]interface{}{
			"fooBar": "foo bar value",
		},
	}
	if !reflect.DeepEqual(result.Data, expectedData) {
		t.Fatalf("unexpected result, got: %+v, expected: %+v", result.Data, expectedData)
	}
}
// testCustomResolver is a source value with its own Resolve method, used by
// TestFieldResolver to exercise custom field resolution.
type testCustomResolver struct{}

// Resolve answers "foo bar value" for the "fooBar" field and fails for any
// other field name.
func (r testCustomResolver) Resolve(p graphql.ResolveParams) (interface{}, error) {
	switch p.Info.FieldName {
	case "fooBar":
		return "foo bar value", nil
	default:
		return "", errors.New("invalid field " + p.Info.FieldName)
	}
}
// TestContextDeadline runs a query whose resolver sleeps well past the
// request context's 100ms deadline and verifies that graphql.Do returns
// promptly (within the timeout plus a small delay) with a
// context.DeadlineExceeded error instead of waiting for the resolver.
func TestContextDeadline(t *testing.T) {
	timeout := time.Millisecond * time.Duration(100)
	acceptableDelay := time.Millisecond * time.Duration(10)
	expectedErrors := []gqlerrors.FormattedError{
		{
			Message:   context.DeadlineExceeded.Error(),
			Locations: []location.SourceLocation{},
		},
	}
	// Query type includes a field that won't resolve within the deadline
	var queryType = graphql.NewObject(
		graphql.ObjectConfig{
			Name: "Query",
			Fields: graphql.Fields{
				"hello": &graphql.Field{
					Type: graphql.String,
					Resolve: func(p graphql.ResolveParams) (interface{}, error) {
						time.Sleep(2 * time.Second)
						return "world", nil
					},
				},
			},
		})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: queryType,
	})
	if err != nil {
		t.Fatalf("unexpected error, got: %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	startTime := time.Now()
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: "{hello}",
		Context:       ctx,
	})
	duration := time.Since(startTime)
	// The call must return near the deadline, not after the 2s sleep.
	if duration > timeout+acceptableDelay {
		t.Fatalf("graphql.Do completed in %s, should have completed in %s", duration, timeout)
	}
	if !result.HasErrors() || len(result.Errors) == 0 {
		t.Fatalf("Result should include errors when deadline is exceeded")
	}
	if !testutil.EqualFormattedErrors(expectedErrors, result.Errors) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expectedErrors, result.Errors))
	}
}
// TestThunkResultsProcessedCorrectly has a resolver return a thunk
// (func() (interface{}, error)) instead of a value and verifies that the
// executor invokes the thunk and resolves the sub-fields of its result.
func TestThunkResultsProcessedCorrectly(t *testing.T) {
	barType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Bar",
		Fields: graphql.Fields{
			"bazA": &graphql.Field{
				Type: graphql.String,
			},
			"bazB": &graphql.Field{
				Type: graphql.String,
			},
		},
	})
	fooType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Foo",
		Fields: graphql.Fields{
			"bar": &graphql.Field{
				Type: barType,
				// Return the bar value lazily via a thunk.
				Resolve: func(params graphql.ResolveParams) (interface{}, error) {
					var bar struct {
						BazA string
						BazB string
					}
					bar.BazA = "A"
					bar.BazB = "B"
					thunk := func() (interface{}, error) { return &bar, nil }
					return thunk, nil
				},
			},
		},
	})
	queryType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			"foo": &graphql.Field{
				Type: fooType,
				Resolve: func(params graphql.ResolveParams) (interface{}, error) {
					var foo struct{}
					return foo, nil
				},
			},
		},
	})
	expectNoError := func(err error) {
		if err != nil {
			t.Fatalf("expected no error, got %v", err)
		}
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: queryType,
	})
	expectNoError(err)
	query := "{ foo { bar { bazA bazB } } }"
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	if len(result.Errors) != 0 {
		t.Fatalf("expected no errors, got %v", result.Errors)
	}
	foo := result.Data.(map[string]interface{})["foo"].(map[string]interface{})
	bar, ok := foo["bar"].(map[string]interface{})
	if !ok {
		t.Errorf("expected bar to be a map[string]interface{}: actual = %v", reflect.TypeOf(foo["bar"]))
	} else {
		if got, want := bar["bazA"], "A"; got != want {
			t.Errorf("foo.bar.bazA: got=%v, want=%v", got, want)
		}
		if got, want := bar["bazB"], "B"; got != want {
			t.Errorf("foo.bar.bazB: got=%v, want=%v", got, want)
		}
	}
	// On failure, dump the full result data to aid debugging.
	if t.Failed() {
		b, err := json.Marshal(result.Data)
		expectNoError(err)
		t.Log(string(b))
	}
}
// TestThunkErrorsAreHandledCorrectly verifies that when a thunk returned by a
// resolver produces an error, the error is reported in result.Errors, the
// failing field ("bazC") is nil, and sibling fields still resolve normally.
func TestThunkErrorsAreHandledCorrectly(t *testing.T) {
	var bazCError = errors.New("barC error")
	barType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Bar",
		Fields: graphql.Fields{
			"bazA": &graphql.Field{
				Type: graphql.String,
			},
			"bazB": &graphql.Field{
				Type: graphql.String,
			},
			// bazC's thunk always fails.
			"bazC": &graphql.Field{
				Type: graphql.String,
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					thunk := func() (interface{}, error) {
						return nil, bazCError
					}
					return thunk, nil
				},
			},
		},
	})
	fooType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Foo",
		Fields: graphql.Fields{
			"bar": &graphql.Field{
				Type: barType,
				Resolve: func(params graphql.ResolveParams) (interface{}, error) {
					var bar struct {
						BazA string
						BazB string
					}
					bar.BazA = "A"
					bar.BazB = "B"
					thunk := func() (interface{}, error) {
						return &bar, nil
					}
					return thunk, nil
				},
			},
		},
	})
	queryType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			"foo": &graphql.Field{
				Type: fooType,
				Resolve: func(params graphql.ResolveParams) (interface{}, error) {
					var foo struct{}
					return foo, nil
				},
			},
		},
	})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: queryType,
	})
	if err != nil {
		t.Fatalf("unexpected error, got: %v", err)
	}
	query := "{ foo { bar { bazA bazB bazC } } }"
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	foo := result.Data.(map[string]interface{})["foo"].(map[string]interface{})
	bar, ok := foo["bar"].(map[string]interface{})
	if !ok {
		t.Errorf("expected bar to be a map[string]interface{}: actual = %v", reflect.TypeOf(foo["bar"]))
	} else {
		if got, want := bar["bazA"], "A"; got != want {
			t.Errorf("foo.bar.bazA: got=%v, want=%v", got, want)
		}
		if got, want := bar["bazB"], "B"; got != want {
			t.Errorf("foo.bar.bazB: got=%v, want=%v", got, want)
		}
		// bazC must be nil because its thunk errored.
		if got := bar["bazC"]; got != nil {
			t.Errorf("foo.bar.bazC: got=%v, want=nil", got)
		}
		var errs = result.Errors
		if len(errs) != 1 {
			t.Fatalf("expected 1 error, got %v", result.Errors)
		}
		if got, want := errs[0].Message, bazCError.Error(); got != want {
			t.Errorf("expected error: got=%v, want=%v", got, want)
		}
	}
	// On failure, dump the full result data to aid debugging.
	if t.Failed() {
		b, err := json.Marshal(result.Data)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		t.Log(string(b))
	}
}
// assertJSON fails the test unless actual, marshaled to JSON, is structurally
// equal to the expected JSON string. Both sides are round-tripped through
// json.Unmarshal so key order and formatting differences are ignored.
func assertJSON(t *testing.T, expected string, actual interface{}) {
	var e interface{}
	if err := json.Unmarshal([]byte(expected), &e); err != nil {
		// Fix: was t.Fatalf(err.Error()) — passing a non-constant string as a
		// format string (go vet printf); a '%' in the message would garble it.
		t.Fatal(err)
	}
	aJSON, err := json.MarshalIndent(actual, "", " ")
	if err != nil {
		t.Fatal(err)
	}
	var a interface{}
	if err := json.Unmarshal(aJSON, &a); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(e, a) {
		// Re-marshal the expectation so both sides print with identical layout.
		eNormalizedJSON, err := json.MarshalIndent(e, "", " ")
		if err != nil {
			t.Fatal(err)
		}
		t.Fatalf("Expected JSON:\n\n%v\n\nActual JSON:\n\n%v", string(eNormalizedJSON), string(aJSON))
	}
}
// extendedError wraps an error with a GraphQL extensions map so tests can
// exercise the ExtendedError pathway.
type extendedError struct {
	error
	extensions map[string]interface{}
}

// Extensions returns the attached extensions map, satisfying
// gqlerrors.ExtendedError.
func (err extendedError) Extensions() map[string]interface{} {
	return err.extensions
}

// Compile-time check that *extendedError implements gqlerrors.ExtendedError.
var _ gqlerrors.ExtendedError = &extendedError{}
// testErrors builds a small recursive Hero schema and runs a fixed query
// against it. The hero with ID 1002 has no name, so its "name" resolver
// errors; nameType controls whether that field is nullable, extensions (if
// non-nil) wraps the error in an extendedError carrying them, and
// formatErrorFn (if non-nil) transforms the error before it is returned.
// The graphql.Result is returned for the caller to assert on.
func testErrors(t *testing.T, nameType graphql.Output, extensions map[string]interface{}, formatErrorFn func(err error) error) *graphql.Result {
	type Hero struct {
		Id      string `graphql:"id"`
		Name    string
		Friends []Hero `graphql:"friends"`
	}
	// heroFields is declared before heroType so the FieldsThunk below can
	// close over it, allowing the recursive "friends" field.
	var heroFields graphql.Fields
	heroType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Hero",
		Fields: graphql.FieldsThunk(func() graphql.Fields {
			return heroFields
		}),
	})
	heroFields = graphql.Fields{
		"id": &graphql.Field{
			Type: graphql.ID,
		},
		"name": &graphql.Field{
			Type: nameType,
			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
				hero := p.Source.(Hero)
				if hero.Name != "" {
					return hero.Name, nil
				}
				// Empty name: synthesize the error, then apply the optional
				// formatter and extensions wrapper.
				err := fmt.Errorf("Name for character with ID %v could not be fetched.", hero.Id)
				if formatErrorFn != nil {
					err = formatErrorFn(err)
				}
				if extensions != nil {
					return nil, &extendedError{
						error:      err,
						extensions: extensions,
					}
				}
				return nil, err
			},
		},
		"friends": &graphql.Field{
			Type: graphql.NewList(heroType),
		},
	}
	queryType := graphql.NewObject(graphql.ObjectConfig{
		Name: "Query",
		Fields: graphql.Fields{
			"hero": &graphql.Field{
				Type: heroType,
				Resolve: func(params graphql.ResolveParams) (interface{}, error) {
					// Friend 1002 has no Name, which triggers the error path.
					return Hero{
						Name: "R2-D2",
						Friends: []Hero{
							{Id: "1000", Name: "Luke Skywalker"},
							{Id: "1002"},
							{Id: "1003", Name: "Leia Organa"},
						},
					}, nil
				},
			},
		},
	})
	expectNoError := func(err error) {
		if err != nil {
			t.Fatalf("expected no error, got %v", err)
		}
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: queryType,
	})
	expectNoError(err)
	return graphql.Do(graphql.Params{
		Schema: schema,
		RequestString: `{
		  hero {
		    name
		    heroFriends: friends {
		      id
		      name
		    }
		  }
		}`,
	})
}
// TestQuery_ErrorPath checks that an error on a nullable field yields a null
// for that field and an error entry with the correct path.
// http://facebook.github.io/graphql/June2018/#example-bc485
func TestQuery_ErrorPath(t *testing.T) {
	result := testErrors(t, graphql.String, nil, nil)
	assertJSON(t, `{
	  "errors": [
	    {
	      "message": "Name for character with ID 1002 could not be fetched.",
	      "locations": [ { "line": 6, "column": 7 } ],
	      "path": [ "hero", "heroFriends", 1, "name" ]
	    }
	  ],
	  "data": {
	    "hero": {
	      "name": "R2-D2",
	      "heroFriends": [
	        {
	          "id": "1000",
	          "name": "Luke Skywalker"
	        },
	        {
	          "id": "1002",
	          "name": null
	        },
	        {
	          "id": "1003",
	          "name": "Leia Organa"
	        }
	      ]
	    }
	  }
	}`, result)
}
// TestQuery_ErrorPathForNonNullField checks that an error on a non-null field
// nulls out the enclosing friend object rather than just the field.
// http://facebook.github.io/graphql/June2018/#example-08b62
func TestQuery_ErrorPathForNonNullField(t *testing.T) {
	result := testErrors(t, graphql.NewNonNull(graphql.String), nil, nil)
	assertJSON(t, `{
	  "errors": [
	    {
	      "message": "Name for character with ID 1002 could not be fetched.",
	      "locations": [ { "line": 6, "column": 7 } ],
	      "path": [ "hero", "heroFriends", 1, "name" ]
	    }
	  ],
	  "data": {
	    "hero": {
	      "name": "R2-D2",
	      "heroFriends": [
	        {
	          "id": "1000",
	          "name": "Luke Skywalker"
	        },
	        null,
	        {
	          "id": "1003",
	          "name": "Leia Organa"
	        }
	      ]
	    }
	  }
	}`, result)
}
// TestQuery_ErrorExtensions checks that extensions attached via extendedError
// are serialized into the error entry.
// http://facebook.github.io/graphql/June2018/#example-fce18
func TestQuery_ErrorExtensions(t *testing.T) {
	result := testErrors(t, graphql.NewNonNull(graphql.String), map[string]interface{}{
		"code":      "CAN_NOT_FETCH_BY_ID",
		"timestamp": "Fri Feb 9 14:33:09 UTC 2018",
	}, nil)
	assertJSON(t, `{
	  "errors": [
	    {
	      "message": "Name for character with ID 1002 could not be fetched.",
	      "locations": [ { "line": 6, "column": 7 } ],
	      "path": [ "hero", "heroFriends", 1, "name" ],
	      "extensions": {
	        "code": "CAN_NOT_FETCH_BY_ID",
	        "timestamp": "Fri Feb 9 14:33:09 UTC 2018"
	      }}
	  ],
	  "data": {
	    "hero": {
	      "name": "R2-D2",
	      "heroFriends": [
	        {
	          "id": "1000",
	          "name": "Luke Skywalker"
	        },
	        null,
	        {
	          "id": "1003",
	          "name": "Leia Organa"
	        }
	      ]
	    }
	  }
	}`, result)
}
// TestQuery_OriginalErrorBuiltin verifies that OriginalError unwraps to a
// *gqlerrors.Error whose own OriginalError is a plain error value.
func TestQuery_OriginalErrorBuiltin(t *testing.T) {
	result := testErrors(t, graphql.String, nil, nil)
	switch err := result.Errors[0].OriginalError().(type) {
	case *gqlerrors.Error:
		// Nested switch inspects the innermost original error's type.
		switch err := err.OriginalError.(type) {
		case error:
		default:
			t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
		}
	default:
		t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
	}
}
// TestQuery_OriginalErrorExtended verifies that when extensions are supplied,
// the innermost original error is an extendedError (value or pointer).
func TestQuery_OriginalErrorExtended(t *testing.T) {
	result := testErrors(t, graphql.String, map[string]interface{}{
		"code": "CAN_NOT_FETCH_BY_ID",
	}, nil)
	switch err := result.Errors[0].OriginalError().(type) {
	case *gqlerrors.Error:
		switch err := err.OriginalError.(type) {
		case *extendedError:
		case extendedError:
		default:
			t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
		}
	default:
		t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
	}
}
type customError struct {
error
}
func (e customError) Error() string {
return e.error.Error()
}
// TestQuery_OriginalErrorCustom verifies that an error transformed by
// formatErrorFn into a customError value is preserved as the innermost
// original error.
func TestQuery_OriginalErrorCustom(t *testing.T) {
	result := testErrors(t, graphql.String, nil, func(err error) error {
		return customError{error: err}
	})
	switch err := result.Errors[0].OriginalError().(type) {
	case *gqlerrors.Error:
		switch err := err.OriginalError.(type) {
		case customError:
		default:
			t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
		}
	default:
		t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
	}
}
// TestQuery_OriginalErrorCustomPtr is the pointer variant of
// TestQuery_OriginalErrorCustom: a *customError must survive unwrapping.
func TestQuery_OriginalErrorCustomPtr(t *testing.T) {
	result := testErrors(t, graphql.String, nil, func(err error) error {
		return &customError{error: err}
	})
	switch err := result.Errors[0].OriginalError().(type) {
	case *gqlerrors.Error:
		switch err := err.OriginalError.(type) {
		case *customError:
		default:
			t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
		}
	default:
		t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
	}
}
// TestQuery_OriginalErrorPanic verifies that a panic raised inside
// formatErrorFn is recovered by the library and surfaced as an ordinary
// error in OriginalError.
func TestQuery_OriginalErrorPanic(t *testing.T) {
	result := testErrors(t, graphql.String, nil, func(err error) error {
		panic(errors.New("panic error"))
	})
	switch err := result.Errors[0].OriginalError().(type) {
	case *gqlerrors.Error:
		switch err := err.OriginalError.(type) {
		case error:
		default:
			t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
		}
	default:
		t.Fatalf("unexpected error: %v", reflect.TypeOf(err))
	}
}
|
package models
import (
"fmt"
"github.com/Sirupsen/logrus"
"gopkg.in/mgo.v2"
)
/*
type Player struct{
AccountID string
Chips int
}
type State struct{
CurrentPlayers []Player
ActivePlayer string
}
type Bet
type Hand struct{
Pot int
Bank int
State State
Actions []interface
}
type HighCard struct {
ID string `json:"gameID" bson:"gameID"`
Bank int `json:"-" bson:"bank"`
Hands []Hand `json:"players" bson:"players"`
}
*/
const (
	// highCardUpdate is the message type/route used when pushing high-card
	// game state to clients.
	highCardUpdate = "/highcard/update"
)

// highCardManager is the package-level registry of active high-card games,
// keyed by game ID. It is populated by InitializeHighCardManager and
// LoadCreateHighCardGame.
var highCardManager HighCardManager
// HighCardManager holds every in-memory high-card game, keyed by game ID.
type HighCardManager struct {
	Games map[string]*HighCardGame
}
// HighCardMessage is the JSON envelope sent to clients: a message type, the
// embedded game metadata, and the (possibly obscured) hand state.
type HighCardMessage struct {
	Type  string      `json:"type"`
	Game  `json:"gameInfo"`
	State interface{} `json:"gameState"`
}
// HighCardHand is one dealt hand: its players (as an ordered list and an
// account-ID index over the same pointers), the deck, the pot, and
// end-of-hand bookkeeping.
type HighCardHand struct {
	PlayerList []*HighCardPlayer          `json:"players"`
	Players    map[string]*HighCardPlayer `json:"-"` // keyed by AccountID; aliases PlayerList entries
	*Deck      `json:"-"`
	Pot        int `json:"pot"`
	ActionTo   `json:"actionTo"`
	Complete   bool `json:"complete"`
	Payout     int  `json:"payout"` //only set if complete==true
	NumTurns   int  `json:"-"`
	// NumStartPlayers is the count of players who paid the ante at the deal.
	NumStartPlayers int `json:"-"`
}
// HighCardPlayer is a seated player in a hand. Next links players into a
// circular turn order (set up by NewHand).
type HighCardPlayer struct {
	Next       *HighCardPlayer `json:"-"`
	GamePlayer `json:"gamePlayer"`
	Card       `json:"card"`
	State      string `json:"state"`
}
// HighCardGame couples the persisted Game record with the in-memory hand
// history; Hand is the hand currently in play and Ante the per-hand buy-in.
type HighCardGame struct {
	Game
	Hands []*HighCardHand
	Hand  *HighCardHand
	Ante  int
}
// ActionTo identifies whose turn it is and how much they must call.
type ActionTo struct {
	CallAmount int    `json:"callAmount"`
	AccountID  string `json:"accountID"`
}
// InitializeHighCardManager resets the package-level manager to an empty
// registry of high-card games. Call once at startup before loading games.
func InitializeHighCardManager() {
	highCardManager = HighCardManager{
		Games: make(map[string]*HighCardGame),
	}
}
// LoadCreateHighCardGame loads the persisted Game for gameID and returns the
// matching in-memory HighCardGame from the manager, refreshing its Game data;
// if no in-memory game exists yet, a new one (empty hand history, ante of 1)
// is created and registered. Returns an error if the game cannot be loaded.
func LoadCreateHighCardGame(db *mgo.Database, gameID string) (*HighCardGame, error) {
	game, err := LoadGame(db, gameID, "")
	if err != nil {
		return nil, fmt.Errorf("failed to load game in get high card game: %+v", err)
	}
	hcg, ok := highCardManager.Games[gameID]
	if ok {
		// Refresh the cached game with the just-loaded persistent state.
		hcg.Game = game
	} else {
		hcg = &HighCardGame{
			Game:  game,
			Hands: []*HighCardHand{},
			Ante:  1,
		}
		highCardManager.Games[game.ID] = hcg
	}
	return hcg, nil
}
// Copy returns a pointer to a shallow copy of the player, so callers can
// mutate the copy (e.g. hide its card) without touching the original.
func (hcp *HighCardPlayer) Copy() *HighCardPlayer {
	clone := *hcp
	return &clone
}
// NewHand deals a fresh hand for the game's current players.
//
// Each player who can cover the ante pays it into the pot and is dealt one
// card; players who cannot afford the ante are skipped. The dealt players are
// linked into a circular turn order, action is assigned to the first player,
// and the hand becomes hcg.Hand (also appended to hcg.Hands). Returns an
// error if fewer than two players exist or fewer than two can pay the ante.
func (hcg *HighCardGame) NewHand() error {
	//update hcg with the current list of players
	if len(hcg.Game.Players) < 2 {
		// Fix: dropped the trailing period — Go error strings are not
		// punctuated (and the sibling error below had none).
		return fmt.Errorf("not enough players to start game")
	}
	h := HighCardHand{
		PlayerList: []*HighCardPlayer{},
		Players:    map[string]*HighCardPlayer{},
		Deck:       NewDeck(),
		Pot:        0,
		NumTurns:   0,
		Complete:   false,
	}
	for i, p := range hcg.Game.Players {
		// Skip players who cannot cover the ante (clearer form of the
		// original p.Chips-hcg.Ante < 0 check).
		if p.Chips < hcg.Ante {
			continue
		}
		// Mutate through the index: p is a copy of the slice element.
		hcg.Game.Players[i].Chips -= hcg.Ante
		h.Pot += hcg.Ante
		// Deal's second return is deliberately ignored: a freshly created deck
		// is assumed to hold enough cards for one table — TODO confirm Deal's
		// failure modes against the Deck implementation.
		card, _ := h.Deck.Deal()
		player := HighCardPlayer{
			GamePlayer: hcg.Game.Players[i],
			Card:       *card,
			State:      "",
		}
		h.PlayerList = append(h.PlayerList, &player)
		h.Players[hcg.Game.Players[i].AccountID] = &player
	}
	if len(h.PlayerList) < 2 {
		return fmt.Errorf("not enough players with ante")
	}
	// Link each player to the next, wrapping the last back to the first so
	// the turn order forms a ring.
	for i := range h.PlayerList {
		if i == len(h.PlayerList)-1 {
			h.PlayerList[i].Next = h.PlayerList[0]
			break
		}
		h.PlayerList[i].Next = h.PlayerList[i+1]
	}
	h.NumStartPlayers = len(h.PlayerList)
	h.ActionTo = ActionTo{AccountID: h.PlayerList[0].AccountID}
	hcg.Hand = &h
	hcg.Hands = append(hcg.Hands, &h)
	return nil
}
// StartHand deals a new hand and reports any setup failure.
// TODO: drive the action loop here — receive an action, validate it, apply
// it, broadcast the update, and re-deal once an end state is reached.
func (hcg *HighCardGame) StartHand() error {
	err := hcg.NewHand()
	if err == nil {
		return nil
	}
	return fmt.Errorf("failed to create new highcard hand: %+v", err)
}
// Send pushes the current hand state to every player in the game, masking
// all other players' cards while the hand is still in progress so nobody can
// see an opponent's card before showdown.
func (hcg *HighCardGame) Send() error {
	for _, player := range hcg.Game.Players {
		//obscure all other players's cards if hand not complete
		obscuredPlayers := make([]*HighCardPlayer, len(hcg.Hand.PlayerList))
		for i := range hcg.Hand.PlayerList {
			//check and skip current player
			// Copy first so masking never mutates the real hand state.
			obscuredPlayers[i] = hcg.Hand.PlayerList[i].Copy()
			if !hcg.Hand.Complete && hcg.Hand.PlayerList[i].GamePlayer.AccountID != player.AccountID {
				obscuredPlayers[i].Card = nullCard
			}
		}
		// Per-recipient snapshot of the hand; Deck and the Players index are
		// deliberately omitted from the wire format.
		state := HighCardHand{
			PlayerList: obscuredPlayers,
			Pot:        hcg.Hand.Pot,
			ActionTo:   hcg.Hand.ActionTo,
			Complete:   hcg.Hand.Complete,
			Payout:     hcg.Hand.Payout,
		}
		msg := HighCardMessage{
			Type:  highCardUpdate,
			Game:  hcg.Game,
			State: state,
		}
		if err := Send(player.AccountID, msg); err != nil {
			return fmt.Errorf("failed to send in highcard.start: %+v", err)
		}
	}
	return nil
}
// Fold folds the acting player; a no-op once the hand is complete.
func (hcg *HighCardGame) Fold() error {
	if !hcg.Hand.Complete {
		return hcg.Transition("fold")
	}
	return nil
}
// highCardFoldOutOfTurn marks a player as folded even though the action is
// not on them (e.g. the player left or disconnected).
func highCardFoldOutOfTurn(gameID, accountID string) error {
	hcg, ok := highCardManager.Games[gameID]
	if !ok {
		return fmt.Errorf("highcard game %s does not exist in fold out of turn", gameID)
	}
	// Guard the player lookup: indexing a missing key previously returned a
	// nil pointer and the .State assignment panicked.
	player, ok := hcg.Hand.Players[accountID]
	if !ok {
		return fmt.Errorf("player %s is not in the current hand of game %s", accountID, gameID)
	}
	player.State = "fold"
	return nil
}
// Check passes the action without betting; a no-op once the hand is complete.
func (hcg *HighCardGame) Check() error {
	if !hcg.Hand.Complete {
		return hcg.Transition("check")
	}
	return nil
}
// Call matches the outstanding bet: the call amount moves from the acting
// player's stack into the pot and the amount owed resets to zero.
func (hcg *HighCardGame) Call() error {
	if hcg.Hand.Complete {
		return nil
	}
	acting := hcg.Hand.ActionTo.AccountID
	owed := hcg.Hand.ActionTo.CallAmount
	hcg.Hand.Players[acting].Chips -= owed
	hcg.Hand.Pot += owed
	hcg.Hand.ActionTo.CallAmount = 0
	return hcg.Transition("call")
}
// Bet opens the betting: amount moves from the acting player's stack into the
// pot and becomes the amount every other player must call.
func (hcg *HighCardGame) Bet(amount int) error {
	if hcg.Hand.Complete {
		return nil
	}
	acting := hcg.Hand.ActionTo.AccountID
	hcg.Hand.Players[acting].Chips -= amount
	hcg.Hand.Pot += amount
	hcg.Hand.ActionTo.CallAmount = amount
	return hcg.Transition(fmt.Sprintf("bet %d", amount))
}
// Raise calls the outstanding amount and raises on top of it; the raise
// becomes the new amount the remaining players must call.
func (hcg *HighCardGame) Raise(raiseAmount int) error {
	if hcg.Hand.Complete {
		return nil
	}
	acting := hcg.Hand.ActionTo.AccountID
	callAmount := hcg.Hand.ActionTo.CallAmount
	total := callAmount + raiseAmount
	hcg.Hand.Players[acting].Chips -= total
	hcg.Hand.Pot += total
	hcg.Hand.ActionTo.CallAmount = raiseAmount
	return hcg.Transition(fmt.Sprintf("call %d, reraise %d", callAmount, raiseAmount))
}
// Transition records the acting player's action, advances the action to the
// next player who has not folded, then evaluates whether the hand ended.
func (hcg *HighCardGame) Transition(state string) error {
	currPlayer := hcg.Hand.ActionTo.AccountID
	hcg.Hand.Players[currPlayer].State = state
	// Bug fix: the previous loop compared the *account ID* to "fold", which
	// can never be true, so folded players still received the action. Walk
	// the ring checking each player's State instead, bounded by the player
	// count so an all-folded table cannot spin forever.
	next := hcg.Hand.Players[currPlayer].Next
	for i := 0; i < len(hcg.Hand.Players) && next.State == "fold"; i++ {
		next = next.Next
	}
	hcg.Hand.ActionTo.AccountID = next.GamePlayer.AccountID
	return hcg.checkComplete()
}
// checkComplete decides whether the hand just ended and, if so, settles it:
// finds the highest card among non-folded players, splits the pot between
// the winners, and copies the resulting chip counts back onto the game's
// players so they get persisted.
func (hcg *HighCardGame) checkComplete() error {
	hcg.Hand.NumTurns++
	// The hand ends once everyone has acted at least once with nothing left
	// to call, or when only one unfolded player remains.
	if hcg.Hand.NumTurns >= hcg.Hand.NumStartPlayers && hcg.Hand.ActionTo.CallAmount == 0 {
		hcg.Hand.Complete = true
	} else if hcg.OnePlayerRemains() {
		hcg.Hand.Complete = true
	}
	if !hcg.Hand.Complete {
		return nil
	}
	// Showdown: collect every non-folded player holding the highest rank;
	// ties share the win.
	highest := 0
	winners := []string{}
	for accountID, player := range hcg.Hand.Players {
		if player.State == "fold" {
			continue
		}
		if player.Card.Rank == highest {
			winners = append(winners, accountID)
		} else if player.Card.Rank > highest {
			highest = player.Card.Rank
			winners = []string{accountID}
		}
	}
	//calculate this hands chip updates
	// NOTE(review): if every player somehow folded, winners would be empty
	// and this divides by zero — confirm at least one active player is
	// guaranteed here.
	hcg.Hand.Payout = hcg.Hand.Pot / len(winners)
	for _, w := range winners {
		hcg.Hand.Players[w].Chips += hcg.Hand.Payout
		hcg.Hand.Players[w].State = "winner"
	}
	//record the chip updates in our game object which will get saved
	// NOTE(review): this indexes Hand.Players with every game player —
	// confirm players who could not ante cannot appear here, or this nil
	// map lookup would panic.
	for i, p := range hcg.Game.Players {
		hcg.Game.Players[i].Chips = hcg.Hand.Players[p.AccountID].Chips
		//if you're not a winner you're a loser
		if hcg.Hand.Players[p.AccountID].State != "winner" {
			hcg.Hand.Players[p.AccountID].State = "loser"
		}
	}
	logrus.Infof("highcard game complete. winners: %+v, payment: %d", winners, hcg.Hand.Payout)
	hcg.Hand.Pot = 0
	return nil
}
// OnePlayerRemains reports whether exactly one player in the current hand
// has not folded (which ends the hand immediately).
func (hcg *HighCardGame) OnePlayerRemains() bool {
	//determine if only one player is in a nonfold state
	active := 0
	for _, p := range hcg.Hand.Players {
		if p.State != "fold" {
			active++
		}
	}
	// Idiom: return the comparison directly instead of if/return true/false.
	return active == 1
}
|
package linter
import (
"fmt"
"go/token"
"strings"
)
// Result is struct
type Result struct {
Position token.Position
Comment string
}
func NewResult(position token.Position, comment string) *Result {
// The `position` points out `type`, `func`, etc line. So, `position.Line` decrements here.
position.Line = position.Line - 1
return &Result{
Position: position,
Comment: comment,
}
}
func (r *Result) String() string {
return fmt.Sprintf("%s: %s", r.GetPosition(), r.GetComment())
}
func (r *Result) GetPosition() string {
return fmt.Sprintf("%s", r.Position.String())
}
func (r *Result) GetComment() string {
return strings.ReplaceAll(r.Comment, "\n", " ")
}
|
package main
// DoctolibConfig holds the information of the config.yml doctolib link declaration.
// Fields are populated via mapstructure from the YAML config; Detail and
// Delay are pointers so that "absent" can be distinguished from zero values.
type DoctolibConfig struct {
	URL           string  `mapstructure:"url"`
	VaccineName   string  `mapstructure:"vaccine_name"`
	PracticeID    string  `mapstructure:"practice_id"`
	AgendaID      string  `mapstructure:"agenda_id"`
	VisitMotiveID string  `mapstructure:"visit_motive_id"`
	Detail        *string `mapstructure:"detail"`
	Delay         *int    `mapstructure:"delay"`
}
|
package router
import (
// "errors"
// "fmt"
"net/http"
"reflect"
"strings"
)
// StaticRouter maps URL paths of the form [Path/]controller/action onto
// registered controller types. DefaultCname/DefaultAname are used when the
// corresponding path segment is missing; routMap indexes controller name ->
// action name -> controller type.
type StaticRouter struct {
	Path         string
	DefaultCname string
	DefaultAname string
	routMap      RouteMaps
}
// NewStaticRouter builds a router mounted at path with the given default
// controller and action names and an empty route table.
func NewStaticRouter(path, defaultCname, defaultAname string) *StaticRouter {
	return &StaticRouter{
		Path:         path,
		DefaultCname: defaultCname,
		DefaultAname: defaultAname,
		routMap:      make(RouteMaps),
	}
}
// GetRouter resolves an incoming request path to a controller name, action
// name and the controller's reflect.Type. The path is interpreted as
// [Path/]controller/action, falling back to the configured defaults for any
// missing segment. ok reports whether the prefix matched and the
// controller/action pair exists in routMap.
func (this *StaticRouter) GetRouter(r *http.Request) (cName, aName string, contollerType reflect.Type, ok bool) {
	cName = this.DefaultCname
	aName = this.DefaultAname
	ok = true
	methods := make([]string, 2)
	if this.Path != "" {
		// Strip the router's mount prefix; if nothing was stripped, the
		// request does not belong to this router.
		matchQuery := strings.Replace(r.URL.Path, this.Path, "", 1)
		if matchQuery == r.URL.Path {
			ok = false
		} else {
			methods = strings.Split(strings.Trim(matchQuery, "/"), "/")
			if len(methods) >= 1 && methods[0] != "" {
				cName = methods[0]
			}
			if len(methods) >= 2 && methods[1] != "" {
				aName = methods[1]
			}
		}
	} else {
		// No mount prefix: split the whole request path.
		methods = strings.Split(strings.Trim(r.URL.Path, "/"), "/")
		if len(methods) >= 1 && methods[0] != "" {
			cName = methods[0]
		}
		if len(methods) >= 2 && methods[1] != "" {
			aName = methods[1]
		}
	}
	// Normalize: action is lowercased then Title-cased and suffixed with the
	// export tag; controller is Title-cased and suffixed with "Controller".
	aName = strings.ToLower(aName)
	aName = strings.Title(aName)
	aName = aName + METHOD_EXPORT_TAG
	cName = strings.Title(cName) + "Controller"
	// NOTE(review): ok is reassigned by this map lookup, so a prefix
	// mismatch above is overwritten here — confirm that is intended.
	if _, ok = this.routMap[cName]; ok {
		contollerType, ok = this.routMap[cName][aName]
	}
	return
}
// AddController registers every method of c whose name ends with
// METHOD_EXPORT_TAG under the controller's concrete type name. Registering
// the same controller type twice is a no-op.
func (this *StaticRouter) AddController(c interface{}) {
	ptrVal := reflect.ValueOf(c)
	ptrType := ptrVal.Type()
	concreteType := reflect.Indirect(ptrVal).Type()
	// firstParam := strings.TrimSuffix(ct.Name(), "Controller")
	ctrlName := concreteType.Name()
	if _, exists := this.routMap[ctrlName]; exists {
		return
	}
	this.routMap[ctrlName] = make(map[string]reflect.Type)
	for i := 0; i < ptrType.NumMethod(); i++ {
		methodName := ptrType.Method(i).Name
		if strings.HasSuffix(methodName, METHOD_EXPORT_TAG) {
			this.routMap[ctrlName][methodName] = concreteType
		}
	}
}
|
// isInterleave reports whether s3 can be formed by interleaving s1 and s2
// while preserving the relative order of characters within each.
// Standard O(len(s1)*len(s2)) dynamic program: mat[i][j] is true when
// s3[:i+j] is an interleaving of s1[:i] and s2[:j].
func isInterleave(s1 string, s2 string, s3 string) bool {
	if len(s3) != len(s1)+len(s2) {
		return false
	}
	mat := make([][]bool, len(s1)+1)
	for idx := range mat {
		mat[idx] = make([]bool, len(s2)+1)
	}
	// Empty prefixes trivially interleave to the empty string.
	mat[0][0] = true
	// First row: only s2 contributes.
	for col := 1; col <= len(s2); col++ {
		mat[0][col] = mat[0][col-1] && s2[col-1] == s3[col-1]
	}
	for row := 1; row <= len(s1); row++ {
		// First column: only s1 contributes.
		mat[row][0] = mat[row-1][0] && s1[row-1] == s3[row-1]
		for col := 1; col <= len(s2); col++ {
			// The next character of s3 must extend a valid interleaving by
			// consuming from either s1 or s2. (Idiom cleanup: boolean
			// expressions replace the `== true` checks and if/else chains.)
			mat[row][col] = (s1[row-1] == s3[row+col-1] && mat[row-1][col]) ||
				(s2[col-1] == s3[row+col-1] && mat[row][col-1])
		}
	}
	return mat[len(s1)][len(s2)]
}
|
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/iovisor/gobpf/elf"
)
// main loads the compiled eBPF object, pins its sched_cls/demo program into
// the BPF filesystem, and blocks until the process is told to stop, at which
// point the deferred cleanup unpins and closes the module.
func main() {
	module := elf.NewModule("./program.o")
	if err := module.Load(nil); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load program: %v\n", err)
		os.Exit(1)
	}
	// NOTE: os.Exit below skips deferred cleanup; acceptable for a demo tool.
	defer func() {
		if err := module.Close(); err != nil {
			fmt.Fprintf(os.Stderr, "Failed to close program: %v", err)
		}
	}()
	program := module.SchedProgram("sched_cls/demo")
	if program == nil {
		fmt.Fprintf(os.Stderr, "sched_cls/demo program not found\n")
		os.Exit(1)
	}
	pinPath := "/sys/fs/bpf/bpf-sched-cls-test/demo"
	if err := elf.PinObject(program.Fd(), pinPath); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to pin program to %q: %v\n", pinPath, err)
		os.Exit(1)
	}
	defer func() {
		if err := syscall.Unlink(pinPath); err != nil {
			fmt.Fprintf(os.Stderr, "Failed to unpin program from %q: %v\n", pinPath, err)
		}
	}()
	sig := make(chan os.Signal, 1)
	// Bug fix: os.Kill (SIGKILL) cannot be caught or relayed, so registering
	// it with signal.Notify was a no-op. Listen for SIGTERM instead so the
	// deferred unpin/close actually run on a normal service stop.
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
	<-sig
}
|
package main
import "fmt"
// main demonstrates the three ways to use anonymous functions: immediate
// invocation, invocation with an argument, and a function expression bound
// to a variable.
func main() {
	fmt.Println()
	// Bind first, then call — same output as an immediately-invoked literal.
	printAnon := func() {
		fmt.Println("Anonymous Func")
	}
	printAnon()
	printX := func(x int) {
		fmt.Println("X", x)
	}
	printX(43)
	// Function expression stored in a variable.
	f := func() {
		fmt.Println("Func Expression")
	}
	f()
}
|
package genstruct
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"strings"
)
var TemplatePath string
var OutputPath string
// GenStruct reads the blueprint under $PWD/blueprint and generates the API
// source tree (controllers, injection stubs, models, plugins and routes)
// into outputPath, then runs gofmt -s -w over the generated files.
// templatePath is stored globally so the template/plugin readers can use it.
func GenStruct(templatePath string, outputPath string) {
	TemplatePath = templatePath
	OutputPath = outputPath
	// jsonBlob, err := ioutil.ReadFile(os.Getenv("PWD") + "/goapigen.json")
	// check(err)
	mainStruct := MainStruct{}
	// err = json.Unmarshal(jsonBlob, &mainStruct)
	// check(err)
	// The blueprint marker file is required; bail out early when missing.
	if _, err := os.Stat(os.Getenv("PWD") + "/blueprint/_structure.json"); os.IsNotExist(err) {
		fmt.Println("Error: BluePrint Not found")
		return
	} else {
		mainStruct = readBlueprint(os.Getenv("PWD") + "/blueprint")
	}
	fmt.Println("\n\n\nVersion: " + mainStruct.Version)
	fmt.Println(fmt.Sprintf("Revision: %d", mainStruct.Revision))
	// Accumulators for the generated routes file: route entries plus any
	// extra imports those entries require.
	routeStruct := ""
	routeImport := ""
	for moduleName, module := range mainStruct.Modules {
		routeStr, routeController, routeInject := module.GenRoute()
		routeStruct += routeStr
		if routeController {
			// Add each import path at most once.
			if strings.Index(routeImport, "server/api/controllers") <= -1 {
				routeImport += "\n\"server/api/controllers\""
			}
		}
		if routeInject {
			if strings.Index(routeImport, "server/api/controllers/injection") <= -1 {
				routeImport += "\n\"server/api/controllers/injection\""
			}
		}
		// Controllers are regenerated every run; injection stubs are written
		// only when absent so manual edits survive regeneration.
		if fileStr, fileStrInject, hasController, hasInject := module.GenController(); hasController {
			writeFile(fileStr, outputPath+"/server/api/controllers/"+moduleName+".go")
			if hasInject {
				writeFileIfNotExist(fileStrInject, outputPath+"/server/api/controllers/injection/"+moduleName+".go")
			}
		} else {
			deleteFile(outputPath + "/server/api/controllers/" + moduleName + ".go")
			if hasInject {
				writeFileIfNotExist(fileStrInject, outputPath+"/server/api/controllers/injection/"+moduleName+".go")
			}
		}
		// Model file = generated model plus any plugin additions.
		modelStr := module.GenModel()
		modelPlugin, routePlugin, routePluginImport := module.GenPlugin()
		modelStr += modelPlugin
		writeFile(modelStr, outputPath+"/server/api/models/"+moduleName+".go")
		routeStruct += routePlugin
		routeImport += routePluginImport
		// if fileStr, filePath, ok := module.GenPlugin(); ok {
		// writeFile(fileStr, outputPath+"/server/api/plugin/"+filePath+".go")
		// } else {
		// deleteFile(outputPath+"/server/api/plugin/"+filePath+".go")
		// }
		// if fileStr, ok := module.GenUnitTest(); ok {
		// writeFile(fileStr, outputPath+"/server/test/"+moduleName+"_test.go")
		// } else {
		// deleteFile(outputPath+"/server/test/"+moduleName+"_test.go")
		// }
		// writeFile(module.GenMockingModels(), outputPath+"/mock/mockmodels/"+moduleName+".go")
	}
	// Render the routes template with the accumulated entries and imports.
	routeTemplate := readTemplate("routes/route.tmpt")
	routeTemplate = strings.Replace(routeTemplate, "{{ROUTE}}", routeStruct, -1)
	routeTemplate = strings.Replace(routeTemplate, "{{IMPORT_ADDITION}}", routeImport, -1)
	writeFile(routeTemplate, outputPath+"/server/routes/routes.go")
	// Format everything that was just generated.
	cmd := exec.Command("gofmt", "-l", "-s", "-w", outputPath+"/server/api")
	out, err := cmd.Output()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(string(out))
}
// readPluginStruct parses a plugin definition JSON file located under the
// global TemplatePath; any read or parse failure panics via check.
func readPluginStruct(filePath string) Plugin {
	var plugin Plugin
	raw, err := ioutil.ReadFile(TemplatePath + "/" + filePath)
	check(err)
	check(json.Unmarshal(raw, &plugin))
	return plugin
}
// readBlueprint loads _structure.json for the top-level metadata, then
// parses every other file in the blueprint directory as a Module and indexes
// it by the module's Name. Any read or parse failure panics via check.
func readBlueprint(blueprintPath string) MainStruct {
	struc := MainStruct{}
	jsonBlob, err := ioutil.ReadFile(blueprintPath + "/_structure.json")
	check(err)
	err = json.Unmarshal(jsonBlob, &struc)
	check(err)
	files, errd := ioutil.ReadDir(blueprintPath)
	check(errd)
	struc.Modules = make(map[string]Module)
	for _, f := range files {
		// Every file except the structure marker is a module definition.
		if f.Name() != "_structure.json" {
			module := Module{}
			fmt.Println("ReadBlueprint: " + f.Name())
			jsonModule, err := ioutil.ReadFile(blueprintPath + "/" + f.Name())
			check(err)
			err = json.Unmarshal(jsonModule, &module)
			check(err)
			struc.Modules[module.Name] = module
		}
	}
	return struc
}
func writeFile(data string, filePath string) {
file, err := os.Create(filePath)
if err != nil {
log.Fatal("Cannot create file", err)
}
defer file.Close()
fmt.Fprintf(file, data)
}
// makeDir creates path and any missing parents with permission 0755,
// panicking via check on failure.
func makeDir(path string) {
	check(os.MkdirAll(path, 0755))
}
// writeFileWithMakeDir ensures path exists, then writes fileName inside it.
func writeFileWithMakeDir(data string, path string, fileName string) {
	makeDir(path)
	fullPath := path + "/" + fileName
	writeFile(data, fullPath)
}
// writeFileIfNotExist writes data to filePath only when no file exists
// there, preserving user edits to previously generated stubs.
func writeFileIfNotExist(data string, filePath string) {
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
		writeFile(data, filePath)
	}
}
func deleteFile( filePath string) {
// delete file
_ = os.Remove(filePath)
}
func check(e error) {
if e != nil {
panic(e)
}
}
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"github.com/codenotary/immudb/pkg/client"
"github.com/spf13/cobra"
"os"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/viper"
)
// cfgFile holds the --config flag value; empty means fall back to
// $HOME/.streamer.yaml (see initConfig).
var cfgFile string

// rootCmd is the top-level cobra command; running it launches the streamer.
var rootCmd = &cobra.Command{
	Use:   "streamer",
	Short: "Stream an mp4 video file",
	Long:  `Upload and stream an mp4 video file`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return Launch(cmd)
	},
}
// Execute runs the root command and exits with status 1 on error.
func Execute() {
	err := rootCmd.Execute()
	if err == nil {
		return
	}
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}
// init registers the config loader and the persistent command-line flags
// before any command runs. immudb connection flags default to the client
// library's defaults.
func init() {
	cobra.OnInitialize(initConfig)
	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.streamer.yaml)")
	rootCmd.PersistentFlags().IntP("port", "p", 8085, "port number")
	rootCmd.PersistentFlags().StringP("source-file", "s", "/home/falce/Video/STS-127_Launch_HD_orig.mp4", "full path of mp4 video file")
	rootCmd.PersistentFlags().Int("immudb-port", client.DefaultOptions().Port, "immudb port number")
	rootCmd.PersistentFlags().String("immudb-address", client.DefaultOptions().Address, "immudb host address")
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
	if cfgFile != "" {
		// Use the config file given on the command line.
		viper.SetConfigFile(cfgFile)
	} else {
		// Default to $HOME/.streamer.(yaml|json|...).
		home, err := homedir.Dir()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		viper.AddConfigPath(home)
		viper.SetConfigName(".streamer")
	}
	// Environment variables override config-file values.
	viper.AutomaticEnv()
	// A missing config file is not an error; flags/env still apply.
	if err := viper.ReadInConfig(); err == nil {
		fmt.Println("Using config file:", viper.ConfigFileUsed())
	}
}
|
package logic
import (
"context"
"fmt"
"strings"
"tpay_backend/model"
"tpay_backend/payapi/internal/common"
"tpay_backend/utils"
"tpay_backend/payapi/internal/svc"
"tpay_backend/payapi/internal/types"
"github.com/tal-tech/go-zero/core/logx"
)
// TransferLogic handles merchant-initiated transfer (payout) requests:
// validation, order placement and the reply to the caller.
type TransferLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
	// merchant is the authenticated merchant issuing the transfer.
	merchant *model.Merchant
}
// NewTransferLogic builds a TransferLogic bound to the request context, the
// service context and the authenticated merchant.
func NewTransferLogic(ctx context.Context, svcCtx *svc.ServiceContext, merchant *model.Merchant) TransferLogic {
	logic := TransferLogic{
		Logger:   logx.WithContext(ctx),
		ctx:      ctx,
		svcCtx:   svcCtx,
		merchant: merchant,
	}
	return logic
}
// VerifyParam validates a transfer request: amount, required fields,
// supported trade type, currency match, duplicate merchant order number, and
// merchant balance. It returns a coded error describing the first failed
// check, or nil when the request is acceptable.
func (l *TransferLogic) VerifyParam(req types.TransferReq) error {
	if req.Amount < 1 {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "amount不能小于1")
	}
	if strings.TrimSpace(req.Currency) == "" {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "currency不能为空")
	}
	if strings.TrimSpace(req.MchOrderNo) == "" {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "mch_order_no不能为空")
	}
	if strings.TrimSpace(req.TradeType) == "" {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "trade_type不能为空")
	}
	// Check that the requested trade type is in the configured allow-list.
	transferTradeTypeSlice, err := model.NewGlobalConfigModel(l.svcCtx.DbEngine).FindValueByKey(model.ConfigTransferTradeTypeSlice)
	if err != nil {
		// Bug fix: the log previously reported model.ConfigPayTradeTypeSlice,
		// but the key actually queried is ConfigTransferTradeTypeSlice.
		logx.Errorf("查询代付交易类型出错,key:%v, err:%v", model.ConfigTransferTradeTypeSlice, err)
		return common.NewCodeError(common.SystemInternalErr)
	}
	if !utils.InSlice(strings.TrimSpace(req.TradeType), strings.Split(transferTradeTypeSlice, ",")) {
		logx.Errorf("代付trade_type不被支持,请求:%v,系统配置:%v", req.TradeType, transferTradeTypeSlice)
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "trade_type不被支持")
	}
	//if !utils.InSlice(strings.TrimSpace(req.TradeType), common.TransferTradeTypeSlice) {
	//	return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "trade_type不被支持")
	//}
	if strings.TrimSpace(req.NotifyUrl) == "" {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "notify_url不能为空")
	}
	if strings.TrimSpace(req.BankName) == "" {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "bank_name不能为空")
	}
	if strings.TrimSpace(req.BankCardHolderName) == "" {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "bank_card_holder_name不能为空")
	}
	if strings.TrimSpace(req.BankCardNo) == "" {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "bank_card_no不能为空")
	}
	// The order currency must match the merchant account's currency.
	if req.Currency != l.merchant.Currency {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, fmt.Sprintf("currency错误,该商户只支持(%s)货币类型", l.merchant.Currency))
	}
	// Reject duplicate merchant order numbers.
	exist, err := model.NewTransferOrderModel(l.svcCtx.DbEngine).MerchantOrderNoExist(req.MerchantNo, req.MchOrderNo)
	if err != nil {
		// Bug fix: the argument list had four values for three verbs and put
		// err in MerchantNo's slot; arguments now match the format string.
		logx.Errorf("查询商户订单是否已经存在出错,MerchantNo:%v, MchOrderNo:%v, err:%v", req.MerchantNo, req.MchOrderNo, err)
		return common.NewCodeError(common.SystemInternalErr)
	}
	if exist {
		return common.NewCodeError(common.DuplicateOrderNO)
	}
	// The merchant must be able to cover the payout.
	if l.merchant.Balance < req.Amount {
		return common.NewCodeErrorWithMsg(common.VerifyParamFailed, "商户余额不足")
	}
	return nil
}
// Transfer validates the transfer request, places the payout order through
// TransferPlaceOrder, and returns the order identifiers and initial status
// to the downstream caller.
func (l *TransferLogic) Transfer(req types.TransferReq) (*types.TransferReply, error) {
	// 1. Validate the request parameters.
	if err := l.VerifyParam(req); err != nil {
		return nil, err
	}
	// 2. Place the order (production mode, sourced from the API interface).
	param := TransferPlaceOrderRequest{
		MchOrderNo:         req.MchOrderNo,
		Amount:             req.Amount,
		Currency:           req.Currency,
		OrderSource:        model.TransferOrderSourceInterface,
		TradeType:          req.TradeType,
		BankName:           req.BankName,
		BankCardNo:         req.BankCardNo,
		BankCardHolderName: req.BankCardHolderName,
		BankBranchName:     req.BankBranchName,
		BankCode:           req.BankCode,
		NotifyUrl:          req.NotifyUrl,
		ReturnUrl:          req.ReturnUrl,
		Attach:             req.Attach,
		Remark:             req.Remark,
		Mode:               model.TransferModePro, // production-mode order
	}
	placeOrder := NewTransferPlaceOrder(l.ctx, l.svcCtx, l.merchant)
	resp, err := placeOrder.TransferPlaceOrder(param)
	if err != nil {
		l.Errorf("下单失败")
		return nil, err
	}
	// 3. Reply to the downstream caller.
	return &types.TransferReply{
		MchOrderNo: req.MchOrderNo,
		OrderNo:    resp.OrderNo,
		Status:     resp.OrderStatus,
	}, nil
}
|
package main
import (
"flag"
"fmt"
"log"
"net/http"
"websocker-hub/util"
"golang.org/x/net/websocket"
"encoding/json"
"io/ioutil"
"strconv"
"sync"
)
var (
	// newsChan carries raw notification payloads from the HTTP handler to
	// the broadcaster goroutine.
	newsChan   = make(chan string)
	wsConnPool WsConnectionPool
)

// WsConnectionPool holds every live websocket connection, keyed by GUID.
// The embedded RWMutex guards websocketConn.
type WsConnectionPool struct {
	websocketConn map[string]*websocket.Conn
	sync.RWMutex
}

// result models an incoming notification envelope decoded from JSON;
// Content is kept raw so it can be forwarded without re-encoding.
type result struct {
	Module     string          `json:"module"`
	NoticeType string          `json:"type"`
	Content    json.RawMessage `json:"content"`
}
// connection registers a new websocket client in the shared pool and then
// drains incoming messages until the peer goes away.
func connection(ws *websocket.Conn) {
	key := util.GetGUID()
	// Bug fix: the pool map is shared with the broadcaster goroutine, so
	// writes must hold the pool lock (previously this was an unsynchronized
	// map write — a data race).
	wsConnPool.Lock()
	wsConnPool.websocketConn[key] = ws
	wsConnPool.Unlock()
	for {
		var reply string
		if err := websocket.Message.Receive(ws, &reply); err != nil {
			fmt.Println("Can't receive ")
			ws.Close()
			// Bug fix: remove the dead connection from the pool so the
			// broadcaster stops trying to send to it.
			wsConnPool.Lock()
			delete(wsConnPool.websocketConn, key)
			wsConnPool.Unlock()
			break
		}
		fmt.Println("Received back from client: " + reply)
	}
}
// notice accepts POSTed notification payloads over HTTP and forwards them to
// the broadcast channel consumed by noticeMessagetoClient.
func notice(res http.ResponseWriter, req *http.Request) {
	if req.Method != "POST" {
		return
	}
	defer req.Body.Close()
	b, err := ioutil.ReadAll(req.Body)
	if err != nil {
		fmt.Println(err)
		// Bug fix: previously an empty/partial payload was still broadcast
		// even when reading the body failed.
		return
	}
	newsChan <- string(b)
}
// noticeMessagetoClient broadcasts every payload received on newsChan to all
// registered websocket clients, dropping connections that fail to send.
func noticeMessagetoClient() {
	for msg := range newsChan {
		// Bug fix: the map was previously ranged without any lock while
		// connection() writes to it — a data race. Hold the lock for the
		// whole broadcast; deleting during range is safe for Go maps.
		wsConnPool.Lock()
		for key, ws := range wsConnPool.websocketConn {
			if err := websocket.Message.Send(ws, msg); err != nil {
				fmt.Println("Can't send ", err)
				ws.Close()
				delete(wsConnPool.websocketConn, key)
			}
		}
		wsConnPool.Unlock()
	}
}
// main wires up the websocket hub: serves the static UI from ./ui, accepts
// websocket clients on /v1/socket/ws, accepts notification POSTs on
// /v1/socket/notice, and runs the broadcaster goroutine.
func main() {
	wsConnPool.websocketConn = make(map[string]*websocket.Conn)
	port := flag.Int("port", 8887, "websocket porxy port ")
	flag.Parse()
	fmt.Println("websocket proxy start ... ")
	fmt.Printf("port:%v", *port)
	http.Handle("/", http.FileServer(http.Dir("./ui")))
	http.Handle("/v1/socket/ws", websocket.Handler(connection))
	http.HandleFunc("/v1/socket/notice", notice)
	// Broadcaster runs for the life of the process.
	go noticeMessagetoClient()
	if err := http.ListenAndServe(":"+strconv.Itoa(*port), nil); err != nil {
		log.Fatal("ListenAndServe:", err)
	}
}
|
package vo
// Form carries the query parameters submitted by the client.
type Form struct {
	PhoneNo  string // phone number to look up
	LastDate string // most recent date of interest; format not enforced here — confirm with callers
}
|
/**
* Copyright 2019 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package batchDeleter
import (
"github.com/stretchr/testify/mock"
"github.com/xmidt-org/codex-db"
)
// mockPruner is a testify mock of the pruner dependency used by batchDeleter
// tests; expectations are programmed via p.On(...).
type mockPruner struct {
	mock.Mock
}

// GetRecordsToDelete returns the mocked record set for a shard/limit/deathDate query.
func (p *mockPruner) GetRecordsToDelete(shard int, limit int, deathDate int64) ([]db.RecordToDelete, error) {
	args := p.Called(shard, limit, deathDate)
	return args.Get(0).([]db.RecordToDelete), args.Error(1)
}

// DeleteRecord returns the mocked error for deleting a single record.
func (p *mockPruner) DeleteRecord(shard int, deathdate int64, recordID int64) error {
	args := p.Called(shard, deathdate, recordID)
	return args.Error(0)
}
|
package valuestream
import (
oproto "code.google.com/p/open-instrument/proto"
"sort"
)
// By is a comparison function that orders two ValueStreams.
type By func(p1, p2 *oproto.ValueStream) bool

// Sort sorts values in place using the receiver as the ordering.
func (by By) Sort(values []*oproto.ValueStream) {
	sorter := &valuesSorter{values: values, by: by}
	sort.Sort(sorter)
}
// valuesSorter adapts a By comparison function to sort.Interface.
// (Idiom fix: receiver renamed from the non-idiomatic `this` to `vs`.)
type valuesSorter struct {
	values []*oproto.ValueStream
	by     By
}

// Len returns the number of streams being sorted.
func (vs *valuesSorter) Len() int {
	return len(vs.values)
}

// Swap exchanges the streams at indices i and j.
func (vs *valuesSorter) Swap(i, j int) {
	vs.values[i], vs.values[j] = vs.values[j], vs.values[i]
}

// Less reports whether the stream at i orders before the stream at j.
func (vs *valuesSorter) Less(i, j int) bool {
	return vs.by(vs.values[i], vs.values[j])
}
|
/*
* @lc app=leetcode.cn id=127 lang=golang
*
* [127] 单词接龙
*/
package solution
import "math"
// @lc code=start
// ladderLength returns the length of the shortest transformation sequence
// from beginWord to endWord, changing one letter at a time with every
// intermediate word in wordList; 0 when no sequence exists.
func ladderLength(beginWord string, endWord string, wordList []string) int {
	// Portability fix: the untyped constant math.MaxInt64 overflows int on
	// 32-bit builds; MaxInt32 is a safe "unreachable" sentinel since path
	// lengths are bounded by len(wordList)+1.
	const unreachable = math.MaxInt32
	// differsByOne reports whether two equal-length words differ in exactly
	// one position (inlined here so the function is self-contained).
	differsByOne := func(a, b string) bool {
		diff := 0
		for i := 0; i < len(a); i++ {
			if a[i] != b[i] {
				diff++
				if diff > 1 {
					return false
				}
			}
		}
		return diff == 1
	}
	// endWord must itself be in the list.
	found := false
	for _, w := range wordList {
		if w == endWord {
			found = true
			break
		}
	}
	if !found {
		return 0
	}
	// Node 0 is beginWord; nodes 1.. are the dictionary words.
	words := append([]string{beginWord}, wordList...)
	adj := make([][]int, len(words))
	for i := range adj {
		adj[i] = make([]int, len(words))
	}
	for i := 1; i < len(words); i++ {
		for j := 0; j < i; j++ {
			if differsByOne(words[i], words[j]) {
				adj[j][i] = 1
				adj[i][j] = 1
			}
		}
	}
	dist := map[string]int{}
	for i := 1; i < len(words); i++ {
		dist[words[i]] = unreachable
	}
	dist[beginWord] = 1
	// BFS with a plain slice queue (robustness fix: the previous
	// fixed-capacity channel would block if its bound were ever exceeded).
	queue := []int{0}
	for len(queue) > 0 {
		p := queue[0]
		queue = queue[1:]
		d0 := dist[words[p]]
		for q := 0; q < len(adj); q++ {
			if adj[p][q] != 0 && adj[p][q]+d0 < dist[words[q]] {
				dist[words[q]] = adj[p][q] + d0
				queue = append(queue, q)
			}
		}
	}
	if dist[endWord] == unreachable {
		return 0
	}
	return dist[endWord]
}
// isContain reports whether word appears in wordList.
func isContain(wordList []string, word string) bool {
	for _, s := range wordList {
		if s == word {
			// Idiom: return immediately instead of carrying a flag to the end.
			return true
		}
	}
	return false
}
// canBeConvert reports whether word1 and word2 differ in exactly one
// position. Assumes len(word2) >= len(word1) — all callers pass equal-length
// dictionary words (indexing word2 would panic otherwise).
func canBeConvert(word1, word2 string) bool {
	cnt := 0
	for i := 0; i < len(word1); i++ {
		if word1[i] != word2[i] {
			cnt++
			// Early exit: more than one difference can never become exactly one.
			if cnt > 1 {
				return false
			}
		}
	}
	return cnt == 1
}
// @lc code=end
|
package main
import "fmt"
// main demonstrates Go pointer basics: taking a variable's address,
// dereferencing, writing through a pointer, and pointers to pointers.
func main() {
	var a int = 10
	fmt.Printf("a:%v,变量a的地址:%p\n", a, &a)
	// p1 points at a.
	var p1 *int
	p1 = &a
	fmt.Println("p1的值:", p1)
	fmt.Printf("p1的地址:%p\n", &p1)
	fmt.Println("p1的数值,是a的地址,改地址存储的数据:", *p1)
	// 3. Assigning a new value to the variable changes the data, not its address.
	a = 100
	fmt.Println("a:", a)
	fmt.Printf("%p\n", &a)
	// 4. Modify the variable's value through the pointer.
	*p1 = 200
	fmt.Printf("4, a:%v,变量a的地址:%p\n", a, &a)
	// 5. Pointer to a pointer: p2 stores p1's address.
	var p2 **int
	fmt.Println("5, p2=", p2)
	p2 = &p1
	fmt.Printf("%T,%T,%T\n", a, p1, p2)
	fmt.Println("p2的数值:", p2)
	fmt.Printf("p2 自己的地址: %p\n", &p2)
	fmt.Println("p2 中存储的地址,对应的数值,就是p1的地址,对应的数据:", *p2)
	fmt.Println("p2中存储的地址,对应的数值,在获取对应的数值:", **p2)
}
|
package event
// Listener is a callback invoked with the fired event.
type Listener func(ev *Event)

// Subscriber bundles listeners keyed by the event name they react to.
type Subscriber struct {
	Listeners map[string]*Listener
}

// listenerQueue is the ordered set of listeners registered for one event name.
type listenerQueue struct {
	listeners []*Listener
}
// add appends a listener pointer to the end of the queue.
func (lq *listenerQueue) add(callback *Listener) {
	lq.listeners = append(lq.listeners, callback)
}
// remove deletes the first occurrence of callback (by pointer identity)
// from the queue; a no-op when the callback is not registered.
func (lq *listenerQueue) remove(callback *Listener) {
	for i, registered := range lq.listeners {
		if registered != callback {
			continue
		}
		lq.listeners = append(lq.listeners[:i], lq.listeners[i+1:]...)
		return
	}
}
// Dispatcher routes fired events to the listener queues registered per
// event name. Not safe for concurrent use.
type Dispatcher struct {
	listeners map[string]*listenerQueue
}
// AddSubscriber registers every listener of sub under its event name.
func (dispatcher *Dispatcher) AddSubscriber(sub *Subscriber) {
	for eventName, listener := range sub.Listeners {
		dispatcher.On(eventName, listener)
	}
}
// RemoveSubscriber unregisters every listener of sub from its event name.
func (dispatcher *Dispatcher) RemoveSubscriber(sub *Subscriber) {
	for eventName, listener := range sub.Listeners {
		dispatcher.Off(eventName, listener)
	}
}
// On registers callback for eventName, lazily creating the event's queue.
func (dispatcher *Dispatcher) On(eventName string, callback *Listener) {
	queue, exists := dispatcher.listeners[eventName]
	if !exists {
		queue = &listenerQueue{make([]*Listener, 0)}
		dispatcher.listeners[eventName] = queue
	}
	queue.add(callback)
}
// Off unregisters callback for eventName; a nil callback drops every
// listener registered for that event.
func (dispatcher *Dispatcher) Off(eventName string, callback *Listener) {
	if callback == nil {
		delete(dispatcher.listeners, eventName)
		return
	}
	if queue, ok := dispatcher.listeners[eventName]; ok {
		queue.remove(callback)
	}
}
// Fire invokes every listener registered for the event's name, in
// registration order.
func (dispatcher *Dispatcher) Fire(event *Event) {
	queue, registered := dispatcher.listeners[event.Name]
	if !registered {
		return
	}
	for _, listener := range queue.listeners {
		(*listener)(event)
	}
}
// NewDispatcher creates an event dispatcher with an empty listener table.
func NewDispatcher() *Dispatcher {
	table := make(map[string]*listenerQueue)
	return &Dispatcher{table}
}
// NewListener wraps a plain callback as a *Listener handle (a fresh pointer
// on every call, so each wrap has its own identity).
func NewListener(callback func(event *Event)) *Listener {
	listener := Listener(callback)
	return &listener
}
// NewSubscriber builds a Subscriber whose callbacks are wrapped as Listener
// handles, keyed by their event names.
func NewSubscriber(listeners map[string]func(event *Event)) *Subscriber {
	wrapped := make(map[string]*Listener, len(listeners))
	for eventName, callback := range listeners {
		wrapped[eventName] = NewListener(callback)
	}
	return &Subscriber{wrapped}
}
|
package seev
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00200103 is the ISO 20022 XML document wrapper for the
// seev.002.001.03 (MeetingCancellation) message.
type Document00200103 struct {
	XMLName xml.Name                `xml:"urn:iso:std:iso:20022:tech:xsd:seev.002.001.03 Document"`
	Message *MeetingCancellationV03 `xml:"MtgCxl"`
}

// AddMessage allocates the wrapped MeetingCancellationV03 and returns it for
// the caller to populate.
func (d *Document00200103) AddMessage() *MeetingCancellationV03 {
	d.Message = new(MeetingCancellationV03)
	return d.Message
}
// Scope
// The MeetingCancellation message is sent by the party that sent the MeetingNotification message to the original receiver. It is sent to cancel the previous MeetingNotification message or to advise the cancellation of a meeting.
// Usage
// The MeetingCancellation message is used in two different situations.
// First, it is used to cancel a previously sent MeetingNotification message. In this case, the MessageCancellation, the MeetingReference and the Reason building blocks need to be present.
// Second, it is used to advise that the meeting is cancelled. In this case, only the MeetingReference and Reason building blocks need to be present.
type MeetingCancellationV03 struct {
	// Identifies the cancellation message.
	Identification *iso20022.MessageIdentification1 `xml:"Id"`
	// Information indicating that the cancellation of a message previously sent is requested (and not the cancellation of the meeting).
	MessageCancellation *iso20022.AmendInformation1 `xml:"MsgCxl,omitempty"`
	// Series of elements which allow to identify a meeting.
	MeetingReference *iso20022.MeetingReference5 `xml:"MtgRef"`
	// Party notifying the cancellation of the meeting.
	NotifyingParty *iso20022.PartyIdentification9Choice `xml:"NtifngPty,omitempty"`
	// Identifies the security for which the meeting was organised.
	Security []*iso20022.SecurityPosition6 `xml:"Scty,omitempty"`
	// Defines the justification for the cancellation.
	Reason *iso20022.MeetingCancellationReason2 `xml:"Rsn"`
}
// AddIdentification allocates and returns the message identification block.
func (m *MeetingCancellationV03) AddIdentification() *iso20022.MessageIdentification1 {
	m.Identification = new(iso20022.MessageIdentification1)
	return m.Identification
}

// AddMessageCancellation allocates and returns the message-cancellation block.
func (m *MeetingCancellationV03) AddMessageCancellation() *iso20022.AmendInformation1 {
	m.MessageCancellation = new(iso20022.AmendInformation1)
	return m.MessageCancellation
}

// AddMeetingReference allocates and returns the meeting reference block.
func (m *MeetingCancellationV03) AddMeetingReference() *iso20022.MeetingReference5 {
	m.MeetingReference = new(iso20022.MeetingReference5)
	return m.MeetingReference
}

// AddNotifyingParty allocates and returns the notifying-party block.
func (m *MeetingCancellationV03) AddNotifyingParty() *iso20022.PartyIdentification9Choice {
	m.NotifyingParty = new(iso20022.PartyIdentification9Choice)
	return m.NotifyingParty
}

// AddSecurity appends a new security position to the list and returns it.
func (m *MeetingCancellationV03) AddSecurity() *iso20022.SecurityPosition6 {
	newValue := new(iso20022.SecurityPosition6)
	m.Security = append(m.Security, newValue)
	return newValue
}

// AddReason allocates and returns the cancellation-reason block.
func (m *MeetingCancellationV03) AddReason() *iso20022.MeetingCancellationReason2 {
	m.Reason = new(iso20022.MeetingCancellationReason2)
	return m.Reason
}
|
package main
import (
"fmt"
"github.com/therecipe/qt/bluetooth"
"github.com/therecipe/qt/core"
)
const (
serviceUuid = "e8e10f95-1a70-4b27-9ccf-02010264e9c9"
androidUuid = "c9e96402-0102-cf9c-274b-701a950fe1e8"
)
// PingPong is the QObject backing the QML Bluetooth ping-pong game. It
// exposes ball/paddle coordinates and scores as Qt properties, slots for
// game flow and Bluetooth server/client management, and change signals.
type PingPong struct {
	core.QObject

	_ func() `constructor:"init"`

	// Game-state properties mirrored into QML.
	_ float64 `property:"ballX"`
	_ float64 `property:"ballY"`
	_ float64 `property:"leftBlockY"`
	_ float64 `property:"rightBlockY"`
	_ bool    `property:"showDialog"`
	_ string  `property:"message"`
	_ float64 `property:"role"`
	_ float64 `property:"leftResult"`
	_ float64 `property:"rightResult"`

	// Slots callable from QML and from Bluetooth signal connections.
	_ func()              `slot:"startGame"`
	_ func()              `slot:"update"`
	_ func(x, y float64)  `slot:"setSize"`
	_ func(bX, bY float64) `slot:"updateBall"`
	_ func(lY float64)    `slot:"updateLeftBlock"`
	_ func(rY float64)    `slot:"updateRightBlock"`
	_ func()              `slot:"startServer"`
	_ func()              `slot:"startClient"`
	_ func()              `slot:"clientConnected"`
	_ func()              `slot:"clientDisconnected"`
	_ func()              `slot:"serverConnected"`
	_ func()              `slot:"serverDisconnected"`
	_ func(bluetooth.QBluetoothSocket__SocketError)                `slot:"socketError"`
	_ func(bluetooth.QBluetoothServer__Error)                      `slot:"serverError"`
	_ func(bluetooth.QBluetoothServiceDiscoveryAgent__Error)       `slot:"serviceScanError"`
	_ func()                                                       `slot:"done"`
	_ func(b *bluetooth.QBluetoothServiceInfo)                     `slot:"addService"`
	_ func()                                                       `slot:"readSocket"`

	_ func() `signal:"ballChanged"`
	_ func() `signal:"resultChanged"`

	// Bluetooth plumbing.
	m_serverInfo   *bluetooth.QBluetoothServer
	m_serviceInfo  *bluetooth.QBluetoothServiceInfo
	socket         *bluetooth.QBluetoothSocket
	discoveryAgent *bluetooth.QBluetoothServiceDiscoveryAgent

	// m_timer drives update(); interval is the per-tick ball X step.
	m_timer        *core.QTimer
	m_serviceFound bool
	interval       float64

	// Ball physics: slope, previous position, board size, the point the
	// ball is heading towards, and client-side scaling proportions.
	m_direction     float64
	m_ballPreviousX float64
	m_ballPreviousY float64
	m_boardWidth    float64
	m_boardHeight   float64
	m_targetX       float64
	m_targetY       float64
	m_proportionX   float64
	m_proportionY   float64
}
// init is the Qt constructor: it zeroes every property, creates the game
// tick timer, and wires all slot and property-change connections used by
// QML and the Bluetooth layer.
func (p *PingPong) init() {
	//TODO: always init will zero/nil
	p.SetBallX(0)
	p.SetBallY(0)
	p.SetLeftBlockY(0)
	p.SetRightBlockY(0)
	p.SetShowDialog(false)
	p.SetMessage("")
	p.SetRole(0)
	p.SetLeftResult(0)
	p.SetRightResult(0)

	// interval is the horizontal distance the ball travels per tick.
	p.interval = 5
	p.m_timer = core.NewQTimer(p)
	p.m_timer.ConnectTimeout(p.update)

	// Route the generated property setter through the dialog-raising
	// setMessage, then wire every slot to its Go implementation.
	p.ConnectSetMessage(p.setMessage)
	p.ConnectStartGame(p.startGame)
	p.ConnectUpdate(p.update)
	p.ConnectSetSize(p.setSize)
	p.ConnectUpdateBall(p.updateBall)
	p.ConnectUpdateLeftBlock(p.updateLeftBlock)
	p.ConnectUpdateRightBlock(p.updateRightBlock)
	p.ConnectStartServer(p.startServer)
	p.ConnectStartClient(p.startClient)
	p.ConnectClientConnected(p.clientConnected)
	p.ConnectClientDisconnected(p.clientDisconnected)
	p.ConnectServerConnected(p.serverConnected)
	p.ConnectServerDisconnected(p.serverDisconnected)
	p.ConnectSocketError(p.socketError)
	p.ConnectServerError(p.serverError)
	p.ConnectServiceScanError(p.serviceScanError)
	p.ConnectDone(p.done)
	p.ConnectAddService(p.addService)
	p.ConnectReadSocket(p.readSocket)

	// Fan individual coordinate/score changes into the coarse signals QML
	// listens to.
	p.ConnectBallXChanged(func(float64) { p.BallChanged() })
	p.ConnectBallYChanged(func(float64) { p.BallChanged() })
	p.ConnectLeftResultChanged(func(float64) { p.ResultChanged() })
	p.ConnectRightResultChanged(func(float64) { p.ResultChanged() })
}
// startGame hides the dialog and starts the 50 ms game tick; the server
// (role 1) also seeds the initial ball direction.
func (p *PingPong) startGame() {
	p.SetShowDialog(false)
	p.ShowDialogChanged(p.IsShowDialog())
	//! [Start the game]
	if p.Role() == 1 {
		p.updateDirection()
	}
	p.m_timer.Start(50)
	//! [Start the game]
}
// update runs once per timer tick and pushes current state to the peer.
//
// Server (role 1): advances the ball one step along its slope towards the
// target, then writes "ballX ballY leftBlockY \n" to the socket.
// Client (role 2): writes only its paddle position, "rightBlockY \n".
func (p *PingPong) update() {
	size := core.NewQByteArray()
	// Server is only updating the coordinates
	//! [Updating coordinates]
	if p.Role() == 1 {
		p.checkBoundaries()
		p.m_ballPreviousX = p.BallX()
		p.m_ballPreviousY = p.BallY()
		// y' = y + direction * dx, written out via the two x samples.
		p.SetBallY(p.m_direction*(p.BallX()+p.interval) - p.m_direction*p.BallX() + p.BallY())
		p.SetBallX(p.BallX() + p.interval)
		size.SetNum8(p.BallX(), "g", 6)
		size.Append3(" ")
		size1 := core.NewQByteArray()
		// BUG FIX: ball Y must be serialized into the scratch buffer
		// size1; writing it into size overwrote the ball X coordinate
		// already stored there, so clients received only two fields.
		size1.SetNum8(p.BallY(), "g", 6)
		size.Append(size1)
		size.Append3(" ")
		size1.SetNum8(p.LeftBlockY(), "g", 6)
		size.Append(size1)
		size.Append3(" \n")
		p.socket.Write2(size.ConstData())
		p.BallChanged()
	} else if p.Role() == 2 {
		size.SetNum8(p.RightBlockY(), "g", 6)
		size.Append3(" \n")
		p.socket.Write2(size.ConstData())
	}
	//! [Updating coordinates]
}
// setSize records the board dimensions reported by QML, aims the ball at
// the middle of the right edge, and places it at its starting position
// (bottom centre, one ball-width above the edge).
func (p *PingPong) setSize(x, y float64) {
	p.m_boardWidth = x
	p.m_boardHeight = y
	p.m_targetX = p.m_boardWidth
	p.m_targetY = p.m_boardHeight / 2
	p.SetBallX(p.m_boardWidth / 2)
	p.m_ballPreviousX = p.BallX()
	// Ball diameter is boardWidth/54 throughout the game.
	p.SetBallY(p.m_boardHeight - p.m_boardWidth/54)
	p.m_ballPreviousY = p.BallY()
	p.BallChanged()
}
// updateBall moves the ball to the given board coordinates.
func (p *PingPong) updateBall(x, y float64) {
	p.SetBallX(x)
	p.SetBallY(y)
}
// updateLeftBlock moves the left paddle to the given vertical position.
func (p *PingPong) updateLeftBlock(y float64) {
	p.SetLeftBlockY(y)
}
// updateRightBlock moves the right paddle to the given vertical position.
func (p *PingPong) updateRightBlock(y float64) {
	p.SetRightBlockY(y)
}
// checkBoundaries is the server-side physics step: it bounces the ball off
// either paddle and the top/bottom walls, and handles a goal on either
// side (score update, ball reset, "result" broadcast to the client).
func (p *PingPong) checkBoundaries() {
	ballWidth := p.m_boardWidth / 54
	blockSize := p.m_boardWidth / 27
	blockHeight := p.m_boardHeight / 5
	//! [Checking the boundaries]
	// Hit the right paddle: mirror the vertical trajectory and head left.
	if ((p.BallX() + ballWidth) > (p.m_boardWidth - blockSize)) && ((p.BallY() + ballWidth) < (p.RightBlockY() + blockHeight)) && (p.BallY() > p.RightBlockY()) {
		p.m_targetY = 2*p.BallY() - p.m_ballPreviousY
		p.m_targetX = p.m_ballPreviousX
		p.interval = -5
		p.updateDirection()
	} else if (p.BallX() < blockSize) && ((p.BallY() + ballWidth) < (p.LeftBlockY() + blockHeight)) && (p.BallY() > p.LeftBlockY()) {
		// Hit the left paddle: mirror and head right.
		p.m_targetY = 2*p.BallY() - p.m_ballPreviousY
		p.m_targetX = p.m_ballPreviousX
		p.interval = 5
		p.updateDirection()
	} else if p.BallY() < 0 || (p.BallY()+ballWidth > p.m_boardHeight) {
		// Hit the top or bottom wall: reflect vertically.
		p.m_targetY = p.m_ballPreviousY
		p.m_targetX = p.BallX() + p.interval
		p.updateDirection()
	} else if (p.BallX() + ballWidth) > p.m_boardWidth { //! [Checking the boundaries]
		// Ball left the board on the right: point for the left player.
		p.SetLeftResult(p.LeftResult() + 1)
		p.m_targetX = p.m_boardWidth
		p.m_targetY = p.m_boardHeight / 2
		p.SetBallX(p.m_boardWidth / 2)
		p.m_ballPreviousX = p.BallX()
		p.SetBallY(p.m_boardHeight - p.m_boardWidth/54)
		p.m_ballPreviousY = p.BallY()
		p.updateDirection()
		p.checkResult()
		// Broadcast "result <left> <right> \n" to the client.
		result := core.NewQByteArray()
		result.Append3("result ")
		res := core.NewQByteArray()
		res.SetNum8(p.LeftResult(), "g", 6)
		result.Append(res)
		result.Append3(" ")
		res.SetNum8(p.RightResult(), "g", 6)
		result.Append(res)
		result.Append3(" \n")
		p.socket.Write3(result)
		println(result.ConstData())
		p.ResultChanged()
	} else if p.BallX() < 0 {
		// Ball left the board on the left: point for the right player.
		p.SetRightResult(p.RightResult() + 1)
		p.m_targetX = 0
		p.m_targetY = p.m_boardHeight / 2
		p.SetBallX(p.m_boardWidth / 2)
		p.m_ballPreviousX = p.BallX()
		p.SetBallY(p.m_boardHeight - p.m_boardWidth/54)
		p.m_ballPreviousY = p.BallY()
		p.updateDirection()
		p.checkResult()
		// Broadcast "result <left> <right> \n" to the client.
		result := core.NewQByteArray()
		result.Append3("result ")
		res := core.NewQByteArray()
		res.SetNum8(p.LeftResult(), "g", 6)
		result.Append(res)
		result.Append3(" ")
		res.SetNum8(p.RightResult(), "g", 6)
		result.Append(res)
		result.Append3(" \n")
		p.socket.Write3(result)
		println(result.ConstData())
		p.ResultChanged()
	}
}
// checkResult ends the game once either side reaches 10 points, showing a
// win/lose message depending on which role this instance plays.
func (p *PingPong) checkResult() {
	rightWins := p.RightResult() == 10
	leftWins := p.LeftResult() == 10

	var message string
	switch {
	case rightWins && p.Role() == 2:
		message = "Game over. You win!"
	case rightWins && p.Role() == 1:
		message = "Game over. You lose!"
	case leftWins && p.Role() == 1:
		message = "Game over. You win!"
	case leftWins && p.Role() == 2:
		message = "Game over. You lose!"
	default:
		return
	}
	p.SetMessage(message)
	p.m_timer.Stop()
}
// updateDirection recomputes the ball's slope towards the current target
// point.
// NOTE(review): divides by (targetX - ballX); if the target ever shares
// the ball's X coordinate this produces ±Inf/NaN — confirm callers always
// set a distinct target X before calling.
func (p *PingPong) updateDirection() {
	p.m_direction = (p.m_targetY - p.BallY()) / (p.m_targetX - p.BallX())
}
// startServer turns this instance into the left player (role 1): it
// creates the RFCOMM Bluetooth server, wires its signals, and listens
// under serviceUuid.
func (p *PingPong) startServer() {
	p.SetMessage("Starting the server")
	//! [Starting the server]
	p.m_serverInfo = bluetooth.NewQBluetoothServer(bluetooth.QBluetoothServiceInfo__RfcommProtocol, nil)
	p.m_serverInfo.ConnectNewConnection(p.clientConnected)
	p.m_serverInfo.ConnectError2(p.serverError)
	p.m_serverInfo.Listen2(bluetooth.NewQBluetoothUuid9(serviceUuid), "PingPong server")
	//! [Starting the server]
	p.SetMessage("Server started, waiting for the client. You are the left player.")
	// m_role is set to 1 if it is a server
	p.SetRole(1)
	p.RoleChanged(p.Role())
}
// startClient turns this instance into the right player (role 2): it
// starts a full service discovery filtered to serviceUuid; addService and
// done handle the results.
func (p *PingPong) startClient() {
	//! [Searching for the service]
	p.discoveryAgent = bluetooth.NewQBluetoothServiceDiscoveryAgent2(bluetooth.NewQBluetoothAddress(), nil)
	p.discoveryAgent.ConnectServiceDiscovered(p.addService)
	p.discoveryAgent.ConnectFinished(p.done)
	p.discoveryAgent.ConnectError2(p.serviceScanError)
	p.discoveryAgent.SetUuidFilter2(bluetooth.NewQBluetoothUuid9(serviceUuid))
	p.discoveryAgent.StartDefault(bluetooth.QBluetoothServiceDiscoveryAgent__FullDiscovery) //TODO: register enum for non *Default func call
	//! [Searching for the service]
	p.SetMessage("Starting server discovery. You are the right player")
	// m_role is set to 2 if it is a client
	p.SetRole(2)
	p.RoleChanged(p.Role())
}
// clientConnected handles the server's NewConnection signal: it adopts the
// pending socket, wires its signals, and sends the board dimensions
// ("width height \n") to the freshly connected client.
func (p *PingPong) clientConnected() {
	//! [Initiating server socket]
	if !p.m_serverInfo.HasPendingConnections() {
		p.SetMessage("FAIL: expected pending server connection")
		return
	}
	p.socket = p.m_serverInfo.NextPendingConnection()
	if p.socket.Pointer() == nil {
		return
	}
	p.socket.SetParent(p)
	p.socket.ConnectReadyRead(p.readSocket)
	// BUG FIX: the Disconnected signal must invoke clientDisconnected;
	// it was previously wired back to clientConnected, so a dropped client
	// never stopped the game and re-ran the connection handler instead.
	p.socket.ConnectDisconnected(p.clientDisconnected)
	p.socket.ConnectError2(p.socketError)
	//! [Initiating server socket]
	p.SetMessage("Client connected.")
	size := core.NewQByteArray()
	size.SetNum8(p.m_boardWidth, "g", 6)
	size.Append3(" ")
	size1 := core.NewQByteArray()
	size1.SetNum8(p.m_boardHeight, "g", 6)
	size.Append(size1)
	size.Append3(" \n")
	p.socket.Write2(size.ConstData())
}
// clientDisconnected stops the game tick when the client drops the
// connection and informs the user.
func (p *PingPong) clientDisconnected() {
	p.SetMessage("Client disconnected")
	p.m_timer.Stop()
}
// socketError stops the game tick on any Bluetooth socket error.
func (p *PingPong) socketError(bluetooth.QBluetoothSocket__SocketError) {
	p.m_timer.Stop()
}
// serverError stops the game tick on any Bluetooth server error.
func (p *PingPong) serverError(bluetooth.QBluetoothServer__Error) {
	p.m_timer.Stop()
}
// done runs when service discovery finishes; it reports failure if no
// PingPong service was found during the scan.
func (p *PingPong) done() {
	println("Service scan done")
	if !p.m_serviceFound {
		p.SetMessage("PingPong service not found")
	}
}
// addService is invoked for every discovered PingPong service: it opens an
// RFCOMM socket to the service, wires the socket's signals, and marks the
// search as successful (each discovery replaces any previous socket).
func (p *PingPong) addService(service *bluetooth.QBluetoothServiceInfo) {
	p.setMessage("Service found. Setting parameters...")
	//! [Connecting the socket]
	p.socket = bluetooth.NewQBluetoothSocket(bluetooth.QBluetoothServiceInfo__RfcommProtocol, nil)
	p.socket.ConnectReadyRead(p.readSocket)
	p.socket.ConnectConnected(p.serverConnected)
	p.socket.ConnectDisconnected(p.serverDisconnected)
	p.socket.ConnectToService(service, 0)
	//! [Connecting the socket]
	p.m_serviceFound = true
}
// serviceScanError surfaces discovery errors to the user via the dialog.
func (p *PingPong) serviceScanError(err bluetooth.QBluetoothServiceDiscoveryAgent__Error) {
	p.setMessage(fmt.Sprint("Scanning error", err))
}
// serverConnected fires once the client's socket reaches the server; it
// sends this side's board dimensions ("width height \n") so the peer can
// compute scaling proportions.
func (p *PingPong) serverConnected() {
	p.setMessage("Server Connected")
	size := core.NewQByteArray()
	size.SetNum8(p.m_boardWidth, "g", 6)
	size.Append3(" ")
	size1 := core.NewQByteArray()
	size1.SetNum8(p.m_boardHeight, "g", 6)
	size.Append(size1)
	size.Append3(" \n")
	p.socket.Write2(size.ConstData())
}
// serverDisconnected stops the game tick when the server drops the
// connection and informs the user.
func (p *PingPong) serverDisconnected() {
	p.setMessage("Server Disconnected")
	p.m_timer.Stop()
}
// readSocket drains complete lines from the Bluetooth socket and updates
// game state. Messages are space-separated: "result <L> <R>", the initial
// "width height" board handshake (while proportions are unset), the client
// paddle position (server side), or "ballX ballY leftBlockY" (client
// side). Only the last line read is parsed for coordinates.
func (p *PingPong) readSocket() {
	if p.socket.Pointer() == nil {
		return
	}
	sep := " "
	var line *core.QByteArray
	for p.socket.CanReadLine() {
		line = p.socket.ReadLine2(0)
		println(line.ConstData(), line.Length())
		if line.Contains2("result") {
			result := line.Split(sep)
			if len(result) > 2 {
				leftSide := result[1]
				rightSide := result[2]
				p.SetLeftResult(leftSide.ToDouble(false))
				p.SetRightResult(rightSide.ToDouble(false))
				p.ResultChanged()
				p.checkResult()
			}
		}
	}
	// BUG FIX: when the socket signaled readyRead but no complete line was
	// available, line stayed nil and the parsing below dereferenced it.
	if line == nil {
		return
	}
	if p.m_proportionX == 0 || p.m_proportionY == 0 {
		// First message is the peer's board size; derive scale factors and
		// start the game after a 3 s countdown.
		boardSize := line.Split(sep)
		if len(boardSize) > 1 {
			boardWidth := boardSize[0]
			boardHeight := boardSize[1]
			p.m_proportionX = p.m_boardWidth / boardWidth.ToDouble(false)
			p.m_proportionY = p.m_boardHeight / boardHeight.ToDouble(false)
			p.setMessage("Screen adjusted. Get ready!")
			singleShot := core.NewQTimer(nil)
			singleShot.ConnectTimeout(p.startGame)
			singleShot.SetSingleShot(true)
			singleShot.Start(3000)
		}
	} else if p.Role() == 1 {
		// Server receives the client's right-paddle position.
		boardSize := line.Split(sep)
		if len(boardSize) > 1 {
			rightBlockY := boardSize[0]
			p.SetRightBlockY(p.m_proportionY * rightBlockY.ToDouble(false))
			p.RightBlockYChanged(p.RightBlockY())
		}
	} else if p.Role() == 2 {
		// Client receives ball position and the server's left paddle.
		boardSize := line.Split(sep)
		if len(boardSize) > 2 {
			ballX := boardSize[0]
			ballY := boardSize[1]
			leftBlockY := boardSize[2]
			p.SetBallX(p.m_proportionX * ballX.ToDouble(false))
			p.SetBallY(p.m_proportionY * ballY.ToDouble(false))
			p.SetLeftBlockY(p.m_proportionY * leftBlockY.ToDouble(false))
			p.LeftBlockYChanged(p.LeftBlockY())
			p.BallChanged()
		}
	}
}
// setMessage stores message in the message property and raises the dialog
// so QML displays it. It backs the generated SetMessage via the
// ConnectSetMessage wiring done in init.
func (p *PingPong) setMessage(message string) {
	p.SetShowDialog(true)
	p.SetMessageDefault(message)
	p.ShowDialogChanged(p.IsShowDialog())
}
|
package model
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewField verifies that NewField stores name, type and value verbatim.
func TestNewField(t *testing.T) {
	const (
		name  = "field1"
		value = "value1"
	)
	field := NewField(name, StringType, []byte(value))

	assert.Equal(t, name, field.Name())
	assert.Equal(t, StringType, field.Type())
	assert.Equal(t, value, string(field.Value()))
}
|
// +build mage
/*
* This file is part of impacca. Copyright (C) 2013 and above Shogun <shogun@cowtech.it>.
* Licensed under the MIT license, which can be found at https://choosealicense.com/licenses/mit.
*/
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/magefile/mage/sh"
)
// cwd is the working directory at process start. The error is discarded;
// NOTE(review): a Getwd failure would leave cwd empty — confirm mage
// targets are always run from a valid directory.
var cwd, _ = os.Getwd()

// arch is the only architecture the Build target cross-compiles for.
var arch = "amd64"

// oses lists the target operating systems for Build.
var oses = []string{"darwin", "linux", "windows"}
// step prints a yellow "--- <message>" banner to stdout, formatting
// message with args as fmt.Printf would.
func step(message string, args ...interface{}) {
	banner := fmt.Sprintf(message, args...)
	fmt.Printf("\x1b[33m--- %s\x1b[0m\n", banner)
}
// execute logs the command line via step, then runs it through mage's sh
// helper with the given environment, streaming output to this process'
// stdout/stderr. It returns the command's error, if any.
func execute(env map[string]string, args ...string) error {
	step("Executing: %s ...", strings.Join(args, " "))
	_, err := sh.Exec(env, os.Stdout, os.Stderr, args[0], args[1:]...)
	return err
}
// Build cleans the dist folder and cross-compiles a stripped impacca
// executable for every OS in oses (amd64 only), as dist/impacca-<os>.
func Build() error {
	step("Cleaning dist folder ...")
	distDir := filepath.Join(cwd, "dist")
	if err := os.RemoveAll(distDir); err != nil {
		return err
	}
	if err := os.Mkdir(distDir, 0755); err != nil {
		return err
	}
	// Compile executables. The loop variable is targetOS (not "os") so it
	// no longer shadows the standard library os package used above.
	for _, targetOS := range oses {
		executable := filepath.Join(distDir, "impacca-"+targetOS)
		err := execute(map[string]string{"GOARCH": arch, "GOOS": targetOS}, "go", "build", "-o", executable, "-ldflags=-s -w")
		if err != nil {
			return err
		}
	}
	return nil
}
// Lint verifies the code by running go vet in the current module.
func Lint() error {
	return execute(nil, "go", "vet")
}
// Default is the mage target run when no target is named on the CLI.
var Default = Build
|
package geometry
// Rect is an axis-aligned rectangle described by its side lengths.
type Rect struct {
	Width, Height float64
}

// Area returns the rectangle's surface area.
func (r Rect) Area() float64 {
	return r.Height * r.Width
}

// Perim returns the rectangle's perimeter.
func (r Rect) Perim() float64 {
	return 2*r.Width + 2*r.Height
}
|
package dns
import "strings"
// DnsNameToKumaCompliant rewrites a DNS name (e.g.
// "service-name.namespace.something-else.mesh.") into Kuma's underscore
// form by replacing all but the last significant dot with underscores.
// A trailing root dot — per RFC 1034 §3.1, "Since a complete domain name
// ends with the root label, this leads to a printed form which ends in a
// dot." — is not counted. The error result is always nil and is kept only
// for signature stability.
func DnsNameToKumaCompliant(name string) (string, error) {
	dots := strings.Count(name, ".")
	if dots == 0 {
		return name, nil
	}
	rooted := strings.HasSuffix(name, ".")
	if dots == 1 && rooted {
		return name, nil
	}
	limit := dots
	if dots > 1 && rooted {
		limit--
	}
	return strings.Replace(name, ".", "_", limit-1), nil
}
|
/*
* @lc app=leetcode.cn id=1524 lang=golang
*
* [1524] 和为奇数的子数组数目
*/
// @lc code=start
// package leetcode
// numOfSubarrays counts the subarrays of arr whose sum is odd, modulo
// 1e9+7. A subarray ending at index i has odd sum iff exactly one of its
// two bounding prefix sums is odd, so we track how many earlier prefixes
// of each parity exist (the empty prefix counts as even).
func numOfSubarrays(arr []int) int {
	const mod = 1000000007
	oddPrefixes, evenPrefixes := 0, 1
	total, prefix := 0, 0
	for _, v := range arr {
		prefix += v
		if prefix%2 == 0 {
			total += oddPrefixes
			evenPrefixes++
		} else {
			total += evenPrefixes
			oddPrefixes++
		}
	}
	return total % mod
}
// @lc code=end
|
package apperr
import "fmt"
type Error struct {
Message string `json:"message"`
Hint string `json:"hint"`
InternalError error `json:"-"`
}
func (e Error) Error() string {
return fmt.Sprintf("app_eror: %s. internal_err: %v", e.Message, e.InternalError)
}
func New(message, hint string) error {
return Error{
Message: message,
Hint: hint,
}
}
func With(err error, message string, hint string) error {
// apiErr, ok := err.(Error)
// if ok {
// return apiErr
// }
return Error{
Message: message,
Hint: hint,
InternalError: err,
}
}
|
package downloader
// VideoFile describes a downloadable video: its local file path, title,
// description, source ID, MD5 checksum, and size in bytes.
type VideoFile struct {
	FilePath, Title, Desc, ID, Md5 string
	FileSize                       int64
}
|
package main
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
)
// main reads the comma-separated Intcode program from input.txt and runs
// both puzzle parts. part1 and part2 each re-parse lines, so each run gets
// a fresh copy of the program (run mutates its input slice).
func main() {
	data, err := ioutil.ReadFile("input.txt")
	if err != nil {
		panic(err)
	}
	lines := strings.Split(strings.TrimSpace(string(data)), ",")
	part1(lines)
	part2(lines)
}
// part1 runs the program with input 1 and prints the diagnostic output.
func part1(lines []string) {
	program := toInt(lines)
	fmt.Println(run(program, 1, false))
}
// part2 runs the program with input 5 and prints the diagnostic output.
func part2(lines []string) {
	program := toInt(lines)
	fmt.Println(run(program, 5, false))
}
// opCode is an Intcode instruction identifier (AoC 2019, day 5).
type opCode int

// Intcode opcodes: arithmetic, I/O, conditional jumps, comparisons, halt.
const (
	opAdd  opCode = 1
	opMul  opCode = 2
	opIn   opCode = 3
	opOut  opCode = 4
	opJiT  opCode = 5 // jump-if-true
	opJiF  opCode = 6 // jump-if-false
	opLess opCode = 7
	opEq   opCode = 8
	opHalt opCode = 99
)
// toInt parses each decimal string in lines into an int, panicking on the
// first malformed value.
func toInt(lines []string) []int {
	parsed := make([]int, 0, len(lines))
	for _, s := range lines {
		n, err := strconv.Atoi(s)
		if err != nil {
			panic(err)
		}
		parsed = append(parsed, n)
	}
	return parsed
}
// run executes the Intcode program is (mutating it in place), supplying
// `in` for every input instruction, and returns the last value output
// before the program halts (-1 if nothing was output). When debug is true
// each output is also printed.
//
// decodeOp returns parameter modes most-significant-first, which is why
// parameter 1 reads modes[2] for 3-argument opcodes, modes[1] for
// 2-argument jumps, and modes[0] for the 1-argument output.
func run(is []int, in int, debug bool) int {
	pc := 0
	out := -1
	for {
		op, modes := decodeOp(is[pc])
		switch op {
		case opAdd:
			v1 := value(is, modes[2], is[pc+1])
			v2 := value(is, modes[1], is[pc+2])
			v3 := is[pc+3] // write target is always an address
			is[v3] = v1 + v2
			pc += 4
		case opMul:
			v1 := value(is, modes[2], is[pc+1])
			v2 := value(is, modes[1], is[pc+2])
			v3 := is[pc+3]
			is[v3] = v1 * v2
			pc += 4
		case opIn:
			v := is[pc+1]
			is[v] = in
			pc += 2
		case opOut:
			v := value(is, modes[0], is[pc+1])
			if debug {
				fmt.Println(v)
			}
			out = v
			pc += 2
		case opJiT:
			v1 := value(is, modes[1], is[pc+1])
			v2 := value(is, modes[0], is[pc+2])
			if v1 != 0 {
				pc = v2
			} else {
				pc += 3
			}
		case opJiF:
			v1 := value(is, modes[1], is[pc+1])
			v2 := value(is, modes[0], is[pc+2])
			if v1 == 0 {
				pc = v2
			} else {
				pc += 3
			}
		case opLess:
			v1 := value(is, modes[2], is[pc+1])
			v2 := value(is, modes[1], is[pc+2])
			v3 := is[pc+3]
			if v1 < v2 {
				is[v3] = 1
			} else {
				is[v3] = 0
			}
			pc += 4
		case opEq:
			v1 := value(is, modes[2], is[pc+1])
			v2 := value(is, modes[1], is[pc+2])
			v3 := is[pc+3]
			if v1 == v2 {
				is[v3] = 1
			} else {
				is[v3] = 0
			}
			pc += 4
		case opHalt:
			return out
		default:
			panic("unsupported op")
		}
	}
}
// value resolves an instruction argument: mode 0 treats arg as an address
// into is (position mode); mode 1 treats it as a literal (immediate mode).
// Any other mode panics.
func value(is []int, mode, arg int) int {
	switch mode {
	case 0:
		return is[arg]
	case 1:
		return arg
	}
	panic("Unsupported mode")
}
// decodeOp splits instruction i into its two-digit opcode and the
// parameter-mode digits, left-padded with zeros to the instruction's full
// width for that opcode. The modes are returned most-significant-first
// (so the last element is parameter 1's mode); run indexes accordingly.
func decodeOp(i int) (opCode, []int) {
	chars := strconv.Itoa(i)
	digits := []int{}
	for _, c := range chars {
		d, _ := strconv.Atoi(string(c))
		digits = append(digits, d)
	}
	// Guarantee at least two digits so the opcode extraction is in range.
	if len(digits) == 1 {
		digits = append([]int{0}, digits...)
	}
	op := opCode(digits[len(digits)-2]*10 + digits[len(digits)-1])
	// pad is the full instruction width: opcode (2) plus one digit per
	// parameter mode.
	pad := 0
	switch op {
	case opAdd, opMul, opLess, opEq:
		pad = 5
	case opJiT, opJiF:
		pad = 4
	case opIn, opOut:
		pad = 3
	case opHalt:
		pad = 0
	default:
		panic("unknown op")
	}
	for i := len(digits); i < pad; i++ {
		digits = append([]int{0}, digits...)
	}
	return op, digits[:len(digits)-2]
}
|
package volume
// Index is a key/value store mapping object keys to file metadata.
type Index interface {
	// Get returns the FileInfo stored under key.
	Get(key string) (fi FileInfo, err error)
	// Set stores fi under key, replacing any previous entry.
	Set(key string, fi FileInfo) error
	// Delete removes key from the index.
	Delete(key string) error
	// List returns keys starting from key.
	// NOTE(review): exact prefix/cursor semantics are not visible here —
	// confirm against an implementation.
	List(key string) ([]string, error)
	// ListN is List limited to at most count results.
	ListN(key string, count int) ([]string, error)
	// Close releases any resources held by the index.
	Close() error
}
|
package fliptest
// defaultTemplate is the CloudFormation stack fliptest deploys by default:
// a Python 3.9 Lambda (plus IAM role and security group) launched into the
// given VPC subnet. The Lambda times HTTP GETs against test URLs (either
// event["TestUrls"] or two defaults) to verify the VPC has Internet
// connectivity.
// NOTE(review): the template's original indentation was lost in transit
// and has been reconstructed to standard YAML/Python conventions — verify
// against the repository copy before relying on exact bytes.
const defaultTemplate string = `
---
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Stack to launch a vpc lambda to test Internet in a VPC'
Parameters:
  SubnetId:
    Description: The subnet id to deploy the lambda into
    Type: String
  VpcId:
    Description: The vpc to deploy the lambda into
    Type: String
Resources:
  TestInternetFunction:
    Type: AWS::Lambda::Function
    Properties:
      Code:
        ZipFile: |
          import json
          import time
          import urllib
          class UrlTimer:
              def __init__(self,name,url):
                  self.name = name
                  self.starttime = time.time()
                  self.elapsed = ""
                  self.message = ""
                  self.success = False
                  self.url = url
                  self.response_code = 0
                  self.dict = {}
              def exec(self):
                  try:
                      response = urllib.request.urlopen(self.url, timeout=4)
                      self.response_code = response.getcode()
                      self.success = True
                      self.message = "got response code from URL"
                  except Exception as e:
                      self.message = "problem getting URL: " + str(e)
                  return self.report()
              def dictify(self):
                  self.dict = {
                      "Name": self.name,
                      "ElapsedTimeS": self.elapsed,
                      "Message": self.message,
                      "Success": self.success,
                      "Url": self.url,
                      "ResponseCode": self.response_code,
                  }
              def report(self):
                  self.elapsed = time.time() - self.starttime
                  self.dictify()
                  return json.dumps(self.dict)
          def handler(event, context):
              tests = []
              total_time = float(0)
              response = []
              if event.get("TestUrls") is not None:
                  # means user passed custom tests
                  for test in event["TestUrls"]:
                      tests.append(UrlTimer(
                          test.get("Name"),
                          test.get("Url"),
                          )
                      )
              else:
                  # run some default tests
                  tests.append(UrlTimer("gopkg","http://gopkg.in"))
                  tests.append(UrlTimer("google","http://www.google.com"))
              if event["RequestType"] in ["RunAll"]:
                  for test in tests:
                      print(test.exec())
                      total_time += test.elapsed
                      response.append(test.dict)
              return(response)
      Handler: "index.handler"
      Role:
        Fn::GetAtt:
          - LambdaExecutionRole
          - Arn
      Runtime: python3.9
      Timeout: '30'
      VpcConfig:
        SecurityGroupIds:
          - Ref: SecurityGroup
        SubnetIds:
          - Ref: SubnetId
  LambdaExecutionRole:
    Type: AWS::IAM::Role
    Properties:
      ManagedPolicyArns:
        - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole"
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - lambda.amazonaws.com
            Action:
              - sts:AssumeRole
      Path: "/cs/"
  SecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: for nat relaunch test internet lambda function
      VpcId:
        Ref: VpcId
Outputs:
  FunctionName:
    Description: The name of the lambda function that was created
    Value: !Ref TestInternetFunction
...
`
|
package p1
import (
"bufio"
"fmt"
"math"
"os"
)
// getColumn decodes the last three characters of ticket (the L/R part) by
// binary space partitioning over columns 0-7, returning the final column.
func getColumn(ticket string) int {
	lo, hi := 0, 7
	for _, c := range ticket[7:] {
		half := int(math.Ceil(float64(hi-lo) / 2))
		switch c {
		case 'L':
			hi -= half
		case 'R':
			lo += half
		}
	}
	return lo
}
// getRow decodes the first seven characters of ticket (the F/B part) by
// binary space partitioning over rows 0-127, returning the final row.
func getRow(ticket string) int {
	lo, hi := 0, 127
	for _, c := range ticket[:7] {
		half := int(math.Ceil(float64(hi-lo) / 2))
		switch c {
		case 'F':
			hi -= half
		case 'B':
			lo += half
		}
	}
	return lo
}
func getID(ticket string) int {
return getRow(ticket) * 8 + getColumn(ticket)
}
// driver scans boarding passes from p1.input, one per line, and returns
// the highest seat ID found (-1 if the file is empty).
func driver() int {
	const filename = "p1.input" // was a pointless fmt.Sprintf of a constant
	fp, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	defer fp.Close()

	maxID := -1
	scanner := bufio.NewScanner(fp)
	for scanner.Scan() {
		if id := getID(scanner.Text()); id > maxID {
			maxID = id
		}
	}
	// Surface read errors instead of silently returning a partial result.
	if err := scanner.Err(); err != nil {
		panic(fmt.Errorf("reading %s: %v", filename, err))
	}
	return maxID
}
|
package polochon
// Torrent represents a torrent file: where to fetch it, its quality, its
// swarm health (seeders/leechers), and provenance metadata.
type Torrent struct {
	Name       string  `json:"name"`
	Quality    Quality `json:"quality"`
	URL        string  `json:"url"`
	Seeders    int     `json:"seeders"`
	Leechers   int     `json:"leechers"`
	Source     string  `json:"source"`
	UploadUser string  `json:"upload_user"`
	Size       int     `json:"size"`
}
// FilterTorrents keeps, for every quality level, the torrent with the most
// seeders (ties keep the first seen), returning the survivors in
// unspecified order.
func FilterTorrents(torrents []Torrent) []Torrent {
	best := map[Quality]Torrent{}
	for _, candidate := range torrents {
		current, seen := best[candidate.Quality]
		if !seen || candidate.Seeders > current.Seeders {
			best[candidate.Quality] = candidate
		}
	}
	filtered := []Torrent{}
	for _, winner := range best {
		filtered = append(filtered, winner)
	}
	return filtered
}
|
package strucct
// UserLogin is the xorm-mapped login account row: identity (Name/Uid),
// credentials (Password, stored per the VARCHAR(40) column — presumably a
// hash; confirm with the writer), contact mail with its verification flag,
// and create/update unix timestamps (Ctime/Utime).
type UserLogin struct {
	Id         int    `xorm:"not null pk autoincr INT(10)"`
	Name       string `xorm:"not null default '' index VARCHAR(100)"`
	Uid        string `xorm:"not null default '' unique VARCHAR(40)"`
	Password   string `xorm:"not null default '' VARCHAR(40)"`
	Mail       string `xorm:"not null default '' VARCHAR(40)"`
	Ctime      int64  `xorm:"not null default 0 BIGINT(20)"`
	Cid        string `xorm:"not null default '' VARCHAR(40)"`
	MailVerify int    `xorm:"not null default 0 TINYINT(4)"`
	Utime      int64  `xorm:"not null default 0 BIGINT(20)"`
}
|
package nat
import (
"fmt"
"sync"
"time"
ma "gx/ipfs/QmNTCey11oxhb1AxDnQBRHtdhap6Ctud872NjAYPYYXPuc/go-multiaddr"
"gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
manet "gx/ipfs/QmZcLBXKaFe8ND5YHPkJRAwmhJGrVsi1JqDZNyJ4nRK5Mj/go-multiaddr-net"
)
// Mapping represents a port mapping in a NAT.
type Mapping interface {
	// NAT returns the NAT object this Mapping belongs to.
	NAT() *NAT

	// Protocol returns the protocol of this port mapping. This is either
	// "tcp" or "udp" as no other protocols are likely to be NAT-supported.
	Protocol() string

	// InternalPort returns the internal device port. Mapping will continue to
	// try to map InternalPort() to an external facing port.
	InternalPort() int

	// ExternalPort returns the external facing port. If the mapping is not
	// established, port will be 0
	ExternalPort() int

	// InternalAddr returns the internal address.
	InternalAddr() ma.Multiaddr

	// ExternalAddr returns the external facing address. If the mapping is not
	// established, addr will be nil, and ErrNoMapping will be returned.
	ExternalAddr() (addr ma.Multiaddr, err error)

	// Close closes the port mapping
	Close() error
}
// mapping is the concrete Mapping implementation; it keeps republishing
// the mapping and caches the last resolved external address.
type mapping struct {
	sync.Mutex // guards all fields

	nat       *NAT
	proto     string // "tcp" or "udp"
	intport   int
	extport   int // 0 until the mapping is established
	permanent bool
	intaddr   ma.Multiaddr
	proc      goprocess.Process

	comment string

	// Cached external address and its resolution time; guarded by cacheLk,
	// not by the embedded Mutex.
	cached    ma.Multiaddr
	cacheTime time.Time
	cacheLk   sync.Mutex
}
// NAT returns the NAT this mapping belongs to.
func (m *mapping) NAT() *NAT {
	m.Lock()
	defer m.Unlock()
	return m.nat
}

// Protocol returns the mapping's protocol ("tcp" or "udp").
func (m *mapping) Protocol() string {
	m.Lock()
	defer m.Unlock()
	return m.proto
}

// InternalPort returns the internal device port being mapped.
func (m *mapping) InternalPort() int {
	m.Lock()
	defer m.Unlock()
	return m.intport
}

// ExternalPort returns the external port, or 0 while unestablished.
func (m *mapping) ExternalPort() int {
	m.Lock()
	defer m.Unlock()
	return m.extport
}

// setExternalPort records a newly negotiated external port.
func (m *mapping) setExternalPort(p int) {
	m.Lock()
	defer m.Unlock()
	m.extport = p
}

// InternalAddr returns the internal multiaddr of the mapping.
func (m *mapping) InternalAddr() ma.Multiaddr {
	m.Lock()
	defer m.Unlock()
	return m.intaddr
}
// ExternalAddr returns the externally visible multiaddr for this mapping.
// Results are cached for CacheTime to avoid re-querying the NAT device on
// every call; while no external port is established it returns
// ErrNoMapping.
func (m *mapping) ExternalAddr() (ma.Multiaddr, error) {
	// Serve from the cache while it is fresh.
	m.cacheLk.Lock()
	ctime := m.cacheTime
	cval := m.cached
	m.cacheLk.Unlock()
	if time.Since(ctime) < CacheTime {
		return cval, nil
	}
	if m.ExternalPort() == 0 { // dont even try right now.
		return nil, ErrNoMapping
	}
	// Querying the device is serialized via the NAT's own mutex.
	m.nat.natmu.Lock()
	ip, err := m.nat.nat.GetExternalAddress()
	m.nat.natmu.Unlock()
	if err != nil {
		return nil, err
	}
	// NOTE(review): the underlying err is discarded here — consider
	// including it in the returned error for debuggability.
	ipmaddr, err := manet.FromIP(ip)
	if err != nil {
		return nil, fmt.Errorf("error parsing ip")
	}
	// call m.ExternalPort again, as mapping may have changed under our feet. (tocttou)
	extport := m.ExternalPort()
	if extport == 0 {
		return nil, ErrNoMapping
	}
	tcp, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%d", m.Protocol(), extport))
	if err != nil {
		return nil, err
	}
	maddr2 := ipmaddr.Encapsulate(tcp)
	// Refresh the cache with the newly computed address.
	m.cacheLk.Lock()
	m.cached = maddr2
	m.cacheTime = time.Now()
	m.cacheLk.Unlock()
	return maddr2, nil
}
// Close tears down the mapping by closing its republishing process.
func (m *mapping) Close() error {
	return m.proc.Close()
}
|
package mysql_storage
import (
"database/sql"
"github.com/jmoiron/sqlx"
"server/src/dto"
)
// QuotesRepository provides MySQL-backed access to the Quotes table.
type QuotesRepository struct {
	db *sqlx.DB
}
// GetQuoteById fetches a single quote row by primary key. It returns
// (nil, nil) when no row matches and a non-nil error on any other
// database failure.
func (repo *QuotesRepository) GetQuoteById(id int) (*dto.Quote, error) {
	const query = "SELECT * FROM `Quotes` WHERE Id = ?"
	result := &dto.Quote{}
	err := repo.db.Get(result, query, id)
	switch {
	case err == sql.ErrNoRows:
		return nil, nil
	case err != nil:
		return nil, err
	}
	return result, nil
}
// GetAllQuotes returns every row of the Quotes table. On success with no
// rows it returns an empty (non-nil) slice.
func (repo *QuotesRepository) GetAllQuotes() ([]*dto.Quote, error) {
	const query = "SELECT Id, AuthorName, Theme, Quote FROM Quotes"
	quotes := []*dto.Quote{}
	if err := repo.db.Select(&quotes, query); err != nil {
		return nil, err
	}
	return quotes, nil
}
// GetUserSavedQuotes is not implemented yet; it always returns (nil, nil).
// TODO: implement lookup of the quotes saved by userId.
func (repo *QuotesRepository) GetUserSavedQuotes(userId int) ([]*dto.Quote, error) {
	return nil, nil
}
// InsertQuote is not implemented yet; it always reports success.
// TODO: persist quote into the Quotes table.
func (repo *QuotesRepository) InsertQuote(quote dto.Quote) error {
	return nil
}
|
package hzutils
import (
	"bytes"
	"fmt"
	"reflect"
	"strconv"
	"unicode/utf16"
)
// StringinSlice reports whether s occurs as an element of slice.
// @param string, []string
// @return bool
func StringinSlice(s string, slice []string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}
// StringsToJSON escapes every non-ASCII rune of str as a JSON \uXXXX
// sequence and returns the result; ASCII passes through unchanged. Runes
// above U+FFFF are emitted as a UTF-16 surrogate pair, as JSON requires.
//
// BUG FIX: escapes were previously not zero-padded to four hex digits
// (U+0100 became the invalid `\u100`), and astral runes produced >4-digit
// escapes that no JSON parser accepts.
func StringsToJSON(str string) string {
	var b bytes.Buffer
	for _, r := range str {
		switch {
		case r < 128:
			b.WriteRune(r)
		case r <= 0xFFFF:
			b.WriteString(`\u` + hex4(int64(r)))
		default:
			hi, lo := utf16.EncodeRune(r)
			b.WriteString(`\u` + hex4(int64(hi)) + `\u` + hex4(int64(lo)))
		}
	}
	return b.String()
}

// hex4 renders v as a zero-padded, four-digit lowercase hex string.
func hex4(v int64) string {
	s := strconv.FormatInt(v, 16)
	for len(s) < 4 {
		s = "0" + s
	}
	return s
}
// PrintStruct walks x with reflection and returns a human-readable
// rendering; on unsupported kinds it returns the error text instead.
func PrintStruct(x interface{}) string {
	buf := bytes.NewBuffer([]byte{})
	if err := psEncode(buf, reflect.ValueOf(x)); err != nil {
		return err.Error()
	}
	return buf.String()
}

// psEncode appends a rendering of v to buf, recursing through pointers,
// containers and interfaces. Map iteration order is whatever the runtime
// yields, so map output is not deterministic. complex, chan and func kinds
// are rejected with an error.
func psEncode(buf *bytes.Buffer, v reflect.Value) error {
	switch v.Kind() {
	case reflect.Invalid:
		buf.WriteString("nil")
	// Int, Int8, Int16, Int32, Int64
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		fmt.Fprintf(buf, "%d", v.Int())
	// Uint, Uint8, Uint16, Uint32, Uint64, Uintptr
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		fmt.Fprintf(buf, "%d", v.Uint())
	// String
	case reflect.String:
		fmt.Fprintf(buf, "%q", v.String())
	// Bool
	case reflect.Bool:
		fmt.Fprintf(buf, "%t", v.Bool())
	// Float32, Float64
	case reflect.Float32, reflect.Float64:
		fmt.Fprintf(buf, "%g", v.Float())
	// Pointer
	case reflect.Ptr:
		buf.WriteByte('&')
		return psEncode(buf, v.Elem())
	// Array, Slice
	case reflect.Array, reflect.Slice:
		buf.WriteString(v.Type().String() + " {")
		for i := 0; i < v.Len(); i++ {
			// BUG FIX: the separator condition was `i < 0`, which never
			// fired, so elements were printed without ", " between them.
			if i > 0 {
				buf.WriteString(", ")
			}
			if err := psEncode(buf, v.Index(i)); err != nil {
				return err
			}
		}
		buf.WriteByte('}')
	// Struct
	case reflect.Struct:
		buf.WriteString(v.Type().String() + " {")
		for i := 0; i < v.NumField(); i++ {
			if i > 0 {
				buf.WriteString(", ")
			}
			fmt.Fprintf(buf, "%s:", v.Type().Field(i).Name)
			if err := psEncode(buf, v.Field(i)); err != nil {
				return err
			}
		}
		buf.WriteByte('}')
	// Map
	case reflect.Map:
		buf.WriteString(v.Type().String())
		buf.WriteByte('{')
		for i, key := range v.MapKeys() {
			if i > 0 {
				buf.WriteString(", ")
			}
			if err := psEncode(buf, key); err != nil {
				return err
			}
			buf.WriteByte(':')
			if err := psEncode(buf, v.MapIndex(key)); err != nil {
				return err
			}
		}
		buf.WriteByte('}')
	// Interface
	case reflect.Interface:
		return psEncode(buf, v.Elem())
	default: // complex, chan, func
		return fmt.Errorf("unsupported type: %s", v.Type())
	}
	return nil
}
// Ping prints a greeting so callers can verify the package is wired up.
func Ping() {
	fmt.Println("Welcome to Use HZUtils")
}
|
package main
// main is intentionally empty; this file only provides sumOfLeftLeaves
// and its helpers.
func main() {
}
// isLeafNode reports whether node has no children.
func isLeafNode(node *TreeNode) bool {
	if node.Left != nil {
		return false
	}
	return node.Right == nil
}
// dfs sums the values of all left leaves in the subtree rooted at node
// (node itself must be non-nil).
func dfs(node *TreeNode) int {
	sum := 0
	if left := node.Left; left != nil {
		if isLeafNode(left) {
			sum += left.Val
		} else {
			sum += dfs(left)
		}
	}
	// A right leaf never counts; only descend into non-leaf right subtrees.
	if right := node.Right; right != nil && !isLeafNode(right) {
		sum += dfs(right)
	}
	return sum
}
// sumOfLeftLeaves returns the sum of all left leaves in the tree.
func sumOfLeftLeaves(root *TreeNode) int {
	if root != nil {
		return dfs(root)
	}
	return 0
}
|
// Copyright 2015 The Go Circuit Project
// Use of this source code is governed by the license for
// The Go Circuit Project, found in the LICENSE file.
//
// Authors:
// 2015 Petar Maymounkov <p@gocircuit.org>
// Package tcp implements a peer transport over TCP.
package tcp
import (
	"bufio"
	"encoding/binary"
	"io"
	"net"

	"github.com/gocircuit/runtime/sys"
)
// conn implements sys.Conn over a single TCP connection.
type conn struct {
	tcp *net.TCPConn  // underlying connection: used for writes and Close
	r   *bufio.Reader // buffered reader over tcp: used for all reads
}
// newConn wraps c in a conn with keep-alive enabled and buffered reads.
// It panics if keep-alive cannot be enabled on the socket.
func newConn(c *net.TCPConn) *conn {
	if err := c.SetKeepAlive(true); err != nil {
		panic(err)
	}
	return &conn{tcp: c, r: bufio.NewReader(c)}
}
// Addr implements sys.Conn; it always returns nil because the remote
// peer's address cannot be determined from a raw TCP connection.
func (c *conn) Addr() sys.Addr {
	return nil // cannot determine peer address of remote peer
}
// Receive reads one length-prefixed chunk from the connection. The wire
// format is a uvarint byte count followed by that many payload bytes; the
// chunk is returned as a []byte.
func (c *conn) Receive() (chunk interface{}, err error) {
	k, err := binary.ReadUvarint(c.r)
	if err != nil {
		return nil, err
	}
	q := make([]byte, k)
	// io.ReadFull replaces the previous manual read loop: it keeps reading
	// until q is full, correctly treats a final Read that returns both
	// data and EOF as success, and reports a short read as
	// io.ErrUnexpectedEOF.
	if _, err := io.ReadFull(c.r, q); err != nil {
		return nil, err
	}
	return q, nil
}
// Send serializes v (which must be a []byte) as a uvarint length prefix
// followed by the payload, and writes both in a single Write call.
func (c *conn) Send(v interface{}) (err error) {
	chunk := v.([]byte)
	// BUG FIX: the length prefix can occupy up to binary.MaxVarintLen64
	// (10) bytes; the previous fixed "+8" under-allocated for lengths of
	// 2^56 bytes or more, which would make PutUvarint panic.
	q := make([]byte, len(chunk)+binary.MaxVarintLen64)
	n := binary.PutUvarint(q, uint64(len(chunk)))
	m := copy(q[n:], chunk)
	_, err = c.tcp.Write(q[:n+m])
	return err
}
// Close closes the underlying TCP connection.
func (c *conn) Close() (err error) {
	return c.tcp.Close()
}
|
package handlers
import (
"encoding/json"
"log"
"net/http"
"github.com/matscus/Hamster/Mock/info_service/datapool"
)
// ClientSearchRQ models the client-search request: the originating channel
// plus the requested response fields and a GUID filter.
type ClientSearchRQ struct {
	Meta struct {
		Channel string `json:"channel"`
	} `json:"meta"`
	Data struct {
		RequestFields []string `json:"requestFields"`
		Filter        struct {
			GUID string `json:"guid"`
		} `json:"filter"`
	} `json:"data"`
}
// ClientSearchRS is the client-search response: the matching clients.
type ClientSearchRS struct {
	Clients []Client `json:"clients"`
}
// Client aggregates one person's profile: addresses, base identity data,
// detail flags, identity documents, contacts and record sources.
type Client struct {
	Addresses []Addresse `json:"addresses"`
	Base      struct {
		ActualDate       string        `json:"actualDate"`
		BirthPlace       string        `json:"birthPlace"`
		Birthdate        string        `json:"birthdate"`
		Categories       []Categorie   `json:"categories"`
		Citizenships     []Citizenship `json:"citizenships"`
		FullName         string        `json:"fullName"`
		Gender           string        `json:"gender"`
		GUID             string        `json:"guid"`
		Hid              string        `json:"hid"`
		IdentityType     string        `json:"identityType"`
		IsPatronymicLack bool          `json:"isPatronymicLack"`
		Name             string        `json:"name"`
		Patronymic       string        `json:"patronymic"`
		Residents        []Resident    `json:"residents"`
		Surname          string        `json:"surname"`
	} `json:"base"`
	Detail struct {
		Biometrics struct {
			IsAgreement bool `json:"isAgreement"`
		} `json:"biometrics"`
		LastFio []interface{} `json:"lastFio"`
	} `json:"detail"`
	Documents []Document `json:"documents"`
	Fatca     struct{}   `json:"fatca"`
	Mails     []string   `json:"mails"`
	Phones    []Phone    `json:"phones"`
	Sources   []Source   `json:"sources"`
}
// Addresse is a single postal address entry of a Client.
// (The name keeps the upstream spelling; renaming would break callers.)
type Addresse struct {
	ActualDate      string `json:"actualDate"`
	Area            string `json:"area"`
	AreaType        string `json:"areaType"`
	City            string `json:"city"`
	CityType        string `json:"cityType"`
	CountryName     string `json:"countryName"`
	District        string `json:"district"`
	Flat            string `json:"flat"`
	FullAddress     string `json:"fullAddress"`
	Hid             string `json:"hid"`
	HouseNumber     string `json:"houseNumber"`
	IsForeign       bool   `json:"isForeign"`
	KladrCode       string `json:"kladrCode"`
	KladrPostalCode string `json:"kladrPostalCode"`
	OkatoCode       string `json:"okatoCode"`
	PostalCode      string `json:"postalCode"`
	Primary         bool   `json:"primary"`
	RegionName      string `json:"regionName"`
	RegionType      string `json:"regionType"`
	Settlement      string `json:"settlement"`
	SettlementType  string `json:"settlementType"`
	Street          string `json:"street"`
	StreetType      string `json:"streetType"`
	Type            string `json:"type"`
}
// Categorie is a typed client category with optional parameters.
// (The name keeps the upstream spelling; renaming would break callers.)
type Categorie struct {
	Params []Param `json:"params"`
	Type   string  `json:"type"`
}

// Param is a single key-only category parameter.
type Param struct {
	Key string `json:"key"`
}

// Citizenship names a country of citizenship.
type Citizenship struct {
	CountryName string `json:"countryName"`
}

// Resident describes a residency record and its state.
type Resident struct {
	State struct {
		TerminalFlag bool `json:"terminalFlag"`
	} `json:"state"`
	Type string `json:"type"`
}
// Document is an identity document (passport, SNILS, ...) of a Client.
type Document struct {
	ActualDate     string `json:"actualDate"`
	DepartmentCode string `json:"departmentCode"`
	FullValue      string `json:"fullValue"`
	Hid            string `json:"hid"`
	IssueAuthority string `json:"issueAuthority"`
	IssueDate      string `json:"issueDate"`
	Number         string `json:"number"`
	Primary        bool   `json:"primary"`
	Series         string `json:"series"`
	State          struct {
		Code string `json:"code"`
	} `json:"state"`
	Type string `json:"type"`
}
// Phone is a phone-number record of a Client.
type Phone struct {
	ActualDate    string `json:"actualDate"`
	CityCode      string `json:"cityCode"`
	CountryCode   string `json:"countryCode"`
	FullNumber    string `json:"fullNumber"`
	Hid           string `json:"hid"`
	IsForeign     bool   `json:"isForeign"`
	Number        string `json:"number"`
	NumberProfile string `json:"numberProfile"`
	Primary       bool   `json:"primary"`
	RawSource     string `json:"rawSource"`
	State         struct {
		Code string `json:"code"`
	} `json:"state"`
	Timezone string `json:"timezone"`
	Type     string `json:"type"`
}

// Source names the originating system of a client record.
type Source struct {
	Hid        string `json:"hid"`
	SystemInfo struct {
		RawID    string `json:"rawId"`
		SystemID string `json:"systemId"`
	} `json:"systemInfo"`
}
// ClientSearch handles a mock client-search request: it decodes the
// request, looks the client up by GUID in the shared datapool, and
// responds with a mostly hard-coded client profile. Only Base.GUID and
// the phone number are taken from the pool entry; everything else is
// static mock data.
func ClientSearch(w http.ResponseWriter, r *http.Request) {
	rq := ClientSearchRQ{}
	err := json.NewDecoder(r.Body).Decode(&rq)
	if err != nil {
		// A body that cannot be decoded is a malformed request, so
		// answer 400 (the original reported 500 here).
		w.WriteHeader(http.StatusBadRequest)
		_, errWrite := w.Write([]byte("{\"Message\":\"" + err.Error() + "\"}"))
		if errWrite != nil {
			log.Printf("[ERROR] Not Writing to ResponseWriter error %s due: %s", err.Error(), errWrite.Error())
		}
		return
	}
	// An unknown GUID yields the zero value: GUID and Phone come back empty.
	client := datapool.GUIDPool[rq.Data.Filter.GUID]
	rs := ClientSearchRS{}
	cli := Client{}

	// Base identity block — static except for the pool-supplied GUID.
	cli.Base.GUID = client.GUID
	cli.Base.Hid = "162847723"
	cli.Base.IdentityType = "3"
	cli.Base.ActualDate = "2020-01-30"
	cli.Base.FullName = "Ааааа Кристина Викторовна"
	cli.Base.Surname = "Ааааа"
	cli.Base.Name = "Кристина"
	cli.Base.Patronymic = "Викторовна"
	cli.Base.Gender = "FEMALE"
	cli.Base.BirthPlace = "РОССИЯ, ГОРОД МОСКВА"

	categorie := Categorie{}
	categorie.Type = "EMPLOYEE"
	param := Param{}
	param.Key = "employeeFireDate"
	categorie.Params = append(categorie.Params, param)
	// NOTE(review): Type is reassigned before the append, so a single
	// REGULAR category is sent while still carrying the employeeFireDate
	// param — confirm whether two categories (EMPLOYEE and REGULAR) were
	// intended. Behavior kept as-is.
	categorie.Type = "REGULAR"
	cli.Base.Categories = append(cli.Base.Categories, categorie)

	resident := Resident{}
	resident.Type = "base"
	resident.State.TerminalFlag = false
	cli.Base.Residents = append(cli.Base.Residents, resident)

	citizenship := Citizenship{}
	citizenship.CountryName = "Российская федерация"
	cli.Base.Citizenships = append(cli.Base.Citizenships, citizenship)

	cli.Base.IsPatronymicLack = true
	cli.Base.Birthdate = "1990-07-10"

	// Single mock home address.
	addresse := Addresse{}
	addresse.Hid = "82728384"
	addresse.Type = "HOME"
	addresse.Primary = true
	addresse.ActualDate = "2020-01-24"
	addresse.PostalCode = "117461"
	addresse.KladrPostalCode = "117461"
	addresse.CountryName = "Россия"
	addresse.District = "Центральный"
	addresse.RegionType = "г"
	addresse.RegionName = "Москва"
	addresse.CityType = "г"
	addresse.City = "Москва"
	addresse.StreetType = "ул"
	addresse.Street = "Херсонская"
	addresse.HouseNumber = "А"
	addresse.Flat = "ААА"
	addresse.OkatoCode = "45293562000"
	addresse.KladrCode = "7700000000030250061"
	addresse.FullAddress = "117461, Россия, г Москва, ул Херсонская, д. А, кв. ААА"
	addresse.IsForeign = false
	cli.Addresses = append(cli.Addresses, addresse)

	document := Document{}
	document.Hid = "96938652"
	document.Type = "PASSPORT_RU"
	document.Primary = false
	document.ActualDate = "2020-01-21"
	document.Series = "45 15"
	document.Number = "111222"
	document.FullValue = "45 12345678"
	document.IssueDate = "2016-03-25"
	document.IssueAuthority = "ОТДЕЛОМ УФМС РОССИИ ПО ГОР. МОСКВЕ ПО РАЙОНУ ЗЮЗИНО"
	document.DepartmentCode = "770-116"
	document.State.Code = "ACTUAL"
	// NOTE(review): Type and FullValue are overwritten before the append,
	// so only a SNILS document is sent while it still carries the
	// passport's series/number/issue fields — confirm whether a second
	// Document was intended. Behavior kept as-is.
	document.Type = "SNILS"
	document.FullValue = "001-ААА-ААА 17"
	cli.Documents = append(cli.Documents, document)

	// Mock mobile phone; the local number comes from the pool entry.
	phone := Phone{}
	phone.Hid = "67381378"
	phone.Type = "PC"
	phone.Primary = false
	phone.ActualDate = "2020-01-30"
	phone.CountryCode = "7"
	phone.CityCode = "985"
	phone.Number = client.Phone
	phone.FullNumber = "АААА"
	phone.Timezone = "UTC+3"
	phone.NumberProfile = "MOBILE"
	phone.RawSource = "+АААА"
	phone.State.Code = "ACTUAL"
	phone.IsForeign = false
	cli.Phones = append(cli.Phones, phone)

	source := Source{}
	source.Hid = "146798431"
	source.SystemInfo.SystemID = "BOSS"
	source.SystemInfo.RawID = "56826"
	cli.Sources = append(cli.Sources, source)

	cli.Detail.Biometrics.IsAgreement = false
	rs.Clients = append(rs.Clients, cli)

	// Declare the payload type before the body is written.
	w.Header().Set("Content-Type", "application/json")
	err = json.NewEncoder(w).Encode(rs)
	if err != nil {
		// Best effort: Encode may already have written part of the body,
		// in which case this status change has no effect.
		w.WriteHeader(http.StatusInternalServerError)
		_, errWrite := w.Write([]byte("{\"Message\":\"" + err.Error() + "\"}"))
		if errWrite != nil {
			log.Printf("[ERROR] Not Writing to ResponseWriter due: %s", errWrite.Error())
		}
	}
}
|
package leetcode
/*A binary tree is univalued if every node in the tree has the same value.
Return true if and only if the given tree is univalued.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/univalued-binary-tree
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// isUnivalTree reports whether every node in the tree holds the same
// value as the root (a "univalued" tree). An empty tree is univalued.
// (LeetCode 965: https://leetcode-cn.com/problems/univalued-binary-tree)
func isUnivalTree(root *TreeNode) bool {
	if root == nil {
		return true
	}
	// Each child, when present, must carry the root's value.
	for _, child := range [2]*TreeNode{root.Left, root.Right} {
		if child != nil && child.Val != root.Val {
			return false
		}
	}
	// Recurse: both subtrees must themselves be univalued.
	return isUnivalTree(root.Left) && isUnivalTree(root.Right)
}
|
package types
import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)
// Sentinel errors for the oracle module, registered with the SDK error
// registry under this module's namespace. Identifiers and codes are
// unchanged; two messages are fixed for grammar.
var (
	ErrUnknownFeedName    = sdkerrors.Register(ModuleName, 2, "unknown feed")
	ErrInvalidFeedName    = sdkerrors.Register(ModuleName, 3, "invalid feed name")
	ErrExistedFeedName    = sdkerrors.Register(ModuleName, 4, "feed already exists")
	ErrUnauthorized       = sdkerrors.Register(ModuleName, 5, "unauthorized owner")
	ErrInvalidServiceName = sdkerrors.Register(ModuleName, 6, "invalid service name")
	ErrInvalidDescription = sdkerrors.Register(ModuleName, 7, "invalid description")
	// Fixed from "method don't register".
	ErrNotRegisterFunc = sdkerrors.Register(ModuleName, 8, "method not registered")
	// Fixed from "invalid state feed".
	ErrInvalidFeedState     = sdkerrors.Register(ModuleName, 9, "invalid feed state")
	ErrInvalidServiceFeeCap = sdkerrors.Register(ModuleName, 10, "service fee cap is invalid")
)
|
/*
Package rpio provides GPIO access on the Raspberry PI without any need
for external c libraries (ex: WiringPI or BCM2835).
Supports simple operations such as:
- Pin mode/direction (input/output)
- Pin write (high/low)
- Pin read (high/low)
- Pull up/down/off
Example of use:
rpio.Open()
defer rpio.Close()
pin := rpio.Pin(4)
pin.Output()
for {
pin.Toggle()
time.Sleep(time.Second)
}
The library use the raw BCM2835 pinouts, not the ports as they are mapped
on the output pins for the raspberry pi
Rev 1 Raspberry Pi
+------+------+--------+
| GPIO | Phys | Name |
+------+------+--------+
| 0 | 3 | SDA |
| 1 | 5 | SCL |
| 4 | 7 | GPIO 7 |
| 7 | 26 | CE1 |
| 8 | 24 | CE0 |
| 9 | 21 | MISO |
| 10 | 19 | MOSI |
| 11 | 23 | SCLK |
| 14 | 8 | TxD |
| 15 | 10 | RxD |
| 17 | 11 | GPIO 0 |
| 18 | 12 | GPIO 1 |
| 21 | 13 | GPIO 2 |
| 22 | 15 | GPIO 3 |
| 23 | 16 | GPIO 4 |
| 24 | 18 | GPIO 5 |
| 25 | 22 | GPIO 6 |
+------+------+--------+
See the spec for full details of the BCM2835 controller:
http://www.raspberrypi.org/wp-content/uploads/2012/02/BCM2835-ARM-Peripherals.pdf
*/
package rpio
import (
"bytes"
"encoding/binary"
"os"
"reflect"
"sync"
"syscall"
"time"
"unsafe"
)
// Direction selects a pin's mode: Input or Output.
type Direction uint8

// Pin is a BCM2835 GPIO pin number (raw SoC numbering, not header position).
type Pin uint8

// State is a pin's logic level: Low or High.
type State uint8

// Pull selects the pin's internal resistor: off, pull-down, or pull-up.
type Pull uint8

// Memory offsets for gpio, see the spec for more details
const (
	bcm2835Base = 0x20000000             // SoC peripheral base (Raspberry Pi 1)
	pi1GPIOBase = bcm2835Base + 0x200000 // GPIO controller base on Pi 1
	memLength   = 4096                   // one page: covers all GPIO registers

	pinMask uint32 = 7 // 0b111 - pinmode is 3 bits
)

// Pin direction, a pin can be set in Input or Output mode
const (
	Input Direction = iota
	Output
)

// State of pin, High / Low
const (
	Low State = iota
	High
)

// Pull Up / Down / Off
const (
	PullOff Pull = iota
	PullDown
	PullUp
)

// Arrays for 8 / 32 bit access to memory and a semaphore for write locking
var (
	memlock sync.Mutex
	mem     []uint32 // 32-bit view of the mapped GPIO registers (aliases mem8)
	mem8    []uint8  // raw byte mapping returned by mmap; kept for Munmap
)
// Input configures pin as an input (wrapper around PinMode).
func (pin Pin) Input() {
	PinMode(pin, Input)
}

// Output configures pin as an output (wrapper around PinMode).
func (pin Pin) Output() {
	PinMode(pin, Output)
}

// High drives pin to logic high (wrapper around WritePin).
func (pin Pin) High() {
	WritePin(pin, High)
}

// Low drives pin to logic low (wrapper around WritePin).
func (pin Pin) Low() {
	WritePin(pin, Low)
}

// Toggle flips the pin's current state (wrapper around TogglePin).
func (pin Pin) Toggle() {
	TogglePin(pin)
}

// Mode sets the pin's direction (wrapper around PinMode).
func (pin Pin) Mode(dir Direction) {
	PinMode(pin, dir)
}

// Write sets the pin's state, high or low (wrapper around WritePin).
func (pin Pin) Write(state State) {
	WritePin(pin, state)
}

// Read returns the pin's current state (wrapper around ReadPin).
func (pin Pin) Read() State {
	return ReadPin(pin)
}

// Pull applies the given pull-resistor mode (wrapper around PullMode).
func (pin Pin) Pull(pull Pull) {
	PullMode(pin, pull)
}

// PullUp enables the pin's internal pull-up resistor.
func (pin Pin) PullUp() {
	PullMode(pin, PullUp)
}

// PullDown enables the pin's internal pull-down resistor.
func (pin Pin) PullDown() {
	PullMode(pin, PullDown)
}

// PullOff disables the pin's internal pull resistor.
func (pin Pin) PullOff() {
	PullMode(pin, PullOff)
}
// PinMode sets the direction of a given pin (Input or Output).
func PinMode(pin Pin, direction Direction) {
	// Each GPFSEL register packs the 3-bit mode for 10 pins, so the
	// register index is pin/10 and the bit offset is (pin%10)*3.
	reg := uint8(pin) / 10
	shift := (uint8(pin) % 10) * 3

	memlock.Lock()
	defer memlock.Unlock()

	// Clear the pin's 3 mode bits; Output additionally sets the low
	// bit of the field (001), Input leaves it cleared (000).
	cleared := mem[reg] &^ (pinMask << shift)
	if direction == Input {
		mem[reg] = cleared
	} else {
		mem[reg] = cleared | (1 << shift)
	}
}
// WritePin sets a given pin High or Low by writing a single bit to
// the set (GPSET) or clear (GPCLR) register for the pin's bank.
func WritePin(pin Pin, state State) {
	p := uint8(pin)
	// Registers are banked by groups of 32 pins: set registers start
	// at word offset 7, clear registers at word offset 10.
	bank := p / 32
	bit := uint32(1) << (p & 31)

	memlock.Lock()
	defer memlock.Unlock()

	if state == Low {
		mem[bank+10] = bit
	} else {
		mem[bank+7] = bit
	}
}
// ReadPin returns the current level of a pin by reading the GPLEV
// register for the pin's bank (word offset 13 or 14).
func ReadPin(pin Pin) State {
	// Input level register offset (13 / 14 depending on bank)
	levelReg := uint8(pin)/32 + 13
	// The bit position must be taken modulo 32: pins 32+ live in the
	// second bank, and the original `1 << uint8(pin)` shifted past the
	// 32-bit register width, so those pins always read Low.
	if (mem[levelReg] & (1 << (uint8(pin) & 31))) != 0 {
		return High
	}
	return Low
}
// TogglePin inverts a pin's state: a Low pin is driven High and a
// High pin is driven Low.
// TODO: probably possible to do this much faster without read
func TogglePin(pin Pin) {
	if ReadPin(pin) == Low {
		pin.High()
	} else {
		pin.Low()
	}
}
// PullMode applies a pull-up, pull-down, or no pull to a pin using the
// BCM2835 GPPUD/GPPUDCLK sequence: write the desired pull to GPPUD,
// pulse the pin's bit in GPPUDCLK to latch it, then clear both.
// The statement order and the sleeps are required by the hardware.
func PullMode(pin Pin, pull Pull) {
	// Pull up/down/off register has offset 38 / 39, pull is 37
	pullClkReg := uint8(pin)/32 + 38 // GPPUDCLK0/1, banked by 32 pins
	pullReg := 37                    // GPPUD control register
	shift := (uint8(pin) % 32)
	memlock.Lock()
	defer memlock.Unlock()
	switch pull {
	case PullDown, PullUp:
		// Pull's numeric value (1=down, 2=up) goes into the low 2 bits.
		mem[pullReg] = mem[pullReg]&^3 | uint32(pull)
	case PullOff:
		mem[pullReg] = mem[pullReg] &^ 3
	}
	// Wait for value to clock in, this is ugly, sorry :(
	time.Sleep(time.Microsecond)
	// Pulse the clock bit for this pin to latch the pull setting.
	mem[pullClkReg] = 1 << shift
	// Wait for value to clock in
	time.Sleep(time.Microsecond)
	// Remove both the control signal and the clock.
	mem[pullReg] = mem[pullReg] &^ 3
	mem[pullClkReg] = 0
}
// Open and memory map GPIO memory range from /dev/mem .
// Some reflection magic is used to convert it to a unsafe []uint32 pointer
//
// Open tries /dev/gpiomem first (no root needed, base is implicitly the
// GPIO block, so base stays 0); if that device does not exist it falls
// back to /dev/mem with the base discovered via getGPIOBase. On success
// the mapped registers are available through the package-level mem/mem8
// slices until Close is called.
func Open() (err error) {
	var file *os.File
	var base int64

	// Open fd for rw mem access; try gpiomem first
	// NOTE: only a not-exist error triggers the /dev/mem fallback;
	// e.g. a permission error on /dev/gpiomem is returned as-is.
	if file, err = os.OpenFile(
		"/dev/gpiomem",
		os.O_RDWR|os.O_SYNC,
		0); os.IsNotExist(err) {
		file, err = os.OpenFile(
			"/dev/mem",
			os.O_RDWR|os.O_SYNC,
			0)
		base = getGPIOBase()
	}

	if err != nil {
		return
	}

	// FD can be closed after memory mapping
	defer file.Close()

	memlock.Lock()
	defer memlock.Unlock()

	// Memory map GPIO registers to byte array
	mem8, err = syscall.Mmap(
		int(file.Fd()),
		base,
		memLength,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_SHARED)

	if err != nil {
		return
	}

	// Convert mapped byte memory to unsafe []uint32 pointer, adjust length as needed
	// (mem aliases mem8: same backing memory, word-sized view).
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem8))
	header.Len /= (32 / 8) // (32 bit = 4 bytes)
	header.Cap /= (32 / 8)

	mem = *(*[]uint32)(unsafe.Pointer(&header))

	return nil
}
// Close unmaps GPIO memory. After Close the mem/mem8 views are invalid
// and pin operations will fault; call Open again before further use.
func Close() error {
	memlock.Lock()
	defer memlock.Unlock()
	return syscall.Munmap(mem8)
}
// getGPIOBase reads /proc/device-tree/soc/ranges and determines the
// SoC peripheral base address, returning the GPIO block address
// (peripheral base + 0x200000). It falls back to the Raspberry Pi 1
// default whenever the file cannot be opened, read, or parsed.
func getGPIOBase() (base int64) {
	base = pi1GPIOBase
	ranges, err := os.Open("/proc/device-tree/soc/ranges")
	if err != nil {
		return
	}
	// Close only after a successful open. The original deferred Close
	// before checking err, silently relying on (*os.File).Close
	// tolerating a nil receiver.
	defer ranges.Close()
	// The big-endian uint32 at byte offset 4 is the peripheral base.
	b := make([]byte, 4)
	n, err := ranges.ReadAt(b, 4)
	if n != 4 || err != nil {
		return
	}
	buf := bytes.NewReader(b)
	var out uint32
	err = binary.Read(buf, binary.BigEndian, &out)
	if err != nil {
		return
	}
	// GPIO controller sits at +0x200000 from the peripheral base.
	return int64(out + 0x200000)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.