text
stringlengths 11
4.05M
|
|---|
package controllers
import (
"encoding/json"
"net/http"
)
// SetValue modifies the underlying storage of the controller object.
//
// It accepts POST requests only, stores the "value" form field via the
// controller's storage, and echoes the stored value back as a JSON Payload.
func (c *Controller) SetValue(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}
	if err := r.ParseForm(); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	value := r.FormValue("value")
	c.storage.Put(value)
	p := Payload{Value: value}
	// Marshal BEFORE writing the status: the original wrote StatusOK first
	// and then issued a second WriteHeader on marshal failure, which is a
	// superfluous no-op — the client had already received a 200.
	payload, err := json.Marshal(p)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write(payload)
}
|
package clients
import (
"context"
"github.com/google/uuid"
"github.com/sumelms/microservice-course/internal/course/domain"
)
// courseClient adapts the course domain service for use as a client
// dependency (e.g. existence checks from other services).
type courseClient struct {
// service is the course domain service all lookups are delegated to.
service *domain.Service
}
// NewCourseClient wires a course client around the given domain service.
func NewCourseClient(svc *domain.Service) *courseClient {
	client := &courseClient{service: svc}
	return client
}
// CourseExists reports whether the course with the given ID can be fetched
// through the underlying domain service. It returns nil when the lookup
// succeeds and the lookup error otherwise.
func (c courseClient) CourseExists(ctx context.Context, id uuid.UUID) error {
	// The course payload itself is not needed; only the lookup error matters,
	// so the redundant if/return-nil branching is collapsed.
	_, err := c.service.Course(ctx, id)
	return err
}
|
package main
import (
as "github.com/dearcj/golangproj/asyncutils"
"time"
)
// BattleResult encodes the outcome of a battle.
type BattleResult uint32
// BattleMode encodes the battle mode; no values are declared in this file.
type BattleMode uint32
const (
// BR_PLAYERS_WIN: the player team won; BR_ENEMIES_WIN: the enemy team won.
// NOTE(review): other code compares against BR_NONE, which is not declared
// here — presumably defined elsewhere in the package.
BR_PLAYERS_WIN BattleResult = iota
BR_ENEMIES_WIN
)
// FListFunc produces an FList (effect list) on demand.
type FListFunc func() FList
// BattleFSM drives the turn-based battle state machine for a single Run.
type BattleFSM struct {
// run is the active run this FSM belongs to.
run *Run
// cbAfterTurn queues callbacks fired when a turn completes.
cbAfterTurn *as.AsyncUtil
// cbAfterBattle queues callbacks fired when the battle completes.
cbAfterBattle *as.AsyncUtil
// afterSkillFuncs are effect producers executed after a skill/move.
afterSkillFuncs []FListFunc
}
// NextBattleFSM embeds BattleFSM; it adds nothing here — TODO(review):
// confirm its intended role (variant FSM for the following battle?).
type NextBattleFSM struct {
*BattleFSM
}
// CreateBattleManager builds a BattleFSM whose turn and battle callback
// utilities are parked on a sentinel deadline far in the future, keeping
// them idle until callbacks are explicitly scheduled.
func CreateBattleManager() *BattleFSM {
	// Roughly 114 years out — effectively "never fires on its own".
	deadline := time.Now().Add(999999 * time.Hour)
	fsm := &BattleFSM{}
	fsm.cbAfterTurn = as.CreateAsycnUtil(&deadline)
	fsm.cbAfterBattle = as.CreateAsycnUtil(&deadline)
	return fsm
}
// CallAfterTurn schedules f to run when the current turn's callback queue
// is executed (zero extra delay).
func (bm *BattleFSM) CallAfterTurn(f as.AsyncCallbackFunction) {
bm.cbAfterTurn.DelayedCall(f, 0)
}
// CallAfterbattle schedules f to run when the battle's callback queue is
// executed (zero extra delay). NOTE(review): name would conventionally be
// CallAfterBattle; left as-is because callers elsewhere may use it.
func (bm *BattleFSM) CallAfterbattle(f as.AsyncCallbackFunction) {
bm.cbAfterBattle.DelayedCall(f, 0)
}
// FinishTurn is a stub; per-session turn teardown is not implemented yet.
func (bm *BattleFSM) FinishTurn(s *Session) {
}
// StartBattle returns the initial turn-state tuple: state 0, the configured
// player preparation time, a no-op current-state function (battle-start
// buffs are disabled), and SetPlayerTurn as the next state.
// NOTE(review): the r parameter is unused here; `config` is a package-level
// value not visible in this file.
func (bm *BattleFSM) StartBattle(r *Run) (TurnStateType, time.Duration, CurStateFunc, NextStateFunc) {
return 0, config.Player.PrepareTime, func() {
//bm.BuffsOnBattleStart()
}, bm.SetPlayerTurn
}
// EffectsAfterMove registers an effect producer to be invoked after a
// skill/move is executed.
func (bm *BattleFSM) EffectsAfterMove(f FListFunc) {
bm.afterSkillFuncs = append(bm.afterSkillFuncs, f)
}
// ExecBuffsHarm walks every actor in the run and collects per-turn buff
// effects. The buff iteration is currently commented out, so this always
// returns a nil FList; the actor/unit scan is kept as scaffolding.
func (bm *BattleFSM) ExecBuffsHarm() FList {
var se FList
for _, a := range bm.run.factory.actors {
// Only actors that carry a Unit component can hold buffs.
unit := a.FindByComponent(config.Components.Unit)
if unit != nil {
/*barray := unit.(*Unit).Buffs
for _, b := range barray {
if b.DoEveryTurn != nil {
se = append(se, b.DoEveryTurn()...)
}
}*/
}
}
return se
}
// SetPlayerTurn finishes any player sessions that did not complete the
// previous turn, then advances the turn counter and stamps the turn start
// time. Returns state 0, the configured player turn time, no current-state
// function, and SetExecPlayerTurn as the next state.
// NOTE(review): the loop reads bm.run.turnNumber but iterates r.factory —
// presumably r == bm.run; confirm against the caller.
func (bm *BattleFSM) SetPlayerTurn(r *Run) (TurnStateType, time.Duration, CurStateFunc, NextStateFunc) {
if bm.run.turnNumber > 0 {
for _, a := range r.factory.actors {
// Only human/player actors with a live session and no recorded
// finished-state for the previous turn are force-finished.
player := a.FindByComponent(config.Components.Player)
if player != nil && a.session != nil && r.stateFinished[a.session.id] == nil {
bm.FinishTurn(a.session)
}
}
}
bm.run.turnNumber++
bm.run.playerTurnStarted = time.Now()
return 0, config.Player.TurnTime, nil, bm.SetExecPlayerTurn
}
// BotsFinishScene schedules (2s later, on the run's async queue) the
// finishing of the current scene state for every bot session, so bots do
// not stall the turn waiting for input they will never give.
func (bm *BattleFSM) BotsFinishScene() {
bm.run.async.DelayedCall(func() {
for _, session := range bm.run.team.sessions {
if session.IsBot() {
bm.run.FinishSessionState(session)
}
}
}, 2*time.Second)
}
// SetExecPlayerTurn is intended to execute the queued turn actions, apply
// buff harm, and either end the battle or hand over to the enemy turn.
// The entire implementation is currently commented out, so it is a no-op
// returning the zero state tuple.
func (bm *BattleFSM) SetExecPlayerTurn(r *Run) (TurnStateType, time.Duration, CurStateFunc, NextStateFunc) {
/*var earlyTurnEffects FList
// r.SetState(config.TurnState.BATTLE_BEFORE_TURN, 0, nil)
bm.run.team.NeedToKnow(earlyTurnEffects)
//TODO: ADD CAN USE CHECKING AND MOVES CHECKING
//TODO: ADD 1 SKILL CHECKING AND 1 ITEM CHECKING
var br BattleResult
var turnEffects FList
br, turnEffects, _ = bm.run.BlindGame.ExecTurnActions(bm.run.turnActions.actions)
buffHarmsEffects := bm.ExecBuffsHarm()
bm.run.team.NeedToKnow(turnEffects, buffHarmsEffects)
bm.cbAfterTurn.Exec()
bm.run.BlindGame.ClearActions(nil, 0, 0)
if br != BR_NONE {
bm.cbAfterBattle.Exec()
battleEndEffect, _ := bm.BattleEnd()
r.team.NeedToKnow(battleEndEffect)
for _, s := range r.team.sessions {
s.NeedToKnow(s.player.Effect(confActions.AnimationEnded))
}
// return config.TurnState.BATTLE_AFTER_TURN, 0, r.HoldUntilAnimation(func(r *Run) (TurnStateType, time.Duration, NextStateFunc) {
// return bm.BattleFinish(br, killed)
// })
} else {
// r.team.NeedToKnow(cf(confActions.StartEnemyTurn))
// return bm.SetEnemyTurn(r)
}
*/
return 0, 0, nil, nil
}
|
// This file was generated for SObject TopicUserEvent, API Version v43.0 at 2018-07-30 03:47:51.127432943 -0400 EDT m=+37.471445409
package sobjects
import (
"fmt"
"strings"
)
// TopicUserEvent mirrors the Salesforce TopicUserEvent SObject (API v43.0).
// Common SObject fields come from the embedded BaseSObject.
type TopicUserEvent struct {
BaseSObject
ActionEnum string `force:",omitempty"`
CreatedDate string `force:",omitempty"`
Id string `force:",omitempty"`
TopicId string `force:",omitempty"`
UserId string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject.
func (t *TopicUserEvent) ApiName() string {
return "TopicUserEvent"
}
// String renders the TopicUserEvent as a human-readable multi-line summary.
func (t *TopicUserEvent) String() string {
	var b strings.Builder
	// Name is promoted from the embedded BaseSObject.
	fmt.Fprintf(&b, "TopicUserEvent #%s - %s\n", t.Id, t.Name)
	fmt.Fprintf(&b, "\tActionEnum: %v\n", t.ActionEnum)
	fmt.Fprintf(&b, "\tCreatedDate: %v\n", t.CreatedDate)
	fmt.Fprintf(&b, "\tId: %v\n", t.Id)
	fmt.Fprintf(&b, "\tTopicId: %v\n", t.TopicId)
	fmt.Fprintf(&b, "\tUserId: %v\n", t.UserId)
	return b.String()
}
// TopicUserEventQueryResponse is the SOQL query response envelope whose
// Records payload holds TopicUserEvent rows.
type TopicUserEventQueryResponse struct {
BaseQuery
Records []TopicUserEvent `json:"Records" force:"records"`
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alpha
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"regexp"
"strings"
"time"
"google.golang.org/api/googleapi"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
// EncodeIAMCreateRequest encodes the create request for an iam resource.
// It nests the resource map under resourceName, moves the nested "name"
// into the top-level idField, drops projectId, and shortens the id to the
// segment after the last '/' and before any '@'.
func EncodeIAMCreateRequest(m map[string]interface{}, resourceName, idField string) map[string]interface{} {
req := make(map[string]interface{})
// Put base object into object field.
dcl.PutMapEntry(req, []string{resourceName}, m)
// Move name field from from nested object to id field.
dcl.MoveMapEntry(req, []string{resourceName, "name"}, []string{idField})
// Delete projectID field.
delete(req[resourceName].(map[string]interface{}), "projectId")
// Change value of id field to only last part after / and before @.
// NOTE(review): the type assertion req[idField].(*string) panics if the
// moved name is not a *string — presumed guaranteed by upstream callers.
idParts := regexp.MustCompile("([^@/]+)[^/]*$").FindStringSubmatch(*req[idField].(*string))
if len(idParts) < 2 {
// No match: leave the full name in place rather than corrupt it.
return req
}
req[idField] = idParts[1]
return req
}
// EncodeRoleCreateRequest properly encodes the create request for an iam role,
// nesting the body under "role" and surfacing its name as "roleId".
func EncodeRoleCreateRequest(m map[string]interface{}) map[string]interface{} {
return EncodeIAMCreateRequest(m, "role", "roleId")
}
// EncodeServiceAccountCreateRequest properly encodes the create request for an
// iam service account, nesting the body under "serviceAccount" and surfacing
// its name as "accountId".
func EncodeServiceAccountCreateRequest(m map[string]interface{}) map[string]interface{} {
return EncodeIAMCreateRequest(m, "serviceAccount", "accountId")
}
// canonicalizeServiceAccountName compares two service account names for
// equivalence, ignoring everything after the '@'. Both arguments must be
// *string; anything else compares unequal. Two nil pointers are equal.
func canonicalizeServiceAccountName(m, n interface{}) bool {
	mp, mok := m.(*string)
	np, nok := n.(*string)
	if !mok || !nok {
		return false
	}
	switch {
	case mp == nil && np == nil:
		return true
	case mp == nil || np == nil:
		return false
	}
	// Only the local part (before the '@') participates in the comparison.
	mLocal := strings.Split(*mp, "@")[0]
	nLocal := strings.Split(*np, "@")[0]
	return dcl.PartialSelfLinkToSelfLink(&mLocal, &nLocal)
}
// GetWorkloadIdentityPool fetches the pool identified by r, unmarshals it,
// restores the identifying fields (Project/Location/Name) from the request,
// and canonicalizes the result against the desired state. A not-found
// condition is surfaced as a *googleapi.Error with code 404.
func (c *Client) GetWorkloadIdentityPool(ctx context.Context, r *WorkloadIdentityPool) (*WorkloadIdentityPool, error) {
ctx = dcl.ContextWithRequestID(ctx)
ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
defer cancel()
b, err := c.getWorkloadIdentityPoolRaw(ctx, r)
if err != nil {
if dcl.IsNotFound(err) {
return nil, &googleapi.Error{
Code: 404,
Message: err.Error(),
}
}
return nil, err
}
result, err := unmarshalWorkloadIdentityPool(b, c, r)
if err != nil {
return nil, err
}
// These fields identify the resource but are not echoed by the API body,
// so copy them back from the request.
result.Project = r.Project
result.Location = r.Location
result.Name = r.Name
c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result)
c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r)
result, err = canonicalizeWorkloadIdentityPoolNewState(c, result, r)
if err != nil {
return nil, err
}
c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result)
return result, nil
}
// getWorkloadIdentityPoolRaw fetches the raw response body for the given
// WorkloadIdentityPool, retrying while the enclosing project is still
// propagating IAM permissions (403 on iam.workloadIdentityPools.get), for
// at most two minutes.
func (c *Client) getWorkloadIdentityPoolRaw(ctx context.Context, r *WorkloadIdentityPool) ([]byte, error) {
	u, err := r.getURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	var resp *dcl.RetryDetails
	// Retry until project is ready, but never longer than two minutes.
	ctt, cancel := context.WithTimeout(ctx, 120*time.Second)
	defer cancel()
	err = dcl.Do(ctt, func(ctt context.Context) (*dcl.RetryDetails, error) {
		var err error
		// Bug fix: send the request with the bounded retry context (ctt),
		// not the outer ctx, so in-flight requests are cancelled when the
		// 120-second retry window expires.
		resp, err = dcl.SendRequest(ctt, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
		if err != nil {
			if gerr, ok := err.(*googleapi.Error); ok {
				// Permission-propagation 403s are transient: report
				// "operation not done" so dcl.Do keeps retrying.
				if gerr.Code == 403 && strings.HasPrefix(gerr.Message, "Permission 'iam.workloadIdentityPools.get' denied on resource") {
					return &dcl.RetryDetails{}, dcl.OperationNotDone{}
				}
			}
			return nil, err
		}
		return nil, nil
	}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	b, err := ioutil.ReadAll(resp.Response.Body)
	if err != nil {
		return nil, err
	}
	return b, nil
}
// normalizeServiceAccountName converts name to its short form, dropping the
// domain after the '@'.
// Example: projects/xyz/serviceAccounts/test-id-xcad-1665079476@xyz.iam.gserviceaccount.com becomes test-id-xcad-1665079476
// NOTE(review): assumes dcl.SelfLinkToName never returns nil for the inputs
// seen here — a nil would panic on the dereference below; confirm.
func normalizeServiceAccountName(name *string) *string {
newName := dcl.SelfLinkToName(name)
*newName = strings.Split(*newName, "@")[0]
return newName
}
// getURL returns the GET endpoint for this service account, built from the
// normalized resource and the short (pre-'@') account name.
func (r *ServiceAccount) getURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	nr.Name = normalizeServiceAccountName(nr.Name)
	vals := map[string]any{
		"project": dcl.ValueOrEmptyString(nr.Project),
		"name":    dcl.ValueOrEmptyString(nr.Name),
	}
	u := dcl.URL("projects/{{project}}/serviceAccounts/{{name}}@{{project}}.iam.gserviceaccount.com", nr.basePath(), userBasePath, vals)
	return u, nil
}
// createURL returns the collection endpoint used to create this service
// account; only the project is interpolated.
func (r *ServiceAccount) createURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	nr.Name = normalizeServiceAccountName(nr.Name)
	vals := map[string]any{
		"project": dcl.ValueOrEmptyString(nr.Project),
	}
	u := dcl.URL("projects/{{project}}/serviceAccounts", nr.basePath(), userBasePath, vals)
	return u, nil
}
// deleteURL returns the DELETE endpoint for this service account, built from
// the normalized resource and the short (pre-'@') account name.
func (r *ServiceAccount) deleteURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	nr.Name = normalizeServiceAccountName(nr.Name)
	vals := map[string]any{
		"project": dcl.ValueOrEmptyString(nr.Project),
		"name":    dcl.ValueOrEmptyString(nr.Name),
	}
	u := dcl.URL("projects/{{project}}/serviceAccounts/{{name}}@{{project}}.iam.gserviceaccount.com", nr.basePath(), userBasePath, vals)
	return u, nil
}
// SetPolicyURL constructs the url for setting IAM Policy on this service
// account. Uses dcl.ValueOrEmptyString — consistent with the other URL
// builders in this file — instead of raw pointer dereferences, which would
// panic when Project or Name is nil.
func (r *ServiceAccount) SetPolicyURL(userBasePath string) string {
	nr := r.urlNormalized()
	fields := map[string]any{
		"project": dcl.ValueOrEmptyString(nr.Project),
		"name":    dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/serviceAccounts/{{name}}@{{project}}.iam.gserviceaccount.com:setIamPolicy", nr.basePath(), userBasePath, fields)
}
// getPolicyURL constructs the url for reading the IAM Policy of this service
// account. Uses dcl.ValueOrEmptyString — consistent with the other URL
// builders in this file — instead of raw pointer dereferences, which would
// panic when Project or Name is nil.
func (r *ServiceAccount) getPolicyURL(userBasePath string) string {
	nr := r.urlNormalized()
	fields := map[string]any{
		"project": dcl.ValueOrEmptyString(nr.Project),
		"name":    dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/serviceAccounts/{{name}}@{{project}}.iam.gserviceaccount.com:getIamPolicy", nr.basePath(), userBasePath, fields)
}
// We are using CUSTOM_URL_METHOD trait on GetIAMPolicy and SetIAMPolicy which requires us to define these custom methods for IAM.
// SetPolicyVerb sets the verb for SetPolicy.
func (r *ServiceAccount) SetPolicyVerb() string {
return "POST"
}
// IAMPolicyVersion defines version for IAMPolicy.
// Version 3 enables conditional role bindings in the returned policy.
func (r *ServiceAccount) IAMPolicyVersion() int {
return 3
}
// GetPolicy returns the request triple (url, verb, body) for fetching this
// service account's IAM policy at the version given by IAMPolicyVersion.
func (r *ServiceAccount) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) {
	url := r.getPolicyURL(basePath)
	var body bytes.Buffer
	fmt.Fprintf(&body, `{"options":{"requestedPolicyVersion": %d}}`, r.IAMPolicyVersion())
	return url, "POST", &body, nil
}
|
package dsv
import (
"bytes"
"github.com/stretchr/testify/assert"
"log"
"testing"
)
// TestFloatParser verifies float32/float64 parsing, including a quoted
// value with a thousands separator ("1,989.12") and reuse of the bound
// struct across rows.
func TestFloatParser(t *testing.T) {
type testRow struct {
Name string `dsv:"name"`
Float32 float32 `dsv:"float32"`
Float64 float64 `dsv:"float64"`
Comma float64 `dsv:"comma"`
}
raw := []byte("name,float32,float64,comma\nsergey,363.4,-3.27,\"1,989.12\"\nanton,1.82,715.0,\n")
r := bytes.NewReader(raw)
data := testRow{}
parser, err := NewCsvParser(r, true, &data)
assert.NoError(t, err)
eof, err := parser.Next()
assert.NoError(t, err)
assert.False(t, eof)
assert.Equal(t, "sergey", data.Name)
assert.Equal(t, float32(363.4), data.Float32)
assert.Equal(t, -3.27, data.Float64)
// Quoted "1,989.12" must parse with the separator stripped.
assert.Equal(t, 1989.12, data.Comma)
eof, err = parser.Next()
assert.NoError(t, err)
assert.False(t, eof)
assert.Equal(t, "anton", data.Name)
assert.Equal(t, float32(1.82), data.Float32)
assert.Equal(t, float64(715), data.Float64)
// NOTE(review): leftover debug print; removing it would orphan the "log"
// import, so it is flagged here rather than deleted.
log.Printf("%v", complex(1, 5))
}
// TestComplexParser checks that complex64 and complex128 columns are parsed
// from their textual a+bi form.
func TestComplexParser(t *testing.T) {
	type testRow struct {
		Name       string     `tsv:"name"`
		Complex64  complex64  `tsv:"complex64"`
		Complex128 complex128 `tsv:"complex128"`
	}
	input := []byte("name,complex64,complex128\nsergey,3634+327i,-1.5+17.0i\n")
	var row testRow
	parser, err := NewCsvParser(bytes.NewReader(input), false, &row)
	assert.NoError(t, err)
	eof, err := parser.Next()
	assert.NoError(t, err)
	assert.False(t, eof)
	assert.Equal(t, "sergey", row.Name)
	assert.Equal(t, complex64(complex(3634, 327)), row.Complex64)
	assert.Equal(t, -1.5+17.0i, row.Complex128)
}
// TestUintParser checks every unsigned width parses correctly and that a
// negative value in an unsigned column surfaces an error on the next row.
func TestUintParser(t *testing.T) {
	type testRow struct {
		Name   string `tsv:"name"`
		Uint   uint   `tsv:"unit"`
		Uint8  uint8  `tsv:"unit8"`
		Uint16 uint16 `tsv:"unit16"`
		Uint32 uint32 `tsv:"unit32"`
		Uint64 uint64 `tsv:"unit64"`
	}
	input := []byte("name,unit,unit8,unit16,unit32,unit64\nsergey,327,222,327,327,327\nanton,-3634,-222,-3634,-3634,-3634\n")
	var row testRow
	parser, err := NewCsvParser(bytes.NewReader(input), false, &row)
	assert.NoError(t, err)
	eof, err := parser.Next()
	assert.NoError(t, err)
	assert.False(t, eof)
	assert.Equal(t, "sergey", row.Name)
	assert.Equal(t, uint(327), row.Uint)
	assert.Equal(t, uint8(222), row.Uint8)
	assert.Equal(t, uint16(327), row.Uint16)
	assert.Equal(t, uint32(327), row.Uint32)
	assert.Equal(t, uint64(327), row.Uint64)
	// The second row holds negative values, which must fail for uints.
	_, err = parser.Next()
	assert.Error(t, err)
}
// TestIntParser checks every signed width parses correctly and that the
// second row (int8 value -222, out of range) surfaces an error.
func TestIntParser(t *testing.T) {
	type testRow struct {
		Name  string `tsv:"name"`
		Int   int    `tsv:"int"`
		Int8  int8   `tsv:"int8"`
		Int16 int16  `tsv:"int16"`
		Int32 int32  `tsv:"int32"`
		Int64 int64  `tsv:"int64"`
	}
	input := []byte("name,int,int8,int16,int32,int64\nsergey,327,-111,327,327,-327\nanton,-3634,-222,-3634,-3634,-3634\n")
	var row testRow
	parser, err := NewCsvParser(bytes.NewReader(input), false, &row)
	assert.NoError(t, err)
	eof, err := parser.Next()
	assert.NoError(t, err)
	assert.False(t, eof)
	assert.Equal(t, "sergey", row.Name)
	assert.Equal(t, 327, row.Int)
	assert.Equal(t, int8(-111), row.Int8)
	assert.Equal(t, int16(327), row.Int16)
	assert.Equal(t, int32(327), row.Int32)
	assert.Equal(t, int64(-327), row.Int64)
	_, err = parser.Next()
	assert.Error(t, err)
}
|
package cspec
// EIP represents the blockchain arguments at a
// given blockNumber. A list of these is a set
// of chainspec arguments, the ChainSpecArguments.
// The initial configuration is considered EIP0
type EIP struct {
Enabled bool //Enabled allows for explicit checks on presence
// Name is a numeric EIP identifier (e.g. the EIP number).
Name uint32
// BlockNumber is the activation block for this EIP's arguments.
BlockNumber Number
BlockReward Number
DifficultyBomb Number
MaxCodeSize Number
// these are included because conceivably they are
// blockchain arguments that can be altered in an
// EIP.
ChainID Number
NetworkID Number
AccountStartNonce Number
GasLimit Number
MaxExtraDataSize Number
}
|
package network
// PacketID represents the network-level packet ID.
type PacketID uint16
//go:generate stringer -type=PacketID
// Packet IDs are sequential (iota) and position-sensitive: the wire value
// of every constant depends on its index in this list, so entries must not
// be inserted, removed, or reordered. MSG_SYS_reserveXX / MSG_MHF_reserveXX
// names are placeholders holding their slot in the protocol.
const (
//revive:disable
// Core system/session control messages.
MSG_HEAD PacketID = iota
MSG_SYS_reserve01
MSG_SYS_reserve02
MSG_SYS_reserve03
MSG_SYS_reserve04
MSG_SYS_reserve05
MSG_SYS_reserve06
MSG_SYS_reserve07
MSG_SYS_ADD_OBJECT
MSG_SYS_DEL_OBJECT
MSG_SYS_DISP_OBJECT
MSG_SYS_HIDE_OBJECT
MSG_SYS_reserve0C
MSG_SYS_reserve0D
MSG_SYS_reserve0E
MSG_SYS_EXTEND_THRESHOLD
MSG_SYS_END
MSG_SYS_NOP
MSG_SYS_ACK
MSG_SYS_TERMINAL_LOG
MSG_SYS_LOGIN
MSG_SYS_LOGOUT
MSG_SYS_SET_STATUS
MSG_SYS_PING
MSG_SYS_CAST_BINARY
MSG_SYS_HIDE_CLIENT
MSG_SYS_TIME
MSG_SYS_CASTED_BINARY
MSG_SYS_GET_FILE
MSG_SYS_ISSUE_LOGKEY
MSG_SYS_RECORD_LOG
MSG_SYS_ECHO
// Stage (room) lifecycle and stage-binary messages.
MSG_SYS_CREATE_STAGE
MSG_SYS_STAGE_DESTRUCT
MSG_SYS_ENTER_STAGE
MSG_SYS_BACK_STAGE
MSG_SYS_MOVE_STAGE
MSG_SYS_LEAVE_STAGE
MSG_SYS_LOCK_STAGE
MSG_SYS_UNLOCK_STAGE
MSG_SYS_RESERVE_STAGE
MSG_SYS_UNRESERVE_STAGE
MSG_SYS_SET_STAGE_PASS
MSG_SYS_WAIT_STAGE_BINARY
MSG_SYS_SET_STAGE_BINARY
MSG_SYS_GET_STAGE_BINARY
MSG_SYS_ENUMERATE_CLIENT
MSG_SYS_ENUMERATE_STAGE
// Distributed mutex primitives.
MSG_SYS_CREATE_MUTEX
MSG_SYS_CREATE_OPEN_MUTEX
MSG_SYS_DELETE_MUTEX
MSG_SYS_OPEN_MUTEX
MSG_SYS_CLOSE_MUTEX
// Distributed semaphore primitives.
MSG_SYS_CREATE_SEMAPHORE
MSG_SYS_CREATE_ACQUIRE_SEMAPHORE
MSG_SYS_DELETE_SEMAPHORE
MSG_SYS_ACQUIRE_SEMAPHORE
MSG_SYS_RELEASE_SEMAPHORE
MSG_SYS_LOCK_GLOBAL_SEMA
MSG_SYS_UNLOCK_GLOBAL_SEMA
MSG_SYS_CHECK_SEMAPHORE
// Shared register operations.
MSG_SYS_OPERATE_REGISTER
MSG_SYS_LOAD_REGISTER
MSG_SYS_NOTIFY_REGISTER
// Server-side object management.
MSG_SYS_CREATE_OBJECT
MSG_SYS_DELETE_OBJECT
MSG_SYS_POSITION_OBJECT
MSG_SYS_ROTATE_OBJECT
MSG_SYS_DUPLICATE_OBJECT
MSG_SYS_SET_OBJECT_BINARY
MSG_SYS_GET_OBJECT_BINARY
MSG_SYS_GET_OBJECT_OWNER
MSG_SYS_UPDATE_OBJECT_BINARY
MSG_SYS_CLEANUP_OBJECT
MSG_SYS_reserve4A
MSG_SYS_reserve4B
MSG_SYS_reserve4C
MSG_SYS_reserve4D
MSG_SYS_reserve4E
MSG_SYS_reserve4F
// User slot and user-binary messages.
MSG_SYS_INSERT_USER
MSG_SYS_DELETE_USER
MSG_SYS_SET_USER_BINARY
MSG_SYS_GET_USER_BINARY
MSG_SYS_NOTIFY_USER_BINARY
MSG_SYS_reserve55
MSG_SYS_reserve56
MSG_SYS_reserve57
// Rights and authentication.
MSG_SYS_UPDATE_RIGHT
MSG_SYS_AUTH_QUERY
MSG_SYS_AUTH_DATA
MSG_SYS_AUTH_TERMINAL
MSG_SYS_reserve5C
MSG_SYS_RIGHTS_RELOAD
MSG_SYS_reserve5E
MSG_SYS_reserve5F
// MHF application-level messages (game features).
MSG_MHF_SAVEDATA
MSG_MHF_LOADDATA
MSG_MHF_LIST_MEMBER
MSG_MHF_OPR_MEMBER
MSG_MHF_ENUMERATE_DIST_ITEM
MSG_MHF_APPLY_DIST_ITEM
MSG_MHF_ACQUIRE_DIST_ITEM
MSG_MHF_GET_DIST_DESCRIPTION
MSG_MHF_SEND_MAIL
MSG_MHF_READ_MAIL
MSG_MHF_LIST_MAIL
MSG_MHF_OPRT_MAIL
MSG_MHF_LOAD_FAVORITE_QUEST
MSG_MHF_SAVE_FAVORITE_QUEST
MSG_MHF_REGISTER_EVENT
MSG_MHF_RELEASE_EVENT
MSG_MHF_TRANSIT_MESSAGE
MSG_SYS_reserve71
MSG_SYS_reserve72
MSG_SYS_reserve73
MSG_SYS_reserve74
MSG_SYS_reserve75
MSG_SYS_reserve76
MSG_SYS_reserve77
MSG_SYS_reserve78
MSG_SYS_reserve79
MSG_SYS_reserve7A
MSG_SYS_reserve7B
MSG_SYS_reserve7C
MSG_CA_EXCHANGE_ITEM
MSG_SYS_reserve7E
MSG_MHF_PRESENT_BOX
MSG_MHF_SERVER_COMMAND
MSG_MHF_SHUT_CLIENT
MSG_MHF_ANNOUNCE
MSG_MHF_SET_LOGINWINDOW
MSG_SYS_TRANS_BINARY
MSG_SYS_COLLECT_BINARY
MSG_SYS_GET_STATE
MSG_SYS_SERIALIZE
MSG_SYS_ENUMLOBBY
MSG_SYS_ENUMUSER
MSG_SYS_INFOKYSERVER
MSG_MHF_GET_CA_UNIQUE_ID
MSG_MHF_SET_CA_ACHIEVEMENT
MSG_MHF_CARAVAN_MY_SCORE
MSG_MHF_CARAVAN_RANKING
MSG_MHF_CARAVAN_MY_RANK
// Guild management.
MSG_MHF_CREATE_GUILD
MSG_MHF_OPERATE_GUILD
MSG_MHF_OPERATE_GUILD_MEMBER
MSG_MHF_INFO_GUILD
MSG_MHF_ENUMERATE_GUILD
MSG_MHF_UPDATE_GUILD
MSG_MHF_ARRANGE_GUILD_MEMBER
MSG_MHF_ENUMERATE_GUILD_MEMBER
MSG_MHF_ENUMERATE_CAMPAIGN
MSG_MHF_STATE_CAMPAIGN
MSG_MHF_APPLY_CAMPAIGN
MSG_MHF_ENUMERATE_ITEM
MSG_MHF_ACQUIRE_ITEM
MSG_MHF_TRANSFER_ITEM
MSG_MHF_MERCENARY_HUNTDATA
MSG_MHF_ENTRY_ROOKIE_GUILD
MSG_MHF_ENUMERATE_QUEST
MSG_MHF_ENUMERATE_EVENT
MSG_MHF_ENUMERATE_PRICE
MSG_MHF_ENUMERATE_RANKING
MSG_MHF_ENUMERATE_ORDER
MSG_MHF_ENUMERATE_SHOP
MSG_MHF_GET_EXTRA_INFO
MSG_MHF_UPDATE_INTERIOR
MSG_MHF_ENUMERATE_HOUSE
MSG_MHF_UPDATE_HOUSE
MSG_MHF_LOAD_HOUSE
MSG_MHF_OPERATE_WAREHOUSE
MSG_MHF_ENUMERATE_WAREHOUSE
MSG_MHF_UPDATE_WAREHOUSE
MSG_MHF_ACQUIRE_TITLE
MSG_MHF_ENUMERATE_TITLE
MSG_MHF_ENUMERATE_GUILD_ITEM
MSG_MHF_UPDATE_GUILD_ITEM
MSG_MHF_ENUMERATE_UNION_ITEM
MSG_MHF_UPDATE_UNION_ITEM
MSG_MHF_CREATE_JOINT
MSG_MHF_OPERATE_JOINT
MSG_MHF_INFO_JOINT
MSG_MHF_UPDATE_GUILD_ICON
MSG_MHF_INFO_FESTA
MSG_MHF_ENTRY_FESTA
MSG_MHF_CHARGE_FESTA
MSG_MHF_ACQUIRE_FESTA
MSG_MHF_STATE_FESTA_U
MSG_MHF_STATE_FESTA_G
MSG_MHF_ENUMERATE_FESTA_MEMBER
MSG_MHF_VOTE_FESTA
MSG_MHF_ACQUIRE_CAFE_ITEM
MSG_MHF_UPDATE_CAFEPOINT
MSG_MHF_CHECK_DAILY_CAFEPOINT
MSG_MHF_GET_COG_INFO
MSG_MHF_CHECK_MONTHLY_ITEM
MSG_MHF_ACQUIRE_MONTHLY_ITEM
MSG_MHF_CHECK_WEEKLY_STAMP
MSG_MHF_EXCHANGE_WEEKLY_STAMP
MSG_MHF_CREATE_MERCENARY
MSG_MHF_SAVE_MERCENARY
MSG_MHF_READ_MERCENARY_W
MSG_MHF_READ_MERCENARY_M
MSG_MHF_CONTRACT_MERCENARY
MSG_MHF_ENUMERATE_MERCENARY_LOG
MSG_MHF_ENUMERATE_GUACOT
MSG_MHF_UPDATE_GUACOT
MSG_MHF_INFO_TOURNAMENT
MSG_MHF_ENTRY_TOURNAMENT
MSG_MHF_ENTER_TOURNAMENT_QUEST
MSG_MHF_ACQUIRE_TOURNAMENT
MSG_MHF_GET_ACHIEVEMENT
MSG_MHF_RESET_ACHIEVEMENT
MSG_MHF_ADD_ACHIEVEMENT
MSG_MHF_PAYMENT_ACHIEVEMENT
MSG_MHF_DISPLAYED_ACHIEVEMENT
MSG_MHF_INFO_SCENARIO_COUNTER
MSG_MHF_SAVE_SCENARIO_DATA
MSG_MHF_LOAD_SCENARIO_DATA
MSG_MHF_GET_BBS_SNS_STATUS
MSG_MHF_APPLY_BBS_ARTICLE
MSG_MHF_GET_ETC_POINTS
MSG_MHF_UPDATE_ETC_POINT
MSG_MHF_GET_MYHOUSE_INFO
MSG_MHF_UPDATE_MYHOUSE_INFO
MSG_MHF_GET_WEEKLY_SCHEDULE
MSG_MHF_ENUMERATE_INV_GUILD
MSG_MHF_OPERATION_INV_GUILD
MSG_MHF_STAMPCARD_STAMP
MSG_MHF_STAMPCARD_PRIZE
MSG_MHF_UNRESERVE_SRG
MSG_MHF_LOAD_PLATE_DATA
MSG_MHF_SAVE_PLATE_DATA
MSG_MHF_LOAD_PLATE_BOX
MSG_MHF_SAVE_PLATE_BOX
MSG_MHF_READ_GUILDCARD
MSG_MHF_UPDATE_GUILDCARD
MSG_MHF_READ_BEAT_LEVEL
MSG_MHF_UPDATE_BEAT_LEVEL
MSG_MHF_READ_BEAT_LEVEL_ALL_RANKING
MSG_MHF_READ_BEAT_LEVEL_MY_RANKING
MSG_MHF_READ_LAST_WEEK_BEAT_RANKING
MSG_MHF_ACCEPT_READ_REWARD
MSG_MHF_GET_ADDITIONAL_BEAT_REWARD
MSG_MHF_GET_FIXED_SEIBATU_RANKING_TABLE
MSG_MHF_GET_BBS_USER_STATUS
MSG_MHF_KICK_EXPORT_FORCE
MSG_MHF_GET_BREAK_SEIBATU_LEVEL_REWARD
MSG_MHF_GET_WEEKLY_SEIBATU_RANKING_REWARD
MSG_MHF_GET_EARTH_STATUS
MSG_MHF_LOAD_PARTNER
MSG_MHF_SAVE_PARTNER
MSG_MHF_GET_GUILD_MISSION_LIST
MSG_MHF_GET_GUILD_MISSION_RECORD
MSG_MHF_ADD_GUILD_MISSION_COUNT
MSG_MHF_SET_GUILD_MISSION_TARGET
MSG_MHF_CANCEL_GUILD_MISSION_TARGET
MSG_MHF_LOAD_OTOMO_AIROU
MSG_MHF_SAVE_OTOMO_AIROU
MSG_MHF_ENUMERATE_GUILD_TRESURE
MSG_MHF_ENUMERATE_AIROULIST
MSG_MHF_REGIST_GUILD_TRESURE
MSG_MHF_ACQUIRE_GUILD_TRESURE
MSG_MHF_OPERATE_GUILD_TRESURE_REPORT
MSG_MHF_GET_GUILD_TRESURE_SOUVENIR
MSG_MHF_ACQUIRE_GUILD_TRESURE_SOUVENIR
MSG_MHF_ENUMERATE_FESTA_INTERMEDIATE_PRIZE
MSG_MHF_ACQUIRE_FESTA_INTERMEDIATE_PRIZE
MSG_MHF_LOAD_DECO_MYSET
MSG_MHF_SAVE_DECO_MYSET
MSG_MHF_reserve010F
MSG_MHF_LOAD_GUILD_COOKING
MSG_MHF_REGIST_GUILD_COOKING
MSG_MHF_LOAD_GUILD_ADVENTURE
MSG_MHF_REGIST_GUILD_ADVENTURE
MSG_MHF_ACQUIRE_GUILD_ADVENTURE
MSG_MHF_CHARGE_GUILD_ADVENTURE
MSG_MHF_LOAD_LEGEND_DISPATCH
MSG_MHF_LOAD_HUNTER_NAVI
MSG_MHF_SAVE_HUNTER_NAVI
MSG_MHF_REGIST_SPABI_TIME
MSG_MHF_GET_GUILD_WEEKLY_BONUS_MASTER
MSG_MHF_GET_GUILD_WEEKLY_BONUS_ACTIVE_COUNT
MSG_MHF_ADD_GUILD_WEEKLY_BONUS_EXCEPTIONAL_USER
MSG_MHF_GET_TOWER_INFO
MSG_MHF_POST_TOWER_INFO
MSG_MHF_GET_GEM_INFO
MSG_MHF_POST_GEM_INFO
MSG_MHF_GET_EARTH_VALUE
MSG_MHF_DEBUG_POST_VALUE
MSG_MHF_GET_PAPER_DATA
MSG_MHF_GET_NOTICE
MSG_MHF_POST_NOTICE
MSG_MHF_GET_BOOST_TIME
MSG_MHF_POST_BOOST_TIME
MSG_MHF_GET_BOOST_TIME_LIMIT
MSG_MHF_POST_BOOST_TIME_LIMIT
MSG_MHF_ENUMERATE_FESTA_PERSONAL_PRIZE
MSG_MHF_ACQUIRE_FESTA_PERSONAL_PRIZE
MSG_MHF_GET_RAND_FROM_TABLE
MSG_MHF_GET_CAFE_DURATION
MSG_MHF_GET_CAFE_DURATION_BONUS_INFO
MSG_MHF_RECEIVE_CAFE_DURATION_BONUS
MSG_MHF_POST_CAFE_DURATION_BONUS_RECEIVED
MSG_MHF_GET_GACHA_POINT
MSG_MHF_USE_GACHA_POINT
MSG_MHF_EXCHANGE_FPOINT_2_ITEM
MSG_MHF_EXCHANGE_ITEM_2_FPOINT
MSG_MHF_GET_FPOINT_EXCHANGE_LIST
MSG_MHF_PLAY_STEPUP_GACHA
MSG_MHF_RECEIVE_GACHA_ITEM
MSG_MHF_GET_STEPUP_STATUS
MSG_MHF_PLAY_FREE_GACHA
MSG_MHF_GET_TINY_BIN
MSG_MHF_POST_TINY_BIN
MSG_MHF_GET_SENYU_DAILY_COUNT
MSG_MHF_GET_GUILD_TARGET_MEMBER_NUM
MSG_MHF_GET_BOOST_RIGHT
MSG_MHF_START_BOOST_TIME
MSG_MHF_POST_BOOST_TIME_QUEST_RETURN
MSG_MHF_GET_BOX_GACHA_INFO
MSG_MHF_PLAY_BOX_GACHA
MSG_MHF_RESET_BOX_GACHA_INFO
MSG_MHF_GET_SEIBATTLE
MSG_MHF_POST_SEIBATTLE
MSG_MHF_GET_RYOUDAMA
MSG_MHF_POST_RYOUDAMA
MSG_MHF_GET_TENROUIRAI
MSG_MHF_POST_TENROUIRAI
MSG_MHF_POST_GUILD_SCOUT
MSG_MHF_CANCEL_GUILD_SCOUT
MSG_MHF_ANSWER_GUILD_SCOUT
MSG_MHF_GET_GUILD_SCOUT_LIST
MSG_MHF_GET_GUILD_MANAGE_RIGHT
MSG_MHF_SET_GUILD_MANAGE_RIGHT
MSG_MHF_PLAY_NORMAL_GACHA
MSG_MHF_GET_DAILY_MISSION_MASTER
MSG_MHF_GET_DAILY_MISSION_PERSONAL
MSG_MHF_SET_DAILY_MISSION_PERSONAL
MSG_MHF_GET_GACHA_PLAY_HISTORY
MSG_MHF_GET_REJECT_GUILD_SCOUT
MSG_MHF_SET_REJECT_GUILD_SCOUT
MSG_MHF_GET_CA_ACHIEVEMENT_HIST
MSG_MHF_SET_CA_ACHIEVEMENT_HIST
MSG_MHF_GET_KEEP_LOGIN_BOOST_STATUS
MSG_MHF_USE_KEEP_LOGIN_BOOST
MSG_MHF_GET_UD_SCHEDULE
MSG_MHF_GET_UD_INFO
MSG_MHF_GET_KIJU_INFO
MSG_MHF_SET_KIJU
MSG_MHF_ADD_UD_POINT
MSG_MHF_GET_UD_MY_POINT
MSG_MHF_GET_UD_TOTAL_POINT_INFO
MSG_MHF_GET_UD_BONUS_QUEST_INFO
MSG_MHF_GET_UD_SELECTED_COLOR_INFO
MSG_MHF_GET_UD_MONSTER_POINT
MSG_MHF_GET_UD_DAILY_PRESENT_LIST
MSG_MHF_GET_UD_NORMA_PRESENT_LIST
MSG_MHF_GET_UD_RANKING_REWARD_LIST
MSG_MHF_ACQUIRE_UD_ITEM
MSG_MHF_GET_REWARD_SONG
MSG_MHF_USE_REWARD_SONG
MSG_MHF_ADD_REWARD_SONG_COUNT
MSG_MHF_GET_UD_RANKING
MSG_MHF_GET_UD_MY_RANKING
MSG_MHF_ACQUIRE_MONTHLY_REWARD
MSG_MHF_GET_UD_GUILD_MAP_INFO
MSG_MHF_GENERATE_UD_GUILD_MAP
MSG_MHF_GET_UD_TACTICS_POINT
MSG_MHF_ADD_UD_TACTICS_POINT
MSG_MHF_GET_UD_TACTICS_RANKING
MSG_MHF_GET_UD_TACTICS_REWARD_LIST
MSG_MHF_GET_UD_TACTICS_LOG
MSG_MHF_GET_EQUIP_SKIN_HIST
MSG_MHF_UPDATE_EQUIP_SKIN_HIST
MSG_MHF_GET_UD_TACTICS_FOLLOWER
MSG_MHF_SET_UD_TACTICS_FOLLOWER
MSG_MHF_GET_UD_SHOP_COIN
MSG_MHF_USE_UD_SHOP_COIN
MSG_MHF_GET_ENHANCED_MINIDATA
MSG_MHF_SET_ENHANCED_MINIDATA
MSG_MHF_SEX_CHANGER
MSG_MHF_GET_LOBBY_CROWD
MSG_SYS_reserve180
MSG_MHF_GUILD_HUNTDATA
MSG_MHF_ADD_KOURYOU_POINT
MSG_MHF_GET_KOURYOU_POINT
MSG_MHF_EXCHANGE_KOURYOU_POINT
MSG_MHF_GET_UD_TACTICS_BONUS_QUEST
MSG_MHF_GET_UD_TACTICS_FIRST_QUEST_BONUS
MSG_MHF_GET_UD_TACTICS_REMAINING_POINT
MSG_SYS_reserve188
MSG_MHF_LOAD_PLATE_MYSET
MSG_MHF_SAVE_PLATE_MYSET
MSG_SYS_reserve18B
MSG_MHF_GET_RESTRICTION_EVENT
MSG_MHF_SET_RESTRICTION_EVENT
MSG_SYS_reserve18E
MSG_SYS_reserve18F
MSG_MHF_GET_TREND_WEAPON
MSG_MHF_UPDATE_USE_TREND_WEAPON_LOG
MSG_SYS_reserve192
MSG_SYS_reserve193
MSG_SYS_reserve194
MSG_MHF_SAVE_RENGOKU_DATA
MSG_MHF_LOAD_RENGOKU_DATA
MSG_MHF_GET_RENGOKU_BINARY
MSG_MHF_ENUMERATE_RENGOKU_RANKING
MSG_MHF_GET_RENGOKU_RANKING_RANK
MSG_MHF_ACQUIRE_EXCHANGE_SHOP
MSG_SYS_reserve19B
MSG_MHF_SAVE_MEZFES_DATA
MSG_MHF_LOAD_MEZFES_DATA
MSG_SYS_reserve19E
MSG_SYS_reserve19F
MSG_MHF_UPDATE_FORCE_GUILD_RANK
MSG_MHF_RESET_TITLE
MSG_SYS_reserve202
MSG_SYS_reserve203
MSG_SYS_reserve204
MSG_SYS_reserve205
MSG_SYS_reserve206
MSG_SYS_reserve207
MSG_SYS_reserve208
MSG_SYS_reserve209
MSG_SYS_reserve20A
MSG_SYS_reserve20B
MSG_SYS_reserve20C
MSG_SYS_reserve20D
MSG_SYS_reserve20E
MSG_SYS_reserve20F
//revive:enable
)
|
// Source : https://oj.leetcode.com/problems/next-permutation/
// Author : Austin Vern Songer
// Date : 2016-04-27
/**********************************************************************************
*
* Implement next permutation, which rearranges numbers into the lexicographically next
* greater permutation of numbers.
*
* If such arrangement is not possible, it must rearrange it as the lowest possible order
* (ie, sorted in ascending order).
*
* The replacement must be in-place, do not allocate extra memory.
*
* Here are some examples. Inputs are in the left-hand column and its corresponding outputs
* are in the right-hand column.
*
* 1,2,3 → 1,3,2
* 3,2,1 → 1,2,3
* 1,1,5 → 1,5,1
*
**********************************************************************************/
package main
import (
"fmt"
)
// nextPermutation rearranges num in place into the lexicographically next
// greater permutation. Per the problem statement above, when num is already
// the highest permutation it must wrap to the lowest (ascending) order —
// e.g. 3,2,1 -> 1,2,3. The original left such input unchanged; that case is
// fixed here. The result is printed when a next permutation is produced,
// matching the original's output behavior.
func nextPermutation(num []int) {
	if len(num) <= 1 {
		return
	}
	// Find the rightmost ascent, i.e. the largest i with num[i-1] < num[i].
	i := len(num) - 1
	for i > 0 && num[i-1] >= num[i] {
		i--
	}
	if i == 0 {
		// Whole slice is non-increasing: wrap to the smallest permutation.
		for l, r := 0, len(num)-1; l < r; l, r = l+1, r-1 {
			num[l], num[r] = num[r], num[l]
		}
		return
	}
	// Find the rightmost element greater than the pivot num[i-1] and swap.
	j := len(num) - 1
	for num[i-1] >= num[j] {
		j--
	}
	num[i-1], num[j] = num[j], num[i-1]
	// Reverse the suffix so the tail is the smallest possible order.
	for l, r := i, len(num)-1; l < r; l, r = l+1, r-1 {
		num[l], num[r] = num[r], num[l]
	}
	fmt.Println(num)
}
// reverse flips s in place.
func reverse(s []int) {
	n := len(s)
	for i := 0; i < n/2; i++ {
		opp := n - 1 - i
		s[i], s[opp] = s[opp], s[i]
	}
}
// main demonstrates nextPermutation on a small example; the function itself
// prints the resulting permutation.
func main() {
nextPermutation([]int{1, 2, 3})
}
|
// update_go_deps modifies the go.mod and go.sum files to sync to the most
// recent versions of all listed dependencies.
//
// If the go.mod file is not being updated, check the recent runs of this Task
// Driver to verify that:
//
// 1. It is running at all. If not, there may be a bot capacity problem, or a
// problem with the Task Scheduler.
// 2. It is succeeding. There are a number of reasons why it might fail, but the
// most common is that a change has landed in one of the dependencies which
// is not compatible with the current version of our code. Check the logs for
// the failing step(s). Note that dependencies may be shared, and upstream
// changes can result in a dependency graph which is impossible to satisfy.
// In this case, you may need to fork a dependency to keep it at a working
// revision, or disable this task until fixes propagate through the graph.
// 3. The CL uploaded by this task driver is passing the commit queue and
// landing. This task driver does not run all of the tests and so the CL it
// uploads may fail the commit queue for legitimate reasons. Look into the
// failures and determine whether fixes need to be applied in this repo, a
// dependency needs to be pinned to a different release, forked, etc.
package main
import (
"context"
"flag"
"fmt"
"path"
"path/filepath"
"strconv"
"go.skia.org/infra/go/auth"
"go.skia.org/infra/go/common"
"go.skia.org/infra/go/exec"
"go.skia.org/infra/go/util"
"go.skia.org/infra/task_driver/go/lib/auth_steps"
"go.skia.org/infra/task_driver/go/lib/checkout"
"go.skia.org/infra/task_driver/go/lib/gerrit_steps"
"go.skia.org/infra/task_driver/go/lib/golang"
"go.skia.org/infra/task_driver/go/lib/os_steps"
"go.skia.org/infra/task_driver/go/lib/rotations"
"go.skia.org/infra/task_driver/go/td"
)
// Command-line flags for the update_go_deps task driver. The first group is
// required on the bots; the second is optional (local runs and debugging).
var (
// Required properties for this task.
gerritProject = flag.String("gerrit_project", "", "Gerrit project name.")
gerritUrl = flag.String("gerrit_url", "", "URL of the Gerrit server.")
projectId = flag.String("project_id", "", "ID of the Google Cloud project.")
taskId = flag.String("task_id", "", "ID of this task.")
taskName = flag.String("task_name", "", "Name of the task.")
workdir = flag.String("workdir", ".", "Working directory")
// checkoutFlags registers the repo/revision flags used by checkout.GetRepoState.
checkoutFlags = checkout.SetupFlags(nil)
// Optional flags.
local = flag.Bool("local", false, "True if running locally (as opposed to on the bots)")
output = flag.String("o", "", "If provided, dump a JSON blob of step data to the given file. Prints to stdout if '-' is given.")
)
// main drives the "update Go deps" task: it checks out the target repo,
// bumps Go module dependencies, runs build/test/generate so go.mod, go.sum
// and generated files stay consistent, and uploads the result as a Gerrit
// CL reviewed by the current trooper (plus the CL owner on try jobs).
// Every failing step aborts the run via td.Fatal.
func main() {
	// Setup.
	ctx := td.StartRun(projectId, taskId, taskName, output, local)
	defer td.EndRun(ctx)
	rs, err := checkout.GetRepoState(checkoutFlags)
	if err != nil {
		td.Fatal(ctx, err)
	}
	if *gerritProject == "" {
		td.Fatalf(ctx, "--gerrit_project is required.")
	}
	if *gerritUrl == "" {
		td.Fatalf(ctx, "--gerrit_url is required.")
	}
	wd, err := os_steps.Abs(ctx, *workdir)
	if err != nil {
		td.Fatal(ctx, err)
	}
	// Check out the code.
	co, err := checkout.EnsureGitCheckout(ctx, path.Join(wd, "repo"), rs)
	if err != nil {
		td.Fatal(ctx, err)
	}
	// Setup go.
	ctx = golang.WithEnv(ctx, wd)
	// Perform steps to update the dependencies.
	// By default, the Go env includes GOFLAGS=-mod=readonly, which prevents
	// commands from modifying go.mod; in this case, we want to modify it,
	// so unset that variable.
	ctx = td.WithEnv(ctx, []string{"GOFLAGS="})
	if _, err := golang.Go(ctx, co.Dir(), "get", "-u", "-t", "-d", "./..."); err != nil {
		td.Fatal(ctx, err)
	}
	// Install some tool dependencies.
	if err := golang.InstallCommonDeps(ctx, co.Dir()); err != nil {
		td.Fatal(ctx, err)
	}
	// These commands may also update dependencies, or their results may
	// change based on the updated dependencies.
	if _, err := golang.Go(ctx, co.Dir(), "build", "./..."); err != nil {
		td.Fatal(ctx, err)
	}
	// Explicitly build the infra module, because "go build ./..." doesn't
	// update go.sum for dependencies of the infra module when run in the
	// Skia repo. We have some Skia bots which install things from the infra
	// repo (eg. task drivers which are used directly and not imported), and
	// go.mod and go.sum need to account for that.
	// NOTE(review): "go build -i" is deprecated/removed in newer Go
	// toolchains — confirm the bot's Go version still accepts it.
	if _, err := golang.Go(ctx, co.Dir(), "build", "-i", "go.skia.org/infra/..."); err != nil {
		td.Fatal(ctx, err)
	}
	// Setting -exec=echo causes the tests to not actually run; therefore
	// this compiles the tests but doesn't run them.
	if _, err := golang.Go(ctx, co.Dir(), "test", "-exec=echo", "./..."); err != nil {
		td.Fatal(ctx, err)
	}
	if _, err := golang.Go(ctx, co.Dir(), "generate", "./..."); err != nil {
		td.Fatal(ctx, err)
	}
	// Regenerate the licenses file.
	if rs.Repo == common.REPO_SKIA_INFRA {
		if _, err := exec.RunCwd(ctx, filepath.Join(co.Dir(), "licenses"), "make", "regenerate"); err != nil {
			td.Fatal(ctx, err)
		}
	}
	// If we changed anything, upload a CL.
	c, err := auth_steps.InitHttpClient(ctx, *local, auth.SCOPE_USERINFO_EMAIL)
	if err != nil {
		td.Fatal(ctx, err)
	}
	reviewers, err := rotations.GetCurrentTrooper(ctx, c)
	if err != nil {
		td.Fatal(ctx, err)
	}
	g, err := gerrit_steps.Init(ctx, *local, *gerritUrl)
	if err != nil {
		td.Fatal(ctx, err)
	}
	// On a try job (or local run with an active issue), also add the CL
	// owner as a reviewer so they see the generated change.
	isTryJob := *local || rs.Issue != ""
	if isTryJob {
		var i int64
		if err := td.Do(ctx, td.Props(fmt.Sprintf("Parse %q as int", rs.Issue)).Infra(), func(ctx context.Context) error {
			var err error
			i, err = strconv.ParseInt(rs.Issue, 10, 64)
			return err
		}); err != nil {
			td.Fatal(ctx, err)
		}
		ci, err := gerrit_steps.GetIssueProperties(ctx, g, i)
		if err != nil {
			td.Fatal(ctx, err)
		}
		if !util.In(ci.Owner.Email, reviewers) {
			reviewers = append(reviewers, ci.Owner.Email)
		}
	}
	if err := gerrit_steps.UploadCL(ctx, g, co, *gerritProject, "master", rs.Revision, "Update Go Deps", reviewers, isTryJob); err != nil {
		td.Fatal(ctx, err)
	}
}
|
package models
import (
"time"
)
// ItemImpression is the per-item impression row: a view counter and a star
// counter keyed by the item's ID.
type ItemImpression struct {
	ItemID    uint64    `json:"item_id" gorm:"column:item_id;primary_key" sql:"not null;index;type:bigint(20)"`
	View      uint      `json:"view" gorm:"column:view" sql:"not null"`
	Star      uint      `json:"star" gorm:"column:star" sql:"not null"`
	CreatedAt time.Time `json:"created_at" gorm:"column:created_at" sql:"not null;type:datetime"`
	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at" sql:"not null;type:datetime"`
}
// NewItemImpression builds an impression record for the given item with the
// supplied star count; the view counter starts at zero and the timestamps
// are left at their zero values.
func NewItemImpression(itemID uint64, star uint) *ItemImpression {
	imp := ItemImpression{ItemID: itemID, Star: star}
	imp.View = 0
	return &imp
}
// TableName tells the ORM to map ItemImpression onto the "item_impression" table.
func (e ItemImpression) TableName() string {
	return "item_impression"
}
|
package command
import (
"flag"
"fmt"
"os"
"github.com/markbates/inflect"
)
// ScaffoldCommand generates both a model and a controller in one run.
type ScaffoldCommand struct {
}
// Name returns the CLI sub-command name.
func (command *ScaffoldCommand) Name() string {
	return "scaffold"
}
// Help prints the scaffold command's usage text to stdout.
func (command *ScaffoldCommand) Help() {
	// Bug fix: the example previously invoked "echo-scaffold controller",
	// which contradicts the Usage line for this (scaffold) command.
	fmt.Printf(`Usage:
echo-scaffold scaffold <controller name> <field name>:<field type> ...
Description:
The echo-scaffold scaffold command creates a new controller and model with the given fields.
Example:
echo-scaffold scaffold Post Title:string Body:string
`)
}
// Execute runs the scaffold command: it singularizes the resource name and
// then delegates to the model and controller generators.
func (command *ScaffoldCommand) Execute(args []string) {
	fs := flag.NewFlagSet(command.Name(), flag.ExitOnError)
	fs.Usage = command.Help
	fs.Parse(args)
	if fs.NArg() == 0 {
		command.Help()
		os.Exit(2)
	}
	// Mutates the parsed args slice in place; args shares its backing
	// array, so the model command below sees the singularized name.
	fs.Args()[0] = inflect.Singularize(fs.Arg(0))
	modelCommand := &ModelCommand{}
	modelCommand.Execute(args)
	controllerCommand := &ControllerCommand{}
	controllerCommand.Execute([]string{modelCommand.ModelNamePlural})
}
|
//go:generate jsonenums -type=ConversationType -suffix=_enum
//go:generate jsonenums -type=RetentionMode -suffix=_enum
//go:generate jsonenums -type=Privacy -suffix=_enum
package schema
import (
"time"
"gopkg.in/mgo.v2/bson"
)
// Conversation represents a conversation within a namespace. Conversations can be 1-on-1, group, or channel.
type Conversation struct {
	ID        bson.ObjectId `bson:"_id,omitempty"`
	CreatorID bson.ObjectId `bson:"creator_id"`
	Title     string        `bson:"title"`
	Purpose   string        `bson:"purpose"`
	Topic     string        `bson:"topic"`
	// Namespace identifies the owning namespace the conversation lives in.
	Namespace struct {
		ID        bson.ObjectId `bson:"id"`
		Path      string        `bson:"path"`
		OwnerID   bson.ObjectId `bson:"owner_id"`
		OwnerType OwnerType     `bson:"owner_type"`
	} `bson:"namespace"`
	ConversationType ConversationType `bson:"conversation_type"`
	// Bug fix: the tag previously read "ommitempty", an unknown option, so
	// the intended omitempty behavior was silently never applied.
	Privacy Privacy `bson:"privacy,omitempty"`
	// Avatar holds metadata about the conversation's image.
	// NOTE(review): ContenType looks like a typo for ContentType, and this
	// struct has no bson tag of its own — both are part of the stored
	// schema and of the exported API, so confirm before renaming.
	Avatar struct {
		FileSize         int    `bson:"file_size"`
		OriginalFilename string `bson:"original_filename"`
		ContenType       string `bson:"content_type"`
		Md5              string `bson:"md5"`
		Sha256           string `bson:"sha256"`
	}
	// Retention controls how long messages are kept (see RetentionMode).
	Retention struct {
		Mode  RetentionMode `bson:"mode"`
		Value int           `bson:"value,omitempty"`
	} `bson:"retention,omitempty"`
	MessagesCount     int       `bson:"messages_count"`
	ParticipantsCount int       `bson:"participants_count"`
	LastActiveAt      time.Time `bson:"last_active_at"`
	Archived          bool      `bson:"archived"`
	ArchivedAt        time.Time `bson:"archived_at"`
	CreatedAt         time.Time `bson:"created_at"`
	UpdatedAt         time.Time `bson:"updated_at"`
	// LastMessage caches a denormalized copy of the most recent message.
	LastMessage struct {
		ContentHTML      string
		ContentPlainText string
		From             struct {
			UserID bson.ObjectId
			Name   string
		} `bson:"from"`
		CreatedAt time.Time `bson:"created_at"`
		UpdatedAt time.Time `bson:"updated_at"`
	} `bson:"last_message,omitempty"`
	// Errors carries validation errors; never persisted (bson:"-").
	Errors Errors `bson:"-"`
}
// NewConversation creates a new instance of a Conversation with its
// counters zeroed, not archived, and LastActiveAt set to now.
func NewConversation() *Conversation {
	c := &Conversation{
		MessagesCount:     0,
		ParticipantsCount: 0,
		LastActiveAt:      time.Now(),
		Archived:          false,
	}
	return c
}
// IsArchived is a helper method that returns if the conversation has been archived
func (c *Conversation) IsArchived() bool {
	return c.Archived
}
// ConversationType represents the one of the 3 modes of conversation supported: private (aka 1-on-1s), group, and channels (aka. Rooms)
type ConversationType int

// Conversation Types
const (
	ConversationTypePrivate ConversationType = iota
	ConversationTypeGroup
	ConversationTypeChannel
)

// String renders the conversation type as its lowercase name.
func (t ConversationType) String() string {
	names := [...]string{
		ConversationTypePrivate: "private",
		ConversationTypeGroup:   "group",
		ConversationTypeChannel: "channel",
	}
	if int(t) >= 0 && int(t) < len(names) {
		return names[t]
	}
	return "invalid conversation type"
}
// Privacy represents the type of privacy and security a conversation has
type Privacy int

// Privacy modes
const (
	PrivacyPersonal Privacy = iota
	PrivacyPublic
	PrivacyPrivate
	PrivacyProtected
	PrivacySecret
)

// String renders the privacy mode as its lowercase name.
func (p Privacy) String() string {
	names := [...]string{
		PrivacyPersonal:  "personal",
		PrivacyPublic:    "public",
		PrivacyPrivate:   "private",
		PrivacyProtected: "protected",
		PrivacySecret:    "secret",
	}
	if int(p) >= 0 && int(p) < len(names) {
		return names[p]
	}
	return "invalid channel privacy"
}
// RetentionMode describes the type of retention policy for messages in a conversation
type RetentionMode int

// Retention modes
const (
	RetentionModeAll RetentionMode = iota
	RetentionModeNone
	RetentionModeAge
	RetentionModeDays
)

// String renders the retention mode as its lowercase name.
func (m RetentionMode) String() string {
	switch m {
	case RetentionModeAll:
		return "all"
	case RetentionModeNone:
		// Bug fix: this case previously returned "all", making None
		// indistinguishable from All in any rendered output.
		return "none"
	case RetentionModeAge:
		return "age"
	case RetentionModeDays:
		return "days"
	default:
		return "invalid"
	}
}
|
package mirror
import (
"context"
"fmt"
"github.com/google/uuid"
)
// NewProcess records a new mirroring process of the given type for the
// namespace/root pair, starting in the pending state.
func (d *Manager) NewProcess(ctx context.Context, nsID, rootID uuid.UUID, processType string) (*Process, error) {
	// TODO: prevent multiple sync operations on a single namespace
	p := &Process{
		ID:          uuid.New(),
		NamespaceID: nsID,
		RootID:      rootID,
		Typ:         processType,
		Status:      ProcessStatusPending,
	}
	created, err := d.callbacks.Store().CreateProcess(ctx, p)
	if err != nil {
		return nil, fmt.Errorf("creating a new mirroring process, err: %w", err)
	}
	return created, nil
}
|
package cid
import (
"encoding/binary"
"testing"
)
// TestUvarintRoundTrip encodes a spread of values with the standard
// library's binary.PutUvarint and checks that the package-local uvarint
// decoder agrees with binary.Uvarint on both value and consumed length.
func TestUvarintRoundTrip(t *testing.T) {
	for _, want := range []uint64{0, 1, 2, 127, 128, 129, 255, 256, 257, 1<<63 - 1} {
		buf := make([]byte, 16)
		binary.PutUvarint(buf, want)
		got, gotLen := uvarint(string(buf))
		_, wantLen := binary.Uvarint(buf)
		if want != got {
			t.Errorf("roundtrip failed expected %d but got %d", want, got)
		}
		if gotLen != wantLen {
			t.Errorf("length incorrect expected %d but got %d", wantLen, gotLen)
		}
	}
}
|
package memfs
import (
"testing"
"gopkg.in/src-d/go-billy.v4/test"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
// MemorySuite runs the shared billy filesystem conformance suite against
// the in-memory implementation.
type MemorySuite struct {
	test.FilesystemSuite
	path string // NOTE(review): not referenced in this chunk — confirm before removing
}
var _ = Suite(&MemorySuite{})
// SetUpTest gives each test a fresh in-memory filesystem.
func (s *MemorySuite) SetUpTest(c *C) {
	s.FilesystemSuite = test.NewFilesystemSuite(New())
}
|
package metrics
import (
redis "github.com/go-redis/redis/v8"
)
// AddRedisMetrics registers a metrics handler against a redis Client's PoolStats() method
func AddRedisMetrics(stats func() *redis.PoolStats) {
	type poolMetric struct {
		name string
		desc string
		f    func() int64
	}

	// Gauges reflect the pool's state at scrape time.
	for _, m := range []poolMetric{
		{"redis_conns", "Number of total connections in the pool", func() int64 { return int64(stats().TotalConns) }},
		{"redis_idle_conns", "Number of idle connections in the pool", func() int64 { return int64(stats().IdleConns) }},
		{"redis_stale_conns", "Number of stale connections in the pool", func() int64 { return int64(stats().StaleConns) }},
	} {
		registry.addInt64DerivedGaugeMetric(m.name, m.desc, "redis", m.f)
	}

	// Cumulative counters only ever grow over the life of the pool.
	for _, m := range []poolMetric{
		{"redis_miss_count_total", "Total number of times a connection was not found in the pool", func() int64 { return int64(stats().Misses) }},
		{"redis_hit_count_total", "Total number of times a connection was found in the pool", func() int64 { return int64(stats().Hits) }},
	} {
		registry.addInt64DerivedCumulativeMetric(m.name, m.desc, "redis", m.f)
	}
}
|
package rpki
// RpkiSessionRpc is the XML envelope for the rv-session-information RPC reply.
type RpkiSessionRpc struct {
	Information struct {
		RpkiSessions []RpkiSession `xml:"rv-session"`
	} `xml:"rv-session-information"`
}
// RpkiSession describes one route-validation session and its prefix counts.
type RpkiSession struct {
	IpAddress       string `xml:"ip-address"`
	SessionState    string `xml:"session-state"`
	SessionFlaps    int64  `xml:"session-flaps"`
	Ipv4PrefixCount int64  `xml:"ip-prefix-count"`
	Ipv6PrefixCount int64  `xml:"ip6-prefix-count"`
}
// RpkiStatisticsRpc is the XML envelope for the rv-statistics-information RPC reply.
type RpkiStatisticsRpc struct {
	Information struct {
		Statistics RpkiStatistics `xml:"rv-statistics"`
	} `xml:"rv-statistics-information"`
}
// RpkiStatistics carries aggregate route-validation counters, including the
// per-verdict origin-validation result counts.
type RpkiStatistics struct {
	RecordCount            int64 `xml:"rv-record-count"`
	ReplicationRecordCount int64 `xml:"rv-replication-record-count"`
	PrefixCount            int64 `xml:"rv-prefix-count"`
	OriginASCount          int64 `xml:"rv-origin-as-count"`
	MemoryUtilization      int64 `xml:"rv-memory-utilization"`
	OriginResultsValid     int64 `xml:"rv-policy-origin-validation-results-valid"`
	OriginResultsInvalid   int64 `xml:"rv-policy-origin-validation-results-invalid"`
	OriginResultsUnknown   int64 `xml:"rv-policy-origin-validation-results-unknown"`
}
|
package app
import (
"fmt"
"os"
"strings"
"testing"
_ "github.com/btnguyen2k/gocosmos"
"github.com/btnguyen2k/henge"
"github.com/btnguyen2k/prom"
)
// _createCosmosdbConnect builds a prom.SqlConnect from the COSMOSDB_DRIVER
// and COSMOSDB_URL env vars, skipping the test when either is unset.
// TIMEZONE (default UTC) is substituted into the ${loc}/${tz}/${timezone}
// placeholders of the URL.
func _createCosmosdbConnect(t *testing.T, testName string) *prom.SqlConnect {
	driver := strings.ReplaceAll(os.Getenv("COSMOSDB_DRIVER"), `"`, "")
	url := strings.ReplaceAll(os.Getenv("COSMOSDB_URL"), `"`, "")
	if driver == "" || url == "" {
		t.Skipf("%s skipped", testName)
		return nil
	}
	timezone := strings.ReplaceAll(os.Getenv("TIMEZONE"), `"`, "")
	if timezone == "" {
		timezone = "UTC"
	}
	urlTimezone := strings.ReplaceAll(timezone, "/", "%2f")
	url = strings.ReplaceAll(url, "${loc}", urlTimezone)
	url = strings.ReplaceAll(url, "${tz}", urlTimezone)
	url = strings.ReplaceAll(url, "${timezone}", urlTimezone)
	url += ";Db=exter"
	sqlc, err := henge.NewCosmosdbConnection(url, timezone, driver, 10000, nil)
	if err != nil {
		t.Fatalf("%s/%s failed: %s", testName, "NewCosmosdbConnection", err)
	}
	// Best-effort create; errors are deliberately ignored since the
	// database may already exist.
	sqlc.GetDB().Exec("CREATE DATABASE exter WITH maxru=10000")
	return sqlc
}
const tableNameCosmosdb = "exter_test_app"
// setupTestCosmosdb (re)creates the test collection before each test and
// assigns the shared testSqlc connection used by the test bodies.
var setupTestCosmosdb = func(t *testing.T, testName string) {
	testSqlc = _createCosmosdbConnect(t, testName)
	// Best-effort drop; the error is ignored when the collection is absent.
	testSqlc.GetDB().Exec(fmt.Sprintf("DROP COLLECTION IF EXISTS %s", tableNameCosmosdb))
	err := InitAppTableCosmosdb(testSqlc, tableNameCosmosdb)
	if err != nil {
		t.Fatalf("%s failed: %s", testName, err)
	}
}
// teardownTestCosmosdb closes the shared connection and clears testSqlc.
// The nested defer guarantees testSqlc is reset to nil even if Close panics.
var teardownTestCosmosdb = func(t *testing.T, testName string) {
	if testSqlc != nil {
		defer func() {
			defer func() { testSqlc = nil }()
			testSqlc.Close()
		}()
	}
}
/*----------------------------------------------------------------------*/
// TestNewAppDaoCosmosdb checks the DAO constructor returns a non-nil instance.
func TestNewAppDaoCosmosdb(t *testing.T) {
	testName := "TestNewAppDaoCosmosdb"
	teardownTest := setupTest(t, testName, setupTestCosmosdb, teardownTestCosmosdb)
	defer teardownTest(t)
	appDao := NewAppDaoCosmosdb(testSqlc, tableNameCosmosdb)
	if appDao == nil {
		t.Fatalf("%s failed: nil", testName)
	}
}
// _ensureCosmosdbNumRows fails the test unless the cross-partition row
// count of the test collection equals numRows. CosmosDB returns the COUNT
// as column "$1", decoded here as a float64.
func _ensureCosmosdbNumRows(t *testing.T, testName string, sqlc *prom.SqlConnect, numRows int) {
	if dbRows, err := sqlc.GetDB().Query(fmt.Sprintf("SELECT COUNT(1) FROM %s c WITH cross_partition=true", tableNameCosmosdb)); err != nil {
		t.Fatalf("%s failed: %s", testName, err)
	} else if rows, err := sqlc.FetchRows(dbRows); err != nil {
		t.Fatalf("%s failed: %s", testName, err)
	} else if value := rows[0]["$1"]; int(value.(float64)) != numRows {
		t.Fatalf("%s failed: expected collection to have %#v rows but received %#v", testName, numRows, value)
	}
}
// TestAppDaoCosmosdb_Create runs the shared Create scenario and verifies one row exists.
func TestAppDaoCosmosdb_Create(t *testing.T) {
	testName := "TestAppDaoCosmosdb_Create"
	teardownTest := setupTest(t, testName, setupTestCosmosdb, teardownTestCosmosdb)
	defer teardownTest(t)
	appDao := NewAppDaoCosmosdb(testSqlc, tableNameCosmosdb)
	doTestAppDao_Create(t, testName, appDao)
	_ensureCosmosdbNumRows(t, testName, testSqlc, 1)
}
// TestAppDaoCosmosdb_Get runs the shared Get scenario against CosmosDB.
func TestAppDaoCosmosdb_Get(t *testing.T) {
	testName := "TestAppDaoCosmosdb_Get"
	teardownTest := setupTest(t, testName, setupTestCosmosdb, teardownTestCosmosdb)
	defer teardownTest(t)
	appDao := NewAppDaoCosmosdb(testSqlc, tableNameCosmosdb)
	doTestAppDao_Get(t, testName, appDao)
}
// TestAppDaoCosmosdb_Delete runs the shared Delete scenario and verifies the collection is empty.
func TestAppDaoCosmosdb_Delete(t *testing.T) {
	testName := "TestAppDaoCosmosdb_Delete"
	teardownTest := setupTest(t, testName, setupTestCosmosdb, teardownTestCosmosdb)
	defer teardownTest(t)
	appDao := NewAppDaoCosmosdb(testSqlc, tableNameCosmosdb)
	doTestAppDao_Delete(t, testName, appDao)
	_ensureCosmosdbNumRows(t, testName, testSqlc, 0)
}
// TestAppDaoCosmosdb_Update runs the shared Update scenario and verifies one row remains.
func TestAppDaoCosmosdb_Update(t *testing.T) {
	testName := "TestAppDaoCosmosdb_Update"
	teardownTest := setupTest(t, testName, setupTestCosmosdb, teardownTestCosmosdb)
	defer teardownTest(t)
	appDao := NewAppDaoCosmosdb(testSqlc, tableNameCosmosdb)
	doTestAppDao_Update(t, testName, appDao)
	_ensureCosmosdbNumRows(t, testName, testSqlc, 1)
}
// TestAppDaoCosmosdb_GetUserApps runs the shared GetUserApps scenario and
// verifies the fixture's expected 10 rows.
func TestAppDaoCosmosdb_GetUserApps(t *testing.T) {
	testName := "TestAppDaoCosmosdb_GetUserApps"
	teardownTest := setupTest(t, testName, setupTestCosmosdb, teardownTestCosmosdb)
	defer teardownTest(t)
	appDao := NewAppDaoCosmosdb(testSqlc, tableNameCosmosdb)
	doTestAppDao_GetUserApps(t, testName, appDao)
	_ensureCosmosdbNumRows(t, testName, testSqlc, 10)
}
|
package main
import (
_ "io/ioutil"
"os"
"github.com/FBreuer2/librsync-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// CommandPatch applies an rdiff-style delta to a basis file, writing the
// reconstructed file to the third argument:
//
//	patch <basis> <delta> <newfile>
//
// Missing arguments and I/O failures are fatal.
func CommandPatch(c *cli.Context) {
	if len(c.Args()) > 3 {
		// Bug fix: the command consumes 3 positional args, so the number of
		// ignored extras is len-3, not len-2.
		logrus.Warnf("%d additional arguments passed are ignored", len(c.Args())-3)
	}
	if c.Args().Get(0) == "" {
		logrus.Fatalf("Missing basis file")
	}
	if c.Args().Get(1) == "" {
		logrus.Fatalf("Missing delta file")
	}
	if c.Args().Get(2) == "" {
		logrus.Fatalf("Missing newfile file")
	}
	basis, err := os.Open(c.Args().Get(0))
	if err != nil {
		logrus.Fatal(err)
	}
	defer basis.Close()
	delta, err := os.Open(c.Args().Get(1))
	if err != nil {
		logrus.Fatal(err)
	}
	defer delta.Close()
	// Output is created/truncated with owner-only permissions.
	newfile, err := os.OpenFile(c.Args().Get(2), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
	if err != nil {
		logrus.Fatal(err)
	}
	defer newfile.Close()
	if err := librsync.Patch(basis, delta, newfile); err != nil {
		logrus.Fatal(err)
	}
}
|
package geeRPC
import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net"
	"reflect"
	"sync"
)
const MagicNumber = 0x3bef5c
// Option is the JSON handshake sent at the start of every connection; it
// carries the magic number and selects the codec for the rest of the session.
type Option struct {
	MagicNumber int
	CodecType   Type
}
// DefaultOption is the handshake used when callers don't supply their own:
// gob-encoded bodies behind the geerpc magic number.
var DefaultOption = &Option{
	MagicNumber: MagicNumber,
	CodecType:   GobType,
}
// Server is an RPC server; it currently carries no state of its own.
type Server struct {
}
// NewServer returns a new Server.
func NewServer() *Server {
	return &Server{}
}
var DefaultServer = NewServer()
// Accept accepts connections on the listener and serves each one on its
// own goroutine, returning when the listener fails.
func (s *Server) Accept(lis net.Listener) {
	for {
		conn, err := lis.Accept()
		if err != nil {
			return
		}
		// One goroutine per connection.
		go s.ServeConn(conn)
	}
}
func Accept(lis net.Listener) { DefaultServer.Accept(lis) }
// ServeConn serves a single connection, blocking until the client hangs
// up. It decodes the JSON Option handshake, validates the magic number,
// looks up the requested codec, and then serves requests through it.
// Invalid handshakes silently close the connection.
func (s *Server) ServeConn(conn io.ReadWriteCloser) {
	defer func() { conn.Close() }()
	var opt Option
	if err := json.NewDecoder(conn).Decode(&opt); err != nil {
		return
	}
	if opt.MagicNumber != MagicNumber {
		return
	}
	f := NewCodecFuncMap[opt.CodecType]
	if f == nil {
		return
	}
	// Bug fix: the codec constructor was looked up but never invoked, so
	// every connection was dropped right after the handshake and
	// serveCodec was unreachable.
	s.serveCodec(f(conn))
}
var invalidRequest = struct{}{}
// serveCodec reads requests off the codec until the connection dies,
// answering each request on its own goroutine. Responses are serialized
// through the sending mutex so concurrent handlers don't interleave
// writes; the WaitGroup tracks in-flight handlers.
func (s *Server) serveCodec(cc Codec) {
	sending := new(sync.Mutex)
	wg := new(sync.WaitGroup)
	for {
		req, err := s.readRequest(cc)
		if err != nil {
			if req == nil {
				break // it's not possible to recover, so close the connection
			}
			req.h.Error = err.Error()
			s.sendResponse(cc, req.h, invalidRequest, sending)
			continue
		}
		wg.Add(1)
		go s.handleRequest(cc, req, sending, wg)
	}
	// Bug fix: without waiting here the caller could tear down the
	// connection while handlers were still writing their responses.
	wg.Wait()
}
// request bundles all information of a single in-flight RPC call.
type request struct {
	h            *Header       // header of request
	argv, replyv reflect.Value // argv and replyv of request
}
// readRequestHeader decodes the next request header from the codec. EOF
// variants just mean the client closed the connection; anything else is a
// genuine decode failure.
func (s *Server) readRequestHeader(cc Codec) (*Header, error) {
	var h Header
	if err := cc.ReadHeader(&h); err != nil {
		if err != io.EOF && err != io.ErrUnexpectedEOF {
			// Previously this branch was empty, silently swallowing real
			// decode errors; surface them in the log.
			log.Println("rpc server: read header error:", err)
		}
		return nil, err
	}
	return &h, nil
}
// readRequest reads one full request (header plus body) off the codec.
// Header errors abort the request; body errors are logged but the request
// is still returned so the caller can reply.
func (s *Server) readRequest(cc Codec) (*request, error) {
	h, err := s.readRequestHeader(cc)
	if err != nil {
		return nil, err
	}
	req := &request{h: h}
	// TODO: now we don't know the type of request argv
	// day 1, just suppose it's string
	req.argv = reflect.New(reflect.TypeOf(""))
	if err = cc.ReadBody(req.argv.Interface()); err != nil {
		// Previously this error was silently discarded; log it so
		// malformed bodies are visible.
		log.Println("rpc server: read argv err:", err)
	}
	return req, nil
}
// sendResponse writes one response under the sending mutex so that
// concurrent handlers never interleave partial writes on the connection.
func (s *Server) sendResponse(cc Codec, h *Header, body interface{}, sending *sync.Mutex) {
	sending.Lock()
	defer sending.Unlock()
	if err := cc.Write(h, body); err != nil {
		// Previously swallowed; log it — the connection is likely broken.
		log.Println("rpc server: write response error:", err)
	}
}
// handleRequest answers a single request and marks it done on the
// WaitGroup. Day-1 placeholder: it ignores argv and replies with a
// greeting keyed by the request's sequence number.
func (s *Server) handleRequest(cc Codec, req *request, sending *sync.Mutex, wg *sync.WaitGroup) {
	// TODO, should call registered rpc methods to get the right replyv
	// day 1, just print argv and send a hello message
	defer wg.Done()
	req.replyv = reflect.ValueOf(fmt.Sprintf("geerpc resp %d", req.h.Seq))
	s.sendResponse(cc, req.h, req.replyv.Interface(), sending)
}
|
package kpalindrome
import "testing"
// palindromeTests lists inputs for isKpalindrome: the string, the number
// of deletions k allowed, and the expected verdict.
var palindromeTests = []struct {
	s   string
	k   int
	res bool
}{
	{"abxa", 1, true},
	{"abxda", 1, false},
	{"abccba", 0, true},
}
// TestIsKPalindrome runs isKpalindrome over the fixture table and reports
// any case whose verdict disagrees with the expected value.
func TestIsKPalindrome(t *testing.T) {
	for _, tc := range palindromeTests {
		if got := isKpalindrome(tc.s, tc.k); got != tc.res {
			t.Error("Wrong result on isKPalindrome", tc.s, tc.k, tc.res)
		}
	}
}
|
package main
import (
"log"
"os"
"time"
"github.com/joho/godotenv"
"github.com/sclevine/agouti"
)
// main automates an e-typing skill check: it logs in with credentials from
// login.env, starts the skill check, then endlessly echoes each displayed
// sentence back as key presses.
func main() {
	driver := agouti.ChromeDriver(agouti.Browser("chrome"))
	if err := driver.Start(); err != nil {
		log.Fatalf("failed to start driver:%v", err)
	}
	defer driver.Stop()
	page, err := driver.NewPage()
	if err != nil {
		log.Fatalf("failed to open driver:%v", err)
	}
	if err := page.Navigate("https://work.e-typing.ne.jp/e-typing_pro/user/"); err != nil {
		log.Fatalf("failed to navigate:%v", err)
	}
	id := page.FindByID("user_id")
	pass := page.FindByID("password")
	// Credentials come from login.env (keys ID and PASS).
	err = godotenv.Load("login.env")
	if err != nil {
		log.Fatalf("failed to loading .env file:%v", err)
	}
	// NOTE(review): Fill errors are ignored here — confirm that's intended.
	id.Fill(os.Getenv("ID"))
	pass.Fill(os.Getenv("PASS"))
	if err := page.FindByID("login_btn").Submit(); err != nil {
		log.Fatalf("failed to login:%v", err)
	}
	if err := page.FindByID("skilcheck_btn").Click(); err != nil {
		log.Fatalf("failed to click:%v", err)
	}
	// Give the page a moment to load the typing iframe.
	time.Sleep(time.Second)
	if err := page.FindByID("typing_app_frame").SwitchToFrame(); err != nil {
		log.Fatalf("failed to swich frame:%v", err)
	}
	if err := page.FindByXPath("//*[@id=\"start_btn\"]").Click(); err != nil {
		log.Fatalf("failed to click:%v", err)
	}
	time.Sleep(time.Second)
	// A space key press starts the check.
	if err := page.FindByXPath("/html/body").SendKeys(" "); err != nil {
		log.Fatalf("failed to send space%v", err)
	}
	time.Sleep(time.Second * 3)
	// Endless typing loop: read the current sentence, type it rune by rune,
	// then wait for the next sentence to appear. SendKeys errors are
	// deliberately ignored.
	for {
		keys, _ := page.FindByXPath("//*[@id=\"sentenceText\"]/div/span[2]").Text()
		for _, key := range keys {
			_ = page.Find("body").SendKeys(string([]rune{key}))
		}
		time.Sleep(time.Millisecond * 890)
	}
}
|
package types
import (
"bytes"
"encoding/json"
"testing"
)
// TestByteString checks the Stringer form of a Byte.
func TestByteString(t *testing.T) {
	t.Parallel()
	got := Byte('b').String()
	if got != "b" {
		t.Errorf("Expected %q, got %s", "b", got)
	}
}
// TestByteUnmarshal checks JSON decoding of a quoted single character.
func TestByteUnmarshal(t *testing.T) {
	t.Parallel()
	var got Byte
	if err := json.Unmarshal([]byte(`"b"`), &got); err != nil {
		t.Error(err)
	}
	if got != 'b' {
		t.Errorf("Expected %q, got %s", "b", got)
	}
}
// TestByteMarshal checks JSON encoding produces the quoted character.
func TestByteMarshal(t *testing.T) {
	t.Parallel()
	v := Byte('b')
	out, err := json.Marshal(&v)
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(out, []byte(`"b"`)) {
		t.Errorf("expected %s, got %s", `"b"`, v.String())
	}
}
// TestByteValue checks the driver value is the raw underlying byte.
func TestByteValue(t *testing.T) {
	t.Parallel()
	in := Byte('b')
	got, err := in.Value()
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal([]byte{byte(in)}, got.([]byte)) {
		t.Errorf("byte mismatch, %v %v", in, got)
	}
}
// TestByteScan checks scanning a one-character string into a Byte.
func TestByteScan(t *testing.T) {
	t.Parallel()
	var got Byte
	if err := got.Scan("b"); err != nil {
		t.Error(err)
	}
	if !bytes.Equal([]byte{byte(got)}, []byte{'b'}) {
		t.Errorf("bad []byte: %#v ≠ %#v\n", got, "b")
	}
}
|
package testdata
import (
"time"
"github.com/frk/gosql"
"github.com/frk/gosql/internal/testdata/common"
)
// UpdateWhereblockBasicSingle2Query is a test fixture for the gosql code
// generator: an update against test_user whose WHERE block combines two
// time-range conditions with a nested OR-group of column predicates.
type UpdateWhereblockBasicSingle2Query struct {
	User  *common.User4 `rel:"test_user:u"`
	Where struct {
		CreatedAfter  time.Time `sql:"u.created_at >"`
		CreatedBefore time.Time `sql:"u.created_at <"`
		FullName      struct {
			_ gosql.Column `sql:"u.full_name = 'John Doe'"`
			_ gosql.Column `sql:"u.full_name = 'Jane Doe'" bool:"or"`
		} `sql:">"`
	}
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/synonymformat"
)
// SynonymTokenFilter type.
//
// All fields are optional pointers except Type, which MarshalJSON forces
// to "synonym". (Code generated from the elasticsearch-specification —
// edits here are comment-only.)
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/_types/analysis/token_filters.ts#L121-L130
type SynonymTokenFilter struct {
	Expand       *bool                        `json:"expand,omitempty"`
	Format       *synonymformat.SynonymFormat `json:"format,omitempty"`
	Lenient      *bool                        `json:"lenient,omitempty"`
	Synonyms     []string                     `json:"synonyms,omitempty"`
	SynonymsPath *string                      `json:"synonyms_path,omitempty"`
	Tokenizer    *string                      `json:"tokenizer,omitempty"`
	Type         string                       `json:"type,omitempty"`
	Updateable   *bool                        `json:"updateable,omitempty"`
	Version      *string                      `json:"version,omitempty"`
}
// UnmarshalJSON decodes the filter token-by-token. Boolean fields also
// accept JSON strings ("true"/"false"), and free-form string fields fall
// back to the raw bytes when unquoting fails. (Code generated from the
// elasticsearch-specification — edits here are comment-only.)
func (s *SynonymTokenFilter) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			// EOF terminates the token stream normally.
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		switch t {
		case "expand":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.Expand = &value
			case bool:
				s.Expand = &v
			}
		case "format":
			if err := dec.Decode(&s.Format); err != nil {
				return err
			}
		case "lenient":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.Lenient = &value
			case bool:
				s.Lenient = &v
			}
		case "synonyms":
			if err := dec.Decode(&s.Synonyms); err != nil {
				return err
			}
		case "synonyms_path":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			// Keep the raw bytes when the value is not a quoted string.
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.SynonymsPath = &o
		case "tokenizer":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.Tokenizer = &o
		case "type":
			if err := dec.Decode(&s.Type); err != nil {
				return err
			}
		case "updateable":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.Updateable = &value
			case bool:
				s.Updateable = &v
			}
		case "version":
			if err := dec.Decode(&s.Version); err != nil {
				return err
			}
		}
	}
	return nil
}
// MarshalJSON override marshalling to include literal value
// (Type is forced to "synonym" regardless of the receiver's value).
func (s SynonymTokenFilter) MarshalJSON() ([]byte, error) {
	// Alias type avoids recursing into this MarshalJSON.
	type innerSynonymTokenFilter SynonymTokenFilter
	tmp := innerSynonymTokenFilter{
		Expand:       s.Expand,
		Format:       s.Format,
		Lenient:      s.Lenient,
		Synonyms:     s.Synonyms,
		SynonymsPath: s.SynonymsPath,
		Tokenizer:    s.Tokenizer,
		Type:         s.Type,
		Updateable:   s.Updateable,
		Version:      s.Version,
	}
	tmp.Type = "synonym"
	return json.Marshal(tmp)
}
// NewSynonymTokenFilter returns a SynonymTokenFilter.
func NewSynonymTokenFilter() *SynonymTokenFilter {
	r := &SynonymTokenFilter{}
	return r
}
|
package cmds
import (
"context"
"gopkg.in/yaml.v3"
"github.com/caos/orbos/pkg/kubernetes/cli"
orbcfg "github.com/caos/orbos/pkg/orb"
"github.com/caos/orbos/internal/ctrlgitops"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
)
// Takeoff clones the orb's git repository and, depending on its contents
// and the flags, runs ORBITER, ensures the caos-system namespace and
// orbconfig secret, and deploys the BOOM and networking operators.
func Takeoff(
	monitor mntr.Monitor,
	ctx context.Context,
	orbConfig *orbcfg.Orb,
	gitClient *git.Client,
	recur bool,
	destroy bool,
	deploy bool,
	verbose bool,
	ingestionAddress string,
	version string,
	gitCommit string,
	kubeconfig string,
	gitOpsBoom bool,
	gitOpsNetworking bool,
) error {
	if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
		return err
	}
	if err := gitClient.Clone(); err != nil {
		return err
	}
	// ORBITER only runs when its config file exists in the repo.
	withORBITER := gitClient.Exists(git.OrbiterFile)
	if withORBITER {
		orbiterConfig := &ctrlgitops.OrbiterConfig{
			Recur:            recur,
			Destroy:          destroy,
			Deploy:           deploy,
			Verbose:          verbose,
			Version:          version,
			OrbConfigPath:    orbConfig.Path,
			GitCommit:        gitCommit,
			IngestionAddress: ingestionAddress,
		}
		if err := ctrlgitops.Orbiter(ctx, monitor, orbiterConfig, gitClient); err != nil {
			return err
		}
	}
	if !deploy {
		monitor.Info("Skipping operator deployments")
		return nil
	}
	k8sClient, err := cli.Client(
		monitor,
		orbConfig,
		gitClient,
		kubeconfig,
		gitOpsBoom || gitOpsNetworking,
		false,
	)
	if err != nil {
		return err
	}
	if err := kubernetes.EnsureCaosSystemNamespace(monitor, k8sClient); err != nil {
		monitor.Info("failed to apply common resources into k8s-cluster")
		return err
	}
	// The orbconfig secret is only needed when some operator runs in
	// gitops mode inside the cluster.
	if withORBITER || gitOpsBoom || gitOpsNetworking {
		orbConfigBytes, err := yaml.Marshal(orbConfig)
		if err != nil {
			return err
		}
		if err := kubernetes.EnsureOrbconfigSecret(monitor, k8sClient, orbConfigBytes); err != nil {
			monitor.Info("failed to apply configuration resources into k8s-cluster")
			return err
		}
	}
	if err := deployBoom(monitor, gitClient, k8sClient, version, gitOpsBoom); err != nil {
		return err
	}
	return deployNetworking(monitor, gitClient, k8sClient, version, gitOpsNetworking)
}
|
package mainMenu
import (
"github.com/myProj/scaner/new/include/appStruct"
"github.com/myProj/scaner/new/include/config/newWordsConfig"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/widgets"
"runtime"
"strings"
)
const (
	// Add-word dialog.
	addWordWindowWidth      = 250
	addWordWindowMaxWidth   = 450
	addWordWindowHeight     = 200
	addWordWindowName       = "Добавление слов"
	addWordWindowInputName  = "Введите слово"
	addWordWindowAddBtnName = "&Добавить"
	// Main window with the word list.
	wordListWindowWidth         = 400
	wordListWindowHeight        = 550
	wordListName                = "Слова для поиска"
	wordListWindowName          = "Список слов"
	wordListWindowAddBtnName    = "&Добавить слово"
	wordListWindowDeleteBtnName = "&Удалить выделенные слова"
)
// newWordListWindow creates the window that displays and edits the list of
// search words.
func newWordListWindow(guiC *appStruct.GuiComponent) *widgets.QWidget {
	// Create the window and apply initial settings.
	wordListWindow := widgets.NewQWidget(nil, 1)
	wordListWindow.SetWindowFlags(core.Qt__Dialog) // this flag raises the window above all others
	wordListWindow.SetMinimumSize2(wordListWindowWidth, wordListWindowHeight)
	wordListWindow.SetWindowTitle(wordListWindowName)
	// A single layout is enough for this window.
	vBoxLayout := widgets.NewQVBoxLayout()
	wordListWindow.SetLayout(vBoxLayout)
	// Buttons for editing the word list.
	btnAdd := widgets.NewQPushButton2(wordListWindowAddBtnName, nil)
	btnDelete := widgets.NewQPushButton2(wordListWindowDeleteBtnName, nil)
	// This widget displays the word list itself.
	wordList := widgets.NewQListWidget(nil)
	// With this selection mode multiple rows can be selected at once.
	wordList.SetSelectionMode(2)
	wordList.SetWindowTitle(wordListName)
	wordList.AddItems(newWordsConfig.GetDictWords())
	// Place the components on the layout.
	vBoxLayout.Layout().AddWidget(wordList)
	vBoxLayout.Layout().AddWidget(btnAdd)
	vBoxLayout.Layout().AddWidget(btnDelete)
	guiC.WordList = wordList
	addWordWindow := setAddWordWindow(wordList) // dialog used to add new words
	// Open the add-word dialog.
	btnAdd.ConnectClicked(func(bool) {
		addWordWindow.Show()
	})
	// Removes words both from the on-screen list and from the list kept in
	// the config.
	btnDelete.ConnectClicked(func(bool) {
		deleteFromWordList(wordList)
	})
	return wordListWindow
}
// setAddWordWindow is called from newWordListWindow; it builds the dialog
// used to add words to the list.
func setAddWordWindow(wordList *widgets.QListWidget) *widgets.QWidget {
	addWordWindow := widgets.NewQWidget(nil, 0)
	vBoxLayout := widgets.NewQVBoxLayout()
	addWordWindow.SetLayout(vBoxLayout)
	addWordWindow.SetWindowFlags(core.Qt__Dialog) // this flag raises the window above all others
	addWordWindow.SetMinimumSize2(addWordWindowWidth, addWordWindowHeight)
	addWordWindow.SetWindowTitle(addWordWindowName)
	addWordWindow.SetMaximumWidth(addWordWindowMaxWidth)
	// User input field.
	userInput := widgets.NewQTextEdit(nil)
	// Button that adds the entered word(s) to the config.
	addBtn := widgets.NewQPushButton2(addWordWindowAddBtnName, nil)
	// These labels report whether each word was added successfully or not.
	lblResultError := widgets.NewQLabel2("", nil, 0)
	lblResultSuccess := widgets.NewQLabel2("", nil, 0)
	lblResultError.SetStyleSheet("QLabel {color : red; }")
	lblResultSuccess.SetStyleSheet("QLabel {color : green; }")
	lblInputName := widgets.NewQLabel2(addWordWindowInputName, nil, 0)
	lblInputName.SetFixedHeight(15)
	// Place the components on the form.
	vBoxLayout.Layout().AddWidget(lblInputName)
	vBoxLayout.Layout().AddWidget(userInput)
	vBoxLayout.Layout().AddWidget(addBtn)
	vBoxLayout.Layout().AddWidget(lblResultError)
	vBoxLayout.Layout().AddWidget(lblResultSuccess)
	// Click handler that performs the addition; the input may contain
	// several words separated by platform line breaks.
	addBtn.ConnectClicked(func(bool) {
		if userInput.ToPlainText() == "" {
			lblResultError.SetText("Слово должно содержать\n хотя бы один символ")
			return
		}
		fields := strings.Split(userInput.ToPlainText(), getLineBreakSeparator())
		lblResultError.SetText("")
		lblResultSuccess.SetText("")
		errorResultText := ""
		successResultText := ""
		for _, w := range fields {
			if w == "" {
				continue
			}
			err := addToWordList(w, wordList)
			if err != nil {
				errorResultText += "Cлово \"" + w + "\" уже есть.\n"
			} else {
				successResultText += "Слово \"" + w + "\" успешно добавлено.\n"
			}
			// NOTE(review): this label update runs on every iteration; it
			// could sit after the loop — confirm before moving.
			if errorResultText != "" {
				lblResultError.SetText(errorResultText)
			}
		}
		lblResultSuccess.SetText(successResultText)
		userInput.Clear()
	})
	return addWordWindow
}
func getLineBreakSeparator()string{
if runtime.GOOS == "windows" {
return "\r\n"
}
return "\n"
}
|
package sieve
import (
"encoding/json"
"github.com/ActiveState/log"
"github.com/ActiveState/logyard-apps/common"
"github.com/ActiveState/zmqpubsub"
)
// Event is a single sieve event that is serialized to JSON and published
// over zmq (see MustPublish).
type Event struct {
	Type     string                 `json:"type"`     // Event identifier.
	Desc     string                 `json:"desc"`     // Event description
	Severity string                 `json:"severity"` // Event severity (INFO, WARN, ERROR)
	Info     map[string]interface{} `json:"info"`     // Arbitrary data specific to this event
	Process  string                 `json:"process"`  // The process that generated this event
	common.MessageCommon
}
// MustPublish serializes the event as JSON and publishes it on the
// "event.<Type>" topic; a marshalling failure aborts the process via
// log.Fatal.
func (event *Event) MustPublish(pub *zmqpubsub.Publisher) {
	topic := "event." + event.Type
	payload, err := json.Marshal(event)
	if err != nil {
		log.Fatal(err)
	}
	pub.MustPublish(topic, string(payload))
}
|
package model
import "time"
// CategoryListResp is the JSON item returned for each entry in the
// category list API.
type CategoryListResp struct {
	Name    string `json:"name"`    // category display name
	Num     int    `json:"num"`     // item count — presumably post count, confirm against handler
	Summary string `json:"summary"` // short description
	Url     string `json:"url"`     // link target for the category
}
// CategoryInfo mirrors one row of the category table (see CateTableName),
// mapped via xorm tags and exposed via json tags.
type CategoryInfo struct {
	Id         int       `xorm:"notnull 'id'" json:"id"`
	Name       string    `xorm:"notnull 'name'" json:"name"`
	CoverUrl   string    `xorm:"notnull 'cover_url'" json:"coverUrl"`
	Summary    string    `xorm:"notnull 'summary'" json:"summary"`
	PostNum    int       `xorm:"notnull 'post_num'" json:"postNum"`
	AddTime    time.Time `xorm:"notnull 'add_time'" json:"add_time"`
	UpdateTime time.Time `xorm:"notnull 'update_time'" json:"update_time"`
	DelFlag    int       `xorm:"notnull 'del_flag'" json:"del_flag"` // soft-delete marker — semantics not visible here, confirm
}
// CateTableName reports the database table name backing CategoryInfo.
func CateTableName() string {
	const tableName = "category"
	return tableName
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"errors"
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/util/sets"
cloudvolume "k8s.io/cloud-provider/volume"
)
// InTreePlugin handles translations between CSI and in-tree sources in a PV
type InTreePlugin interface {
	// TranslateInTreeStorageClassToCSI takes in-tree volume options
	// and translates them to volume options consumable by the CSI plugin
	TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error)

	// TranslateInTreeInlineVolumeToCSI takes an inline volume and will translate
	// the in-tree inline volume source to a CSIPersistentVolumeSource.
	// A PV object containing the CSIPersistentVolumeSource in its spec is returned
	TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error)

	// TranslateInTreePVToCSI takes a persistent volume and will translate
	// the in-tree pv source to a CSI Source. The input persistent volume can be modified
	TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error)

	// TranslateCSIPVToInTree takes a PV with a CSI PersistentVolume Source and will translate
	// it to an in-tree Persistent Volume Source for the in-tree volume
	// identified by the `Driver` field in the CSI Source. The input PV object can be modified
	TranslateCSIPVToInTree(pv *v1.PersistentVolume) (*v1.PersistentVolume, error)

	// CanSupport tests whether the plugin supports a given persistent volume
	// specification from the API.
	CanSupport(pv *v1.PersistentVolume) bool

	// CanSupportInline tests whether the plugin supports a given inline volume
	// specification from the API.
	CanSupportInline(vol *v1.Volume) bool

	// GetInTreePluginName returns the in-tree plugin name this migrates
	GetInTreePluginName() string

	// GetCSIPluginName returns the name of the CSI plugin that supersedes the in-tree plugin
	GetCSIPluginName() string

	// RepairVolumeHandle generates a correct volume handle based on node ID information.
	RepairVolumeHandle(volumeHandle, nodeID string) (string, error)
}
// Storage-class parameter keys shared by the plugin translators.
const (
	// fsTypeKey is the deprecated storage class parameter key for fstype
	fsTypeKey = "fstype"
	// csiFsTypeKey is the storage class parameter key for CSI fstype
	csiFsTypeKey = "csi.storage.k8s.io/fstype"
	// zoneKey is the deprecated storage class parameter key for zone
	zoneKey = "zone"
	// zonesKey is the deprecated storage class parameter key for zones
	zonesKey = "zones"
)
// replaceTopology overwrites an existing topology key by a new one,
// keeping the associated values. PVs without node affinity terms are
// left untouched.
func replaceTopology(pv *v1.PersistentVolume, oldKey, newKey string) error {
	// Guard against a nil dereference when called on a PV without affinity.
	// Callers in this file only invoke it after getTopologyZones found
	// matches, but a direct caller previously would have panicked.
	if pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
		return nil
	}
	for i := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
		for j, r := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[i].MatchExpressions {
			if r.Key == oldKey {
				pv.Spec.NodeAffinity.Required.NodeSelectorTerms[i].MatchExpressions[j].Key = newKey
			}
		}
	}
	return nil
}
// getTopologyZones returns all topology zones with the given key found in the PV.
func getTopologyZones(pv *v1.PersistentVolume, key string) []string {
	affinity := pv.Spec.NodeAffinity
	if affinity == nil || affinity.Required == nil || len(affinity.Required.NodeSelectorTerms) == 0 {
		return nil
	}
	var zones []string
	for _, term := range affinity.Required.NodeSelectorTerms {
		for _, req := range term.MatchExpressions {
			if req.Key == key {
				zones = append(zones, req.Values...)
			}
		}
	}
	return zones
}
// addTopology appends a topology requirement with the given key and zones
// to the PV's node affinity. Zones are trimmed and de-duplicated; an error
// is returned when no non-empty zone remains.
func addTopology(pv *v1.PersistentVolume, topologyKey string, zones []string) error {
	// Make sure there are no duplicate or empty strings
	filteredZones := sets.String{}
	for i := range zones {
		zone := strings.TrimSpace(zones[i])
		if len(zone) > 0 {
			filteredZones.Insert(zone)
		}
	}

	zones = filteredZones.List()
	if len(zones) < 1 {
		return errors.New("there are no valid zones to add to pv")
	}

	// Make sure the necessary fields exist without discarding any affinity
	// data already present on the PV (the previous code unconditionally
	// replaced NodeAffinity, clobbering existing selector terms).
	if pv.Spec.NodeAffinity == nil {
		pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
	}
	if pv.Spec.NodeAffinity.Required == nil {
		pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
	}
	if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
		pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
	}

	topology := v1.NodeSelectorRequirement{
		Key:      topologyKey,
		Operator: v1.NodeSelectorOpIn,
		Values:   zones,
	}

	pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = append(
		pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions,
		topology,
	)

	return nil
}
// translateTopology converts existing zone labels or in-tree topology to CSI topology.
// In-tree topology has precedence over zone labels.
func translateTopology(pv *v1.PersistentVolume, topologyKey string) error {
	// Topology already present under the CSI key: assume it is accurate.
	if len(getTopologyZones(pv, topologyKey)) > 0 {
		return nil
	}
	// Next preference: in-tree topology stored under the legacy
	// failure-domain key — rewrite the key in place.
	if zones := getTopologyZones(pv, v1.LabelZoneFailureDomain); len(zones) > 0 {
		return replaceTopology(pv, v1.LabelZoneFailureDomain, topologyKey)
	}
	// Finally fall back to the legacy zone label on the PV object itself.
	if label, ok := pv.Labels[v1.LabelZoneFailureDomain]; ok {
		if zones := strings.Split(label, cloudvolume.LabelMultiZoneDelimiter); len(zones) > 0 {
			return addTopology(pv, topologyKey, zones)
		}
	}
	return nil
}
// translateAllowedTopologies translates allowed topologies within storage class
// from legacy failure domain to given CSI topology key
func translateAllowedTopologies(terms []v1.TopologySelectorTerm, key string) ([]v1.TopologySelectorTerm, error) {
	if terms == nil {
		return nil, nil
	}

	newTopologies := []v1.TopologySelectorTerm{}
	for _, term := range terms {
		newTerm := v1.TopologySelectorTerm{}
		for _, exp := range term.MatchLabelExpressions {
			var newExp v1.TopologySelectorLabelRequirement
			switch exp.Key {
			case v1.LabelZoneFailureDomain:
				// Rewrite the legacy key; keep the zone values as-is.
				newExp = v1.TopologySelectorLabelRequirement{
					Key:    key,
					Values: exp.Values,
				}
			case key:
				// Already uses the CSI key: pass through unchanged.
				newExp = exp
			default:
				return nil, fmt.Errorf("unknown topology key: %v", exp.Key)
			}
			newTerm.MatchLabelExpressions = append(newTerm.MatchLabelExpressions, newExp)
		}
		newTopologies = append(newTopologies, newTerm)
	}
	return newTopologies, nil
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// FetchProfileBreakdown type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/_global/search/_types/profile.ts#L148-L157
//
// Each counter may arrive from Elasticsearch as either a JSON number or a
// numeric string; both forms are accepted by the custom UnmarshalJSON.
type FetchProfileBreakdown struct {
	LoadSource            *int `json:"load_source,omitempty"`
	LoadSourceCount       *int `json:"load_source_count,omitempty"`
	LoadStoredFields      *int `json:"load_stored_fields,omitempty"`
	LoadStoredFieldsCount *int `json:"load_stored_fields_count,omitempty"`
	NextReader            *int `json:"next_reader,omitempty"`
	NextReaderCount       *int `json:"next_reader_count,omitempty"`
	Process               *int `json:"process,omitempty"`
	ProcessCount          *int `json:"process_count,omitempty"`
}
// UnmarshalJSON decodes a FetchProfileBreakdown, accepting each counter
// either as a JSON number or as a numeric string. Unknown keys are skipped.
// Unlike the previous version, decode failures on a known key are now
// returned instead of silently ignored, and the eight duplicated switch
// cases are collapsed into a single key->field lookup.
func (s *FetchProfileBreakdown) UnmarshalJSON(data []byte) error {
	// Map each known object key to the field it populates.
	targets := map[string]**int{
		"load_source":              &s.LoadSource,
		"load_source_count":        &s.LoadSourceCount,
		"load_stored_fields":       &s.LoadStoredFields,
		"load_stored_fields_count": &s.LoadStoredFieldsCount,
		"next_reader":              &s.NextReader,
		"next_reader_count":        &s.NextReaderCount,
		"process":                  &s.Process,
		"process_count":            &s.ProcessCount,
	}

	dec := json.NewDecoder(bytes.NewReader(data))

	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}

		// Only string tokens can be keys we care about.
		key, ok := t.(string)
		if !ok {
			continue
		}
		field, known := targets[key]
		if !known {
			continue
		}

		var tmp interface{}
		if err := dec.Decode(&tmp); err != nil {
			return err
		}
		switch v := tmp.(type) {
		case string:
			value, err := strconv.Atoi(v)
			if err != nil {
				return err
			}
			*field = &value
		case float64:
			f := int(v)
			*field = &f
		}
	}
	return nil
}
// NewFetchProfileBreakdown returns a zero-valued FetchProfileBreakdown,
// ready to be populated by UnmarshalJSON.
func NewFetchProfileBreakdown() *FetchProfileBreakdown {
	return &FetchProfileBreakdown{}
}
|
package logger
// Logger super basic logging interface
type Logger interface {
	// Debug logs verbose diagnostic values.
	Debug(v ...interface{})
	// Info logs informational values.
	Info(v ...interface{})
	// Error logs failure values.
	Error(v ...interface{})
}
// Empty is a no-op Logger for callers that don't want any output.
type Empty struct{}

// Debug discards its arguments.
func (Empty) Debug(v ...interface{}) {}

// Info discards its arguments.
func (Empty) Info(v ...interface{}) {}

// Error discards its arguments.
func (Empty) Error(v ...interface{}) {}

// Compile-time check that *Empty satisfies Logger.
var _ Logger = (*Empty)(nil)
|
package main
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/hashicorp/go-hclog"
"github.com/spf13/pflag"
"github.com/spf13/viper"
nsscache "github.com/MiLk/nsscache-go"
)
var (
	// systemShells holds the login shells read from /etc/shells by initialize.
	systemShells []string

	// Command-line flags; bound to viper in main so they can also come
	// from the config file.
	minUID     = pflag.Int("min-uid", 2000, "Minimum UID number to accept")
	minGID     = pflag.Int("min-gid", 2000, "Minimum GID number to accept")
	defHomeDir = pflag.String("homedir", "/tmp/{UID}", "Home directory to provide if none is available from NetAuth")
	defShell   = pflag.String("shell", "/bin/nologin", "Default shell to use if none is provided in the directory")
	outDir     = pflag.String("out", "/etc", "Output directory for cache files")
	cfgfile    = pflag.String("config", "", "Config file to use")

	// log is the process-wide structured logger, set in main.
	log hclog.Logger
)
// initialize loads the list of valid login shells from /etc/shells into
// systemShells and logs them. The process exits with status 2 if the file
// cannot be read.
func initialize() {
	// Grab a listing of system shells and add them here.
	// (renamed from `bytes`, which shadowed the stdlib package name)
	data, err := ioutil.ReadFile("/etc/shells")
	if err != nil {
		// hclog takes structured key/value pairs, not printf verbs; the
		// old message carried a stray "%s" that was never substituted.
		log.Error("Error reading /etc/shells", "error", err)
		os.Exit(2)
	}

	for _, s := range strings.Split(string(data), "\n") {
		if s != "" {
			systemShells = append(systemShells, s)
		}
	}

	log.Info("The system will accept the following shells")
	for _, s := range systemShells {
		log.Info(fmt.Sprintf("  %s", s))
	}
}
// main loads configuration, reads the system shell list, fills the NSS
// caches from NetAuth, and writes the cache files to the output directory.
func main() {
	// The global logger must exist before anything tries to log.
	log = hclog.L().Named("nsscache")

	pflag.Parse()
	// Allow every flag to be supplied via the config file as well.
	viper.BindPFlags(pflag.CommandLine)

	if *cfgfile != "" {
		viper.SetConfigFile(*cfgfile)
	} else {
		// Default search path: /etc/netauth, ~/.netauth, then the CWD.
		viper.SetConfigName("config")
		viper.AddConfigPath("/etc/netauth/")
		viper.AddConfigPath("$HOME/.netauth")
		viper.AddConfigPath(".")
	}
	if err := viper.ReadInConfig(); err != nil {
		fmt.Println("Error reading config:", err)
		os.Exit(1)
	}

	// Perform initialization (reads /etc/shells; may exit).
	initialize()

	filler, err := NewCacheFiller(int32(*minUID), int32(*minGID), *defShell, *defHomeDir, systemShells)
	if err != nil {
		log.Error("Error initializing Cache Filler: ", "error", err)
		os.Exit(1)
	}

	cm := nsscache.NewCaches()
	if err := cm.FillCaches(filler); err != nil {
		log.Error("Unable to fill caches: ", "error", err)
		os.Exit(1)
	}

	// A write failure is logged but does not change the exit status —
	// presumably intentional (stale caches remain usable); confirm.
	err = cm.WriteFiles(&nsscache.WriteOptions{
		Directory: *outDir,
	})
	if err != nil {
		log.Error("Error writing updated caches: ", "error", err)
	}
	log.Info("Caches Updated")
}
|
package utils
import (
"testing"
"net/http/httptest"
"fmt"
"net/http"
)
// TestNewOVSExporterClient verifies that the constructor appends the
// /metrics path to the supplied endpoint.
func TestNewOVSExporterClient(t *testing.T) {
	endpoint := "http://localhost:8080"
	want := endpoint + "/metrics"
	client := NewOVSExporterClient(endpoint)
	if got := client.Endpoint; got != want {
		t.Errorf("expected %s, actual %s", want, got)
	}
}
// TestGetExporterMetrics serves a canned exporter payload from a local
// test server and checks the client parses it into a name->value map.
func TestGetExporterMetrics(t *testing.T) {
	sampleresponse := "openvswitch_interfaces_total 2\n"
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, sampleresponse)
	}))
	defer ts.Close()
	client := NewOVSExporterClient(ts.URL)
	metrics, err := client.GetExporterMetrics()
	if err != nil {
		// Fatalf, not Errorf: inspecting metrics after a failed fetch
		// would only produce a misleading secondary failure.
		t.Fatalf("error retrieving metrics %s", err)
	}
	if metrics["openvswitch_interfaces_total"] != "2" {
		t.Errorf("expected %s, actual %s", "2", metrics["openvswitch_interfaces_total"])
	}
}
|
/**
* Copyright (C) 2019, Xiongfa Li.
* All right reserved.
* @author xiongfa.li
* @date 2019/2/21
* @time 17:59
* @version V1.0
* Description:
*/
package commonPool
import (
"container/list"
"time"
)
// PooledObjectFactory creates and manages the lifecycle of pooled objects.
type PooledObjectFactory interface {
	// Called when an object is activated (about to be handed out).
	ActivateObject(interface{})
	// Called when an idle object is evicted from the pool and released.
	DestroyObject(interface{})
	// Called to create a new object.
	MakeObject() interface{}
	// Called when an object is returned to the pool and becomes idle.
	PassivateObject(interface{})
	// Reports whether the object is still valid.
	ValidateObject(interface{}) bool
}
// CommonPool is a generic object pool modeled on Apache commons-pool.
// Set Factory (required) and any options, then call Init to start the
// manager goroutine before using Get/Put.
type CommonPool struct {
	// Minimum number of idle objects kept in the pool; defaults to 8.
	MinIdle int
	// Maximum number of objects; defaults to 32.
	MaxSize int
	// How long Get waits for a resource; effective only when
	// BlockWhenExhausted is true. -1 means no limit: block until a
	// resource becomes available.
	MaxWaitMillis time.Duration
	// Minimum idle time before an object becomes eligible for eviction.
	// -1 disables removal; defaults to 30 minutes.
	MinEvictableIdleTimeMillis time.Duration
	// Number of resources reclaimed per eviction run; defaults to 3.
	NumTestsPerEvictionRun int
	// Whether Factory.ValidateObject is called on creation; defaults to false.
	TestOnCreate bool
	// Whether Factory.ValidateObject is called on borrow; defaults to false.
	TestOnBorrow bool
	// Whether Factory.ValidateObject is called on return; defaults to false.
	TestOnReturn bool
	// Whether Factory.ValidateObject is called while idle; defaults to false.
	TestWhileIdle bool
	// Period of the eviction goroutine; default -1 means no periodic eviction.
	TimeBetweenEvictionRunsMillis time.Duration
	// Whether Get blocks waiting when the pool is exhausted; defaults to false.
	BlockWhenExhausted bool
	// Whether externally created objects are accepted; defaults to false
	// (currently has no effect).
	AcceptExternalObj bool
	// Factory creates, validates and destroys pooled objects.
	Factory PooledObjectFactory
	//inner vars
	getChan chan interface{} // delivers borrowed objects to Get
	putChan chan interface{} // receives returned objects from Put
	stop chan bool // closed by Close to stop the manager goroutine
	curCount int // number of objects created so far (manager goroutine only)
	init bool // guards initDefault against double initialization
}
// Lifecycle states of a pooled object.
const (
	IDLE = iota // in the pool, idle
	ALLOCATED //in use
	EVICTION //being examined by the evictor
	VALIDATION //being validated
	INVALID //failed eviction test or validation; will be destroyed
	ABANDONED //checked out but not returned/used for too long; marked abandoned
	READY //ready to be handed to a client
)
// poolObject wraps a pooled value with its bookkeeping data.
type poolObject struct {
	when time.Time // when the object entered its current state (used for eviction)
	state int // one of the lifecycle states above
	obj interface{} // the user's pooled value
}
// initDefault validates the Factory and fills in defaults for every unset
// configuration field, then creates the pool's channels. It is idempotent:
// after the first call it returns immediately.
func (p *CommonPool) initDefault() {
	if p.Factory == nil {
		panic("Factory is Empty")
	}
	if p.init {
		return
	}
	p.init = true
	if p.MinIdle == 0 {
		p.MinIdle = 8
	}
	if p.MaxSize == 0 {
		p.MaxSize = 32
	}
	if p.MaxWaitMillis == 0 {
		// BUG FIX: the original assigned p.MaxSize = -1 here, which broke
		// the size limit instead of defaulting the wait time to "no limit".
		p.MaxWaitMillis = -1
	}
	if p.MinEvictableIdleTimeMillis == 0 {
		p.MinEvictableIdleTimeMillis = 30 * time.Minute
	}
	if p.TimeBetweenEvictionRunsMillis == 0 {
		p.TimeBetweenEvictionRunsMillis = -1
	}
	if p.NumTestsPerEvictionRun == 0 {
		p.NumTestsPerEvictionRun = 3
	}
	p.getChan = make(chan interface{})
	p.putChan = make(chan interface{})
	p.stop = make(chan bool)
	p.curCount = 0
}
// Init starts the pool's manager goroutine and returns its raw channels:
// receive borrowed objects from the first, send returned objects to the
// second. Direct channel use is supported but bypasses the MaxWaitMillis
// timeout and some Factory hooks, so prefer Get/Put; not recommended.
func (p *CommonPool) Init() (<-chan interface{}, chan<- interface{}) {
	p.initDefault()
	go func() {
		// queue holds idle/ready *poolObject entries, oldest at the front.
		queue := list.New()
		var timer *time.Timer
		if p.TimeBetweenEvictionRunsMillis == -1 {
			// Eviction disabled: a timer whose channel never fires.
			timer = &time.Timer{C: make(chan time.Time)}
		} else {
			timer = time.NewTimer(p.TimeBetweenEvictionRunsMillis)
		}
		for {
			//fmt.Println("main loop")
			if queue.Len() == 0 {
				o := p.make()
				// Pool has reached its object limit.
				if o == nil {
					// Wait for a client to return an object.
					got := false
					for !got {
						select {
						case <-p.stop:
							return
						case b := <-p.putChan:
							if p.idleObj(b) {
								queue.PushBack(&poolObject{time.Now(), IDLE, b})
								got = true
							}
						case <-timer.C:
							//fmt.Println("in sub loop")
							// Eviction sweep: drop objects idle longer than
							// the threshold while keeping at least MinIdle.
							e := queue.Front()
							next := e
							for e != nil && queue.Len() > p.MinIdle {
								next = e.Next()
								if p.MinEvictableIdleTimeMillis > 0 && time.Since(e.Value.(*poolObject).when) > p.MinEvictableIdleTimeMillis {
									queue.Remove(e)
									p.destoryObj(e.Value.(*poolObject).obj)
									e.Value = nil
								}
								e = next
							}
							// NOTE(review): the old timer is replaced without
							// Stop(); harmless, but allocates each cycle.
							timer = time.NewTimer(p.TimeBetweenEvictionRunsMillis)
						}
					}
				} else {
					queue.PushBack(&poolObject{time.Now(), ALLOCATED, o})
				}
			}
			// Activate the front object before offering it to a borrower.
			e := queue.Front()
			po := e.Value.(*poolObject)
			if po.state == IDLE || po.state == ALLOCATED {
				p.Factory.ActivateObject(po.obj)
				po.state = READY
			}
			select {
			case <-p.stop:
				return
			case b := <-p.putChan:
				if p.idleObj(b) {
					queue.PushBack(&poolObject{time.Now(), IDLE, b})
				}
			case p.getChan <- e.Value.(*poolObject).obj:
				// Handed to a borrower; remove from the idle queue.
				queue.Remove(e)
			case <-timer.C:
				// Same eviction sweep as above.
				e := queue.Front()
				next := e
				for e != nil && queue.Len() > p.MinIdle {
					next = e.Next()
					if p.MinEvictableIdleTimeMillis > 0 && time.Since(e.Value.(*poolObject).when) > p.MinEvictableIdleTimeMillis {
						queue.Remove(e)
						p.destoryObj(e.Value.(*poolObject).obj)
						e.Value = nil
					}
					e = next
				}
				timer = time.NewTimer(p.TimeBetweenEvictionRunsMillis)
			}
		}
	}()
	return p.getChan, p.putChan
}
// Close stops the pool's manager goroutine by closing the stop channel.
// It does not call DestroyObject on pooled or borrowed objects.
func (p *CommonPool) Close() {
	close(p.stop)
}
// syncMake creates a new object via the Factory while the pool is below
// its MaxSize limit, incrementing the live-object counter. Once the limit
// is reached it returns nil.
func (p *CommonPool) syncMake() interface{} {
	if p.curCount >= p.MaxSize {
		return nil
	}
	p.curCount++
	return p.Factory.MakeObject()
}
// idleObj prepares a returned object for the idle queue: nil objects are
// rejected, TestWhileIdle optionally validates it, and PassivateObject is
// invoked on acceptance. Reports whether the object may be pooled.
func (p *CommonPool) idleObj(i interface{}) bool {
	if i == nil {
		return false
	}
	if p.TestWhileIdle && !p.Factory.ValidateObject(i) {
		return false
	}
	p.Factory.PassivateObject(i)
	return true
}
// destoryObj invokes the factory's DestroyObject hook for non-nil objects.
// (The name keeps the original "destory" misspelling; Init depends on it.)
func (p *CommonPool) destoryObj(i interface{}) {
	if i != nil {
		p.Factory.DestroyObject(i)
	}
}
// make creates a new pooled object, optionally validating it when
// TestOnCreate is set. It returns nil when the pool is full or validation
// fails (note: a failed validation still counts against MaxSize).
func (p *CommonPool) make() interface{} {
	obj := p.syncMake()
	if obj == nil {
		return nil
	}
	if p.TestOnCreate && !p.Factory.ValidateObject(obj) {
		return nil
	}
	return obj
}
// Get borrows an object from the pool. When BlockWhenExhausted is false it
// returns immediately (nil if nothing is available); otherwise it blocks,
// either forever (MaxWaitMillis == -1) or until the timeout elapses, in
// which case nil is returned.
func (p *CommonPool) Get() interface{} {
	if !p.BlockWhenExhausted {
		select {
		case ret := <-p.getChan:
			return p.validateBorrowed(ret)
		default:
			return nil
		}
	}
	if p.MaxWaitMillis == -1 {
		return p.validateBorrowed(<-p.getChan)
	}
	select {
	case ret := <-p.getChan:
		return p.validateBorrowed(ret)
	case <-time.After(p.MaxWaitMillis):
		return nil
	}
}

// validateBorrowed applies the optional TestOnBorrow validation to a
// freshly borrowed object; it was duplicated three times in the old Get.
// NOTE(review): a failed borrow discards the object without DestroyObject
// and without decrementing curCount — a possible slow leak, preserved from
// the original behavior.
func (p *CommonPool) validateBorrowed(i interface{}) interface{} {
	if p.TestOnBorrow && !p.Factory.ValidateObject(i) {
		return nil
	}
	return i
}
// Put returns an object to the pool. When TestOnReturn is set, objects
// that fail validation are silently dropped.
// NOTE(review): a dropped object is neither destroyed nor subtracted from
// curCount, so repeated validation failures shrink effective capacity.
func (p *CommonPool) Put(i interface{}) {
	if p.TestOnReturn {
		if !p.Factory.ValidateObject(i) {
			return
		}
	}
	p.putChan <- i
}
// DummyFactory adapts a plain constructor function to PooledObjectFactory:
// all lifecycle hooks are no-ops and validation always succeeds.
type DummyFactory func() interface{}

// ActivateObject is a no-op.
func (f *DummyFactory) ActivateObject(interface{}) {}

// DestroyObject is a no-op.
func (f *DummyFactory) DestroyObject(interface{}) {}

// MakeObject invokes the wrapped constructor.
func (f *DummyFactory) MakeObject() interface{} { return (*f)() }

// PassivateObject is a no-op.
func (f *DummyFactory) PassivateObject(interface{}) {}

// ValidateObject always reports true.
func (f *DummyFactory) ValidateObject(interface{}) bool { return true }
// DefaultFactory implements PooledObjectFactory through optional
// callbacks. Every nil hook is skipped (Validate defaults to true),
// except Make, which is required and panics when unset.
type DefaultFactory struct {
	Activate func(interface{})
	Destroy func(interface{})
	Make func() interface{}
	Passivate func(interface{})
	Validate func(interface{}) bool
}

// ActivateObject calls the Activate hook when present.
func (f *DefaultFactory) ActivateObject(i interface{}) {
	if f.Activate != nil {
		f.Activate(i)
	}
}

// DestroyObject calls the Destroy hook when present.
func (f *DefaultFactory) DestroyObject(i interface{}) {
	if f.Destroy != nil {
		f.Destroy(i)
	}
}

// MakeObject calls the mandatory Make hook.
func (f *DefaultFactory) MakeObject() interface{} {
	if f.Make == nil {
		panic("Make func is nil")
	}
	return f.Make()
}

// PassivateObject calls the Passivate hook when present.
func (f *DefaultFactory) PassivateObject(i interface{}) {
	if f.Passivate != nil {
		f.Passivate(i)
	}
}

// ValidateObject calls the Validate hook when present, else reports true.
func (f *DefaultFactory) ValidateObject(i interface{}) bool {
	if f.Validate != nil {
		return f.Validate(i)
	}
	return true
}
|
package main
import (
"fmt"
)
// shape is anything that can report its own area.
type shape interface {
	getArea() float64
}

// triangle is described by its base and height.
type triangle struct {
	height float64
	base   float64
}

// square is described by the length of one side.
type square struct {
	sideLength float64
}

// getArea returns base*height/2, the area of the triangle.
func (t triangle) getArea() float64 {
	return t.base * t.height / 2
}

// getArea returns side squared, the area of the square.
func (s square) getArea() float64 {
	return s.sideLength * s.sideLength
}
// main prints the areas of a sample square and triangle.
func main() {
	sq := square{sideLength: 5}
	tr := triangle{base: 5, height: 2}
	fmt.Println("Area of Square of ", sq.sideLength, "x", sq.sideLength, "(L*W) = ", sq.getArea())
	fmt.Println("Area of Triangle of ", tr.base, "x", tr.height, "(.5*base*height) = ", tr.getArea())
}
|
package main
import (
"os"
"log"
"github.com/nsf/termbox-go"
)
// logger writes to gorcs.log; it is set up in main before the input loop.
var logger *log.Logger

func main() {
	// Assemble the game world before any file or terminal I/O exists.
	// NOTE(review): failures in this section can be neither logged nor
	// rendered, since the log file and termbox are opened below — confirm
	// the ordering is intentional.
	game := NewGame()
	game.player = NewPlayer()
	game.world = NewWorld()
	game.world.Build()
	game.ChangeScene(NewIntro())
	game.world.Move(game.player.entity, NewPoint(10, 10))

	// Log to gorcs.log, truncating any previous run's output.
	file, err := os.OpenFile("gorcs.log", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		panic(err)
	}
	defer file.Close()

	logger = log.New(file, "", log.LstdFlags|log.Lmicroseconds|log.Lshortfile)

	err = termbox.Init()
	if err != nil {
		panic(err)
	}
	defer termbox.Close()

	// Render/input loop: Ctrl-C exits; every other event updates the game.
	for {
		game.Render()
		event := termbox.PollEvent()
		switch event.Key {
		case termbox.KeyCtrlC:
			return
		}
		game.Update(event)
	}
}
|
package news
import (
"net/http"
"github.com/SanderV1992/golang_simple_blog/site"
)
// Register attaches the news endpoints (list, detail, add, edit) to the
// site router.
// NOTE(review): the meaning of the int argument (3, -1, 4) is not visible
// here — presumably a menu weight/position where -1 hides the entry;
// confirm against site.Router.RouterFunc.
func (server *Server) Register(router *site.Router) {
	router.RouterFunc("News", 3, "/news/", http.HandlerFunc(server.List))
	router.RouterFunc("NewsFull", -1, "/news/show/", http.HandlerFunc(server.Full))
	router.RouterFunc("News add", 4, "/news/add/", http.HandlerFunc(server.Add))
	router.RouterFunc("News edit", -1, "/news/edit/", http.HandlerFunc(server.Edit))
}
|
package HomeControllers
import (
"strings"
"lazybug.me/conv"
"lazybug.me/util"
"github.com/TruthHun/DocHub/helper"
"github.com/TruthHun/DocHub/models"
)
// SearchController serves the document search results page.
type SearchController struct {
	BaseController
}
// Get renders the search results page. Parameters may arrive either inside
// the URL path (parsed from :splat) or as ordinary query-string values;
// path values take precedence. Without a keyword the user is redirected to
// the home page.
func (this *SearchController) Get() {
	var (
		p        int = 1  //default page number
		listRows int = 10 //default number of records per page
	)
	//parameters embedded in the URL path
	params := conv.Path2Map(this.GetString(":splat"))
	if _, ok := params["wd"]; !ok { //search keyword
		params["wd"] = this.GetString("wd")
	}
	//no search keyword: return to the home page
	if len(params["wd"]) == 0 {
		this.Redirect("/", 302)
		return
	}
	if _, ok := params["type"]; !ok { //search type
		params["type"] = this.GetString("type")
	}
	params["type"] = helper.Default(params["type"], "all") //default: search everything
	if _, ok := params["sort"]; !ok { //sort order
		params["sort"] = this.GetString("sort")
	}
	params["sort"] = helper.Default(params["sort"], "default") //default sort order
	if _, ok := params["p"]; ok {
		p = helper.Interface2Int(params["p"])
	} else {
		p, _ = this.GetInt("p")
	}
	//clamp the page number to [1, 100]
	p = util.NumberRange(p, 1, 100)
	res := models.Search(params["wd"], params["type"], params["sort"], p, listRows, 1)
	if res.Total > 0 && len(res.Ids) > 0 {
		data := models.ModelDoc.GetDocsByIds(res.Ids)
		if len(data) > 0 {
			for index, val := range data {
				// Fill an empty description from the document text excerpt,
				// falling back to the title when no excerpt exists.
				if len(strings.TrimSpace(val["Description"].(string))) == 0 {
					if desc := models.ModelDocText.GetDescByMd5(val["Md5"], 120); len(desc) == 0 {
						data[index]["Description"] = val["Title"]
					} else {
						data[index]["Description"] = desc + "..."
					}
				}
			}
		}
		this.Data["Data"] = data
	}
	// Record first-page queries in the search log.
	// NOTE(review): this logs as soon as ONE configured dirty word is
	// absent, i.e. logging is suppressed only when the query contains
	// EVERY dirty word. Presumably any single dirty word should suppress
	// logging — confirm the intended semantics.
	if p == 1 {
		wdSlice := strings.Split(this.Sys.DirtyWord, " ")
		for _, wd := range wdSlice {
			if !strings.Contains(params["wd"], wd) {
				models.ReplaceInto(models.TableSearchLog, map[string]interface{}{"Wd": params["wd"]})
				break
			}
		}
	}
	this.Data["Seo"] = models.ModelSeo.GetByPage("PC-Search", params["wd"], "文档搜索,"+params["wd"], "文档搜索,"+params["wd"], this.Sys.Site)
	this.Data["Page"] = helper.Paginations(6, int(res.Total), listRows, p, "/search/", "type", params["type"], "sort", params["sort"], "p", p, "wd", params["wd"])
	this.Data["Params"] = params
	this.Data["Result"] = res
	this.Data["ListRows"] = listRows
	this.Data["WordLen"] = len(res.Word) //number of segmented search terms
	this.Data["SearchLog"] = models.ModelSearchLog.List(1, 10)
	this.Layout = ""
	this.Data["PageId"] = "wenku-search"
	this.TplName = "index.html"
}
|
package Routes
import (
"github.com/team-zf/framework/Network"
"github.com/wuxia-server/login/Cmd"
"github.com/wuxia-server/login/Routes/Account"
"github.com/wuxia-server/login/Routes/Server"
)
var (
	// Route is the shared HTTP route table for the login server.
	Route = Network.NewHttpRouteHandle()
)

// init wires each command ID to the event handler that services it.
func init() {
	Route.SetRoute(Cmd.Account_Register, &Account.RegisterEvent{})
	Route.SetRoute(Cmd.Account_PasswordLogin, &Account.PasswordLoginEvent{})
	Route.SetRoute(Cmd.Account_TokenLogin, &Account.TokenLoginEvent{})
	Route.SetRoute(Cmd.Server_List, &Server.ListEvent{})
	Route.SetRoute(Cmd.Server_Select, &Server.SelectEvent{})
}
|
package assets
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"regexp"
"testing"
"golang.org/x/net/html"
)
// mainjsRe matches fingerprinted bundle names of the form main.<hash>.js.
var mainjsRe = regexp.MustCompile(`main\.(.*)\.js`)

// extractUiEntrypointFilename walks the parsed index.html and returns the
// src of a <script> tag matching main.<hash>.js (the last match wins when
// several are present). It errors if no such script tag exists.
func extractUiEntrypointFilename(doc *html.Node) (string, error) {
	var mainjs string
	var crawler func(*html.Node)
	crawler = func(node *html.Node) {
		if node.Type == html.ElementNode && node.Data == "script" {
			for _, attr := range node.Attr {
				if attr.Key == "src" {
					if mainjsRe.MatchString(attr.Val) {
						mainjs = attr.Val
					}
					// A script with a src attribute is treated as a leaf:
					// stop descending into this node (but scripts without
					// src still have their children crawled below).
					return
				}
			}
		}
		for child := node.FirstChild; child != nil; child = child.NextSibling {
			crawler(child)
		}
	}
	crawler(doc)
	if mainjs != "" {
		return mainjs, nil
	}
	return "", errors.New("no entrypoint script tag found")
}
// TestCommittedAssets verifies that the UI entrypoint JS embedded in the
// generated assets matches the file committed under dist/.
func TestCommittedAssets(t *testing.T) {
	indexHTML, err := os.Open("../../dist/index.html")
	if err != nil {
		// Fatal: nothing below can run without index.html (the old code
		// used Errorf and then parsed a nil-ish file).
		t.Fatalf(err.Error())
	}
	defer indexHTML.Close()
	doc, err := html.Parse(indexHTML)
	if err != nil {
		// Previously ignored; a parse failure would silently produce a
		// misleading "no entrypoint" error downstream.
		t.Fatalf(err.Error())
	}
	mainJSFilename, err := extractUiEntrypointFilename(doc)
	if err != nil {
		t.Fatalf(err.Error())
	}
	mainJSFromAssets, err := Assets.Open(mainJSFilename)
	if err != nil {
		t.Fatalf(err.Error())
	}
	defer mainJSFromAssets.Close()
	mainJSAssetsBytes, err := ioutil.ReadAll(mainJSFromAssets)
	if err != nil {
		t.Fatalf(err.Error())
	}
	mainJSActualBytes, err := ioutil.ReadFile("../../dist" + mainJSFilename)
	if err != nil {
		t.Fatalf(fmt.Sprintf("%s. You may need to run `make assets` and recommit", err.Error()))
	}
	if !bytes.Equal(mainJSAssetsBytes, mainJSActualBytes) {
		t.Errorf("js generated assets do not match embedded assets")
	}
}
|
package main
import (
"github.com/mndrix/tap-go"
"github.com/opencontainers/runtime-tools/cgroups"
"github.com/opencontainers/runtime-tools/validation/util"
)
// main builds a container spec carrying three cgroup device allow-rules
// and validates that the runtime applies them (TAP output).
func main() {
	// Device numbers for the rules below: char 10:229 rwm, block 8:20 rw,
	// block 10:200 r.
	var major1, minor1, major2, minor2, major3, minor3 int64 = 10, 229, 8, 20, 10, 200

	t := tap.New()
	t.Header(0)
	defer t.AutoPlan()

	g, err := util.GetDefaultGenerator()
	if err != nil {
		util.Fatal(err)
	}
	g.SetLinuxCgroupsPath(cgroups.RelCgroupPath)
	// Arguments: allow, device type, major, minor, access string.
	g.AddLinuxResourcesDevice(true, "c", &major1, &minor1, "rwm")
	g.AddLinuxResourcesDevice(true, "b", &major2, &minor2, "rw")
	g.AddLinuxResourcesDevice(true, "b", &major3, &minor3, "r")

	err = util.RuntimeOutsideValidate(g, t, util.ValidateLinuxResourcesDevices)
	if err != nil {
		t.Fail(err.Error())
	}
}
|
package g
import (
"encoding/json"
"fmt"
"github.com/toolkits/file"
"log"
"sync"
)
// RedisConfig holds the redis connection settings.
type RedisConfig struct {
	Addr    string `json:"addr"`
	MaxIdle int    `json:"maxIdle"`
}

// HttpConfig holds the HTTP listener settings.
type HttpConfig struct {
	Enabled bool   `json:"enabled"`
	Listen  string `json:"listen"`
}

// MysqlConfig holds the MySQL connection settings.
type MysqlConfig struct {
	User     string `json:"user"`
	Password string `json:"password"`
	DB       string `json:"dbname"`
	Host     string `json:"host"`
	Port     string `json:"port"`
}

// AgentDefaultConfig describes the default agent package to deploy.
type AgentDefaultConfig struct {
	Name    string `json:"name"`
	Version string `json:"version"`
	Tarball string `json:"tarball"`
	Md5     string `json:"md5"`
	Cmd     string `json:"cmd"`
}

// AgentOtherConfig describes an agent override selected by host prefix —
// presumably; confirm how Prefix is matched in the consuming code.
type AgentOtherConfig struct {
	Prefix  string `json:"prefix"`
	Version string `json:"version"`
	Tarball string `json:"tarball"`
	Md5     string `json:"md5"`
	Cmd     string `json:"cmd"`
}

// InheritConfig pairs a default agent config with its overrides.
type InheritConfig struct {
	Default *AgentDefaultConfig `json:"default"`
	Others  []*AgentOtherConfig `json:"others"`
}

// GlobalConfig is the root of the JSON configuration file.
type GlobalConfig struct {
	Debug      bool             `json:"debug"`
	TarballDir string           `json:"tarballDir"`
	Http       *HttpConfig      `json:"http"`
	Mysql      *MysqlConfig     `json:"mysql"`
	Agents     []*InheritConfig `json:"agents"`
	Redis      *RedisConfig     `json:"redis"`
}

var (
	// ConfigFile records the path of the config file handed to ParseConfig.
	ConfigFile string
	// config is the current parsed configuration; guarded by lock.
	config *GlobalConfig
	// lock protects config (readers use Config, writer is ParseConfig).
	lock = new(sync.RWMutex)
)
// Config returns the current global configuration under a read lock.
// The returned pointer is shared; treat it as read-only.
func Config() *GlobalConfig {
	lock.RLock()
	defer lock.RUnlock()
	return config
}
// ParseConfig loads the global configuration from the JSON file at cfg.
// On success the parsed config replaces the process-wide config under the
// write lock and ConfigFile records the path.
func ParseConfig(cfg string) error {
	if cfg == "" {
		return fmt.Errorf("configuration file is blank")
	}
	if !file.IsExist(cfg) {
		return fmt.Errorf("configuration file is nonexistent")
	}
	ConfigFile = cfg
	configContent, err := file.ToTrimString(cfg)
	if err != nil {
		// Wrap with context instead of log-and-return: the caller decides
		// how to report the failure.
		return fmt.Errorf("reading %s: %v", cfg, err)
	}
	var c GlobalConfig
	if err := json.Unmarshal([]byte(configContent), &c); err != nil {
		return fmt.Errorf("parsing %s: %v", cfg, err)
	}
	lock.Lock()
	defer lock.Unlock()
	config = &c
	log.Println("read config file:", cfg, "successfully")
	return nil
}
|
package commons
import (
"testing"
)
// TestIsInArray checks both the miss and the hit paths of IsInArray,
// including the returned index on a hit.
func TestIsInArray(t *testing.T) {
	acceptedExtensions := []string{"zip", "gzip", "tar"}
	// Idiomatic boolean tests: `r` / `!r`, not comparisons against true/false.
	if r, _ := IsInArray("rar", acceptedExtensions); r {
		t.Fail()
	}
	if r, index := IsInArray("tar", acceptedExtensions); !r || index != 2 {
		t.Fail()
	}
}
|
package main
import "fmt"
func main() {
	// Precomputed factorials: 1! through 6!.
	table := []int{1, 2, 6, 24, 120, 720}
	// NOTE(review): n is fixed at 1, so the loop always prints the first
	// entry; presumably n was meant to come from input — confirm intent.
	n := 1
	for _, x := range table {
		if x >= n {
			// Print the smallest tabulated factorial >= n and stop.
			fmt.Println(x)
			return
		}
	}
	// No factorial in the table reached n.
	fmt.Println(-1)
}
|
package web
import (
"encoding/json"
"fmt"
"github.com/marcozov/Peerster/client"
"github.com/marcozov/Peerster/communications"
"github.com/marcozov/Peerster/messages"
"io/ioutil"
"net/http"
"strconv"
)
// Webserver serves the Peerster web UI and its JSON control endpoints.
type Webserver struct {
	port string // TCP port; the server binds to localhost only (see Start)
	router *http.ServeMux
	gossiper *communications.Gossiper
}
// MessageLogEntry is the JSON shape the UI consumes for both rumor and
// private messages.
type MessageLogEntry struct {
	FromNode string // origin node name
	SeqID uint32 // per-origin sequence number
	Content string // message text
}
// New builds a Webserver listening on the given port, backed by gossiper g.
func New(port string, g *communications.Gossiper) *Webserver {
	ws := &Webserver{
		port:     port,
		router:   http.NewServeMux(),
		gossiper: g,
	}
	return ws
}
// Start registers all HTTP routes and serves on localhost:<port>,
// blocking until the server stops.
func (w *Webserver) Start() {
	w.router.HandleFunc("/message", w.MessageHandler)
	w.router.HandleFunc("/node", w.NodeHandler)
	w.router.HandleFunc("/id", w.IdHandler)
	w.router.HandleFunc("/routes", w.handleRoutes)
	w.router.HandleFunc("/privateMessage", w.PrivateMessageHandler)
	w.router.HandleFunc("/file", w.FileHandler)
	w.router.HandleFunc("/privateFile", w.PrivateFileHandler)
	w.router.Handle("/", http.FileServer(http.Dir("client")))
	// ListenAndServe only returns on failure; surface the error instead
	// of silently dropping it as before.
	if err := http.ListenAndServe("localhost:"+w.port, w.router); err != nil {
		fmt.Println("webserver error:", err)
	}
}
func safeDecode(w http.ResponseWriter, r *http.Request, out interface{}) error {
data, err := ioutil.ReadAll(r.Body)
if err != nil {w.WriteHeader(http.StatusBadRequest)
return err
}
err = json.Unmarshal(data, out)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return err
}
return nil
}
// ConvertMessageFormat maps a rumor message onto the UI log-entry shape.
func (w *Webserver) ConvertMessageFormat(m *messages.RumorMessage) *MessageLogEntry {
	entry := &MessageLogEntry{}
	entry.FromNode = m.Origin
	entry.SeqID = m.ID
	entry.Content = m.Text
	return entry
}
// ConvertPrivateMessageFormat maps a private message onto the UI
// log-entry shape (the hop limit is intentionally not exposed).
func (w *Webserver) ConvertPrivateMessageFormat(m *messages.PrivateMessage) *MessageLogEntry {
	entry := &MessageLogEntry{}
	entry.FromNode = m.Origin
	entry.SeqID = m.ID
	entry.Content = m.Text
	//HopLimit: m.HopLimit,
	return entry
}
// FileHandler handles /file requests. GET is currently a no-op; POST
// decodes a file name and logs it. The actual download request to the
// gossiper is still commented out — work in progress.
func (w *Webserver) FileHandler(wr http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
	case "POST":
		var data string
		err := safeDecode(wr, r, &data)
		if err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		//client := client.NewClient("127.0.0.1", strconv.Itoa(w.gossiper.ClientListenerAddress.Port))
		//
		//messageWrapper := &messages.GossipPacket{
		//	DataRequest: &messages.DataRequest {
		//		Origin: "",
		//		Destination: "",
		//		HopLimit: 15,
		//		HashValue: []byte{},
		//	},
		//}
		fmt.Println("file handler data: ", data)
		//client.SendMessage(messageWrapper)
		wr.WriteHeader(http.StatusOK)
	default:
		wr.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// PrivateFileHandler handles /privateFile.
//
// GET is a no-op. POST decodes a {Destination, Content} payload and logs it;
// the private file transfer itself is not implemented yet, and the success
// status is left to the implicit 200.
func (w *Webserver) PrivateFileHandler(wr http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
	case "POST":
		type PrivateMessage struct {
			Destination string
			Content     string
		}
		var payload PrivateMessage
		if err := safeDecode(wr, r, &payload); err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		fmt.Println("private file handler data: ", payload.Destination, payload.Content)
	default:
		wr.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// MessageHandler handles /message.
//
// GET returns every rumor message in the gossiper database as a JSON array
// of MessageLogEntry. POST decodes a JSON string and forwards it to the
// local gossiper through the client listener port.
//
// Fix: the GET branch previously called WriteHeader(200) before marshaling,
// which made the later WriteHeader(500) on marshal failure a silent no-op.
// The status is now only written after marshaling succeeds (Write sends an
// implicit 200). A leftover per-message debug Println was also removed.
func (w *Webserver) MessageHandler(wr http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		var log []*MessageLogEntry
		w.gossiper.Database.Mux.RLock()
		messagesDB := w.gossiper.Database.Messages
		for _, messagesPerPeer := range messagesDB {
			for _, m := range messagesPerPeer {
				log = append(log, w.ConvertMessageFormat(m))
			}
		}
		w.gossiper.Database.Mux.RUnlock()
		data, err := json.Marshal(log)
		if err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		wr.WriteHeader(http.StatusOK)
		wr.Write(data)
	case "POST":
		var data string
		err := safeDecode(wr, r, &data)
		if err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		client := client.NewClient("127.0.0.1", strconv.Itoa(w.gossiper.ClientListenerAddress.Port))
		messageWrapper := &messages.GossipPacket{
			Simple: &messages.SimpleMessage{
				OriginalName:  "",
				RelayPeerAddr: "",
				Contents:      data,
			},
		}
		client.SendMessage(messageWrapper)
		wr.WriteHeader(http.StatusOK)
	default:
		wr.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// PrivateMessageHandler handles /privateMessage.
//
// GET returns the private messages received from the peer named by the
// "name" query parameter. POST decodes a {Destination, Content} payload and
// sends it as a private message via the local client listener.
//
// Fix: the GET branch previously called WriteHeader(200) before marshaling,
// which made the later WriteHeader(500) on marshal failure a silent no-op.
// Dead commented-out code was removed.
func (w *Webserver) PrivateMessageHandler(wr http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		var log []*MessageLogEntry
		w.gossiper.Database.Mux.RLock()
		privateMessagesDB := w.gossiper.PrivateDatabase
		origin := r.URL.Query().Get("name")
		for _, m := range privateMessagesDB.MessagesReceived[origin] {
			log = append(log, w.ConvertPrivateMessageFormat(m))
		}
		w.gossiper.Database.Mux.RUnlock()
		data, err := json.Marshal(log)
		if err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		wr.WriteHeader(http.StatusOK)
		wr.Write(data)
	case "POST":
		type PrivateMessage struct {
			Destination string
			Content     string
		}
		var data PrivateMessage
		err := safeDecode(wr, r, &data)
		if err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		client := client.NewClient("127.0.0.1", strconv.Itoa(w.gossiper.ClientListenerAddress.Port))
		messageWrapper := &messages.GossipPacket{
			Private: &messages.PrivateMessage{
				Origin:      "",
				ID:          0,
				Text:        data.Content,
				Destination: data.Destination,
				HopLimit:    15, // default hop limit for private routing
			},
		}
		client.SendMessage(messageWrapper)
		wr.WriteHeader(http.StatusOK)
	default:
		wr.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// NodeHandler handles /node: GET lists the known peer addresses as a JSON
// array; POST adds (or removes) a peer given its address.
func (w *Webserver) NodeHandler(wr http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		peers := w.gossiper.Peers.GetAllPeers()
		addrs := make([]string, 0, len(peers))
		for _, peer := range peers {
			addrs = append(addrs, peer.Address.String())
		}
		data, err := json.Marshal(addrs)
		if err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		wr.WriteHeader(http.StatusOK)
		wr.Write(data)
	case "POST":
		var addr string
		if err := safeDecode(wr, r, &addr); err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		fmt.Println("peer: ", addr)
		w.gossiper.AddDeletePeer(addr)
		wr.WriteHeader(http.StatusOK)
	default:
		wr.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// IdHandler handles /id: GET returns the gossiper's name as JSON.
//
// Fix: WriteHeader(200) was previously called before marshaling, turning the
// error path's WriteHeader(500) into a silent no-op. Marshal first, then
// write the status.
func (w *Webserver) IdHandler(wr http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		data, err := json.Marshal(w.gossiper.Name)
		if err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		wr.WriteHeader(http.StatusOK)
		wr.Write(data)
	default:
		wr.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// handleRoutes handles /routes: GET returns the identifiers of all nodes in
// the gossiper's current status vector.
//
// Fix: WriteHeader(200) was previously called before marshaling, turning the
// error path's WriteHeader(500) into a silent no-op.
func (w *Webserver) handleRoutes(wr http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		w.gossiper.Database.Mux.RLock()
		allStatuses := w.gossiper.Database.CurrentStatus.Want
		w.gossiper.Database.Mux.RUnlock()
		// NOTE(review): the slice elements are read after unlocking; this
		// assumes Want entries are not mutated concurrently — confirm.
		nodeList := make([]string, len(allStatuses))
		for i, status := range allStatuses {
			nodeList[i] = status.Identifier
		}
		data, err := json.Marshal(nodeList)
		if err != nil {
			wr.WriteHeader(http.StatusInternalServerError)
			return
		}
		wr.WriteHeader(http.StatusOK)
		wr.Write(data)
	default:
		wr.WriteHeader(http.StatusMethodNotAllowed)
	}
}
|
package domrender
import "github.com/vugu/vugu"
// namespaceToURI maps a well-known XML namespace prefix to its canonical
// URI; unknown prefixes map to the empty string.
func namespaceToURI(namespace string) string {
	uris := map[string]string{
		"html":  "http://www.w3.org/1999/xhtml",
		"math":  "http://www.w3.org/1998/Math/MathML",
		"svg":   "http://www.w3.org/2000/svg",
		"xlink": "http://www.w3.org/1999/xlink",
		"xml":   "http://www.w3.org/XML/1998/namespace",
		"xmlns": "http://www.w3.org/2000/xmlns/",
	}
	return uris[namespace]
}
// renderedCtx is the concrete context handed to components' Rendered hooks.
type renderedCtx struct {
	eventEnv vugu.EventEnv // environment for synchronizing UI events
	first    bool          // true only on the first render pass
}
// EventEnv implements RenderedCtx by returning the EventEnv.
func (c *renderedCtx) EventEnv() vugu.EventEnv {
	return c.eventEnv
}
// First returns true for the first render and otherwise false.
func (c *renderedCtx) First() bool {
	return c.first
}
// rendered0 is satisfied by components with a no-argument Rendered hook.
type rendered0 interface {
	Rendered()
}

// rendered1 is satisfied by components whose Rendered hook takes the
// render context.
type rendered1 interface {
	Rendered(ctx vugu.RenderedCtx)
}
// invokeRendered calls the component's Rendered lifecycle hook if it has
// one. The zero-argument form takes precedence over the context-taking form,
// matching the order of the original if/else chain.
func invokeRendered(c interface{}, rctx *renderedCtx) {
	switch v := c.(type) {
	case rendered0:
		v.Rendered()
	case rendered1:
		v.Rendered(rctx)
	}
}
|
package test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
/**
* @desc 多文件上传测试
* @author Ipencil
* @create 2019/3/15
*/
// upload is a manual single-file upload smoke test (skipped by default).
//
// Fix: the response error was previously ignored while response.Body.Close
// was deferred unconditionally — a failed request would leave response nil
// and panic in the deferred close. The error is now checked first.
func upload(t *testing.T) {
	t.SkipNow()
	client := &http.Client{}
	params := map[string]string{}
	upurl := "http://localhost:8000/pencil/upload"
	request, e := newfileUploadRequest(upurl, params)
	if e != nil {
		fmt.Println("error", e)
		return
	}
	response, err := client.Do(request)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer response.Body.Close()
	assert.Equal(t, "200 OK", response.Status)
	body, _ := ioutil.ReadAll(response.Body)
	t.Log("result:", string(body))
}
// upload_more is a manual multi-image upload smoke test (skipped by default).
//
// Fix: as in upload, check the client.Do error before deferring Close so a
// failed request cannot nil-panic in the deferred close.
func upload_more(t *testing.T) {
	t.SkipNow()
	client := &http.Client{}
	params := map[string]string{}
	upurl := "http://localhost:8000/pencil/uploada"
	request, e := newfileUploadRequest(upurl, params)
	if e != nil {
		fmt.Println("error", e)
		return
	}
	response, err := client.Do(request)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer response.Body.Close()
	assert.Equal(t, "200 OK", response.Status)
	body, _ := ioutil.ReadAll(response.Body)
	t.Log("result:", string(body))
}
// newfileUploadRequest builds a multipart/form-data POST request from the
// hard-coded list of local files plus any extra form fields in params.
// URIs containing "uploada" put every file under the "upload[]" field;
// otherwise each file uses the corresponding entry of keys.
//
// Fixes: errors from CreateFormFile and io.Copy were previously discarded
// (a failed CreateFormFile left part nil and io.Copy would panic), the
// request was used before checking the NewRequest error, and indexing keys
// past its length could panic when more files than keys are configured.
func newfileUploadRequest(uri string, params map[string]string) (*http.Request, error) {
	filePaths := []string{
		//"K:\\upload\\店铺\\店铺0.jpg",
		//"K:\\upload\\店铺\\店铺1.jpg",
		//"K:\\upload\\店铺\\店铺2.jpg",
		//"K:\\upload\\轮播\\轮播0.jpg",
		"K:\\upload\\轮播\\轮播1.jpg",
		"K:\\upload\\轮播\\轮播2.jpg",
		//"K:\\upload\\视频\\bb.mp4",
	}
	keys := []string{
		"files",
		//"infoimg1",
		//"infoimg2",
		//"loopimg0",
		//"loopimg1",
		//"loopimg2",
		//"video0",
	}
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	for i := 0; i < len(filePaths); i++ {
		file, err := os.Open(filePaths[i])
		if err != nil {
			return nil, err
		}
		var part io.Writer
		if strings.Contains(uri, "uploada") {
			part, err = writer.CreateFormFile("upload[]", filePaths[i])
		} else {
			// Guard against more files than configured keys; fall back to
			// the first key rather than panicking.
			key := keys[0]
			if i < len(keys) {
				key = keys[i]
			}
			part, err = writer.CreateFormFile(key, filePaths[i])
		}
		if err != nil {
			file.Close()
			return nil, err
		}
		_, err = io.Copy(part, file)
		file.Close()
		if err != nil {
			return nil, err
		}
	}
	for key, val := range params {
		_ = writer.WriteField(key, val)
	}
	if err := writer.Close(); err != nil {
		return nil, err
	}
	request, err := http.NewRequest("POST", uri, body)
	if err != nil {
		return nil, err
	}
	request.Header.Set("Content-Type", writer.FormDataContentType())
	return request, nil
}
|
/*
Package synapse is a wrapper library for the Synapse API (https://docs.synapsefi.com)
Instantiate client
// credentials used to set headers for each method request
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
)
# Examples
Enable logging & turn off developer mode (developer mode is true by default)
var client = synapse.New(
"CLIENT_ID",
"CLIENT_SECRET",
"IP_ADDRESS",
"FINGERPRINT",
true,
false,
)
Register Fingerprint
// payload response
{
"error": {
"en": "Fingerprint not registered. Please perform the MFA flow."
},
"error_code": "10",
"http_code": "202",
"phone_numbers": [
"developer@email.com",
"901-111-2222"
],
"success": false
}
// Submit a valid email address or phone number from "phone_numbers" list
res, err := user.Select2FA("developer@email.com")
// MFA sent to developer@email.com
res, err := user.VerifyPIN("123456")
Set an `IDEMPOTENCY_KEY` (for `POST` requests only)
scopeSettings := `{
"scope": [
"USERS|POST",
"USER|PATCH",
"NODES|POST",
"NODE|PATCH",
"TRANS|POST",
"TRAN|PATCH"
],
"url": "https://requestb.in/zp216zzp"
}`
idempotencyKey := `1234567890`
data, err := client.CreateSubscription(scopeSettings, idempotencyKey)
Submit optional query parameters
params := "per_page=3&page=2"
data, err := client.GetUsers(params)
*/
package synapse
import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)
/********** GLOBAL VARIABLES **********/

// logMode enables request logging; set through New's modes[0].
var logMode = false

// developerMode selects the sandbox API; on by default, cleared through
// New's modes[1].
var developerMode = true
/********** TYPES **********/

type (
	// Client represents the credentials used by the developer to instantiate a client
	Client struct {
		ClientID     string
		ClientSecret string
		Fingerprint  string
		IP           string
		request      Request // pre-built request carrier holding the credential headers
	}
)
/********** METHODS **********/

// do dispatches an HTTP request with the client's credential headers and
// decodes the JSON response body into a generic map via readStream.
//
// Fix: an unrecognized method previously fell through the switch and
// silently returned an empty result; it now returns an explicit error.
func (c *Client) do(method, url, data string, queryParams []string) (map[string]interface{}, error) {
	var body []byte
	var err error
	switch method {
	case "GET":
		body, err = c.request.Get(url, queryParams)
	case "POST":
		body, err = c.request.Post(url, data, queryParams)
	case "PATCH":
		body, err = c.request.Patch(url, data, queryParams)
	case "DELETE":
		body, err = c.request.Delete(url)
	default:
		return nil, fmt.Errorf("unsupported HTTP method %q", method)
	}
	return readStream(body), err
}
/********** CLIENT **********/

// New creates a client object. The optional modes control package behavior:
// modes[0] enables logging, and modes[1]=false turns developer (sandbox)
// mode off (it defaults to true).
func New(clientID, clientSecret, fingerprint, ipAddress string, modes ...bool) *Client {
	log.info("========== CREATING CLIENT INSTANCE ==========")
	if len(modes) > 0 {
		// Idiom fix: compare booleans directly instead of "== true"/"== false".
		if modes[0] {
			logMode = true
		}
		if len(modes) > 1 && !modes[1] {
			developerMode = false
		}
	}
	request := Request{
		clientID:     clientID,
		clientSecret: clientSecret,
		fingerprint:  fingerprint,
		ipAddress:    ipAddress,
	}
	return &Client{
		ClientID:     clientID,
		ClientSecret: clientSecret,
		Fingerprint:  fingerprint,
		IP:           ipAddress,
		request:      request,
	}
}
/********** AUTHENTICATION **********/

// GetPublicKey returns a public key as a token representing client
// credentials. scope[0] optionally overrides the default OAuth scope and
// scope[1] optionally restricts the key to a user id.
func (c *Client) GetPublicKey(scope ...string) (map[string]interface{}, error) {
	log.info("========== GET PUBLIC KEY ==========")
	requestedScope := "OAUTH|POST,USERS|POST,USERS|GET,USER|GET,USER|PATCH,SUBSCRIPTIONS|GET,SUBSCRIPTIONS|POST,SUBSCRIPTION|GET,SUBSCRIPTION|PATCH,CLIENT|REPORTS,CLIENT|CONTROLS"
	if len(scope) > 0 {
		requestedScope = scope[0]
	}
	query := "issue_public_key=YES&scope=" + requestedScope
	if len(scope) > 1 {
		query += "&user_id=" + scope[1]
	}
	return c.do("GET", buildURL(path["client"]), "", []string{query})
}
/********** NODE **********/

// GetNodes returns all of the client's nodes.
func (c *Client) GetNodes(queryParams ...string) (map[string]interface{}, error) {
	log.info("========== GET CLIENT NODES ==========")
	return c.do("GET", buildURL(path["nodes"]), "", queryParams)
}
// GetTradeMarketData returns data on a stock based on its ticker symbol.
func (c *Client) GetTradeMarketData(tickerSymbol string) (map[string]interface{}, error) {
	log.info("========== GET TRADE MARKET DATA ==========")
	// The ticker is passed through the query-parameter slot of do.
	return c.do("GET", buildURL(path["nodes"], "trade-market-watch"), "", []string{tickerSymbol})
}
// GetNodeTypes returns the available node types.
func (c *Client) GetNodeTypes() (map[string]interface{}, error) {
	log.info("========== GET NODE TYPES ==========")
	return c.do("GET", buildURL(path["nodes"], "types"), "", nil)
}
/********** OTHER **********/

// GetCryptoMarketData returns market data for cryptocurrencies.
func (c *Client) GetCryptoMarketData() (map[string]interface{}, error) {
	log.info("========== GET CRYPTO MARKET DATA ==========")
	return c.do("GET", buildURL(path["nodes"], "crypto-market-watch"), "", nil)
}
// GetCryptoQuotes returns all of the quotes for cryptocurrencies.
func (c *Client) GetCryptoQuotes(queryParams ...string) (map[string]interface{}, error) {
	log.info("========== GET CRYPTO QUOTES ==========")
	return c.do("GET", buildURL(path["nodes"], "crypto-quotes"), "", queryParams)
}
// GetInstitutions returns a list of all available banking institutions.
func (c *Client) GetInstitutions() (map[string]interface{}, error) {
	log.info("========== GET INSTITUTIONS ==========")
	return c.do("GET", buildURL(path["institutions"]), "", nil)
}
// LocateATMs returns a list of nearby ATMs.
func (c *Client) LocateATMs(queryParams ...string) (map[string]interface{}, error) {
	log.info("========== LOCATE ATMS ==========")
	return c.do("GET", buildURL(path["nodes"], "atms"), "", queryParams)
}
// VerifyAddress checks whether an address is valid.
func (c *Client) VerifyAddress(data string) (map[string]interface{}, error) {
	log.info("========== VERIFY ADDRESS ==========")
	return c.do("POST", buildURL("address-verification"), data, nil)
}
// VerifyRoutingNumber checks and returns the bank details of a routing number.
func (c *Client) VerifyRoutingNumber(data string) (map[string]interface{}, error) {
	log.info("========== VERIFY ROUTING NUMBER ==========")
	return c.do("POST", buildURL("routing-number-verification"), data, nil)
}
/********** SUBSCRIPTION **********/

// GetSubscriptions returns all of the client's subscriptions.
func (c *Client) GetSubscriptions(queryParams ...string) (map[string]interface{}, error) {
	log.info("========== GET SUBSCRIPTIONS ==========")
	return c.do("GET", buildURL(path["subscriptions"]), "", queryParams)
}
// GetSubscription returns a single subscription.
func (c *Client) GetSubscription(subscriptionID string) (map[string]interface{}, error) {
	log.info("========== GET SUBSCRIPTION ==========")
	return c.do("GET", buildURL(path["subscriptions"], subscriptionID), "", nil)
}
// CreateSubscription creates a subscription and returns the subscription
// data; an optional idempotency key may be supplied.
func (c *Client) CreateSubscription(data string, idempotencyKey ...string) (map[string]interface{}, error) {
	log.info("========== CREATE SUBSCRIPTION ==========")
	return c.do("POST", buildURL(path["subscriptions"]), data, idempotencyKey)
}
// UpdateSubscription updates an existing subscription.
func (c *Client) UpdateSubscription(subscriptionID string, data string) (map[string]interface{}, error) {
	log.info("========== UPDATE SUBSCRIPTION ==========")
	return c.do("PATCH", buildURL(path["subscriptions"], subscriptionID), data, nil)
}
// GetWebhookLogs returns all of the webhooks sent to this client.
func (c *Client) GetWebhookLogs() (map[string]interface{}, error) {
	log.info("========== GET WEBHOOK LOGS ==========")
	return c.do("GET", buildURL(path["subscriptions"], "logs"), "", nil)
}
/********** TRANSACTION **********/

// GetTransactions returns all client transactions.
func (c *Client) GetTransactions(queryParams ...string) (map[string]interface{}, error) {
	log.info("========== GET CLIENT TRANSACTIONS ==========")
	return c.do("GET", buildURL(path["transactions"]), "", queryParams)
}
/********** USER **********/

// GetUsers returns a list of users.
func (c *Client) GetUsers(queryParams ...string) (map[string]interface{}, error) {
	log.info("========== GET CLIENT USERS ==========")
	return c.do("GET", buildURL(path["users"]), "", queryParams)
}
// GetUser returns a single user. The supplied fingerprint and IP are bound
// to the returned User's request credentials for its follow-up calls.
func (c *Client) GetUser(userID, fingerprint, ipAddress string, queryParams ...string) (*User, error) {
	log.info("========== GET USER ==========")
	res, err := c.do("GET", buildURL(path["users"], userID), "", queryParams)
	var user User
	// Decode failures are tolerated here (as before); the raw payload is
	// always preserved in user.Response.
	mapstructure.Decode(res, &user)
	user.Response = res
	user.request = Request{
		clientID:     c.ClientID,
		clientSecret: c.ClientSecret,
		fingerprint:  fingerprint,
		ipAddress:    ipAddress,
	}
	return &user, err
}
// CreateUser creates a single user and returns the new user data. The new
// User carries request credentials built from fingerprint and ipAddress.
func (c *Client) CreateUser(data, fingerprint, ipAddress string, idempotencyKey ...string) (*User, error) {
	log.info("========== CREATE USER ==========")
	var user User
	user.request = Request{
		clientID:     c.ClientID,
		clientSecret: c.ClientSecret,
		fingerprint:  fingerprint,
		ipAddress:    ipAddress,
	}
	res, err := user.do("POST", buildURL(path["users"]), data, idempotencyKey)
	// Raw payload is preserved in Response regardless of decode success.
	mapstructure.Decode(res, &user)
	user.Response = res
	return &user, err
}
// GetUserDocumentTypes returns the available user document types.
func (c *Client) GetUserDocumentTypes() (map[string]interface{}, error) {
	log.info("========== GET USER DOCUMENT TYPES ==========")
	return c.do("GET", buildURL(path["users"], "document-types"), "", nil)
}
// GetUserEntityTypes returns the available user entity types.
func (c *Client) GetUserEntityTypes() (map[string]interface{}, error) {
	log.info("========== GET USER ENTITY TYPES ==========")
	return c.do("GET", buildURL(path["users"], "entity-types"), "", nil)
}
// GetUserEntityScopes returns available user entity scopes.
func (c *Client) GetUserEntityScopes() (map[string]interface{}, error) {
	// Fix: the log banner previously said "ENTITY TYPES", copy-pasted from
	// GetUserEntityTypes; it now matches this endpoint.
	log.info("========== GET USER ENTITY SCOPES ==========")
	url := buildURL(path["users"], "entity-scopes")
	return c.do("GET", url, "", nil)
}
|
package main
import (
"embed"
"fmt"
"html/template"
"net/http"
)
// indexTpl holds the parsed index page template; populated in init.
var indexTpl *template.Template

// templates embeds everything under templates/ into the binary.
//go:embed templates/*
var templates embed.FS
// init parses the embedded index template; Must panics at startup if the
// embedded file is missing or malformed.
func init() {
	indexTpl = template.Must(template.ParseFS(templates, "templates/index.gohtml"))
}
// main registers three demo routes and serves them on :8080.
//
// Fix: the template Execute error and the ListenAndServe error were both
// silently discarded; they are now reported.
func main() {
	data := "hey there"
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := indexTpl.Execute(w, data); err != nil {
			// Too late to change the HTTP status once Execute has started
			// writing; just record the failure.
			fmt.Println("template execute:", err)
		}
	})
	http.HandleFunc("/dog", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "/dog called")
	})
	http.HandleFunc("/me/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "/me/ called")
	})
	fmt.Println("Starting server on http://localhost:8080")
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
|
package str
import (
"fmt"
"strconv"
"strings"
)
// IdsInt64 parses a separated list of integer ids into an []int64. sep[0]
// optionally overrides the default "," separator; a single-space separator
// splits on any run of whitespace. Empty and unparsable entries are skipped.
func IdsInt64(ids string, sep ...string) []int64 {
	if ids == "" {
		return []int64{}
	}
	delim := ","
	if len(sep) > 0 {
		delim = sep[0]
	}
	var parts []string
	if delim == " " {
		parts = strings.Fields(ids)
	} else {
		parts = strings.Split(ids, delim)
	}
	ret := make([]int64, 0, len(parts))
	for _, p := range parts {
		if p == "" {
			continue
		}
		if id, err := strconv.ParseInt(p, 10, 64); err == nil {
			ret = append(ret, id)
		}
	}
	return ret
}
// IdsString joins ids into one string separated by sep[0] (default ",").
func IdsString(ids []int64, sep ...string) string {
	delim := ","
	if len(sep) > 0 {
		delim = sep[0]
	}
	parts := make([]string, len(ids))
	for i, id := range ids {
		parts[i] = strconv.FormatInt(id, 10)
	}
	return strings.Join(parts, delim)
}
|
package chart
import (
"bytes"
"fmt"
"strings"
"github.com/pkg/errors"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chartutil"
"helm.sh/helm/v3/pkg/engine"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/releaseutil"
)
// helm.sh/helm/v3/pkg/action/install.go
// notesFileSuffix identifies chart NOTES.txt files, which are excluded from
// rendered manifests.
const notesFileSuffix = "NOTES.txt"
// RenderResources renders the templates in a chart, returning the sorted
// hooks and manifests. NOTES.txt files are stripped before sorting.
//
// Fix: on a manifest parse error the rendered sources were accumulated in a
// buffer "to help the user debug" but the buffer was then discarded; the
// error is now wrapped with that blob so the user actually sees it.
func RenderResources(ch *chart.Chart, caps *chartutil.Capabilities, values chartutil.Values) ([]*release.Hook, []releaseutil.Manifest, error) {
	hs := []*release.Hook{}
	b := bytes.NewBuffer(nil)
	if ch.Metadata.KubeVersion != "" {
		if !chartutil.IsCompatibleRange(ch.Metadata.KubeVersion, caps.KubeVersion.String()) {
			return hs, nil, errors.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.String())
		}
	}
	files, err := engine.Render(ch, values)
	if err != nil {
		return hs, nil, err
	}
	// NOTES.txt is user-facing text, not a manifest.
	for k := range files {
		if strings.HasSuffix(k, notesFileSuffix) {
			delete(files, k)
		}
	}
	// Sort hooks, manifests, and partials. Only hooks and manifests are returned,
	// as partials are not used after renderer.Render. Empty manifests are also
	// removed here.
	hs, manifests, err := releaseutil.SortManifests(files, caps.APIVersions, releaseutil.InstallOrder)
	if err != nil {
		// By catching parse errors here, we can prevent bogus releases from going
		// to Kubernetes.
		//
		// We return the files as a big blob of data to help the user debug parser
		// errors.
		for name, content := range files {
			if strings.TrimSpace(content) == "" {
				continue
			}
			fmt.Fprintf(b, "---\n# Source: %s\n%s\n", name, content)
		}
		return hs, manifests, errors.Wrap(err, b.String())
	}
	return hs, manifests, nil
}
// IsEvent reports whether x occurs in events.
func IsEvent(events []release.HookEvent, x release.HookEvent) bool {
	found := false
	for i := range events {
		if events[i] == x {
			found = true
			break
		}
	}
	return found
}
// IsChartInstallable validates if a chart can be installed.
//
// Only application-type charts (or charts with no declared type) are
// installable.
func IsChartInstallable(ch *chart.Chart) (bool, error) {
	if t := ch.Metadata.Type; t != "" && t != "application" {
		return false, errors.Errorf("%s charts are not installable", t)
	}
	return true, nil
}
|
package contacts
import (
"encoding/xml"
"github.com/davecgh/go-spew/spew"
. "gopkg.in/check.v1"
"regexp"
"testing"
"time"
)
const (
	// CONTACT_EXAMPLE is a captured Google Contacts (GData) Atom feed with a
	// single contact entry, used as the fixture for unmarshaling tests.
	// NOTE(review): the doubled quotes in the gd:etag attributes appear in
	// the captured payload and are preserved verbatim.
	CONTACT_EXAMPLE = `<?xml version="1.0" encoding="UTF-8"?>
<feed gd:etag=""SHk7eTVSLyt7I2A9XR5WGUkJRAA."" xmlns="http://www.w3.org/2005/Atom" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gContact="http://schemas.google.com/contact/2008" xmlns:gd="http://schemas.google.com/g/2005" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/">
<id>easycore.sync.bridge@gmail.com</id>
<updated>2017-02-01T10:02:29.701Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/contact/2008#contact"/>
<title>Easy Core's Contacts</title>
<link rel="alternate" type="text/html" href="https://www.google.com/"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="https://www.google.com/m8/feeds/contacts/easycore.sync.bridge%40gmail.com/full"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="https://www.google.com/m8/feeds/contacts/easycore.sync.bridge%40gmail.com/full"/>
<link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml" href="https://www.google.com/m8/feeds/contacts/easycore.sync.bridge%40gmail.com/full/batch"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/m8/feeds/contacts/easycore.sync.bridge%40gmail.com/full?max-results=25"/>
<author>
<name>Easy Core</name>
<email>easycore.sync.bridge@gmail.com</email>
</author>
<generator version="1.0" uri="http://www.google.com/m8/feeds">Contacts</generator>
<openSearch:totalResults>14</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry gd:etag=""QXo-cTVSLit7I2A9XR5WGUkJRQE."">
<id>http://www.google.com/m8/feeds/contacts/easycore.sync.bridge%40gmail.com/base/67e2e50a6ea2db</id>
<updated>2017-02-01T10:00:50.459Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2017-02-01T10:00:50.459Z</app:edited>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/contact/2008#contact"/>
<title>John Doah</title>
<link rel="http://schemas.google.com/contacts/2008/rel#photo" type="image/*" href="https://www.google.com/m8/feeds/photos/media/easycore.sync.bridge%40gmail.com/67e2e50a6ea2db"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/m8/feeds/contacts/easycore.sync.bridge%40gmail.com/full/67e2e50a6ea2db"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/m8/feeds/contacts/easycore.sync.bridge%40gmail.com/full/67e2e50a6ea2db"/>
<gd:name>
<gd:fullName>John Doah</gd:fullName>
<gd:givenName>John</gd:givenName>
<gd:familyName>Doah</gd:familyName>
</gd:name>
<gd:organization rel="http://schemas.google.com/g/2005#other">
<gd:orgName>Doah Enterprises</gd:orgName>
<gd:orgTitle>CEO</gd:orgTitle>
</gd:organization>
<gd:email rel="http://schemas.google.com/g/2005#home" address="john.doe@example.com" primary="true"/>
<gd:email rel="http://schemas.google.com/g/2005#work" address="john.work@example.com"/>
<gd:phoneNumber rel="http://schemas.google.com/g/2005#mobile">111 222 333</gd:phoneNumber>
<gd:phoneNumber rel="http://schemas.google.com/g/2005#work">111 333 444</gd:phoneNumber>
<gd:phoneNumber rel="http://schemas.google.com/g/2005#home">111 444 555</gd:phoneNumber>
<gd:phoneNumber rel="http://schemas.google.com/g/2005#main">111 555 666</gd:phoneNumber>
<gd:structuredPostalAddress rel="http://schemas.google.com/g/2005#home">
<gd:formattedAddress>Street 10
POBox
Vyton, Praha, Praha 11000
Czech republic</gd:formattedAddress>
<gd:street>Street 10</gd:street>
<gd:pobox>POBox</gd:pobox>
<gd:neighborhood>Vyton</gd:neighborhood>
<gd:city>Praha</gd:city>
<gd:region>Praha country</gd:region>
<gd:postcode>11000</gd:postcode>
<gd:country>Czech republic</gd:country>
</gd:structuredPostalAddress>
<gContact:groupMembershipInfo deleted="false" href="http://www.google.com/m8/feeds/groups/easycore.sync.bridge%40gmail.com/base/6"/>
</entry>
</feed>`
)
// ContactSuite groups the gocheck tests for contact (un)marshaling.
type ContactSuite struct{}

// Register the suite with gocheck.
var _ = Suite(new(ContactSuite))

// TestContact hooks the gocheck runner into the standard test binary.
func TestContact(t *testing.T) { TestingT(t) }
// TestMarshal checks that a Contact with two emails serializes to the
// expected GData Atom entry XML.
func (s *ContactSuite) TestMarshal(c *C) {
	contact := new(Contact)
	contact.IdUrl = "http://www.google.com/m8/feeds/contacts/easycore.sync.bridge%40gmail.com/base/5d3bbddc8bae90a3"
	contact.Title = "Frank Doe"
	var err error
	contact.Updated, err = time.Parse(time.RFC3339, "2017-01-18T16:09:09.345Z")
	c.Assert(err, IsNil)
	mail1 := NewEmail("frank.doe@example.com", "other", true)
	mail2 := NewEmail("jane.doe@example.com", "home", false)
	contact.Emails = []*Email{&mail1, &mail2}
	out, err := xml.Marshal(contact)
	c.Assert(err, IsNil)
	expected := `<entry xmlns="http://www.w3.org/2005/Atom">
<id>http://www.google.com/m8/feeds/contacts/easycore.sync.bridge%40gmail.com/base/5d3bbddc8bae90a3</id>
<updated>2017-01-18T16:09:09.345Z</updated>
<title>Frank Doe</title>
<email xmlns="http://schemas.google.com/g/2005" rel="http://schemas.google.com/g/2005#other" address="frank.doe@example.com" primary="true"></email>
<email xmlns="http://schemas.google.com/g/2005" rel="http://schemas.google.com/g/2005#home" address="jane.doe@example.com"></email>
</entry>
`
	// Collapse the indentation and newlines of the readable expected string
	// so it matches xml.Marshal's compact output.
	reSpace := regexp.MustCompile(` +`)
	expected = reSpace.ReplaceAllString(expected, " ")
	re := regexp.MustCompile(`\r?\n ?`)
	expected = re.ReplaceAllString(expected, "")
	c.Assert(string(out), Equals, expected)
}
// TestUnmarshal checks that the captured CONTACT_EXAMPLE feed decodes into
// the expected name, emails, address, and phone numbers.
func (s *ContactSuite) TestUnmarshal(c *C) {
	into := new(contactFeed)
	err := xml.Unmarshal([]byte(CONTACT_EXAMPLE), &into)
	c.Assert(err, IsNil)
	c.Assert(len(into.Contacts), Equals, 1)
	c.Assert(into.Contacts[0].Name.FullName, Equals, "John Doah")
	c.Assert(len(into.Contacts[0].Emails), Equals, 2)
	c.Assert(into.Contacts[0].Emails[0].Address, Equals, "john.doe@example.com")
	c.Assert(into.Contacts[0].Emails[0].GetType(), Equals, "home")
	c.Assert(into.Contacts[0].Emails[0].Primary, Equals, true)
	c.Assert(len(into.Contacts[0].Addresses), Equals, 1)
	c.Assert(into.Contacts[0].Addresses[0].Street, Equals, "Street 10")
	c.Assert(into.Contacts[0].Addresses[0].POBox, Equals, "POBox")
	c.Assert(into.Contacts[0].Addresses[0].City, Equals, "Praha")
	c.Assert(into.Contacts[0].Addresses[0].Postcode, Equals, "11000")
	c.Assert(into.Contacts[0].Addresses[0].Country, Equals, "Czech republic")
	c.Assert(len(into.Contacts[0].Phones), Equals, 4)
	c.Assert(into.Contacts[0].Phones[0].Number, Equals, "111 222 333")
	c.Assert(into.Contacts[0].Phones[0].GetType(), Equals, "mobile")
	c.Assert(into.Contacts[0].Phones[1].Number, Equals, "111 333 444")
	c.Assert(into.Contacts[0].Phones[1].GetType(), Equals, "work")
	// NOTE(review): spew.Dump looks like leftover debug output; removing it
	// would orphan the spew import, so it is kept for now.
	spew.Dump(into)
}
|
package main
import (
"lesson/structStudy/stack/stackType"
)
// main is the entry point: it exercises BaseStack by pushing three values,
// printing the stack, popping once, and printing again.
func main() {
	stack1 := new(stackType.BaseStack)
	stack1.Push(123)
	stack1.Push(43)
	stack1.Push(56)
	// String is presumably expected to print/format the stack contents —
	// TODO confirm, its return value (if any) is unused here.
	stack1.String()
	stack1.Pop()
	stack1.String()
}
|
package main
import (
"fmt"
. "leetcode"
)
// main builds a small sample tree and prints its level-order traversal.
//
//	    3
//	   / \
//	  9   20
//	 /   /  \
//	8   15   7
func main() {
	fmt.Println(levelOrder(&TreeNode{
		Val:  3,
		Left: &TreeNode{Val: 9, Left: &TreeNode{Val: 8}},
		Right: &TreeNode{
			Val:   20,
			Left:  &TreeNode{Val: 15},
			Right: &TreeNode{Val: 7},
		},
	}))
}
// levelOrder returns the values of the tree level by level, flattened into
// a single slice. It recurses depth-first while bucketing values by depth.
//
// Fix: a leftover debug fmt.Println inside the traversal was removed.
func levelOrder(root *TreeNode) []int {
	var levels [][]int
	var walk func(node *TreeNode, depth int)
	walk = func(node *TreeNode, depth int) {
		if node == nil {
			return
		}
		// Grow the bucket list lazily as deeper levels are discovered.
		if len(levels) <= depth {
			levels = append(levels, []int{})
		}
		levels[depth] = append(levels[depth], node.Val)
		walk(node.Left, depth+1)
		walk(node.Right, depth+1)
	}
	walk(root, 0)
	var flat []int
	for _, level := range levels {
		flat = append(flat, level...)
	}
	return flat
}
|
package LeetCode
// LongestCommonPrefix returns the longest prefix (as raw bytes) shared by
// every string in strs; it returns "" for an empty slice.
//
// Fix: the original used c == 0 as an "unset" sentinel while scanning each
// column, so any NUL byte in a string was treated as "not yet seen" and the
// comparison for that string was skipped — e.g. {"\x00a", "xa"} yielded "x"
// instead of "". This version compares against the first string directly.
func LongestCommonPrefix(strs []string) string {
	if len(strs) == 0 {
		return ""
	}
	candidate := strs[0]
	for i := 0; i < len(candidate); i++ {
		for _, s := range strs {
			if i >= len(s) || s[i] != candidate[i] {
				return candidate[:i]
			}
		}
	}
	return candidate
}
|
package main
import (
"fmt"
)
// fibs memoizes previously computed Fibonacci values so the recursion runs
// in O(n) per fresh argument.
var fibs = map[int]int{}

// fibonacci returns the n-th Fibonacci number with fibonacci(0) == 1 and
// fibonacci(1) == 1.
//
// Fix: the memo was a fixed [10]int, so any n >= 10 panicked with an index
// out of range; a map supports arbitrary non-negative n.
func fibonacci(n int) (ret int) {
	if v, ok := fibs[n]; ok {
		return v
	}
	if n <= 1 {
		return 1
	}
	ret = fibonacci(n-1) + fibonacci(n-2)
	fibs[n] = ret
	return ret
}
// main prints the first ten Fibonacci numbers.
func main() {
	for i := 0; i < 10; i++ {
		fmt.Printf("fibonaccif (%d) is: %d\n", i, fibonacci(i))
	}
}
|
package day09
import (
"fmt"
"strconv"
)
// Run solves both parts of the puzzle: parse the input into integers, find
// the first value that breaks the XMAS rule (part 1), then the encryption
// weakness derived from it (part 2).
func Run(lines []string) error {
	numbers := make([]int, 0, len(lines))
	for _, line := range lines {
		n, err := strconv.Atoi(line)
		if err != nil {
			return fmt.Errorf("error parsing line: %v", err)
		}
		numbers = append(numbers, n)
	}
	firstInvalid, err := FirstInvalidValue(numbers, 25)
	if err != nil {
		return err
	}
	fmt.Println("Part 1:", firstInvalid)
	weakness, err := FindWeakness(numbers, firstInvalid)
	if err != nil {
		return err
	}
	fmt.Println("Part 2:", weakness)
	return nil
}
// IsValid reports whether value equals the sum of two entries of sequence
// taken from distinct positions (equal values at different indices count).
func IsValid(sequence []int, value int) bool {
	for i := 0; i < len(sequence); i++ {
		// The sum is symmetric, so only pairs with j > i need checking.
		for j := i + 1; j < len(sequence); j++ {
			if sequence[i]+sequence[j] == value {
				return true
			}
		}
	}
	return false
}
// FirstInvalidValue returns the first entry of xmasCode that is not the sum
// of two distinct values among the preceding preambleLength entries.
func FirstInvalidValue(xmasCode []int, preambleLength int) (int, error) {
	for i := preambleLength; i < len(xmasCode); i++ {
		window := xmasCode[i-preambleLength : i]
		if IsValid(window, xmasCode[i]) {
			continue
		}
		return xmasCode[i], nil
	}
	return 0, fmt.Errorf("there is no invalid value")
}
// FindWeakness locates a contiguous run summing to target and returns the
// sum of that run's smallest and largest values.
func FindWeakness(data []int, target int) (int, error) {
	start, end, err := FindContiguousSum(data, target)
	if err != nil {
		return 0, err
	}
	lo, hi := data[start], data[start]
	for _, v := range data[start : end+1] {
		if v < lo {
			lo = v
		}
		if v > hi {
			hi = v
		}
	}
	return lo + hi, nil
}
// FindContiguousSum returns the inclusive index range [start, end] of the
// first contiguous run of at least two entries summing to target.
//
// Fix: the original recomputed sumRange for every (start, end) pair, making
// the search O(n^3); a running sum per start index brings it to O(n^2).
func FindContiguousSum(data []int, target int) (int, int, error) {
	for start := range data {
		sum := data[start]
		for end := start + 1; end < len(data); end++ {
			sum += data[end]
			if sum == target {
				return start, end, nil
			}
		}
	}
	return 0, 0, fmt.Errorf("no contiguous sum found")
}
// sumRange returns the sum of data[start..end] inclusive.
func sumRange(data []int, start, end int) int {
	total := 0
	for i := start; i <= end; i++ {
		total += data[i]
	}
	return total
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"net/http"
"os"
"sort"
"strconv"
"strings"
"sync"
"text/tabwriter"
"time"
"golang.org/x/exp/slices"
"golang.org/x/exp/slog"
)
const (
	// urlDomain is the Cineworld site every API request is made against.
	urlDomain = "https://www.cineworld.co.uk"
	// fmtURLPath is the film-events endpoint path; it takes a cinema ID and
	// a yyyy-mm-dd date (see main).
	fmtURLPath = "/uk/data-api-service/v1/quickbook/10108/film-events/in-cinema/%s/at-date/%s"
)
// Command-line configuration; flag.Parse is called at the top of main.
//
//nolint:gochecknoglobals // Flags to pass in arguments with.
var (
	cinemaID   = flag.String("c", "073", "ID of cinema to pull screenings for")
	futureDays = flag.Int("f", 0, "start listing from this many days into the future")
	include3d  = flag.Bool("3", false, "include screenings in 3D")
	listDays   = flag.Int("l", 1, "retrieve listings from this many days")
)
// main fetches the configured cinema's listings for each requested day
// concurrently, stores them keyed by date, and prints them in
// chronological order.
func main() {
	// Parse any flags passed in.
	flag.Parse()

	// Set up logging.
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout)))

	// Create somewhere to store results.
	respStore := responseStorage{
		responses: make(map[time.Time]Response),
		mx:        sync.Mutex{},
	}

	// Range across number of days we want listings from and request them all concurrently.
	wgDays := sync.WaitGroup{}
	for daysIntoFuture := 0; daysIntoFuture < *listDays; daysIntoFuture++ {
		wgDays.Add(1)
		go func(j int) {
			defer wgDays.Done()
			localDate := time.Now().AddDate(0, 0, *futureDays+j).Local()
			fLocalDate := localDate.Format("2006-01-02")
			url := urlDomain + fmt.Sprintf(fmtURLPath, *cinemaID, fLocalDate)
			// Derived logger with URL attached.
			slogw := slog.Default().With(slog.String("url", url))
			res, err := http.Get(url)
			if err != nil {
				slogw.Error("getting URL", err)
				return
			}
			defer res.Body.Close()
			body, err := io.ReadAll(res.Body)
			// Fix: check the read error before the status branch — previously
			// a failed read was reported as "response failed" carrying a nil
			// error, and the real read error was only checked afterwards.
			if err != nil {
				slogw.Error("reading response body", err)
				return
			}
			if res.StatusCode >= http.StatusMultipleChoices {
				slogw.Error("response failed", fmt.Errorf("unexpected status %d", res.StatusCode),
					slog.Int("status", res.StatusCode), slog.String("body", string(body)))
				return
			}
			var filmEvents Response
			if err := json.Unmarshal(body, &filmEvents); err != nil {
				slogw.Error("unmarshaling response body", err)
				return
			}
			// Store result in map.
			respStore.mx.Lock()
			respStore.responses[localDate] = filmEvents
			respStore.mx.Unlock()
		}(daysIntoFuture)
	}
	wgDays.Wait()

	// Get keys from response storage, to iterate through the map in chronological order and print.
	dateKeys := make([]time.Time, 0, len(respStore.responses))
	for key := range respStore.responses {
		dateKeys = append(dateKeys, key)
	}
	sort.Slice(dateKeys, func(i, j int) bool {
		return dateKeys[i].Before(dateKeys[j])
	})
	for _, key := range dateKeys {
		fmt.Print(respStore.responses[key])
	}
}
// responseStorage collects per-day API responses written by the concurrent
// fetchers in main; mx guards the responses map.
type responseStorage struct {
	responses map[time.Time]Response
	mx        sync.Mutex
}
// Response is the top-level payload returned by the film-events endpoint.
//
// Response and its children structs were all generated thanks to [JSON-to-Go].
//
// [JSON-to-Go]: https://mholt.github.io/json-to-go/
type Response struct {
	Body Body `json:"body"`
}
// String renders the response by delegating to its Body's String method.
func (r Response) String() string {
	return r.Body.String()
}
// Body carries the films showing and their screening events for one day.
type Body struct {
	Films  []Film  `json:"films"`
	Events []Event `json:"events"`
}
// String renders the day's listings as a tab-aligned table: a date header
// (taken from the first event) followed by one row per film listing its
// screening times at the requested cinema.
func (b Body) String() string {
	// Closures that order Film structs.
	name := func(f1, f2 *Film) bool {
		return f1.Name < f2.Name
	}
	increasingLength := func(f1, f2 *Film) bool {
		return f1.Length < f2.Length
	}
	// decreasingLength := func(f1, f2 *Film) bool {
	// 	return f1.Length > f2.Length
	// }
	// Sort films by name, breaking ties by running time.
	OrderedBy(name, increasingLength).Sort(b.Films)
	// OrderedBy(name, decreasingLength).Sort(b.Films)
	// OrderedBy(decreasingLength, name).Sort(b.Films)
	var sBuilder strings.Builder
	tabW := new(tabwriter.Writer)
	tabW.Init(&sBuilder, 0, 0, 3, ' ', 0) //nolint:gomnd // Arbitrary padding value.
	// Header: the date portion of the first event, upgraded to include the
	// weekday when it parses cleanly.
	if len(b.Events) >= 1 {
		xedt := strings.Split(b.Events[0].EventDateTime, "T")
		dateHeader := xedt[0]
		parsedTime, err := time.Parse("2006-01-02", xedt[0])
		if err != nil {
			slog.Error("parsing event date time", slog.String("input", xedt[0]), slog.Any("err", err))
		} else {
			dateHeader = parsedTime.Format("2006-01-02 Monday")
		}
		fmt.Fprintf(tabW, "\n%s\n", dateHeader)
	}
	for _, film := range b.Films {
		fmt.Fprintf(tabW, "%s\t", film)
		firstEvent := true
		for _, event := range b.Events {
			// Check if event's string representation is non-zero length before checking further.
			if event.String() == "" {
				continue
			}
			// Only screenings of this film at the requested cinema.
			if event.FilmID == film.ID && event.CinemaID == *cinemaID {
				if !firstEvent {
					fmt.Fprint(tabW, " ")
				}
				fmt.Fprintf(tabW, "%s", event)
				firstEvent = false
			}
		}
		fmt.Fprintln(tabW, "\t")
	}
	tabW.Flush()
	return sBuilder.String()
}
// Film is one movie entry in the API response.
type Film struct {
	ID           string   `json:"id"`
	Name         string   `json:"name"`
	PosterLink   string   `json:"posterLink"`
	VideoLink    string   `json:"videoLink"`
	Link         string   `json:"link"`
	ReleaseYear  string   `json:"releaseYear"`
	AttributeIDs []string `json:"attributeIds"`
	Length       int      `json:"length"` // running time in minutes (see Film.String)
	Weight       int      `json:"weight"`
}
// String formats the film as "Name<TAB>running-time", where the running time
// is Length minutes rendered like "2h15m" (the trailing "0s" produced by
// Duration.String is trimmed).
func (f Film) String() string {
	runningTime, err := time.ParseDuration(strconv.Itoa(f.Length) + "m")
	if err != nil {
		// Previously this error was silently discarded. The input is built
		// from an int so this cannot realistically fail, but fall back to
		// the raw minute count rather than printing a zero duration.
		return fmt.Sprintf("%s\t%4dm", f.Name, f.Length)
	}
	return fmt.Sprintf("%s\t%5s", f.Name,
		strings.TrimSuffix(runningTime.Truncate(time.Minute).String(), "0s"))
}
// lessFunc compares two films, reporting whether p1 orders before p2.
type lessFunc func(p1, p2 *Film) bool

// MultiSorter implements the Sort interface, sorting the changes within.
// Its lessFuncs are consulted in order until one discriminates (see Less).
type MultiSorter struct {
	films     []Film
	lessFuncs []lessFunc
}
// Sort sorts the argument slice according to the less functions passed to OrderedBy.
// It stores the slice on the receiver so Len/Swap/Less can reach it, then
// delegates to sort.Sort; the slice is sorted in place.
func (ms *MultiSorter) Sort(changes []Film) {
	ms.films = changes
	sort.Sort(ms)
}
// OrderedBy returns a Sorter that sorts using the less functions, in order.
// Call its Sort method to sort the data; films stays nil until Sort is given
// a slice.
func OrderedBy(less ...lessFunc) *MultiSorter {
	return &MultiSorter{
		films:     nil,
		lessFuncs: less,
	}
}
// Len is part of sort.Interface. It reports the number of films being sorted.
func (ms *MultiSorter) Len() int {
	return len(ms.films)
}
// Swap is part of sort.Interface. It exchanges the films at indices i and j.
func (ms *MultiSorter) Swap(i, j int) {
	ms.films[i], ms.films[j] = ms.films[j], ms.films[i]
}
// Less is part of sort.Interface. The less functions are consulted in order;
// the first one that distinguishes the two films decides the result, and the
// final function acts as the tie-breaker. Note that each non-final function
// may be invoked twice per call; returning -1/0/1 comparators would halve
// the calls — an exercise for the reader.
func (ms *MultiSorter) Less(i, j int) bool {
	a, b := &ms.films[i], &ms.films[j]
	last := len(ms.lessFuncs) - 1
	for _, cmp := range ms.lessFuncs[:last] {
		if cmp(a, b) {
			// a < b: decided.
			return true
		}
		if cmp(b, a) {
			// a > b: decided.
			return false
		}
		// Equal under this comparison; fall through to the next one.
	}
	// Every earlier comparison reported equality; defer to the last one.
	return ms.lessFuncs[last](a, b)
}
// Event is one screening of a film at a cinema.
type Event struct {
	CompositeBookingLink CompositeBookingLink `json:"compositeBookingLink"`
	ID                   string               `json:"id"`
	FilmID               string               `json:"filmId"`
	CinemaID             string               `json:"cinemaId"`
	BusinessDay          string               `json:"businessDay"`
	EventDateTime        string               `json:"eventDateTime"`
	BookingLink          string               `json:"bookingLink"`
	PresentationCode     string               `json:"presentationCode"`
	Auditorium           string               `json:"auditorium"`
	AuditoriumTinyName   string               `json:"auditoriumTinyName"`
	AttributeIDs         []string             `json:"attributeIds"`
	SoldOut              bool                 `json:"soldOut"`
}
// String renders the event as its screening time — the portion of
// EventDateTime after the first 'T' — or the raw value when no 'T' is
// present. 3D screenings render as "" unless the -3 flag was given.
func (e Event) String() string {
	if !*include3d && slices.Contains(e.AttributeIDs, "3d") {
		slog.Debug("event is in 3D",
			slog.Any("attributeIDs", e.AttributeIDs),
			slog.String("dateTime", e.EventDateTime))
		return ""
	}
	pieces := strings.Split(e.EventDateTime, "T")
	if len(pieces) >= 2 {
		return pieces[1]
	}
	return e.EventDateTime
}
// CompositeBookingLink groups the several booking-URL variants the API returns.
type CompositeBookingLink struct {
	Type                  string      `json:"type"`
	BookingURL            BookingURL  `json:"bookingUrl"`
	ObsoleteBookingURL    string      `json:"obsoleteBookingUrl"`
	BlockOnlineSales      interface{} `json:"blockOnlineSales"`
	BlockOnlineSalesUntil interface{} `json:"blockOnlineSalesUntil"`
	ServiceURL            string      `json:"serviceUrl"`
}
// BookingURL is a booking endpoint plus its query parameters.
type BookingURL struct {
	URL    string `json:"url"`
	Params Params `json:"params"`
}
// Params are the query parameters attached to a BookingURL.
type Params struct {
	SiteCode string `json:"sitecode"`
	Site     string `json:"site"`
	ID       string `json:"id"`
	Lang     string `json:"lang"`
}
|
package collect
// Process-related collectors.
|
package vaku
import "fmt"
// PathDestroy takes in a PathInput and calls the native delete on 'mount/metadata/path'
// This function only works on versioned (V2) key/value mounts. Note that this destroys ALL
// versions at the path, there is no current support for destroying specific versions.
func (c *Client) PathDestroy(i *PathInput) error {
	// Initialize the input for a destroy operation.
	i.opType = "destroy"
	if err := c.InitPathInput(i); err != nil {
		return fmt.Errorf("failed to init destroy path %s: %w", i.Path, err)
	}

	// Do the actual destroy.
	if _, err := c.Logical().Delete(i.opPath); err != nil {
		return fmt.Errorf("failed to destroy secret at %s: %w", i.opPath, err)
	}

	// Return nil explicitly on success instead of the stale err variable.
	return nil
}
|
package config
import "github.com/xztaityozx/cpx/ff"
// Config is the application configuration: currently only the fuzzy-finder
// settings.
type Config struct {
	FuzzyFinder ff.FuzzyFinder
}
|
package main
import (
"fmt"
"os"
)
// main opens users.xml, reporting failure or success, and keeps the handle
// open (closed via defer) for later parsing.
func main() {
	// Open our xmlFile.
	xmlFile, err := os.Open("users.xml")
	if err != nil {
		// Fix: previously execution fell through after a failed Open,
		// printing "Successfully Opened" and deferring Close on a nil file.
		fmt.Println(err)
		return
	}
	// defer the closing of our xmlFile so that we can parse it later on.
	defer xmlFile.Close()

	fmt.Println("Successfully Opened users.xml")
}
|
package db
import (
"github.com/golang/glog"
"github.com/jinzhu/gorm"
//引入gorm
"github.com/feng/future/agfun/config"
_ "github.com/jinzhu/gorm/dialects/mysql"
"time"
)
// init connects the package-level DB handle using the configured MySQL
// address and starts the background ping/reconnect loop.
func init() {
	stdAddr := config.Conf().MysqlStr
	stdDB = NewDB(stdAddr)
	go timer(stdAddr)
}
// stdDB is the package-wide database handle, initialised in init and
// replaced by timer when the connection is lost.
var stdDB *DB

// DB wraps a gorm connection together with the address it was opened on.
type DB struct {
	*gorm.DB        // mysql client
	addr     string // the addr of db server
}
// NewDB opens a gorm MySQL connection.
// addr is the DSN, e.g. "user:password@/dbname?charset=utf8&parseTime=True&loc=Local".
// A connection failure is fatal: glog.Fatalln terminates the process.
func NewDB(addr string) *DB {
	glog.Infoln("NewDB****start")
	var temp = &DB{}
	temp.addr = addr
	db, err := gorm.Open("mysql", addr)
	if err != nil {
		glog.Fatalln("NewDB*******Init Fail", err)
	}
	glog.Infoln("NewDB********success")
	temp.DB = db
	return temp
}
// CreateTable creates the table(s) for the given model definition(s).
func (db *DB) CreateTable(models interface{}) {
	// Delegate explicitly to the embedded *gorm.DB: the previous body called
	// db.CreateTable(models), i.e. this very method, recursing forever
	// instead of ever reaching gorm.
	db.DB.CreateTable(models)
}
// timer pings the database every 5 seconds and rebuilds the package-level
// connection whenever the ping fails. It never returns.
func timer(addr string) {
	ticker := time.NewTicker(5 * time.Second)
	// Ranging over the ticker channel is equivalent to the select loop:
	// one iteration per tick, forever.
	for range ticker.C {
		if err := stdDB.DB.DB().Ping(); err != nil {
			glog.Errorln("mysql connect fail,err:", err)
			stdDB = NewDB(addr)
		}
	}
}
|
package client
import (
"github.com/weibocom/steem-rpc/transactions"
"github.com/weibocom/steem-rpc/types"
)
// CreateTransaction builds an unsigned transaction whose reference-block
// fields (RefBlockNum / RefBlockPrefix) are derived from the chain's current
// dynamic global properties.
func (c *Client) CreateTransaction() (*types.Transaction, error) {
	props, err := c.Database.GetDynamicGlobalProperties()
	if err != nil {
		return nil, err
	}
	refBlockPrefix, err := transactions.RefBlockPrefix(props.HeadBlockID)
	if err != nil {
		return nil, err
	}
	tx := &types.Transaction{
		RefBlockNum:    transactions.RefBlockNum(props.HeadBlockNumber),
		RefBlockPrefix: refBlockPrefix,
	}
	return tx, nil
}
// CreateSignedTransaction wraps a freshly prepared transaction in a
// SignedTransaction envelope.
//
// NOTE(review): the creator, name, fee and jsonMeta parameters are currently
// unused — presumably an account-creation operation is meant to be appended
// here; confirm against callers before relying on them.
func (c *Client) CreateSignedTransaction(creator string, name string, fee int, jsonMeta string) (*transactions.SignedTransaction, error) {
	tx, err := c.CreateTransaction()
	if err != nil {
		return nil, err
	}
	return transactions.NewSignedTransaction(tx), nil
}
|
package main
import "fmt"
// panicAndRecover panics, catches the panic in a deferred handler, and uses
// the named result to report 42 to the caller.
func panicAndRecover() (x int) {
	defer func() {
		// recover returns non-nil exactly when a panic is in flight.
		if recover() != nil {
			x = 42
		}
	}()
	panic("failed: unrecoverable error")
}
// main demonstrates recover(): it prints 42, the value installed by the
// deferred recovery handler in panicAndRecover.
func main() {
	fmt.Println(panicAndRecover())
}
|
package redis
import (
"ads.cost.com/config"
"ads.cost.com/logger"
"github.com/go-redis/redis"
"github.com/pkg/errors"
"go.uber.org/zap"
)
// InitRedis builds a redis client from the application config and verifies
// connectivity with a PING, logging the reply on success.
func InitRedis() error {
	conf := config.GetConfig()
	// Local renamed from RedisClient: locals are lower-cased in Go.
	client := redis.NewClient(&redis.Options{
		Addr:     conf.RedisConfig.Addr,
		Password: conf.RedisConfig.Password,
		DB:       conf.RedisConfig.DB,
	})
	pong, err := client.Ping().Result()
	if err != nil {
		return errors.Wrap(err, "InitRedis")
	}
	logger.GetLogger().Info("Redis ping:", zap.String("ping", pong))
	return nil
}
|
package analyze
import (
"fmt"
"regexp"
"strings"
"testing"
"time"
"github.com/devspace-cloud/devspace/pkg/devspace/kubectl"
fakekube "github.com/devspace-cloud/devspace/pkg/devspace/kubectl/testing"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/mgutz/ansi"
"gotest.tools/assert"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
// podTestCase describes one scenario for TestPods: an initial pod, an
// optional replacement applied asynchronously, and the expected analyzer
// output.
type podTestCase struct {
	name             string     // test case label
	wait             bool       // whether the analyzer runs with Options.Wait
	pod              k8sv1.Pod  // pod created in the fake cluster up front
	updatedPod       *k8sv1.Pod // optional pod update applied after ~0.5s
	expectedProblems []string   // expected problem-report lines ("" lines stripped of timestamps)
	expectedErr      string     // expected error message; "" means no error
}
// TestPods runs the analyzer's pod inspection against a fake Kubernetes
// cluster for a table of scenarios, optionally mutating the pod from a
// background goroutine to exercise the wait path, then compares the
// timestamp-stripped problem report with the expectation.
func TestPods(t *testing.T) {
	testCases := []podTestCase{
		podTestCase{
			name: "Wait for pod in creation",
			wait: true,
			pod: k8sv1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "testPod",
				},
				Status: k8sv1.PodStatus{
					Reason: kubectl.WaitStatus[0],
				},
			},
			updatedPod: &k8sv1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "testPod",
				},
				Status: k8sv1.PodStatus{
					Reason:    "Running",
					StartTime: &metav1.Time{Time: time.Now().Add(-MinimumPodAge * 2)},
				},
			},
		},
		podTestCase{
			name: "Wait for pod in initialization",
			wait: true,
			pod: k8sv1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "testPod",
				},
				Status: k8sv1.PodStatus{
					Reason: "Init: something",
				},
			},
			updatedPod: &k8sv1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "testPod",
				},
				Status: k8sv1.PodStatus{
					Reason:    "Running",
					StartTime: &metav1.Time{Time: time.Now().Add(-MinimumPodAge * 2)},
				},
			},
		},
		podTestCase{
			name: "Wait for minimalPodAge to pass",
			wait: true,
			pod: k8sv1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "testPod",
				},
				Status: k8sv1.PodStatus{
					Reason:    "Running",
					StartTime: &metav1.Time{Time: time.Now()},
				},
			},
			updatedPod: &k8sv1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "testPod",
				},
				Status: k8sv1.PodStatus{
					Reason:    "Running",
					StartTime: &metav1.Time{Time: time.Now().Add(-MinimumPodAge * 2)},
				},
			},
		},
		podTestCase{
			name: "Analyze pod with many problems",
			wait: false,
			pod: k8sv1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "testPod",
				},
				Status: k8sv1.PodStatus{
					Reason: "Error",
					ContainerStatuses: []k8sv1.ContainerStatus{
						k8sv1.ContainerStatus{
							Ready:        true,
							RestartCount: 1,
							LastTerminationState: k8sv1.ContainerState{
								Terminated: &k8sv1.ContainerStateTerminated{
									FinishedAt: metav1.Time{Time: time.Now().Add(-IgnoreRestartsSince * 2)},
									ExitCode:   int32(1),
									Message:    "someMessage",
									Reason:     "someReason",
								},
							},
						},
						k8sv1.ContainerStatus{
							State: k8sv1.ContainerState{
								Terminated: &k8sv1.ContainerStateTerminated{
									FinishedAt: metav1.Time{Time: time.Now().Add(-IgnoreRestartsSince * 2)},
									Message:    "someMessage2",
									Reason:     "someReason2",
									ExitCode:   int32(2),
								},
							},
						},
					},
					InitContainerStatuses: []k8sv1.ContainerStatus{
						k8sv1.ContainerStatus{
							Ready: true,
						},
						k8sv1.ContainerStatus{
							State: k8sv1.ContainerState{
								Waiting: &k8sv1.ContainerStateWaiting{
									Message: "someMessage3",
									Reason:  "someReason3",
								},
							},
						},
					},
				},
			},
			expectedProblems: []string{
				fmt.Sprintf("Pod %s:", ansi.Color("testPod", "white+b")),
				fmt.Sprintf(" Status: %s", ansi.Color("Init:0/0", "yellow+b")),
				fmt.Sprintf(" Container: %s/2 running", ansi.Color("1", "red+b")),
				" Problems: ",
				fmt.Sprintf(" - Container: %s", ansi.Color("", "white+b")),
				fmt.Sprintf(" Status: %s (reason: %s)", ansi.Color("Terminated", "red+b"), ansi.Color("someReason2", "red+b")),
				fmt.Sprintf(" Message: %s", ansi.Color("someMessage2", "white+b")),
				fmt.Sprintf(" Last Execution Log: \n%s", ansi.Color("ContainerLogs", "red")),
				" InitContainer Problems: ",
				fmt.Sprintf(" - Container: %s", ansi.Color("", "white+b")),
				fmt.Sprintf(" Status: %s (reason: %s)", ansi.Color("Waiting", "red+b"), ansi.Color("someReason3", "red+b")),
				fmt.Sprintf(" Message: %s", ansi.Color("someMessage3", "white+b")),
			},
		},
	}
	for _, testCase := range testCases {
		namespace := "testns"
		kubeClient := &fakekube.Client{
			Client: fake.NewSimpleClientset(),
		}
		kubeClient.Client.CoreV1().Namespaces().Create(&k8sv1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: namespace,
			},
		})
		kubeClient.Client.CoreV1().Pods(namespace).Create(&testCase.pod)
		analyzer := &analyzer{
			client: kubeClient,
			log:    log.Discard,
		}
		// Apply the updated pod after a short delay so the analyzer's wait
		// loop can observe the transition.
		go func() {
			time.Sleep(time.Second / 2)
			if testCase.updatedPod != nil {
				kubeClient.Client.CoreV1().Pods(namespace).Update(testCase.updatedPod)
			}
		}()
		problems, err := analyzer.pods(namespace, Options{Wait: testCase.wait})
		if testCase.expectedErr == "" {
			assert.NilError(t, err, "Error in testCase %s", testCase.name)
		} else {
			assert.Error(t, err, testCase.expectedErr, "Wrong or no error in testCase %s", testCase.name)
		}
		// Strip lines containing relative timestamps ("... ago") so the
		// comparison is deterministic.
		lineWithTimestamp := regexp.MustCompile("(?m)[\r\n]+^.*ago.*$")
		result := ""
		if len(problems) > 0 {
			result = lineWithTimestamp.ReplaceAllString(problems[0], "")
		}
		expectedString := ""
		if len(testCase.expectedProblems) > 0 {
			expectedString = paddingLeft + strings.Join(testCase.expectedProblems, paddingLeft+"\n") + "\n"
		}
		assert.Equal(t, result, expectedString, "Unexpected problem list in testCase %s", testCase.name)
	}
}
|
package srv
import (
"errors"
"fmt"
"github.com/golang/glog"
"qipai/dao"
"qipai/enum"
"qipai/game"
"qipai/model"
"qipai/utils"
"time"
)
// Club is the package-level singleton exposing club (tea-house) operations.
var Club clubSrv

// clubSrv groups the club service methods; it carries no state.
type clubSrv struct {
}
// Create persists a new club and, on success, enrols its owner as the first
// member.
func (this *clubSrv) Create(club *model.Club) (err error) {
	dao.Db().Save(club)
	if club.ID == 0 {
		// Save did not assign an ID, so the insert failed.
		err = errors.New("茶楼创建失败,请联系管理员")
		return
	}
	// After a successful save, join the owner to their own club.
	err = this.Join(club.ID, club.Uid)
	return
}
// MyClubs returns every club the user has joined with any non-pending status.
func (clubSrv) MyClubs(uid uint) (clubs []model.Club) {
	// Memberships with status <> 0 (i.e. not awaiting review).
	var memberships []model.ClubUser
	dao.Db().Where("uid = ? and status <> 0", uid).Find(&memberships)
	var ids []uint
	for _, m := range memberships {
		ids = append(ids, m.ClubId)
	}
	dao.Db().Where(ids).Find(&clubs)
	return
}
// Join enrols user userId into club clubId.
//
// Behaviour (unchanged): re-joining reports the pending/frozen state as an
// error; when the club requires approval and the joiner is not the boss the
// row is saved in the pending state and an informational message is returned
// through err.
func (clubSrv) Join(clubId, userId uint) (err error) {
	// Look up the club.
	var club model.Club
	dao.Db().First(&club, clubId)
	if club.ID == 0 {
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) — same message.
		err = fmt.Errorf("编号为%d的茶楼不存在", clubId)
		return
	}
	// Guard against joining twice.
	var clubUser model.ClubUser
	ret := dao.Db().Model(&model.ClubUser{}).Where(&model.ClubUser{Uid: userId, ClubId: clubId}).First(&clubUser)
	if !ret.RecordNotFound() {
		if clubUser.Status == enum.ClubUserWait {
			err = errors.New("等待管理审核中,请联系管理审核。")
			return
		} else if clubUser.Status == enum.ClubUserDisable {
			err = errors.New("您的账号已被该俱乐部管理员冻结")
			return
		}
		return
	}
	cu := &model.ClubUser{
		Uid:    userId,
		ClubId: clubId,
	}
	// No approval required, or the joiner is the boss: full member at once.
	if !club.Check || club.Uid == userId {
		cu.Status = enum.ClubUserVip
	}
	// Approval required and not the boss: report the pending state.
	if club.Check && club.Uid != userId {
		err = errors.New("加入成功,等待管理员审核")
	}
	dao.Db().Save(cu)
	return
}
// UpdateInfo updates a club's settings (approval flag, closed flag, name,
// scrolling text and notice). Only the club boss may edit.
func (clubSrv) UpdateInfo(clubId, bossUid uint, check, close bool, name, rollText, notice string) (err error) {
	var club model.Club
	dao.Db().First(&club, clubId)
	if club.ID == 0 {
		err = errors.New("该茶楼不存在")
		return
	}
	// Verify the caller is the boss.
	if club.Uid != bossUid {
		err = errors.New("您不是茶楼老板,无法编辑茶楼信息")
		return
	}
	club.Check = check
	club.Close = close
	club.Name = name
	club.Notice = notice
	club.RollText = rollText
	dao.Db().Save(&club)
	return
}
// IsClubUser reports whether the user has any membership row in the club
// (regardless of its status).
func (this *clubSrv) IsClubUser(userId, clubId uint) (ok bool) {
	var count int
	dao.Db().Model(&model.ClubUser{}).Where(&model.ClubUser{Uid: userId, ClubId: clubId}).Count(&count)
	ok = count > 0
	return
}
// ClubUser is the JSON projection of a club member joined with the user's
// profile fields (see Users).
type ClubUser struct {
	Id        uint              `json:"id"`
	Nick      string            `json:"nick"`
	Avatar    string            `json:"avatar"`
	ClubId    uint              `json:"clubId"` // club ID
	Status    enum.ClubUserType `json:"status"` // 0 awaiting review, 1 full member, 2 frozen member
	Admin     bool              `json:"admin"`  // whether the member is an administrator (true = admin)
	CreatedAt time.Time         `json:"-"`
	DeletedAt *time.Time        `json:"-"`
}
// Users lists every member of the club, joining club_users with users to
// include nick and avatar.
func (this *clubSrv) Users(clubId uint) (users []ClubUser) {
	dao.Db().Table("club_users").
		Select("users.id,users.nick, users.avatar,club_users.admin,club_users.status,club_users.created_at,club_users.deleted_at").
		Joins("join users on club_users.uid=users.id").Where("club_users.club_id = ?", clubId).Scan(&users)
	return
}
// getClubUser fetches the membership row for userId in clubId, returning an
// error when the user does not belong to the club or the row cannot be found.
func (this *clubSrv) getClubUser(clubId, userId uint) (cu model.ClubUser, err error) {
	if !this.IsClubUser(userId, clubId) {
		err = errors.New("用户不属于该茶楼")
		return
	}
	ret := dao.Db().Where(&model.ClubUser{ClubId: clubId, Uid: userId}).First(&cu)
	if ret.RecordNotFound() {
		err = errors.New("没在茶楼找到该用户")
		return
	}
	return
}
// SetAdmin grants (ok=true) or revokes (ok=false) administrator rights.
func (this *clubSrv) SetAdmin(clubId, userId uint, ok bool) (err error) {
	var cu model.ClubUser
	cu, err = this.getClubUser(clubId, userId)
	if err != nil {
		return
	}
	// Granting admin to an existing admin is rejected; revoking a non-admin
	// is silently accepted.
	if ok && cu.Admin {
		err = errors.New("该用户已经是管理员")
		return
	}
	cu.Admin = ok
	dao.Db().Save(&cu)
	return
}
// SetDisable freezes (ok=true) or unfreezes (ok=false) a member. Only full
// members can be frozen; unfreezing always restores full membership.
func (this *clubSrv) SetDisable(clubId, userId uint, ok bool) (err error) {
	var cu model.ClubUser
	cu, err = this.getClubUser(clubId, userId)
	if err != nil {
		return
	}
	if ok {
		if cu.Status != enum.ClubUserVip {
			err = errors.New("该用户还不是正式会员,无法冻结")
			return
		}
		cu.Status = enum.ClubUserDisable
	} else {
		cu.Status = enum.ClubUserVip
	}
	dao.Db().Save(&cu)
	return
}
// SetPay designates (ok=true) or clears (ok=false) the club's paying account
// (club.PayerUid).
func (this *clubSrv) SetPay(clubId, userId uint, ok bool) (err error) {
	var cu model.ClubUser
	cu, err = this.getClubUser(clubId, userId)
	if err != nil {
		return
	}
	var club model.Club
	dao.Db().First(&club, cu.ClubId)
	if club.ID == 0 {
		err = errors.New("没找到该茶楼")
		return
	}
	// When clearing, the target user must currently be the payer.
	if !ok && userId != club.PayerUid {
		err = errors.New("该账号不是代付账号")
		return
	}
	if ok {
		// Set as payer.
		club.PayerUid = userId
	} else {
		// Clear the payer.
		club.PayerUid = 0
	}
	dao.Db().Save(&club)
	return
}
// RemoveClubUser removes a member from the club. The current payer cannot be
// removed until the payer designation is cleared.
func (this *clubSrv) RemoveClubUser(clubId, userId uint) (err error) {
	_, err = this.getClubUser(clubId, userId)
	if err != nil {
		return
	}
	// The payer cannot be deleted directly.
	var club model.Club
	dao.Db().First(&club, clubId)
	if club.ID == 0 {
		err = errors.New("该茶楼不存在")
		return
	}
	if club.PayerUid != 0 && userId == club.PayerUid {
		err = errors.New("该用户是代付,请先取消代付之后再删除")
		return
	}
	// Hard-delete (Unscoped) the membership row.
	dao.Db().Unscoped().Where("club_id=? and uid=?", clubId, userId).Delete(model.ClubUser{})
	return
}
// IsAdmin reports whether opUid is an administrator of club clubId; a user
// who is not a member is never an admin.
func (this *clubSrv) IsAdmin(opUid, clubId uint) (ok bool) {
	if cu, err := this.getClubUser(clubId, opUid); err == nil {
		ok = cu.Admin
	}
	return
}
// IsBoss reports whether opUid owns (is the boss of) club clubId.
func (clubSrv) IsBoss(opUid, clubId uint) (ok bool) {
	var club model.Club
	dao.Db().First(&club, clubId)
	ok = club.Uid == opUid
	return
}
// GetClub returns club cid on behalf of user uid, who must be a member.
func (this *clubSrv) GetClub(uid, cid uint) (club model.Club, err error) {
	if !this.IsClubUser(uid, cid) {
		err = errors.New("您不是该茶楼成员")
		return
	}
	dao.Db().First(&club, cid)
	if club.ID == 0 {
		err = errors.New("没找到您指定的茶楼")
	}
	return
}
// DelClub dissolves a club. Only the boss may do it; all membership rows,
// the club itself and its rooms are deleted, then every online member is
// notified. Behavior is unchanged — this revision is a gofmt cleanup of the
// previously unformatted body.
func (this *clubSrv) DelClub(clubId, uid uint) (err error) {
	club, e := dao.Club.Get(clubId)
	if e != nil {
		err = e
		return
	}
	if club.Uid != uid {
		err = errors.New("您不是茶楼老板,无法解散茶楼!")
		return
	}
	users, e := dao.Club.GetClubUsers(clubId)
	if e != nil {
		err = e
		return
	}
	if e = dao.Club.DelClubUserByClubId(clubId); e != nil {
		err = e
		return
	}
	if e = dao.Club.Del(clubId); e != nil {
		err = e
		return
	}
	// Delete the rooms belonging to the club.
	dao.Db().Where(&model.Room{ClubId: clubId}).Delete(model.Room{})
	// Notify every online member that the club has been dissolved.
	for _, v := range users {
		p := game.GetPlayer(v.Uid)
		if p == nil {
			glog.V(3).Infoln(v.Uid, " 玩家不在线,无法通知")
			continue
		}
		utils.Msg("").AddData("clubId", clubId).AddData("uid", uid).Send(game.BroadcastDelClub, p.Session)
	}
	return
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//508. Most Frequent Subtree Sum
//Given the root of a tree, you are asked to find the most frequent subtree sum. The subtree sum of a node is defined as the sum of all the node values formed by the subtree rooted at that node (including the node itself). So what is the most frequent subtree sum value? If there is a tie, return all the values with the highest frequency in any order.
//Examples 1
//Input:
// 5
// / \
//2 -3
//return [2, -3, 4], since all the values happen only once, return all of them in any order.
//Examples 2
//Input:
// 5
// / \
//2 -5
//return [2], since 2 happens twice, however -5 only occur once.
//Note: You may assume the sum of values in any subtree is in the range of 32-bit signed integer.
///**
// * Definition for a binary tree node.
// * type TreeNode struct {
// * Val int
// * Left *TreeNode
// * Right *TreeNode
// * }
// */
//func findFrequentTreeSum(root *TreeNode) []int {
//}
// Time Is Money
|
package arbitrage
import (
"fmt"
"gotrading/core"
"gotrading/graph"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// This Ginkgo suite drives Arbitrage.Run over a hand-built two-hit path
// ([ABC/DEF]@Exchange1 bid followed by [XYZ/DEF]@Exchange1 ask) and asserts
// the resulting chain's volumes, rates, performance and orders. The large
// commented-out Describe blocks below are three-hit scenarios kept for
// reference.
var _ = Describe("Arbitrage in 3 steps, starting and finishing with ABC", func() {
	var (
		arbitrage Arbitrage
	)
	BeforeEach(func() {
		arbitrage = Arbitrage{}
	})
	Describe(`
	Considering the combination: [ABC/DEF]@Exhange1 -> [XYZ/DEF]@Exhange1 and the orderbooks:
	[ABC/DEF]@Exhange1 -> Best Bid: 1ABC = 2DEF, Best Ask: 1ABC = 2DEF
	[XYZ/DEF]@Exhange1 -> Best Bid: 1XYZ = 3DEF, Best Ask: 1XYZ = 3DEF`, func() {
		Context(`
		When I fulfill all the orders, running the arbitrage`, func() {
			var (
				chains []ArbitrageChain
				ob1    core.Orderbook
				ob2    core.Orderbook
				// ob3 core.Orderbook
				paths []graph.Path
			)
			BeforeEach(func() {
				// Two endpoints on one exchange sharing the DEF quote:
				// hit #1 sells ABC for DEF, hit #2 buys XYZ with DEF.
				exchange1 := core.Exchange{"Exchange1", make([]core.CurrencyPair, 0), nil}
				abc := core.Currency("ABC")
				def := core.Currency("DEF")
				xyz := core.Currency("XYZ")
				pair1 := core.CurrencyPair{abc, def}
				pair2 := core.CurrencyPair{xyz, def}
				bids1 := append([]core.Order{}, core.NewBid(pair1, 2, 6))
				asks1 := append([]core.Order{}, core.NewAsk(pair1, 2, 6))
				bids2 := append([]core.Order{}, core.NewBid(pair2, 3, 2))
				asks2 := append([]core.Order{}, core.NewAsk(pair2, 3, 2))
				ob1 = core.Orderbook{pair1, bids1, asks1, time.Now()}
				ob2 = core.Orderbook{pair2, bids2, asks2, time.Now()}
				endpoint1 := graph.Endpoint{abc, def, exchange1, &ob1}
				endpoint2 := graph.Endpoint{xyz, def, exchange1, &ob2}
				hits := make([]*graph.Hit, 2)
				hits[0] = &(graph.Hit{&endpoint1, true, &abc, &def})
				hits[1] = &(graph.Hit{&endpoint2, false, &xyz, &def})
				paths = make([]graph.Path, 1)
				paths[0] = graph.Path{hits, nil, nil, 0}
				chains = arbitrage.Run(paths)
			})
			It("should return one chain", func() {
				Expect(len(chains)).To(Equal(1))
			})
			It("should return one chain enforcing the initial volume to 3", func() {
				c := chains[0]
				fmt.Println(c)
				Expect(c.VolumeToEngage).To(Equal(3.0))
			})
			It("should return one chain announcing a performance equal to 1x", func() {
				c := chains[0]
				Expect(c.Performance).To(Equal(2. / 3.))
			})
			It("should return 2 as a rate for the node #1", func() {
				c := chains[0]
				Expect(c.Rates[0]).To(Equal(2.0))
			})
			It("should return 2./3. as an adjusted volume for the node #2", func() {
				c := chains[0]
				Expect(c.Rates[1]).To(Equal(2. / 3.))
			})
			It("should return 6 as an adjusted volume for the node #1", func() {
				c := chains[0]
				Expect(c.AdjustedVolumes[0]).To(Equal(6.))
			})
			It("should return 2 as an adjusted volume for the node #2", func() {
				c := chains[0]
				Expect(c.AdjustedVolumes[1]).To(Equal(2.))
			})
			It("should return 3 for the volume of the order corresponding to node #1", func() {
				c := chains[0]
				Expect(c.Orders[0].BaseVolume).To(Equal(3.))
			})
			It("should return 2 for the volume of the order corresponding to node #2", func() {
				c := chains[0]
				Expect(c.Orders[1].BaseVolume).To(Equal(2.))
			})
		})
	})
	// Describe(`
	// Considering the combination: [ABC/DEF]@Exhange1 -> [DEF/XYZ]@Exhange1 -> [XYZ/ABC]@Exhange1, and the orderbooks:
	// [ABC/DEF]@Exhange1 -> Best Bid: 1ABC = 10DEF, Best Ask: 1ABC = 10DEF #ABC=0, DEF=10, XYZ=0
	// [DEF/XYZ]@Exhange1 -> Best Bid: 1DEF = 10XYZ, Best Ask: 1DEF = 10XYZ #ABC=0, DEF=0, XYZ=100
	// [XYZ/ABC]@Exhange1 -> Best Bid: 1XYZ = 0.01ABC, Best Ask: 1XYZ = 0.01ABC`, func() {
	// Context(`
	// When I fulfill all the orders, running the arbitrage`, func() {
	// var (
	// chains []ArbitrageChain
	// ob1 core.Orderbook
	// ob2 core.Orderbook
	// ob3 core.Orderbook
	// paths []graph.Path
	// )
	// BeforeEach(func() {
	// exchange1 := core.Exchange{"Exchange1", make([]core.CurrencyPair, 0), nil}
	// abc := core.Currency("ABC")
	// def := core.Currency("DEF")
	// xyz := core.Currency("XYZ")
	// pair1 := core.CurrencyPair{abc, def}
	// pair2 := core.CurrencyPair{def, xyz}
	// pair3 := core.CurrencyPair{xyz, abc}
	// bids1 := append([]core.Order{}, core.NewBid(pair1, 10, 1))
	// asks1 := append([]core.Order{}, core.NewAsk(pair1, 10, 1))
	// bids2 := append([]core.Order{}, core.NewBid(pair2, 10, 10))
	// asks2 := append([]core.Order{}, core.NewAsk(pair2, 10, 1))
	// bids3 := append([]core.Order{}, core.NewBid(pair3, 0.01, 100))
	// asks3 := append([]core.Order{}, core.NewAsk(pair3, 0.01, 1))
	// ob1 = core.Orderbook{pair1, bids1, asks1, time.Now()}
	// ob2 = core.Orderbook{pair2, bids2, asks2, time.Now()}
	// ob3 = core.Orderbook{pair3, bids3, asks3, time.Now()}
	// endpoint1 := graph.Endpoint{abc, def, exchange1, &ob1}
	// endpoint2 := graph.Endpoint{def, xyz, exchange1, &ob2}
	// endpoint3 := graph.Endpoint{xyz, abc, exchange1, &ob3}
	// hits := make([]*graph.Hit, 3)
	// hits[0] = &(graph.Hit{&endpoint1, true, &abc, &def})
	// hits[1] = &(graph.Hit{&endpoint2, true, &def, &xyz})
	// hits[2] = &(graph.Hit{&endpoint3, true, &xyz, &abc})
	// paths = make([]graph.Path, 1)
	// paths[0] = graph.Path{hits, nil, nil}
	// chains = arbitrage.Run(paths)
	// })
	// It("should return one chain", func() {
	// Expect(len(chains)).To(Equal(1))
	// })
	// It("should return one chain enforcing the initial volume to 1", func() {
	// c := chains[0]
	// fmt.Println(c)
	// Expect(c.VolumeToEngage).To(Equal(1.0))
	// })
	// It("should return one chain announcing a performance equal to 1x", func() {
	// c := chains[0]
	// Expect(c.Performance).To(Equal(1.0))
	// })
	// It("should return one chain announcing a performance equal to 10x if 1XYZ = 0.10ABC instead of 1XYZ = 0.01ABC", func() {
	// ob3.Bids[0].Price = 0.10
	// chains = arbitrage.Run(paths)
	// c := chains[0]
	// Expect(c.Performance).To(Equal(10.0))
	// })
	// It("should return one chain announcing a performance equal to 10x if 1XYZ = 0.10ABC instead of 1XYZ = 0.01ABC", func() {
	// ob3.Bids[0].Price = 0.10
	// chains = arbitrage.Run(paths)
	// c := chains[0]
	// Expect(c.Performance).To(Equal(10.0))
	// })
	// It("should return one chain enforcing the initial volume to 0.1 if only 10 XYZ are available", func() {
	// ob3.Bids[0].BaseVolume = 10
	// chains = arbitrage.Run(paths)
	// c := chains[0]
	// Expect(c.VolumeToEngage).To(Equal(0.1))
	// })
	// })
	// })
	// Describe(`
	// Considering the combination: [ABC/DEF]@Exhange1 -> [XYZ/DEF]@Exhange1 -> [XYZ/ABC]@Exhange1, and the orderbooks:
	// [ABC/DEF]@Exhange1 -> Best Bid: 1ABC = 10DEF, Best Ask: 1ABC = 10DEF #ABC=0, DEF=10, XYZ=0
	// [XYZ/DEF]@Exhange1 -> Best Bid: 1XYZ = 0.01DEF, Best Ask: 1DEF = 0.1XYZ #ABC=0, DEF=0, XYZ=100
	// [XYZ/ABC]@Exhange1 -> Best Bid: 1XYZ = 0.1ABC, Best Ask: 1XYZ = 0.01ABC`, func() {
	// Context(`
	// When I fulfill all the orders, running the arbitrage`, func() {
	// var (
	// chains []ArbitrageChain
	// ob1 core.Orderbook
	// ob2 core.Orderbook
	// ob3 core.Orderbook
	// paths []graph.Path
	// )
	// BeforeEach(func() {
	// exchange1 := core.Exchange{"Exchange1", make([]core.CurrencyPair, 0), nil}
	// abc := core.Currency("ABC")
	// def := core.Currency("DEF")
	// xyz := core.Currency("XYZ")
	// pair1 := core.CurrencyPair{abc, def}
	// pair2 := core.CurrencyPair{xyz, def}
	// pair3 := core.CurrencyPair{xyz, abc}
	// bids1 := append([]core.Order{}, core.NewBid(pair1, 10, 1))
	// asks1 := append([]core.Order{}, core.NewAsk(pair1, 10, 1))
	// bids2 := append([]core.Order{}, core.NewBid(pair2, 0.01, 1000))
	// asks2 := append([]core.Order{}, core.NewAsk(pair2, 0.01, 1000))
	// bids3 := append([]core.Order{}, core.NewBid(pair3, 0.001, 1000))
	// asks3 := append([]core.Order{}, core.NewAsk(pair3, 0.001, 1000))
	// ob1 = core.Orderbook{pair1, bids1, asks1, time.Now()}
	// ob2 = core.Orderbook{pair2, bids2, asks2, time.Now()}
	// ob3 = core.Orderbook{pair3, bids3, asks3, time.Now()}
	// endpoint1 := graph.Endpoint{abc, def, exchange1, &ob1}
	// endpoint2 := graph.Endpoint{xyz, def, exchange1, &ob2}
	// endpoint3 := graph.Endpoint{xyz, abc, exchange1, &ob3}
	// hits := make([]*graph.Hit, 3)
	// hits[0] = &(graph.Hit{&endpoint1, true, &abc, &def})
	// hits[1] = &(graph.Hit{&endpoint2, false, &xyz, &def})
	// hits[2] = &(graph.Hit{&endpoint3, true, &xyz, &abc})
	// paths = make([]graph.Path, 1)
	// paths[0] = graph.Path{hits, nil, nil}
	// chains = arbitrage.Run(paths)
	// })
	// It("should return one chain", func() {
	// Expect(len(chains)).To(Equal(1))
	// })
	// It("should return one chain enforcing the initial volume to 1", func() {
	// c := chains[0]
	// fmt.Println(c)
	// Expect(c.VolumeToEngage).To(Equal(1.0))
	// })
	// It("should return one chain announcing a performance equal to 1x", func() {
	// c := chains[0]
	// Expect(c.Performance).To(Equal(1.0))
	// })
	// It("should return one chain announcing a performance equal to 10x if 1XYZ = 0.10ABC instead of 1XYZ = 0.01ABC", func() {
	// ob3.Bids[0].Price = 0.10
	// chains = arbitrage.Run(paths)
	// c := chains[0]
	// Expect(c.Performance).To(Equal(10.0))
	// })
	// It("should return one chain announcing a performance equal to 10x if 1XYZ = 0.10ABC instead of 1XYZ = 0.01ABC", func() {
	// ob3.Bids[0].Price = 0.10
	// chains = arbitrage.Run(paths)
	// c := chains[0]
	// Expect(c.Performance).To(Equal(10.0))
	// })
	// It("should return one chain enforcing the initial volume to 0.1 if only 10 XYZ are available", func() {
	// ob3.Bids[0].BaseVolume = 10
	// chains = arbitrage.Run(paths)
	// c := chains[0]
	// Expect(c.VolumeToEngage).To(Equal(0.1))
	// })
	// })
	// })
})
|
package mask
import (
"net"
"testing"
)
// TestValidMasks verifies that well-formed mask strings parse into the
// expected *net.IPNet values.
func TestValidMasks(t *testing.T) {
	cidrOrFatal := func(s string) *net.IPNet {
		_, ipn, err := net.ParseCIDR(s)
		if err != nil {
			t.Fatal(err)
		}
		return ipn
	}
	testCases := map[string]*net.IPNet{
		"/ip4/1.2.3.4/ipcidr/0":       cidrOrFatal("1.2.3.4/0"),
		"/ip4/1.2.3.4/ipcidr/32":      cidrOrFatal("1.2.3.4/32"),
		"/ip4/1.2.3.4/ipcidr/24":      cidrOrFatal("1.2.3.4/24"),
		"/ip4/192.168.0.0/ipcidr/28":  cidrOrFatal("192.168.0.0/28"),
		"/ip6/fe80::/ipcidr/0":        cidrOrFatal("fe80::/0"),
		"/ip6/fe80::/ipcidr/64":       cidrOrFatal("fe80::/64"),
		"/ip6/fe80::/ipcidr/128":      cidrOrFatal("fe80::/128"),
	}
	for s, m1 := range testCases {
		m2, err := NewMask(s)
		if err != nil {
			// These inputs are all valid; the original message claimed
			// "should be invalid" and discarded the parse error.
			t.Errorf("failed to parse valid mask %q: %v", s, err)
			continue
		}
		if m1.String() != m2.String() {
			t.Error("masks not equal:", m1, m2)
		}
	}
}
// TestInvalidMasks checks that malformed mask strings are rejected:
// structurally invalid strings must yield ErrInvalidFormat, while
// syntactically valid strings with out-of-range prefix lengths must
// yield some (any) error.
func TestInvalidMasks(t *testing.T) {
	malformed := []string{
		"/",
		"/ip4/10.1.2.3",
		"/ip6/::",
		"/ip4/1.2.3.4/cidr/24",
		"/ip6/fe80::/cidr/24",
		"/eth/aa:aa:aa:aa:aa/ipcidr/24",
		"foobar/ip4/1.2.3.4/ipcidr/32",
	}
	for _, tc := range malformed {
		if _, err := NewMask(tc); err != ErrInvalidFormat {
			t.Error("should be invalid:", tc)
		}
	}
	outOfRange := []string{
		"/ip4/1.2.3.4/ipcidr/33",
		"/ip4/192.168.0.0/ipcidr/-1",
		"/ip6/fe80::/ipcidr/129",
	}
	for _, tc := range outOfRange {
		if _, err := NewMask(tc); err == nil {
			t.Error("should be invalid:", tc)
		}
	}
}
// TestFiltered verifies Mask.Contains against hand-picked member and
// non-member addresses for a few masks.
func TestFiltered(t *testing.T) {
	var tests = map[string]map[string]bool{
		"/ip4/10.0.0.0/ipcidr/8": {
			"10.3.3.4":   true,
			"10.3.4.4":   true,
			"10.4.4.4":   true,
			"15.52.34.3": false,
		},
		"/ip4/192.168.0.0/ipcidr/16": {
			"192.168.0.0": true,
			"192.168.1.0": true,
			"192.1.0.0":   false,
			"10.4.4.4":    false,
		},
	}
	for mask, set := range tests {
		m, err := NewMask(mask)
		if err != nil {
			t.Fatal(err)
		}
		for addr, val := range set {
			ip := net.ParseIP(addr)
			if m.Contains(ip) != val {
				// %v, not %s: val is a bool; %s is rejected by go vet
				// and would print "%!s(bool=true)".
				t.Fatalf("expected contains(%s, %s) == %v", mask, addr, val)
			}
		}
	}
}
// TestParsing checks that a parsed mask's String() form matches the plain
// CIDR notation and that ConvertIPNet round-trips back to the original
// multiaddr-style representation.
func TestParsing(t *testing.T) {
	var addrs = map[string]string{
		"/ip4/192.168.0.0/ipcidr/16": "192.168.0.0/16",
		"/ip4/192.0.0.0/ipcidr/8":    "192.0.0.0/8",
		"/ip6/2001:db8::/ipcidr/32":  "2001:db8::/32",
	}
	for k, v := range addrs {
		m, err := NewMask(k)
		if err != nil {
			t.Fatal(err)
		}
		if m.String() != v {
			// The original called Fatalf with a format string containing
			// no verbs, which go vet rejects and which dropped m and v
			// from the failure output.
			t.Fatalf("mask is wrong: got %s, want %s", m, v)
		}
		orig, err := ConvertIPNet(m)
		if err != nil {
			t.Fatal(err)
		}
		if orig != k {
			t.Fatalf("backwards conversion failed: got %s, want %s", orig, k)
		}
	}
}
|
package main
import "testing"
// TestVisitableBuildItemWithTasksAndBugs builds a mixed backlog of tasks
// and bugs and lets an EstimationVisitor visit each item. It only exercises
// the Accept/visit wiring; no result is asserted.
func TestVisitableBuildItemWithTasksAndBugs(t *testing.T) {
	backlog := []Visitable{
		&Task{"Do stuff", 1},
		&Task{"Implement Foo Bar", 5},
		&Bug{"Error 500 on resource /foo/bar", 3},
	}
	visitor := new(EstimationVisitor)
	for _, item := range backlog {
		item.Accept(visitor)
	}
}
|
package service
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/google/uuid"
"github.com/sanguohot/medichain/chain"
"github.com/sanguohot/medichain/datacenter"
"github.com/sanguohot/medichain/etc"
"github.com/sanguohot/medichain/util"
)
// FileAddLogSimpleItem is the trimmed-down, JSON-serializable view of a
// single file-add log entry as returned to API consumers.
type FileAddLogSimpleItem struct {
	FileUUID string `json:"FileUuid"`
	OwnerUUID string `json:"OwnerUuid"`
	OrgUUID string `json:"OrgUuid"`
	// FileType is the human-readable type resolved from the on-chain
	// file-type hash via etc.FileTypeMap.
	FileType string `json:"FileType"`
	CreateTime uint64 `json:"CreateTime"`
}

// FileAddLogSimpleAction wraps one page of FileAddLogSimpleItem entries
// together with the total number of matching rows (for pagination).
type FileAddLogSimpleAction struct {
	Total uint64 `json:"Total"`
	List []FileAddLogSimpleItem `json:"List"`
}
// SetFileAddLogList fetches the complete file-add log list from the chain
// and mirrors it into the local sqlite datacenter.
func SetFileAddLogList() error {
	err, list := chain.ChainGetFileAddLogListAll()
	if err != nil {
		return err
	}
	return datacenter.SqliteSetFileAddLogList(list)
}
// GetFileAddLogList returns a paged list of file-add logs, optionally
// filtered by owner (resolved from idCartNo), organization and time window.
// It keeps the project-wide (error, result) return convention.
func GetFileAddLogList(idCartNo, orgUuidStr, fromTimeStr, toTimeStr, startStr, limitStr string) (error, *FileAddLogSimpleAction) {
	if orgUuidStr != "" {
		orgUuid, err := uuid.Parse(orgUuidStr)
		if err != nil {
			return err, nil
		}
		exist, err := chain.OrgsDataIsUuidExist(orgUuid)
		if err != nil {
			return err, nil
		}
		if !exist {
			return util.ErrOrgNotExist, nil
		}
	}
	var (
		ownerUuidStr string
		fl           []datacenter.FileAddLog
	)
	if idCartNo != "" {
		// Renamed from "uuid": the original local shadowed the imported
		// uuid package inside this branch.
		ownerUuid, err := chain.UsersDataGetUuidByIdCartNoHash(crypto.Keccak256Hash([]byte(idCartNo)))
		if err != nil {
			return err, nil
		}
		ownerUuidStr = ownerUuid.String()
	}
	err, startBig, limitBig := transformPagingParamFromStringToBigInt(startStr, limitStr)
	if err != nil {
		return err, nil
	}
	err, fromTime, toTime := transformTimeParamFromStringToUint64(fromTimeStr, toTimeStr)
	if err != nil {
		return err, nil
	}
	err, total := datacenter.SqliteGetFileAddLogTotal("", orgUuidStr, ownerUuidStr, fromTime, toTime)
	if err != nil {
		return err, nil
	}
	// Only hit the database for rows when the count says there are some.
	if total > 0 {
		err, fl = datacenter.SqliteGetFileAddLogList("", orgUuidStr, ownerUuidStr, fromTime, toTime, startBig.Uint64(), limitBig.Uint64())
		if err != nil {
			return err, nil
		}
	}
	return nil, wrapperToFileAddLogSimpleAction(total, fl)
}
// wrapperToFileAddLogSimpleAction converts raw datacenter rows into the
// API response shape. List is left nil (not an empty slice) when the input
// is empty, preserving the original JSON encoding of the field.
func wrapperToFileAddLogSimpleAction(total uint64, list []datacenter.FileAddLog) *FileAddLogSimpleAction {
	action := &FileAddLogSimpleAction{Total: total}
	for _, row := range list {
		action.List = append(action.List, FileAddLogSimpleItem{
			FileUUID:   row.FileUuid,
			OwnerUUID:  row.OwnerUuid,
			OrgUUID:    row.OrgUuid,
			FileType:   etc.FileTypeMap[common.HexToHash(row.FileTypeHash)],
			CreateTime: row.CreateTime,
		})
	}
	return action
}
// GetFileAddLogDetail looks up a single file-add log entry by file UUID,
// returning util.ErrFileNotExist when no row matches. The FileType field
// is blanked before the entry is returned.
func GetFileAddLogDetail(fileUuidStr string) (error, *datacenter.FileAddLog) {
	_, err := uuid.Parse(fileUuidStr)
	if err != nil {
		return err, nil
	}
	err, fl := datacenter.SqliteGetFileAddLogList(fileUuidStr, "", "", 0, 0, 0, 0)
	if err != nil {
		return err, nil
	}
	if len(fl) == 0 {
		return util.ErrFileNotExist, nil
	}
	// BUG FIX: the original blanked FileType on a copy but returned
	// &fl[0], so the field was never actually cleared. Clear the copy and
	// return the copy.
	detail := fl[0]
	detail.FileType = ""
	return nil, &detail
}
|
package lwwset
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
var (
	// lwwset is the package-level set shared by every test in this file;
	// each test resets it with Clear() before returning.
	lwwset LWWSet
)

// init seeds the shared test fixture with a fresh, empty LWWSet.
func init() {
	lwwset = Initialize()
}
// TestList checks the basic functionality of LWWSet List():
// it must report every unique value added to the set.
func TestList(t *testing.T) {
	lwwset, _ = lwwset.Addition("xx")
	_, got := lwwset.List()
	assert.Equal(t, []string{"xx"}, got)
	lwwset = Clear()
}
// TestList_UpdatedValue checks List() after several distinct additions:
// every unique value added must be reported.
func TestList_UpdatedValue(t *testing.T) {
	for _, v := range []string{"xx", "yy", "zz"} {
		lwwset, _ = lwwset.Addition(v)
	}
	_, got := lwwset.List()
	assert.Equal(t, []string{"xx", "yy", "zz"}, got)
	lwwset = Clear()
}
// TestList_ReAddValue checks List() when a value is re-added after being
// removed: the value must be present again.
func TestList_ReAddValue(t *testing.T) {
	lwwset, _ = lwwset.Addition("xx")
	lwwset, _ = lwwset.Removal("xx")
	lwwset, _ = lwwset.Addition("xx")
	_, got := lwwset.List()
	assert.Equal(t, []string{"xx"}, got)
	lwwset = Clear()
}
// TestList_RemoveValue checks List() after a mix of additions and removals
// (including removal of a value never added): only the values still present
// are reported.
func TestList_RemoveValue(t *testing.T) {
	lwwset, _ = lwwset.Addition("xx")
	lwwset, _ = lwwset.Removal("xx")
	lwwset, _ = lwwset.Removal("zz")
	_, got := lwwset.List()
	assert.Equal(t, []string{}, got)
	lwwset = Clear()
}
// TestList_RemoveEmpty checks List() when a removal targets a value that
// was never added: the set must stay empty.
func TestList_RemoveEmpty(t *testing.T) {
	lwwset, _ = lwwset.Removal("zz")
	_, got := lwwset.List()
	assert.Equal(t, []string{}, got)
	lwwset = Clear()
}
// TestList_NoValue checks List() on a fresh set: it must report an empty
// string slice.
func TestList_NoValue(t *testing.T) {
	_, got := lwwset.List()
	assert.Equal(t, []string{}, got)
	lwwset = Clear()
}
// TestClear checks the Clear() helper: it must drop every value that was
// previously added, leaving an empty set.
func TestClear(t *testing.T) {
	lwwset, _ = lwwset.Addition("xx1")
	lwwset, _ = lwwset.Addition("xx2")
	lwwset = Clear()
	_, got := lwwset.List()
	assert.Equal(t, []string{}, got)
	lwwset = Clear()
}
// TestClear_EmptyStore checks Clear() on an already-empty set: the result
// must still be an empty set.
func TestClear_EmptyStore(t *testing.T) {
	lwwset = Clear()
	_, got := lwwset.List()
	assert.Equal(t, []string{}, got)
	lwwset = Clear()
}
// TestLookup checks the basic functionality of Lookup(): it must report
// true for a value that is present in the set, with no error.
func TestLookup(t *testing.T) {
	lwwset, _ = lwwset.Addition("xx")
	got, err := lwwset.Lookup("xx")
	assert.Nil(t, err)
	assert.Equal(t, true, got)
	lwwset = Clear()
}
// TestLookup_NotPresent checks Lookup() for a value that was never added:
// it must report false, with no error.
func TestLookup_NotPresent(t *testing.T) {
	lwwset, _ = lwwset.Addition("xx")
	got, err := lwwset.Lookup("yy")
	assert.Nil(t, err)
	assert.Equal(t, false, got)
	lwwset = Clear()
}
// TestLookup_Removed checks Lookup() for a value that was added and then
// removed: it must report false, with no error. (The original header was a
// copy-paste of TestLookup_NotPresent's.)
func TestLookup_Removed(t *testing.T) {
	lwwset, _ = lwwset.Addition("xx")
	lwwset, _ = lwwset.Removal("xx")
	got, err := lwwset.Lookup("xx")
	assert.Nil(t, err)
	assert.Equal(t, false, got)
	lwwset = Clear()
}
// TestLookup_EmptySet checks Lookup() against an empty set: it must report
// false for any value, with no error.
func TestLookup_EmptySet(t *testing.T) {
	got, err := lwwset.Lookup("xx")
	assert.Nil(t, err)
	assert.Equal(t, false, got)
	lwwset = Clear()
}
// TestLookup_EmptyLookup checks Lookup() with an empty string: it must
// return an "empty value provided" error and report false.
func TestLookup_EmptyLookup(t *testing.T) {
	got, err := lwwset.Lookup("")
	assert.Equal(t, errors.New("empty value provided"), err)
	assert.Equal(t, false, got)
	lwwset = Clear()
}
|
package amqp
import (
"context"
"sync"
"time"
"github.com/Azure/go-amqp"
"github.com/brigadecore/brigade-foundations/retries"
"github.com/brigadecore/brigade/v2/apiserver/internal/lib/queue"
myamqp "github.com/brigadecore/brigade/v2/internal/amqp"
"github.com/pkg/errors"
)
// WriterFactoryConfig encapsulates details required for connecting an
// AMQP-based implementation of the queue.WriterFactory interface to an
// underlying AMQP-based messaging service.
type WriterFactoryConfig struct {
	// Address is the address of the AMQP-based messaging server.
	Address string
	// Username is the SASL username to use when connecting to the AMQP-based
	// messaging server.
	Username string
	// Password is the SASL password to use when connecting to the AMQP-based
	// messaging server.
	Password string
}
// writerFactory is an AMQP-based implementation of the queue.WriterFactory
// interface.
type writerFactory struct {
	address string
	// dialOpts carries the SASL credentials applied on every (re)connect.
	dialOpts []amqp.ConnOption
	amqpClient myamqp.Client
	// amqpClientMu guards amqpClient against concurrent NewWriter callers.
	amqpClientMu *sync.Mutex
	// connectFn points at connect(); the indirection presumably exists so
	// tests can substitute the connect logic — TODO confirm.
	connectFn func() error
}
// NewWriterFactory returns an AMQP-based implementation of the
// queue.WriterFactory interface.
func NewWriterFactory(config WriterFactoryConfig) queue.WriterFactory {
	factory := &writerFactory{
		address: config.Address,
		dialOpts: []amqp.ConnOption{
			amqp.ConnSASLPlain(config.Username, config.Password),
		},
		amqpClientMu: &sync.Mutex{},
	}
	factory.connectFn = factory.connect
	return factory
}
// connect connects (or reconnects) to the underlying AMQP-based messaging
// server, retrying up to 10 times with a 10-second max backoff. This
// function is NOT concurrency safe and callers should take measures to
// ensure they are the exclusive caller of this function.
func (w *writerFactory) connect() error {
	attempt := func() (bool, error) {
		// Drop any previous (presumably broken) client before dialing.
		if w.amqpClient != nil {
			w.amqpClient.Close()
		}
		client, err := myamqp.Dial(w.address, w.dialOpts...)
		if err != nil {
			return true, errors.Wrap(err, "error dialing endpoint")
		}
		w.amqpClient = client
		return false, nil
	}
	return retries.ManageRetries(
		context.Background(),
		"connect",
		10,
		10*time.Second,
		attempt,
	)
}
// NewWriter returns a queue.Writer bound to queueName, lazily establishing
// the factory's shared AMQP connection and creating a dedicated session and
// sender for the new writer, with retry/reconnect handling throughout.
func (w *writerFactory) NewWriter(queueName string) (queue.Writer, error) {
	// This entire function is a critical section of code so that we don't
	// possibly have multiple callers looking for a new Writer opening multiple
	// underlying connections to the messaging server.
	w.amqpClientMu.Lock()
	defer w.amqpClientMu.Unlock()
	// Lazily establish the shared connection on first use.
	if w.amqpClient == nil {
		if err := w.connectFn(); err != nil {
			return nil, err
		}
	}
	linkOpts := []amqp.LinkOption{
		amqp.LinkTargetAddress(queueName),
	}
	// Every Writer will get its own Session and Sender
	var amqpSession myamqp.Session
	var amqpSender myamqp.Sender
	var err error
	maxRetryCount := 10
	maxBackoff := 5 * time.Second
	if err = retries.ManageRetries(
		context.Background(),
		"create writer",
		maxRetryCount,
		maxBackoff,
		func() (bool, error) {
			// We may be retrying, so try cleaning up the session and/or
			// sender that we never ended up using.
			if amqpSender != nil {
				amqpSender.Close(context.TODO()) // nolint: errcheck
			}
			if amqpSession != nil {
				amqpSession.Close(context.TODO()) // nolint: errcheck
			}
			if amqpSession, err = w.amqpClient.NewSession(); err != nil {
				// Assume this happened because the existing connection is no good. Try
				// to reconnect.
				if err = w.connectFn(); err != nil {
					// The connection function handles its own retries. If we got an error
					// here, it's pretty serious. Bail.
					return false, err
				}
				// We're reconnected now, so retry getting a session again.
				return true, nil
			}
			if amqpSender, err = amqpSession.NewSender(linkOpts...); err != nil {
				// Assume this happened because the existing connection is no good.
				// Just retry again because we not only need a new connection, but
				// also a new session.
				return true, err
			}
			return false, nil
		},
	); err != nil {
		return nil, err
	}
	return &writer{
		queueName: queueName,
		amqpSession: amqpSession,
		amqpSender: amqpSender,
	}, nil
}
// Close shuts down the factory's shared AMQP client, if one was ever
// established. It takes the same mutex as NewWriter so a concurrent writer
// creation cannot race with shutdown.
func (w *writerFactory) Close(context.Context) error {
	w.amqpClientMu.Lock()
	defer w.amqpClientMu.Unlock()
	// The client is created lazily by NewWriter; closing a factory that was
	// never used must not panic on a nil client.
	if w.amqpClient == nil {
		return nil
	}
	if err := w.amqpClient.Close(); err != nil {
		return errors.Wrap(err, "error closing AMQP client")
	}
	return nil
}
// writer is an AMQP-based implementation of the queue.Writer interface.
// Each writer owns the dedicated AMQP session and sender created for it by
// writerFactory.NewWriter and must release both via Close.
type writer struct {
	queueName string
	amqpSession myamqp.Session
	amqpSender myamqp.Sender
}
// Write sends message to the writer's queue. A nil opts is treated as all
// defaults; opts.Durable is propagated to the AMQP message header.
func (w *writer) Write(
	ctx context.Context,
	message string,
	opts *queue.MessageOptions,
) error {
	if opts == nil {
		opts = &queue.MessageOptions{}
	}
	msg := &amqp.Message{
		Header: &amqp.MessageHeader{Durable: opts.Durable},
		Data:   [][]byte{[]byte(message)},
	}
	err := w.amqpSender.Send(ctx, msg)
	if err != nil {
		return errors.Wrapf(
			err,
			"error sending amqp message for queue %q",
			w.queueName,
		)
	}
	return nil
}
// Close releases the writer's AMQP resources: the sender first, then the
// session that owns it. The first failure aborts the teardown.
func (w *writer) Close(ctx context.Context) error {
	err := w.amqpSender.Close(ctx)
	if err != nil {
		return errors.Wrapf(
			err,
			"error closing AMQP sender for queue %q",
			w.queueName,
		)
	}
	err = w.amqpSession.Close(ctx)
	if err != nil {
		return errors.Wrapf(
			err,
			"error closing AMQP session for queue %q",
			w.queueName,
		)
	}
	return nil
}
|
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/vertexai/beta/vertexai_beta_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vertexai/beta"
)
// MetadataStoreServer implements the gRPC interface for MetadataStore.
// It is stateless; each request creates its own client via
// createConfigMetadataStore.
type MetadataStoreServer struct{}
// ProtoToVertexaiBetaMetadataStoreEncryptionSpec converts a
// MetadataStoreEncryptionSpec object from its proto representation.
func ProtoToVertexaiBetaMetadataStoreEncryptionSpec(p *betapb.VertexaiBetaMetadataStoreEncryptionSpec) *beta.MetadataStoreEncryptionSpec {
	if p == nil {
		return nil
	}
	obj := &beta.MetadataStoreEncryptionSpec{
		KmsKeyName: dcl.StringOrNil(p.GetKmsKeyName()),
	}
	return obj
}
// ProtoToVertexaiBetaMetadataStoreState converts a MetadataStoreState
// object from its proto representation.
func ProtoToVertexaiBetaMetadataStoreState(p *betapb.VertexaiBetaMetadataStoreState) *beta.MetadataStoreState {
	if p == nil {
		return nil
	}
	obj := &beta.MetadataStoreState{
		DiskUtilizationBytes: dcl.Int64OrNil(p.GetDiskUtilizationBytes()),
	}
	return obj
}
// ProtoToMetadataStore converts a MetadataStore resource from its proto
// representation. A nil p is tolerated because the generated Get* proto
// accessors are nil-safe.
func ProtoToMetadataStore(p *betapb.VertexaiBetaMetadataStore) *beta.MetadataStore {
	obj := &beta.MetadataStore{
		Name:           dcl.StringOrNil(p.GetName()),
		CreateTime:     dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:     dcl.StringOrNil(p.GetUpdateTime()),
		EncryptionSpec: ProtoToVertexaiBetaMetadataStoreEncryptionSpec(p.GetEncryptionSpec()),
		Description:    dcl.StringOrNil(p.GetDescription()),
		State:          ProtoToVertexaiBetaMetadataStoreState(p.GetState()),
		Project:        dcl.StringOrNil(p.GetProject()),
		Location:       dcl.StringOrNil(p.GetLocation()),
	}
	return obj
}
// VertexaiBetaMetadataStoreEncryptionSpecToProto converts a
// MetadataStoreEncryptionSpec object to its proto representation.
func VertexaiBetaMetadataStoreEncryptionSpecToProto(o *beta.MetadataStoreEncryptionSpec) *betapb.VertexaiBetaMetadataStoreEncryptionSpec {
	if o == nil {
		return nil
	}
	p := &betapb.VertexaiBetaMetadataStoreEncryptionSpec{}
	p.SetKmsKeyName(dcl.ValueOrEmptyString(o.KmsKeyName))
	return p
}
// VertexaiBetaMetadataStoreStateToProto converts a MetadataStoreState
// object to its proto representation.
func VertexaiBetaMetadataStoreStateToProto(o *beta.MetadataStoreState) *betapb.VertexaiBetaMetadataStoreState {
	if o == nil {
		return nil
	}
	p := &betapb.VertexaiBetaMetadataStoreState{}
	p.SetDiskUtilizationBytes(dcl.ValueOrEmptyInt64(o.DiskUtilizationBytes))
	return p
}
// MetadataStoreToProto converts a MetadataStore resource to its proto
// representation. Nil optional fields are emitted as empty proto values.
func MetadataStoreToProto(resource *beta.MetadataStore) *betapb.VertexaiBetaMetadataStore {
	p := &betapb.VertexaiBetaMetadataStore{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	p.SetEncryptionSpec(VertexaiBetaMetadataStoreEncryptionSpecToProto(resource.EncryptionSpec))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetState(VertexaiBetaMetadataStoreStateToProto(resource.State))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	return p
}
// applyMetadataStore handles the gRPC request by passing it to the
// underlying MetadataStore Apply() method, converting between proto and
// DCL representations on the way in and out.
func (s *MetadataStoreServer) applyMetadataStore(ctx context.Context, c *beta.Client, request *betapb.ApplyVertexaiBetaMetadataStoreRequest) (*betapb.VertexaiBetaMetadataStore, error) {
	p := ProtoToMetadataStore(request.GetResource())
	res, err := c.ApplyMetadataStore(ctx, p)
	if err != nil {
		return nil, err
	}
	r := MetadataStoreToProto(res)
	return r, nil
}
// ApplyVertexaiBetaMetadataStore handles the gRPC request by passing it to
// the underlying MetadataStore Apply() method.
func (s *MetadataStoreServer) ApplyVertexaiBetaMetadataStore(ctx context.Context, request *betapb.ApplyVertexaiBetaMetadataStoreRequest) (*betapb.VertexaiBetaMetadataStore, error) {
	cl, err := createConfigMetadataStore(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyMetadataStore(ctx, cl, request)
}
// DeleteVertexaiBetaMetadataStore handles the gRPC request by passing it to
// the underlying MetadataStore Delete() method.
func (s *MetadataStoreServer) DeleteVertexaiBetaMetadataStore(ctx context.Context, request *betapb.DeleteVertexaiBetaMetadataStoreRequest) (*emptypb.Empty, error) {
	cl, err := createConfigMetadataStore(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteMetadataStore(ctx, ProtoToMetadataStore(request.GetResource()))
}
// ListVertexaiBetaMetadataStore handles the gRPC request by passing it to
// the underlying MetadataStoreList() method, scoped to the requested
// project and location.
func (s *MetadataStoreServer) ListVertexaiBetaMetadataStore(ctx context.Context, request *betapb.ListVertexaiBetaMetadataStoreRequest) (*betapb.ListVertexaiBetaMetadataStoreResponse, error) {
	cl, err := createConfigMetadataStore(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListMetadataStore(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	var protos []*betapb.VertexaiBetaMetadataStore
	for _, r := range resources.Items {
		rp := MetadataStoreToProto(r)
		protos = append(protos, rp)
	}
	p := &betapb.ListVertexaiBetaMetadataStoreResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigMetadataStore builds a beta vertexai client configured with
// credentials from the given service account file.
func createConfigMetadataStore(ctx context.Context, serviceAccountFile string) (*beta.Client, error) {
	// Parameter renamed from snake_case service_account_file to idiomatic Go
	// camelCase; callers are unaffected (positional argument).
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return beta.NewClient(conf), nil
}
|
package main
import (
"errors"
"fmt"
"time"
"github.com/andywow/golang-lessons/lesson-calendar/internal/calendar/repository"
"github.com/andywow/golang-lessons/lesson-calendar/pkg/eventapi"
)
// createEventTest holds the state shared between the godog steps of the
// "create event" scenario.
type createEventTest struct {
	apiClient apiServerTestClient
	// err is the error recorded by the most recent CreateEvent call.
	err error
	// event is the event sent to / returned by the API server.
	event *eventapi.Event
}
// iCreateEvent creates a fresh event through the API client and records
// both the returned event and any error for later scenario steps. The step
// itself always succeeds; errors are checked by thereAreNoCreateErrors.
func (t *createEventTest) iCreateEvent() error {
	t.apiClient.create()
	defer t.apiClient.close()
	startTime := time.Now()
	t.event = &eventapi.Event{
		Description: "godog test",
		Duration:    1,
		Header:      "godog test",
		StartTime:   &startTime,
		Username:    fmt.Sprintf("godog user %d", time.Now().Unix()),
	}
	t.event, t.err = t.apiClient.client.CreateEvent(t.apiClient.callCtx, t.event)
	return nil
}
// thereAreNoCreateErrors passes when the preceding create step recorded no
// error; a non-nil t.err fails the godog step.
func (t *createEventTest) thereAreNoCreateErrors() error {
	return t.err
}
// eventUuidShouldNotBeEmpty asserts that the created event was assigned a
// UUID by the server.
func (t *createEventTest) eventUuidShouldNotBeEmpty() error {
	// Guard against a nil event (CreateEvent may return nil on failure);
	// the original dereferenced t.event unconditionally and could panic.
	if t.event == nil {
		return errors.New("event was not created")
	}
	if t.event.Uuid == "" {
		return errors.New("uuid not returned")
	}
	return nil
}
// iCreateEventOnBusyTime re-submits the previously created event so the
// server sees an already-occupied time slot; the result is recorded for
// the following assertion step.
func (t *createEventTest) iCreateEventOnBusyTime() error {
	t.apiClient.create()
	defer t.apiClient.close()
	t.event, t.err = t.apiClient.client.CreateEvent(t.apiClient.callCtx, t.event)
	return nil
}
// iReceiveDateAlreadyBusyError asserts that the previous create attempt
// failed with repository.ErrDateBusy.
func (t *createEventTest) iReceiveDateAlreadyBusyError() error {
	// BUG FIX: the original returned t.err unchanged, so when NO error
	// occurred (t.err == nil) the step silently passed even though the
	// expected "date busy" error was absent.
	if t.err == nil {
		return errors.New("expected date busy error, but got no error")
	}
	if t.err != repository.ErrDateBusy {
		return t.err
	}
	return nil
}
|
package clock
import (
"context"
"errors"
"testing"
"time"
)
// Ensure that WithDeadline is cancelled when deadline exceeded.
func TestMock_WithDeadline(t *testing.T) {
	m := NewMock()
	ctx, cancel := m.WithDeadline(context.Background(), m.Now().Add(time.Second))
	// The original discarded the CancelFunc (go vet lostcancel); defer it
	// so the context's resources are released.
	defer cancel()
	m.Add(time.Second)
	select {
	case <-ctx.Done():
		if !errors.Is(ctx.Err(), context.DeadlineExceeded) {
			t.Error("invalid type of error returned when deadline exceeded")
		}
	default:
		t.Error("context is not cancelled when deadline exceeded")
	}
}
// Ensure that WithDeadline does nothing when the deadline is later than the current deadline.
func TestMock_WithDeadlineLaterThanCurrent(t *testing.T) {
	m := NewMock()
	// Both CancelFuncs were discarded in the original (go vet lostcancel);
	// defer them so both contexts release their resources.
	ctx, cancelOuter := m.WithDeadline(context.Background(), m.Now().Add(time.Second))
	defer cancelOuter()
	ctx, cancelInner := m.WithDeadline(ctx, m.Now().Add(10*time.Second))
	defer cancelInner()
	m.Add(time.Second)
	select {
	case <-ctx.Done():
		if !errors.Is(ctx.Err(), context.DeadlineExceeded) {
			t.Error("invalid type of error returned when deadline exceeded")
		}
	default:
		t.Error("context is not cancelled when deadline exceeded")
	}
}
// Ensure that cancelling a WithDeadline context closes its Done channel
// with a context.Canceled error.
func TestMock_WithDeadlineCancel(t *testing.T) {
	m := NewMock()
	ctx, cancel := m.WithDeadline(context.Background(), m.Now().Add(time.Second))
	cancel()
	select {
	case <-time.After(time.Second):
		t.Error("context is not cancelled after cancel was called")
	case <-ctx.Done():
		if !errors.Is(ctx.Err(), context.Canceled) {
			t.Error("invalid type of error returned after cancellation")
		}
	}
}
// Ensure that WithDeadline closes child contexts after it was closed.
func TestMock_WithDeadlineCancelledWithParent(t *testing.T) {
	m := NewMock()
	parent, cancel := context.WithCancel(context.Background())
	ctx, cancelDeadline := m.WithDeadline(parent, m.Now().Add(time.Second))
	// The deadline context's CancelFunc was discarded in the original
	// (go vet lostcancel); defer it to release the deadline timer.
	defer cancelDeadline()
	cancel()
	select {
	case <-ctx.Done():
		if !errors.Is(ctx.Err(), context.Canceled) {
			t.Error("invalid type of error returned after cancellation")
		}
	case <-time.After(time.Second):
		t.Error("context is not cancelled when parent context is cancelled")
	}
}
// Ensure that WithDeadline is cancelled immediately when the deadline has
// already passed.
func TestMock_WithDeadlineImmediate(t *testing.T) {
	m := NewMock()
	ctx, cancel := m.WithDeadline(context.Background(), m.Now().Add(-time.Second))
	// The original discarded the CancelFunc (go vet lostcancel).
	defer cancel()
	select {
	case <-ctx.Done():
		if !errors.Is(ctx.Err(), context.DeadlineExceeded) {
			t.Error("invalid type of error returned when deadline has already passed")
		}
	default:
		t.Error("context is not cancelled when deadline has already passed")
	}
}
// Ensure that WithTimeout is cancelled when deadline exceeded.
func TestMock_WithTimeout(t *testing.T) {
	m := NewMock()
	ctx, cancel := m.WithTimeout(context.Background(), time.Second)
	// The original discarded the CancelFunc (go vet lostcancel).
	defer cancel()
	m.Add(time.Second)
	select {
	case <-ctx.Done():
		if !errors.Is(ctx.Err(), context.DeadlineExceeded) {
			t.Error("invalid type of error returned when time is over")
		}
	default:
		t.Error("context is not cancelled when time is over")
	}
}
|
package profiler
import (
"fmt"
"os"
"bufio"
"bytes"
)
// type Resources struct {
// memoryUsed uint64
// cpuUsed int
// }
//type Resources map[string]interface{}
//https://stackoverflow.com/questions/31879817/golang-os-exec-realtime-memory-usage
//https://unix.stackexchange.com/questions/33381/getting-information-about-a-process-memory-usage-from-proc-pid-smaps
// func CalculateMemory(pid int) (uint64, error) {
// f, err := os.Open(fmt.Sprintf("/proc/%d/smaps", pid))
// if err != nil {
// return 0, err
// }
// defer f.Close()
// res := uint64(0)
// pfx := []byte("Pss:")
// r := bufio.NewScanner(f)
// for r.Scan() {
// line := r.Bytes()
// if bytes.HasPrefix(line, pfx) {
// var size uint64
// _, err := fmt.Sscanf(string(line[4:]), "%d", &size)
// if err != nil {
// return 0, err
// }
// res += size
// }
// }
// if err := r.Err(); err != nil {
// return 0, err
// }
// return res, nil
// }
// CalculateMemory sums the Pss entries of /proc/<pid>/smaps (in KB), stores
// the total in rs.memoryUsed and appends a copy of *rs to *resAr.
// It now reports failures to the caller instead of silently ignoring them;
// the added error return is backward compatible (existing call statements
// may simply discard it).
func CalculateMemory(pid int, rs *Resource, resAr *[]Resource) error {
	f, err := os.Open(fmt.Sprintf("/proc/%d/smaps", pid))
	if err != nil {
		return err
	}
	defer f.Close()
	total := uint64(0)
	pfx := []byte("Pss:")
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Bytes()
		if bytes.HasPrefix(line, pfx) {
			var size uint64
			if _, err := fmt.Sscanf(string(line[4:]), "%d", &size); err != nil {
				return err
			}
			total += size
		}
	}
	if err := scanner.Err(); err != nil {
		return err
	}
	rs.memoryUsed = total
	// BUG FIX: the original `resAr = append(resAr, rs)` does not compile —
	// it appends a *Resource to a *[]Resource without dereferencing either.
	*resAr = append(*resAr, *rs)
	return nil
}
//must see: https://golang.org/doc/diagnostics.html
|
//
// Copyright 2020 The AVFS authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package orefafs
import (
"os"
"time"
"github.com/avfs/avfs"
"github.com/avfs/avfs/idm/dummyidm"
"github.com/avfs/avfs/vfsutils"
)
// New returns a new memory file system (OrefaFS), pre-populated with a
// root directory and configured by the given options.
func New(opts ...Option) (*OrefaFS, error) {
	rootDir := string(avfs.PathSeparator)
	vfs := &OrefaFS{
		nodes:       make(nodes),
		curDir:      rootDir,
		feature:     avfs.FeatBasicFs | avfs.FeatHardlink,
		currentUser: dummyidm.RootUser,
		umask:       int32(vfsutils.UMask.Get()),
	}
	// The root directory always exists.
	vfs.nodes[rootDir] = &node{
		mtime: time.Now().UnixNano(),
		mode:  os.ModeDir | 0o755,
	}
	for _, opt := range opts {
		if err := opt(vfs); err != nil {
			return nil, err
		}
	}
	if vfs.feature&avfs.FeatMainDirs != 0 {
		// Create the base directories with umask disabled so their modes
		// are exact, then restore it.
		savedUmask := vfs.umask
		vfs.umask = 0
		_ = vfsutils.CreateBaseDirs(vfs, "")
		vfs.umask = savedUmask
		vfs.curDir = avfs.RootDir
	}
	return vfs, nil
}
// Features returns the set of features provided by the file system or
// identity manager.
func (vfs *OrefaFS) Features() avfs.Feature {
	return vfs.feature
}
// HasFeature returns true if the file system or identity manager provides
// a given feature. All bits of the feature mask must be set.
func (vfs *OrefaFS) HasFeature(feature avfs.Feature) bool {
	return vfs.feature&feature == feature
}
// Name returns the name of the file system.
func (vfs *OrefaFS) Name() string {
	return vfs.name
}
// OSType returns the operating system type of the file system.
// OrefaFS always presents itself as a Linux file system.
func (vfs *OrefaFS) OSType() avfs.OSType {
	return avfs.OsLinux
}
// Type returns the type of the file system or identity manager.
func (vfs *OrefaFS) Type() string {
	return "OrefaFS"
}
// Options

// WithMainDirs returns an option function to create main directories
// (/home, /root and /tmp) when the file system is constructed by New.
func WithMainDirs() Option {
	return func(vfs *OrefaFS) error {
		vfs.feature |= avfs.FeatMainDirs
		return nil
	}
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rollout
import (
"context"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
kruisev1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
var _ = Describe("Kruise rollout test", func() {
	ctx := context.Background()
	BeforeEach(func() {
		Expect(k8sClient.Create(ctx, rollout.DeepCopy())).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
		Expect(k8sClient.Create(ctx, rt.DeepCopy())).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
		Expect(k8sClient.Create(ctx, app.DeepCopy())).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
		Expect(k8sClient.Create(ctx, rollingReleaseRollout.DeepCopy())).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
	})
	It("test get associated rollout func", func() {
		rollouts, err := getAssociatedRollouts(ctx, k8sClient, &app, false)
		Expect(err).Should(BeNil())
		// test will only fetch one rollout in result
		Expect(len(rollouts)).Should(BeEquivalentTo(1))
	})
	// NOTE: several Expect(...) calls below previously had no matcher
	// attached; a bare gomega Expect asserts nothing, so operation/Get
	// failures were silently ignored. Each now carries Should(BeNil()).
	It("Suspend rollout", func() {
		r := kruisev1alpha1.Rollout{}
		Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: "default", Name: "my-rollout"}, &r)).Should(BeNil())
		r.Status.Phase = kruisev1alpha1.RolloutPhaseProgressing
		Expect(k8sClient.Status().Update(ctx, &r)).Should(BeNil())
		Expect(SuspendRollout(ctx, k8sClient, &app, nil)).Should(BeNil())
		Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: "default", Name: "my-rollout"}, &r)).Should(BeNil())
		Expect(r.Spec.Strategy.Paused).Should(BeEquivalentTo(true))
	})
	It("Resume rollout", func() {
		r := kruisev1alpha1.Rollout{}
		Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: "default", Name: "my-rollout"}, &r)).Should(BeNil())
		Expect(r.Spec.Strategy.Paused).Should(BeEquivalentTo(true))
		Expect(ResumeRollout(ctx, k8sClient, &app, nil)).Should(BeNil())
		Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: "default", Name: "my-rollout"}, &r)).Should(BeNil())
		Expect(r.Spec.Strategy.Paused).Should(BeEquivalentTo(false))
	})
	It("Rollback rollout", func() {
		r := kruisev1alpha1.Rollout{}
		Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: "default", Name: "my-rollout"}, &r)).Should(BeNil())
		r.Spec.Strategy.Paused = true
		Expect(k8sClient.Update(ctx, &r)).Should(BeNil())
		Expect(RollbackRollout(ctx, k8sClient, &app, nil)).Should(BeNil())
		Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: "default", Name: "my-rollout"}, &r)).Should(BeNil())
		Expect(r.Spec.Strategy.Paused).Should(BeEquivalentTo(false))
	})
})
// app is a minimal Application fixture (no components); the Rollout
// fixtures are associated with it through the rt ResourceTracker's labels,
// which reference this name/namespace.
var app = v1beta1.Application{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "core.oam.dev/v1beta1",
		Kind:       "Application",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:       "rollout-app",
		Namespace:  "default",
		Generation: 1, // matches rt.Spec.ApplicationGeneration
	},
	Spec: v1beta1.ApplicationSpec{
		Components: []common.ApplicationComponent{},
	},
}
// rt is the versioned ResourceTracker that ties both Rollout fixtures to
// the app fixture (matched via the app name/namespace labels). Both managed
// resources point at the same OAM component "my-rollout".
var rt = v1beta1.ResourceTracker{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "core.oam.dev/v1beta1",
		Kind:       "ResourceTracker",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name: "rollout-app",
		Labels: map[string]string{
			"app.oam.dev/appRevision": "rollout-app-v1",
			"app.oam.dev/name":        "rollout-app",
			"app.oam.dev/namespace":   "default",
		},
	},
	Spec: v1beta1.ResourceTrackerSpec{
		ApplicationGeneration: 1,
		Type:                  v1beta1.ResourceTrackerTypeVersioned,
		ManagedResources: []v1beta1.ManagedResource{
			{
				ClusterObjectReference: common.ClusterObjectReference{
					ObjectReference: v1.ObjectReference{
						APIVersion: "rollouts.kruise.io/v1alpha1",
						Kind:       "Rollout",
						Name:       "my-rollout",
						Namespace:  "default",
					},
				},
				OAMObjectReference: common.OAMObjectReference{
					Component: "my-rollout",
				},
			},
			{
				ClusterObjectReference: common.ClusterObjectReference{
					ObjectReference: v1.ObjectReference{
						APIVersion: "rollouts.kruise.io/v1alpha1",
						Kind:       "Rollout",
						Name:       "rolling-release-rollout",
						Namespace:  "default",
					},
				},
				OAMObjectReference: common.OAMObjectReference{
					Component: "my-rollout",
				},
			},
		},
	},
}
// rollout is the canary-strategy Kruise Rollout fixture referenced by rt;
// the Suspend/Resume/Rollback specs toggle its Strategy.Paused field.
var rollout = kruisev1alpha1.Rollout{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "rollouts.kruise.io/v1alpha1",
		Kind:       "Rollout",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:      "my-rollout",
		Namespace: "default",
	},
	Spec: kruisev1alpha1.RolloutSpec{
		ObjectRef: kruisev1alpha1.ObjectRef{
			WorkloadRef: &kruisev1alpha1.WorkloadRef{
				// Fix: "apps/v1" is the Deployment group/version
				// (was the malformed "appsv1").
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       "canary-demo",
			},
		},
		Strategy: kruisev1alpha1.RolloutStrategy{
			Canary: &kruisev1alpha1.CanaryStrategy{
				Steps: []kruisev1alpha1.CanaryStep{
					{
						Weight: pointer.Int32(30),
					},
				},
			},
			Paused: false,
		},
	},
}
// rollingReleaseRollout mirrors the rollout fixture but carries the
// skip-resume annotation, exercising the path where resume is suppressed.
var rollingReleaseRollout = kruisev1alpha1.Rollout{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "rollouts.kruise.io/v1alpha1",
		Kind:       "Rollout",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rolling-release-rollout",
		Namespace: "default",
		Annotations: map[string]string{
			oam.AnnotationSkipResume: "true",
		},
	},
	Spec: kruisev1alpha1.RolloutSpec{
		ObjectRef: kruisev1alpha1.ObjectRef{
			WorkloadRef: &kruisev1alpha1.WorkloadRef{
				// Fix: "apps/v1" is the Deployment group/version
				// (was the malformed "appsv1").
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       "canary-demo",
			},
		},
		Strategy: kruisev1alpha1.RolloutStrategy{
			Canary: &kruisev1alpha1.CanaryStrategy{
				Steps: []kruisev1alpha1.CanaryStep{
					{
						Weight: pointer.Int32(30),
					},
				},
			},
			Paused: false,
		},
	},
}
|
package keeper
import (
"context"
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/octalmage/gitgood/x/gitgood/types"
)
// CreateTeam appends a new Team built from the message fields and returns
// the id the keeper assigned to it.
func (k msgServer) CreateTeam(goCtx context.Context, msg *types.MsgCreateTeam) (*types.MsgCreateTeamResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	id := k.AppendTeam(ctx, types.Team{
		Creator:          msg.Creator,
		Name:             msg.Name,
		Users:            msg.Users,
		SlackIntegration: msg.SlackIntegration,
	})
	return &types.MsgCreateTeamResponse{Id: id}, nil
}
// UpdateTeam overwrites an existing team with the contents of msg.
// It fails with ErrKeyNotFound for an unknown id and ErrUnauthorized when
// the sender is not the team's current owner.
func (k msgServer) UpdateTeam(goCtx context.Context, msg *types.MsgUpdateTeam) (*types.MsgUpdateTeamResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	var team = types.Team{
		Creator:          msg.Creator,
		Id:               msg.Id,
		Name:             msg.Name,
		Users:            msg.Users,
		SlackIntegration: msg.SlackIntegration,
	}
	// Check that the team exists before writing.
	if !k.HasTeam(ctx, msg.Id) {
		return nil, sdkerrors.Wrap(sdkerrors.ErrKeyNotFound, fmt.Sprintf("key %d doesn't exist", msg.Id))
	}
	// Check that the msg sender is the same as the current owner.
	if msg.Creator != k.GetTeamOwner(ctx, msg.Id) {
		return nil, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "incorrect owner")
	}
	k.SetTeam(ctx, team)
	return &types.MsgUpdateTeamResponse{}, nil
}
// DeleteTeam removes the team identified by msg.Id. It fails with
// ErrKeyNotFound for an unknown id and ErrUnauthorized when the sender is
// not the team's current owner.
func (k msgServer) DeleteTeam(goCtx context.Context, msg *types.MsgDeleteTeam) (*types.MsgDeleteTeamResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	// Check that the team exists before removing.
	if !k.HasTeam(ctx, msg.Id) {
		return nil, sdkerrors.Wrap(sdkerrors.ErrKeyNotFound, fmt.Sprintf("key %d doesn't exist", msg.Id))
	}
	// Only the team's owner may delete it.
	if msg.Creator != k.GetTeamOwner(ctx, msg.Id) {
		return nil, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "incorrect owner")
	}
	k.RemoveTeam(ctx, msg.Id)
	return &types.MsgDeleteTeamResponse{}, nil
}
|
package common
import (
"gorm.io/gorm"
"time"
)
// Model is the shared base embedded by persistent entities: an audit
// status, the operator who last touched the row, and gorm-managed
// timestamps including soft delete.
type Model struct {
	Status    uint8     `gorm:"comment:状态"`
	Opr       string    `gorm:"comment:操作人"`
	CreatedAt time.Time `gorm:"comment:创建时间"`
	UpdatedAt time.Time `gorm:"comment:更新时间"`
	// Fix: the column comment contained the typo 删新时间; it should be
	// 删除时间 (deletion time).
	DeletedAt gorm.DeletedAt `gorm:"index;comment:删除时间"`
}
|
package main
import (
"bufio"
"fmt"
"github.com/spf13/pflag"
"os"
)
// selpg_args holds the parsed command-line configuration for selpg.
type selpg_args struct {
	start_page  int    // first page to print (1-based)
	end_page    int    // last page to print (inclusive)
	in_filename string // input file; empty means stdin
	dest        string // output destination; empty means stdout
	page_len    int    // lines per page when page_type == 0
	page_type   int    // 0: fixed-length pages; 1: form-feed delimited pages
}
// progname is the invoked program name (os.Args[0]), used in diagnostics.
var progname string

// INT_MAX is the largest value representable by the platform int type.
const INT_MAX = int(^uint(0) >> 1)
// process_args parses command-line flags and positional arguments into arg
// and validates them, printing usage and exiting on invalid input.
//
// Flags: -s start page, -e end page, -f page type, -l lines per page,
// -d destination. The first positional argument that is not "<" becomes the
// input file; the argument following ">" or "|" becomes the destination.
func (arg *selpg_args) process_args() {
	progname = os.Args[0]
	pflag.IntVarP(&arg.start_page, "start_page", "s", 0, "start page")
	pflag.IntVarP(&arg.end_page, "end_page", "e", 0, "end page")
	pflag.IntVarP(&arg.page_type, "page_type", "f", 0, "Page type")
	pflag.IntVarP(&arg.page_len, "page_len", "l", 36, "page len")
	pflag.StringVarP(&arg.dest, "dest", "d", "", "dest")
	pflag.Parse()

	file_arr := pflag.Args()
	arg.in_filename = ""
	// Fix: the original loop indexed file_arr before checking its length,
	// panicking when no positional arguments were given, and could read one
	// element past the end after a trailing ">" or "|".
	for count := 0; count < len(file_arr); count++ {
		if arg.in_filename == "" && file_arr[count] != "<" {
			arg.in_filename = file_arr[count]
		}
		if file_arr[count] == ">" || file_arr[count] == "|" {
			count++
			if count < len(file_arr) {
				arg.dest = file_arr[count]
			}
		}
	}

	if len(os.Args) < 3 {
		fmt.Fprintf(os.Stderr, "%s: please input enough arguments\n", progname)
		usage()
		os.Exit(1)
	}
	if arg.start_page < 1 || arg.start_page > INT_MAX {
		fmt.Fprintf(os.Stderr, "%s: please input positive integer for start_page\n", progname)
		usage()
		os.Exit(2)
	}
	if arg.end_page < 1 || arg.end_page > (INT_MAX-1) || arg.end_page < arg.start_page {
		fmt.Fprintf(os.Stderr, "%s: please input positive integer for end_page or start_page should not be greater than end_page\n", progname)
		usage()
		os.Exit(3)
	}
	if arg.page_len < 1 || arg.page_len > (INT_MAX-1) {
		fmt.Fprintf(os.Stderr, "%s: please input positive integer for page_len\n", progname)
		// Consistency fix: the other validation branches call usage();
		// this one previously called pflag.Usage().
		usage()
		os.Exit(4)
	}
}
// process_input copies the selected page range from the input (file or
// stdin) to the destination (file or stdout). Pages are delimited either by
// a fixed number of lines (page_len) or, when page_type == 1, by form-feed
// characters.
func (arg selpg_args) process_input() {
	var read *bufio.Reader
	var write *bufio.Writer

	if arg.in_filename == "" {
		read = bufio.NewReader(os.Stdin)
	} else {
		fin, err := os.Open(arg.in_filename)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s: could not open input file %s\n", progname, arg.in_filename)
			os.Exit(5)
		}
		read = bufio.NewReader(fin)
		defer fin.Close()
	}

	if arg.dest == "" {
		write = bufio.NewWriter(os.Stdout)
	} else {
		// Fix: the destination is written to, so it must be created (and
		// truncated) with os.Create; os.Open opens read-only, making every
		// write fail, and fails outright when the file does not exist.
		fout, err := os.Create(arg.dest)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s: could not open output file %s\n", progname, arg.dest)
			os.Exit(6)
		}
		write = bufio.NewWriter(fout)
		defer fout.Close()
	}
	// Fix: the buffered writer was never flushed, so buffered output could
	// be lost. Registered last so it runs before the output file is closed.
	defer write.Flush()

	line_number, page_number, pLen := 1, 1, arg.page_len
	judge_Flag := '\n' // default unit: one line
	if arg.page_type == 1 {
		judge_Flag = '\f' // form-feed mode: one page per unit
		pLen = 1
	}
	for {
		line, err := read.ReadString(byte(judge_Flag))
		if err != nil && len(line) == 0 {
			break
		}
		if line_number > pLen {
			page_number++
			line_number = 1
		}
		if page_number >= arg.start_page && page_number <= arg.end_page {
			write.Write([]byte(line))
		}
		line_number++
	}
	if page_number < arg.end_page {
		fmt.Fprintf(os.Stderr,
			"\n%s: put exist error\n", progname)
	}
}
// usage prints a one-line synopsis of the command-line interface to stderr.
func usage() {
	fmt.Fprintf(os.Stderr, "\nUSAGE: %s -sstart_page -eend_page [ -f | -llines_per_page ] [ -ddest ] [ in_filename ]\n", progname)
}
// main parses the command line and streams the selected pages to the
// configured destination.
func main() {
	arg := selpg_args{}
	arg.process_args()
	arg.process_input()
}
|
package Command
import "testing"
// TestCommand_Execute builds a chain of people, each wrapping the previous
// person's action in a Command, then triggers the chain from the last link:
// li -> chen.Wash -> ma.Cook -> zhang.Buy -> wang.Listen.
func TestCommand_Execute(t *testing.T) {
	laowang := NewPerson("wang",NewCommand(nil,nil))
	laozhang := NewPerson("zhang",NewCommand(&laowang,laowang.Listen))
	laoma := NewPerson("ma",NewCommand(&laozhang,laozhang.Buy))
	laochen := NewPerson("chen",NewCommand(&laoma,laoma.Cook))
	laoli := NewPerson("li",NewCommand(&laochen,laochen.Wash))
	laoli.Talk()
}

// laoli.Talk() runs first, executing NewCommand(&laochen, laochen.Wash),
// i.e. it orders laochen to wash; this propagates up the chain until
// laowang.Listen. laowang has nothing left to invoke (NewCommand(nil, nil)),
// so Listen must not call execute again or it would fail on the nil command.
|
package main
import (
	"image"
	"image/color"
	"image/gif"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"time"
)
// base is the glyph ramp used for rendering, ordered roughly from dark to
// light; the trailing space renders near-white pixels.
const base = "#8XOHLTI!@%&?/-12345679eW)i=+;:,. "
// drawImage renders an image as ASCII art. Rows are sampled every other
// pixel line (terminal characters are taller than wide); each sampled
// pixel's grey level selects a glyph from base, with a space for values
// past the end of the ramp.
//
// NOTE(review): At(i, j) uses absolute coordinates starting from (0, 0);
// images whose Bounds().Min is non-zero (e.g. offset GIF frames) would be
// sampled incorrectly — confirm against the callers.
func drawImage(img image.Image) string {
	// Fix: the original built the result with ret += ... in a nested loop,
	// which is quadratic; strings.Builder keeps it linear.
	var sb strings.Builder
	blen := len(base)
	bounds := img.Bounds()
	for j := 0; j < bounds.Dy(); j += 2 {
		for i := 0; i < bounds.Dx(); i++ {
			grey := getGrey(img.At(i, j))
			index := int(float64(blen+1) * grey / 255)
			if index >= blen {
				sb.WriteByte(' ')
			} else {
				sb.WriteByte(base[index])
			}
		}
		sb.WriteString("\r\n")
	}
	return sb.String()
}
//灰度算法 -- 可以自行百度
func getGrey(color color.Color) float64 {
r, g, b, _ := color.RGBA()
var rr, gg, bb int
rr = int(r >> 8)
gg = int(g >> 8)
bb = int(b >> 8)
var gray float64
gray = float64(rr)*299 + float64(gg)*578 + float64(bb)*114
gray = gray / float64(1000)
return gray
}
// main animates a GIF as ASCII art: each frame is converted to characters
// and written to text.txt roughly three times per second, looping forever
// (terminate with Ctrl-C). The image path is taken from the first argument.
func main() {
	if len(os.Args) <= 1 {
		log.Fatal("请输入图片路径")
	}
	file, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	// Decode every frame of the GIF up front.
	gs, err := gif.DecodeAll(file)
	if err != nil {
		log.Fatal(err)
	}
	for {
		// NOTE(review): the frame loop starts at index 1, skipping frame 0 —
		// confirm whether that is intentional.
		for i := 1; i < len(gs.Image); i++ {
			// Pace the animation at ~300ms per frame.
			time.Sleep(300 * time.Millisecond)
			str := drawImage(gs.Image[i])
			ioutil.WriteFile("text.txt", []byte(str), 0777)
		}
	}
}
|
package atlas
// File represents one packed image's placement inside an Atlas: its source
// file name and the x/y/width/height rectangle it occupies in the output.
type File struct {
	Atlas    *Atlas // the atlas this file was packed into
	FileName string
	X        int
	Y        int
	Width    int
	Height   int
}
|
package controllers
import (
"encoding/json"
"fmt"
"github.com/astaxie/beego"
"github.com/sinksmell/files-cmp/models"
"mime/multipart"
)
// CheckController serves the file- and md5-checking endpoints.
type CheckController struct {
	beego.Controller
}
// URLMapping registers this controller's handler methods with beego.
// NOTE(review): the first mapping uses "GET" while the others use the
// handler method names ("Hash", "File") — confirm "GET" shouldn't be "Get".
func (c *CheckController) URLMapping() {
	c.Mapping("GET", c.Get)
	c.Mapping("Hash", c.Hash)
	c.Mapping("File", c.File)
}
// @Title Test CheckController
// @Description get all objects
// @Success 200
// @Failure 403
// @router / [get]
func (c *CheckController) Get() {
	// A simple liveness/test route: always answers SUCCESS/"OK".
	resp := &models.Response{}
	resp.Code = models.SUCCESS
	resp.Msg = "OK"
	c.Data["json"] = resp
	c.ServeJSON()
}
// @Title Check Hash
// @Description checks whether a stored group file's md5 matches the hash the client supplies
// @Param body body models.HashRequest true "body for Check Content"
// @Success 200
// @Failure 403
// @router /hash [post]
func (c *CheckController) Hash() {
	var req models.HashRequest
	resp := &models.Response{}
	if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil {
		// NOTE(review): debug print of the half-parsed request body —
		// consider removing or routing through the logger.
		fmt.Println(req)
		resp.Code = models.REQ_ERR
		resp.Msg = err.Error()
	} else {
		// Compare the stored group file's md5 against the supplied hash.
		if equal, _ := models.CmpMd5(models.GROUP_PATH+req.FileName, req.Hash); equal {
			resp.Code = models.EQUAL
			resp.Msg = "OK"
		} else {
			resp.Code = models.NOT_EQUAL
			resp.Msg = "Not Equal"
		}
	}
	c.Data["json"] = resp
	c.ServeJSON()
}
// File ...
// @Title File
// @Description receives an uploaded file and compares it according to the
// "type" form field: group comparison, small-file comparison, or none
// @Success 200
// @Failure 403 body is empty
// @router /file [post]
func (c *CheckController) File() {
	var (
		resp *models.Response
		f    multipart.File
		h    *multipart.FileHeader
		err  error
	)
	resp = &models.Response{}
	checkType := c.GetString("type") // which comparison mode the client wants
	f, h, err = c.GetFile("file")    // the uploaded file to compare
	if err != nil {
		// goto+label keeps the error responses in one place.
		resp.Code = models.REQ_ERR
		goto ERR
	}
	if f != nil {
		defer f.Close()
	}
	// Files are saved under static/upload/; the directory must already exist.
	err = c.SaveToFile("file", models.UPLOAD_PATH+h.Filename)
	if err != nil {
		resp.Code = models.FILE_SAVE_ERR
		goto ERR
	}
	switch checkType {
	case models.CMP_GROUP:
		// Fix: the original declared "files, err :=" here, shadowing the
		// outer err; after "goto ERR" the label then called Error() on the
		// still-nil outer err and panicked whenever CmpGroup failed.
		files, cmpErr := models.CmpGroup(h.Filename)
		if cmpErr != nil {
			err = cmpErr
			resp.Code = models.FILE_DIFF_ERR
			goto ERR
		}
		resp.Code = models.SUCCESS
		resp.Ack = files // the client is expected to send the listed files next
	case models.CMP_FILE:
		// Small-file comparison.
		resp.Code = models.SUCCESS
		resp.Diff = models.CmpFile(h.Filename)
	case models.CMP_NON:
		resp.Code = models.SUCCESS
		resp.Msg = models.CMP_NON
	}
	c.Data["json"] = resp
	c.ServeJSON()
	// Success responses return here.
	return
ERR:
	resp.Msg = err.Error()
	c.Data["json"] = resp
	c.ServeJSON()
}
|
package tblfmt
import (
"bytes"
"encoding/json"
"reflect"
"testing"
)
// TestJSONEncoder checks that EncodeJSON's output round-trips through
// encoding/json back into the map representation of the result set.
func TestJSONEncoder(t *testing.T) {
	resultSet := rs() // test fixture defined elsewhere in this package
	var err error
	var i int
	for resultSet.Next() {
		// NOTE(review): toMap(i) is assumed to return the remaining rows as
		// []map[string]interface{} so DeepEqual below can match — confirm.
		exp := resultSet.toMap(i)
		buf := new(bytes.Buffer)
		if err = EncodeJSON(buf, resultSet); err != nil {
			t.Fatalf("expected no error when JSON encoding, got: %v", err)
		}
		var res []map[string]interface{}
		b := buf.Bytes()
		if err = json.Unmarshal(b, &res); err != nil {
			t.Fatalf("expected no error unmarshaling JSON, got: %v\n-- encoded --\n%s\n-- end--", err, string(b))
		}
		if !reflect.DeepEqual(res, exp) {
			t.Errorf("expected results to be equal, got:\n-- encoded --\n%s\n-- end--", string(b))
		}
		i++
	}
}
|
package blocks
import (
"bytes"
"crypto/sha256"
"cryptom/internal"
"encoding/gob"
"fmt"
"log"
"time"
)
// Block is a single node of the chain.
type Block struct {
	Timestamp     int64  // unix time when the block was created
	Identifier    string // unique id from internal.GenerateID
	Data          []byte
	Transactions  []*Tx
	Hash          []byte // proof-of-work hash produced by NewPow
	PrevBlockHash []byte // hash of the preceding block; empty for genesis
	Header        Header
	Nonce         int // nonce found by the proof-of-work run
}
// NewBlock builds a block from the given data, transactions, and previous
// block hash, stamps it with the current time and a fresh identifier, and
// runs proof-of-work to fill in Hash and Nonce. Note: it does not itself
// append the block to any chain; callers do that.
func NewBlock(data string, transactions []*Tx, prevBlockHash []byte) *Block {
	block := &Block{
		Identifier:    internal.GenerateID(),
		Data:          []byte(data),
		Transactions:  transactions,
		PrevBlockHash: prevBlockHash,
		Timestamp:     time.Now().Unix(),
	}
	// Mine the block: the returned hash/nonce satisfy the PoW target.
	pow := NewPow(block)
	nonce, hash := pow.Run()
	block.Hash = hash[:]
	block.Nonce = nonce
	return block
}
// NewGenesisBlock creates the first block of a chain: it wraps the given
// coinbase-style transaction, uses "GENESIS" as data, and has no previous
// block hash.
func NewGenesisBlock(base *Tx) *Block {
	fmt.Println("Creating the GENESIS block")
	return NewBlock("GENESIS", []*Tx{base}, []byte{})
}
// Serialize encodes the block into a byte slice using encoding/gob.
// On encoding failure it logs the error and returns nil.
func (b *Block) Serialize() []byte {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(b); err != nil {
		log.Println("cannot serialize " + err.Error())
		return nil
	}
	return buf.Bytes()
}
// DeserializeBlock decodes a gob-encoded block produced by Serialize.
// It panics (via log.Panic) if the payload cannot be decoded.
func DeserializeBlock(d []byte) *Block {
	var blk Block
	if err := gob.NewDecoder(bytes.NewReader(d)).Decode(&blk); err != nil {
		log.Panic(err)
	}
	return &blk
}
// HashTransactions returns a single SHA-256 digest over the concatenation
// of every transaction hash in the block.
// TODO implement a merkle tree for store transaction hashes
func (b *Block) HashTransactions() []byte {
	var (
		hashes [][]byte
		hash   [32]byte
	)
	for _, tx := range b.Transactions {
		hashes = append(hashes, tx.Hash())
	}
	hash = sha256.Sum256(bytes.Join(hashes, []byte{}))
	return hash[:]
}
|
package cron
import (
"encoding/json"
"github.com/elves-project/agent/src/funcs"
"github.com/elves-project/agent/src/g"
"github.com/elves-project/agent/src/thrift/apache-thrift"
"github.com/elves-project/agent/src/thrift/hearbeat"
"github.com/gy-games-libs/seelog"
"net"
"os"
"strconv"
"time"
)
// HeartBeatMessage is the JSON payload returned by the heartbeat server:
// a map from app name to the version the server expects the agent to run.
type HeartBeatMessage struct {
	Data map[string]string `json:"data"`
}
// HearBeatCron triggers the heartbeat exchange every sec seconds, forever.
// Each tick runs the send and the local heartbeat-time update concurrently.
func HearBeatCron(sec int64) {
	ticker := time.NewTicker(time.Duration(sec) * time.Second)
	for range ticker.C {
		go sendToHeartBeat()
		go g.UpdateHbTime()
	}
}
// sendToHeartBeat reports this agent's state to the heartbeat server over
// thrift and reconciles the local app list against the server's answer:
// apps with a differing or missing version are updated/installed, and local
// apps the server no longer lists are removed from the config.
func sendToHeartBeat() {
	seelog.Info("Send Info To HeartBeat Start...")
	cfg := g.Config()
	protocolFactory := thrift.NewTBinaryProtocolFactoryDefault()
	// Fix: the port was previously interpolated with string(int), which
	// converts an integer to a rune, not its decimal digits.
	port := strconv.Itoa(cfg.HeartBeat.Port)
	seelog.Debug("[func:SendToHeartBeat] HeartBeat Addr:" + cfg.HeartBeat.Addr + ":" + port)
	transport, err := thrift.NewTSocket(net.JoinHostPort(cfg.HeartBeat.Addr, port))
	if err != nil {
		seelog.Error("[func:SendToHeartBeat] ", os.Stderr, "Error resolving address:", err)
		go g.SaveErrorStat("[func:SendToHeartBeat] " + "Error resolving address:" + err.Error())
	} else {
		// Fix: Close was deferred and SetTimeout called before the error
		// check, risking operations on a nil/invalid transport.
		defer transport.Close()
		transport.SetTimeout(time.Duration(cfg.HeartBeat.Timeout) * time.Second)
		client := hearbeat.NewHeartbeatServiceClientFactory(transport, protocolFactory)
		if err := transport.Open(); err != nil {
			seelog.Error("[func:SendToHeartBeat] ", os.Stderr, "Error opening socket to "+cfg.HeartBeat.Addr+":"+port, " ", err)
			go g.SaveErrorStat("[func:SendToHeartBeat] " + "Error opening socket to " + cfg.HeartBeat.Addr + ":" + port + " " + err.Error())
		} else {
			ai := &hearbeat.AgentInfo{}
			ai.IP = cfg.Ip
			ai.ID = cfg.Asset
			ai.Version = g.VERSION
			apps, _ := json.Marshal(cfg.Apps)
			ai.Apps = funcs.BytesString(apps)
			jsrets, err := client.HeartbeatPackage(ai)
			if err != nil {
				seelog.Error("[func:SendToHeartBeat] ", err)
				go g.SaveErrorStat("[func:SendToHeartBeat] " + err.Error())
			} else {
				var rdat HeartBeatMessage
				seelog.Debug("[func:SendToHeartBeat] HeartBeat Return App ", jsrets)
				// Fix: the payload was previously unmarshaled twice (once
				// with the error discarded).
				if err := json.Unmarshal([]byte(jsrets), &rdat); err != nil {
					seelog.Error("[func:SendToHeartBeat] ", err)
					go g.SaveErrorStat("[func:SendToHeartBeat] " + err.Error())
				} else {
					// Install or update apps whose version differs from the server's.
					for name, ver := range rdat.Data {
						if local, ok := cfg.Apps[name]; ok {
							if ver != local {
								seelog.Debug("[func:SendToHeartBeat] App '" + name + "' Need Update(Local Ver:" + local + ",Remote Ver:" + ver + ")..")
								go appupdate(name, ver)
							}
						} else {
							seelog.Debug("[func:SendToHeartBeat] App '" + name + "' Need Install(Ver:" + ver + ")..")
							go appupdate(name, ver)
						}
					}
					// Drop local apps the server no longer knows about.
					for localk, localv := range cfg.Apps {
						if _, ok := rdat.Data[localk]; !ok {
							seelog.Debug("[func:SendToHeartBeat] App '" + localk + "' Will Remove(Ver:" + localv + ")..")
							delete(cfg.Apps, localk)
							g.SaveConfig()
						}
					}
				}
			}
		}
	}
	seelog.Info("Send Info To HeartBeat Finish...")
}
// appupdate downloads appname at version appver, unpacks it under
// <root>/apps/<appname>, and records the new version in the config.
// The previous install directory is removed first; any failing step is
// logged and forwarded to the error-stat collector.
func appupdate(appname string, appver string) {
	seelog.Info("App[" + appname + "] Update Start..")
	url := g.Config().AppsDownloadAddr + "/" + appname + "_" + appver + ".zip"
	dir := g.Root + "/apps/" + appname
	seelog.Debug("[func:appupdate] Apps Download..(" + url + ")")
	// fail logs the per-step error and reports it, matching the original
	// message format exactly.
	fail := func(msg string, err error) {
		seelog.Error("[func:appupdate] "+msg+" ", err)
		go g.SaveErrorStat("[func:appupdate] " + msg + " " + err.Error())
	}
	if err := os.RemoveAll(dir); err != nil {
		fail("Apps ("+appname+") Update Remove First Error", err)
	} else if err := funcs.Download(url, dir, "app-worker-package.zip"); err != nil {
		fail("Apps ("+appname+") Download Error", err)
	} else if err := funcs.Unzip(dir+"/app-worker-package.zip", dir); err != nil {
		fail("New Apps Unzip Error", err)
	} else {
		g.Config().Apps[appname] = appver
		g.SaveConfig()
	}
	seelog.Info("App[" + appname + "] Update Finish..")
}
|
package main
import "fmt"
// main demonstrates basic variable declarations and if/else-if/else
// branching.
func main() {
	var myBool bool
	var myStr string
	var myInt int

	myBool = true
	myStr = "lava"
	myInt = 10

	if myBool {
		fmt.Println("myBool is true")
	}
	if myInt > 1 {
		fmt.Println("myInt is greater than 1")
	} else {
		// Fix: the else branch printed the same text as the then branch.
		fmt.Println("myInt is not greater than 1")
	}
	if myStr == "lava" || myInt < 9 {
		fmt.Println("condition 1")
	} else if !myBool {
		fmt.Println("condition 2")
	} else {
		fmt.Println("condition 3")
	}
}
|
package lineartable
import "testing"
// TestSortedArrayMerge exercises SortedArrayMerge with empty, one-sided,
// and general inputs. NOTE(review): results are only logged, never
// asserted — consider comparing against expected slices.
func TestSortedArrayMerge(t *testing.T) {
	a := []int64{1, 3, 5, 7, 9, 10, 11}
	b := []int64{2, 4, 6, 8}
	t.Log(SortedArrayMerge([]int64{}, b))
	t.Log(SortedArrayMerge(a, []int64{}))
	t.Log(SortedArrayMerge([]int64{}, []int64{}))
	t.Log(SortedArrayMerge(a, b))
}
// TestSortedArrayMerge2 exercises the alternate implementation with the
// same empty, one-sided, and general inputs as TestSortedArrayMerge.
// NOTE(review): results are only logged, never asserted.
func TestSortedArrayMerge2(t *testing.T) {
	a := []int64{1, 3, 5, 7, 9, 10, 11}
	b := []int64{2, 4, 6, 8}
	t.Log(SortedArrayMerge2([]int64{}, b))
	t.Log(SortedArrayMerge2(a, []int64{}))
	t.Log(SortedArrayMerge2([]int64{}, []int64{}))
	t.Log(SortedArrayMerge2(a, b))
}
|
package frame
import (
"bytes"
"encoding/binary"
"io"
"net"
"sync"
"testing"
randc "crypto/rand"
randm "math/rand"
"github.com/ebusto/jitter"
"github.com/ebusto/mux"
)
// TestFrame ping-pongs an incrementing counter between the two ends of a
// framed pipe until both sides have seen max, verifying no value is
// delivered twice.
func TestFrame(t *testing.T) {
	a, b := net.Pipe()
	fa := New(jitter.New(a, nil), nil)
	fb := New(jitter.New(b, nil), nil)
	ok := make(chan int64)
	max := int64(100)
	// add writes 0, then echoes each received value incremented by one,
	// until max arrives; the final value is reported on ok.
	add := func(name string, s io.ReadWriter) {
		var n int64
		seen := make(map[int64]bool)
		t.Logf("[%s] writing %d\n", name, n)
		if err := binary.Write(s, binary.LittleEndian, n); err != nil {
			panic(err)
		}
		for {
			if err := binary.Read(s, binary.LittleEndian, &n); err != nil {
				panic(err)
			}
			t.Logf("[%s] read %d, sending %d\n", name, n, n+1)
			// Fix: the original Fatalf was missing the name argument for
			// its %s verb (a go vet error), and shadowed the ok channel.
			// NOTE(review): Fatalf from a non-test goroutine does not stop
			// the test goroutine — consider t.Error + return.
			if seen[n] {
				t.Fatalf("[%s] already seen %d\n", name, n)
			}
			seen[n] = true
			if err := binary.Write(s, binary.LittleEndian, n+1); err != nil {
				panic(err)
			}
			if n == max {
				break
			}
		}
		ok <- n
	}
	go add("A", fa)
	go add("B", fb)
	if n := <-ok; n != max {
		t.Errorf("[1] n = %d, expected %d", n, max)
	}
	if n := <-ok; n != max {
		t.Errorf("[2] n = %d, expected %d", n, max)
	}
}
// TestFrameMux runs five concurrent stream round trips over a single muxed
// frame connection, one stream id per goroutine.
func TestFrameMux(t *testing.T) {
	a, b := net.Pipe()
	ma := mux.New(New(jitter.New(a, nil), nil))
	mb := mux.New(New(jitter.New(b, nil), nil))
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go testFrameMux(t, &wg, byte(i), ma, mb)
	}
	wg.Wait()
}
// testFrameMux pushes 10 KB of random bytes through stream id in randomly
// sized writes, reads them back in randomly sized chunks on the peer, and
// verifies the data survives the frame+mux round trip unchanged.
//
// NOTE(review): t.Fatal is invoked from spawned goroutines below; the
// testing package requires FailNow to run on the test goroutine — consider
// t.Error + return instead.
func testFrameMux(t *testing.T, wg *sync.WaitGroup, id byte, ma *mux.Mux, mb *mux.Mux) {
	sa := ma.Stream(id)
	sb := mb.Stream(id)
	src := make([]byte, 10000)
	if _, err := randc.Read(src); err != nil {
		t.Fatal(err)
	}
	ok := make(chan bool)
	// Writer: send src in random-length slices.
	go func() {
		i := 0
		for i < len(src) {
			l := i
			h := i + randm.Intn(len(src)-i) + 1
			i = h
			n, err := sa.Write(src[l:h])
			if err != nil {
				t.Fatal(err)
			}
			t.Logf("[%d] wrote %d [%d], total %d\n", id, n, h-l, i)
		}
		t.Logf("[%d] write done\n", id)
		ok <- true
	}()
	var dst []byte
	// Reader: accumulate into dst until all bytes have arrived.
	go func() {
		buf := make([]byte, 16384)
		for len(dst) != len(src) {
			h := randm.Intn(len(buf))
			n, err := sb.Read(buf[0:h])
			if err != nil {
				t.Fatal(err)
			}
			dst = append(dst, buf[0:n]...)
			t.Logf("[%d] read %d, total %d\n", id, n, len(dst))
		}
		t.Logf("[%d] read done\n", id)
		ok <- true
	}()
	<-ok
	<-ok
	if !bytes.Equal(src, dst) {
		t.Errorf("[%d] mismatch", id)
	}
	wg.Done()
}
|
package controller
import (
"constant"
"model"
"net/http"
"util/context"
"github.com/labstack/echo"
"github.com/sirupsen/logrus"
)
/**
* @apiDefine AddEmergCT AddEmergCT
* @apiDescription 添加紧急联系人
*
* @apiParam {String} name 名字
* @apiParam {String} phone_num 手机号
*
* @apiParamExample {json} Request-Example:
* {
* "name": "名字",
* "phone_num": "手机号",
* }
*
* @apiSuccess {Number} status=200 状态码
* @apiSuccess {Object} data 正确返回数据
*
* @apiSuccessExample Success-Response:
* HTTP/1.1 200 OK
* {
* "status": 200,
* "data": ""
* }
*
* @apiError {Number} status 状态码
* @apiError {String} err_msg 错误信息
*
* @apiErrorExample Error-Response:
* HTTP/1.1 401 Unauthorized
* {
* "status": 401,
* "err_msg": "Unauthorized"
* }
*/
/**
* @api {post} /api/v1/user/action/add_emerg_ct AddEmergCT
* @apiVersion 1.0.0
* @apiName AddEmergCT
* @apiGroup User
* @apiUse AddEmergCT
*/
func AddEmergCT(c echo.Context) error {
param := model.NamePhone{}
err := c.Bind(¶m)
if err != nil {
writeUserLog("AddEmergCT", constant.ErrorMsgParamWrong, err)
return context.RetError(c, http.StatusBadRequest, http.StatusBadRequest, constant.ErrorMsgParamWrong)
}
userID := context.GetJWTUserID(c)
if err != nil || userID == "" {
writeUserLog("AddEmergCT", "unAuth", err)
return context.RetError(c, http.StatusUnauthorized, http.StatusUnauthorized, "unAuth")
}
err = model.AddEmergCT(userID, param)
if err != nil {
writeUserLog("AddEmergCT", "添加紧急联系人时服务器错误", err)
return context.RetError(c, http.StatusBadGateway, http.StatusBadGateway, "添加紧急联系人时服务器错误")
}
return context.RetData(c, "")
}
/**
* @apiDefine AddShareCT AddShareCT
* @apiDescription 添加共享位置联系人
*
* @apiParam {String} user_id 共享联系人的user_id
*
* @apiParamExample {json} Request-Example:
* {
* "user_id": "user_id",
* }
*
* @apiSuccess {Number} status=200 状态码
* @apiSuccess {Object} data 正确返回数据
*
* @apiSuccessExample Success-Response:
* HTTP/1.1 200 OK
* {
* "status": 200,
* "data": ""
* }
*
* @apiError {Number} status 状态码
* @apiError {String} err_msg 错误信息
*
* @apiErrorExample Error-Response:
* HTTP/1.1 401 Unauthorized
* {
* "status": 401,
* "err_msg": "Unauthorized"
* }
*/
/**
* @api {post} /api/v1/user/action/add_share_ct AddShareCT
* @apiVersion 1.0.0
* @apiName AddShareCT
* @apiGroup User
* @apiUse AddShareCT
*/
func AddShareCT(c echo.Context) error {
param := UserIDParam{}
err := c.Bind(¶m)
if err != nil {
writeUserLog("AddShareCT", constant.ErrorMsgParamWrong, err)
return context.RetError(c, http.StatusBadRequest, http.StatusBadRequest, constant.ErrorMsgParamWrong)
}
userID := context.GetJWTUserID(c)
if err != nil || userID == "" {
writeUserLog("AddShareCT", "unAuth", err)
return context.RetError(c, http.StatusUnauthorized, http.StatusUnauthorized, "unAuth")
}
err = model.AddShareCT(userID, param.UserID)
if err != nil {
writeUserLog("AddShareCT", "添加共享联系人错误", err)
return context.RetError(c, http.StatusBadGateway, http.StatusBadGateway, "添加共享联系人错误")
}
return context.RetData(c, "")
}
/**
* @apiDefine DelEmergCT DelEmergCT
* @apiDescription 删除紧急联系人
*
* @apiParam {String} name 名字
*
* @apiParamExample {json} Request-Example:
* {
* "name": "名字",
* }
*
* @apiSuccess {Number} status=200 状态码
* @apiSuccess {Object} data 正确返回数据
*
* @apiSuccessExample Success-Response:
* HTTP/1.1 200 OK
* {
* "status": 200,
* "data": ""
* }
*
* @apiError {Number} status 状态码
* @apiError {String} err_msg 错误信息
*
* @apiErrorExample Error-Response:
* HTTP/1.1 401 Unauthorized
* {
* "status": 401,
* "err_msg": "Unauthorized"
* }
*/
/**
* @api {delete} /api/v1/user/action/del_emerg_ct DelEmergCT
* @apiVersion 1.0.0
* @apiName DelEmergCT
* @apiGroup User
* @apiUse DelEmergCT
*/
func DelEmergCT(c echo.Context) error {
param := model.NamePhone{}
err := c.Bind(¶m)
if err != nil {
writeUserLog("DelEmergCT", constant.ErrorMsgParamWrong, err)
return context.RetError(c, http.StatusBadRequest, http.StatusBadRequest, constant.ErrorMsgParamWrong)
}
userID := context.GetJWTUserID(c)
if err != nil || userID == "" {
writeUserLog("DelEmergCT", "unAuth", err)
return context.RetError(c, http.StatusUnauthorized, http.StatusUnauthorized, "unAuth")
}
err = model.DelEmergCT(userID, param.Name)
if err != nil || userID == "" {
writeUserLog("DelEmergCT", "删除紧急联系人时服务器错误", err)
return context.RetError(c, http.StatusBadGateway, http.StatusBadGateway, "删除紧急联系人时服务器错误")
}
return context.RetData(c, "")
}
// writeUserLog emits a structured warning for this controller file,
// tagging the originating function and the underlying error.
func writeUserLog(funcName, errMsg string, err error) {
	logger.WithFields(logrus.Fields{
		"package":  "controller",
		"file":     "user.go",
		"function": funcName,
		"err":      err,
	}).Warn(errMsg)
}
|
/*
* @lc app=leetcode.cn id=79 lang=golang
*
* [79] 单词搜索
*/
package main
import "fmt"
// @lc code=start
// dx/dy are the row/column offsets of the four orthogonal neighbors
// (up, left, right, down), indexed together.
var dx = []int{-1, 0, 0, 1}
var dy = []int{0, -1, 1, 0}

// backtracking reports whether word[index:] can be spelled starting at
// cell (row, col), moving only between orthogonally adjacent, unvisited
// cells. visited is marked during the search and restored on failure.
func backtracking(board [][]byte, word string, row, col, index int, visited [][]bool) bool {
	if index == len(word)-1 {
		return board[row][col] == word[index]
	}
	if board[row][col] != word[index] {
		return false
	}
	visited[row][col] = true
	rows, cols := len(board), len(board[0])
	for d := 0; d < 4; d++ {
		nr, nc := row+dx[d], col+dy[d]
		if nr < 0 || nr >= rows || nc < 0 || nc >= cols || visited[nr][nc] {
			continue
		}
		if backtracking(board, word, nr, nc, index+1, visited) {
			return true
		}
	}
	visited[row][col] = false
	return false
}
// exist reports whether word can be traced through horizontally or
// vertically adjacent cells of board, using each cell at most once.
func exist(board [][]byte, word string) bool {
	rows, cols := len(board), len(board[0])
	visited := make([][]bool, rows)
	for r := range visited {
		visited[r] = make([]bool, cols)
	}
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			if backtracking(board, word, r, c, 0, visited) {
				return true
			}
		}
	}
	return false
}
// @lc code=end
// main runs the word-search sample. "ABCESEEDASFC" is traceable without
// reusing a cell — (0,0)A (0,1)B (0,2)C (0,3)E (1,3)S (2,3)E (2,2)E (2,1)D
// (2,0)A (1,0)S (1,1)F (1,2)C — so this prints true.
func main() {
	fmt.Println(exist([][]byte{
		{'A', 'B', 'C', 'E'},
		{'S', 'F', 'C', 'S'},
		{'A', 'D', 'E', 'E'},
	}, "ABCESEEDASFC"))
}
|
package entity
// Entity is implemented by persistent models that know their own database
// table name.
type Entity interface {
	// TableName returns the table the implementing model is stored in.
	TableName() string
}
|
package presenters
// import (
// "fmt"
// "net/url"
// "time"
// "github.com/messagedb/messagedb/meta/schema"
// )
// type Team struct {
// Id string `json:"id"`
// OrganizationId string `json:"org_id"`
// Name string `json:"name"`
// Description string `json:"description"`
// TeamType schema.TeamType `json:"team_type"`
// CreatedAt time.Time `json:"created_at"`
// UpdatedAt time.Time `json:"updated_at"`
// }
// func (t *Team) GetLocation() *url.URL {
// uri, err := url.Parse(fmt.Sprintf("/teams/%s", t.Id))
// if err != nil {
// return nil
// }
// return uri
// }
// func TeamPresenter(t *schema.Team) *Team {
// team := &Team{}
// team.Id = t.Id.Hex()
// team.OrganizationId = t.OrganizationId.Hex()
// team.Name = t.Name
// team.Description = t.Description
// team.TeamType = t.TeamType
// team.CreatedAt = t.CreatedAt
// team.UpdatedAt = t.UpdatedAt
// return team
// }
// func TeamCollectionPresenter(items []*schema.Team) []*Team {
// collection := make([]*Team, 0)
// for _, item := range items {
// collection = append(collection, TeamPresenter(item))
// }
// return collection
// }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.