text stringlengths 11 4.05M |
|---|
/*
Copyright 2020 Huawei Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ymodem
// BlockKind denotes large or small block size.
type BlockKind bool

const (
	// SmallBlock indicates 128 bytes per block.
	SmallBlock BlockKind = false
	// LargeBlock indicates 1024 bytes per block.
	LargeBlock BlockKind = true
)

// String returns a description of the kind of transfer block.
func (b BlockKind) String() string {
	switch b {
	case LargeBlock:
		return "large (1024)"
	default:
		return "normal (128)"
	}
}

// size reports the data payload length in bytes for this block kind.
func (b BlockKind) size() int {
	if b == SmallBlock {
		return 128
	}
	return 1024
}
|
package main
//求 1+2+...+n ,要求不能使用乘除法、for、while、if、else、switch、case等关键字及条件判断语句(A?B:C)。
// sumNums computes 1+2+...+n under the puzzle constraints: no
// multiplication/division, no loops, and no conditional statements.
// Recursion via a closure replaces the loop, and the short-circuit
// behavior of && replaces the termination test.
func sumNums(n int) int {
	total := 0
	var walk func(v int) bool
	walk = func(v int) bool {
		total += v
		// Once v reaches 0, `v > 0` is false and && never evaluates
		// the recursive call, stopping the descent.
		return v > 0 && walk(v-1)
	}
	walk(n)
	return total
}
// main demonstrates sumNums on a small example.
func main() {
	total := sumNums(6)
	println(total)
}
|
package css_test
import (
"testing"
sitter "github.com/kiteco/go-tree-sitter"
"github.com/kiteco/go-tree-sitter/css"
"github.com/stretchr/testify/assert"
)
// TestGrammar parses a minimal stylesheet with the CSS grammar and
// checks the S-expression rendering of the resulting syntax tree.
func TestGrammar(t *testing.T) {
	is := assert.New(t)

	p := sitter.NewParser()
	p.SetLanguage(css.GetLanguage())

	src := []byte(`
div {
background-color: #010101;
}
`)
	tree := p.Parse(src)
	is.Equal(
		"(stylesheet (rule_set (selectors (tag_name)) (block (declaration (property_name) (color_value)))))",
		tree.RootNode().String(),
	)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"time"
"chromiumos/tast/common/tape"
"chromiumos/tast/ctxutil"
"chromiumos/tast/remote/policyutil"
"chromiumos/tast/rpc"
"chromiumos/tast/services/cros/enterpriseconnectors"
"chromiumos/tast/services/cros/graphics"
"chromiumos/tast/testing"
)
// deviceTrustInsessionEnrollmentTimeout bounds how long the leased test
// account may be held for enrollment (passed to tape.WithTimeout below).
const deviceTrustInsessionEnrollmentTimeout = 7 * time.Minute

// userParamInsession parameterizes the test variants: which TAPE account
// pool to lease from and whether the Device Trust login is expected to work.
type userParamInsession struct {
	poolID        string
	loginPossible bool
}
// init registers the DeviceTrustInsession test and its two parameterized
// variants (Device Trust enabled vs. disabled account pools).
func init() {
	testing.AddTest(&testing.Test{
		Func:         DeviceTrustInsession,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Checks that Device Trust is working insession with a fake IdP",
		Contacts: []string{
			"lmasopust@google.com",
			"rodmartin@google.com",
			"cbe-device-trust-eng@google.com",
		},
		SoftwareDeps: []string{
			"chrome",
			"chrome_internal",
			"reboot",
		},
		ServiceDeps: []string{
			"tast.cros.hwsec.OwnershipService",
			"tast.cros.tape.Service",
			"tast.cros.enterpriseconnectors.DeviceTrustService",
			"tast.cros.graphics.ScreenshotService",
		},
		Attr: []string{
			"group:mainline", "informational",
		},
		VarDeps: []string{
			tape.ServiceAccountVar,
		},
		Params: []testing.Param{{
			Name: "host_allowed",
			Val: userParamInsession{
				poolID:        tape.DeviceTrustEnabled,
				loginPossible: true,
			},
		}, {
			Name: "host_not_allowed",
			Val: userParamInsession{
				poolID:        tape.DeviceTrustDisabled,
				loginPossible: false,
			},
		}},
		// NOTE(review): this duplicates deviceTrustInsessionEnrollmentTimeout's
		// value (7 min); consider referencing the constant to keep them in sync.
		Timeout: 7 * time.Minute,
	})
}
// DeviceTrustInsession enrolls the DUT with a leased test account, then
// performs a Device Trust login against a fake IdP and verifies that the
// login succeeds or fails as the selected account pool dictates.
func DeviceTrustInsession(ctx context.Context, s *testing.State) {
	param := s.Param().(userParamInsession)
	poolID := param.poolID

	// Shorten deadline to leave time for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 3*time.Minute)
	defer cancel()

	// Leave the DUT in a clean state regardless of how the test ends.
	defer func(ctx context.Context) {
		if err := policyutil.EnsureTPMAndSystemStateAreReset(ctx, s.DUT(), s.RPCHint()); err != nil {
			s.Error("Failed to reset TPM after test: ", err)
		}
	}(cleanupCtx)

	// Start from a clean TPM/system state before enrolling.
	if err := policyutil.EnsureTPMAndSystemStateAreReset(ctx, s.DUT(), s.RPCHint()); err != nil {
		s.Fatal("Failed to reset TPM: ", err)
	}

	cl, err := rpc.Dial(ctx, s.DUT(), s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
	}
	defer cl.Close(cleanupCtx)

	tapeClient, err := tape.NewClient(ctx, []byte(s.RequiredVar(tape.ServiceAccountVar)))
	if err != nil {
		s.Fatal("Failed to create tape client: ", err)
	}

	timeout := int32(deviceTrustInsessionEnrollmentTimeout.Seconds())
	// Create an account manager and lease a test account for the duration of the test.
	accManager, acc, err := tape.NewOwnedTestAccountManagerFromClient(ctx, tapeClient, false /*lock*/, tape.WithTimeout(timeout), tape.WithPoolID(poolID))
	if err != nil {
		s.Fatal("Failed to create an account manager and lease an account: ", err)
	}
	defer accManager.CleanUp(cleanupCtx)

	// Capture a screenshot for debugging if the test has failed by the time
	// the deferred call runs.
	screenshotService := graphics.NewScreenshotServiceClient(cl.Conn)
	captureScreenshotOnError := func(ctx context.Context, hasError func() bool) {
		if !hasError() {
			return
		}
		screenshotService.CaptureScreenshot(ctx, &graphics.CaptureScreenshotRequest{FilePrefix: "deviceTrustLoginError"})
	}
	defer captureScreenshotOnError(ctx, s.HasError)

	service := enterpriseconnectors.NewDeviceTrustServiceClient(cl.Conn)

	s.Log("Enrolling device")
	if _, err = service.Enroll(ctx, &enterpriseconnectors.EnrollRequest{User: acc.Username, Pass: acc.Password}); err != nil {
		s.Fatal("Remote call Enroll() failed: ", err)
	}

	// Deprovision the DUT at the end of the test.
	defer func(ctx context.Context) {
		if err := tapeClient.DeprovisionHelper(cleanupCtx, cl, acc.CustomerID); err != nil {
			s.Fatal("Failed to deprovision device: ", err)
		}
	}(cleanupCtx)

	res, err := service.ConnectToFakeIdP(ctx, &enterpriseconnectors.ConnectToFakeIdPRequest{User: acc.Username, Pass: acc.Password})
	if err != nil {
		s.Fatal("Remote call ConnectToFakeIdP() failed: ", err)
	}

	// "Succesful" (sic) is the generated proto field name; do not rename here.
	if res.Succesful != param.loginPossible {
		s.Errorf("Unexpected value for loginPossible: got %t, want %t", res.Succesful, param.loginPossible)
	}
}
|
package main
import (
"testing"
)
// TestCode drives countAO with a table of apple/orange drop scenarios and
// verifies the returned counts of fruit that landed on the house.
func TestCode(t *testing.T) {
	cases := []struct {
		houseStart int
		houseEnd   int
		appleLoc   int
		orangeLoc  int
		apples     []int
		oranges    []int
		appleOut   int
		orangeOut  int
	}{
		{
			houseStart: 7,
			houseEnd:   11,
			appleLoc:   5,
			orangeLoc:  15,
			apples:     []int{-2, 2, 1},
			oranges:    []int{5, -6},
			appleOut:   1,
			orangeOut:  1,
		},
	}
	for _, tc := range cases {
		gotApples, gotOranges := countAO(
			tc.houseStart, tc.houseEnd, tc.appleLoc, tc.orangeLoc, tc.apples, tc.oranges,
		)
		if gotApples != tc.appleOut {
			t.Errorf(" Got Apples %v while Expecting %v", gotApples, tc.appleOut)
		}
		if gotOranges != tc.orangeOut {
			t.Errorf("Got Oranges %v while Expecting %v", gotOranges, tc.orangeOut)
		}
	}
}
|
package core
// Config holds the inbound credentials and ports used to generate the
// v2ray server configuration (vmess over TCP, shadowsocks, and vmess
// over WebSocket).
type Config struct {
	VmessUUID           string // client UUID for the plain vmess inbound
	VmessPort           uint   // listen port for the plain vmess inbound
	ShadowsocksPassword string
	ShadowsocksPort     uint
	VmessWsPort         uint   // listen port for vmess-over-WebSocket
	VmessWsUUID         string
	VmessWsPath         string // WebSocket path for the ws inbound
}
// Inbound mirrors a single entry of the v2ray "inbounds" JSON array.
// Settings covers both shadowsocks (Method/Password) and vmess (Clients);
// unused fields simply stay at their zero values when marshalled.
type Inbound struct {
	Port     int    `json:"port"`
	Protocol string `json:"protocol"`
	Settings struct {
		Method   string `json:"method"`
		Password string `json:"password"`
		Clients  []struct {
			ID      string `json:"id"`
			Level   int    `json:"level"`
			AlterID int    `json:"alterId"`
		} `json:"clients"`
	} `json:"settings"`
	Listen         string `json:"listen,omitempty"`
	Tag            string `json:"tag,omitempty"`
	StreamSettings struct {
		Network    string `json:"network"`
		WsSettings struct {
			Path string `json:"path"`
		} `json:"wsSettings"`
	} `json:"streamSettings,omitempty"`
}
// V2rayConfig mirrors the top-level v2ray JSON config document:
// inbounds, outbounds, and routing rules.
type V2rayConfig struct {
	Inbounds  []Inbound `json:"inbounds"`
	Outbounds []struct {
		Protocol string `json:"protocol"`
		Settings struct {
		} `json:"settings"`
		Tag string `json:"tag,omitempty"`
	} `json:"outbounds"`
	Routing struct {
		Rules []struct {
			Type        string   `json:"type"`
			IP          []string `json:"ip,omitempty"`
			OutboundTag string   `json:"outboundTag"`
			InboundTag  []string `json:"inboundTag,omitempty"`
		} `json:"rules"`
	} `json:"routing"`
}
// IpRes is the response shape of an external "what is my IP" style
// service; Origin carries the caller's public address.
type IpRes struct {
	Origin string `json:"origin"`
}
|
// +build ignore
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// START OMIT
package runtime
// g is the runtime's per-goroutine descriptor (slide excerpt from the Go
// runtime sources). Several field offsets are relied upon by external
// tooling (liblink, runtime/cgo), so the field order must not change.
type g struct {
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic *_panic // innermost panic - offset known to liblink // HL
	_defer *_defer // innermost defer
	m      *m      // current m; offset known to arm liblink
	// ...
	goid int64 // HL
	// END OMIT
	waitsince      int64  // approx time when the g become blocked
	waitreason     string // if status==Gwaiting
	schedlink      guintptr
	preempt        bool // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool // panic (instead of crash) on unexpected fault address
	preemptscan    bool // preempted g does scan for gc
	gcscandone     bool // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool // false at start of gc cycle, true if G has not run since last scan; transition from true to false by calling queueRescan and false to true by calling dequeueRescan
	throwsplit     bool // must not split stack
	raceignore     int8 // ignore race detection events
	sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64  // cputicks when syscall has returned (for tracing)
	traceseq       uint64 // trace event sequencer
	tracelastp     puintptr // last P emitted an event for this goroutine
	lockedm        *m
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr // pc of go statement that created this goroutine
	startpc        uintptr // pc of goroutine function
	racectx        uintptr
	waiting        *sudog    // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt        []uintptr // cgo traceback context
}
|
package cmd
import (
"fmt"
"github.com/georgevella/helmtmpl/config"
"github.com/spf13/cobra"
"strings"
)
var environment string
// init registers the render sub-command and its flags with the root command.
func init() {
	rootCmd.AddCommand(renderCmd)
	// BUG FIX: the --environment flag was previously registered on
	// rootCmd.Flags() — the root command's *local* flag set — so
	// `render -e <name>` was not accepted even though only renderCmd's
	// Run handler reads the variable. Register it on renderCmd instead.
	renderCmd.Flags().StringVarP(&environment, "environment", "e", "", "Name of environment to render")
}
// renderCmd renders a Helm chart: either against the environment named
// with --environment, or — when none is given — against environments
// matched by the selectors declared in the configuration.
var renderCmd = &cobra.Command{
	Use:   "render [CHART] [NAME]",
	Short: "Renders a Helm Chart with the selected environment variables and templates",
	Long:  ``,
	Run: func(cmd *cobra.Command, args []string) {
		cfg := config.LoadConfiguration()
		for _, env := range cfg.Environments {
			if len(environment) == 0 {
				// determine environment from selectors specified in config
				for _, selector := range env.Selector {
					switch strings.ToLower(selector.Type) {
					// NOTE(review): the "branch" selector case is empty, so
					// selector-based environment detection currently does
					// nothing — looks unimplemented; confirm intent.
					case "branch":
					}
				}
			} else {
				if env.Name == environment {
					renderEnvironment(&env, cfg)
				}
			}
		}
	},
}
// renderEnvironment renders the chart for one environment. Currently a
// stub: it only dumps the environment value; cfg is not yet used.
func renderEnvironment(env *config.Environment, cfg *config.Config) {
	fmt.Print(env)
}
package users
import (
"github.com/graphql-go/graphql"
"github.com/juliotorresmoreno/unravel-server/crud"
"github.com/juliotorresmoreno/unravel-server/db"
"github.com/juliotorresmoreno/unravel-server/models"
)
// tipos maps user field names to their GraphQL scalar types; it is the
// single lookup table used when declaring object fields and mutation
// arguments below. Note the timestamp keys are spelled "create_at" and
// "update_at" — lookups must use these exact keys (a miss returns nil).
var tipos = map[string]graphql.Type{
	"id":        graphql.Int,
	"nombres":   graphql.String,
	"apellidos": graphql.String,
	"fullName":  graphql.String,
	"email":     graphql.String,
	"usuario":   graphql.String,
	"passwd":    graphql.String,
	"recovery":  graphql.String,
	"tipo":      graphql.String,
	"code":      graphql.String,
	"create_at": graphql.String,
	"update_at": graphql.String,
}
// categoryType describes the GraphQL object returned by the queries and
// mutations below; field types are looked up in the tipos table.
var categoryType = graphql.NewObject(graphql.ObjectConfig{
	Name: "Category",
	Fields: graphql.Fields{
		"id": &graphql.Field{
			Type: tipos["id"],
		},
		"nombres": &graphql.Field{
			Type: tipos["nombres"],
		},
		"apellidos": &graphql.Field{
			Type: tipos["apellidos"],
		},
		"fullName": &graphql.Field{
			Type: tipos["fullName"],
		},
		"email": &graphql.Field{
			Type: tipos["email"],
		},
		"usuario": &graphql.Field{
			Type: tipos["usuario"],
		},
		"passwd": &graphql.Field{
			Type: tipos["passwd"],
		},
		"recovery": &graphql.Field{
			Type: tipos["recovery"],
		},
		"tipo": &graphql.Field{
			Type: tipos["tipo"],
		},
		"code": &graphql.Field{
			Type: tipos["code"],
		},
		// BUG FIX: these lookups previously used "created_at"/"updated_at",
		// which are not keys of tipos (the map declares "create_at" and
		// "update_at"), so both fields got a nil Type. The exposed GraphQL
		// field names are unchanged.
		"created_at": &graphql.Field{
			Type: tipos["create_at"],
		},
		"updated_at": &graphql.Field{
			Type: tipos["update_at"],
		},
	},
})
//GetData Obtiene los datos
var GetData = graphql.Fields{
"categoryList": &graphql.Field{
Type: graphql.NewList(categoryType),
Description: "List of category",
Args: graphql.FieldConfigArgument{
"id": &graphql.ArgumentConfig{
Type: graphql.Int,
},
},
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
var orm = db.GetXORM()
defer orm.Close()
data := make([]models.Category, 0)
err := crud.GraphQLGet(params, orm, &data)
return data, err
},
},
}
//SetData Establece los datos
var SetData = graphql.Fields{
"createCategory": &graphql.Field{
Type: categoryType,
Description: "Create new category",
Args: graphql.FieldConfigArgument{
"nombres": &graphql.ArgumentConfig{
Type: tipos["nombres"],
},
"apellidos": &graphql.ArgumentConfig{
Type: tipos["apellidos"],
},
"fullName": &graphql.ArgumentConfig{
Type: tipos["fullName"],
},
"email": &graphql.ArgumentConfig{
Type: tipos["email"],
},
"usuario": &graphql.ArgumentConfig{
Type: tipos["usuario"],
},
"passwd": &graphql.ArgumentConfig{
Type: tipos["passwd"],
},
"recovery": &graphql.ArgumentConfig{
Type: tipos["recovery"],
},
"tipo": &graphql.ArgumentConfig{
Type: tipos["tipo"],
},
"code": &graphql.ArgumentConfig{
Type: tipos["code"],
},
},
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
var orm = db.GetXORM()
defer orm.Close()
data := models.User{}
_, err := crud.GraphQLPut(params, orm, &data)
return data, err
},
},
"updateCategory": &graphql.Field{
Type: categoryType,
Description: "Update existing category, mark it done or not done",
Args: graphql.FieldConfigArgument{
"id": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(tipos["id"]),
},
"nombres": &graphql.ArgumentConfig{
Type: tipos["nombres"],
},
"apellidos": &graphql.ArgumentConfig{
Type: tipos["apellidos"],
},
"fullName": &graphql.ArgumentConfig{
Type: tipos["fullName"],
},
"email": &graphql.ArgumentConfig{
Type: tipos["email"],
},
"usuario": &graphql.ArgumentConfig{
Type: tipos["usuario"],
},
"passwd": &graphql.ArgumentConfig{
Type: tipos["passwd"],
},
"recovery": &graphql.ArgumentConfig{
Type: tipos["recovery"],
},
"tipo": &graphql.ArgumentConfig{
Type: tipos["tipo"],
},
"code": &graphql.ArgumentConfig{
Type: tipos["code"],
},
},
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
var orm = db.GetXORM()
defer orm.Close()
data := models.User{}
crud.GraphQLPost(params, orm, &data)
return data, nil
},
},
"deleteCategory": &graphql.Field{
Type: categoryType,
Description: "Delete existing category",
Args: graphql.FieldConfigArgument{
"id": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(tipos["id"]),
},
},
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
var orm = db.GetXORM()
defer orm.Close()
data := models.User{}
crud.GraphQLDelete(params, orm, &data)
return data, nil
},
},
}
|
package common
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAPIError checks the basic APIError accessors: message passthrough,
// missing/retriable classification, and 429 detection.
func TestAPIError(t *testing.T) {
	ae := NotFound("ClusterNotReadyException: test")
	ae.Resource = "c"
	assert.Equal(t, "ClusterNotReadyException: test", ae.Error())
	assert.True(t, ae.IsMissing())
	assert.True(t, ae.IsRetriable())
	ae.StatusCode = http.StatusTooManyRequests
	assert.True(t, ae.IsTooManyRequests())
}

// TestCommonErrorFromWorkspaceClientToE2 verifies that a workspace-host
// client calling an accounts API gets a clarifying error, while a call to
// its own workspace API does not.
func TestCommonErrorFromWorkspaceClientToE2(t *testing.T) {
	ws := DatabricksClient{
		Host: "https://qwerty.cloud.databricks.com/",
	}
	accountsAPIForWorkspaceClient := ws.commonErrorClarity(&http.Response{
		Request: httptest.NewRequest(
			"GET", "https://accounts.cloud.databricks.com/api/2.0/accounts/a/log-delivery",
			nil),
	})
	require.Error(t, accountsAPIForWorkspaceClient)
	assert.True(t, strings.HasPrefix(accountsAPIForWorkspaceClient.Error(),
		"Accounts API (/api/2.0/accounts/a/log-delivery) requires you to set accounts.cloud.databricks.com"),
		"Actual message: %s", accountsAPIForWorkspaceClient.Error())
	workspaceAPIFromWorkspaceClient := ws.commonErrorClarity(&http.Response{
		Request: httptest.NewRequest(
			"GET", "https://qwerty.cloud.databricks.com/api/2.0/clusters/list",
			nil),
	})
	assert.Nil(t, workspaceAPIFromWorkspaceClient)
}

// TestCommonErrorFromE2ClientToWorkspace is the mirror case: an
// accounts-host client calling a workspace API gets a clarifying error,
// while a call to the accounts API does not.
func TestCommonErrorFromE2ClientToWorkspace(t *testing.T) {
	ws := DatabricksClient{
		Host: "accounts.cloud.databricks.com",
	}
	ws.fixHost()
	accountsAPIForWorkspaceClient := ws.commonErrorClarity(&http.Response{
		Request: httptest.NewRequest(
			"GET", "https://querty.cloud.databricks.com/api/2.0/clusters/list",
			nil),
	})
	require.Error(t, accountsAPIForWorkspaceClient)
	assert.True(t, strings.HasPrefix(accountsAPIForWorkspaceClient.Error(),
		"Databricks API (/api/2.0/clusters/list) requires you to set `host` property (or DATABRICKS_HOST env variable)"),
		"Actual message: %s", accountsAPIForWorkspaceClient.Error())
	e2APIFromE2Client := ws.commonErrorClarity(&http.Response{
		Request: httptest.NewRequest(
			"GET", "https://accounts.cloud.databricks.com/api/2.0/accounts/a/log-delivery",
			nil),
	})
	assert.Nil(t, e2APIFromE2Client)
}

// errReader is an io.ReadCloser stub whose Read and Close always fail,
// used to exercise I/O error paths in response parsing.
type errReader int

// Read always fails with a test error.
func (errReader) Read(p []byte) (n int, err error) {
	return 0, fmt.Errorf("test error")
}

// Close always fails with a test error.
func (errReader) Close() error {
	return fmt.Errorf("test error")
}
// TestParseError_IO checks that a body read failure surfaces as the
// underlying I/O error.
func TestParseError_IO(t *testing.T) {
	ws := DatabricksClient{
		Host: "qwerty.cloud.databricks.com",
	}
	var body errReader
	err := ws.parseError(&http.Response{
		Request: httptest.NewRequest(
			"GET", "https://querty.cloud.databricks.com/api/2.0/clusters/list",
			nil),
		Body: body,
	})
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(), "test error"),
		"Actual message: %s", err.Error())
}

// TestParseError_MWS checks that a 400 from the accounts host yields the
// accounts-API clarification message.
func TestParseError_MWS(t *testing.T) {
	ws := DatabricksClient{
		Host: "qwerty.cloud.databricks.com",
	}
	err := ws.parseError(&http.Response{
		Request: httptest.NewRequest(
			"GET", "https://accounts.cloud.databricks.com/api/2.0/accounts/a/log-delivery",
			nil),
		Body:       http.NoBody,
		StatusCode: 400,
	})
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(),
		"Accounts API (/api/2.0/accounts/a/log-delivery) requires you to set accounts.cloud.databricks.com"),
		"Actual message: %s", err.Error())
}

// TestParseError_API12 checks the legacy API 1.2 error envelope ({"error": ...}).
func TestParseError_API12(t *testing.T) {
	ws := DatabricksClient{
		Host: "qwerty.cloud.databricks.com",
	}
	err := ws.parseError(&http.Response{
		Request: httptest.NewRequest(
			"GET", "https://querty.cloud.databricks.com/api/2.0/clusters/list",
			nil),
		Body: ioutil.NopCloser(bytes.NewReader([]byte(`{
"error": "Error from API 1.2"
}`))),
	})
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(), "Error from API 1.2"),
		"Actual message: %s", err.Error())
}

// TestParseError_SCIM checks that the SCIM "detail" field becomes the
// error message.
func TestParseError_SCIM(t *testing.T) {
	ws := DatabricksClient{
		Host: "qwerty.cloud.databricks.com",
	}
	err := ws.parseError(&http.Response{
		Request: httptest.NewRequest(
			"GET", "https://querty.cloud.databricks.com/api/2.0/clusters/list",
			nil),
		Body: ioutil.NopCloser(bytes.NewReader([]byte(`{
"detail": "Detailed SCIM message",
"status": "MALFUNCTION",
"string_value": "sensitive",
"content": "sensitive"
}`))),
	})
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(), "Detailed SCIM message"),
		"Actual message: %s", err.Error())
}

// TestParseError_SCIMNull checks the fallback message when SCIM detail is
// the literal string "null".
func TestParseError_SCIMNull(t *testing.T) {
	ws := DatabricksClient{
		Host: "qwerty.cloud.databricks.com",
	}
	err := ws.parseError(&http.Response{
		Request: httptest.NewRequest(
			"GET", "https://querty.cloud.databricks.com/api/2.0/clusters/list",
			nil),
		Body: ioutil.NopCloser(bytes.NewReader([]byte(`{
"detail": "null"
}`))),
	})
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(), "SCIM API Internal Error"),
		"Actual message: %s", err.Error())
}
// TestCheckHTTPRetry_Connection: a url.Error wrapping "connection refused"
// must be retried.
func TestCheckHTTPRetry_Connection(t *testing.T) {
	ws := DatabricksClient{
		Host: "qwerty.cloud.databricks.com",
	}
	retry, err := ws.checkHTTPRetry(context.Background(), nil, &url.Error{
		Err: fmt.Errorf("connection refused"),
		URL: "xyz",
	})
	assert.True(t, retry)
	require.Error(t, err)
	assert.True(t, strings.Contains(err.Error(), "connection refused"),
		"Actual message: %s", err.Error())
}

// TestCheckHTTPRetry_NilResp: a plain error with no HTTP response is not
// retried.
func TestCheckHTTPRetry_NilResp(t *testing.T) {
	ws := DatabricksClient{
		Host: "qwerty.cloud.databricks.com",
	}
	retry, _ := ws.checkHTTPRetry(context.Background(), nil, fmt.Errorf("test error"))
	assert.False(t, retry)
}

// TestCheckHTTPRetry_429: HTTP 429 responses must be retried with a
// "has to be retried" error.
func TestCheckHTTPRetry_429(t *testing.T) {
	ws := DatabricksClient{
		Host: "qwerty.cloud.databricks.com",
	}
	retry, err := ws.checkHTTPRetry(context.Background(), &http.Response{
		StatusCode: 429,
	}, fmt.Errorf("test error"))
	assert.True(t, retry)
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(), "Current request has to be retried"),
		"Actual message: %s", err.Error())
}

// singleRequestServer starts an httptest server that answers exactly one
// (method, url) pair with the given response and fails the test on any
// other request. It returns a configured client pointed at the server;
// the caller is responsible for closing the server.
func singleRequestServer(t *testing.T, method, url, response string) (*DatabricksClient, *httptest.Server) {
	server := httptest.NewServer(http.HandlerFunc(
		func(rw http.ResponseWriter, req *http.Request) {
			if req.Method == method && req.RequestURI == url {
				_, err := rw.Write([]byte(response))
				assert.NoError(t, err)
				return
			}
			assert.Fail(t, fmt.Sprintf("Received unexpected call: %s %s",
				req.Method, req.RequestURI))
		}))
	client := &DatabricksClient{
		Host:               server.URL + "/",
		Token:              "..",
		InsecureSkipVerify: true,
		DebugHeaders:       true,
	}
	err := client.Configure()
	assert.NoError(t, err)
	return client, server
}
// TestGet_Error: an unconfigured client must fail with an authentication
// error before any request is made.
func TestGet_Error(t *testing.T) {
	defer CleanupEnvironment()()
	ws := DatabricksClient{}
	ws.configureHTTPCLient()
	err := ws.Get(context.Background(), "/imaginary/endpoint", nil, nil)
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(), "authentication is not configured"),
		"Actual message: %s", err.Error())
}

// TestPost_Error: a malformed JSON response body must surface as an
// "Invalid JSON received" error.
func TestPost_Error(t *testing.T) {
	ws, server := singleRequestServer(t, "POST", "/api/2.0/imaginary/endpoint", `{corrupt: "json"`)
	defer server.Close()
	var resp map[string]string
	err := ws.Post(context.Background(), "/imaginary/endpoint", APIErrorBody{
		ScimDetail: "some",
	}, &resp)
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(), "Invalid JSON received (16 bytes)"),
		"Actual message: %s", err.Error())
}

// TestDelete exercises the DELETE verb against a single-request server.
func TestDelete(t *testing.T) {
	ws, server := singleRequestServer(t, "DELETE", "/api/2.0/imaginary/endpoint", ``)
	defer server.Close()
	err := ws.Delete(context.Background(), "/imaginary/endpoint", APIErrorBody{
		ScimDetail: "some",
	})
	require.NoError(t, err)
}

// TestPatch exercises the PATCH verb against a single-request server.
func TestPatch(t *testing.T) {
	ws, server := singleRequestServer(t, "PATCH", "/api/2.0/imaginary/endpoint", ``)
	defer server.Close()
	err := ws.Patch(context.Background(), "/imaginary/endpoint", APIErrorBody{
		ScimDetail: "some",
	})
	require.NoError(t, err)
}

// TestPut exercises the PUT verb against a single-request server.
func TestPut(t *testing.T) {
	ws, server := singleRequestServer(t, "PUT", "/api/2.0/imaginary/endpoint", ``)
	defer server.Close()
	err := ws.Put(context.Background(), "/imaginary/endpoint", APIErrorBody{
		ScimDetail: "some",
	})
	require.NoError(t, err)
}

// TestUnmarshall: nil and non-pointer targets must be tolerated without error.
func TestUnmarshall(t *testing.T) {
	ws := DatabricksClient{}
	err := ws.unmarshall("/a/b/c", nil, nil)
	require.NoError(t, err)
	err = ws.unmarshall("/a/b/c", nil, "abc")
	require.NoError(t, err)
}

// TestAPI2 checks completeUrl failure modes: a request without a URL, and
// a malformed host ("ht_tp://...") that cannot be parsed.
func TestAPI2(t *testing.T) {
	ws := DatabricksClient{Host: "ht_tp://example.com/"}
	err := ws.completeUrl(&http.Request{})
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(), "no URL found in request"),
		"Actual message: %s", err.Error())
	err = ws.completeUrl(&http.Request{
		Header: http.Header{},
		URL: &url.URL{
			Path: "/x/y/x",
		},
	})
	require.Error(t, err)
	assert.True(t, strings.HasPrefix(err.Error(),
		`parse "ht_tp://example.com/": first path segment in URL cannot contain colon`),
		"Actual message: %s", err.Error())
}

// TestScim performs a SCIM GET round-trip through the single-request server.
func TestScim(t *testing.T) {
	ws, server := singleRequestServer(t, "GET", "/api/2.0/imaginary/endpoint", `{"a": "b"}`)
	defer server.Close()
	var resp map[string]string
	err := ws.Scim(context.Background(), "GET", "/imaginary/endpoint", nil, &resp)
	require.NoError(t, err)
}

// TestMakeRequestBody: GET encodes the payload into query parameters;
// POST passes a string payload through as raw bytes.
func TestMakeRequestBody(t *testing.T) {
	type x struct {
		Scope string `json:"scope" url:"scope"`
	}
	requestURL := "/a/b/c"
	_, err := makeRequestBody("GET", &requestURL, x{"test"}, true)
	require.NoError(t, err)
	assert.Equal(t, "/a/b/c?scope=test", requestURL)
	body, _ := makeRequestBody("POST", &requestURL, "abc", false)
	assert.Equal(t, []byte("abc"), body)
}
// TestClient_HandleErrors table-drives error decoding: a proper JSON error
// envelope, an HTML error page, and invalid HTML, each served with a 404,
// and asserts the APIError fields extracted from the response.
func TestClient_HandleErrors(t *testing.T) {
	tests := []struct {
		name               string
		response           string
		responseStatus     int
		expectedErrorCode  string
		expectedMessage    string
		expectedResource   string
		expectedStatusCode int
		apiCall            func(client *DatabricksClient) error
	}{
		{
			name: "Status 404",
			response: `{
"error_code": "RESOURCE_DOES_NOT_EXIST",
"message": "Token ... does not exist!"
}`,
			responseStatus:     http.StatusNotFound,
			expectedErrorCode:  "RESOURCE_DOES_NOT_EXIST",
			expectedMessage:    "Token ... does not exist!",
			expectedResource:   "/api/2.0/token/create",
			expectedStatusCode: 404,
			apiCall: func(client *DatabricksClient) error {
				return client.Post(context.Background(), "/token/create", map[string]string{
					"foo": "bar",
				}, nil)
			},
		},
		{
			name:               "HTML Status 404",
			response:           `<pre> Hello world </pre>`,
			responseStatus:     http.StatusNotFound,
			expectedErrorCode:  "NOT_FOUND",
			expectedMessage:    "Hello world",
			expectedResource:   "/api/2.0/token/create",
			expectedStatusCode: 404,
			apiCall: func(client *DatabricksClient) error {
				return client.Post(context.Background(), "/token/create", map[string]string{
					"foo": "bar",
				}, nil)
			},
		},
		{
			name:               "Invalid HTML Status 404",
			response:           `<html> Hello world </html>`,
			responseStatus:     http.StatusNotFound,
			expectedErrorCode:  "NOT_FOUND",
			expectedMessage:    "Response from server (404 Not Found) <html> Hello world </html>: invalid character '<' looking for beginning of value",
			expectedResource:   "/api/2.0/token/create",
			expectedStatusCode: 404,
			apiCall: func(client *DatabricksClient) error {
				return client.Post(context.Background(), "/token/create", map[string]string{
					"foo": "bar",
				}, nil)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Serve the canned status and body for every request.
			server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
				rw.WriteHeader(tt.responseStatus)
				_, err := rw.Write([]byte(tt.response))
				assert.NoError(t, err, err)
			}))
			// Close the server when test finishes
			defer server.Close()
			client := DatabricksClient{
				Host:  server.URL,
				Token: "...",
			}
			err := client.Configure()
			assert.NoError(t, err)
			err = tt.apiCall(&client)
			t.Log(err)
			assert.IsType(t, APIError{}, err)
			assert.Equal(t, tt.expectedErrorCode, err.(APIError).ErrorCode, "error code is not the same")
			assert.Equal(t, tt.expectedMessage, err.(APIError).Message, "message is not the same")
			assert.Equal(t, tt.expectedResource, err.(APIError).Resource, "resource is not the same")
			assert.Equal(t, tt.expectedStatusCode, err.(APIError).StatusCode, "status code is not the same")
		})
	}
}
|
//
// Package - transpiled by c4go
//
// If you have found any issues, please raise an issue at:
// https://github.com/Konstantin8105/c4go/
//
package pkg
// package_import_t - transpiled function from /home/istvan/packages/downloaded/cbuild/package/import.c:15
type package_import_t struct {
alias []byte
filename []byte
c_file int32
pkg []package_t
}
// package_import_free - transpiled function from /home/istvan/packages/downloaded/cbuild/package/import.c:22
func package_import_free(imp []package_import_t) []package_t {
if len(imp) == 0 {
return nil
}
if (int64(uintptr(unsafe.Pointer(&imp[0].alias[0])))/int64(1) - int64(uintptr(unsafe.Pointer(&imp[0].filename[0])))/int64(1)) == 0 {
_ = imp[0].alias
} else {
_ = imp[0].alias
_ = imp[0].filename
}
var pkg []package_t = imp[0].pkg
_ = imp
return pkg
}
// package_import_add - transpiled function from /home/istvan/packages/downloaded/cbuild/package/import.c:37
func package_import_add(alias []byte, filename []byte, parent []package_t, error_ [][]byte) []package_import_t {
var imp []package_import_t = make([]package_import_t, 1)
hash_set(parent[0].deps, alias, imp)
imp[0].alias = alias
imp[0].filename = filename
imp[0].c_file = 0
imp[0].pkg = package_new(filename, error_, parent[0].force, parent[0].silent)
if len(imp[0].pkg) == 0 {
return nil
}
package_export_write_headers(imp[0].pkg)
return imp
}
// package_import_add_c_file - transpiled function from /home/istvan/packages/downloaded/cbuild/package/import.c:53
func package_import_add_c_file(parent []package_t, filename []byte, error_ [][]byte) []package_import_t {
var alias []byte = realpath(filename, nil)
if len(alias) == 0 {
error_[0] = noarch.Strerror((noarch.ErrnoLocation())[0])
return nil
}
var imp []package_import_t = make([]package_import_t, 1)
hash_set(parent[0].deps, alias, imp)
imp[0].alias = alias
imp[0].filename = filename
imp[0].c_file = 1
imp[0].pkg = package_c_file(alias, error_)
return imp
}
// package_import_passthrough - transpiled function from /home/istvan/packages/downloaded/cbuild/package/import.c:72
func package_import_passthrough(parent []package_t, filename []byte, error_ [][]byte) []package_import_t {
var imp []package_import_t = package_import_add(filename, filename, parent, error_)
if len(error_[0]) != 0 {
return nil
}
if len(imp) == 0 || len(imp[0].pkg) == 0 {
asprintf(error_, []byte("Could not import '%s'\x00"), filename)
return nil
}
{
var val interface{}
{
var k khiter_t = khiter_t((khint_t(0)))
for ; k < khiter_t(((imp[0].pkg[0].exports)[0].n_buckets)); k++ {
if !noarch.Not((imp[0].pkg[0].exports)[0].flags[k>>uint64(4)] >> uint64(uint32((khint32_t((khint_t((k & khiter_t((khint_t((khint32_t((uint32(15))))))) << uint64(1)))))))) & khint32_t((3))) {
// Warning (*ast.MemberExpr): /home/istvan/packages/downloaded/cbuild/package/import.c:80 :cannot determine type for LHS '[hash_t * hash_t *]', will use 'void *' for all fields. Is lvalue = true. n.Name = n_buckets
// Warning (*ast.MemberExpr): /home/istvan/packages/downloaded/cbuild/package/import.c:80 :cannot determine type for LHS '[hash_t * hash_t *]', will use 'void *' for all fields. Is lvalue = true. n.Name = flags
continue
}
// Warning (*ast.MemberExpr): /home/istvan/packages/downloaded/cbuild/package/import.c:80 :cannot determine type for LHS '[hash_t * hash_t *]', will use 'void *' for all fields. Is lvalue = true. n.Name = vals
val = (imp[0].pkg[0].exports)[0].vals[k]
{
var exp []package_export_t = val.([]package_export_t)
hash_set(parent[0].exports, exp[0].export_name, exp)
}
}
}
}
package_export_export_headers(parent, imp[0].pkg)
return imp
}
|
package main
import "fmt"
// fbn returns the first n Fibonacci numbers (1, 1, 2, 3, 5, ...).
// It returns nil for n <= 0.
//
// BUG FIX: the original indexed arr[0] and arr[1] unconditionally, which
// panics (index out of range) for n < 2.
func fbn(n int) []uint64 {
	if n <= 0 {
		return nil
	}
	arr := make([]uint64, n)
	arr[0] = 1
	if n > 1 {
		arr[1] = 1
	}
	for i := 2; i < n; i++ {
		arr[i] = arr[i-1] + arr[i-2]
	}
	return arr
}
// main prints the first ten Fibonacci numbers.
func main() {
	sequence := fbn(10)
	fmt.Println(sequence)
}
|
package api
import (
// "log"
"github.com/J-HowHuang/Ramen-Live/backend/pkg/loc"
)
// HandleGetNearbyRegions extracts the user's coordinates from the incoming
// message and returns a response map whose "nearby_message" key holds the
// regions near that location.
//
// NOTE(review): the type assertions below are unchecked — a message without
// a "user_location" map or with non-float64 lat/lon will panic. Confirm
// that callers validate the payload upstream.
func HandleGetNearbyRegions(message map[string]interface{}) map[string]interface{} {
	location := message["user_location"].(map[string]interface{})
	lat := location["lat"].(float64)
	lon := location["lon"].(float64)
	nearbyRegions := loc.NearbyRegions(lat, lon)
	resp := make(map[string]interface{})
	resp["nearby_message"] = nearbyRegions
	return resp
}
// +build !js
package math4g_test
import (
"fmt"
"github.com/shibukawa/math4g"
"math"
)
// ExampleTranslateMatrix shows a pure translation: the point moves by the
// matrix's (dx, dy) offset. The "Output:" comment is the expected stdout
// checked by `go test`.
func ExampleTranslateMatrix() {
	// move 10, 20
	translate := math4g.TranslateMat32(10, 20)
	x, y := translate.TransformPoint(5, 5)
	fmt.Println(x, y)
	// Output: 15 25
}

// ExampleRotateMatrix shows a rotation by pi/2 radians about the origin.
func ExampleRotateMatrix() {
	// rotate 90 degree
	rotate := math4g.RotateMat32(math4g.Pi * 0.5)
	x, y := rotate.TransformPoint(10, 10)
	fmt.Println(x, y)
	// Output: -10 10
}

// ExampleScaleMatrix shows independent per-axis scaling.
func ExampleScaleMatrix() {
	// scale x1.5 for x-axis, x2.5 for y-axis
	scale := math4g.ScaleMat32(1.5, 2.5)
	x, y := scale.TransformPoint(10, 10)
	fmt.Println(x, y)
	// Output: 15 25
}

// ExampleMatrix composes translate, rotate, and scale; the result is
// rounded to the nearest integer before printing to keep the expected
// output stable against floating-point noise.
func ExampleMatrix() {
	translate := math4g.TranslateMat32(10, 20)
	rotate := math4g.RotateMat32(math4g.Pi * 0.5)
	scale := math4g.ScaleMat32(1.5, 2.5)
	x, y := translate.Multiply(rotate).Multiply(scale).TransformPoint(10, 10)
	fmt.Println(math.Floor(float64(x)+0.5), math.Floor(float64(y)+.5))
	// Output: -45 50
}
package nfs
import (
"github.com/storageos/cluster-operator/pkg/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
// DataVolName is the NFS data volume name.
DataVolName = "nfs-data"
)
// createStatefulSet creates a single-replica StatefulSet for the NFS
// server: the pod template comes from createPodTemplateSpec, the given
// PVC is mounted as the data volume, and the server's tolerations plus
// the cluster's node selectors are applied.
func (d *Deployment) createStatefulSet(pvcVS *corev1.PersistentVolumeClaimVolumeSource, nfsPort int, httpPort int) error {
	// NFS state lives on the PVC, so a single replica is sufficient.
	replicas := int32(1)
	spec := &appsv1.StatefulSetSpec{
		ServiceName: d.nfsServer.Name,
		Replicas:    &replicas,
		Selector: &metav1.LabelSelector{
			MatchLabels: d.labelsForStatefulSet(),
		},
		Template: d.createPodTemplateSpec(nfsPort, httpPort),
	}

	// Add the block volume in the pod spec volumes.
	vol := corev1.Volume{
		Name: DataVolName,
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: pvcVS,
		},
	}
	spec.Template.Spec.Volumes = append(spec.Template.Spec.Volumes, vol)

	if err := util.AddTolerations(&spec.Template.Spec, d.nfsServer.Spec.Tolerations); err != nil {
		return err
	}

	// If the cluster was configured with node selectors to only run on certain
	// nodes, use the same selectors to select the nodes that the NFS pods can
	// run on. NFSServer does not currently allow setting node selectors or
	// affinity directly.
	util.AddRequiredNodeAffinity(&spec.Template.Spec, d.cluster.Spec.NodeSelectorTerms)

	return d.k8sResourceManager.StatefulSet(d.nfsServer.Name, d.nfsServer.Namespace, nil, spec).Create()
}
// createPodTemplateSpec builds the pod template for the NFS server
// StatefulSet: a single nfsd (Ganesha) container with its config served
// from a ConfigMap named after the NFSServer, the data volume mounted
// at /export, and an HTTP readiness probe on httpPort.
func (d *Deployment) createPodTemplateSpec(nfsPort int, httpPort int) corev1.PodTemplateSpec {
	return corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: d.labelsForStatefulSet(),
		},
		Spec: corev1.PodSpec{
			ServiceAccountName: d.getServiceAccountName(),
			Containers: []corev1.Container{
				{
					ImagePullPolicy: "IfNotPresent",
					Name:            "nfsd",
					Image:           d.nfsServer.Spec.GetContainerImage(d.cluster.Spec.GetNFSServerImage()),
					// Ganesha reads its config file path and identity
					// from these environment variables.
					Env: []corev1.EnvVar{
						{
							Name:  "GANESHA_CONFIGFILE",
							Value: "/config/" + d.nfsServer.Name,
						},
						{
							Name:  "NAME",
							Value: d.nfsServer.Name,
						},
						{
							Name:  "NAMESPACE",
							Value: d.nfsServer.Namespace,
						},
					},
					Ports: []corev1.ContainerPort{
						{
							Name:          "nfs-port",
							ContainerPort: int32(nfsPort),
						},
						{
							Name:          "http-port",
							ContainerPort: int32(httpPort),
						},
					},
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "nfs-config",
							MountPath: "/config",
						},
						{
							// DataVolName itself is added to
							// Spec.Volumes by createStatefulSet.
							Name:      DataVolName,
							MountPath: "/export",
						},
					},
					// Capabilities required by the NFS server to manage
					// mounts and traverse exported directories.
					SecurityContext: &corev1.SecurityContext{
						Capabilities: &corev1.Capabilities{
							Add: []corev1.Capability{
								"SYS_ADMIN",
								"DAC_READ_SEARCH",
							},
						},
					},
					ReadinessProbe: &corev1.Probe{
						Handler: corev1.Handler{
							HTTPGet: &corev1.HTTPGetAction{
								Port: intstr.FromInt(httpPort),
								Path: HealthEndpointPath,
							},
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					// Ganesha config generated into a ConfigMap with
					// the same name as the NFSServer resource.
					Name: "nfs-config",
					VolumeSource: corev1.VolumeSource{
						ConfigMap: &corev1.ConfigMapVolumeSource{
							LocalObjectReference: corev1.LocalObjectReference{
								Name: d.nfsServer.Name,
							},
						},
					},
				},
			},
		},
	}
}
|
package gosi
import (
"encoding/json"
"math"
"github.com/shirou/gopsutil/v3/disk"
"github.com/inhies/go-bytesize"
)
// DiskStat describes usage figures for a single mounted filesystem.
// The size fields are pre-formatted human-readable strings.
type DiskStat struct {
	Name        string `json:"name"`
	Total       string `json:"total"`
	Free        string `json:"free"`
	Used        string `json:"used"`
	UsedPercent uint   `json:"usedPercent"`
}

// DiskStats is a collection of per-partition statistics.
type DiskStats []DiskStat

// Json renders the stats as a JSON array. Marshalling a slice of plain
// string/uint structs cannot fail, so the error is deliberately ignored.
func (d DiskStats) Json() []byte {
	out, _ := json.Marshal(d)
	return out
}
// Disk returns usage statistics for every valid partition.
//
// Fixes over the previous version: errors from disk.Partitions and
// disk.Usage were ignored, so a failed Usage call returned a nil
// *UsageStat and dereferencing d.Total panicked; such partitions are
// now skipped. The percentage was rounded to one decimal
// (math.Round(x*10)/10) and then truncated by the uint conversion,
// discarding the rounding — it is now rounded to the nearest integer.
func Disk() DiskStats {
	ret := []DiskStat{}
	parts, err := disk.Partitions(true)
	if err != nil {
		// No partition information available; return an empty result.
		return ret
	}
	bytesize.Format = "%.0f "
	for _, p := range parts {
		if !isVaildPartition(p) {
			continue
		}
		u, err := disk.Usage(p.Mountpoint)
		if err != nil || u == nil {
			// Skip partitions whose usage cannot be read (previously a
			// nil-pointer panic).
			continue
		}
		ret = append(ret, DiskStat{
			Name:        u.Path,
			Total:       bytesize.New(float64(u.Total)).String(),
			Free:        bytesize.New(float64(u.Free)).String(),
			Used:        bytesize.New(float64(u.Used)).String(),
			UsedPercent: uint(math.Round(u.UsedPercent)),
		})
	}
	return ret
}
|
package main
import (
"bufio"
"fmt"
"log"
"net/http"
"os"
"github.com/go-icap/icap"
)
var (
	// ISTag identifies this ICAP service implementation to clients.
	ISTag = "\"GOLANG\""
	// lines holds the request URIs to block, loaded from filter_list.txt
	// at startup (see main) and consulted by filter.
	lines []string
)
// readLines reads the file at path and returns its lines in order.
//
// Fix: the previous version appended into the package-level `lines`
// slice and returned it, so a second call accumulated duplicates and
// the result aliased global state. It now builds a fresh local slice;
// callers that assign the result to the global are unaffected.
func readLines(path string) ([]string, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	var result []string
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		result = append(result, scanner.Text())
	}
	return result, scanner.Err()
}
// contains reports whether str is an element of s.
func contains(s []string, str string) bool {
	for i := range s {
		if s[i] == str {
			return true
		}
	}
	return false
}
// main loads the URI block list and serves the ICAP REQMOD endpoint.
func main() {
	var err error
	lines, err = readLines("filter_list.txt")
	if err != nil {
		log.Fatalf("readLines: %s", err)
	}
	icap.HandleFunc("/request", filter)
	// Pass a nil handler so the mux populated by HandleFunc is used;
	// previously the handler was registered twice (HandleFunc and the
	// explicit handler argument). Also surface listen errors instead of
	// silently returning.
	log.Fatal(icap.ListenAndServe(":13440", nil))
}
// filter implements the ICAP service: OPTIONS advertises REQMOD
// capabilities, and REQMOD answers 200 + a 403 HTTP response for URIs
// on the block list, or 204 (no modification) otherwise.
func filter(w icap.ResponseWriter, req *icap.Request) {
	hdr := w.Header()
	hdr.Set("ISTag", ISTag)
	hdr.Set("Service", "Golang icap")
	switch req.Method {
	case "OPTIONS":
		hdr.Set("Methods", "REQMOD")
		hdr.Set("Allow", "204")
		hdr.Set("Preview", "0")
		hdr.Set("Transfer-Preview", "*")
		w.WriteHeader(200, nil, false)
	case "REQMOD":
		log.Printf("Got RequestURI : [%s] ", req.Request.RequestURI)
		if !contains(lines, req.Request.RequestURI) {
			w.WriteHeader(204, nil, false)
			return
		}
		// Blocked URI: replace the request with a 403 response.
		w.WriteHeader(200, &http.Response{
			StatusCode: http.StatusForbidden,
			Status:     "Forbidden",
		}, false)
	case "ERRDUMMY":
		w.WriteHeader(400, nil, false)
		fmt.Println("Malformed request")
	default:
		w.WriteHeader(405, nil, false)
		fmt.Println("Invalid request method")
	}
}
|
package main
import(
"unicode"
)
// removeZeros strips redundant zeros from a decimal string: leading
// zeros before the decimal point (keeping at least one digit) and
// trailing zeros after it. If the fractional part becomes empty the
// point is dropped too. Integer strings (no '.') keep trailing zeros.
func removeZeros(s string) string {
	// Position of the decimal point, or len(s) when absent.
	dot := len(s)
	for i := 0; i < len(s); i++ {
		if s[i] == '.' {
			dot = i
			break
		}
	}
	// Skip leading zeros, leaving at least one digit before the point.
	lo := 0
	for lo < dot-1 && s[lo] == '0' {
		lo++
	}
	// Skip trailing zeros after the point.
	hi := len(s) - 1
	for hi > dot && s[hi] == '0' {
		hi--
	}
	out := s[lo:dot]
	if hi > dot {
		out += s[dot : hi+1]
	}
	return out
}
// check validates s as a signed decimal number and normalizes it into a
// Num: Sign records a leading '-', IntNum holds the digits with leading
// and trailing redundant zeros removed and the point stripped, and
// LenAfterPoint counts digits after the (removed) point. The second
// return value is false when s is not a valid number.
//
// NOTE(review): an empty s panics on the s[0] access — confirm callers
// never pass "".
func check(s string) (Num, bool) {
	var parsed Num
	negative := false
	if s[0] == '-' {
		negative = true
		s = s[1:]
	}
	digits := ""
	seenPoint := false
	for _, r := range s {
		switch {
		case r == '.':
			// At most one decimal point is allowed.
			if seenPoint {
				return parsed, false
			}
			seenPoint = true
			digits += "."
		case !unicode.IsDigit(r):
			return parsed, false
		default:
			digits += string(r)
		}
	}
	if len(digits) == 0 || (len(digits) == 1 && digits[0] == '.') {
		return parsed, false
	}
	// ".5" -> "0.5" so a digit always precedes the point.
	if digits[0] == '.' {
		digits = "0" + digits
	}
	digits = removeZeros(digits)
	// Record the fractional length, then drop the point itself.
	pointIdx := 0
	for ; pointIdx < len(digits); pointIdx++ {
		if digits[pointIdx] == '.' {
			parsed.LenAfterPoint = len(digits) - pointIdx - 1
			break
		}
	}
	if pointIdx < len(digits) {
		digits = digits[:pointIdx] + digits[pointIdx+1:]
	}
	parsed.IntNum = digits
	parsed.Sign = negative
	return parsed, true
}
|
package v1
import (
"errors"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/google/uuid"
"github.com/labstack/echo/v4"
"github.com/stretchr/testify/assert"
mockConfig "github.com/traPtitech/trap-collection-server/src/config/mock"
"github.com/traPtitech/trap-collection-server/src/domain"
"github.com/traPtitech/trap-collection-server/src/domain/values"
"github.com/traPtitech/trap-collection-server/src/handler/common"
"github.com/traPtitech/trap-collection-server/src/handler/v1/openapi"
"github.com/traPtitech/trap-collection-server/src/service"
"github.com/traPtitech/trap-collection-server/src/service/mock"
)
// TestPostGame is a table-driven test of the POST /api/game handler,
// covering the happy path, missing/invalid sessions, name validation
// (empty and over-long), empty description, and a service-layer error.
func TestPostGame(t *testing.T) {
	t.Parallel()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// Shared fixtures: feature flag, session config, and the mocked
	// game service behind the handler under test.
	mockAppConfig := mockConfig.NewMockApp(ctrl)
	mockAppConfig.
		EXPECT().
		FeatureV1Write().
		Return(true)
	mockConf := mockConfig.NewMockHandler(ctrl)
	mockConf.
		EXPECT().
		SessionKey().
		Return("key", nil)
	mockConf.
		EXPECT().
		SessionSecret().
		Return("secret", nil)
	sess, err := common.NewSession(mockConf)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	session, err := NewSession(sess)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	mockGameService := mock.NewMockGame(ctrl)
	gameHandler := NewGame(mockAppConfig, session, mockGameService)
	// test describes a single table case; executeCreateGame gates the
	// CreateGame mock expectation.
	type test struct {
		description       string
		sessionExist      bool
		authSession       *domain.OIDCSession
		newGame           *openapi.NewGame
		executeCreateGame bool
		game              *domain.Game
		CreateGameErr     error
		apiGame           openapi.GameInfo
		isErr             bool
		err               error
		statusCode        int
	}
	gameID := values.NewGameID()
	now := time.Now()
	testCases := []test{
		{
			description:  "特に問題ないのでエラーなし",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			newGame: &openapi.NewGame{
				Name:        "test",
				Description: "test",
			},
			executeCreateGame: true,
			game: domain.NewGame(
				gameID,
				values.NewGameName("test"),
				values.NewGameDescription("test"),
				now,
			),
			apiGame: openapi.GameInfo{
				Id:          uuid.UUID(gameID).String(),
				Name:        "test",
				Description: "test",
				CreatedAt:   now,
			},
		},
		{
			description:  "セッションがないので500",
			sessionExist: false,
			isErr:        true,
			statusCode:   http.StatusInternalServerError,
		},
		{
			description:  "authSessionがないので500",
			sessionExist: true,
			isErr:        true,
			statusCode:   http.StatusInternalServerError,
		},
		{
			description:  "名前が空なので400",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			newGame: &openapi.NewGame{
				Name:        "",
				Description: "test",
			},
			isErr:      true,
			statusCode: http.StatusBadRequest,
		},
		{
			description:  "名前が長すぎるので400",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			newGame: &openapi.NewGame{
				Name:        "012345678901234567890123456789012",
				Description: "test",
			},
			isErr:      true,
			statusCode: http.StatusBadRequest,
		},
		{
			description:  "説明が空文字でもエラーなし",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			newGame: &openapi.NewGame{
				Name:        "test",
				Description: "",
			},
			executeCreateGame: true,
			game: domain.NewGame(
				gameID,
				values.NewGameName("test"),
				values.NewGameDescription(""),
				now,
			),
			apiGame: openapi.GameInfo{
				Id:          uuid.UUID(gameID).String(),
				Name:        "test",
				Description: "",
				CreatedAt:   now,
			},
		},
		{
			description:  "CreateGameがエラーなので500",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			newGame: &openapi.NewGame{
				Name:        "test",
				Description: "test",
			},
			executeCreateGame: true,
			CreateGameErr:     errors.New("test"),
			isErr:             true,
			statusCode:        http.StatusInternalServerError,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			e := echo.New()
			req := httptest.NewRequest(http.MethodPost, "/api/game", nil)
			rec := httptest.NewRecorder()
			c := e.NewContext(req, rec)
			if testCase.sessionExist {
				// Prime a session cookie (optionally carrying the auth
				// session) and re-read it into the echo context, the
				// same way middleware would.
				sess, err := session.New(req)
				if err != nil {
					t.Fatal(err)
				}
				if testCase.authSession != nil {
					sess.Values[accessTokenSessionKey] = string(testCase.authSession.GetAccessToken())
					sess.Values[expiresAtSessionKey] = testCase.authSession.GetExpiresAt()
				}
				err = sess.Save(req, rec)
				if err != nil {
					t.Fatalf("failed to save session: %v", err)
				}
				setCookieHeader(c)
				sess, err = session.Get(req)
				if err != nil {
					t.Fatal(err)
				}
				c.Set(sessionContextKey, sess)
			}
			if testCase.executeCreateGame {
				mockGameService.
					EXPECT().
					CreateGame(gomock.Any(), gomock.Any(), values.NewGameName(testCase.newGame.Name), values.NewGameDescription(testCase.newGame.Description)).
					Return(testCase.game, testCase.CreateGameErr)
			}
			game, err := gameHandler.PostGame(c, testCase.newGame)
			// Error cases: prefer status-code comparison when set,
			// otherwise fall back to generic / sentinel error checks.
			if testCase.isErr {
				if testCase.statusCode != 0 {
					var httpError *echo.HTTPError
					if errors.As(err, &httpError) {
						assert.Equal(t, testCase.statusCode, httpError.Code)
					} else {
						t.Errorf("error is not *echo.HTTPError")
					}
				} else if testCase.err == nil {
					assert.Error(t, err)
				} else if !errors.Is(err, testCase.err) {
					t.Errorf("error must be %v, but actual is %v", testCase.err, err)
				}
			} else {
				assert.NoError(t, err)
			}
			if err != nil {
				return
			}
			assert.Equal(t, testCase.apiGame, *game)
		})
	}
}
// TestGetGame is a table-driven test of the GET /api/game/:id handler:
// happy path, malformed UUID, missing game (ErrNoGame -> 400), service
// error (-> 500), and a game without a latest version.
func TestGetGame(t *testing.T) {
	t.Parallel()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// Shared fixtures: feature flag, session config, mocked game service.
	mockAppConfig := mockConfig.NewMockApp(ctrl)
	mockAppConfig.
		EXPECT().
		FeatureV1Write().
		Return(true)
	mockConf := mockConfig.NewMockHandler(ctrl)
	mockConf.
		EXPECT().
		SessionKey().
		Return("key", nil)
	mockConf.
		EXPECT().
		SessionSecret().
		Return("secret", nil)
	sess, err := common.NewSession(mockConf)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	session, err := NewSession(sess)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	mockGameService := mock.NewMockGame(ctrl)
	gameHandler := NewGame(mockAppConfig, session, mockGameService)
	// test describes a single table case; executeGetGame gates the
	// GetGame mock expectation.
	type test struct {
		description    string
		strGameID      string
		executeGetGame bool
		game           *service.GameInfo
		GetGameErr     error
		apiGame        openapi.Game
		isErr          bool
		err            error
		statusCode     int
	}
	gameID := values.NewGameID()
	gameVersionID := values.NewGameVersionID()
	now := time.Now()
	testCases := []test{
		{
			description:    "特に問題ないのでエラーなし",
			strGameID:      uuid.UUID(gameID).String(),
			executeGetGame: true,
			game: &service.GameInfo{
				Game: domain.NewGame(
					gameID,
					values.NewGameName("test"),
					values.NewGameDescription("test"),
					now,
				),
				LatestVersion: domain.NewGameVersion(
					gameVersionID,
					values.NewGameVersionName("test"),
					values.NewGameVersionDescription("test"),
					now,
				),
			},
			apiGame: openapi.Game{
				Id:          uuid.UUID(gameID).String(),
				Name:        "test",
				Description: "test",
				CreatedAt:   now,
				Version: &openapi.GameVersion{
					Id:          uuid.UUID(gameVersionID).String(),
					Name:        "test",
					Description: "test",
					CreatedAt:   now,
				},
			},
		},
		{
			description: "gameIDがuuidでないので400",
			strGameID:   "invalid",
			isErr:       true,
			statusCode:  http.StatusBadRequest,
		},
		{
			description:    "ゲームが存在しないので400",
			strGameID:      uuid.UUID(gameID).String(),
			executeGetGame: true,
			GetGameErr:     service.ErrNoGame,
			isErr:          true,
			statusCode:     http.StatusBadRequest,
		},
		{
			description:    "GetGameがエラーなので500",
			strGameID:      uuid.UUID(gameID).String(),
			executeGetGame: true,
			GetGameErr:     errors.New("error"),
			isErr:          true,
			statusCode:     http.StatusInternalServerError,
		},
		{
			description:    "gameVersionがnilでもエラーなし",
			strGameID:      uuid.UUID(gameID).String(),
			executeGetGame: true,
			game: &service.GameInfo{
				Game: domain.NewGame(
					gameID,
					values.NewGameName("test"),
					values.NewGameDescription("test"),
					now,
				),
			},
			apiGame: openapi.Game{
				Id:          uuid.UUID(gameID).String(),
				Name:        "test",
				Description: "test",
				CreatedAt:   now,
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			e := echo.New()
			req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("/api/game/%s", testCase.strGameID), nil)
			rec := httptest.NewRecorder()
			c := e.NewContext(req, rec)
			if testCase.executeGetGame {
				mockGameService.
					EXPECT().
					GetGame(gomock.Any(), gomock.Any()).
					Return(testCase.game, testCase.GetGameErr)
			}
			game, err := gameHandler.GetGame(c, testCase.strGameID)
			// Error cases: status-code comparison when set, otherwise
			// generic / sentinel error checks.
			if testCase.isErr {
				if testCase.statusCode != 0 {
					var httpError *echo.HTTPError
					if errors.As(err, &httpError) {
						assert.Equal(t, testCase.statusCode, httpError.Code)
					} else {
						t.Errorf("error is not *echo.HTTPError")
					}
				} else if testCase.err == nil {
					assert.Error(t, err)
				} else if !errors.Is(err, testCase.err) {
					t.Errorf("error must be %v, but actual is %v", testCase.err, err)
				}
			} else {
				assert.NoError(t, err)
			}
			if err != nil {
				return
			}
			assert.Equal(t, testCase.apiGame, *game)
		})
	}
}
// TestPutGame is a table-driven test of the PUT /api/game/:id handler:
// happy path, malformed UUID, name validation, empty description,
// missing game (ErrNoGame -> 400), and a service error (-> 500).
func TestPutGame(t *testing.T) {
	t.Parallel()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// Shared fixtures: feature flag, session config, mocked game service.
	mockAppConfig := mockConfig.NewMockApp(ctrl)
	mockAppConfig.
		EXPECT().
		FeatureV1Write().
		Return(true)
	mockConf := mockConfig.NewMockHandler(ctrl)
	mockConf.
		EXPECT().
		SessionKey().
		Return("key", nil)
	mockConf.
		EXPECT().
		SessionSecret().
		Return("secret", nil)
	sess, err := common.NewSession(mockConf)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	session, err := NewSession(sess)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	mockGameService := mock.NewMockGame(ctrl)
	gameHandler := NewGame(mockAppConfig, session, mockGameService)
	// test describes a single table case; executeUpdateGame gates the
	// UpdateGame mock expectation.
	type test struct {
		description       string
		strGameID         string
		newGame           *openapi.NewGame
		executeUpdateGame bool
		game              *domain.Game
		UpdateGameErr     error
		apiGame           openapi.GameInfo
		isErr             bool
		err               error
		statusCode        int
	}
	gameID := values.NewGameID()
	now := time.Now()
	testCases := []test{
		{
			description: "特に問題ないのでエラーなし",
			strGameID:   uuid.UUID(gameID).String(),
			newGame: &openapi.NewGame{
				Name:        "test",
				Description: "test",
			},
			executeUpdateGame: true,
			game: domain.NewGame(
				gameID,
				values.NewGameName("test"),
				values.NewGameDescription("test"),
				now,
			),
			apiGame: openapi.GameInfo{
				Id:          uuid.UUID(gameID).String(),
				Name:        "test",
				Description: "test",
				CreatedAt:   now,
			},
		},
		{
			description: "gameIDがuuidでないので400",
			strGameID:   "invalid",
			isErr:       true,
			statusCode:  http.StatusBadRequest,
		},
		{
			description: "名前が空なので400",
			strGameID:   uuid.UUID(gameID).String(),
			newGame: &openapi.NewGame{
				Name:        "",
				Description: "test",
			},
			isErr:      true,
			statusCode: http.StatusBadRequest,
		},
		{
			description: "名前が長すぎるので400",
			strGameID:   uuid.UUID(gameID).String(),
			newGame: &openapi.NewGame{
				Name:        "012345678901234567890123456789012",
				Description: "test",
			},
			isErr:      true,
			statusCode: http.StatusBadRequest,
		},
		{
			description: "説明が空文字でもエラーなし",
			strGameID:   uuid.UUID(gameID).String(),
			newGame: &openapi.NewGame{
				Name:        "test",
				Description: "",
			},
			executeUpdateGame: true,
			game: domain.NewGame(
				gameID,
				values.NewGameName("test"),
				values.NewGameDescription(""),
				now,
			),
			apiGame: openapi.GameInfo{
				Id:          uuid.UUID(gameID).String(),
				Name:        "test",
				Description: "",
				CreatedAt:   now,
			},
		},
		{
			description: "ゲームが存在しないので400",
			strGameID:   uuid.UUID(gameID).String(),
			newGame: &openapi.NewGame{
				Name:        "test",
				Description: "test",
			},
			executeUpdateGame: true,
			UpdateGameErr:     service.ErrNoGame,
			isErr:             true,
			statusCode:        http.StatusBadRequest,
		},
		{
			description: "UpdateGameがエラーなので500",
			strGameID:   uuid.UUID(gameID).String(),
			newGame: &openapi.NewGame{
				Name:        "test",
				Description: "test",
			},
			executeUpdateGame: true,
			UpdateGameErr:     errors.New("test"),
			isErr:             true,
			statusCode:        http.StatusInternalServerError,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			e := echo.New()
			req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/api/game/%s", testCase.strGameID), nil)
			rec := httptest.NewRecorder()
			c := e.NewContext(req, rec)
			if testCase.executeUpdateGame {
				mockGameService.
					EXPECT().
					UpdateGame(gomock.Any(), gomock.Any(), values.NewGameName(testCase.newGame.Name), values.NewGameDescription(testCase.newGame.Description)).
					Return(testCase.game, testCase.UpdateGameErr)
			}
			game, err := gameHandler.PutGame(c, testCase.strGameID, testCase.newGame)
			// Error cases: status-code comparison when set, otherwise
			// generic / sentinel error checks.
			if testCase.isErr {
				if testCase.statusCode != 0 {
					var httpError *echo.HTTPError
					if errors.As(err, &httpError) {
						assert.Equal(t, testCase.statusCode, httpError.Code)
					} else {
						t.Errorf("error is not *echo.HTTPError")
					}
				} else if testCase.err == nil {
					assert.Error(t, err)
				} else if !errors.Is(err, testCase.err) {
					t.Errorf("error must be %v, but actual is %v", testCase.err, err)
				}
			} else {
				assert.NoError(t, err)
			}
			if err != nil {
				return
			}
			assert.Equal(t, testCase.apiGame, *game)
		})
	}
}
// TestGetGames is a table-driven test of the game-listing handler: the
// "all" query flag selects GetGames (all games) vs GetMyGames (session
// required), with cases for invalid flags, missing sessions, service
// errors, empty/multiple result sets, and games without versions.
func TestGetGames(t *testing.T) {
	t.Parallel()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// Shared fixtures: feature flag, session config, mocked game service.
	mockAppConfig := mockConfig.NewMockApp(ctrl)
	mockAppConfig.
		EXPECT().
		FeatureV1Write().
		Return(true)
	mockConf := mockConfig.NewMockHandler(ctrl)
	mockConf.
		EXPECT().
		SessionKey().
		Return("key", nil)
	mockConf.
		EXPECT().
		SessionSecret().
		Return("secret", nil)
	sess, err := common.NewSession(mockConf)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	session, err := NewSession(sess)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	mockGameService := mock.NewMockGame(ctrl)
	gameHandler := NewGame(mockAppConfig, session, mockGameService)
	// test describes a single table case; exactly one of
	// executeGetGames / executeGetMyGames is set per case.
	type test struct {
		description       string
		strAll            string
		executeGetGames   bool
		GetGamesErr       error
		sessionExist      bool
		authSession       *domain.OIDCSession
		executeGetMyGames bool
		GetMyGamesErr     error
		games             []*service.GameInfo
		apiGames          []*openapi.Game
		isErr             bool
		err               error
		statusCode        int
	}
	gameID1 := values.NewGameID()
	gameID2 := values.NewGameID()
	gameVersionID1 := values.NewGameVersionID()
	gameVersionID2 := values.NewGameVersionID()
	now := time.Now()
	testCases := []test{
		{
			description:     "特に問題ないので問題なし",
			strAll:          "true",
			executeGetGames: true,
			games: []*service.GameInfo{
				{
					Game: domain.NewGame(
						gameID1,
						values.NewGameName("test1"),
						values.NewGameDescription("test1"),
						now,
					),
					LatestVersion: domain.NewGameVersion(
						gameVersionID1,
						values.NewGameVersionName("test1"),
						values.NewGameVersionDescription("test1"),
						now,
					),
				},
			},
			apiGames: []*openapi.Game{
				{
					Id:          uuid.UUID(gameID1).String(),
					Name:        "test1",
					Description: "test1",
					CreatedAt:   now,
					Version: &openapi.GameVersion{
						Id:          uuid.UUID(gameVersionID1).String(),
						Name:        "test1",
						Description: "test1",
						CreatedAt:   now,
					},
				},
			},
		},
		{
			description:     "GetGamesがエラーなので500",
			strAll:          "true",
			executeGetGames: true,
			GetGamesErr:     errors.New("test"),
			isErr:           true,
			statusCode:      http.StatusInternalServerError,
		},
		{
			description: "allが誤っているので400",
			strAll:      "invalid",
			isErr:       true,
			statusCode:  http.StatusBadRequest,
		},
		{
			description:  "allがfalseなので問題なし",
			strAll:       "false",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			executeGetMyGames: true,
			games: []*service.GameInfo{
				{
					Game: domain.NewGame(
						gameID1,
						values.NewGameName("test1"),
						values.NewGameDescription("test1"),
						now,
					),
					LatestVersion: domain.NewGameVersion(
						gameVersionID1,
						values.NewGameVersionName("test1"),
						values.NewGameVersionDescription("test1"),
						now,
					),
				},
			},
			apiGames: []*openapi.Game{
				{
					Id:          uuid.UUID(gameID1).String(),
					Name:        "test1",
					Description: "test1",
					CreatedAt:   now,
					Version: &openapi.GameVersion{
						Id:          uuid.UUID(gameVersionID1).String(),
						Name:        "test1",
						Description: "test1",
						CreatedAt:   now,
					},
				},
			},
		},
		{
			description:  "allが空文字でも問題なし",
			strAll:       "",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			executeGetMyGames: true,
			games: []*service.GameInfo{
				{
					Game: domain.NewGame(
						gameID1,
						values.NewGameName("test1"),
						values.NewGameDescription("test1"),
						now,
					),
					LatestVersion: domain.NewGameVersion(
						gameVersionID1,
						values.NewGameVersionName("test1"),
						values.NewGameVersionDescription("test1"),
						now,
					),
				},
			},
			apiGames: []*openapi.Game{
				{
					Id:          uuid.UUID(gameID1).String(),
					Name:        "test1",
					Description: "test1",
					CreatedAt:   now,
					Version: &openapi.GameVersion{
						Id:          uuid.UUID(gameVersionID1).String(),
						Name:        "test1",
						Description: "test1",
						CreatedAt:   now,
					},
				},
			},
		},
		{
			description:  "sessionが存在しないので500",
			strAll:       "false",
			sessionExist: false,
			isErr:        true,
			statusCode:   http.StatusInternalServerError,
		},
		{
			description:  "authSessionが存在しないので500",
			strAll:       "false",
			sessionExist: true,
			isErr:        true,
			statusCode:   http.StatusInternalServerError,
		},
		{
			description:  "GetMyGamesがエラーなので500",
			strAll:       "false",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			executeGetMyGames: true,
			GetMyGamesErr:     errors.New("test"),
			isErr:             true,
			statusCode:        http.StatusInternalServerError,
		},
		{
			description:     "ゲームが存在しなくても問題なし",
			strAll:          "true",
			executeGetGames: true,
			games:           []*service.GameInfo{},
			apiGames:        []*openapi.Game{},
		},
		{
			description:     "ゲームが複数でも問題なし",
			strAll:          "true",
			executeGetGames: true,
			games: []*service.GameInfo{
				{
					Game: domain.NewGame(
						gameID1,
						values.NewGameName("test1"),
						values.NewGameDescription("test1"),
						now,
					),
					LatestVersion: domain.NewGameVersion(
						gameVersionID1,
						values.NewGameVersionName("test1"),
						values.NewGameVersionDescription("test1"),
						now,
					),
				},
				{
					Game: domain.NewGame(
						gameID2,
						values.NewGameName("test2"),
						values.NewGameDescription("test2"),
						now,
					),
					LatestVersion: domain.NewGameVersion(
						gameVersionID2,
						values.NewGameVersionName("test2"),
						values.NewGameVersionDescription("test2"),
						now,
					),
				},
			},
			apiGames: []*openapi.Game{
				{
					Id:          uuid.UUID(gameID1).String(),
					Name:        "test1",
					Description: "test1",
					CreatedAt:   now,
					Version: &openapi.GameVersion{
						Id:          uuid.UUID(gameVersionID1).String(),
						Name:        "test1",
						Description: "test1",
						CreatedAt:   now,
					},
				},
				{
					Id:          uuid.UUID(gameID2).String(),
					Name:        "test2",
					Description: "test2",
					CreatedAt:   now,
					Version: &openapi.GameVersion{
						Id:          uuid.UUID(gameVersionID2).String(),
						Name:        "test2",
						Description: "test2",
						CreatedAt:   now,
					},
				},
			},
		},
		{
			description:     "versionが存在しなくても問題なし",
			strAll:          "true",
			executeGetGames: true,
			games: []*service.GameInfo{
				{
					Game: domain.NewGame(
						gameID1,
						values.NewGameName("test1"),
						values.NewGameDescription("test1"),
						now,
					),
				},
			},
			apiGames: []*openapi.Game{
				{
					Id:          uuid.UUID(gameID1).String(),
					Name:        "test1",
					Description: "test1",
					CreatedAt:   now,
				},
			},
		},
		{
			description:  "falseかつゲームが存在しなくても問題なし",
			strAll:       "false",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			executeGetMyGames: true,
			games:             []*service.GameInfo{},
			apiGames:          []*openapi.Game{},
		},
		{
			description:  "falseかつゲームが複数でも問題なし",
			strAll:       "false",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			executeGetMyGames: true,
			games: []*service.GameInfo{
				{
					Game: domain.NewGame(
						gameID1,
						values.NewGameName("test1"),
						values.NewGameDescription("test1"),
						now,
					),
					LatestVersion: domain.NewGameVersion(
						gameVersionID1,
						values.NewGameVersionName("test1"),
						values.NewGameVersionDescription("test1"),
						now,
					),
				},
				{
					Game: domain.NewGame(
						gameID2,
						values.NewGameName("test2"),
						values.NewGameDescription("test2"),
						now,
					),
					LatestVersion: domain.NewGameVersion(
						gameVersionID2,
						values.NewGameVersionName("test2"),
						values.NewGameVersionDescription("test2"),
						now,
					),
				},
			},
			apiGames: []*openapi.Game{
				{
					Id:          uuid.UUID(gameID1).String(),
					Name:        "test1",
					Description: "test1",
					CreatedAt:   now,
					Version: &openapi.GameVersion{
						Id:          uuid.UUID(gameVersionID1).String(),
						Name:        "test1",
						Description: "test1",
						CreatedAt:   now,
					},
				},
				{
					Id:          uuid.UUID(gameID2).String(),
					Name:        "test2",
					Description: "test2",
					CreatedAt:   now,
					Version: &openapi.GameVersion{
						Id:          uuid.UUID(gameVersionID2).String(),
						Name:        "test2",
						Description: "test2",
						CreatedAt:   now,
					},
				},
			},
		},
		{
			description:  "falseかつversionが存在しなくても問題なし",
			strAll:       "false",
			sessionExist: true,
			authSession: domain.NewOIDCSession(
				"accessToken",
				time.Now().Add(time.Hour),
			),
			executeGetMyGames: true,
			games: []*service.GameInfo{
				{
					Game: domain.NewGame(
						gameID1,
						values.NewGameName("test1"),
						values.NewGameDescription("test1"),
						now,
					),
				},
			},
			apiGames: []*openapi.Game{
				{
					Id:          uuid.UUID(gameID1).String(),
					Name:        "test1",
					Description: "test1",
					CreatedAt:   now,
				},
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			e := echo.New()
			// NOTE(review): uses http.MethodPost although this
			// exercises a listing handler — harmless since the handler
			// is invoked directly, but confirm intent.
			req := httptest.NewRequest(http.MethodPost, "/api/games", nil)
			rec := httptest.NewRecorder()
			c := e.NewContext(req, rec)
			if testCase.sessionExist {
				// Prime a session cookie (optionally carrying the auth
				// session) and re-read it into the echo context.
				sess, err := session.New(req)
				if err != nil {
					t.Fatal(err)
				}
				if testCase.authSession != nil {
					sess.Values[accessTokenSessionKey] = string(testCase.authSession.GetAccessToken())
					sess.Values[expiresAtSessionKey] = testCase.authSession.GetExpiresAt()
				}
				err = sess.Save(req, rec)
				if err != nil {
					t.Fatalf("failed to save session: %v", err)
				}
				setCookieHeader(c)
				sess, err = session.Get(req)
				if err != nil {
					t.Fatal(err)
				}
				c.Set(sessionContextKey, sess)
			}
			if testCase.executeGetGames {
				mockGameService.
					EXPECT().
					GetGames(gomock.Any()).
					Return(testCase.games, testCase.GetGamesErr)
			}
			if testCase.executeGetMyGames {
				mockGameService.
					EXPECT().
					GetMyGames(gomock.Any(), gomock.Any()).
					Return(testCase.games, testCase.GetMyGamesErr)
			}
			games, err := gameHandler.GetGames(c, testCase.strAll)
			// Error cases: status-code comparison when set, otherwise
			// generic / sentinel error checks.
			if testCase.isErr {
				if testCase.statusCode != 0 {
					var httpError *echo.HTTPError
					if errors.As(err, &httpError) {
						assert.Equal(t, testCase.statusCode, httpError.Code)
					} else {
						t.Errorf("error is not *echo.HTTPError")
					}
				} else if testCase.err == nil {
					assert.Error(t, err)
				} else if !errors.Is(err, testCase.err) {
					t.Errorf("error must be %v, but actual is %v", testCase.err, err)
				}
			} else {
				assert.NoError(t, err)
			}
			if err != nil {
				return
			}
			// Compare games field-by-field so a nil Version is checked
			// explicitly.
			assert.Len(t, games, len(testCase.apiGames))
			for i, game := range games {
				assert.Equal(t, testCase.apiGames[i].Id, game.Id)
				assert.Equal(t, testCase.apiGames[i].Name, game.Name)
				assert.Equal(t, testCase.apiGames[i].Description, game.Description)
				assert.Equal(t, testCase.apiGames[i].CreatedAt, game.CreatedAt)
				if testCase.apiGames[i].Version != nil {
					assert.Equal(t, testCase.apiGames[i].Version.Id, game.Version.Id)
					assert.Equal(t, testCase.apiGames[i].Version.Name, game.Version.Name)
					assert.Equal(t, testCase.apiGames[i].Version.Description, game.Version.Description)
					assert.Equal(t, testCase.apiGames[i].Version.CreatedAt, game.Version.CreatedAt)
				} else {
					assert.Nil(t, game.Version)
				}
			}
		})
	}
}
// TestDeleteGames is a table-driven test of the DELETE /api/game/:id
// handler: happy path, malformed UUID, missing game (ErrNoGame -> 400),
// and a service error (-> 500).
func TestDeleteGames(t *testing.T) {
	t.Parallel()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// Shared fixtures: feature flag, session config, mocked game service.
	mockAppConfig := mockConfig.NewMockApp(ctrl)
	mockAppConfig.
		EXPECT().
		FeatureV1Write().
		Return(true)
	mockConf := mockConfig.NewMockHandler(ctrl)
	mockConf.
		EXPECT().
		SessionKey().
		Return("key", nil)
	mockConf.
		EXPECT().
		SessionSecret().
		Return("secret", nil)
	sess, err := common.NewSession(mockConf)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	session, err := NewSession(sess)
	if err != nil {
		t.Fatalf("failed to create session: %v", err)
		return
	}
	mockGameService := mock.NewMockGame(ctrl)
	gameHandler := NewGame(mockAppConfig, session, mockGameService)
	// test describes a single table case; executeDeleteGame gates the
	// DeleteGame mock expectation.
	type test struct {
		description       string
		strGameID         string
		executeDeleteGame bool
		gameID            values.GameID
		DeleteGameErr     error
		isErr             bool
		err               error
		statusCode        int
	}
	gameID := values.NewGameID()
	testCases := []test{
		{
			description:       "特に問題ないのでエラーなし",
			strGameID:         uuid.UUID(gameID).String(),
			executeDeleteGame: true,
			gameID:            gameID,
		},
		{
			description: "gameIDが不正なので400",
			strGameID:   "invalid",
			isErr:       true,
			statusCode:  http.StatusBadRequest,
		},
		{
			description:       "ゲームが存在しないので400",
			strGameID:         uuid.UUID(gameID).String(),
			executeDeleteGame: true,
			gameID:            gameID,
			DeleteGameErr:     service.ErrNoGame,
			isErr:             true,
			statusCode:        http.StatusBadRequest,
		},
		{
			description:       "DeleteGameがエラーなので500",
			strGameID:         uuid.UUID(gameID).String(),
			executeDeleteGame: true,
			gameID:            gameID,
			DeleteGameErr:     errors.New("test"),
			isErr:             true,
			statusCode:        http.StatusInternalServerError,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			e := echo.New()
			req := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/api/game/%s", testCase.strGameID), nil)
			rec := httptest.NewRecorder()
			c := e.NewContext(req, rec)
			if testCase.executeDeleteGame {
				mockGameService.
					EXPECT().
					DeleteGame(gomock.Any(), testCase.gameID).
					Return(testCase.DeleteGameErr)
			}
			err := gameHandler.DeleteGames(c, testCase.strGameID)
			// Error cases: status-code comparison when set, otherwise
			// generic / sentinel error checks.
			if testCase.isErr {
				if testCase.statusCode != 0 {
					var httpError *echo.HTTPError
					if errors.As(err, &httpError) {
						assert.Equal(t, testCase.statusCode, httpError.Code)
					} else {
						t.Errorf("error is not *echo.HTTPError")
					}
				} else if testCase.err == nil {
					assert.Error(t, err)
				} else if !errors.Is(err, testCase.err) {
					t.Errorf("error must be %v, but actual is %v", testCase.err, err)
				}
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
|
//go:build s390x
// +build s390x
package arch
// Arch identifies the target architecture for this build (s390x).
const Arch = ArchS390

// Flavor selects the default flavor for s390x builds.
const Flavor = FlavorDefault
|
package problem14
import (
"exercises/aoc2020/common"
"fmt"
"math"
"regexp"
"strconv"
)
// Solve runs both parts of problem 14 against the default input file.
func Solve() (uint64, uint64, error) {
	return SolveBoth("./problem14/input.txt")
}
// SolveBoth parses the instruction list from inputFile and returns the
// sums of all memory values for part A (mask applied to values) and
// part B (mask's X bits floating over addresses).
func SolveBoth(inputFile string) (uint64, uint64, error) {
	instructions, err := common.ParseFile(inputFile, parseLine)
	if err != nil {
		return 0, 0, err
	}
	// Current mask state: bits forced to 1, bits forced to 0 (initially
	// all bits, i.e. the identity for "no mask seen yet"), and the
	// floating X bit positions.
	maskOnes, maskZeros := uint64(0), uint64(math.MaxUint64)
	maskXs := []uint64{}
	memoryA := map[uint64]uint64{}
	memoryB := map[uint64]uint64{}
	for instr := range instructions {
		switch instr.instructionType {
		case mask:
			maskOnes = instr.ones
			maskZeros = instr.zeros
			maskXs = instr.xs
		case mem:
			// Part A: force 1-bits on, clear 0-bits of the value.
			valueA := (instr.value | maskOnes) & (maskZeros ^ math.MaxUint64)
			memoryA[instr.address] = valueA
			// Part B: address keeps bits where the mask had '0', takes
			// 1 where it had '1'; X bits are expanded recursively.
			addressB := (instr.address & maskZeros) | maskOnes
			setMemoryAddresses(memoryB, maskXs, addressB, instr.value)
		}
	}
	solutionA := uint64(0)
	for _, v := range memoryA {
		solutionA += v
	}
	solutionB := uint64(0)
	for _, v := range memoryB {
		solutionB += v
	}
	return solutionA, solutionB, nil
}
// setMemoryAddresses writes value to every address reachable from addr
// by toggling each floating bit in maskXs both ways (2^len(maskXs)
// addresses in total), recursing on one floating bit at a time.
func setMemoryAddresses(memoryB map[uint64]uint64, maskXs []uint64, addr, value uint64) {
	if len(maskXs) == 0 {
		memoryB[addr] = value
		return
	}
	bit, rest := maskXs[0], maskXs[1:]
	setMemoryAddresses(memoryB, rest, addr, value)
	setMemoryAddresses(memoryB, rest, addr^bit, value)
}
const (
	// mask updates the current bitmask state.
	mask = iota
	// mem writes a value to a memory address.
	mem
)

// instruction is one parsed input line: either a mask update (ones,
// zeros, xs populated) or a memory write (address, value populated),
// distinguished by instructionType.
type instruction struct {
	instructionType int
	address         uint64
	value           uint64
	ones            uint64
	zeros           uint64
	xs              []uint64
}

// Input line patterns: "mask = XXXX..." (36 chars of X/0/1) and
// "mem[addr] = value".
var maskRe = regexp.MustCompile(`^mask = ([X01]{36})$`)
var memRe = regexp.MustCompile(`^mem\[(\d+)] = (\d+)$`)
// parseLine parses one input line into an instruction: either a mask
// update ("mask = ...") or a memory write ("mem[a] = v"). Lines that
// match neither pattern yield an error.
func parseLine(line string) (instruction, error) {
	matches := maskRe.FindStringSubmatch(line)
	if len(matches) > 0 {
		ones, zeros, xs := parseBitMask(matches[1])
		result := instruction{
			instructionType: mask,
			ones:            ones,
			zeros:           zeros,
			xs:              xs,
		}
		return result, nil
	}
	matches2 := memRe.FindStringSubmatch(line)
	if len(matches2) > 0 {
		address, err := strconv.ParseUint(matches2[1], 10, 64)
		if err != nil {
			return instruction{}, err
		}
		value, err := strconv.ParseUint(matches2[2], 10, 64)
		if err != nil {
			return instruction{}, err
		}
		result := instruction{
			instructionType: mem,
			address:         address,
			value:           value,
		}
		return result, nil
	}
	// %q already quotes (and escapes) its argument; the previous
	// \"%q\" produced doubled quotes in the message.
	return instruction{}, fmt.Errorf("unable to parse line: %q", line)
}
// parseBitMask decodes a 0/1/X bitmask string (most significant bit first)
// into: the bits forced to one, the bits forced to zero, and one single-bit
// value per floating "X" position.
func parseBitMask(bitmask string) (uint64, uint64, []uint64) {
	var ones, zeros uint64
	xs := []uint64{}
	for i := 0; i < len(bitmask); i++ {
		bit := uint64(1) << (len(bitmask) - i - 1)
		switch bitmask[i] {
		case '1':
			ones |= bit
		case '0':
			zeros |= bit
		case 'X':
			xs = append(xs, bit)
		}
	}
	return ones, zeros, xs
}
|
package views
import (
"strings"
"net/http"
"text/template"
)
// TagsInstallData is the template payload for the tag install script.
type TagsInstallData struct {
	ClientSecret string // secret echoed back into the generated script
	RepoOpts     string // body of the bash "case" statement, one entry per repo package
	Packages     string // newline-separated package names to install, in order
}
// ServeTags handles requests for tag-based install scripts. When the
// "secret" request header does not match, it renders the password prompt;
// otherwise it renders a bash script installing every package associated
// with the comma-separated tags in the request path ("/t/tag1,tag2").
func ServeTags(w http.ResponseWriter, r *http.Request, baseurl string, secret string, logo string, directory string, foldersMap map[string]string, tagsData map[string][]string) {
	clientSecret := r.Header.Get("secret")
	requestPath := strings.TrimSuffix(r.URL.Path, "/")
	// Wrong or missing secret: serve the password prompt and stop.
	if clientSecret != secret {
		// Escape single quotes for safe embedding in a single-quoted shell string.
		escapedLogo := strings.ReplaceAll(logo, "'", "'\"'\"'")
		data := passPromptData{baseurl, escapedLogo, clientSecret, requestPath}
		tmpl, err := template.New("passPrompt").Parse(passPromptView)
		if err != nil {
			panic(err)
		}
		if err = tmpl.Execute(w, data); err != nil {
			panic(err)
		}
		return
	}
	// Body of the bash case statement with one entry per repo package.
	repoPackages := repoPackagesCasePrint(foldersMap, true, directory, baseurl)
	// Concatenate the package lists of every requested tag.
	tags := strings.Split(strings.Replace(requestPath, "/t/", "", -1), ",")
	var packages strings.Builder
	for _, tag := range tags {
		if list, ok := tagsData[tag]; ok {
			packages.WriteString(strings.Join(list, "\n"))
			packages.WriteString("\n")
		}
	}
	tmpl, err := template.New("tagsInstall").Parse(tmplTagsInstall)
	if err != nil {
		panic(err)
	}
	if err = tmpl.Execute(w, TagsInstallData{clientSecret, repoPackages, packages.String()}); err != nil {
		panic(err)
	}
}
// tmplTagsInstall is the bash script rendered for tag install requests: it
// lists the packages, asks for confirmation, then installs each one via the
// generated selectPackage case statement.
var tmplTagsInstall = bashTemplHead + `
SECRET="{{ .ClientSecret }}"
selectPackage() {
case "$1" in
{{ .RepoOpts }}
esac
}
PACKAGES=$(cat <<-END
{{ .Packages }}END
)
barPrint
echo -ne " Following dotfiles will be installed in order:\n "
COMMA=""
for PACKAGE in $PACKAGES; do
echo -en "$COMMA$PACKAGE"
COMMA=", "
done
exec 3<>/dev/tty
confirmPrompt
barPrint
for NAME in $PACKAGES; do
selectPackage $NAME
done
`
|
package main
import (
"fmt"
"bufio"
"os"
"strings"
)
// main echoes a line read from standard input, then the space-separated
// items of a second line.
func main() {
	reader := bufio.NewReader(os.Stdin)
	// Read errors are deliberately ignored in this toy program; on EOF the
	// strings are simply empty.
	s, _ := reader.ReadString('\n')
	itemsStr, _ := reader.ReadString('\n')
	items := strings.Split(itemsStr, " ")
	// BUG FIX: fmt.Printf(s) treated user input as a format string (a go
	// vet "printf" violation); a line containing %d would corrupt output.
	fmt.Print(s)
	fmt.Printf("%v", items)
}
package internal
import (
"bytes"
"errors"
"io"
"os"
"strings"
"github.com/jedib0t/go-pretty/table"
"gopkg.in/yaml.v2"
)
// Signatures represents the plugins/rules from the
// .yaml configuration file. It's the root of a config
// file.
type Signatures struct {
	// Plugins lists every scan entry declared under the "plugins" key.
	Plugins []Plugin `yaml:"plugins"`
}
// Plugin means an entry to test for during scan.
type Plugin struct {
	Endpoints       []string `yaml:"endpoints"` // paths probed for this plugin
	Checks          []Check  `yaml:"checks"`    // criteria evaluated against each response
	FollowRedirects bool     `yaml:"follow_redirects"`
}
// Check is a check the scan runs in. Matching semantics are implemented by
// Check.Match.
type Check struct {
	MustMatchOne []string `yaml:"match"`       // at least one must appear in the body
	MustMatchAll []string `yaml:"all_match"`   // all of these must appear in the body
	MustNotMatch []string `yaml:"no_match"`    // none of these may appear in the body
	StatusCode   *int     `yaml:"status_code"` // expected HTTP status code
	Name         string   `yaml:"name"`
	Remediation  string   `yaml:"remediation"`
	Severity     string   `yaml:"severity"`
	Description  string   `yaml:"description"`
	Headers      []string `yaml:"headers"`    // required headers, "KEY:VALUE" format
	NoHeaders    []string `yaml:"no_headers"` // forbidden headers, "KEY:VALUE" format
}
// ErrInvalidHeaderFormat is an error meaning a header criterion does not
// follow the "KEY:VALUE" format.
type ErrInvalidHeaderFormat struct {
	Header string
}

// Error implements the error interface.
func (e ErrInvalidHeaderFormat) Error() string {
	return `invalid header format: ` + e.Header + ` should be "KEY:VALUE"`
}
// Match analyses the HTTP response against the check. Every configured
// criterion must hold: the status code equals the expected one, every
// MustMatchAll string appears in the body, at least one MustMatchOne string
// appears (vacuously true when the list is empty), no MustNotMatch string
// appears, every Headers entry is present with a matching value, and no
// NoHeaders entry is present with a matching value.
// It returns an ErrNilParameter for nil inputs and ErrInvalidHeaderFormat
// for header criteria not in "KEY:VALUE" format.
func (check *Check) Match(resp *HTTPResponse) (bool, error) {
	// Guard against nil inputs.
	if check == nil {
		return false, &ErrNilParameter{"check"}
	}
	if check.StatusCode == nil {
		return false, &ErrNilParameter{"check.StatusCode"}
	}
	if resp == nil {
		return false, &ErrNilParameter{"resp"}
	}
	// Status code must match exactly.
	if resp.StatusCode != *check.StatusCode {
		return false, nil
	}
	// Every MustMatchAll entry must be found in the body.
	for _, match := range check.MustMatchAll {
		if !bytes.Contains(resp.Body, []byte(match)) {
			return false, nil
		}
	}
	// At least one MustMatchOne entry must be found in the body.
	// BUG FIX: an empty list previously forced a non-match, making checks
	// that rely only on all_match/headers criteria impossible to satisfy;
	// it is now vacuously satisfied.
	if len(check.MustMatchOne) > 0 {
		found := false
		for _, match := range check.MustMatchOne {
			if bytes.Contains(resp.Body, []byte(match)) {
				found = true
				break
			}
		}
		if !found {
			return false, nil
		}
	}
	// No MustNotMatch entry may be found in the body.
	for _, match := range check.MustNotMatch {
		if bytes.Contains(resp.Body, []byte(match)) {
			return false, nil
		}
	}
	// Every Headers entry must be present, with the response value
	// containing the expected value.
	for _, header := range check.Headers {
		hs := strings.Split(header, ":")
		if len(hs) != 2 {
			return false, &ErrInvalidHeaderFormat{header}
		}
		hKey, hVal := hs[0], hs[1]
		respHVal, ok := resp.Header[hKey]
		if !ok {
			return false, nil
		}
		matched := false
		for _, respHeaderValue := range respHVal {
			if strings.Contains(respHeaderValue, hVal) {
				matched = true
				break
			}
		}
		if !matched {
			return false, nil
		}
	}
	// No NoHeaders entry may be present with a matching value.
	for _, header := range check.NoHeaders {
		pNH := strings.Split(header, ":")
		if len(pNH) != 2 {
			return false, &ErrInvalidHeaderFormat{header}
		}
		nhKey, nhVal := pNH[0], pNH[1]
		if respHeaderValues, kFound := resp.Header[nhKey]; kFound {
			for _, respHeaderValue := range respHeaderValues {
				if strings.Contains(respHeaderValue, nhVal) {
					return false, nil
				}
			}
		}
	}
	// Everything matched.
	return true, nil
}
// ErrCheckInvalidField is an error meaning a mandatory check field is
// missing or empty in a plugin's configuration.
type ErrCheckInvalidField struct {
	Check string
	Field string
}

// Error implements the error interface.
func (e ErrCheckInvalidField) Error() string {
	msg := "missing or empty " + e.Field
	return msg + " in " + e.Check + " plugin checks."
}
// ErrInvalidPathSignaturesFile is an error meaning
// that the path to the signatures file is invalid.
var ErrInvalidPathSignaturesFile = errors.New("path of signatures file is not valid")

// ErrBothEndpointSet is an error meaning endpoint and
// endpoints are set at same time.
// NOTE(review): this sentinel is not referenced anywhere in this file —
// confirm it is used elsewhere before keeping it.
var ErrBothEndpointSet = errors.New("URI and URIs can't be set at the same time in plugin checks")
// ReaderFromFile opens the signatures file at path and returns it as an
// io.Reader. It returns ErrInvalidPathSignaturesFile when the file does not
// exist. The caller is responsible for closing the underlying file.
func ReaderFromFile(path string) (io.Reader, error) {
	// Open directly instead of Stat-then-Open: the existence pre-check was
	// a TOCTOU race (the file could disappear between the two calls) and
	// cost an extra syscall.
	signFile, err := os.Open(path)
	if os.IsNotExist(err) {
		return nil, ErrInvalidPathSignaturesFile
	}
	if err != nil {
		return nil, err
	}
	return signFile, nil
}
// ParseSignatures parses and returns the signatures read from r. It
// validates that every check has a description, remediation and a known
// severity, and that all header criteria use the "KEY:VALUE" format.
func ParseSignatures(r io.Reader) (*Signatures, error) {
	// Read the whole configuration.
	signData, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	// Decode the YAML into the signatures tree.
	var sign Signatures
	if err := yaml.Unmarshal(signData, &sign); err != nil {
		return nil, err
	}
	// Validate every plugin's checks.
	for _, plugin := range sign.Plugins {
		for _, check := range plugin.Checks {
			// Mandatory descriptive fields must be non-empty.
			switch "" {
			case check.Description:
				return nil, &ErrCheckInvalidField{Check: check.Name, Field: "description"}
			case check.Remediation:
				return nil, &ErrCheckInvalidField{Check: check.Name, Field: "remediation"}
			case check.Severity:
				return nil, &ErrCheckInvalidField{Check: check.Name, Field: "severity"}
			}
			// Severity must map to a known level.
			if _, err := StringToSeverity(check.Severity); err != nil {
				return nil, err
			}
			// Header criteria must match the KEY:VALUE format.
			for _, header := range check.Headers {
				if strings.Count(header, ":") != 1 {
					return nil, &ErrInvalidHeaderFormat{header}
				}
			}
			// CONSISTENCY FIX: NoHeaders was previously not validated here,
			// although Check.Match fails on malformed no_headers entries at
			// scan time; validate it up front as well.
			for _, header := range check.NoHeaders {
				if strings.Count(header, ":") != 1 {
					return nil, &ErrInvalidHeaderFormat{header}
				}
			}
		}
	}
	return &sign, nil
}
// PrintSignatures renders, as a table on w, every check whose severity
// equals sevStr, with a footer giving the total number of matching checks.
func PrintSignatures(sign *Signatures, sevStr string, w io.Writer) {
	tw := table.NewWriter()
	tw.SetOutputMirror(w)
	tw.AppendHeader(table.Row{"Endpoint", "Check Name", "Severity", "Description"})
	total := 0
	for _, plugin := range sign.Plugins {
		for _, check := range plugin.Checks {
			if check.Severity != sevStr {
				continue
			}
			tw.AppendRow([]interface{}{plugin.Endpoints, check.Name, check.Severity, check.Description})
			total++
		}
	}
	tw.AppendFooter(table.Row{"", "", "Total Checks", total})
	tw.Render()
}
|
package main
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"time"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
chiadapter "github.com/awslabs/aws-lambda-go-api-proxy/chi"
log "github.com/sirupsen/logrus"
"github.com/ramrodo/tech-assessment-loan-startup/router"
"github.com/ramrodo/tech-assessment-loan-startup/runtime"
)
// Server timing defaults.
const (
	// defaultGracefulTimeout bounds how long shutdown waits for in-flight
	// requests to finish.
	defaultGracefulTimeout = time.Second * 15
	// defaultPort is the development server's listen port.
	defaultPort        = 3000
	serverWriteTimeout = time.Second * 10
	serverReadTimeOut  = time.Second * 10
	serverIdleTimeout  = time.Second * 10
)
// port is the HTTP listen port, populated from the -port flag in main.
var port int

// gracefulTimeout is the shutdown grace period, populated from the
// -graceful-timeout flag in startDevelopmentServer.
var gracefulTimeout *time.Duration
// main registers the -port flag and dispatches either to the AWS Lambda
// handler (when running inside Lambda) or to the local development server.
func main() {
	flag.IntVar(&port, "port", defaultPort, "server port")
	if runtime.IsLambdaEnvironment() {
		lambda.Start(HandleLambdaEvent)
		return
	}
	startDevelopmentServer()
}
// startDevelopmentServer runs the HTTP API locally and shuts it down
// gracefully on SIGINT.
func startDevelopmentServer() {
	// BUG FIX: this flag must be registered BEFORE flag.Parse is called;
	// previously it was registered after, so it never received a value from
	// the command line (and passing it produced a "flag provided but not
	// defined" error).
	gracefulTimeout = flag.Duration("graceful-timeout", defaultGracefulTimeout, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
	flag.Parse()
	router := router.NewRouter()
	srv := &http.Server{
		Addr:         fmt.Sprintf(":%d", port),
		WriteTimeout: serverWriteTimeout,
		ReadTimeout:  serverReadTimeOut,
		IdleTimeout:  serverIdleTimeout,
		Handler:      router,
	}
	// Run our server in a goroutine so that it doesn't block.
	go func() {
		log.Infof("starting server on port %d...\n", port)
		// http.ErrServerClosed is the expected result of Shutdown, not a
		// failure.
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()
	c := make(chan os.Signal, 1)
	// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C).
	// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
	signal.Notify(c, os.Interrupt)
	// Block until we receive our signal.
	<-c
	// Create a deadline to wait for outstanding connections.
	ctx, cancel := context.WithTimeout(context.Background(), *gracefulTimeout)
	defer cancel()
	// Doesn't block if no connections, but will otherwise wait until the
	// timeout deadline. The error was previously discarded.
	if err := srv.Shutdown(ctx); err != nil {
		log.Errorf("graceful shutdown failed: %v", err)
	}
	log.Info("shutting down")
	os.Exit(0)
}
// HandleLambdaEvent adapts API Gateway proxy events onto the chi router
// when the binary runs inside AWS Lambda.
func HandleLambdaEvent(ctx context.Context, req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	adapter := chiadapter.New(router.NewRouter())
	return adapter.ProxyWithContext(ctx, req)
}
|
package admin
import (
"time"
"github.com/astaxie/beego/orm"
)
// Customer is the beego ORM model for the "customer" table.
type Customer struct {
	Id        int       `orm:"column(id);auto" description:"主键"`                  // primary key
	Uid       string    `orm:"column(uid);size(50)" description:"用户ID"`           // user id
	Username  string    `orm:"column(username);size(255);null" description:"用户名"` // user name
	Password  string    `orm:"column(password);size(255);null" description:"密码"`  // password
	Nickname  string    `orm:"column(nickname);size(255);null" description:"昵称"`  // nickname
	Image     string    `orm:"column(image);size(255);null" description:"头像"`     // avatar
	Url       string    `orm:"column(url);size(255);null" description:"博客地址"`     // blog URL
	Signature string    `orm:"column(signature);size(255);null" description:"个性签名"` // profile signature
	Email     string    `orm:"column(email);size(50);null" description:"邮箱"`      // e-mail
	Phone     string    `orm:"column(phone);size(50);null" description:"电话"`      // phone number
	Wishlist  int       `orm:"column(wishlist);null" description:"收藏"`            // favourites count
	Review    int       `orm:"column(review);null" description:"评论"`              // comments count
	Like      int       `orm:"column(like);null" description:"点赞"`                // likes count
	Status    int       `orm:"column(status);null" description:"1可用,2禁用,0删除"`     // 1 active, 2 disabled, 0 deleted
	Created   time.Time `orm:"column(created);type(datetime);null" description:"创建时间"` // creation time
	Updated   time.Time `orm:"column(updated);type(datetime);null" description:"修改时间"` // last update time
}
// TableName returns the database table name used by the ORM for Customer
// (enables automatic table creation/mapping).
func (t *Customer) TableName() string {
	return "customer"
}
// init registers the Customer model with the beego ORM.
func init() {
	orm.RegisterModel(new(Customer))
}
// AddCustomer inserts a new customer row and returns the generated id.
func AddCustomer(m *Customer) (id int64, err error) {
	o := orm.NewOrm()
	id, err = o.Insert(m)
	return id, err
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package transport_test
import (
"context"
"fmt"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/ctpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/transport"
transporttestutils "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/transport/testutils"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/errors"
"github.com/kr/pretty"
)
// NewTestContainer sets up an environment suitable for black box testing the
// transport subsystem. The returned test container contains most notably a
// Clients and Server set up to communicate to each other via a Dialer (which
// keeps a transcript that can be verified).
func NewTestContainer() *TestContainer {
	stopper := stop.NewStopper()
	st := cluster.MakeTestingClusterSettings()
	p := &TestProducer{}
	sink := newTestNotifyee(stopper)
	refreshed := &RefreshTracker{}
	// The server records requested range refreshes in the tracker.
	s := transport.NewServer(stopper, p, refreshed.Add)
	// The dialer keeps a transcript of everything exchanged with s.
	dialer := transporttestutils.NewChanDialer(stopper, s)
	c := transport.NewClients(transport.Config{
		NodeID:   roachpb.NodeID(12345),
		Settings: st,
		Stopper:  stopper,
		Dialer:   dialer,
		Sink:     sink,
	})
	return &TestContainer{
		Settings:  st,
		Stopper:   stopper,
		Producer:  p,
		Notifyee:  sink,
		Refreshed: refreshed,
		Server:    s,
		Dialer:    dialer,
		Clients:   c,
	}
}
// assertNumSubscribers waits until the producer has exactly exp
// subscriptions, failing the test immediately if it ever observes more
// than exp.
func assertNumSubscribers(t *testing.T, p *TestProducer, exp int) {
	testutils.SucceedsSoon(t, func() error {
		n := p.numSubscriptions()
		if n > exp {
			// BUG FIX: the message previously claimed "expected a single
			// subscription" regardless of exp.
			t.Fatalf("expected %d subscriptions, got %d", exp, n)
		}
		if n < exp {
			return errors.New("waiting for subscription")
		}
		return nil
	})
}
// TestTransportConnectOnRequest verifies that requesting a closed-timestamp
// update for a range lazily opens a connection (observed as exactly one
// producer subscription) and that the server soon receives the refresh
// request for that range.
func TestTransportConnectOnRequest(t *testing.T) {
	defer leaktest.AfterTest(t)()
	skip.WithIssue(t, closedts.IssueTrackingRemovalOfOldClosedTimestampsCode)
	container := NewTestContainer()
	defer container.Stopper.Stop(context.Background())
	const (
		nodeID  = 1
		rangeID = 13
	)
	// Requesting an update for a Range implies a connection attempt.
	container.Clients.Request(nodeID, rangeID)
	// Find the connection (via its subscription to receive new Entries).
	assertNumSubscribers(t, container.Producer, 1)
	// Verify that the client soon asks the server for an update for this range.
	testutils.SucceedsSoon(t, func() error {
		act := container.Refreshed.Get()
		exp := []roachpb.RangeID{rangeID}
		if diff := pretty.Diff(act, exp); len(diff) != 0 {
			// We have to kick the tires a little bit. The client can only send
			// the request as the reaction to an Entry.
			container.Producer.sendAll(ctpb.Entry{})
			return errors.Errorf("diff(act, exp): %s", strings.Join(diff, "\n"))
		}
		return nil
	})
}
// TestTransportClientReceivesEntries verifies that clients are created at
// most once per node, and that entries emitted by the producer are relayed
// to the client, which acknowledges each with a Reaction (carrying any
// ranges requested since the last entry).
func TestTransportClientReceivesEntries(t *testing.T) {
	defer leaktest.AfterTest(t)()
	skip.WithIssue(t, closedts.IssueTrackingRemovalOfOldClosedTimestampsCode)
	container := NewTestContainer()
	defer container.Stopper.Stop(context.Background())
	const nodeID = 7
	// Manual reconnections don't spawn new clients.
	container.Clients.EnsureClient(nodeID)
	container.Clients.EnsureClient(nodeID)
	container.Clients.EnsureClient(nodeID)
	assertNumSubscribers(t, container.Producer, 1)
	// But connecting to other nodes does (only once).
	for i := 0; i < 7; i++ {
		container.Clients.EnsureClient(nodeID + 1)
		container.Clients.EnsureClient(nodeID + 2)
		container.Clients.Request(nodeID+3, roachpb.RangeID(7))
	}
	assertNumSubscribers(t, container.Producer, 4)
	// Our initial client doesn't do anything except say "hello" via
	// a Reaction.
	testutils.SucceedsSoon(t, func() error {
		expectedTranscript := []interface{}{
			&ctpb.Reaction{},
		}
		return checkTranscript(t, container.Dialer.Transcript(nodeID), expectedTranscript)
	})
	// Now the producer (to which the server should maintain a subscription for this client, and
	// notifications from which it should relay) emits an Entry.
	e1 := ctpb.Entry{ClosedTimestamp: hlc.Timestamp{WallTime: 1e9}, Epoch: 12, MLAI: map[roachpb.RangeID]ctpb.LAI{12: 7}}
	container.Producer.sendAll(e1)
	// The client should see this entry soon thereafter. it responds with an empty
	// Reaction (since we haven't Request()ed anything).
	testutils.SucceedsSoon(t, func() error {
		expectedTranscript := []interface{}{
			&ctpb.Reaction{},
			&e1,
			&ctpb.Reaction{},
		}
		return checkTranscript(t, container.Dialer.Transcript(nodeID), expectedTranscript)
	})
	// And again, but only after Request() is called (which should be reflected in the transcript).
	const rangeID = 7
	container.Clients.Request(nodeID, rangeID)
	e2 := ctpb.Entry{ClosedTimestamp: hlc.Timestamp{WallTime: 2e9}, Epoch: 13, MLAI: map[roachpb.RangeID]ctpb.LAI{13: 8}}
	container.Producer.sendAll(e2)
	testutils.SucceedsSoon(t, func() error {
		expectedTranscript := []interface{}{
			&ctpb.Reaction{},
			&e1,
			&ctpb.Reaction{},
			&e2,
			&ctpb.Reaction{Requested: []roachpb.RangeID{rangeID}},
		}
		return checkTranscript(t, container.Dialer.Transcript(nodeID), expectedTranscript)
	})
}
// checkTranscript compares the actual transcript actI against the expected
// expI. It fails the test fatally when the actual transcript is longer than
// expected or differs in content, and returns a retryable error while the
// actual transcript is still shorter than expected.
func checkTranscript(t *testing.T, actI, expI []interface{}) error {
	t.Helper()
	render := func(msgs []interface{}) []string {
		var out []string
		for _, m := range msgs {
			out = append(out, strings.TrimSpace(fmt.Sprintf("%v", m)))
		}
		return out
	}
	act, exp := render(actI), render(expI)
	diffErr := errors.Errorf("actual:\n%s\nexpected:\n%s", strings.Join(act, "\n"), strings.Join(exp, "\n"))
	switch {
	case len(act) > len(exp):
		t.Fatal(errors.Wrap(diffErr, "actual transcript longer than expected"))
	case len(act) < len(exp):
		return errors.Wrap(diffErr, "waiting for more")
	}
	if diff := pretty.Diff(actI, expI); len(diff) != 0 {
		t.Fatal(errors.Wrapf(diffErr, "diff:\n%v\n", strings.Join(diff, "\n")))
	}
	return nil
}
|
package client
import (
"fmt"
"net/url"
"strings"
"github.com/hyperhq/hyper/engine"
"github.com/hyperhq/runv/hypervisor/types"
gflag "github.com/jessevdk/go-flags"
)
// HyperCmdRmi implements the "rmi" command: it removes one or more images
// by id via the /images/remove API endpoint, printing per-image success or
// failure and always returning nil after argument validation succeeds.
func (cli *HyperClient) HyperCmdRmi(args ...string) error {
	var opts struct {
		Noprune bool `long:"no-prune" default:"false" default-mask:"-" description:"Do not delete untagged parents"`
		Force   bool `short:"f" long:"force" default:"true" default-mask:"-" description:"Force removal of the image"`
	}
	parser := gflag.NewParser(&opts, gflag.Default)
	parser.Usage = "rmi [OPTIONS] IMAGE [IMAGE...]\n\nRemove one or more images"
	args, err := parser.Parse()
	if err != nil {
		// A "Usage" error means help text was printed; not a failure.
		// (IDIOM: early return instead of else-after-return.)
		if strings.Contains(err.Error(), "Usage") {
			return nil
		}
		return err
	}
	// args[0] is the command name itself, so at least one image id must
	// follow it.
	if len(args) < 2 {
		return fmt.Errorf("\"rmi\" requires a minimum of 1 argument, please provide IMAGE ID.\n")
	}
	// The API expects "yes"/"no" strings rather than booleans.
	// (IDIOM: boolean flags tested directly instead of == true / == false.)
	noprune := "no"
	if opts.Noprune {
		noprune = "yes"
	}
	force := "yes"
	if !opts.Force {
		force = "no"
	}
	for _, imageId := range args[1:] {
		v := url.Values{}
		v.Set("imageId", imageId)
		v.Set("noprune", noprune)
		v.Set("force", force)
		body, _, err := readBody(cli.call("POST", "/images/remove?"+v.Encode(), nil, nil))
		if err != nil {
			fmt.Fprintf(cli.err, "Error remove the image(%s): %s\n", imageId, err.Error())
			continue
		}
		out := engine.NewOutput()
		remoteInfo, err := out.AddEnv()
		if err != nil {
			fmt.Fprintf(cli.err, "Error remove the image(%s): %s\n", imageId, err.Error())
			continue
		}
		if _, err := out.Write(body); err != nil {
			fmt.Fprintf(cli.err, "Error remove the image(%s): %s\n", imageId, err.Error())
			continue
		}
		out.Close()
		errCode := remoteInfo.GetInt("Code")
		if errCode == types.E_OK || errCode == types.E_VM_SHUTDOWN {
			fmt.Fprintf(cli.out, "Image(%s) is successful to be deleted!\n", imageId)
		} else {
			fmt.Fprintf(cli.err, "Error remove the image(%s): %s\n", imageId, remoteInfo.Get("Cause"))
		}
	}
	return nil
}
|
package util
import (
"math/rand"
"runtime"
"time"
)
func Platform() (string, string) {
return runtime.GOOS, runtime.GOARCH
}
// SetSeed seeds the global math/rand source with the current time in
// nanoseconds so that subsequent draws differ between runs.
func SetSeed() {
	seed := time.Now().UnixNano()
	rand.Seed(seed)
}
// Random returns a pseudo-random int in [0, n). Like rand.Intn, it panics
// when n <= 0.
func Random(n int) int {
	return rand.Intn(n)
}
// Sum returns a stateful accumulator: each call of the returned closure
// adds its argument to a private running total and returns the new total.
func Sum() func(int) int {
	total := 0
	return func(n int) int {
		total += n
		return total
	}
}
// Obj is a named object with a readable and writable name.
type Obj interface {
	GetName() string
	SetName(name string)
}
// Engine is a simple named object.
type Engine struct {
	Name string
}

// GetName returns the engine's name.
// NOTE(review): GetName uses a value receiver while SetName uses a pointer
// receiver, so only *Engine satisfies Obj. Consider making receivers
// consistent — confirm no callers rely on Engine value copies first.
func (e Engine) GetName() string {
	return e.Name
}

// SetName sets the engine's name.
func (e *Engine) SetName(name string) {
	e.Name = name
}
|
package bench
import "testing"
// benchmarkCumulativeWithMemo measures cumulativeWithMemo for the given
// day count.
func benchmarkCumulativeWithMemo(b *testing.B, day int) {
	for ii := 0; ii < b.N; ii++ {
		cumulativeWithMemo(day)
	}
}
// benchmarkCumulativeWithoutMemo measures cumulativeWithoutMemo for the
// given day count.
func benchmarkCumulativeWithoutMemo(b *testing.B, day int) {
	for ii := 0; ii < b.N; ii++ {
		cumulativeWithoutMemo(day)
	}
}
// NOTE(review): the benchmark names advertise day counts of 10 and 1000,
// but the calls pass 1000 and 100000 — confirm which was intended before
// comparing against historical benchmark results.
func BenchmarkCumulative10(b *testing.B)   { benchmarkCumulativeWithMemo(b, 1000) }
func BenchmarkCumulative1000(b *testing.B) { benchmarkCumulativeWithMemo(b, 100000) }
func BenchmarkCumulativeWithoutMemo10(b *testing.B)   { benchmarkCumulativeWithoutMemo(b, 1000) }
func BenchmarkCumulativeWithoutMemo1000(b *testing.B) { benchmarkCumulativeWithoutMemo(b, 100000) }
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"reflect"
"strings"
"time"
"github.com/gocql/gocql"
)
// session is the lazily initialized, package-wide Cassandra session reused
// by every query helper (see createSession).
var session *gocql.Session
// executeQuery1 is a stub that always returns (nil, nil).
// NOTE(review): it appears unused in this file — confirm it can be deleted.
func executeQuery1(query string) ([]map[string]interface{}, error) {
	return nil, nil
}
// createSession returns the cached package-wide session, creating it on
// first use from the given cluster IPs and keyspace.
// NOTE(review): initialization is not safe for concurrent first calls —
// confirm single-goroutine startup.
func createSession(clusterIPs []string, keyspace string) (*gocql.Session, error) {
	if session == nil {
		cluster := gocql.NewCluster(clusterIPs...)
		cluster.Keyspace = keyspace
		s, err := cluster.CreateSession()
		if err != nil {
			return nil, err
		}
		session = s
	}
	return session, nil
}
// getPartitionKeys fetches every distinct value of the single partition-key
// column col over the full token range of nameSpaceName.tableName.
func getPartitionKeys(nameSpaceName string, tableName string, col column) ([]map[string]interface{}, error) {
	log.Println("getPartitionKeys: Fetching partition key")
	query := fmt.Sprintf(
		`SELECT distinct "%s" FROM %s.%s where TOKEN("%s") >= -9223372036854775808 AND TOKEN("%s") <= 9223372036854775807`,
		col.Name, nameSpaceName, tableName, col.Name, col.Name)
	data, err := executeQuery(session, query)
	if err != nil {
		log.Println("getPartitionKeys: Error: ", err)
		return nil, err
	}
	log.Println("getPartitionKeys: Partition Data: ", data)
	return data, nil
}
// getMultiColumnPartitionKeys fetches every distinct combination of the
// composite partition-key columns cols over the full token range of
// nameSpaceName.tableName.
func getMultiColumnPartitionKeys(nameSpaceName string, tableName string, cols []column) ([]map[string]interface{}, error) {
	// Build the quoted, comma-separated partition column list once.
	colList := ""
	for _, col := range cols {
		if colList != "" {
			colList += ", "
		}
		colList += fmt.Sprintf(`"%s"`, col.Name)
	}
	selectQuery := fmt.Sprintf(`SELECT distinct %s FROM %s.%s `, colList, nameSpaceName, tableName)
	wherePart := fmt.Sprintf(` where Token(%s)>= -9223372036854775808 and Token(%s) <= 9223372036854775807`, colList, colList)
	finalQuery := fmt.Sprintf("%s %s", selectQuery, wherePart)
	data, err := executeQuery(session, finalQuery)
	if err != nil {
		// BUG FIX: log messages previously carried the copy-pasted
		// "getPartitionKeys" prefix of the single-column helper.
		log.Println("getMultiColumnPartitionKeys: Error: ", err)
		return nil, err
	}
	log.Println("getMultiColumnPartitionKeys: Partition Data: ", data)
	return data, nil
}
// getColumnMetadata reads the system_schema column definitions for the
// given keyspace and table, expanding UDT columns into flattened entries.
func getColumnMetadata(nameSpaceName string, tableName string) ([]column, error) {
	query := fmt.Sprintf(`SELECT * FROM system_schema.columns where keyspace_name='%s' and table_name = '%s'`, nameSpaceName, tableName)
	data, err := executeQuery(session, query)
	if err != nil {
		return nil, err
	}
	return translateData(data, nameSpaceName, tableName)
}
// translateData converts raw system_schema.columns rows into column values,
// recursively expanding user-defined types into dotted sub-columns.
func translateData(data []map[string]interface{}, namespaceName, tableName string) ([]column, error) {
	var columns []column
	for _, row := range data {
		typeName := ToString(row["type"])
		colName := ToString(row["column_name"])
		if checkIfUDT(typeName) {
			cols, err := getTypes(namespaceName, typeName, colName)
			if err != nil {
				return nil, err
			}
			columns = append(columns, cols...)
			continue
		}
		columns = append(columns, column{
			Name:            colName,
			ClusterSequence: ToInt(row["position"]),
			Datatype:        typeName,
			Kind:            ToString(row["kind"]),
		})
	}
	return columns, nil
}
// getTypes resolves the user-defined type typeName in keyspace
// nameSpaceName and flattens its fields into columns whose names are
// prefixed with columnName.
func getTypes(nameSpaceName string, typeName string, columnName string) ([]column, error) {
	query := fmt.Sprintf(`SELECT * FROM system_schema.types where keyspace_name='%s' and type_name = '%s'`, nameSpaceName, typeName)
	data, err := executeQuery(session, query)
	if err != nil {
		return nil, err
	}
	return translateTypesToColumns(data, nameSpaceName, columnName)
}
// translateTypesToColumns flattens the fields of system_schema.types rows
// into columns named "<columnName>.<fieldName>", recursing into nested
// user-defined types.
func translateTypesToColumns(data []map[string]interface{}, nameSpaceName, columnName string) ([]column, error) {
	var columns []column
	for _, row := range data {
		// ROBUSTNESS FIX: use comma-ok type assertions instead of panicking
		// when the driver returns an unexpected row shape.
		fields, ok := row["field_names"].([]string)
		if !ok {
			return nil, fmt.Errorf("unexpected type for field_names: %T", row["field_names"])
		}
		types, ok := row["field_types"].([]string)
		if !ok {
			return nil, fmt.Errorf("unexpected type for field_types: %T", row["field_types"])
		}
		// ROBUSTNESS FIX: guard the parallel-slice indexing below.
		if len(types) != len(fields) {
			return nil, fmt.Errorf("field_names/field_types length mismatch: %d vs %d", len(fields), len(types))
		}
		for index, fieldName := range fields {
			name := fmt.Sprintf("%s.%s", columnName, fieldName)
			if checkIfUDT(types[index]) {
				cols, err := getTypes(nameSpaceName, ToString(types[index]), name)
				if err != nil {
					return nil, err
				}
				columns = append(columns, cols...)
				continue
			}
			columns = append(columns, column{
				Name:            name,
				ClusterSequence: -1,
				Datatype:        ToString(types[index]),
				Kind:            "",
			})
		}
	}
	return columns, nil
}
// checkIfUDT reports whether typename is NOT one of Cassandra's primitive
// types (case-insensitive), i.e. whether it must be resolved as a
// user-defined type.
func checkIfUDT(typename string) bool {
	primitives := []string{
		"ascii", "bigint", "blob", "boolean", "counter", "decimal",
		"double", "float", "inet", "int", "text", "timestamp",
		"timeuuid", "uuid", "varchar", "varint",
	}
	lower := strings.ToLower(typename)
	for _, p := range primitives {
		if p == lower {
			return false
		}
	}
	return true
}
// createSourceTableQuery builds a parameterized SELECT over the mapped
// source columns, with a WHERE clause on every mapped column that is a
// partition column. Placeholders use the "$<sourceColumn>" convention
// consumed by replaceQueryColNames.
// NOTE(review): iteration over ColumnMapping is map-ordered, so column
// order in the generated query is nondeterministic across runs — confirm
// that downstream consumers don't rely on a fixed order.
func createSourceTableQuery(config tableConfig, partitionColumns []column) string {
	isPartition := func(name string) bool {
		for _, pcol := range partitionColumns {
			if pcol.Name == name {
				return true
			}
		}
		return false
	}
	selectPart := ""
	wherePart := ""
	for colName := range config.ColumnMapping {
		if isPartition(colName) {
			if wherePart == "" {
				wherePart = fmt.Sprintf(" where %s = $%s", colName, colName)
			} else {
				wherePart = fmt.Sprintf("%s and %s = $%s ", wherePart, colName, colName)
			}
		}
		if selectPart == "" {
			selectPart = fmt.Sprintf("select %s", colName)
		} else {
			selectPart = fmt.Sprintf("%s, %s", selectPart, colName)
		}
	}
	tablePart := fmt.Sprintf(" from %s.%s ", config.SourceKeySpace, config.SourceTable)
	return fmt.Sprintf("%s %s %s", selectPart, tablePart, wherePart)
}
// createDestinationTableQuery builds a parameterized UPDATE for the mapped
// destination columns: partition columns go into the WHERE clause, every
// other mapped column into the SET clause. Placeholders use the
// "$<sourceColumn>" convention consumed by replaceQueryColNames.
// NOTE(review): iteration over ColumnMapping is map-ordered, so clause
// order is nondeterministic across runs.
func createDestinationTableQuery(config tableConfig, partitionColumns []column) string {
	isPartition := func(name string) bool {
		for _, pcol := range partitionColumns {
			if pcol.Name == name {
				return true
			}
		}
		return false
	}
	updatePart := ""
	wherePart := ""
	for sourceColName, colName := range config.ColumnMapping {
		if isPartition(colName) {
			if wherePart == "" {
				wherePart = fmt.Sprintf(" where %s = $%s", colName, sourceColName)
			} else {
				wherePart = fmt.Sprintf(" %s and %s = $%s ", wherePart, colName, sourceColName)
			}
			continue
		}
		if updatePart == "" {
			updatePart = fmt.Sprintf(" %s = $%s", colName, sourceColName)
		} else {
			updatePart = fmt.Sprintf("%s, %s = $%s", updatePart, colName, sourceColName)
		}
	}
	tablePart := fmt.Sprintf("update %s.%s set", config.DestinationKeySpace, config.DestinationTable)
	return fmt.Sprintf("%s %s %s", tablePart, updatePart, wherePart)
}
// getSourceTableData1 substitutes a single partition-column placeholder in
// selectQuery with partitionValue and executes the resulting query.
func getSourceTableData1(session *gocql.Session, selectQuery string, partCol column, partitionValue interface{}) ([]map[string]interface{}, error) {
	finalQuery, err := replaceQueryColNames(selectQuery, partCol, partitionValue)
	if err != nil {
		return nil, err
	}
	return executeQuery(session, finalQuery)
}
// getSourceTableData substitutes every partition-column placeholder that
// has a non-nil value in colList, then executes the resulting query.
func getSourceTableData(session *gocql.Session, selectQuery string, partCols []column, colList map[string]interface{}) ([]map[string]interface{}, error) {
	query := selectQuery
	for _, partCol := range partCols {
		value := colList[partCol.Name]
		if value == nil {
			continue
		}
		var err error
		query, err = replaceQueryColNames(query, partCol, value)
		if err != nil {
			return nil, err
		}
	}
	return executeQuery(session, query)
}
// replaceQueryColNames substitutes every "$<col.Name>" placeholder in query
// with a CQL literal for value. Strings and timestamps are single-quoted;
// numeric and boolean values are rendered verbatim.
func replaceQueryColNames(query string, col column, value interface{}) (string, error) {
	placeholder := fmt.Sprint("$", col.Name)
	var literal string
	switch v := value.(type) {
	case int, float64, bool:
		// BUG FIX: the original asserted value.(int) for all three types,
		// panicking whenever the value was a float64 or bool.
		literal = fmt.Sprint(v)
	case string:
		literal = fmt.Sprint("'", v, "'")
	case nil:
		// NOTE(review): "nil" is not a CQL literal — confirm whether "null"
		// was intended; preserved as-is for compatibility.
		literal = "nil"
	case time.Time:
		literal = fmt.Sprint("'", v.Format("2006-01-02 15:04:05"), "'")
	default:
		// BUG FIX: the original fmt.Errorf passed extra arguments with no
		// matching verbs, producing a mangled %!(EXTRA ...) message, and
		// misspelled "Unknown".
		return "", fmt.Errorf("unknown data type for partition column, data: %v; type: %v", v, reflect.TypeOf(value))
	}
	return strings.Replace(query, placeholder, literal, -1), nil
}
// insertDestData renders insertQuery once per source row — substituting
// every source-column placeholder with that row's value — and executes
// each resulting statement, stopping at the first error.
func insertDestData(session *gocql.Session, insertQuery string, data []map[string]interface{}, sourceTableCols []column) error {
	for _, row := range data {
		query := insertQuery
		for _, col := range sourceTableCols {
			var err error
			query, err = replaceQueryColNames(query, col, row[col.Name])
			if err != nil {
				return err
			}
		}
		if _, err := executeQuery(session, query); err != nil {
			return err
		}
	}
	return nil
}
// executeQuery runs query on session and returns every result row as a map
// keyed by column name. The query is released once SliceMap has drained
// the iterator.
func executeQuery(session *gocql.Session, query string) ([]map[string]interface{}, error) {
	q := session.Query(query)
	defer q.Release()
	return q.Iter().SliceMap()
}
// func insertQuery(session *gocql.Session, query string) ([]map[string]interface{}, error) {
// }
// getPartitionColumn filters columns down to the partition-key columns.
func getPartitionColumn(columns []column) []column {
	var partitionCols []column
	for _, col := range columns {
		if col.Kind != "partition_key" {
			continue
		}
		partitionCols = append(partitionCols, col)
	}
	return partitionCols
}
// getClusterColumn filters columns down to the clustering columns.
func getClusterColumn(columns []column) []column {
	var clusteringCols []column
	for _, col := range columns {
		if col.Kind != "clustering" {
			continue
		}
		clusteringCols = append(clusteringCols, col)
	}
	return clusteringCols
}
// readConf loads and decodes the JSON table configuration file at
// tableConfigPath.
func readConf(tableConfigPath string) (tableConfig, error) {
	var cfg tableConfig
	raw, err := ioutil.ReadFile(tableConfigPath)
	if err != nil {
		return cfg, err
	}
	err = json.Unmarshal(raw, &cfg)
	return cfg, err
}
|
package scanner
// SendAnalysisData sends analysis data if the user opts in.
// It delegates to the scanner's configured analysisDataSender callback.
func (scanner *Scanner) SendAnalysisData(data map[string]interface{}) {
	scanner.analysisDataSender(data)
}
|
package main
// A holds a single int field read by its getter methods.
type A struct {
	a int
}

// get returns the stored value.
func (aa *A) get() int {
	return aa.a
}

// get2 returns the stored value.
// NOTE(review): identical to get — it appears to exist only to provide a
// second call site for the goroutines in main; confirm before removing.
func (aa *A) get2() int {
	return aa.a
}
// main spawns 99 goroutines that each read from the shared A value.
// NOTE(review): main returns immediately without any synchronization (no
// WaitGroup or channel), so some or all goroutines may never run before the
// process exits — confirm whether that is an intentional demonstration.
func main() {
	aa := A{1}
	for i := 1; i < 100; i++ {
		// Copy the loop variable so each closure captures its own value
		// (required for correctness before Go 1.22).
		tmp := i
		go func() {
			if tmp < 30 {
				aa.get()
			} else {
				aa.get2()
			}
		}()
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package firewall wraps basic iptables call to control
// filtering of incoming/outgoing traffic.
package firewall
import (
"chromiumos/tast/common/network/firewall"
"chromiumos/tast/remote/network/cmd"
"chromiumos/tast/ssh"
)
// Runner is an alias for common firewall Runner but only for remote execution.
type Runner = firewall.Runner
// NewRemoteRunner creates a firewall runner for remote execution.
func NewRemoteRunner(host *ssh.Conn) *Runner {
return firewall.NewRunner(&cmd.RemoteCmdRunner{Host: host})
}
|
package main
import (
"fmt"
"net"
"sync"
"time"
. "github.com/miekg/dns"
)
// HelloServer replies to any query with a TXT "Hello world" record in the
// additional section.
func HelloServer(w ResponseWriter, req *Msg) {
	m := new(Msg)
	m.SetReply(req)
	m.Extra = make([]RR, 1)
	m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
	// NOTE(review): WriteMsg's error is ignored — acceptable in a demo
	// handler, but confirm for anything production-facing.
	w.WriteMsg(m)
}
// AnotherHelloServer replies to any query with a TXT "Hello example"
// record in the additional section.
func AnotherHelloServer(w ResponseWriter, req *Msg) {
	m := new(Msg)
	m.SetReply(req)
	m.Extra = make([]RR, 1)
	m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello example"}}
	// NOTE(review): WriteMsg's error is ignored — confirm acceptable.
	w.WriteMsg(m)
}
// RunLocalUDPServerWithFinChan starts a DNS server on the given local UDP
// address. It returns the server, the actually bound address, and a channel
// that is closed once the serve loop has exited.
func RunLocalUDPServerWithFinChan(laddr string) (*Server, string, chan struct{}, error) {
	pc, err := net.ListenPacket("udp", laddr)
	if err != nil {
		return nil, "", nil, err
	}
	server := &Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour}
	// The mutex starts locked; NotifyStartedFunc unlocks it, so the second
	// Lock below blocks until the server has actually started serving.
	waitLock := sync.Mutex{}
	waitLock.Lock()
	server.NotifyStartedFunc = waitLock.Unlock
	fin := make(chan struct{}, 0)
	go func() {
		server.ActivateAndServe()
		close(fin) // signal that the serve loop has finished
		pc.Close()
	}()
	waitLock.Lock() // wait for the server to report startup
	return server, pc.LocalAddr().String(), fin, nil
}
// RunLocalUDPServer starts a local UDP DNS server, discarding the
// completion channel that RunLocalUDPServerWithFinChan provides.
func RunLocalUDPServer(laddr string) (*Server, string, error) {
	srv, addr, _, err := RunLocalUDPServerWithFinChan(laddr)
	return srv, addr, err
}
// main registers two TXT handlers and serves DNS on 127.0.0.1:53 until killed.
func main() {
	HandleFunc("miek.nl.", HelloServer)
	HandleFunc("example.com.", AnotherHelloServer)
	//defer HandleRemove("miek.nl.")
	//defer HandleRemove("example.com.")
	s, addrstr, err := RunLocalUDPServer("127.0.0.1:53")
	if err != nil {
		// Fixed: previously execution continued with a nil server (and an
		// endless sleep loop) after a failed startup.
		fmt.Printf("unable to run test server: %v", err)
		return
	}
	defer s.Shutdown()
	fmt.Printf("Address server %s", addrstr)
	// Block forever; `for { ... }` is the idiomatic infinite loop.
	for {
		time.Sleep(time.Second)
	}
}
|
package main
import (
"github.com/xiaotuanyu120/cobra_example/cmd"
)
// Version is the application version injected into the cmd package at startup.
var Version = "0.1.1"

// main publishes the version to the cmd package and runs the root command.
func main() {
	cmd.Version = Version
	cmd.Execute()
}
|
// Package ai provides drivers for the computer client.
package ai
|
package main
import (
"fmt"
"sort"
)
// main runs a small demonstration of combinationSum2.
func main() {
	cands := []int{1} // try {10, 1, 2, 7, 6, 1, 5}
	fmt.Println(cands)
	goal := 1 // try 8
	found := combinationSum2(cands, goal)
	fmt.Println(found)
}
// combinationSum2 returns every unique combination of candidates (each
// element usable at most once) whose sum equals target.
func combinationSum2(candidates []int, target int) [][]int {
	sort.Ints(candidates)
	current := []int{}
	all := [][]int{}
	dfs(candidates, target, &current, &all, 0)
	return all
}
// dfs walks the sorted candidates from index onward, extending *result and
// appending every combination that reaches target exactly to *results.
// Duplicate candidates at the same recursion depth are skipped so each
// combination appears only once.
func dfs(candidates []int, target int, result *[]int, results *[][]int, index int) {
	if target < 0 {
		return
	}
	if target == 0 {
		*results = append(*results, *result)
		return
	}
	for i := index; i < len(candidates); i++ {
		// Skip a duplicate value at the same depth (the loop condition
		// already guarantees i < len(candidates); the old extra check was
		// redundant).
		if i > index && candidates[i-1] == candidates[i] {
			continue
		}
		v := candidates[i]
		if target < v {
			break // candidates are sorted: nothing further can fit
		}
		// Work on a copy so sibling iterations don't share backing storage.
		dup := make([]int, len(*result), len(*result)+1)
		copy(dup, *result)
		dup = append(dup, v)
		dfs(candidates, target-v, &dup, results, i+1)
	}
}
|
package main
import (
"fmt"
)
// main is a slice-semantics demo: declaration, slicing, capacity growth,
// aliasing of backing arrays, append, and copy.
func main() {
	var s1 []int
	fmt.Println(s1)
	a := [10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	fmt.Println(a)
	s2 := a[5:]
	fmt.Println(s2)
	s3 := make([]int, 10, 100) // type, initial length, capacity (grows by doubling)
	fmt.Println(s3)
	fmt.Println(len(s3), cap(s3))
	fmt.Println("------------")
	s4 := []byte{'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n'}
	sa := s4[2:5]
	sb := s4[4:6]
	fmt.Println(s4)
	fmt.Println(string(sa))
	fmt.Println(string(sb))
	sc := sa[3:5]
	fmt.Println(string(sc)) // a slice's capacity extends to the end of the backing array
	s5 := make([]int, 3, 6)
	fmt.Printf("%p \n", s5)
	// NOTE(review): both appends below use s1 (not s5) as the base, so the
	// make above is effectively discarded — confirm this is intentional.
	s5 = append(s1, 1, 2, 4, 5)
	fmt.Printf("%v %p\n", s5, s5)
	s5 = append(s1, 1, 2, 4, 5)
	fmt.Printf("%v %p\n", s5, s5)
	array := []int{1, 2, 3, 4, 5, 6}
	s6 := array[2:5]
	s7 := array[1:3]
	fmt.Println(s6, s7)
	// If an append exceeds the backing array's capacity, the data is copied
	// to a new, larger array — after that the overlapping region is no
	// longer shared, so later writes through s6 don't affect s7.
	s7 = append(s7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
	s6[0] = 9
	fmt.Println(s6, s7)
	s8 := []int{1, 2, 3, 4, 5, 6}
	s9 := []int{7, 8, 9}
	//copy(s8, s9)
	copy(s9, s8) // copies min(len) elements: s9 becomes the first 3 of s8
	fmt.Println(s8)
	fmt.Println(s9)
	//---homework--//
	s10 := []int{1, 2, 3, 4, 5, 6, 6, 7}
	fmt.Println(s10)
	s11 := s10[:]
	fmt.Println(s11)
}
|
package lc
// maxProduct returns the maximum product over all contiguous subarrays of
// nums. It tracks, for the subarray ending at the current element, the best
// positive product (pos) and the most negative product (neg); zeros reset
// both trackers. NOTE(review): pos/neg use 0 as "unset", which relies on the
// zero-branch resets — verify edge cases such as all-negative inputs.
func maxProduct(nums []int) int {
	if len(nums) == 1 {
		return nums[0]
	}
	var neg, pos int
	max := nums[0]
	for i := 0; i < len(nums); i++ {
		if nums[i] > 0 {
			// Positive element: extend both running products.
			if pos == 0 && neg == 0 {
				pos = nums[i]
				continue
			}
			if pos == 0 {
				pos, neg = nums[i], neg*nums[i]
			} else {
				pos, neg = pos*nums[i], neg*nums[i]
			}
		} else if nums[i] < 0 {
			// Negative element: positive and negative running products swap.
			if pos == 0 && neg == 0 {
				neg = nums[i]
				continue
			}
			if pos > max {
				max = pos
			}
			if pos*nums[i] < nums[i] {
				pos, neg = neg*nums[i], pos*nums[i]
			} else {
				pos, neg = neg*nums[i], nums[i]
			}
		} else {
			// Zero: record the best so far and restart both trackers.
			if pos > max {
				max = pos
			}
			neg, pos = 0, 0
		}
	}
	if pos > max {
		return pos
	}
	return max
}
|
package config
import (
"github.com/workfoxes/gobase/pkg/config/client"
"github.com/go-chi/chi"
"net/http"
)
var (
	// conn is declared but not referenced in this chunk —
	// NOTE(review): confirm it is used elsewhere in the package.
	conn string
)

// Context bundles the per-request dependencies resolved for one account.
type Context struct {
	AccountId string              // account id taken from the request URL
	DB        *client.Database    // account database (base DB when no account id)
	BaseDB    *client.Database    // always the base database
	Cache     *client.RedisClient // redis client prefixed by account (and user)
}
// LoadContext resolves the request's account id and returns a Context wired
// with the matching database, the base database, and a redis client whose
// key prefix combines the account id and, when present, the user id.
func LoadContext(r *http.Request, userId string) *Context {
	accountID := chi.URLParam(r, "AccountId")

	var db *client.Database
	if accountID == "" {
		db = client.LoadBaseDatabase()
	} else {
		db = client.LoadDatabase(accountID)
	}

	prefix := accountID
	if userId != "" {
		prefix += "::" + userId
	}

	return &Context{
		AccountId: accountID,
		DB:        db,
		BaseDB:    client.LoadBaseDatabase(),
		Cache:     client.NewRedisClient(prefix),
	}
}
|
package prezi
import "time"
// timeFormat is the reference layout used for the JSON representation,
// e.g. "January  2, 2006" (the day is space-padded).
const timeFormat = `January _2, 2006`

// Date wraps time.Time with a human-readable JSON encoding.
type Date time.Time

// MarshalBinary encodes the date using time.Time's binary format.
func (ct Date) MarshalBinary() ([]byte, error) {
	return time.Time(ct).MarshalBinary()
}

// MarshalJSON encodes the date as a quoted timeFormat string.
func (ct Date) MarshalJSON() ([]byte, error) {
	return []byte(`"` + ct.String() + `"`), nil
}

// UnmarshalBinary decodes a time.Time binary payload into the date.
// On error the receiver is left unchanged (previously it was clobbered
// with the zero time).
func (ct *Date) UnmarshalBinary(data []byte) error {
	var t time.Time
	if err := t.UnmarshalBinary(data); err != nil {
		return err
	}
	*ct = Date(t)
	return nil
}

// UnmarshalJSON parses a quoted timeFormat string into the date.
// On parse failure the receiver is left unchanged (previously it was
// overwritten with the zero time before the error was returned).
func (ct *Date) UnmarshalJSON(data []byte) error {
	t, err := time.Parse(`"`+timeFormat+`"`, string(data))
	if err != nil {
		return err
	}
	*ct = Date(t)
	return nil
}

// String renders the date with timeFormat.
func (ct Date) String() string {
	return time.Time(ct).Format(timeFormat)
}
|
package gnr
// Union is the CSG union of a set of objects.
type Union struct {
	Objects []Object
}

// NewUnion builds a Union over the given objects.
func NewUnion(o ...Object) *Union {
	return &Union{Objects: o}
}

// RayInteraction gathers the ray interactions of every member object and
// returns them ordered by distance from the ray origin.
func (u *Union) RayInteraction(r *Ray) []*InteractionResult {
	collect := func(acc []*InteractionResult, o Object) []*InteractionResult {
		return append(acc, o.RayInteraction(r)...)
	}
	hits := ObjectSlice(u.Objects).AggregateSliceInteractionResult(collect)
	return InteractionResultSlice(hits).SortBy(InteractionResultDistance)
}

// Contains reports whether at least one member object contains p.
func (u *Union) Contains(p *Vector3f) bool {
	inside := func(o Object) bool {
		return o.Contains(p)
	}
	return ObjectSlice(u.Objects).Any(inside)
}
// Intersection is the CSG intersection of a set of objects.
type Intersection struct {
	Objects []Object
}

// NewIntersection builds an Intersection over the given objects.
func NewIntersection(o ...Object) *Intersection {
	return &Intersection{Objects: o}
}

// RayInteraction gathers all member interactions and keeps only those whose
// point of impact lies inside every member object.
func (is *Intersection) RayInteraction(r *Ray) []*InteractionResult {
	collect := func(acc []*InteractionResult, o Object) []*InteractionResult {
		return append(acc, o.RayInteraction(r)...)
	}
	hits := ObjectSlice(is.Objects).AggregateSliceInteractionResult(collect)
	onSurface := func(ir *InteractionResult) bool {
		return is.Contains(ir.PointOfImpact)
	}
	return InteractionResultSlice(hits).Where(onSurface)
}

// Contains reports whether every member object contains p.
func (is *Intersection) Contains(p *Vector3f) bool {
	inside := func(o Object) bool {
		return o.Contains(p)
	}
	return ObjectSlice(is.Objects).All(inside)
}
// Difference is the CSG difference Minuend - Subtrahend.
type Difference struct {
	Minuend, Subtrahend Object
}

// RayInteraction keeps minuend hits outside the subtrahend plus subtrahend
// hits inside the minuend (the carved surface).
func (d *Difference) RayInteraction(r *Ray) []*InteractionResult {
	outer := InteractionResultSlice(d.Minuend.RayInteraction(r)).Where(func(ir *InteractionResult) bool {
		return !d.Subtrahend.Contains(ir.PointOfImpact)
	})
	carved := InteractionResultSlice(d.Subtrahend.RayInteraction(r)).Where(func(ir *InteractionResult) bool {
		return d.Minuend.Contains(ir.PointOfImpact)
	})
	return append(outer, carved...)
}

// Contains reports whether p is inside the minuend but not the subtrahend.
func (d *Difference) Contains(p *Vector3f) bool {
	return d.Minuend.Contains(p) && !d.Subtrahend.Contains(p)
}
|
package world
import (
"github.com/galaco/bsp/primitives/leaf"
"github.com/galaco/lambda-client/scene/visibility"
"github.com/galaco/lambda-core/entity"
"github.com/galaco/lambda-core/mesh"
"github.com/galaco/lambda-core/model"
"github.com/go-gl/mathgl/mgl32"
"sync"
)
// World is the root entity of a loaded map: its static BSP model, static
// props, sky, and the visibility state used to decide which cluster leafs
// to render.
type World struct {
	entity.Base
	staticModel         model.Bsp
	staticProps         []model.StaticProp
	sky                 Sky
	visibleClusterLeafs []*model.ClusterLeaf // currently visible set; guarded by rebuildMutex
	visData             *visibility.Vis
	LeafCache           *visibility.Cache // PVS cache for the current cluster
	currentLeaf         *leaf.Leaf        // leaf the camera was last found in
	rebuildMutex        sync.Mutex        // protects visibleClusterLeafs
}
// Bsp returns a pointer to the world's static BSP model.
func (entity *World) Bsp() *model.Bsp {
	return &entity.staticModel
}

// Sky returns a pointer to the world's sky entity.
func (entity *World) Sky() *Sky {
	return &entity.sky
}

// VisibleClusters returns the cluster leafs currently marked visible,
// holding the rebuild mutex so a concurrent rebuild cannot race the read.
func (entity *World) VisibleClusters() []*model.ClusterLeaf {
	entity.rebuildMutex.Lock()
	defer entity.rebuildMutex.Unlock()
	return entity.visibleClusterLeafs
}
// Rebuild the current facelist to render, by first
// recalculating using vvis data
//
// TestVisibility recomputes the visible cluster set for the given camera
// position, skipping the rebuild when the leaf or cluster has not changed.
func (entity *World) TestVisibility(position mgl32.Vec3) {
	// View hasn't moved
	currentLeaf := entity.visData.FindCurrentLeaf(position)
	if currentLeaf == entity.currentLeaf {
		return
	}
	if currentLeaf == nil || currentLeaf.Cluster == -1 {
		// Outside the world (no leaf or invalid cluster).
		if entity.currentLeaf == nil {
			// Still outside the world; already rebuilt for that state.
			return
		}
		entity.currentLeaf = currentLeaf
		entity.AsyncRebuildVisibleWorld()
		return
	}
	// Haven't changed cluster
	if entity.LeafCache != nil && entity.LeafCache.ClusterId == currentLeaf.Cluster {
		return
	}
	entity.currentLeaf = currentLeaf
	entity.LeafCache = entity.visData.GetPVSCacheForCluster(currentLeaf.Cluster)
	entity.AsyncRebuildVisibleWorld()
}
// Launches rebuilding the visible world in a separate thread
// Note: This *could* cause rendering issues if the rebuild is slower than
// travelling between clusters
//
// NOTE(review): despite the name and the comment above, the closure below is
// invoked synchronously — there is no `go` keyword — so this runs on the
// caller's goroutine. Confirm whether `go func(...)` was intended.
func (entity *World) AsyncRebuildVisibleWorld() {
	func(currentLeaf *leaf.Leaf) {
		visibleWorld := make([]*model.ClusterLeaf, 0)
		visibleClusterIds := make([]int16, 0)
		if currentLeaf != nil && currentLeaf.Cluster != -1 {
			visibleClusterIds = entity.visData.PVSForCluster(currentLeaf.Cluster)
		}
		// nothing visible so render everything
		if len(visibleClusterIds) == 0 {
			for idx := range entity.staticModel.ClusterLeafs() {
				visibleWorld = append(visibleWorld, &entity.staticModel.ClusterLeafs()[idx])
			}
		} else {
			for _, clusterId := range visibleClusterIds {
				visibleWorld = append(visibleWorld, &entity.staticModel.ClusterLeafs()[clusterId])
			}
		}
		// Swap in the new visible set under the rebuild mutex.
		entity.rebuildMutex.Lock()
		entity.visibleClusterLeafs = visibleWorld
		entity.rebuildMutex.Unlock()
	}(entity.currentLeaf)
}
// Build skybox from tree
//
// BuildSkybox constructs the sky entity from the cluster leafs visible at
// position, falling back to the whole map when no PVS data is available.
func (entity *World) BuildSkybox(sky *model.Model, position mgl32.Vec3, scale float32) {
	// Rebuild bsp faces
	visibleModel := model.NewBsp(entity.staticModel.Mesh().(*mesh.Mesh))
	visibleWorld := make([]*model.ClusterLeaf, 0)
	// Guard against a nil leaf (position outside the world), mirroring the
	// nil check done in TestVisibility.
	var visibleClusterIds []int16
	if l := entity.visData.FindCurrentLeaf(position); l != nil {
		visibleClusterIds = entity.visData.PVSForCluster(l.Cluster)
	}
	// nothing visible so render everything
	if len(visibleClusterIds) == 0 {
		for idx := range entity.staticModel.ClusterLeafs() {
			visibleWorld = append(visibleWorld, &entity.staticModel.ClusterLeafs()[idx])
		}
	} else {
		// Fixed: iterate the cluster id VALUES. `for clusterId := range`
		// yielded slice indices 0..n-1, indexing the wrong leafs (compare
		// AsyncRebuildVisibleWorld, which uses `for _, clusterId := range`).
		for _, clusterId := range visibleClusterIds {
			visibleWorld = append(visibleWorld, &entity.staticModel.ClusterLeafs()[clusterId])
		}
	}
	entity.sky = *NewSky(visibleModel, visibleWorld, position, scale, sky)
}
// NewWorld creates a World from the static map data and primes visibility
// from the origin.
func NewWorld(world model.Bsp, staticProps []model.StaticProp, visData *visibility.Vis) *World {
	w := &World{
		staticModel: world,
		staticProps: staticProps,
		visData:     visData,
	}
	w.TestVisibility(mgl32.Vec3{0, 0, 0})
	return w
}
|
package matchers
import (
"errors"
"net/http/httptest"
)
func requireRespRec(actual interface{}) (*httptest.ResponseRecorder, error) {
rr, ok := actual.(*httptest.ResponseRecorder)
if !ok {
return nil, errors.New("actual must be a *httptest.ResponseRecorder")
}
return rr, nil
}
func mustRespSec(actual interface{}) *httptest.ResponseRecorder {
return actual.(*httptest.ResponseRecorder)
}
const noNegate = "do not negate this matcher"
|
// Package languagecode provides utilities for representing languages in code,
// and handling their serializations and deserializations in a convenient way.
//
// All serializations will result in `LanguageUndefined` if input data is not a
// recognized language code. Some conversions are lossy as not all languages
// have all codes. To avoid loss, you can use the Has<Format> methods to check
// whether the language has a code of the wanted format before conversion.
//
// ISO-639-2 and ISO-639-2/B codes are treated distinctly and the B codes will
// not default to the formal code. If you need defaulting you can perform a
// FormatAlpha3B lookup first and if it returns `LanguageUndefined`, perform a
// FormatAlpha3 lookup. This design choice is made because the defaulting logic
// requirements vary case by case.
//
// All the exported types are one word in memory and as such provide fast
// equality checks, hashing for usage as keys in maps. Conversions between the
// types are zero overhead (don't even escape the stack), serialization is
// worst case O(1), and deserialization is worst case O(n).
package languagecode
// Language represents the language codes of a language.
// It is one word in memory, so it is cheap to compare and usable as a map key.
type Language struct {
	code code // packed internal representation of the language's codes
}

// LanguageUndefined represents an undefined language.
// It is the zero value of Language.
var LanguageUndefined = Language{}
// Alpha3 wraps the language for Alpha3 (ISO-639-2) serialization.
func (l Language) Alpha3() LanguageAlpha3 {
	return LanguageAlpha3{Language: l}
}

// HasAlpha3 reports whether the language has a designated Alpha3 code.
func (l Language) HasAlpha3() bool {
	return empty3 != FormatAlpha3.Serialize(l)
}

// Alpha3B wraps the language for Alpha3B (ISO-639-2/B) serialization.
func (l Language) Alpha3B() LanguageAlpha3B {
	return LanguageAlpha3B{Language: l}
}

// HasAlpha3B reports whether the language has a designated Alpha3B code.
func (l Language) HasAlpha3B() bool {
	return empty3 != FormatAlpha3B.Serialize(l)
}

// Alpha2 wraps the language for Alpha2 serialization.
func (l Language) Alpha2() LanguageAlpha2 {
	return LanguageAlpha2{Language: l}
}

// HasAlpha2 reports whether the language has a designated Alpha2 code.
func (l Language) HasAlpha2() bool {
	return empty2 != FormatAlpha2.Serialize(l)
}

// GoString implements fmt.GoStringer.
func (l Language) GoString() string {
	code := FormatAlpha3.Serialize(l)
	return "languagecode.Language{" + code + "}"
}

// marshalTextWithFormat serializes l in the given format; it never fails.
func (l Language) marshalTextWithFormat(format Format) ([]byte, error) {
	text := format.Serialize(l)
	return []byte(text), nil
}

// unmarshalTextWithFormat replaces l with the language decoded from text.
func (l *Language) unmarshalTextWithFormat(format Format, text []byte) error {
	*l = format.Deserialize(string(text))
	return nil
}
|
package transcoder
import (
"fmt"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"github.com/fsnotify/fsnotify"
)
// commandTempl is the ffmpeg invocation used for HLS transcoding; the two
// %s verbs receive the input path and the output playlist path.
var commandTempl = "ffmpeg -i %s -profile:v baseline -level 3.0 -s 640x360 -start_number 0 -hls_time 10 -hls_list_size 0 -f hls %s"

// allowedExts lists the source file extensions that get transcoded.
var allowedExts = map[string]bool{
	".mp4": true,
	".mov": true,
}

// Transcoder converts videos found in srcDir to HLS renditions under outDir.
type Transcoder struct {
	srcDir     string
	outDir     string
	procVideos []string   // output paths currently being transcoded; guarded by mu
	mu         sync.Mutex // protects procVideos
}
// New returns a Transcoder reading videos from srcDir and writing HLS
// output under outDir.
func New(srcDir string, outDir string) *Transcoder {
	t := Transcoder{
		srcDir:     srcDir,
		outDir:     outDir,
		procVideos: make([]string, 0, 10),
	}
	return &t
}
// Start launches the directory watcher in the background, then transcodes
// any videos already present in srcDir.
func (t *Transcoder) Start() {
	go t.startWatcher()
	t.procVideosInDir()
}
// procVideosInDir walks srcDir and spawns a transcode goroutine for every
// file with an allowed extension.
func (t *Transcoder) procVideosInDir() {
	err := filepath.Walk(t.srcDir, func(path string, info os.FileInfo, err error) error {
		// Fixed: the incoming walk error was previously ignored; on a
		// traversal error info is nil and info.IsDir() would panic.
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		if allowedExts[filepath.Ext(path)] {
			go t.procVideo(path)
		}
		return nil
	})
	if err != nil {
		log.Fatal("ERROR", err)
	}
}
// getOutpath maps a source video path to its HLS playlist path under outDir,
// creating the per-video subdirectory if needed.
func (t *Transcoder) getOutpath(path string) string {
	name := filepath.Base(path)
	subdir := strings.ReplaceAll(name, ".", "_")
	outpath := filepath.Join(t.outDir, subdir, "index.m3u8")
	if err := os.MkdirAll(filepath.Dir(outpath), 0755); err != nil {
		log.Fatal(err)
	}
	return outpath
}
// IsProccessing reports whether the output mapped from urlpath is currently
// being transcoded. Fixed: procVideos is mutated concurrently by
// addProcVideo/removeProcVideo under mu, but this reader did not take the
// lock — a data race.
func (t *Transcoder) IsProccessing(urlpath string) bool {
	path := path.Join(t.outDir, urlpath)
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, p := range t.procVideos {
		if p == path {
			return true
		}
	}
	return false
}
// removeProcVideo deletes path from the in-progress list, if present.
func (t *Transcoder) removeProcVideo(path string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	idx := -1
	for i, p := range t.procVideos {
		if p == path {
			idx = i
			break
		}
	}
	if idx >= 0 {
		t.procVideos = append(t.procVideos[:idx], t.procVideos[idx+1:]...)
	}
}
// addProcVideo records path as currently being transcoded.
func (t *Transcoder) addProcVideo(path string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.procVideos = append(t.procVideos, path)
}
// procVideo transcodes a single source video to HLS with ffmpeg, tracking
// the output path in procVideos for the duration of the run.
func (t *Transcoder) procVideo(path string) {
	filename := filepath.Base(path)
	outpath := t.getOutpath(path)
	command := fmt.Sprintf(commandTempl, path, outpath)
	// NOTE(review): splitting on spaces breaks for paths containing spaces —
	// confirm inputs are space-free or build the argv directly.
	args := strings.Split(command, " ")
	log.Println("Processing file", filename)
	t.addProcVideo(outpath)
	cmd := exec.Command(args[0], args[1:]...)
	err := cmd.Run()
	if err != nil {
		// NOTE(review): log.Fatal terminates the whole process when one
		// transcode fails — confirm that is intended.
		log.Fatal("ERROR", err)
	}
	t.removeProcVideo(outpath)
	log.Println("File", filename, "has been successfully processed")
}
// startWatcher watches srcDir with fsnotify and transcodes files that are
// created or renamed into it. It blocks until a watcher error occurs, then
// exits the process.
func (t *Transcoder) startWatcher() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()
	errCh := make(chan error)
	go func() {
		for {
			select {
			case event := <-watcher.Events:
				switch {
				case event.Op&fsnotify.Create == fsnotify.Create:
					// NOTE(review): Create can fire before the file is fully
					// written, and procVideo runs synchronously here, blocking
					// further event handling until ffmpeg finishes — confirm.
					t.procVideo(event.Name)
				case event.Op&fsnotify.Rename == fsnotify.Rename:
					t.procVideo(event.Name)
				}
			case err := <-watcher.Errors:
				errCh <- err
			}
		}
	}()
	if err := watcher.Add(t.srcDir); err != nil {
		log.Fatal("ERROR", err)
	}
	log.Fatal(<-errCh)
}
|
package engine
import (
"container/list"
"errors"
"fmt"
"github.com/denkhaus/tcgl/applog"
"github.com/fsouza/go-dockerclient"
"math"
)
var (
	// errCircularDependency is returned by Node.Apply when container
	// requirements cannot be ordered consistently.
	errCircularDependency = errors.New("Manifest error:: circular dependency detected")
)

// ContainerAggregateFunc folds a value over container list elements.
type ContainerAggregateFunc func(e *list.Element, val interface{}) interface{}

// ContainerFunc visits a single container list element.
type ContainerFunc func(e *list.Element) error

// Node represents a host running Docker. Each node has an ID and an address
// (in the form <scheme>://<host>:<port>/).
type Node struct {
	id         string
	manifestId string
	address    string
	engine     *Engine
	tree       *Tree // dependency-ordered containers for this node
	client     *docker.Client
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// String
/////////////////////////////////////////////////////////////////////////////////////////////////
// String implements fmt.Stringer by returning the node id.
func (n Node) String() string {
	return n.id
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Info1
/////////////////////////////////////////////////////////////////////////////////////////////////
// Info1 returns a short human-readable summary of the node.
func (n Node) Info1() string {
	return fmt.Sprintf("id:%s, address:%s", n.id, n.address)
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// NewNode
/////////////////////////////////////////////////////////////////////////////////////////////////
// NewNode creates a Node for the Docker host at address, connects a client,
// and applies the given container templates to its tree.
func NewNode(id, address, manId string, e *Engine, tmps []Template) (*Node, error) {
	node := &Node{id: id, address: address, manifestId: manId, engine: e, tree: NewTree()}
	// Idiomatic early return instead of the previous if/else around the
	// error check.
	client, err := docker.NewClient(address)
	if err != nil {
		return nil, err
	}
	node.client = client
	applog.Infof("Apply container templates -> [%s]", node.Info1())
	if err := node.Apply(tmps); err != nil {
		return nil, err
	}
	return node, nil
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
///////////////////////////////////////////////////////////////////////////////////////////////
// ApplyState copies the runtime container IDs reported by the Docker API
// onto the matching containers in the node's tree.
func (n Node) ApplyState(cnts []docker.APIContainers) error {
	for _, apiCnt := range cnts {
		for _, name := range apiCnt.Names {
			n.ForAll(func(e *list.Element) error {
				cnt := e.Value.(Container)
				// API names carry a leading "/": strip before comparing.
				if name[1:] == cnt.FullQualifiedName() { // trim "/"
					applog.Debugf("Apply Id of container %s on node [%s]", *cnt, n)
					cnt.SetId(apiCnt.ID)
					// The container is stored by value in the list: write
					// the mutated copy back so the id change sticks.
					e.Value = cnt
				}
				return nil
			})
		}
	}
	return nil
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
///////////////////////////////////////////////////////////////////////////////////////////////
// Apply inserts the given templates into the node's container tree so that
// each container appears after everything it requires and before everything
// that requires it. Returns errCircularDependency when both constraints
// cannot be satisfied at once.
func (n Node) Apply(tmps []Template) error {
	tree := n.tree
	for _, tmp := range tmps {
		cont, err := NewContainerFromTemplate(tmp, &n)
		if err != nil {
			return err
		}
		cont.RemoveSelfReference()
		//check if container is required
		// Find the earliest tree position of a container that requires this
		// one — we must insert before it.
		var requiredIns *list.Element
		nRequiredIdx := math.MaxInt64
		for e := tree.First(); e != nil; e = e.Next() {
			cnt := e.Value.(Container)
			for _, name := range cnt.reqmnts {
				if name == cont.name {
					idx := tree.GetIndex(cnt)
					if idx < nRequiredIdx {
						requiredIns = cnt.elm
						nRequiredIdx = idx
					}
				}
			}
		}
		deps := cont.reqmnts
		if len(deps) == 0 && requiredIns == nil {
			applog.Debugf("Apply template %s - no requirements, not required", cont.name)
			//if is not required and has no requirements
			cont.elm = tree.TreePushBack(*cont)
			continue
		}
		//check if container has requirements
		// Find the earliest tree position among this container's own
		// requirements — we must insert after it.
		var hasRequirementsIns *list.Element
		nHasRequirementsIdx := math.MaxInt64
		for _, name := range deps {
			c := tree.GetContainerByName(name)
			if cnt, ok := c.(Container); ok {
				idx := tree.GetIndex(cnt)
				if idx < nHasRequirementsIdx {
					hasRequirementsIns = cnt.elm
					nHasRequirementsIdx = idx
				}
			} else {
				// Requirement not in the tree (yet): record it as unmet.
				tree.AddUnmetRequirement(name)
			}
		}
		// try to insert before nRequiredIdx and after nHasRequirementsIdx
		if hasRequirementsIns != nil && requiredIns != nil &&
			nRequiredIdx <= nHasRequirementsIdx {
			return errCircularDependency
		}
		applog.Debugf("Apply template %s: reqIdx: %d, hasReqIdx: %d", cont.name, nRequiredIdx, nHasRequirementsIdx)
		if hasRequirementsIns != nil && requiredIns == nil { // only has requirements
			cont.elm = tree.TreeInsertAfter(*cont, hasRequirementsIns)
		} else if hasRequirementsIns == nil && requiredIns != nil { //only is required
			cont.elm = tree.TreeInsertBefore(*cont, requiredIns)
		} else if nRequiredIdx > nHasRequirementsIdx {
			// NOTE(review): this branch is empty — a container that both has
			// requirements and is required is never inserted into the tree.
			// Confirm whether an insert is missing here.
		}
	}
	applog.Debugf("Apply templates:: Building node with %d container(s) finished.", tree.Length())
	//TODO check unmet requirements
	return nil
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
///////////////////////////////////////////////////////////////////////////////////////////////
// HasContainers reports whether the node's tree holds at least one container.
func (n Node) HasContainers() bool {
	return n.tree.Length() != 0
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
///////////////////////////////////////////////////////////////////////////////////////////////
// Aggregate folds fn over every container element of the tree, threading
// the accumulator val through each call.
func (n Node) Aggregate(val interface{}, fn ContainerAggregateFunc) interface{} {
	for elem := n.tree.First(); elem != nil; elem = elem.Next() {
		val = fn(elem, val)
	}
	return val
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
///////////////////////////////////////////////////////////////////////////////////////////////
// ForAll invokes fn on every container element in tree order, stopping at
// the first error.
func (n Node) ForAll(fn ContainerFunc) error {
	for elem := n.tree.First(); elem != nil; elem = elem.Next() {
		if err := fn(elem); err != nil {
			return err
		}
	}
	return nil
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
///////////////////////////////////////////////////////////////////////////////////////////////
// ForAllReversed invokes fn on every container element in reverse tree
// order, stopping at the first error.
func (n Node) ForAllReversed(fn ContainerFunc) error {
	for elem := n.tree.Last(); elem != nil; elem = elem.Prev() {
		if err := fn(elem); err != nil {
			return err
		}
	}
	return nil
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Lift containers (provision + run).
// When forced, this will rebuild all images
// and recreate all containers.
///////////////////////////////////////////////////////////////////////////////////////////////
// Lift provisions and then runs/starts the node's containers. When forced,
// images are rebuilt and containers recreated.
func (n Node) Lift(force bool, kill bool) error {
	if err := n.Provision(force); err != nil {
		return err
	}
	return n.runOrStart(force, kill)
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Provision containers.
// When forced, this will rebuild all images.
///////////////////////////////////////////////////////////////////////////////////////////////
// Provision provisions every container; when forced, images are rebuilt.
func (n Node) Provision(force bool) error {
	return n.ForAll(func(e *list.Element) error {
		return e.Value.(Container).provision(force)
	})
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Run containers.
// When forced, removes existing containers first.
///////////////////////////////////////////////////////////////////////////////////////////////
// Run runs every container; when forced, existing containers are removed
// first.
func (n Node) Run(force bool, kill bool) error {
	if force {
		if err := n.Remove(force, kill); err != nil {
			return err
		}
	}
	return n.ForAll(func(e *list.Element) error {
		return e.Value.(Container).run()
	})
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Run or start containers.
// When forced, removes existing containers first.
///////////////////////////////////////////////////////////////////////////////////////////////
// runOrStart runs or starts every container; when forced, existing
// containers are removed first.
func (n Node) runOrStart(force bool, kill bool) error {
	if force {
		if err := n.Remove(force, kill); err != nil {
			return err
		}
	}
	return n.ForAll(func(e *list.Element) error {
		return e.Value.(Container).runOrStart()
	})
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Start containers.
///////////////////////////////////////////////////////////////////////////////////////////////
// Start starts every container in tree order.
func (n Node) Start() error {
	return n.ForAll(func(e *list.Element) error {
		return e.Value.(Container).start()
	})
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Kill containers.
///////////////////////////////////////////////////////////////////////////////////////////////
// Kill kills every container in reverse tree order.
func (n Node) Kill() error {
	return n.ForAllReversed(func(e *list.Element) error {
		return e.Value.(Container).kill()
	})
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Stop containers.
///////////////////////////////////////////////////////////////////////////////////////////////
// Stop stops every container in reverse tree order.
func (n Node) Stop() error {
	return n.ForAllReversed(func(e *list.Element) error {
		return e.Value.(Container).stop()
	})
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Remove containers.
// When forced, stops existing containers first.
///////////////////////////////////////////////////////////////////////////////////////////////
// Remove removes every container in reverse tree order. When forced, the
// containers are first killed (kill=true) or stopped (kill=false).
func (n Node) Remove(force bool, kill bool) error {
	if force {
		shutdown := n.Stop
		if kill {
			shutdown = n.Kill
		}
		if err := shutdown(); err != nil {
			return err
		}
	}
	return n.ForAllReversed(func(e *list.Element) error {
		return e.Value.(Container).remove()
	})
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Status of containers.
///////////////////////////////////////////////////////////////////////////////////////////////
// Status reports the status of every container in tree order.
func (n Node) Status() error {
	return n.ForAll(func(e *list.Element) error {
		return e.Value.(Container).status()
	})
}
|
package buqi
import (
"log"
"net"
)
// Local is the client-side endpoint of the tunnel.
type Local struct {
	*Socket
}

// NewLocal builds a Local endpoint with the given password, local listen
// address, and remote server address.
func NewLocal(password *Password, listenAddr, remoteAddr *net.TCPAddr) *Local {
	sock := &Socket{
		Cipher:     NewCipher(password),
		ListenAddr: listenAddr,
		RemoteAddr: remoteAddr,
	}
	return &Local{Socket: sock}
}
// Listen starts the local listener and accepts connections from the local
// browser, handing each one to handleConn. didListen, when non-nil, is
// invoked once with the bound address.
func (local *Local) Listen(didListen func(listenAddr net.Addr)) {
	listener, err := net.ListenTCP("tcp", local.ListenAddr)
	if err != nil {
		return
	}
	// Fixed: Close was previously deferred BEFORE the error check, i.e. on
	// a nil listener; the happy path was also nested inside the err == nil
	// branch instead of returning early.
	defer listener.Close()
	if didListen != nil {
		didListen(listener.Addr())
	}
	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			continue
		}
		// Drop buffered data immediately when the connection is torn down.
		conn.SetLinger(0)
		go local.handleConn(conn)
	}
}
// handleConn bridges one browser connection to the remote server,
// encrypting upstream traffic and decrypting downstream traffic.
func (local *Local) handleConn(user *net.TCPConn) {
	defer user.Close()
	conn, err := local.DialRemote()
	if err != nil {
		// Fixed: log.Fatal here killed the whole proxy when a single dial
		// failed; log and drop just this connection instead.
		log.Println(err)
		return
	}
	defer conn.Close()
	// Drop buffered data immediately when the connection is torn down.
	conn.SetLinger(0)
	// Pump server -> browser in the background.
	go func() {
		local.DecodeCopy(user, conn)
	}()
	// Pump browser -> server on this goroutine.
	local.EncodeCopy(conn, user)
}
|
package readers
import (
"bufio"
"compress/gzip"
"io"
"os"
)
func ReadFileLines(path string) (chan []byte, error) {
var err error
var file *os.File
if file, err = os.Open(path); err == nil {
return ReadLines(file)
}
return nil, err
}
func ReadGzipLines(path string) (chan []byte, error) {
var err error
var gfile *os.File
if gfile, err = os.Open(path); err == nil {
file, err := gzip.NewReader(gfile)
if err != nil {
return nil, err
}
return ReadLines(file)
}
return nil, err
}
func ReadLines(r io.Reader) (chan []byte, error) {
out := make(chan []byte, 100)
go func() {
reader := bufio.NewReaderSize(r, 102400)
var isPrefix bool = true
var err error = nil
var line, ln []byte
for err == nil {
line, isPrefix, err = reader.ReadLine()
ln = append(ln, line...)
if !isPrefix {
out <- ln
ln = []byte{}
}
}
close(out)
}()
return out, nil
}
|
// Copyright © 2018 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"time"
"github.com/pkg/errors"
"path/filepath"
"os"
)
// clusterDataCmd represents the clusterData command
var clusterDataCmd = &cobra.Command{
	Use:   "clusterData",
	Short: "Cluster network flow data to detect anomalies",
	Long: `Cluster network flow data to detect anomalies.
Anomalies are found in log file.`,
	// Args validates that the directory of --log-path exists.
	Args: func(cmd *cobra.Command, args []string) error {
		logPath, err := cmd.Flags().GetString("log-path")
		// Fixed: the original only ran the directory check when GetString
		// FAILED (err != nil), so the validation never executed on success
		// and flag errors were silently swallowed.
		if err != nil {
			return err
		}
		dir, _ := filepath.Split(logPath)
		if _, err := os.Stat(dir); os.IsNotExist(err) {
			return errors.New("Log path does not exist")
		}
		return nil
	},
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("clusterData called")
		ClusterData = true
	},
}
// ClusterData is set to true when the clusterData subcommand runs.
var ClusterData bool

// Flag-bound configuration for the clusterData command; see init for the
// flag definitions and defaults.
var DbNameClusterData string
var LogPath string
var NumCpu int
var MinDensePoints int
var MinClusterPoints int
var PointsMode string
var DeltaTClusterData time.Duration
var WindowArrayLen int
var NumKneeFlatPoints int
var KneeSmoothingWindow int
var KneeFindElbow bool
var CpuProfile bool
var MemProfile bool
// init registers clusterDataCmd and binds all of its flags to the
// package-level configuration variables above.
func init() {
	rootCmd.AddCommand(clusterDataCmd)
	// Required flags.
	clusterDataCmd.Flags().StringVarP(&DbNameClusterData, "db-name", "", "", "Name of SQLite database used for clustering")
	clusterDataCmd.MarkFlagRequired("db-name")
	clusterDataCmd.Flags().StringVarP(&LogPath, "log-path", "", "", "Path to write log. Make sure path is valid.")
	clusterDataCmd.MarkFlagRequired("log-path")
	// Clustering tunables.
	clusterDataCmd.Flags().IntVarP(&NumCpu, "num-cpu", "", 0, "Number of CPUs to use")
	clusterDataCmd.Flags().IntVarP(&MinDensePoints, "min-dense-points", "", 10, "Minimum number of points to consider a unit as dense")
	clusterDataCmd.Flags().IntVarP(&MinClusterPoints, "min-cluster-points", "", 10, "Minimum number of points to consider a group of units as cluster")
	clusterDataCmd.Flags().StringVarP(&PointsMode, "points-mode", "", "default", "Mode for point calculation in minimum number of points of a dense unit and cluster. Default mode is to take the number of points. Use 'percentage' to take the percentage of points.")
	clusterDataCmd.Flags().DurationVarP(&DeltaTClusterData, "delta-t", "", 300*time.Millisecond, "Delta time")
	clusterDataCmd.Flags().IntVarP(&WindowArrayLen, "window-array-len", "", 2, "Number of micro slots for window time sliding")
	// Knee/elbow detection tunables.
	clusterDataCmd.Flags().IntVarP(&NumKneeFlatPoints, "num-knee-flat-points", "", 1, "number of 'flat' points that is required before considering a point as knee or elbow.")
	clusterDataCmd.Flags().IntVarP(&KneeSmoothingWindow, "knee-smoothing-window", "", 3, "The smootingWindow parameter is used to indicate the average used for the Gaussian kernel average smoother")
	clusterDataCmd.Flags().BoolVarP(&KneeFindElbow, "knee-find-elbow", "", false, "Indicates whether to find an elbow or a knee when the value of parameter is true or false respectively")
	// Profiling toggles.
	clusterDataCmd.Flags().BoolVarP(&CpuProfile, "cpu-profile", "", false, "write cpu profile to this file")
	clusterDataCmd.Flags().BoolVarP(&MemProfile, "mem-profile", "", false, "write memory profile to this file")
	// Here you will define your flags and configuration settings.
	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// clusterDataCmd.PersistentFlags().String("foo", "", "A help for foo")
	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// clusterDataCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
|
package chanrpc
import (
"fmt"
"reflect"
)
// Server dispatches named functions over a channel: goroutines enqueue
// invocations via Send (fire-and-forget) or Call (request/response), and a
// worker goroutine drains R() and runs each request through Exec.
type Server struct {
	functions map[string]interface{} // registered functions by name
	chanReq   chan *Request          // incoming request queue
}

// Request describes one invocation: the target function name, its
// arguments, and — when resp is true — the channel the Response goes to.
type Request struct {
	f        string
	args     []interface{}
	resp     bool
	chanResp chan *Response
}

// Response carries the return values of an invocation plus any dispatch
// error.
type Response struct {
	rets []interface{}
	err  error
}

// NewServer returns a Server whose request queue buffers up to l requests.
func NewServer(l int) *Server {
	return &Server{
		functions: make(map[string]interface{}),
		chanReq:   make(chan *Request, l),
	}
}

// R exposes the request channel; the consumer goroutine receives from it
// and passes each request to Exec.
func (s *Server) R() chan *Request {
	return s.chanReq
}

// Register associates f with name. Registering the same name twice is a
// programming error and panics.
func (s *Server) Register(name string, f interface{}) {
	if _, ok := s.functions[name]; ok {
		panic("chanrpc Register error")
	}
	s.functions[name] = f
}

// Send enqueues a fire-and-forget invocation of f with args; no response
// is delivered.
func (s *Server) Send(f string, args ...interface{}) {
	s.chanReq <- &Request{f: f, args: args, resp: false}
}

// Call enqueues an invocation of f with args and blocks until the worker
// goroutine has executed it, returning the function's results.
func (s *Server) Call(f string, args ...interface{}) (rets []interface{}, err error) {
	req := &Request{f: f, args: args, resp: true, chanResp: make(chan *Response)}
	s.chanReq <- req
	resp := <-req.chanResp
	rets = resp.rets
	err = resp.err
	close(req.chanResp)
	return
}

// Exec looks up and invokes the registered function for r, delivering the
// reflected results on r.chanResp when the caller asked for a response.
// It returns an error when the function is unknown or the argument count
// does not match the function's signature.
func (s *Server) Exec(r *Request) (err error) {
	var retInterfaces []interface{}

	f, ok := s.functions[r.f]
	if !ok {
		err = fmt.Errorf("chanrpc Exec error, invalid function: %s", r.f)
		if r.resp {
			r.chanResp <- &Response{rets: retInterfaces, err: err}
		}
		return
	}

	rType := reflect.TypeOf(f)
	rValue := reflect.ValueOf(f)

	// Robustness fix: the original indexed r.args blindly, which panicked
	// the worker goroutine when a caller supplied the wrong number of
	// arguments. Report a normal error instead.
	if len(r.args) != rType.NumIn() {
		err = fmt.Errorf("chanrpc Exec error, function %s expects %d args, got %d", r.f, rType.NumIn(), len(r.args))
		if r.resp {
			r.chanResp <- &Response{rets: retInterfaces, err: err}
		}
		return
	}

	in := make([]reflect.Value, rType.NumIn())
	for i := 0; i < rType.NumIn(); i++ {
		in[i] = reflect.ValueOf(r.args[i])
	}
	retValues := rValue.Call(in)
	if r.resp {
		retInterfaces = make([]interface{}, len(retValues))
		for i, rv := range retValues {
			retInterfaces[i] = rv.Interface()
		}
		r.chanResp <- &Response{rets: retInterfaces, err: nil}
	}
	return
}
|
package command
import (
"flag"
"os"
"sort"
"testing"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/assert"
)
var (
	// Shared fixtures: metaTest is the Meta used by every test below; the
	// f-prefixed vars seed the corresponding Meta flag fields.
	metaTest    *Meta
	fClientCert string
	fClientKey  string
	fCACert     string
	fCAPath     string
	fAddr       = "http://127.0.0.1:8200"
	fInsecure   = false
)

// setup builds the shared Meta fixture used by the tests in this file.
func setup() {
	metaTest = &Meta{
		UI:             &cli.BasicUi{},
		flagAddress:    fAddr,
		flagCACert:     fCACert,
		flagCAPath:     fCAPath,
		flagClientCert: fClientCert,
		flagClientKey:  fClientKey,
		flagInsecure:   fInsecure,
	}
}

// teardown is a placeholder kept for symmetry with setup; there is nothing
// to clean up yet.
func teardown() {}

// TestMain wires setup/teardown around the package test run and forwards
// the run's exit code to the OS.
func TestMain(m *testing.M) {
	// set up tests
	setup()
	// run the tests
	retCode := m.Run()
	// clean up setup
	teardown()
	// exit with the result of m.Run()
	os.Exit(retCode)
}
// adapted from https://github.com/hashicorp/vault/blob/master/meta/meta_test.go
// TestFlagSet verifies that each FlagSetFlags preset registers exactly the
// expected set of named flags — no more, no fewer.
func TestFlagSet(t *testing.T) {
	cases := []struct {
		Flags    FlagSetFlags
		Expected []string
	}{
		{
			FlagSetNone,
			[]string{},
		},
		{
			FlagSetServer,
			[]string{"address", "ca-cert", "ca-path", "client-cert", "client-key", "tls-skip-verify", "redact", "key-store", "kms-provider", "aws-kms-id", "gcp-kms-crypto-key", "gcp-kms-key-ring", "gcp-kms-region", "gcp-kms-project", "storage-bucket", "storage-key", "key-local-path", "namespace"},
		},
	}
	for _, tc := range cases {
		var m Meta
		fs := m.FlagSet("foo", tc.Flags)
		actual := make([]string, 0)
		fs.VisitAll(func(f *flag.Flag) {
			actual = append(actual, f.Name)
		})
		// sort both sides so the comparison is order-independent
		sort.Strings(actual)
		sort.Strings(tc.Expected)
		assert.EqualValues(t, tc.Expected, actual)
	}
}
// TestConfig exercises Meta.Config: explicit address argument, fallback to
// the flag-configured address, an empty Meta, and failure propagation when
// the VAULT_SKIP_VERIFY env var is malformed.
func TestConfig(t *testing.T) {
	// empty address: falls back to metaTest's flag address
	config, err := metaTest.Config("")
	assert.NotNil(t, config)
	assert.NoError(t, err)
	assert.Equal(t, metaTest.flagAddress, config.Address)
	// empty Meta.flagAddress (restored afterwards)
	addr := metaTest.flagAddress
	metaTest.flagAddress = ""
	config, err = metaTest.Config("")
	assert.NotNil(t, config)
	assert.NoError(t, err)
	metaTest.flagAddress = addr
	// pass in some address: it must win over the flag value
	addr = "http://vault:8200"
	config, err = metaTest.Config(addr)
	assert.NotNil(t, config)
	assert.NoError(t, err)
	assert.Equal(t, addr, config.Address)
	// empty TLS config
	m := &Meta{}
	config, err = m.Config("")
	assert.NotNil(t, config)
	assert.NoError(t, err)
	// inject a malformed env var: Config should fail to parse it
	os.Setenv("VAULT_SKIP_VERIFY", "foobar")
	defer os.Setenv("VAULT_SKIP_VERIFY", "")
	config, err = metaTest.Config("")
	assert.Nil(t, config)
	assert.Error(t, err)
}
// TestClient exercises Meta.Client: default construction, an explicit
// address/token pair, and config-failure propagation via a malformed
// VAULT_SKIP_VERIFY env var.
func TestClient(t *testing.T) {
	client, err := metaTest.Client("", "")
	assert.NotNil(t, client)
	assert.NoError(t, err)
	// supply address and token explicitly; token must stick to the client
	addr := "http://vault:8200"
	token := "token"
	client, err = metaTest.Client(addr, token)
	assert.NotNil(t, client)
	assert.NoError(t, err)
	assert.Equal(t, token, client.Token())
	// a broken config must surface as a client construction error
	os.Setenv("VAULT_SKIP_VERIFY", "foobar")
	defer os.Setenv("VAULT_SKIP_VERIFY", "")
	client, err = metaTest.Client("", "")
	assert.Nil(t, client)
	assert.Error(t, err)
}
|
package main
import (
"fmt"
)
// main demonstrates heap allocation with new: each new(int) yields a
// distinct pointer to a zero-initialized int.
func main() {
	num := new(int)
	fmt.Println(*num) // zero value: "0"
	*num = 2
	fmt.Println(*num) // "2"
	x, y := new(int), new(int)
	fmt.Println(x == y) // distinct allocations: "false"
}
|
package common
import "time"
// for common
// Common shared constants: sentinel values, lengths, and date formats.
const (
	EmptyString          = ""
	PaymentAddressLength = 66
	ZeroByte             = byte(0x00)
	DateOutputFormat     = "2006-01-02T15:04:05.999999"
	DateInputFormat      = "2006-01-02T15:04:05.999999"
	NextForceUpdate      = "2019-06-15T23:59:00.000000"
)

// Process exit codes (iota-ordered; ExitCodeUnknow is the zero default).
const (
	ExitCodeUnknow = iota
	ExitByOs
	ExitByLogging
	ExitCodeForceUpdate
)

// Transaction type tags and size limits.
const (
	TxNormalType             = "n"  // normal tx(send and receive coin)
	TxRewardType             = "s"  // reward tx
	TxReturnStakingType      = "rs" // return-staking tx
	TxCustomTokenType        = "t"  // token tx with no supporting privacy
	TxCustomTokenPrivacyType = "tp" // token tx with supporting privacy
	MaxTxSize                = 100  // unit KB = 100KB
)

// Mining consensus tunables.
const (
	MaxBlockSize         = 2000 // unit kilobytes = 2 Megabyte
	MaxTxsInBlock        = 1000
	MinTxsInBlock        = 10                   // minimum txs for block to get immediate process (meaning no wait time)
	MinBlockWaitTime     = 2                    // second
	MaxBlockWaitTime     = 4 - MinBlockWaitTime // second
	MinBeaconBlkInterval = 3 * time.Second      // minimum interval between beacon blocks
	MinShardBlkInterval  = 5 * time.Second      // minimum interval between shard blocks
)

// special token ids (aka. PropertyID in custom token)
var (
	ConstantID = Hash{4} // To send Constant in custom token
)

// centralized website's pubkey
var (
	CentralizedWebsitePubKey = []byte{2, 194, 130, 176, 102, 36, 183, 114, 109, 135, 49, 114, 177, 92, 214, 31, 25, 4, 72, 103, 196, 161, 36, 69, 121, 102, 159, 24, 31, 131, 101, 20, 0}
	// CentralizedWebsitePubKey = []byte{3, 159, 2, 42, 22, 163, 195, 221, 129, 31, 217, 133, 149, 16, 68, 108, 42, 192, 58, 95, 39, 204, 63, 68, 203, 132, 221, 48, 181, 131, 40, 189, 0}
)

// board addresses
const (
	// DCBAddress = "1NHpWKZYCLQeGKSSsJewsA8p3nsPoAZbmEmtsuBqd6yU7KJnzJZVt39b7AgP"
	// GOVAddress = "1NHoFQ3Nr8fQm3ZLk2ACSgZXjVH6JobpuV65RD3QAEEGe76KknMQhGbc4g8P"
	BurningAddress = "1NHp2EKw7ALdXUzBfoRJvKrBBM9nkejyDcHVPvUjDcWRyG22dHHyiBKQGL1c"
)

// CONSENSUS: epoch/timing parameters, node modes, and committee roles.
const (
	EPOCH            = 10
	RANDOM_TIME      = 5
	OFFSET           = 1
	NODEMODE_RELAY   = "relay"
	NODEMODE_SHARD   = "shard"
	NODEMODE_AUTO    = "auto"
	NODEMODE_BEACON  = "beacon"
	BEACON_ROLE      = "beacon"
	SHARD_ROLE       = "shard"
	PROPOSER_ROLE    = "proposer"
	VALIDATOR_ROLE   = "validator"
	PENDING_ROLE     = "pending"
	MAX_SHARD_NUMBER = 2
)

// Units converter: wei denominations expressed as int64 ratios.
const (
	WeiToMilliEtherRatio = int64(1000000000000000)
	WeiToEtherRatio      = int64(1000000000000000000)
)
|
package model
// DecimalHolder is implemented by graph elements that can accept and store
// a decimal value.
type DecimalHolder interface {
	// AcceptDecimal stores val, returning an error when the value cannot
	// be accepted.
	AcceptDecimal(val int64) error
}
|
// Fix: the original file declared "package main" twice; a Go file has
// exactly one package clause, so the duplicate never compiled.
package main
|
package bytedance
import (
	"fmt"
	"sort"
	"strconv"
)
// Code1020 demonstrates threeSum on the classic sample input.
func Code1020() {
	sample := []int{-1, 0, 1, 2, -1, -4}
	fmt.Println(threeSum(sample))
}
/**
Given an array nums of n integers, determine whether there exist three
elements a, b, c in nums such that a + b + c = 0. Find all unique
triplets satisfying the condition; the answer must not contain duplicate
triplets.

Example: given nums = [-1, 0, 1, 2, -1, -4], the satisfying triplets are:
[
  [-1, 0, 1],
  [-1, -1, 2]
]
*/
// threeSum returns all unique triples [a, b, c] drawn from nums with
// a + b + c == 0. Each returned triple is in ascending order; the order of
// triples is unspecified. Unlike the original, the caller's slice is not
// mutated.
func threeSum(nums []int) [][]int {
	if len(nums) == 0 {
		return make([][]int, 0)
	}
	// Sort a copy with the standard library instead of the hand-rolled
	// quicksort, and leave the caller's data untouched.
	sorted := append([]int(nil), nums...)
	sort.Ints(sorted)
	res := make(map[string][]int)
	for i := 0; i < len(sorted); i++ {
		left := i + 1
		right := len(sorted) - 1
		for left < right {
			sum := sorted[i] + sorted[left] + sorted[right]
			if sum == 0 {
				triple := []int{sorted[i], sorted[left], sorted[right]}
				// Fix: the original concatenated the numbers with no
				// separator, so distinct triples such as (1,12,x) and
				// (11,2,x) could collide on the same map key.
				key := strconv.Itoa(triple[0]) + "," + strconv.Itoa(triple[1]) + "," + strconv.Itoa(triple[2])
				res[key] = triple
			}
			// move the pointer on the side that can still change the sum
			if sum <= 0 {
				left++
			} else {
				right--
			}
		}
	}
	out := make([][]int, 0, len(res))
	for _, triple := range res {
		out = append(out, triple)
	}
	return out
}
// get_sort sorts nums in place in ascending order using a recursive
// in-place quicksort (first element as pivot) and returns the same slice
// for convenience.
func get_sort(nums []int) []int {
	if len(nums) <= 1 {
		return nums
	}
	pivot := nums[0]
	lo, hi := 0, len(nums)-1
	idx := 1
	for idx <= hi {
		switch {
		case nums[idx] > pivot:
			// bigger than the pivot: park it at the tail
			nums[idx], nums[hi] = nums[hi], nums[idx]
			hi--
		default:
			// <= pivot: shift it (and the pivot) leftwards
			nums[idx], nums[lo] = nums[lo], nums[idx]
			lo++
			idx++
		}
	}
	// pivot now sits at index lo; sort both partitions
	get_sort(nums[:lo])
	get_sort(nums[lo+1:])
	return nums
}
|
package runtime_test
import (
. "github.com/d11wtq/bijou/runtime"
"github.com/d11wtq/bijou/test"
"testing"
)
// TestBooleanType checks that both boolean values report BooleanType.
func TestBooleanType(t *testing.T) {
	if True.Type() != BooleanType {
		t.Fatalf(`expected True.Type() == BooleanType, got %s`, True.Type())
	}
	if False.Type() != BooleanType {
		t.Fatalf(`expected False.Type() == BooleanType, got %s`, False.Type())
	}
}

// TestBooleanEvalToSelf checks that booleans are self-evaluating: Eval
// returns the value itself with no error.
func TestBooleanEvalToSelf(t *testing.T) {
	env := test.FakeEnv()
	v1, err := True.Eval(env)
	if err != nil {
		t.Fatalf(`expected err == nil, got %s`, err)
	}
	if v1 != True {
		t.Fatalf(`expected v1 == True, got %s`, v1)
	}
	v2, err := False.Eval(env)
	if err != nil {
		t.Fatalf(`expected err == nil, got %s`, err)
	}
	if v2 != False {
		t.Fatalf(`expected v2 == False, got %s`, v2)
	}
}

// TestBooleanEq checks that a boolean equals only itself.
func TestBooleanEq(t *testing.T) {
	if !True.Eq(True) {
		t.Fatalf(`expected True.Eq(True), got false`)
	}
	if !False.Eq(False) {
		t.Fatalf(`expected False.Eq(False), got false`)
	}
	if False.Eq(True) {
		t.Fatalf(`expected !False.Eq(True), got true`)
	}
	if True.Eq(False) {
		t.Fatalf(`expected !True.Eq(False), got true`)
	}
}

// TestBooleanGt checks ordering: True > False, booleans are > Nil, and a
// boolean is never greater than itself or an Int.
func TestBooleanGt(t *testing.T) {
	if True.Gt(True) {
		t.Fatalf(`expected !True.Gt(True), got true`)
	}
	if !True.Gt(False) {
		t.Fatalf(`expected True.Gt(False), got false`)
	}
	if False.Gt(False) {
		t.Fatalf(`expected !False.Gt(False), got true`)
	}
	if False.Gt(True) {
		t.Fatalf(`expected !False.Gt(True), got true`)
	}
	if False.Gt(Int(0)) {
		t.Fatalf(`expected !False.Gt(Int(0)), got true`)
	}
	if !False.Gt(Nil) {
		t.Fatalf(`expected False.Gt(Nil), got false`)
	}
}
// TestBooleanLt checks ordering: False < True, booleans are < Int, and a
// boolean is never less than itself or Nil.
// Fix: two failure messages were copy-pasted from the Gt test and named
// the wrong method; they now describe the Lt assertions they guard.
func TestBooleanLt(t *testing.T) {
	if True.Lt(True) {
		t.Fatalf(`expected !True.Lt(True), got true`)
	}
	if !False.Lt(True) {
		t.Fatalf(`expected False.Lt(True), got false`)
	}
	if False.Lt(False) {
		t.Fatalf(`expected !False.Lt(False), got true`)
	}
	if True.Lt(False) {
		t.Fatalf(`expected !True.Lt(False), got true`)
	}
	if !False.Lt(Int(0)) {
		t.Fatalf(`expected False.Lt(Int(0)), got false`)
	}
	if !True.Lt(Int(0)) {
		t.Fatalf(`expected True.Lt(Int(0)), got false`)
	}
	if False.Lt(Nil) {
		t.Fatalf(`expected !False.Lt(Nil), got true`)
	}
}
// TestBooleanString checks the string renderings of the boolean values.
// Fix: the False failure message claimed the expected string was "true";
// it now correctly says "false".
func TestBooleanString(t *testing.T) {
	if True.String() != "true" {
		t.Fatalf(`expected True.String() == "true", got %s`, True.String())
	}
	if False.String() != "false" {
		t.Fatalf(`expected False.String() == "false", got %s`, False.String())
	}
}
|
/*
* Copyright (C) 2018 Pierre Marchand <pierre.m@atelier-cartographique.be>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE
*/
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"go/build"
"io/ioutil"
"log"
"os"
"path"
"strings"
"text/template"
"time"
)
var (
	// funcMap exposes helper functions to the code-generation templates.
	funcMap = template.FuncMap{
		// "first" returns the lower-cased first byte of s as a string.
		// Fix: the original indexed s[0] unconditionally and panicked on
		// an empty string; it now returns "" in that case.
		"first": func(s string) string {
			if s == "" {
				return ""
			}
			return strings.ToLower(string(s[0]))
		},
	}
)
// stringSlice implements flag.Value so a flag may be passed repeatedly on
// the command line, accumulating every occurrence.
type stringSlice []string

// String renders the accumulated values as a comma-separated list.
func (ss *stringSlice) String() string {
	return strings.Join([]string(*ss), ",")
}

// Set appends v to the slice; it never fails.
func (ss *stringSlice) Set(v string) error {
	*ss = append(*ss, v)
	return nil
}
// typeMap maps an exported label (used in generated identifiers) to the Go
// type name it stands for.
type typeMap map[string]string

// basicTypes lists the Go built-in basic types, keyed by the label used
// when generating code for each of them (selected via the -basics flag).
var basicTypes = typeMap{
	"Bool":       "bool",
	"String":     "string",
	"Int":        "int",
	"Int8":       "int8",
	"Int16":      "int16",
	"Int32":      "int32",
	"Int64":      "int64",
	"UInt":       "uint",
	"UInt8":      "uint8",
	"UInt16":     "uint16",
	"UInt32":     "uint32",
	"UInt64":     "uint64",
	"UintPtr":    "uintptr",
	"Byte":       "byte",
	"Rune":       "rune",
	"Float32":    "float32",
	"Float64":    "float64",
	"Complex64":  "complex64",
	"Complex128": "complex128",
}
// generator holds the inputs needed to render one code-generation template.
type generator struct {
	packageName string   // package clause for the generated file
	types       typeMap  // label -> Go type pairs to generate for
	imports     []string // extra packages the generated file must import
}
// generate renders the template at templatePath with the generator's
// package name, a UTC timestamp, its type table, and its import list,
// returning the generated source bytes or the first error encountered.
func (g *generator) generate(templatePath string) ([]byte, error) {
	// Fix: the original discarded the ReadFile error, so a missing or
	// unreadable template silently rendered an empty file instead of
	// failing with a clear cause.
	bs, err := ioutil.ReadFile(templatePath)
	if err != nil {
		return nil, err
	}
	t := template.Must(template.New("option").Parse(string(bs)))
	data := struct {
		PackageName string
		Timestamp   time.Time
		Types       typeMap
		Imports     []string
	}{
		g.packageName,
		time.Now().UTC(),
		g.types,
		g.imports,
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, data); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// main parses command-line flags describing a futil template type and the
// label=type pairs to generate for, renders the matching template, and
// writes the generated Go file into the current package directory.
func main() {
	log.SetFlags(0)
	log.SetPrefix("option: ")
	var imports stringSlice
	templateType := flag.String("type", "none", "type to generate [ func | option | result | array], (required)")
	basics := flag.Bool("basics", false, "generate for basic types")
	flag.Var(&imports, "import", "a package to import, can be repeated")
	outputName := flag.String("output", "", "output file name, default is <type>.go")
	flag.Parse()
	if "none" == *templateType {
		log.Fatal(errors.New("type argument is required"))
	}
	// remaining positional args are label=type pairs
	// NOTE(review): a malformed pair without "=" panics on parts[1] —
	// confirm whether inputs are validated upstream.
	types := make(map[string]string)
	args := flag.Args()
	for _, pair := range args {
		parts := strings.Split(pair, "=")
		label := parts[0]
		typ := parts[1]
		types[label] = typ
	}
	// -basics merges the full built-in type table into the request
	if true == *basics {
		for k, v := range basicTypes {
			types[k] = v
		}
	}
	// the generated file's package is the one in the working directory
	pkg, err := build.Default.ImportDir(".", 0)
	if err != nil {
		log.Fatal(err)
	}
	var (
		g generator
	)
	g.types = types
	g.packageName = pkg.Name
	g.imports = imports
	// templates live in the futil source tree under GOPATH
	templatepath := path.Join(os.Getenv("GOPATH"),
		"src/github.com/pierremarc/futil", *templateType)
	src, err := g.generate(templatepath)
	if err != nil {
		log.Fatal(err)
	}
	outPath := fmt.Sprintf("%s.go", *templateType)
	if "" != *outputName {
		outPath = *outputName
	}
	if err = ioutil.WriteFile(outPath, src, 0644); err != nil {
		log.Fatalf("writing output: %s", err)
	}
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tagclient
import "log"
// TestProvider is a testing utility for mapping addresses to mock clients.
type TestProvider struct {
	clients map[string]Client // registered clients keyed by address
}
// NewTestProvider creates a new TestProvider with an empty registry.
func NewTestProvider() *TestProvider {
	p := &TestProvider{}
	p.clients = map[string]Client{}
	return p
}
// Register sets c as the client of addr, replacing any previous entry.
func (p *TestProvider) Register(addr string, c Client) {
	p.clients[addr] = c
}
// Provide selects the registered client of addr, panicking when no client
// has been registered for that address.
func (p *TestProvider) Provide(addr string) Client {
	if c, ok := p.clients[addr]; ok {
		return c
	}
	log.Panicf("addr %s not found", addr)
	return nil // unreachable: Panicf never returns
}
|
package repo
import (
"path/filepath"
"github.com/izumin5210/scaffold/domain/scaffold"
"github.com/pkg/errors"
)
// Create materializes entry e on the filesystem, creating its parent
// directory first. It returns whether the entry itself was created,
// whether the parent directory was created, and any error (wrapped with
// the offending path).
func (r *repo) Create(e scaffold.Entry) (bool, bool, error) {
	dir := filepath.Dir(e.Path())
	dirMade, err := r.fs.CreateDir(dir)
	if err != nil {
		return false, dirMade, errors.Wrapf(err, "Failed to create directory %q", dir)
	}
	if e.IsDir() {
		made, err := r.fs.CreateDir(e.Path())
		if err != nil {
			return made, dirMade, errors.Wrapf(err, "Failed to create directory %q", e.Path())
		}
		return made, dirMade, nil
	}
	if err := r.fs.CreateFile(e.Path(), e.Content()); err != nil {
		return false, dirMade, errors.Wrapf(err, "Failed to create file %q", e.Path())
	}
	return true, dirMade, nil
}
|
package wxapp
// SeckillProduct is a flash-sale ("seckill") product record.
type SeckillProduct struct {
	ComID         int64   `json:"com_id"`                               // owning company ID
	ID            int64   `json:"id" bson:"id"`                         // product record ID
	Product       string  `json:"product" bson:"product"`               // product name
	Price         float64 `json:"price" bson:"price"`                   // original price
	DiscountPrice float64 `json:"discount_price" bson:"discount_price"` // discounted (sale) price
	ImageURL      string  `json:"image_url" bson:"image_url"`           // product image URL
	Comment       string  `json:"comment" bson:"comment"`               // annotation / label
	IsShow        bool    `json:"is_show" bson:"is_show"`               // whether to display the product
}
|
package internal
import (
"framework/cluster"
"gamesvr/game"
"gamesvr/svrconf"
)
// Module embeds the framework cluster service for the game server.
type Module struct {
	*cluster.Cluster
}

// OnInit constructs the embedded Cluster, wiring in the configured
// maximum message length and the game module's RPC channel.
func (m *Module) OnInit() {
	m.Cluster = &cluster.Cluster{
		MaxMsgLen:    svrconf.MaxMsgLen,
		AgentChanRPC: game.ChanRPC,
	}
}
|
package protocol_handler
import (
"decept-defense/controllers/comm"
"decept-defense/internal/message_client"
"decept-defense/models"
"decept-defense/pkg/app"
"decept-defense/pkg/configs"
"decept-defense/pkg/util"
"encoding/json"
"github.com/astaxie/beego/validation"
"github.com/gin-gonic/gin"
"github.com/unknwon/com"
"go.uber.org/zap"
"net/http"
"path"
"strconv"
"strings"
)
// ProtocolUpdatePayload is the request body for updating a protocol's
// allowed port range; both bounds are required.
type ProtocolUpdatePayload struct {
	MinPort int32 `json:"MinPort" form:"MinPort" binding:"required"`
	MaxPort int32 `json:"MaxPort" form:"MaxPort" binding:"required"`
}
// CreateProtocol 创建协议服务
// @Summary 创建协议服务
// @Description 创建协议接口
// @Tags 影子代理
// @Produce application/json
// @Accept multipart/form-data
// @Param ProtocolType body models.Protocols true "ProtocolType"
// @Param Authorization header string true "Insert your access token" default(Bearer <Add access token here>)
// @Success 200 {string} json "{"code":200,"msg":"ok","data":{}}"
// @Failure 400 {string} json "{"code":400,"msg":"请求参数错误","data":{}}"
// @Failure 500 {string} json "{"code":500,"msg":"内部异常","data":{}}"
// @Failure 5001 {string} json "{"code":5001,"msg":"蜜罐服务器不存在、请检测蜜罐服务状态","data":{}}"
// @Failure 6001 {string} json "{"code":6001,"msg":"协议创建失败","data":{}}"
// @Failure 6004 {string} json "{"code":6004,"msg":"协议名称重复","data":{}}"
// @Router /api/v1/protocol [post]
// CreateProtocol creates a new protocol service: it validates the port
// range and uploaded implementation file, stores the upload, bundles it
// with the deploy script into a tarball, publishes a deploy task for the
// first honeypot server's agent, and finally persists the protocol record.
func CreateProtocol(c *gin.Context) {
	appG := app.Gin{C: c}
	var protocol models.Protocols
	var taskPayload comm.FileTaskPayload
	var server models.HoneypotServers
	// the protocol implementation arrives as a multipart file upload
	file, err := c.FormFile("file")
	if err != nil {
		zap.L().Error(err.Error())
		appG.Response(http.StatusOK, app.InvalidParams, err.Error())
		return
	}
	err = c.ShouldBind(&protocol)
	if err != nil {
		zap.L().Error(err.Error())
		appG.Response(http.StatusOK, app.InvalidParams, err.Error())
		return
	}
	// ports must form an ordered range within 1..65535
	if protocol.MinPort > protocol.MaxPort || protocol.MinPort <= 0 || protocol.MinPort > 65535 || protocol.MaxPort <= 0 || protocol.MaxPort > 65535 {
		zap.L().Error("端口范围异常")
		appG.Response(http.StatusOK, app.ErrorProtocolPortRange, nil)
		return
	}
	// a nil error here means a protocol of this type already exists
	_, err = protocol.GetProtocolByType(protocol.ProtocolType)
	if err == nil {
		appG.Response(http.StatusOK, app.ErrorProtocolDup, nil)
		return
	}
	protocolUploadFileBasePath := path.Join(util.WorkingPath(), configs.GetSetting().App.UploadPath, "protocol")
	protocolScriptBasePath := path.Join(util.WorkingPath(), configs.GetSetting().App.ScriptPath, "protocol", "deploy.sh")
	currentUser, exist := c.Get("currentUser")
	if !exist {
		// Fix: the original logged and returned the stale err left over
		// from GetProtocolByType here, masking the real problem (a missing
		// authenticated user in the request context).
		zap.L().Error("currentUser not found in context")
		appG.Response(http.StatusOK, app.INTERNAlERROR, nil)
		return
	}
	protocol.CreateTime = util.GetCurrentTime()
	protocol.Creator = *(currentUser.(*string))
	protocol.FileName = file.Filename
	// persist the upload under <base>/<protocol type>/<original file name>
	savePath := path.Join(protocolUploadFileBasePath, protocol.ProtocolType)
	if err := util.CreateDir(savePath); err != nil {
		zap.L().Error(err.Error())
		appG.Response(http.StatusOK, app.INTERNAlERROR, err.Error())
		return
	}
	protocol.LocalPath = savePath
	if err := c.SaveUploadedFile(file, path.Join(savePath, protocol.FileName)); err != nil {
		zap.L().Error(err.Error())
		appG.Response(http.StatusOK, app.INTERNAlERROR, err.Error())
		return
	}
	// bundle the upload together with the deploy script into a tarball
	tarPath := path.Join(protocolUploadFileBasePath, strings.Join([]string{protocol.ProtocolType, ".tar.gz"}, ""))
	err = util.CompressTarGz(tarPath, protocolScriptBasePath, path.Join(savePath, protocol.FileName))
	if err != nil {
		zap.L().Error(err.Error())
		appG.Response(http.StatusOK, app.INTERNAlERROR, err.Error())
		return
	}
	protocol.DeployPath = configs.GetSetting().App.ProtocolDeployPath
	id, err := util.GetUniqueID()
	if err != nil {
		zap.L().Error(err.Error())
		appG.Response(http.StatusOK, app.INTERNAlERROR, err.Error())
		return
	}
	protocol.TaskID = id
	// build and publish the deploy task for the honeypot agent
	{
		taskPayload.TaskID = id
		taskPayload.URL = util.Base64Encode("http:" + "//" + configs.GetSetting().Server.AppHost + ":" + strconv.Itoa(configs.GetSetting().Server.HttpPort) + "/" + configs.GetSetting().App.UploadPath + "/protocol/" + strings.Join([]string{protocol.ProtocolType, ".tar.gz"}, ""))
		md5, err := util.GetFileMD5(tarPath)
		if err != nil {
			zap.L().Error(err.Error())
			appG.Response(http.StatusOK, app.INTERNAlERROR, err.Error())
			return
		}
		taskPayload.FileMD5 = md5
		honeypot, err := server.GetFirstHoneypotServer()
		if err != nil {
			zap.L().Error(err.Error())
			appG.Response(http.StatusOK, app.ErrorHoneypotServerNotExist, err.Error())
			return
		}
		taskPayload.TaskType = comm.PROTOCOL
		taskPayload.OperatorType = comm.DEPLOY
		taskPayload.AgentID = honeypot.AgentID
		taskPayload.ScriptName = path.Base(protocolScriptBasePath)
		taskPayload.CommandParameters = map[string]string{"-d": protocol.DeployPath, "-s": protocol.FileName}
		jsonByte, _ := json.Marshal(taskPayload)
		err = message_client.PublishMessage(configs.GetSetting().App.TaskChannel, string(jsonByte))
		if err != nil {
			zap.L().Error(err.Error())
			appG.Response(http.StatusOK, app.ErrorRedis, err.Error())
			return
		}
	}
	protocol.Status = comm.RUNNING
	err = protocol.CreateProtocol()
	if err != nil {
		zap.L().Error(err.Error())
		appG.Response(http.StatusOK, app.ErrorProtocolCreate, err.Error())
		return
	}
	appG.Response(http.StatusOK, app.SUCCESS, nil)
}
// GetProtocol 查找协议
// @Summary 查找协议
// @Description 查找协议
// @Tags 影子代理
// @Produce application/json
// @Accept application/json
// @Param Payload body comm.SelectPayload false "Payload"
// @Param PageNumber body comm.SelectPayload true "PageNumber"
// @Param PageSize body comm.SelectPayload true "PageSize"
// @Param Authorization header string true "Insert your access token" default(Bearer <Add access token here>)
// @Success 200 {object} comm.ProtocolSelectResultPayload
// @Failure 400 {string} json "{"code":400,"msg":"请求参数错误","data":{}}"
// @Failure 500 {string} json "{"code":500,"msg":"内部异常","data":{}}"
// @Failure 6002 {string} json "{"code":6002,"msg":"协议获取失败","data":{}}"
// @Router /api/v1/protocol/set [post]
// GetProtocol returns a paginated list of protocol services matching the
// posted SelectPayload filter, along with the total match count.
func GetProtocol(c *gin.Context) {
	appG := app.Gin{C: c}
	var record models.Protocols
	var payload comm.SelectPayload
	err := c.ShouldBindJSON(&payload)
	if err != nil {
		zap.L().Error("请求参数异常")
		appG.Response(http.StatusOK, app.InvalidParams, nil)
		return
	}
	// count is the total number of matching rows; data is the current page
	data, count, err := record.GetProtocol(&payload)
	if err != nil {
		zap.L().Error("协议服务列表请求异常")
		appG.Response(http.StatusOK, app.ErrorProtocolGet, nil)
		return
	}
	appG.Response(http.StatusOK, app.SUCCESS, comm.SelectResultPayload{Count: count, List: data})
}
// GetProtocolType 查找协议类型接口
// @Summary 用户查看自定义协议类型接口
// @Description 用户查看自定义协议类型接口
// @Tags 影子代理
// @Produce application/json
// @Accept application/json
// @Param Authorization header string true "Insert your access token" default(Bearer <Add access token here>)
// @Success 200 {string} json "{"code":200,"msg":"OK","data":[""]}"
// @Failure 400 {string} json "{"code":400,"msg":"请求参数错误","data":{}}"
// @Failure 500 {string} json "{"code":500,"msg":"内部异常","data":{}}"
// @Router /api/v1/protocol/type [get]
// GetProtocolType returns the list of known custom protocol types.
func GetProtocolType(c *gin.Context) {
	appG := app.Gin{C: c}
	var protocol models.Protocols
	data, err := protocol.GetProtocolTypeList()
	if err != nil {
		zap.L().Error("获取协议服务类型异常")
		appG.Response(http.StatusOK, app.INTERNAlERROR, nil)
		return
	}
	appG.Response(http.StatusOK, app.SUCCESS, data)
}
// DeleteProtocol 删除协议服务
// @Summary 删除协议服务
// @Description 删除协议服务
// @Tags 影子代理
// @Produce application/json
// @Accept application/json
// @Param int query int true "int valid" minimum(1)
// @Param Authorization header string true "Insert your access token" default(Bearer <Add access token here>)
// @Success 200 {string} json "{"code":200,"msg":"ok","data":{}}"
// @Failure 400 {string} json "{"code":400,"msg":"请求参数错误","data":{}}"
// @Failure 500 {string} json "{"code":500,"msg":"内部异常","data":{}}"
// @Failure 6003 {string} json "{"code":6003,"msg":"协议删除失败","data":{}}"
// @Router /api/v1/protocol/:id [delete]
// DeleteProtocol removes the protocol service identified by the path id,
// which must be a positive integer.
func DeleteProtocol(c *gin.Context) {
	appG := app.Gin{C: c}
	valid := validation.Validation{}
	id := com.StrTo(c.Param("id")).MustInt64()
	var protocol models.Protocols
	// id must be >= 1; MustInt64 yields 0 for a non-numeric path segment
	valid.Min(id, 1, "id").Message("ID必须大于0")
	if valid.HasErrors() {
		zap.L().Error("请求参数异常")
		appG.Response(http.StatusOK, app.InvalidParams, nil)
		return
	}
	if err := protocol.DeleteProtocolByID(id); err != nil {
		zap.L().Error("删除协议异常")
		appG.Response(http.StatusOK, app.ErrorProtocolDel, nil)
		return
	}
	appG.Response(http.StatusOK, app.SUCCESS, nil)
}
// CreateSSHKey stores the SSH key supplied in the request body as the
// "SSHKey" setting.
func CreateSSHKey(c *gin.Context) {
	appG := app.Gin{C: c}
	type SSHPayload struct {
		SSHKey  string `json:"ssh_key"`
		AgentID string `json:"agentid"`
	}
	var payload SSHPayload
	var setting models.Setting
	// Fix: ShouldBind requires a pointer target; the original passed the
	// struct by value, so binding could never populate payload.SSHKey.
	err := c.ShouldBind(&payload)
	if err != nil {
		zap.L().Error(err.Error())
		// NOTE(review): app.ErrorProtocolDel looks like a copy-pasted code
		// for a bind failure; kept as-is for backward compatibility.
		appG.Response(http.StatusOK, app.ErrorProtocolDel, err.Error())
		return
	}
	setting.ConfigName = "SSHKey"
	setting.ConfigValue = payload.SSHKey
	// NOTE(review): CreateSetting's result is ignored here — confirm
	// whether persistence failures should be reported to the caller.
	setting.CreateSetting()
	appG.Response(http.StatusOK, app.SUCCESS, nil)
}
// UpdateProtocolPortRange 更新协议端口范围
// @Summary 更新协议端口范围
// @Description 更新协议端口范围
// @Tags 影子代理
// @Produce application/json
// @Accept application/json
// @Param int query int true "int valid" minimum(1)
// @Param Authorization header string true "Insert your access token" default(Bearer <Add access token here>)
// @Success 200 {string} json "{"code":200,"msg":"ok","data":{}}"
// @Failure 400 {string} json "{"code":400,"msg":"请求参数错误","data":{}}"
// @Failure 500 {string} json "{"code":500,"msg":"内部异常","data":{}}"
// @Failure 6003 {string} json "{"code":6003,"msg":"协议删除失败","data":{}}"
// @Router /api/v1/protocol/port/:id [put]
// UpdateProtocolPortRange updates the allowed port range of the protocol
// identified by the path id. Both bounds must lie in 1..65535 with
// MinPort <= MaxPort.
func UpdateProtocolPortRange(c *gin.Context) {
	appG := app.Gin{C: c}
	valid := validation.Validation{}
	id := com.StrTo(c.Param("id")).MustInt64()
	var protocol models.Protocols
	var payload ProtocolUpdatePayload
	// id must be >= 1; MustInt64 yields 0 for a non-numeric path segment
	valid.Min(id, 1, "id").Message("ID必须大于0")
	if valid.HasErrors() {
		zap.L().Error("请求参数异常")
		appG.Response(http.StatusOK, app.InvalidParams, nil)
		return
	}
	err := c.ShouldBind(&payload)
	if err != nil {
		zap.L().Error(err.Error())
		appG.Response(http.StatusOK, app.InvalidParams, err.Error())
		return
	}
	// same range validation as protocol creation
	if payload.MinPort > payload.MaxPort || payload.MinPort <= 0 || payload.MinPort > 65535 || payload.MaxPort <= 0 || payload.MaxPort > 65535 {
		zap.L().Error("端口范围异常")
		appG.Response(http.StatusOK, app.ErrorProtocolPortRange, nil)
		return
	}
	if err := protocol.UpdateProtocolPortRange(payload.MinPort, payload.MaxPort, id); err != nil {
		zap.L().Error("更新协议")
		appG.Response(http.StatusOK, app.ErrorProtocolUpdate, nil)
		return
	}
	appG.Response(http.StatusOK, app.SUCCESS, nil)
}
|
package typeutils
import (
"reflect"
"testing"
"github.com/stretchr/testify/suite"
)
// These tests confirm the developer's understanding of how Go works.
// More specifically how the Go reflection mechanism works.
var (
	// Fixtures: a and c are equal-by-value alpha instances, b is a distinct
	// bravo; ai/bi/ci hold pointers to them behind the actor interface.
	a          = alpha{Name: "Hubert", Percent: 17.23}
	b          = bravo{Finished: true, Iterations: 79}
	c          = a
	ai   actor = &a
	bi   actor = &b
	ci   actor = &c
)
//////////////////////////////////////////////////////////////////////////
// ReflectTestSuite groups the sanity checks about Go reflection behavior.
type ReflectTestSuite struct {
	suite.Suite
}

// SetupTest is intentionally empty: there is no per-test state to prepare.
func (suite *ReflectTestSuite) SetupTest() {
}

// TestReflectSuite runs the suite under the standard test runner.
func TestReflectSuite(t *testing.T) {
	suite.Run(t, new(ReflectTestSuite))
}
//////////////////////////////////////////////////////////////////////////
// Verify method for determining path of package via an object defined therein.
// TestPackagePath confirms that a type's PkgPath reports this package's
// import path.
func (suite *ReflectTestSuite) TestPackagePath() {
	suite.Assert().Equal(TypeUtilsPackagePath, reflect.TypeOf(alpha{}).PkgPath())
}
//////////////////////////////////////////////////////////////////////////
// Make certain reflect.Type supports equivalence testing and use as map key.
// Note that this does NOT work for types.Type, which is a different thing.
// TestTypeEquivalence checks value equivalence of the struct fixtures:
// a and c are copies of the same value, b differs from both.
func (suite *ReflectTestSuite) TestTypeEquivalence() {
	suite.Assert().NotEqual(a, b)
	suite.Assert().NotEqual(b, c)
	suite.Assert().Equal(a, c)
}

// TestInterfaceEquivalence repeats the equivalence check through the
// actor interface values.
func (suite *ReflectTestSuite) TestInterfaceEquivalence() {
	suite.Assert().NotEqual(ai, bi)
	suite.Assert().NotEqual(bi, ci)
	suite.Assert().Equal(ai, ci)
}

// TestMap verifies that reflect.Type works as a map key: values of the
// same type collide on the same entry.
func (suite *ReflectTestSuite) TestMap() {
	lookup := make(map[reflect.Type]string)
	lookup[reflect.TypeOf(a)] = "alpha"
	lookup[reflect.TypeOf(b)] = "bravo"
	lookup[reflect.TypeOf(c)] = "charlie" // overrides "alpha" since a == c
	suite.Assert().Equal("charlie", lookup[reflect.TypeOf(a)])
	suite.Assert().Equal("bravo", lookup[reflect.TypeOf(b)])
	suite.Assert().Equal("charlie", lookup[reflect.TypeOf(c)])
}

// TestMapInterface repeats the map-key check for types taken from
// interface-typed values.
func (suite *ReflectTestSuite) TestMapInterface() {
	lookup := make(map[reflect.Type]string)
	lookup[reflect.TypeOf(ai)] = "alpha"
	lookup[reflect.TypeOf(bi)] = "bravo"
	lookup[reflect.TypeOf(ci)] = "charlie" // overrides "alpha" since ai == ci
	suite.Assert().Equal("charlie", lookup[reflect.TypeOf(ai)])
	suite.Assert().Equal("bravo", lookup[reflect.TypeOf(bi)])
	suite.Assert().Equal("charlie", lookup[reflect.TypeOf(ci)])
}
|
/*
Copyright 2015 All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"html/template"
"io"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/crypto/acme/autocert"
"golang.org/x/sync/errgroup"
httplog "log"
proxyproto "github.com/armon/go-proxyproto"
"github.com/coreos/go-oidc/oidc"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/oneconcern/keycloak-gatekeeper/version"
"go.uber.org/zap"
)
// oauthProxy bundles everything the gatekeeper needs at runtime: the OIDC
// client and provider configuration, HTTP routers and server, session
// storage, templates, and the reverse/forward proxy plumbing.
type oauthProxy struct {
	client       *oidc.Client
	config       *Config
	endpoint     *url.URL // parsed upstream endpoint
	idp          oidc.ProviderConfig
	idpClient    *http.Client
	listener     net.Listener
	log          *zap.Logger
	router       http.Handler
	adminRouter  http.Handler
	server       *http.Server
	store        storage
	templates    *template.Template
	upstream     reverseProxy
	csrf         func(http.Handler) http.Handler
	// preconfigured closures
	cookieChunker func(string, string) int
	cookieDropper func(string, string, string, time.Duration) *http.Cookie
	// context that drives the forwarder's goroutine (used for testing)
	forwardCtx       context.Context //nolint:containedctx
	forwardCancel    func()
	forwardWaitGroup *errgroup.Group
}
func init() {
	// NOTE(review): GOMAXPROCS has defaulted to NumCPU since Go 1.5, so
	// this call is likely redundant on modern toolchains.
	runtime.GOMAXPROCS(runtime.NumCPU()) // set the core
}
// newProxy creates a new proxy from configuration: it builds the logger,
// parses the upstream endpoint, optionally wires session storage and the
// OpenID client, and assembles either a forwarding or a reverse proxy.
func newProxy(config *Config) (*oauthProxy, error) {
	// create the service logger
	log, err := createLogger(config)
	if err != nil {
		return nil, err
	}
	log.Info("starting the service", zap.String("prog", version.Prog), zap.String("author", version.Author), zap.String("version", version.GetVersion()))
	svc := &oauthProxy{
		config: config,
		log:    log,
	}
	svc.cookieChunker = svc.makeCookieChunker()
	svc.cookieDropper = svc.makeCookieDropper()
	// parse the upstream endpoint
	if svc.endpoint, err = url.Parse(config.Upstream); err != nil {
		return nil, err
	}
	// initialize the store if any
	if config.StoreURL != "" {
		if svc.store, err = createStorage(config.StoreURL); err != nil {
			return nil, err
		}
	}
	// initialize the openid client unless token verification is disabled
	if !config.SkipTokenVerification {
		if svc.client, svc.idp, svc.idpClient, err = svc.newOpenIDClient(); err != nil {
			return nil, err
		}
	} else {
		log.Warn("TESTING ONLY CONFIG - access token verification has been disabled")
	}
	if config.ClientID == "" && config.ClientSecret == "" {
		log.Warn("client credentials are not set, depending on provider (confidential|public) you might be unable to auth")
	}
	if config.EnableForwarding {
		// runs forward proxy mode
		if err := svc.createForwardingProxy(); err != nil {
			return nil, err
		}
	} else {
		// runs reverse proxy mode
		if err := svc.createReverseProxy(); err != nil {
			return nil, err
		}
		// publish health, metrics and profiling endpoints
		svc.createAdminServices()
	}
	return svc, nil
}
var onceDiscardLog, onceEnableLog sync.Once

// createLogger builds the zap service logger according to the logging
// options in config (nop, JSON/console, verbose).
func createLogger(config *Config) (*zap.Logger, error) {
	// silence the stdlib http logger exactly once
	onceDiscardLog.Do(func() {
		httplog.SetOutput(io.Discard)
	})
	if config.DisableAllLogging {
		return zap.NewNop(), nil
	}
	cfg := zap.NewProductionConfig()
	cfg.DisableStacktrace = true
	cfg.DisableCaller = true
	if !config.EnableJSONLogging {
		// human-readable output instead of JSON
		cfg.Encoding = "console"
	}
	if config.Verbose {
		// verbose mode re-enables the stdlib http logger and debug output
		onceEnableLog.Do(func() {
			httplog.SetOutput(os.Stderr)
		})
		cfg.DisableCaller = false
		cfg.Development = true
		cfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
	}
	return cfg.Build()
}
// useDefaultStack sets the default middleware stack for router.
// NOTE: middlewares execute in registration order (chi), so the sequence
// below — recoverer, tracing, request-id, entrypoint, logging, security —
// is deliberate; do not reorder casually.
func (r *oauthProxy) useDefaultStack(engine chi.Router) {
	// unmatched routes get an empty response rather than chi's defaults
	engine.MethodNotAllowed(emptyHandler)
	engine.NotFound(emptyHandler)
	engine.Use(middleware.Recoverer)
	// distributed tracing, when enabled
	if r.config.EnableTracing {
		engine.Use(r.proxyTracingMiddleware)
	}
	// @check if the request tracking id middleware is enabled
	if r.config.EnableRequestID {
		r.log.Info("enabled the correlation request id middleware")
		engine.Use(r.requestIDMiddleware(r.config.RequestIDHeader))
	}
	// @step: enable the entrypoint middleware
	engine.Use(entrypointMiddleware)
	if r.config.EnableLogging {
		engine.Use(r.loggingMiddleware)
	}
	if r.config.EnableSecurityFilter {
		engine.Use(r.securityMiddleware)
	}
}
// Run starts the proxy service: the main (reverse or forward) server, plus
// the optional plain-HTTP service and the admin service. It returns once the
// listeners are created; serving happens in goroutines. A serving error is
// fatal, except http.ErrServerClosed, which signals a graceful shutdown.
func (r *oauthProxy) Run() error {
	listener, err := r.createHTTPListener(makeListenerConfig(r.config))
	if err != nil {
		return fmt.Errorf("could not start main service: %w", err)
	}
	// step: create the main http(s) server
	server := &http.Server{
		Addr:              r.config.Listen,
		Handler:           r.router,
		ReadTimeout:       r.config.ServerReadTimeout,
		ReadHeaderTimeout: r.config.ServerReadTimeout,
		WriteTimeout:      r.config.ServerWriteTimeout,
		IdleTimeout:       r.config.ServerIdleTimeout,
	}
	r.server = server
	r.listener = listener
	go func() {
		r.log.Info("keycloak proxy service starting", zap.String("interface", r.config.Listen))
		// use a goroutine-local error: assigning the enclosing function's err
		// from this goroutine would be a data race with the caller
		if err := server.Serve(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
			r.log.Fatal("failed to start the http service", zap.Error(err))
		}
	}()
	// step: are we running an additional plain http service as well?
	if r.config.ListenHTTP != "" {
		r.log.Info("keycloak proxy http service starting", zap.String("interface", r.config.ListenHTTP))
		httpListener, err := r.createHTTPListener(listenerConfig{
			listen:        r.config.ListenHTTP,
			proxyProtocol: r.config.EnableProxyProtocol,
		})
		if err != nil {
			return err
		}
		httpsvc := &http.Server{
			Addr:              r.config.ListenHTTP,
			Handler:           r.router,
			ReadTimeout:       r.config.ServerReadTimeout,
			ReadHeaderTimeout: r.config.ServerReadTimeout,
			WriteTimeout:      r.config.ServerWriteTimeout,
			IdleTimeout:       r.config.ServerIdleTimeout,
		}
		go func() {
			// tolerate ErrServerClosed so a graceful shutdown is not fatal
			if err := httpsvc.Serve(httpListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
				r.log.Fatal("failed to start the http redirect service", zap.Error(err))
			}
		}()
	}
	// step: are we running specific admin service as well?
	// if not, admin endpoints are added as routes in the main service
	if r.config.ListenAdmin != "" {
		r.log.Info("keycloak proxy admin service starting", zap.String("interface", r.config.ListenAdmin))
		var (
			adminListener net.Listener
			err           error
		)
		r.log.Info("server admin service with scheme:", zap.String("scheme", r.config.ListenAdminScheme))
		if r.config.ListenAdminScheme == unsecureScheme {
			// run the admin endpoint (metrics, health) with http
			adminListener, err = r.createHTTPListener(listenerConfig{
				listen:        r.config.ListenAdmin,
				proxyProtocol: r.config.EnableProxyProtocol,
			})
			if err != nil {
				return err
			}
		} else {
			adminListenerConfig := makeListenerConfig(r.config)
			// admin specific overrides
			adminListenerConfig.listen = r.config.ListenAdmin
			// TLS configuration defaults to the one for the main service,
			// and may be overridden
			if r.config.TLSAdminPrivateKey != "" && r.config.TLSAdminCertificate != "" {
				adminListenerConfig.useFileTLS = true
				adminListenerConfig.certificate = r.config.TLSAdminCertificate
				adminListenerConfig.privateKey = r.config.TLSAdminPrivateKey
			}
			if r.config.TLSAdminCaCertificate != "" {
				adminListenerConfig.ca = r.config.TLSAdminCaCertificate
			}
			if r.config.TLSAdminClientCertificate != "" {
				adminListenerConfig.clientCerts = []string{r.config.TLSAdminClientCertificate}
			}
			if len(r.config.TLSAdminClientCertificates) > 0 {
				adminListenerConfig.clientCerts = r.config.TLSAdminClientCertificates
			}
			adminListener, err = r.createHTTPListener(adminListenerConfig)
			if err != nil {
				return err
			}
		}
		adminsvc := &http.Server{
			Addr:              r.config.ListenAdmin,
			Handler:           r.adminRouter,
			ReadTimeout:       r.config.ServerReadTimeout,
			ReadHeaderTimeout: r.config.ServerReadTimeout,
			WriteTimeout:      r.config.ServerWriteTimeout,
			IdleTimeout:       r.config.ServerIdleTimeout,
		}
		go func() {
			// tolerate ErrServerClosed so a graceful shutdown is not fatal
			if ers := adminsvc.Serve(adminListener); ers != nil && !errors.Is(ers, http.ErrServerClosed) {
				r.log.Fatal("failed to start the admin service", zap.Error(ers))
			}
		}()
	}
	return nil
}
// listenerConfig encapsulates the options used to build a single listening
// socket: bind address, proxy-protocol support, and the TLS source
// (files, letsencrypt or self-signed).
type listenerConfig struct {
	ca                  string   // the path to a certificate authority
	certificate         string   // the path to the certificate if any
	clientCerts         []string // the paths to client certificates to use for mutual tls
	hostnames           []string // list of hostnames the service will respond to
	letsEncryptCacheDir string   // the path to cache letsencrypt certificates
	listen              string   // the interface to bind the listener to
	privateKey          string   // the path to the private key if any
	proxyProtocol       bool     // whether to enable proxy protocol on the listen
	redirectionURL      string   // url to redirect to
	useFileTLS          bool     // indicates we are using certificates from files
	useLetsEncryptTLS   bool     // indicates we are using letsencrypt
	useSelfSignedTLS    bool     // indicates we are using the self-signed tls
	// advanced TLS settings (min version, curves, cipher suites, ...)
	*tlsAdvancedConfig
}
// makeListenerConfig extracts a listener configuration from a proxy Config.
func makeListenerConfig(config *Config) listenerConfig {
	lc := listenerConfig{
		hostnames:           config.Hostnames,
		letsEncryptCacheDir: config.LetsEncryptCacheDir,
		listen:              config.Listen,
		proxyProtocol:       config.EnableProxyProtocol,
		redirectionURL:      config.RedirectionURL,
		privateKey:          config.TLSPrivateKey,
		// TLS settings
		useFileTLS:        config.TLSPrivateKey != "" && config.TLSCertificate != "",
		ca:                config.TLSCaCertificate,
		certificate:       config.TLSCertificate,
		clientCerts:       nil,
		useLetsEncryptTLS: config.UseLetsEncrypt,
		useSelfSignedTLS:  config.EnabledSelfSignedTLS,
		tlsAdvancedConfig: &tlsAdvancedConfig{
			tlsMinVersion:               config.TLSMinVersion,
			tlsCurvePreferences:         config.TLSCurvePreferences,
			tlsCipherSuites:             config.TLSCipherSuites,
			tlsUseModernSettings:        config.TLSUseModernSettings,
			tlsPreferServerCipherSuites: config.TLSPreferServerCipherSuites,
		},
	}
	// single client cert, overridden by the plural setting when present
	switch {
	case len(config.TLSClientCertificates) > 0:
		lc.clientCerts = config.TLSClientCertificates
	case config.TLSClientCertificate != "":
		lc.clientCerts = []string{config.TLSClientCertificate}
	}
	return lc
}
// ErrHostNotConfigured indicates the hostname was not configured
var ErrHostNotConfigured = errors.New("acme/autocert: host not configured")
// createHTTPListener is responsible for creating a listening socket:
// unix socket or tcp, optionally wrapped with proxy-protocol and TLS
// (from letsencrypt, a self-signed certificate, or certificate files).
func (r *oauthProxy) createHTTPListener(config listenerConfig) (net.Listener, error) {
	var listener net.Listener
	var err error
	// are we creating a unix socket or a tcp listener?
	if strings.HasPrefix(config.listen, "unix://") {
		socket := config.listen[7:]
		// remove a stale socket file left over from a previous run
		if exists := fileExists(socket); exists {
			if err = os.Remove(socket); err != nil {
				return nil, err
			}
		}
		r.log.Info("listening on unix socket", zap.String("interface", config.listen))
		if listener, err = net.Listen("unix", socket); err != nil {
			return nil, err
		}
	} else if listener, err = net.Listen("tcp", config.listen); err != nil {
		return nil, err
	}
	// does it require proxy protocol?
	if config.proxyProtocol {
		r.log.Info("enabling the proxy protocol on listener", zap.String("interface", config.listen))
		listener = &proxyproto.Listener{Listener: listener}
	}
	// @check if the socket requires TLS. Certificate sources are applied in
	// order — letsencrypt, then self-signed, then files — so when more than
	// one is enabled the last one configured below wins.
	if config.useSelfSignedTLS || config.useLetsEncryptTLS || config.useFileTLS {
		// default getCertificate fails until a source below overrides it
		getCertificate := func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			return nil, errors.New("not configured")
		}
		if config.useLetsEncryptTLS {
			r.log.Info("enabling letsencrypt tls support")
			m := autocert.Manager{
				Prompt: autocert.AcceptTOS,
				Cache:  autocert.DirCache(config.letsEncryptCacheDir),
				// accept only hosts listed in hostnames, or — when no
				// hostnames are configured — the redirection URL's host
				HostPolicy: func(_ context.Context, host string) error {
					if len(config.hostnames) > 0 {
						found := false
						for _, h := range config.hostnames {
							found = found || (h == host)
						}
						if !found {
							return ErrHostNotConfigured
						}
					} else if config.redirectionURL != "" {
						if u, err := url.Parse(config.redirectionURL); err != nil {
							return err
						} else if u.Host != host {
							return ErrHostNotConfigured
						}
					}
					return nil
				},
			}
			getCertificate = m.GetCertificate
		}
		if config.useSelfSignedTLS {
			r.log.Info("enabling self-signed tls support", zap.Duration("expiration", r.config.SelfSignedTLSExpiration))
			rotate, err := newSelfSignedCertificate(r.config.SelfSignedTLSHostnames, r.config.SelfSignedTLSExpiration, r.log)
			if err != nil {
				return nil, err
			}
			getCertificate = rotate.GetCertificate
		}
		if config.useFileTLS {
			r.log.Info("tls support enabled", zap.String("certificate", config.certificate), zap.String("private_key", config.privateKey))
			rotate, err := newCertificateRotator(config.certificate, config.privateKey, r.log)
			if err != nil {
				r.log.Error("error while setting certificate rotator", zap.Error(err))
				return nil, err
			}
			// start watching the files for changes
			if err := rotate.watch(); err != nil {
				r.log.Error("error while setting file watch on certificate", zap.Error(err))
				return nil, err
			}
			getCertificate = rotate.GetCertificate
		}
		// advanced TLS settings (min version, curves, cipher suites)
		ts, err := parseTLS(config.tlsAdvancedConfig)
		if err != nil {
			return nil, err
		}
		tlsConfig := &tls.Config{
			GetCertificate: getCertificate,
			// Causes servers to use Go's default ciphersuite preferences,
			// which are tuned to avoid attacks. Does nothing on clients.
			//nolint:gas
			PreferServerCipherSuites: ts.tlsPreferServerCipherSuites,
			CurvePreferences:         ts.tlsCurvePreferences,
			NextProtos:               []string{"h2", "http/1.1"},
			MinVersion:               ts.tlsMinVersion,
			CipherSuites:             ts.tlsCipherSuites,
		}
		// @check if we are doing mutual tls
		if len(config.clientCerts) > 0 {
			r.log.Info("enabling mutual tls support with client certs")
			caCertPool, erp := makeCertPool("client", config.clientCerts...)
			if erp != nil {
				r.log.Error("unable to read client CA certificate", zap.Error(erp))
				return nil, erp
			}
			tlsConfig.ClientCAs = caCertPool
			tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
		}
		listener = tls.NewListener(listener, tlsConfig)
	}
	return listener, nil
}
// createTemplates loads the custom sign-in and forbidden page templates,
// when configured, into r.templates.
func (r *oauthProxy) createTemplates() error {
	var pages []string
	if page := r.config.SignInPage; page != "" {
		r.log.Debug("loading the custom sign in page", zap.String("page", page))
		pages = append(pages, page)
	}
	if page := r.config.ForbiddenPage; page != "" {
		r.log.Debug("loading the custom sign forbidden page", zap.String("page", page))
		pages = append(pages, page)
	}
	if len(pages) == 0 {
		return nil
	}
	r.log.Info("loading the custom templates", zap.String("templates", strings.Join(pages, ",")))
	r.templates = template.Must(template.ParseFiles(pages...))
	return nil
}
// newOpenIDClient initializes the openID configuration, note: the redirection url is deliberately left blank
// in order to retrieve it from the host header on request.
// It retries provider discovery until OpenIDProviderTimeout elapses.
func (r *oauthProxy) newOpenIDClient() (*oidc.Client, oidc.ProviderConfig, *http.Client, error) {
	var err error
	var config oidc.ProviderConfig
	// step: fix up the url if required, the underlying lib will add the .well-known/openid-configuration to the discovery url for us.
	r.config.DiscoveryURL = strings.TrimSuffix(r.config.DiscoveryURL, "/.well-known/openid-configuration")
	// step: create a idp http client, optionally behind a proxy and/or with a custom CA
	var pool *x509.CertPool
	if r.config.OpenIDProviderCA != "" {
		pool, err = makeCertPool("OpenID provider", r.config.OpenIDProviderCA)
		if err != nil {
			r.log.Error("unable to read OpenIDProvider CA certificate", zap.String("path", r.config.OpenIDProviderCA), zap.Error(err))
			return nil, config, nil, err
		}
	}
	hc := &http.Client{
		Transport: &http.Transport{
			Proxy: func(_ *http.Request) (*url.URL, error) {
				if r.config.OpenIDProviderProxy != "" {
					idpProxyURL, erp := url.Parse(r.config.OpenIDProviderProxy)
					if erp != nil {
						r.log.Error("invalid proxy address for open IDP provider proxy", zap.Error(erp))
						return nil, erp
					}
					return idpProxyURL, nil
				}
				// no proxy used
				//nolint:nilnil
				return nil, nil
			},
			TLSClientConfig: &tls.Config{
				//nolint:gas
				InsecureSkipVerify: r.config.SkipOpenIDProviderTLSVerify,
				RootCAs:            pool,
			},
		},
		Timeout: time.Second * 10,
	}
	// step: attempt to retrieve the provider configuration, retrying until
	// the configured timeout. The result travels over a buffered channel
	// rather than shared variables, so the timeout path below does not race
	// with the goroutine; the goroutine also observes the same deadline so
	// it stops retrying (and cannot block forever on the send) once the
	// caller has given up — both were goroutine leaks previously.
	deadline := time.Now().Add(r.config.OpenIDProviderTimeout)
	completeCh := make(chan oidc.ProviderConfig, 1)
	go func() {
		for {
			r.log.Info("attempting to retrieve configuration discovery url",
				zap.String("url", r.config.DiscoveryURL),
				zap.String("timeout", r.config.OpenIDProviderTimeout.String()))
			cfg, ferr := oidc.FetchProviderConfig(hc, r.config.DiscoveryURL)
			if ferr == nil {
				completeCh <- cfg
				return
			}
			r.log.Warn("failed to get provider configuration from discovery", zap.Error(ferr))
			if time.Now().After(deadline) {
				// the caller has already timed out; stop retrying
				return
			}
			time.Sleep(time.Second * 3)
		}
	}()
	// wait for timeout or successful retrieval
	select {
	case <-time.After(r.config.OpenIDProviderTimeout):
		return nil, config, nil, errors.New("failed to retrieve the provider configuration from discovery url")
	case config = <-completeCh:
		r.log.Info("successfully retrieved openid configuration from the discovery")
	}
	client, err := oidc.NewClient(oidc.ClientConfig{
		Credentials: oidc.ClientCredentials{
			ID:     r.config.ClientID,
			Secret: r.config.ClientSecret,
		},
		HTTPClient:     hc,
		RedirectURL:    fmt.Sprintf("%s/oauth/callback", r.config.RedirectionURL),
		ProviderConfig: config,
		Scope:          append(r.config.Scopes, oidc.DefaultScope...),
	})
	if err != nil {
		return nil, config, hc, err
	}
	// start the provider sync for key rotation
	client.SyncProviderConfig(r.config.DiscoveryURL)
	return client, config, hc, nil
}
// Render implements the echo Render interface by executing the named
// template against data, writing the result to w.
func (r *oauthProxy) Render(w io.Writer, name string, data interface{}) error {
	tpl := r.templates
	return tpl.ExecuteTemplate(w, name, data)
}
// buildProxyTLSConfig assembles the TLS configuration used for upstream
// connections: optional insecure verification, session caching, an optional
// client certificate pool and an optional upstream CA.
func (r *oauthProxy) buildProxyTLSConfig() (*tls.Config, error) {
	//nolint:gas
	cfg := &tls.Config{
		InsecureSkipVerify: r.config.SkipUpstreamTLSVerify,
		ClientSessionCache: tls.NewLRUClientSessionCache(0),
	}
	// are we using a client certificate?
	// NOTE(review): ClientCAs/ClientAuth are server-side fields being set on
	// a config used for upstream (client-side) connections — presumably
	// intentional legacy behavior; confirm before changing.
	// @TODO provide a means to reload the client certificate when it expires. I'm not sure if it's just a
	// case of update the http transport settings - Also where to place this go-routine?
	if r.config.TLSClientCertificate != "" {
		clientPool, err := makeCertPool("client", r.config.TLSClientCertificate)
		if err != nil {
			r.log.Error("unable to read client certificate", zap.String("path", r.config.TLSClientCertificate), zap.Error(err))
			return nil, err
		}
		cfg.ClientCAs = clientPool
		cfg.ClientAuth = tls.RequireAndVerifyClientCert
	}
	// @check if we have an upstream ca to verify the upstream
	if r.config.UpstreamCA != "" {
		r.log.Info("loading the upstream ca", zap.String("path", r.config.UpstreamCA))
		caPool, err := makeCertPool("upstream CA", r.config.UpstreamCA)
		if err != nil {
			r.log.Error("unable to read upstream CA certificate", zap.String("path", r.config.UpstreamCA), zap.Error(err))
			return nil, err
		}
		cfg.RootCAs = caPool
	}
	return cfg, nil
}
// makeCertPool reads the given PEM certificate files and returns a pool
// containing all of them. who names the consumer (e.g. "client",
// "upstream CA") and is included in error messages. Errors from reading a
// file are wrapped with %w so callers can inspect them with errors.Is/As.
func makeCertPool(who string, certs ...string) (*x509.CertPool, error) {
	caCertPool := x509.NewCertPool()
	for _, cert := range certs {
		caPEMCert, err := os.ReadFile(cert)
		if err != nil {
			// wrap (not just format) the underlying error
			return nil, fmt.Errorf("cannot read cert file for %s: %q: %w", who, cert, err)
		}
		// include the offending path so multi-cert failures are diagnosable
		if ok := caCertPool.AppendCertsFromPEM(caPEMCert); !ok {
			return nil, fmt.Errorf("invalid %s PEM certificate: %q", who, cert)
		}
	}
	return caCertPool, nil
}
|
package LeetCode
import (
"fmt"
)
// Code516 prints the answer for the sample input of LeetCode problem 516.
func Code516() {
	ans := longestPalindromeSubseq("bbbab")
	fmt.Println(ans)
}
/**
给定一个字符串s,找到其中最长的回文子序列。可以假设s的最大长度为1000。
示例 1:
输入:
"bbbab"
输出:
4
一个可能的最长回文子序列为 "bbbb"。
示例 2:
输入:
"cbbd"
输出:
2
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/longest-palindromic-subsequence
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
/**
 * Note: this problem operates on strings only; the ListNode definition from
 * the standard LeetCode template is not needed here.
 */
// longestPalindromeSubseq returns the length of the longest palindromic
// SUBSEQUENCE of s (LeetCode 516).
//
// dp[i][j] holds the answer for s[i..j]; intervals are filled from short to
// long by iterating i downward. The previous version mixed in a
// longest-palindromic-SUBSTRING condition (dp[i+1][j-1] == j-i-1) and
// returned 3 for "bbbab" instead of the expected 4.
func longestPalindromeSubseq(s string) int {
	n := len(s)
	if n == 0 {
		return 0
	}
	dp := make([][]int, n)
	for i := range dp {
		dp[i] = make([]int, n)
		dp[i][i] = 1 // a single character is a palindrome of length 1
	}
	for i := n - 2; i >= 0; i-- {
		for j := i + 1; j < n; j++ {
			if s[i] == s[j] {
				// matching ends extend the best inner subsequence by 2
				// (dp[i+1][j-1] is 0 when j == i+1, giving length 2)
				dp[i][j] = dp[i+1][j-1] + 2
			} else {
				// drop one end, keep the better of the two subproblems
				dp[i][j] = dp[i+1][j]
				if dp[i][j-1] > dp[i][j] {
					dp[i][j] = dp[i][j-1]
				}
			}
		}
	}
	return dp[0][n-1]
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowcontainer
import (
"context"
"fmt"
"math"
"math/rand"
"sort"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/diskmap"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/stretchr/testify/require"
)
// Tests the de-duping functionality of DiskBackedNumberedRowContainer:
// inserting the same rows a second time must return the same indexes,
// both for the in-memory and the spilled-to-disk paths.
func TestNumberedRowContainerDeDuping(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	tempEngine, _, err := storage.NewTempEngine(ctx, base.DefaultTestTempStorageConfig(st), base.DefaultTestStoreSpec)
	if err != nil {
		t.Fatal(err)
	}
	defer tempEngine.Close()
	numRows := 20
	const numCols = 2
	// deliberately tiny budget: when chosen below it forces a spill to disk
	const smallMemoryBudget = 40
	rng, _ := randutil.NewPseudoRand()
	memoryMonitor := mon.NewMonitor(
		"test-mem",
		mon.MemoryResource,
		nil, /* curCount */
		nil, /* maxHist */
		-1, /* increment */
		math.MaxInt64, /* noteworthy */
		st,
	)
	diskMonitor := execinfra.NewTestDiskMonitor(ctx, st)
	defer diskMonitor.Stop(ctx)
	// randomly exercise either the in-memory or the spilled-to-disk path
	memoryBudget := math.MaxInt64
	if rng.Intn(2) == 0 {
		fmt.Printf("using smallMemoryBudget to spill to disk\n")
		memoryBudget = smallMemoryBudget
	}
	memoryMonitor.Start(ctx, nil, mon.MakeStandaloneBudget(int64(memoryBudget)))
	defer memoryMonitor.Stop(ctx)
	// Use random types and random rows.
	types := randgen.RandSortingTypes(rng, numCols)
	ordering := colinfo.ColumnOrdering{
		colinfo.ColumnOrderInfo{
			ColIdx:    0,
			Direction: encoding.Ascending,
		},
		colinfo.ColumnOrderInfo{
			ColIdx:    1,
			Direction: encoding.Descending,
		},
	}
	numRows, rows := makeUniqueRows(t, &evalCtx, rng, numRows, types, ordering)
	rc := NewDiskBackedNumberedRowContainer(
		true /*deDup*/, types, &evalCtx, tempEngine, memoryMonitor, diskMonitor,
	)
	defer rc.Close(ctx)
	// Each pass does an UnsafeReset at the end.
	for passWithReset := 0; passWithReset < 2; passWithReset++ {
		// Insert rows: the second insert pass must de-dup to the same indexes.
		for insertPass := 0; insertPass < 2; insertPass++ {
			for i := 0; i < numRows; i++ {
				idx, err := rc.AddRow(ctx, rows[i])
				require.NoError(t, err)
				require.Equal(t, i, idx)
			}
		}
		// Random access of the inserted rows.
		var accesses []int
		for i := 0; i < 2*numRows; i++ {
			accesses = append(accesses, rng.Intn(numRows))
		}
		rc.SetupForRead(ctx, [][]int{accesses})
		for i := 0; i < len(accesses); i++ {
			// occasionally skip a declared access to exercise that path
			skip := rng.Intn(10) == 0
			row, err := rc.GetRow(ctx, accesses[i], skip)
			require.NoError(t, err)
			if skip {
				continue
			}
			require.Equal(t, rows[accesses[i]].String(types), row.String(types))
		}
		// Reset and reorder the rows for the next pass.
		rand.Shuffle(numRows, func(i, j int) {
			rows[i], rows[j] = rows[j], rows[i]
		})
		require.NoError(t, rc.UnsafeReset(ctx))
	}
}
// Tests the iterator and iterator caching of DiskBackedNumberedRowContainer.
// Does not utilize the de-duping functionality since that is tested
// elsewhere. The container is explicitly spilled to disk so all of the
// memory budget is available to the iterator cache.
func TestNumberedRowContainerIteratorCaching(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	tempEngine, _, err := storage.NewTempEngine(ctx, base.DefaultTestTempStorageConfig(st), base.DefaultTestStoreSpec)
	if err != nil {
		t.Fatal(err)
	}
	defer tempEngine.Close()
	memoryMonitor := mon.NewMonitor(
		"test-mem",
		mon.MemoryResource,
		nil, /* curCount */
		nil, /* maxHist */
		-1, /* increment */
		math.MaxInt64, /* noteworthy */
		st,
	)
	diskMonitor := execinfra.NewTestDiskMonitor(ctx, st)
	defer diskMonitor.Stop(ctx)
	numRows := 200
	const numCols = 2
	// This memory budget allows for some caching, but typically cannot
	// cache all the rows.
	const memoryBudget = 12000
	memoryMonitor.Start(ctx, nil, mon.MakeStandaloneBudget(memoryBudget))
	defer memoryMonitor.Stop(ctx)
	// Use random types and random rows.
	rng, _ := randutil.NewPseudoRand()
	types := randgen.RandSortingTypes(rng, numCols)
	ordering := colinfo.ColumnOrdering{
		colinfo.ColumnOrderInfo{
			ColIdx:    0,
			Direction: encoding.Ascending,
		},
		colinfo.ColumnOrderInfo{
			ColIdx:    1,
			Direction: encoding.Descending,
		},
	}
	numRows, rows := makeUniqueRows(t, &evalCtx, rng, numRows, types, ordering)
	rc := NewDiskBackedNumberedRowContainer(
		false /*deDup*/, types, &evalCtx, tempEngine, memoryMonitor, diskMonitor,
	)
	defer rc.Close(ctx)
	// Each pass does an UnsafeReset at the end.
	for passWithReset := 0; passWithReset < 2; passWithReset++ {
		// Insert rows.
		for i := 0; i < numRows; i++ {
			idx, err := rc.AddRow(ctx, rows[i])
			require.NoError(t, err)
			require.Equal(t, i, idx)
		}
		// We want all the memory to be usable by the cache, so spill to disk.
		require.NoError(t, rc.testingSpillToDisk(ctx))
		require.True(t, rc.UsingDisk())
		// Random access of the inserted rows: batches of 4 accesses each.
		var accesses [][]int
		for i := 0; i < 2*numRows; i++ {
			var access []int
			for j := 0; j < 4; j++ {
				access = append(access, rng.Intn(numRows))
			}
			accesses = append(accesses, access)
		}
		rc.SetupForRead(ctx, accesses)
		for _, access := range accesses {
			for _, index := range access {
				// occasionally skip a declared access to exercise that path
				skip := rng.Intn(10) == 0
				row, err := rc.GetRow(ctx, index, skip)
				require.NoError(t, err)
				if skip {
					continue
				}
				require.Equal(t, rows[index].String(types), row.String(types))
			}
		}
		// report the iterator cache effectiveness for manual inspection
		fmt.Printf("hits: %d, misses: %d, maxCacheSize: %d\n",
			rc.rowIter.hitCount, rc.rowIter.missCount, rc.rowIter.maxCacheSize)
		// Reset and reorder the rows for the next pass.
		rand.Shuffle(numRows, func(i, j int) {
			rows[i], rows[j] = rows[j], rows[i]
		})
		require.NoError(t, rc.UnsafeReset(ctx))
	}
}
// Tests that the DiskBackedNumberedRowContainer and
// DiskBackedIndexedRowContainer return the same results for identical
// insert/access sequences, with and without spilling to disk.
func TestCompareNumberedAndIndexedRowContainers(t *testing.T) {
	defer leaktest.AfterTest(t)()
	rng, _ := randutil.NewPseudoRand()
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	tempEngine, _, err := storage.NewTempEngine(ctx, base.DefaultTestTempStorageConfig(st), base.DefaultTestStoreSpec)
	if err != nil {
		t.Fatal(err)
	}
	defer tempEngine.Close()
	diskMonitor := execinfra.NewTestDiskMonitor(ctx, st)
	defer diskMonitor.Stop(ctx)
	numRows := 200
	const numCols = 2
	// This memory budget allows for some caching, but typically cannot
	// cache all the rows. Randomly lift it to unlimited to also cover the
	// no-spill path.
	var memoryBudget int64 = 12000
	if rng.Intn(2) == 0 {
		memoryBudget = math.MaxInt64
	}
	// Use random types and random rows.
	types := randgen.RandSortingTypes(rng, numCols)
	ordering := colinfo.ColumnOrdering{
		colinfo.ColumnOrderInfo{
			ColIdx:    0,
			Direction: encoding.Ascending,
		},
		colinfo.ColumnOrderInfo{
			ColIdx:    1,
			Direction: encoding.Descending,
		},
	}
	numRows, rows := makeUniqueRows(t, &evalCtx, rng, numRows, types, ordering)
	// containers[0] is the indexed container, containers[1] the numbered one
	var containers [2]numberedContainer
	containers[0] = makeNumberedContainerUsingIRC(
		ctx, t, types, &evalCtx, tempEngine, st, memoryBudget, diskMonitor)
	containers[1] = makeNumberedContainerUsingNRC(
		ctx, t, types, &evalCtx, tempEngine, st, memoryBudget, diskMonitor)
	defer func() {
		for _, rc := range containers {
			rc.close(ctx)
		}
	}()
	// Each pass does an UnsafeReset at the end.
	for passWithReset := 0; passWithReset < 2; passWithReset++ {
		// Insert rows.
		for i := 0; i < numRows; i++ {
			for _, rc := range containers {
				err := rc.addRow(ctx, rows[i])
				require.NoError(t, err)
			}
		}
		// We want all the memory to be usable by the cache, so spill to disk.
		if memoryBudget != math.MaxInt64 {
			for _, rc := range containers {
				require.NoError(t, rc.spillToDisk(ctx))
			}
		}
		// Random access of the inserted rows: batches of 4 accesses each.
		var accesses [][]int
		for i := 0; i < 2*numRows; i++ {
			var access []int
			for j := 0; j < 4; j++ {
				access = append(access, rng.Intn(numRows))
			}
			accesses = append(accesses, access)
		}
		for _, rc := range containers {
			rc.setupForRead(ctx, accesses)
		}
		for _, access := range accesses {
			for _, index := range access {
				skip := rng.Intn(10) == 0
				// NB: shadows the outer rows slice; holds one fetch per container
				var rows [2]rowenc.EncDatumRow
				for i, rc := range containers {
					row, err := rc.getRow(ctx, index, skip)
					require.NoError(t, err)
					rows[i] = row
				}
				if skip {
					continue
				}
				// both containers must agree on the row contents
				require.Equal(t, rows[0].String(types), rows[1].String(types))
			}
		}
		// Reset and reorder the rows for the next pass.
		rand.Shuffle(numRows, func(i, j int) {
			rows[i], rows[j] = rows[j], rows[i]
		})
		for _, rc := range containers {
			require.NoError(t, rc.unsafeReset(ctx))
		}
	}
}
// Adapter interface that can be implemented using both DiskBackedNumberedRowContainer
// and DiskBackedIndexedRowContainer, so the comparison tests can drive both
// through a single code path.
type numberedContainer interface {
	// addRow appends a row to the container.
	addRow(context.Context, rowenc.EncDatumRow) error
	// setupForRead declares the upcoming access pattern (may be a no-op).
	setupForRead(ctx context.Context, accesses [][]int)
	// getRow fetches the row at idx; when skip is true the returned row may be nil.
	getRow(ctx context.Context, idx int, skip bool) (rowenc.EncDatumRow, error)
	// spillToDisk forces the container onto disk.
	spillToDisk(context.Context) error
	// unsafeReset clears the container for reuse.
	unsafeReset(context.Context) error
	// close releases the container's resources.
	close(context.Context)
}
// numberedContainerUsingNRC adapts DiskBackedNumberedRowContainer to the
// numberedContainer test interface.
type numberedContainerUsingNRC struct {
	rc            *DiskBackedNumberedRowContainer
	memoryMonitor *mon.BytesMonitor
}

// addRow appends a row, discarding the assigned index.
func (d numberedContainerUsingNRC) addRow(ctx context.Context, row rowenc.EncDatumRow) error {
	_, err := d.rc.AddRow(ctx, row)
	return err
}

// setupForRead forwards the planned access pattern to the container.
func (d numberedContainerUsingNRC) setupForRead(ctx context.Context, accesses [][]int) {
	d.rc.SetupForRead(ctx, accesses)
}

// getRow reads the row at idx. The skip flag is forwarded to the container
// (it was previously hard-coded to false, which meant the container's skip
// bookkeeping never matched the access pattern declared via setupForRead).
func (d numberedContainerUsingNRC) getRow(
	ctx context.Context, idx int, skip bool,
) (rowenc.EncDatumRow, error) {
	return d.rc.GetRow(ctx, idx, skip)
}

// spillToDisk forces the container onto disk via the testing hook.
func (d numberedContainerUsingNRC) spillToDisk(ctx context.Context) error {
	return d.rc.testingSpillToDisk(ctx)
}

// unsafeReset clears the container for reuse.
func (d numberedContainerUsingNRC) unsafeReset(ctx context.Context) error {
	return d.rc.UnsafeReset(ctx)
}

// close releases the container and stops its memory monitor.
func (d numberedContainerUsingNRC) close(ctx context.Context) {
	d.rc.Close(ctx)
	d.memoryMonitor.Stop(ctx)
}
// makeNumberedContainerUsingNRC builds an adapter backed by a
// DiskBackedNumberedRowContainer that is immediately spilled to disk.
func makeNumberedContainerUsingNRC(
	ctx context.Context,
	t testing.TB,
	types []*types.T,
	evalCtx *tree.EvalContext,
	engine diskmap.Factory,
	st *cluster.Settings,
	memoryBudget int64,
	diskMonitor *mon.BytesMonitor,
) numberedContainerUsingNRC {
	monitor := makeMemMonitorAndStart(ctx, st, memoryBudget)
	container := NewDiskBackedNumberedRowContainer(
		false /* deDup */, types, evalCtx, engine, monitor, diskMonitor)
	require.NoError(t, container.testingSpillToDisk(ctx))
	return numberedContainerUsingNRC{rc: container, memoryMonitor: monitor}
}
// numberedContainerUsingIRC adapts DiskBackedIndexedRowContainer to the
// numberedContainer test interface.
type numberedContainerUsingIRC struct {
	rc            *DiskBackedIndexedRowContainer
	memoryMonitor *mon.BytesMonitor
}

func (d numberedContainerUsingIRC) addRow(ctx context.Context, row rowenc.EncDatumRow) error {
	return d.rc.AddRow(ctx, row)
}

// setupForRead is a no-op: the indexed container takes no access-pattern hints.
func (d numberedContainerUsingIRC) setupForRead(context.Context, [][]int) {}

func (d numberedContainerUsingIRC) getRow(
	ctx context.Context, idx int, skip bool,
) (rowenc.EncDatumRow, error) {
	if skip {
		return nil, nil
	}
	indexed, err := d.rc.GetRow(ctx, idx)
	if err != nil {
		return nil, err
	}
	return indexed.(IndexedRow).Row, nil
}

func (d numberedContainerUsingIRC) spillToDisk(ctx context.Context) error {
	// spilling twice is an error, so only spill when still in memory
	if !d.rc.UsingDisk() {
		return d.rc.SpillToDisk(ctx)
	}
	return nil
}

func (d numberedContainerUsingIRC) unsafeReset(ctx context.Context) error {
	return d.rc.UnsafeReset(ctx)
}

func (d numberedContainerUsingIRC) close(ctx context.Context) {
	d.rc.Close(ctx)
	d.memoryMonitor.Stop(ctx)
}
// makeNumberedContainerUsingIRC builds an adapter backed by a
// DiskBackedIndexedRowContainer that is immediately spilled to disk.
func makeNumberedContainerUsingIRC(
	ctx context.Context,
	t require.TestingT,
	types []*types.T,
	evalCtx *tree.EvalContext,
	engine diskmap.Factory,
	st *cluster.Settings,
	memoryBudget int64,
	diskMonitor *mon.BytesMonitor,
) numberedContainerUsingIRC {
	monitor := makeMemMonitorAndStart(ctx, st, memoryBudget)
	container := NewDiskBackedIndexedRowContainer(
		nil /* ordering */, types, evalCtx, engine, monitor, diskMonitor)
	require.NoError(t, container.SpillToDisk(ctx))
	return numberedContainerUsingIRC{rc: container, memoryMonitor: monitor}
}
// makeMemMonitorAndStart constructs a test memory monitor with the given
// budget and starts it as a standalone monitor.
func makeMemMonitorAndStart(
	ctx context.Context, st *cluster.Settings, budget int64,
) *mon.BytesMonitor {
	m := mon.NewMonitor(
		"test-mem",
		mon.MemoryResource,
		nil, /* curCount */
		nil, /* maxHist */
		-1, /* increment */
		math.MaxInt64, /* noteworthy */
		st,
	)
	m.Start(ctx, nil, mon.MakeStandaloneBudget(budget))
	return m
}
// Assume that join is using a batch of 100 left rows.
const leftRowsBatch = 100
// repeatAccesses is the number of times on average that each right row is accessed.
func generateLookupJoinAccessPattern(
rng *rand.Rand, rightRowsReadPerLeftRow int, repeatAccesses int,
) [][]int {
// Unique rows accessed.
numRowsAccessed := (leftRowsBatch * rightRowsReadPerLeftRow) / repeatAccesses
out := make([][]int, leftRowsBatch)
for i := 0; i < len(out); i++ {
// Each left row sees a contiguous sequence of rows on the right since the
// rows are being retrieved and stored in the container in index order.
start := rng.Intn(numRowsAccessed - rightRowsReadPerLeftRow)
out[i] = make([]int, rightRowsReadPerLeftRow)
for j := start; j < start+rightRowsReadPerLeftRow; j++ {
out[i][j-start] = j
}
}
return out
}
// generateInvertedJoinAccessPattern models an inverted join's reads.
// numRightRows is the number of rows in the container, of which a certain
// fraction of rows are accessed randomly (when using an inverted index for
// intersection the result set can be sparse).
// repeatAccesses is the number of times on average that each right row is accessed.
func generateInvertedJoinAccessPattern(
	b *testing.B, rng *rand.Rand, numRightRows int, rightRowsReadPerLeftRow int, repeatAccesses int,
) [][]int {
	// total distinct right rows that will ever be touched
	numRowsAccessed := (leftRowsBatch * rightRowsReadPerLeftRow) / repeatAccesses
	// Don't want each left row to access most of the right rows.
	require.True(b, rightRowsReadPerLeftRow < numRowsAccessed/2)
	// draw the accessed subset of right-row indexes
	chosen := make(map[int]struct{})
	for len(chosen) < numRowsAccessed {
		chosen[rng.Intn(numRightRows)] = struct{}{}
	}
	accessedRightRows := make([]int, 0, numRowsAccessed)
	for k := range chosen {
		accessedRightRows = append(accessedRightRows, k)
	}
	out := make([][]int, leftRowsBatch)
	for i := range out {
		out[i] = make([]int, 0, rightRowsReadPerLeftRow)
		seen := make(map[int]struct{})
		for len(seen) < rightRowsReadPerLeftRow {
			idx := rng.Intn(len(accessedRightRows))
			if _, dup := seen[idx]; dup {
				continue
			}
			seen[idx] = struct{}{}
			out[i] = append(out[i], accessedRightRows[idx])
		}
		// accesses by a single left row arrive in ascending order
		sort.Slice(out[i], func(x, y int) bool {
			return out[i][x] < out[i][y]
		})
	}
	return out
}
func accessPatternForBenchmarkIterations(totalAccesses int, accessPattern [][]int) [][]int {
var out [][]int
var i, j int
for count := 0; count < totalAccesses; {
if i >= len(accessPattern) {
i = 0
continue
}
if j >= len(accessPattern[i]) {
j = 0
i++
continue
}
if j == 0 {
out = append(out, []int(nil))
}
last := len(out) - 1
out[last] = append(out[last], accessPattern[i][j])
count++
j++
}
return out
}
// BenchmarkNumberedContainerIteratorCaching measures read performance of the
// indexed vs numbered row containers under lookup-join and inverted-join
// access patterns, across several cache memory budgets.
func BenchmarkNumberedContainerIteratorCaching(b *testing.B) {
	const numRows = 10000
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	tempEngine, _, err := storage.NewTempEngine(ctx, base.TempStorageConfig{InMemory: true}, base.DefaultTestStoreSpec)
	if err != nil {
		b.Fatal(err)
	}
	defer tempEngine.Close()
	diskMonitor := execinfra.NewTestDiskMonitor(ctx, st)
	defer diskMonitor.Stop(ctx)
	// Each row is 10 string columns. Each string has a mean length of 5, and the
	// row encoded into bytes is ~64 bytes. So we approximate ~512 rows per ssblock.
	// The in-memory decoded footprint in the cache is ~780 bytes.
	var typs []*types.T
	for i := 0; i < 10; i++ {
		typs = append(typs, types.String)
	}
	rng, _ := randutil.NewPseudoRand()
	// Generate the numRows random rows once; they are shared by all sub-benchmarks.
	rows := make([]rowenc.EncDatumRow, numRows)
	for i := 0; i < numRows; i++ {
		rows[i] = make([]rowenc.EncDatum, len(typs))
		for j := range typs {
			rows[i][j] = rowenc.DatumToEncDatum(typs[j], randgen.RandDatum(rng, typs[j], false))
		}
	}
	// accessPattern pairs a human-readable parameter description with the
	// per-left-row list of right-row indexes to read.
	type accessPattern struct {
		joinType string
		paramStr string
		accesses [][]int
	}
	var accessPatterns []accessPattern
	// Lookup join access patterns. The highest number of unique rows accessed is
	// when rightRowsReadPerLeftRow = 64 and repeatAccesses = 1, which with a left
	// batch of 100 is 100 * 64 / 1 = 6400 rows accessed. The container has
	// 10000 rows. If N unique rows are accessed these form a prefix of the rows
	// in the container.
	for _, rightRowsReadPerLeftRow := range []int{1, 2, 4, 8, 16, 32, 64} {
		for _, repeatAccesses := range []int{1, 2} {
			accessPatterns = append(accessPatterns, accessPattern{
				joinType: "lookup-join",
				paramStr: fmt.Sprintf("matchRatio=%d/repeatAccesses=%d",
					rightRowsReadPerLeftRow, repeatAccesses),
				accesses: generateLookupJoinAccessPattern(rng, rightRowsReadPerLeftRow, repeatAccesses),
			})
		}
	}
	// Inverted join access patterns.
	// With a left batch of 100 rows, and rightRowsReadPerLeftRow = (25, 50, 100), the
	// total accesses are (2500, 5000, 10000). Consider repeatAccesses = 2: the unique
	// rows accessed are (1250, 2500, 5000), which will be randomly distributed over the
	// 10000 rows.
	for _, rightRowsReadPerLeftRow := range []int{1, 25, 50, 100} {
		for _, repeatAccesses := range []int{1, 2, 4, 8} {
			accessPatterns = append(accessPatterns, accessPattern{
				joinType: "inverted-join",
				paramStr: fmt.Sprintf("matchRatio=%d/repeatAccesses=%d",
					rightRowsReadPerLeftRow, repeatAccesses),
				accesses: generateInvertedJoinAccessPattern(
					b, rng, numRows, rightRowsReadPerLeftRow, repeatAccesses),
			})
		}
	}
	// Observed cache behavior for a particular access pattern for each kind of
	// join, to give some insight into performance.
	// - The inverted join pattern has poor locality and the IndexedRowContainer
	//   does poorly. The NumberedRowContainer is able to use the knowledge that
	//   many rows will never be accessed.
	//                        11000  100KB  500KB  2.5MB
	//   IndexedRowContainer   0.00   0.00   0.00   0.00
	//   NumberedRowContainer  0.22   0.68   0.88   1.00
	// - The lookup join access pattern and observed hit rates. The better
	//   locality improves the behavior of the IndexedRowContainer, but it
	//   is still significantly worse than the NumberedRowContainer.
	//                        11000  100KB  500KB  2.5MB
	//   IndexedRowContainer   0.00   0.00   0.10   0.35
	//   NumberedRowContainer  0.01   0.09   0.28   0.63
	for _, pattern := range accessPatterns {
		// Approx cache capacity in rows with these settings: 13, 132, 666, 3300.
		for _, memoryBudget := range []int64{11000, 100 << 10, 500 << 10, 2500 << 10} {
			for _, containerKind := range []string{"indexed", "numbered"} {
				b.Run(fmt.Sprintf("%s/%s/mem=%d/%s", pattern.joinType, pattern.paramStr, memoryBudget,
					containerKind), func(b *testing.B) {
					var nc numberedContainer
					switch containerKind {
					case "indexed":
						nc = makeNumberedContainerUsingIRC(
							ctx, b, typs, &evalCtx, tempEngine, st, memoryBudget, diskMonitor)
					case "numbered":
						nc = makeNumberedContainerUsingNRC(
							ctx, b, typs, &evalCtx, tempEngine, st, memoryBudget, diskMonitor)
					}
					defer nc.close(ctx)
					// Populate the container with all rows before timing reads.
					for i := 0; i < len(rows); i++ {
						require.NoError(b, nc.addRow(ctx, rows[i]))
					}
					accesses := accessPatternForBenchmarkIterations(b.N, pattern.accesses)
					b.ResetTimer()
					nc.setupForRead(ctx, accesses)
					for i := 0; i < len(accesses); i++ {
						for j := 0; j < len(accesses[i]); j++ {
							if _, err := nc.getRow(ctx, accesses[i][j], false /* skip */); err != nil {
								b.Fatal(err)
							}
						}
					}
					b.StopTimer()
					// Disabled code block. Change to true to look at hit ratio and cache sizes
					// for these benchmarks.
					if false {
						// Print statements for understanding the performance differences.
						fmt.Printf("\n**%s/%s/%d/%s: iters: %d\n", pattern.joinType, pattern.paramStr, memoryBudget, containerKind, b.N)
						switch rc := nc.(type) {
						case numberedContainerUsingNRC:
							fmt.Printf("hit rate: %.2f, maxCacheSize: %d\n",
								float64(rc.rc.rowIter.hitCount)/float64(rc.rc.rowIter.missCount+rc.rc.rowIter.hitCount),
								rc.rc.rowIter.maxCacheSize)
						case numberedContainerUsingIRC:
							fmt.Printf("hit rate: %.2f, maxCacheSize: %d\n",
								float64(rc.rc.hitCount)/float64(rc.rc.missCount+rc.rc.hitCount),
								rc.rc.maxCacheSize)
						}
					}
				})
			}
		}
	}
}
// TODO(sumeer):
// - Benchmarks:
// - de-duping with and without spilling.
|
package array
func removeDuplicates(nums []int) int {
if nums == nil || len(nums) == 0 {
return 0
}
fis := 0
las := 1
for las != len(nums) {
if nums[fis] != nums[las] {
fis++
nums[fis] = nums[las]
}
las++
}
return fis + 1
}
|
package main
import (
"io"
"os"
"sort"
"github.com/yuyamada/atcoder/lib"
)
// main reads the problem input from stdin and writes the answer to stdout.
func main() {
	solve(os.Stdin, os.Stdout)
}
// solve reads the stage dimensions and rectangle coordinates from reader and
// prints the number of connected uncovered regions to writer.
func solve(reader io.Reader, writer io.Writer) {
	// Named "in" instead of "io" to avoid shadowing the stdlib io package.
	in := lib.NewIo(reader, writer)
	defer in.Flush()
	w, h, n := in.NextInt(), in.NextInt(), in.NextInt()
	x1 := in.NextInts(n)
	x2 := in.NextInts(n)
	y1 := in.NextInts(n)
	y2 := in.NextInts(n)
	in.Println(solver(w, h, x1, x2, y1, y2))
}
// solver counts the connected components of cells not covered by any
// rectangle, on a coordinate-compressed stage.
func solver(w, h int, x1, x2, y1, y2 []int) (ans int) {
	stage := createStage(w, h, x1, x2, y1, y2)
	for i, row := range stage {
		for j, cell := range row {
			if cell == 1 {
				continue
			}
			// An unvisited empty cell starts a new region; flood fill marks
			// the entire region as visited.
			fill(&stage, i, j)
			ans++
		}
	}
	return ans
}
// createStage builds a coordinate-compressed grid in which cells covered by
// any rectangle are marked 1.
func createStage(w, h int, x1, x2, y1, y2 []int) [][]int {
	mx := compress(append(x1, x2...), 1, w)
	my := compress(append(y1, y2...), 1, h)
	stage := make([][]int, len(mx))
	for i := range stage {
		stage[i] = make([]int, len(my))
	}
	for i := range x1 {
		cx1, cy1 := mx[x1[i]], my[y1[i]]
		cx2, cy2 := mx[x2[i]], my[y2[i]]
		// Normalize so (cx1, cy1) is the lower corner of the rectangle.
		if cx2 < cx1 {
			cx1, cx2 = cx2, cx1
		}
		if cy2 < cy1 {
			cy1, cy2 = cy2, cy1
		}
		for x := cx1; x <= cx2; x++ {
			for y := cy1; y <= cy2; y++ {
				stage[x][y] = 1
			}
		}
	}
	return stage
}
func compress(values []int, min, max int) map[int]int {
mp := map[int]int{}
sort.Ints(values)
for _, v := range values {
for dv := -1; dv <= 1; dv++ {
if v+dv < min || max < v+dv {
continue
}
if _, ok := mp[v+dv]; ok {
continue
}
mp[v+dv] = len(mp)
}
}
return mp
}
var dx []int = []int{-1, 1, 0, 0}
var dy []int = []int{0, 0, -1, 1}
func fill(stage *[][]int, x, y int) {
w, h := len(*stage), len((*stage)[0])
(*stage)[x][y] = 1
for i := range dx {
if x+dx[i] < 0 || w <= x+dx[i] || y+dy[i] < 0 || h <= y+dy[i] {
continue
}
if (*stage)[x+dx[i]][y+dy[i]] == 1 {
continue
}
fill(stage, x+dx[i], y+dy[i])
}
}
|
package zombie_driver
// DriverID represents a driver identifier.
type DriverID int

// Status represents driver status based on records in database.
type Status struct {
	ID     DriverID `json:"id"`
	Zombie bool     `json:"zombie"`
}

// Client creates a connection to the services.
type Client interface {
	// Connect establishes the connection and returns a service handle.
	Connect() CabService
}

// CabService represents a service for managing requests.
type CabService interface {
	// CheckZombieStatus returns the zombie status for the driver with the
	// given id, or an error if the lookup fails.
	CheckZombieStatus(id string) (*Status, error)
}
|
package delete
import (
"encoding/json"
"fmt"
"net/http"
"github.com/ocoscope/face/db"
"github.com/ocoscope/face/utils"
"github.com/ocoscope/face/utils/answer"
)
// User handles an HTTP request to delete a user from a company. The caller
// must present a valid access token and hold role 1 (presumably
// administrator — TODO confirm against the role table).
func User(w http.ResponseWriter, r *http.Request) {
	// Expected JSON request body.
	type tbody struct {
		CompanyID, UserID, DeleteUserID int64
		AccessToken                     string
	}
	var body tbody
	err := json.NewDecoder(r.Body).Decode(&body)
	if err != nil {
		utils.Message(w, "Не правильные данные", 400)
		return
	}
	database, err := db.CopmanyDB(body.CompanyID)
	if err != nil {
		utils.Message(w, "Не удалось подключится к базе данных", 400)
		return
	}
	defer database.Close()
	err = db.CheckUserAccessToken(database, body.UserID, body.AccessToken)
	if err != nil {
		utils.Message(w, "Не правильный ключ", 401)
		return
	}
	// Only role 1 may delete users.
	userRoleID, err := db.GetUserRoleID(database, body.UserID)
	if err != nil || userRoleID != 1 {
		utils.Message(w, answer.ACCESS_DENIED, 400)
		return
	}
	// Role 3 appears to mean "already dismissed" — such users cannot be
	// deleted (TODO confirm role semantics).
	deleteUserRoleID, err := db.GetUserRoleID(database, body.DeleteUserID)
	if err != nil || deleteUserRoleID == 3 {
		utils.Message(w, "Нельзя удалить уовленного сотрудника", 400)
		return
	}
	// Remove the user from employee lists of any departments.
	// NOTE(review): the result of this call is discarded — confirm it cannot fail.
	db.DeleteEmployeeByUser(database, body.DeleteUserID)
	// Remove the user from department leadership positions.
	// NOTE(review): result discarded here as well.
	db.UpdateDepartmentLeader(database, body.UserID, body.DeleteUserID)
	err = db.DeleteUser(database, body.DeleteUserID)
	if err != nil {
		fmt.Println(err)
		utils.Message(w, answer.FR, 400)
		return
	}
	utils.MessageJson(w, answer.SR, 200)
}
|
/*
@Time : 2019/9/16 17:57
@Author : zxr
@File : date
@Software: GoLand
*/
package tools
import (
"fmt"
"time"
)
func GetCurrentUnix() int64 {
var cstSh, _ = time.LoadLocation("Asia/Shanghai") //上海
now := time.Now().In(cstSh)
formStr := fmt.Sprintf("%d-%02d-%02d %02d:%02d:%02d", now.Year(), now.Month(), now.Day(), 0, 0, 0)
parse, _ := time.ParseInLocation("2006-01-02 15:04:05", formStr, cstSh)
return parse.Unix()
}
|
package deltal
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"sync"
"testing"
)
// cryptTest describes one encode/decode round-trip case: input file, encoded
// output file, password, and whether a checksum is embedded.
type cryptTest struct {
	In, Out, Pass string
	Check         bool
}

// Round-trip cases covering every password/checksum combination.
var (
	crypting = []cryptTest{
		{"encoder.go", "../test/encoder.go.pw.delta", "pw", true},
		{"encoder.go", "../test/encoder.go.delta", "", true},
		{"encoder.go", "../test/encoder.go.nocheck.delta", "", false},
		{"encoder.go", "../test/encoder.go.pw.nocheck.delta", "pw", false},
		{"old/preReaderDelta.txt", "../test/preReaderDelta.txt.pw.delta", "pw", true},
		{"old/preReaderDelta.txt", "../test/preReaderDelta.txt.delta", "", true},
		{"old/preReaderDelta.txt", "../test/preReaderDelta.txt.nocheck.delta", "", false},
		{"old/preReaderDelta.txt", "../test/preReaderDelta.txt.pw.nocheck.delta", "pw", false},
	}
)

// String renders a human-readable description of the test case.
func (ct *cryptTest) String() string {
	return fmt.Sprintf("Encoder: %s <-> %s using PW: \"%s\" Checksum: %v", ct.In, ct.Out, ct.Pass, ct.Check)
}
// TestEncoder round-trips every case in crypting through the encoder and
// decoder, writes the artifacts under ../test, and verifies each decrypted
// file matches its source.
func TestEncoder(t *testing.T) {
	os.Mkdir("../test", os.ModePerm)
	writtenFiles := make(map[string][]string)
	for _, v := range crypting {
		fmt.Println(v.String())
		// Encryption
		filein, err := os.Open(v.In)
		check(err, t)
		encoder, err := NewEncoderReader(filein, v.Pass, v.Check)
		check(err, t)
		encryptedData, err := ioutil.ReadAll(encoder)
		check(err, t)
		ioutil.WriteFile(v.Out, encryptedData, os.ModePerm)
		// Decryption
		cryptin, err := os.Open(v.Out)
		check(err, t)
		decoder := NewDecoderStream(cryptin, v.Pass)
		decryptedData, err := ioutil.ReadAll(decoder)
		check(err, t)
		decryptOut := v.Out + ".testdec"
		writtenFiles[v.In] = append(writtenFiles[v.In], decryptOut)
		ioutil.WriteFile(decryptOut, decryptedData, os.ModePerm)
		// Testing Seeker
		fmt.Println(decoder)
		decoder.Seek(0, 0)
	}
	wg := new(sync.WaitGroup)
	for source, decrypts := range writtenFiles {
		sourceData, _ := ioutil.ReadFile(source)
		for _, file := range decrypts {
			wg.Add(1)
			// Pass the loop variables as arguments: before Go 1.22 the
			// closure captured the shared iteration variables, so every
			// goroutine could end up comparing against the last file.
			go func(source, file string, sourceData []byte) {
				defer wg.Done()
				decrpytData, _ := ioutil.ReadFile(file)
				if bytes.Equal(sourceData, decrpytData) {
					fmt.Println("Successfull", source, file)
				} else {
					fmt.Println("Failed", source, file)
					t.Fail()
				}
			}(source, file, sourceData)
		}
	}
	wg.Wait()
}
func TestCompressEncryption(t *testing.T) {
reader, _ := os.Open("old/preReaderDelta.txt")
encoder, _ := NewCompressedEncoderReader(reader, "pass", true)
output, _ := os.Create("../test/compressed.txt.gz.delta")
io.Copy(output, encoder)
}
// BenchmarkEncryptNoPW measures encryption throughput with no password.
func BenchmarkEncryptNoPW(b *testing.B) {
	bench, f := benchEncrypter("../test/bench.txt", "")
	defer f.Close()
	for i := 0; i < b.N; i++ {
		bench()
	}
}
// BenchmarkEncryptPW measures encryption throughput with a password.
func BenchmarkEncryptPW(b *testing.B) {
	bench, f := benchEncrypter("../test/bench.txt", "password")
	defer f.Close()
	for i := 0; i < b.N; i++ {
		bench()
	}
}
// BenchmarkDecryptNoPW measures decryption throughput with no password.
func BenchmarkDecryptNoPW(b *testing.B) {
	bench, f := benchDecrypter("../test/bench.txt", "")
	defer f.Close()
	for i := 0; i < b.N; i++ {
		bench()
	}
}
// BenchmarkDecryptPW measures decryption throughput with a password.
func BenchmarkDecryptPW(b *testing.B) {
	bench, f := benchDecrypter("../test/bench.txt", "password")
	defer f.Close()
	for i := 0; i < b.N; i++ {
		bench()
	}
}
// check marks the test as failed and logs err when err is non-nil. It calls
// t.Helper() so that failures are attributed to the caller's line rather
// than to this helper.
func check(err error, t *testing.T) {
	t.Helper()
	if err != nil {
		t.Fail()
		fmt.Println(err)
	}
}
|
package main
import (
"local/crypto-api/auth"
"net/http"
"local/crypto-api/crypto"
"github.com/gin-gonic/gin"
)
// main wires up the HTTP API — a public login endpoint and a token-protected
// crypto group — and starts the server on :8000.
func main() {
	r := gin.Default()
	// Register CORS before any routes: gin middleware only applies to routes
	// added after the Use call, so the original post-route registration left
	// every endpoint without CORS handling.
	r.Use(CORSMiddleware())
	r.POST("/api/login", auth.LoginEndpoint)
	cryptoGroup := r.Group("/api/crypto")
	cryptoGroup.Use(AuthMidleware())
	{
		cryptoGroup.GET("/btc", crypto.GetCryptoEndpoint)
		cryptoGroup.POST("/btc", crypto.UpdateCryptoEndpoint)
	}
	r.Run(":8000")
}
// AuthMidleware checks if the used token is valid
// (NOTE(review): name is missing a "d" — "AuthMiddleware" — but renaming
// would break callers, so it is kept).
func AuthMidleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		if err := auth.ValidateToken(c.Request); err != nil {
			c.JSON(http.StatusUnauthorized, gin.H{"message": "Token inválido"})
			c.Abort()
			return
		}
		c.Next()
	}
}
// CORSMiddleware allows CORS requests
func CORSMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		h := c.Writer.Header()
		h.Set("Content-Type", "application/json")
		h.Set("Access-Control-Allow-Origin", c.Request.Header.Get("Origin"))
		h.Set("Access-Control-Max-Age", "86400")
		h.Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE, UPDATE")
		h.Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, X-Max")
		h.Set("Access-Control-Allow-Credentials", "true")
		// Preflight requests are answered immediately.
		if c.Request.Method == "OPTIONS" {
			c.AbortWithStatus(200)
			return
		}
		c.Next()
	}
}
|
package main
import (
	"bytes"
	"encoding/binary"
	"io"
	"net"
	"net/url"
	"regexp"
	"sort"
)
func encodeSize(message []byte) []byte {
size := make([]byte, 2)
binary.LittleEndian.PutUint16(size, uint16(len(message)))
return size
}
func uwsgiPack(path, query, host, remoteAddr string, modifier1 int) []byte {
if path == "" {
path = "/"
}
uwsgiParams := map[string]string{
"SERVER_PROTOCOL": "HTTP/1.1",
"REQUEST_METHOD": "GET",
"PATH_INFO": path,
"REQUEST_URI": path,
"QUERY_STRING": query,
"SERVER_NAME": host,
"HTTP_HOST": host,
"REMOTE_ADDR": remoteAddr,
}
var params []byte
for k, v := range uwsgiParams {
bytesKey, bytesValue := []byte(k), []byte(v)
params = append(append(params, encodeSize(bytesKey)...), bytesKey...)
params = append(append(params, encodeSize(bytesValue)...), bytesValue...)
}
return append(append(append([]byte{byte(modifier1)}, encodeSize(params)...), 0), params...)
}
// get sends a uwsgi-packed GET request for url over TCP and returns the raw
// response bytes. On connection or write failure it returns the sentinel
// "No connection".
func get(url *url.URL, httpHost, remoteAddr string, modifier1 int) []byte {
	uwsgiRequest := uwsgiPack(url.Path, url.RawQuery, httpHost, remoteAddr, modifier1)
	conn, err := net.Dial("tcp", url.Host)
	if err != nil {
		return []byte("No connection")
	}
	// Register the defer immediately after the successful Dial so the
	// connection cannot leak on any return path.
	defer conn.Close()
	if _, err := conn.Write(uwsgiRequest); err != nil {
		return []byte("No connection")
	}
	var response bytes.Buffer
	io.Copy(&response, conn)
	return response.Bytes()
}
// checkStatus returns 0 when the HTTP status line in response carries
// expectedStatus, and 1 otherwise. The version-separator dot is escaped:
// the original pattern `[01].[01]` let the dot match ANY character, so
// malformed lines like "HTTP/1x1 200" were accepted.
func checkStatus(response []byte, expectedStatus string) int {
	responseRe := regexp.MustCompile(`^HTTP/[01]\.[01] ([0-9]{3}) `)
	matches := responseRe.FindSubmatch(response)
	if len(matches) < 2 {
		return 1
	}
	if string(matches[1]) == expectedStatus {
		return 0
	}
	return 1
}
|
package copy
import (
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// environmentCMD copies the env-variable configmap of a microservice from one
// environment to another by shelling out to kubectl.
var environmentCMD = &cobra.Command{
	Use:   "environment",
	Short: "Copy env variables configmap for a microservice in one environment to another environment",
	Long: `
Copy env variables configmap for a microservice in one environment to another environment.

go run main.go tools microservice copy environment \
--application cde2e951-d40a-3548-8b45-64c0ded97940 \
--microservice-name frontend \
--from-env test \
--to-env prod
`,
	Run: func(cmd *cobra.Command, args []string) {
		logrus.SetFormatter(&logrus.JSONFormatter{})
		logrus.SetOutput(os.Stdout)
		logContext := logrus.StandardLogger()
		// Flags are read from viper under the full command path.
		sourceMicroserviceName := viper.GetString("tools.microservice.copy.environment.microservice-name")
		sourceEnvironment := viper.GetString("tools.microservice.copy.environment.from-env")
		destinationEnvironment := viper.GetString("tools.microservice.copy.environment.to-env")
		application := viper.GetString("tools.microservice.copy.environment.application")
		sourceConfigMapName := fmt.Sprintf("%s-%s-env-variables", sourceEnvironment, sourceMicroserviceName)
		namespace := fmt.Sprintf("application-%s", application)
		logContextWithMeta := logContext.WithFields(logrus.Fields{
			"k8sNamespace": namespace,
			"k8sConfigmap": sourceConfigMapName,
		})
		logContextWithMeta.WithField("microserviceName", sourceMicroserviceName).
			Info("Begins to copy env var configmap")
		// Fetch the source configmap as JSON via kubectl.
		kubeCtlGetCfgMap := exec.Command("kubectl", "-n", namespace, "get", "configmap", sourceConfigMapName, "-o", "json")
		_, err := kubeCtlGetCfgMap.StderrPipe()
		if err != nil {
			logContext.Fatal(err)
			return
		}
		configMapJson, err := kubeCtlGetCfgMap.Output()
		if err != nil {
			logContextWithMeta.WithField("error", err).
				Fatal("Failed to get configmap")
			return
		}
		// Rename the configmap for the destination environment.
		// NOTE(review): the Unmarshal error is ignored — a malformed kubectl
		// response would panic on the type assertion below; confirm intended.
		var modifiedConfigMap map[string]interface{}
		json.Unmarshal(configMapJson, &modifiedConfigMap)
		destinationConfigMap := fmt.Sprintf("%s-%s-env-variables", destinationEnvironment, sourceMicroserviceName)
		metadata := modifiedConfigMap["metadata"].(map[string]interface{})
		metadata["name"] = destinationConfigMap
		// delete uid and version to ensure it patches
		delete(metadata, "resourceVersion")
		delete(metadata, "uid")
		out, _ := json.Marshal(modifiedConfigMap)
		// Pipe the modified configmap into `kubectl apply`.
		kubectlApply := exec.Command("kubectl", "-o", "json", "apply", "-f", "-")
		kubectlApply.Stdin = bytes.NewReader(out)
		kubectlApply.Stderr = os.Stderr
		kubectlApply.Start()
		// NOTE(review): Output() above already started and waited for this
		// command, so this Run() returns an "exec: already started" error
		// that is silently ignored — confirm and remove this call.
		kubeCtlGetCfgMap.Run()
		kubectlApply.Wait()
		logContextWithMeta.WithField("k8sDestinationConfigMap", destinationConfigMap).
			Info("Successfully copied configmap for env variables")
	},
}
|
package app
import (
"database/sql"
"fmt"
"log"
"strings"
"github.com/gin-gonic/gin"
"github.com/jmoiron/sqlx"
"github.com/mercedtime/api/db/models"
"github.com/mercedtime/api/users"
)
// RegisterRoutes will setup all the app routes
func (a *App) RegisterRoutes(g *gin.RouterGroup) {
g.POST("/user", a.PostUser)
g.GET("/instructor/:id", instructorFromID(a.DB))
g.GET("/instructor/:id/courses", instructorCourses(a.DB))
lists := g.Group("/", listParamsMiddleware)
lists.GET("/lectures", ListLectures(a.DB))
lists.GET("/exams", ListExams(a.DB))
lists.GET("/labs", ListLabs(a.DB))
lists.GET("/discussions", ListDiscussions(a.DB))
lists.GET("/instructors", ListInstructors(a.DB))
}
// PostUser handles user creation
func (a *App) PostUser(c *gin.Context) {
type user struct {
users.User
Password string
}
u := user{}
err := c.BindJSON(&u)
if err != nil {
c.JSON(500, NewErr("could not read body"))
return
}
// TODO check auth for permissions to set is_admin
u.IsAdmin = false
if u.Password == "" {
c.JSON(400, ErrStatus(400, "no password for new user"))
return
}
if _, err = a.CreateUser(&u.User, u.Password); err != nil {
senderr(c, err, 500)
return
}
c.JSON(200, u.User)
}
var (
	// NoOp Defaults vary between databases
	// sqlite3: -1
	// postgres: nil
	defaultLimit interface{} = nil
	// defaultOffset is used when the client supplies no offset parameter.
	defaultOffset interface{} = 0 // default to 0
)
// listParamsMiddleware stores the "limit" and "offset" query parameters in
// the request context, substituting database-appropriate defaults when a
// parameter is absent or empty.
func listParamsMiddleware(c *gin.Context) {
	limit, offset := defaultLimit, defaultOffset
	if v, ok := c.GetQuery("limit"); ok && v != "" {
		limit = v
	}
	if v, ok := c.GetQuery("offset"); ok && v != "" {
		offset = v
	}
	c.Set("limit", limit)
	c.Set("offset", offset)
	c.Next()
}
// ListLectures returns a handlerfunc that lists lectures.
// Depends on "limit" and "offset" being set from middleware.
// ListLectures returns a handlerfunc that lists lectures.
// Depends on "limit" and "offset" being set from middleware.
func ListLectures(db *sqlx.DB) func(*gin.Context) {
	var (
		lecturesQuery = `
	SELECT ` + strings.Join(models.GetSchema(models.Lecture{}), ",") + `
	FROM lectures
	LIMIT $1 OFFSET $2`
		lecturesBySubjectQuery = `
	SELECT ` + strings.Join(models.GetNamedSchema("l", models.Lecture{}), ",") + `
	FROM lectures l, course c
	WHERE
		l.crn = c.crn AND
		c.subject = $1
	LIMIT $2 OFFSET $3`
	)
	return func(c *gin.Context) {
		// The result slice and error must be local to each request: the
		// previous version shared them across all invocations of the
		// handler, which is a data race under concurrent requests.
		var (
			err      error
			lectures []models.Lecture
		)
		subject, ok := c.GetQuery("subject")
		if ok {
			err = db.Select(
				&lectures, lecturesBySubjectQuery,
				strings.ToUpper(subject),
				c.MustGet("limit"), c.MustGet("offset"),
			)
		} else {
			err = db.Select(&lectures, lecturesQuery, c.MustGet("limit"), c.MustGet("offset"))
		}
		if err != nil {
			senderr(c, err, 500)
			return
		}
		c.JSON(200, lectures)
	}
}
// ListLabs returns a handlerfunc that lists labs.
// Depends on "limit" and "offset" being set from middleware.
// ListLabs returns a handlerfunc that lists labs.
// Depends on "limit" and "offset" being set from middleware.
func ListLabs(db *sqlx.DB) gin.HandlerFunc {
	query := `
	SELECT
		` + strings.Join(models.GetNamedSchema("aux", models.LabDisc{}), ",") + `
	FROM aux,course
	WHERE
		aux.crn = course.crn AND
		course.type = 'LAB'
	LIMIT $1 OFFSET $2`
	return func(c *gin.Context) {
		// Request-local state: sharing list/err across concurrent requests
		// (as before) is a data race.
		var list []models.LabDisc
		if err := db.Select(
			&list, query,
			c.MustGet("limit"),
			c.MustGet("offset"),
		); err != nil {
			senderr(c, err, 500)
			return
		}
		c.JSON(200, list)
	}
}
// ListDiscussions returns a handlerfunc that lists discussions.
// Depends on "limit" and "offset" being set from middleware.
// ListDiscussions returns a handlerfunc that lists discussions.
// Depends on "limit" and "offset" being set from middleware.
func ListDiscussions(db *sqlx.DB) gin.HandlerFunc {
	query := `
	SELECT
		` + strings.Join(models.GetNamedSchema("aux", models.LabDisc{}), ",") + `
	FROM aux,course
	WHERE
		aux.crn = course.crn AND
		course.type = 'DISC'
	LIMIT $1 OFFSET $2`
	return func(c *gin.Context) {
		// Request-local state: sharing list/err across concurrent requests
		// (as before) is a data race.
		var list []models.LabDisc
		if err := db.Select(
			&list, query,
			c.MustGet("limit"),
			c.MustGet("offset"),
		); err != nil {
			senderr(c, err, 500)
			return
		}
		c.JSON(200, list)
	}
}
// ListExams returns a handlerfunc that lists exams.
// Depends on "limit" and "offset" being set from middleware.
// ListExams returns a handlerfunc that lists exams.
// Depends on "limit" and "offset" being set from middleware.
func ListExams(db *sqlx.DB) gin.HandlerFunc {
	query := `
	SELECT
		crn, date, start_time, end_time
	FROM exam
	LIMIT $1 OFFSET $2`
	return func(c *gin.Context) {
		// Request-local state: sharing list/err across concurrent requests
		// (as before) is a data race.
		var list []models.Exam
		if err := db.Select(
			&list, query,
			c.MustGet("limit"),
			c.MustGet("offset"),
		); err != nil {
			senderr(c, err, 500)
			return
		}
		c.JSON(200, list)
	}
}
// getFromCRN returns a handler that looks up a single record by the "crn"
// value stored in the request context, scanning the row into v and writing
// it back as JSON. A sql.ErrNoRows result becomes a 404.
//
// NOTE(review): v is shared by every request served by the returned handler,
// so concurrent requests scan into (and marshal from) the same value — a
// likely data race; confirm callers serialize access or restructure to take
// a per-request value factory.
func getFromCRN(db *sqlx.DB, query string, v interface{ Scan(models.Scanable) error }) gin.HandlerFunc {
	return func(c *gin.Context) {
		var (
			crn = c.GetInt("crn")
			row = db.QueryRow(query, crn)
			err = v.Scan(row)
		)
		if err == sql.ErrNoRows {
			c.JSON(404, &Error{
				Msg:    fmt.Sprintf("no results found for crn: %d", crn),
				Status: 404,
			})
			return
		}
		if err != nil {
			senderr(c, err, 500)
			return
		}
		c.JSON(200, v)
	}
}
// senderr logs e and writes it to the client as a JSON Error with the given
// HTTP status code.
func senderr(c *gin.Context, e error, status int) {
	log.Println(e)
	c.JSON(status, &Error{Msg: e.Error(), Status: status})
}
|
package game
import (
"reflect"
"testing"
)
// gmsLegal holds games in valid states: a board size within bounds, three
// distinct players, and a move history without repeated positions.
// The Game element type is elided inside the slice literal.
var gmsLegal = []Game{
	{
		3,
		Players{'A', 'B', 'C'},
		History{},
	},
	{
		10,
		Players{'Ä', 'B', '😛'},
		History{
			Move{'Ä', 0, 0},
		},
	},
	{
		5,
		Players{'1', '2', '3'},
		History{
			Move{'1', 0, 0},
			Move{'2', 2, 0},
			Move{'3', 0, 1},
			Move{'1', 0, 2},
			Move{'2', 3, 0},
			Move{'3', 0, 3},
		},
	},
}
// gmsIllegal holds games in invalid states: size out of bounds, too few or
// missing players, and histories with repeated or out-of-turn moves.
var gmsIllegal = []Game{
	{
		1,
		Players{'A', 'B', 'C'},
		History{},
	},
	{
		5,
		Players{'A', 'B'},
		History{},
	},
	{
		10,
		Players{'Ä', 'B', NoPlayer},
		History{
			Move{'Ä', 0, 0},
		},
	},
	{
		5,
		Players{'1', '2', '3'},
		History{
			Move{'1', 0, 0},
			Move{'2', 2, 0},
			Move{'3', 0, 1},
			Move{'1', 0, 2},
			Move{'2', 3, 0},
			Move{'3', 0, 3},
			Move{'1', 0, 0},
		},
	},
	{
		4,
		Players{'1', '2', '3'},
		History{
			Move{'1', 0, 0},
			Move{'2', 2, 0},
			Move{'3', 0, 1},
			Move{'2', 3, 0},
			Move{'1', 0, 2},
			Move{'3', 0, 3},
			Move{'1', 0, 0},
		},
	},
}
// TestLegalSizes verifies that every size in [MinSize, MaxSize] is accepted.
func TestLegalSizes(t *testing.T) {
	var s Size
	for s = MinSize; s <= MaxSize; s++ {
		if _, err := New(s); err != nil {
			t.Fatal(err)
		}
	}
}
// TestIllegalSizes verifies that sizes below MinSize and in a window above
// MaxSize are rejected.
func TestIllegalSizes(t *testing.T) {
	const strErr = "false negative: %d is an illegal size"
	var s Size
	for s = 0; s < MinSize; s++ {
		if _, err := New(s); err == nil {
			t.Fatalf(strErr, s)
		}
	}
	for s = MaxSize + 1; s < MaxSize+64; s++ {
		if _, err := New(s); err == nil {
			t.Fatalf(strErr, s)
		}
	}
}
// TestMake verifies that New(3) produces a Game equal to a literal of size 3.
func TestMake(t *testing.T) {
	left := &Game{size: 3}
	right, err := New(3)
	if err != nil {
		t.Fatal("false positive:", err)
	}
	if !reflect.DeepEqual(left, right) {
		// %+v prints the games themselves; the previous message used %d on
		// struct pointers ("wanted size %d"), which go vet flags and which
		// printed confusing values instead of the games.
		t.Fatalf("corrupted Game: wanted %+v have %+v", left, right)
	}
}
|
package router
import (
"editorApi/controller/editorapi"
"editorApi/middleware"
"github.com/gin-gonic/gin"
)
// InitActorsRouter registers the actor CRUD endpoints under the "editor"
// group, protected by CORS and JWT middleware.
func InitActorsRouter(Router *gin.RouterGroup) {
	group := Router.Group("editor").Use(middleware.CORSMiddleware(), middleware.JWTAuth())
	{
		group.POST("actors/create", editorapi.ActorsCreate)
		group.POST("actors/find", editorapi.ActorsFind)
		group.POST("actors/findone", editorapi.ActorsFindOne)
		group.POST("actors/list", editorapi.ActorsList)
		group.POST("actors/update", editorapi.ActorsUpdate)
		group.POST("actors/delete", editorapi.ActorsDelete)
	}
}
|
package main
// DLDist computes the Damerau–Levenshtein distance between a and b: the
// minimum number of insertions, deletions, substitutions, and adjacent
// transpositions needed to turn one string into the other.
// Reference: https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance#Distance_with_adjacent_transpositions
func DLDist(a, b string) int {
	// []rune(s) is the idiomatic (and equivalent) form of appending each
	// rune of s into a fresh slice.
	ra := []rune(a)
	rb := []rune(b)
	var i, j int
	// Pre-process to remove the common prefix and suffix.
	for i = 0; i < len(ra) && i < len(rb) && ra[i] == rb[i]; i++ {
	}
	if i > 0 {
		ra = ra[i:]
		rb = rb[i:]
	}
	for i, j = len(ra)-1, len(rb)-1; i >= 0 && j >= 0 && ra[i] == rb[j]; i, j = i-1, j-1 {
	}
	if i < len(ra)-1 {
		ra = ra[:i+1]
		rb = rb[:j+1]
	}
	// The table has two extra rows/columns (conceptual indexes -1 and 0);
	// da records, per rune, the last row in which it occurred.
	na, nb := len(ra)+2, len(rb)+2
	da := make(map[rune]int)
	d := make([][]int, na)
	for i = range d {
		d[i] = make([]int, nb)
	}
	maxDist := len(ra) + len(rb)
	d[0][0] = maxDist
	for i = 1; i < na; i++ {
		d[i][0] = maxDist
		d[i][1] = i - 1
	}
	for j = 1; j < nb; j++ {
		d[0][j] = maxDist
		d[1][j] = j - 1
	}
	var db, cost, k, l int
	for i = 2; i < na; i++ {
		db = 0
		for j = 2; j < nb; j++ {
			k = da[rb[j-2]]
			l = db
			if ra[i-2] == rb[j-2] {
				cost = 0
				db = j - 1
			} else {
				cost = 1
			}
			d[i][j] = min4int(
				d[i-1][j-1]+cost,          // substitution
				d[i][j-1]+1,               // insertion
				d[i-1][j]+1,               // deletion
				d[k][l]+(i-k-2)+1+(j-l-2), // transposition
			)
		}
		da[ra[i-2]] = i - 1
	}
	return d[na-1][nb-1]
}

// min4int returns the smallest of its four arguments.
func min4int(a, b, c, d int) int {
	if a > b {
		a = b
	}
	if a > c {
		a = c
	}
	if a > d {
		a = d
	}
	return a
}
|
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package extensions
import (
"bytes"
"encoding/binary"
"io"
"math/bits"
)
const (
	// uint64Size is the byte width of a uint64.
	uint64Size = 8
	// firstCustomTypeID is the first type id gob assigns to user-defined types.
	firstCustomTypeID = 65
	// encFirstCustomTypeID is firstCustomTypeID after gob's signed-integer
	// encoding (shifted left one bit, see encodeInt).
	encFirstCustomTypeID = 130 // encoded 65
)

// hardcoded initial part of Revocation gob encoding, its constant until Revocation struct won't change,
// contains definition of Revocation struct with fields names and types.
// https://golang.org/pkg/encoding/gob/
var wireEncoding = []byte{
	64, 255, 129, 3, 1, 1, 10, 82, 101, 118, 111, 99, 97, 116, 105, 111, 110, 1, 255, 130, 0,
	1, 3, 1, 9, 84, 105, 109, 101, 115, 116, 97, 109, 112, 1, 4, 0, 1, 7, 75, 101, 121, 72,
	97, 115, 104, 1, 10, 0, 1, 9, 83, 105, 103, 110, 97, 116, 117, 114, 101, 1, 10, 0, 0, 0,
}
// revocationEncoder writes a Revocation in gob wire format without using
// encoding/gob's reflection machinery.
type revocationEncoder struct {
	// value accumulates the encoded field data.
	value *bytes.Buffer
}
// encode serializes revocation into gob wire format: the fixed type
// definition (wireEncoding), then the value's byte length, then the value
// itself. Zero-valued fields are skipped, as gob does; delta tracks the gob
// field-number delta between consecutive emitted fields.
func (encoder *revocationEncoder) encode(revocation Revocation) ([]byte, error) {
	encoder.value = new(bytes.Buffer)
	// The value section starts with the concrete type id.
	encoder.encodeInt(firstCustomTypeID)
	delta := uint64(1)
	if revocation.Timestamp != 0 {
		encoder.encodeUint(delta)
		encoder.encodeInt(revocation.Timestamp)
	} else {
		// Field skipped: the next emitted field's delta grows by one.
		delta++
	}
	if len(revocation.KeyHash) > 0 {
		encoder.encodeUint(delta)
		encoder.encodeUint(uint64(len(revocation.KeyHash)))
		encoder.writeBytes(revocation.KeyHash)
		delta = uint64(1)
	} else {
		delta++
	}
	if len(revocation.Signature) > 0 {
		encoder.encodeUint(delta)
		encoder.encodeUint(uint64(len(revocation.Signature)))
		encoder.writeBytes(revocation.Signature)
	}
	// A field delta of 0 terminates the struct.
	encoder.encodeUint(0)
	// The length prefix is appended to the buffer after the value bytes,
	// then the two regions are swapped when assembling the final message.
	valueLength := encoder.value.Len()
	encoder.encodeUint(uint64(valueLength))
	value := encoder.value.Bytes()
	lengthData := value[valueLength:]
	valueData := value[:valueLength]
	return append(wireEncoding, append(lengthData, valueData...)...), nil
}
// encodeInt emits i using gob's signed-integer scheme: the value is shifted
// left one bit; negatives are complemented with the low bit set.
func (encoder *revocationEncoder) encodeInt(i int64) {
	var u uint64
	if i >= 0 {
		u = uint64(i) << 1
	} else {
		u = uint64(^i)<<1 | 1
	}
	encoder.encodeUint(u)
}
// encodeUint emits x in gob's unsigned-integer format: values <= 0x7F are a
// single byte; larger values are written big-endian, preceded by a byte
// holding the negated byte count.
func (encoder *revocationEncoder) encodeUint(x uint64) {
	if x <= 0x7F {
		encoder.writeByte(uint8(x))
		return
	}
	var stateBuf [1 + uint64Size]byte
	binary.BigEndian.PutUint64(stateBuf[1:], x)
	bc := bits.LeadingZeros64(x) >> 3 // 8 - bytelen(x)
	stateBuf[bc] = uint8(bc - uint64Size) // and then we subtract 8 to get -bytelen(x)
	encoder.writeBytes(stateBuf[bc : uint64Size+1])
}
// writeByte appends a single byte to the value buffer.
func (encoder *revocationEncoder) writeByte(x byte) {
	encoder.value.WriteByte(x)
}

// writeBytes appends x to the value buffer.
func (encoder *revocationEncoder) writeBytes(x []byte) {
	encoder.value.Write(x)
}
// revocationDecoder parses the gob wire format produced by revocationEncoder.
type revocationDecoder struct {
	// data holds the remaining undecoded bytes.
	data *bytes.Buffer
}
// decode parses data into a Revocation. It verifies the fixed type-definition
// prefix (wireEncoding) and the declared value length, then walks the gob
// field stream: each field is identified by its delta from the previous
// field number, and a delta of 0 terminates the struct.
func (decoder *revocationDecoder) decode(data []byte) (revocation Revocation, err error) {
	decoder.data = bytes.NewBuffer(data)
	wire := make([]byte, len(wireEncoding))
	_, err = io.ReadFull(decoder.data, wire)
	if err != nil {
		return revocation, ErrRevocation.Wrap(err)
	}
	if !bytes.Equal(wire, wireEncoding) {
		return revocation, ErrRevocation.New("invalid revocation encoding")
	}
	length, err := decoder.decodeUint()
	if err != nil {
		return revocation, ErrRevocation.Wrap(err)
	}
	// The declared value length must match exactly the remaining input.
	if length != uint64(len(decoder.data.Bytes())) {
		return revocation, ErrRevocation.New("invalid revocation encoding")
	}
	typeID, err := decoder.decodeUint()
	if err != nil {
		return revocation, ErrRevocation.Wrap(err)
	}
	if typeID != encFirstCustomTypeID {
		return revocation, ErrRevocation.Wrap(ErrRevocation.New("invalid revocation encoding"))
	}
	// index is the absolute field number; field holds each delta.
	index := uint64(0)
	for {
		field, err := decoder.decodeUint()
		if err != nil {
			return revocation, ErrRevocation.Wrap(err)
		}
		if field == 0 {
			break
		}
		switch field + index {
		case 1:
			revocation.Timestamp, err = decoder.decodeInt()
			if err != nil {
				return revocation, ErrRevocation.Wrap(err)
			}
		case 2:
			revocation.KeyHash, err = decoder.decodeHash()
			if err != nil {
				return revocation, ErrRevocation.Wrap(err)
			}
		case 3:
			revocation.Signature, err = decoder.decodeHash()
			if err != nil {
				return revocation, ErrRevocation.Wrap(err)
			}
		default:
			return revocation, ErrRevocation.New("invalid field")
		}
		index += field
	}
	return revocation, nil
}
// decodeHash reads a length-prefixed byte string, validating the declared
// length against the remaining input.
func (decoder *revocationDecoder) decodeHash() ([]byte, error) {
	length, err := decoder.decodeUint()
	if err != nil {
		return nil, ErrRevocation.Wrap(err)
	}
	size := int(length)
	// Reject lengths that overflowed int, went negative, or exceed the input.
	if size < 0 || uint64(size) != length || decoder.data.Len() < size {
		return nil, ErrRevocation.New("invalid hash length: %d", length)
	}
	hash := make([]byte, size)
	if _, err := io.ReadFull(decoder.data, hash); err != nil {
		return nil, ErrRevocation.Wrap(err)
	}
	return hash, nil
}
// decodeUint reads a gob-style unsigned integer: values <= 0x7f are a
// single byte; otherwise the first byte is the negated byte count,
// followed by that many big-endian payload bytes.
func (decoder *revocationDecoder) decodeUint() (x uint64, err error) {
	b, err := decoder.data.ReadByte()
	if err != nil {
		return 0, ErrRevocation.Wrap(err)
	}
	if b <= 0x7f {
		return uint64(b), nil
	}
	n := -int(int8(b))
	if n > uint64Size || n < 0 {
		return 0, ErrRevocation.New("encoded unsigned integer out of range")
	}
	buf := make([]byte, n)
	// io.ReadFull only returns nil when it filled buf completely, so no
	// separate short-read check is needed. (The previous check here was
	// unreachable and its message compared n against len(buf) == n.)
	if _, err := io.ReadFull(decoder.data, buf); err != nil {
		return 0, ErrRevocation.Wrap(err)
	}
	// Accumulate big-endian bytes. Could check that the high byte is
	// non-zero, but it's not worth it.
	for _, b := range buf {
		x = x<<8 | uint64(b)
	}
	return x, nil
}
// decodeInt reads a signed integer stored in gob's zig-zag form: the
// low bit selects the sign, the remaining bits carry the magnitude.
func (decoder *revocationDecoder) decodeInt() (int64, error) {
	u, err := decoder.decodeUint()
	if err != nil {
		return 0, err
	}
	v := int64(u >> 1)
	if u&1 != 0 {
		v = ^v
	}
	return v, nil
}
|
package main
import (
"encoding/json"
"fmt"
"math/rand"
"time"
"github.com/hmgle/chi"
"github.com/hmgle/chi/middleware"
"github.com/hmgle/chi/render"
"github.com/valyala/fasthttp"
"golang.org/x/net/context"
)
func main() {
r := chi.NewRouter()
r.Use(middleware.RequestID)
// r.Use(middleware.Logger)
r.Use(middleware.Recoverer)
r.Get("/", func(fctx *fasthttp.RequestCtx) {
fctx.Write([]byte("..."))
})
r.Get("/ping", func(fctx *fasthttp.RequestCtx) {
fctx.Write([]byte("pong"))
})
r.Get("/panic", func(fctx *fasthttp.RequestCtx) {
panic("test")
})
// Slow handlers/operations.
r.Group(func(r chi.Router) {
// Stop processing when client disconnects.
// TODO
// r.Use(middleware.CloseNotify)
// Stop processing after 2.5 seconds.
r.Use(middleware.Timeout(2500 * time.Millisecond))
r.Get("/slow", func(ctx context.Context, fctx *fasthttp.RequestCtx) {
rand.Seed(time.Now().Unix())
// Processing will take 1-5 seconds.
processTime := time.Duration(rand.Intn(4)+1) * time.Second
select {
case <-ctx.Done():
return
case <-time.After(processTime):
// The above channel simulates some hard work.
}
fctx.Write([]byte(fmt.Sprintf("Processed in %v seconds\n", processTime)))
})
})
// Throttle very expensive handlers/operations.
r.Group(func(r chi.Router) {
// Stop processing after 30 seconds.
r.Use(middleware.Timeout(30 * time.Second))
// Only one request will be processed at a time.
r.Use(middleware.Throttle(1))
r.Get("/throttled", func(ctx context.Context, fctx *fasthttp.RequestCtx) {
select {
case <-ctx.Done():
switch ctx.Err() {
case context.DeadlineExceeded:
fctx.SetStatusCode(504)
fctx.Write([]byte("Processing too slow\n"))
default:
fctx.Write([]byte("Canceled\n"))
}
return
case <-time.After(5 * time.Second):
// The above channel simulates some hard work.
}
fctx.Write([]byte("Processed\n"))
})
})
// RESTy routes for "articles" resource
r.Route("/articles", func(r chi.Router) {
r.Get("/", paginate, listArticles) // GET /articles
r.Post("/", createArticle) // POST /articles
r.Route("/:articleID", func(r chi.Router) {
r.Use(ArticleCtx)
r.Get("/", getArticle) // GET /articles/123
r.Put("/", updateArticle) // PUT /articles/123
r.Delete("/", deleteArticle) // DELETE /articles/123
})
})
// Mount the admin sub-router
r.Mount("/admin", adminRouter())
fasthttp.ListenAndServe(":3333", r.ServeHTTP)
}
// Article is the JSON payload for the articles resource.
type Article struct {
	ID string `json:"id"`
	Title string `json:"title"`
}
// ArticleCtx is middleware that loads the article named by the
// "articleID" URL parameter into the request context under the key
// "article", or replies 404 when the lookup fails.
func ArticleCtx(next chi.Handler) chi.Handler {
	return chi.HandlerFunc(func(ctx context.Context, fctx *fasthttp.RequestCtx) {
		articleID := chi.URLParam(ctx, "articleID")
		article, err := dbGetArticle(articleID)
		if err != nil {
			fctx.Error("Not Found", 404)
			return
		}
		// NOTE(review): a bare string context key can collide with other
		// packages; an unexported key type is the usual fix — confirm
		// before changing since getArticle and friends read this key.
		ctx = context.WithValue(ctx, "article", article)
		next.ServeHTTPC(ctx, fctx)
	})
}
// listArticles handles GET /articles with a placeholder body.
func listArticles(ctx context.Context, fctx *fasthttp.RequestCtx) {
	fctx.Write([]byte("list of articles.."))
	// or render.Data(w, 200, []byte("list of articles.."))
}
// createArticle handles POST /articles: it decodes an Article from the
// request body and echoes its title back, replying 422 on bad input.
func createArticle(ctx context.Context, fctx *fasthttp.RequestCtx) {
	var article *Article
	// btw, you could do this body reading / marshalling in a nice bind middleware
	data := fctx.PostBody()
	if err := json.Unmarshal(data, &article); err != nil {
		fctx.Error(err.Error(), 422)
		return
	}
	// A body of JSON "null" unmarshals without error but leaves article
	// nil; guard before dereferencing it below.
	if article == nil {
		fctx.Error("empty article", 422)
		return
	}
	// should really send back the json marshalled new article.
	// build your own responder :)
	fctx.Write([]byte(article.Title))
}
// getArticle handles GET /articles/:articleID using the article placed
// in the context by ArticleCtx; 422 when the context value is missing.
func getArticle(ctx context.Context, fctx *fasthttp.RequestCtx) {
	article, ok := ctx.Value("article").(*Article)
	if !ok {
		fctx.Error("Unprocessable Entity", 422)
		return
	}
	// Build your own responder, see the "./render" package as a starting
	// point for your own.
	render.JSON(fctx, 200, article)
	// or..
	// w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
}
// updateArticle handles PUT /articles/:articleID, merging the request
// body into the context's article while protecting its ID.
func updateArticle(ctx context.Context, fctx *fasthttp.RequestCtx) {
	article, ok := ctx.Value("article").(*Article)
	if !ok {
		fctx.Error("Not Found", 404)
		return
	}
	// btw, you could do this body reading / marshalling in a nice bind middleware
	data := fctx.PostBody()
	// The blank field shadows the embedded Article's "id" JSON key so a
	// client-supplied id is discarded by Unmarshal.
	uArticle := struct {
		*Article
		_ interface{} `json:"id,omitempty"` // prevents 'id' from being overridden
	}{Article: article}
	if err := json.Unmarshal(data, &uArticle); err != nil {
		fctx.Error("Unprocessable Entity", 422)
		return
	}
	render.JSON(fctx, 200, uArticle)
	// w.Write([]byte(fmt.Sprintf("updated article, title:%s", uArticle.Title)))
}
// deleteArticle handles DELETE /articles/:articleID; the data-store
// delete itself is stubbed out.
func deleteArticle(ctx context.Context, fctx *fasthttp.RequestCtx) {
	article, ok := ctx.Value("article").(*Article)
	if !ok {
		fctx.Error("Unprocessable Entity", 422)
		return
	}
	_ = article // delete the article from the data store..
	fctx.SetStatusCode(204)
}
// dbGetArticle stands in for a data-store lookup; it always succeeds.
func dbGetArticle(id string) (*Article, error) {
	//.. fetch the article from a data store of some kind..
	return &Article{ID: id, Title: "Going all the way,"}, nil
}
// paginate is a stub pagination middleware; it currently passes the
// request straight through.
func paginate(next chi.Handler) chi.Handler {
	return chi.HandlerFunc(func(ctx context.Context, fctx *fasthttp.RequestCtx) {
		// just a stub.. some ideas are to look at URL query params for something like
		// the page number, or the limit, and send a query cursor down the chain
		next.ServeHTTPC(ctx, fctx)
	})
}
// A completely separate router for administrator routes
// adminRouter builds the /admin sub-router; every route is gated by
// the AdminOnly middleware.
func adminRouter() chi.Handler { // or chi.Router {
	r := chi.NewRouter()
	r.Use(AdminOnly)
	r.Get("/", func(fctx *fasthttp.RequestCtx) {
		fctx.Write([]byte("admin: index"))
	})
	r.Get("/accounts", func(fctx *fasthttp.RequestCtx) {
		fctx.Write([]byte("admin: list accounts.."))
	})
	r.Get("/users/:userId", func(ctx context.Context, fctx *fasthttp.RequestCtx) {
		fctx.Write([]byte(fmt.Sprintf("admin: view user id %v", chi.URLParam(ctx, "userId"))))
	})
	return r
}
// AdminOnly rejects the request with 403 unless the context carries
// "acl.admin" == true. Nothing in this file sets that value; presumably
// an upstream auth middleware does — TODO confirm.
func AdminOnly(next chi.Handler) chi.Handler {
	return chi.HandlerFunc(func(ctx context.Context, fctx *fasthttp.RequestCtx) {
		isAdmin, ok := ctx.Value("acl.admin").(bool)
		if !ok || !isAdmin {
			fctx.Error("Forbidden", 403)
			return
		}
		next.ServeHTTPC(ctx, fctx)
	})
}
|
package register
import (
"context"
"fmt"
"net/http"
"os"
"time"
"github.com/sirupsen/logrus"
"github.com/rancher/fleet/internal/config"
"github.com/rancher/fleet/internal/registration"
fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancher/fleet/pkg/durations"
fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io"
"github.com/rancher/wrangler/pkg/generated/controllers/core"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/randomtoken"
"github.com/rancher/wrangler/pkg/ratelimit"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"sigs.k8s.io/yaml"
)
const (
	// CredName is the name of the secret holding the agent's credentials.
	CredName = "fleet-agent" // same as AgentConfigName
	// Keys used in the data of registration-related secrets.
	Kubeconfig          = "kubeconfig"
	Token               = "token"
	Values              = "values"
	APIServerURL        = "apiServerURL"
	APIServerCA         = "apiServerCA"
	DeploymentNamespace = "deploymentNamespace"
	ClusterNamespace    = "clusterNamespace"
	ClusterName         = "clusterName"
)
// AgentInfo is the result of a successful registration: the cluster's
// identity on the management cluster and a client config for talking
// to it.
type AgentInfo struct {
	ClusterNamespace string
	ClusterName      string
	ClientConfig clientcmd.ClientConfig
}
// Register retries tryRegister until it succeeds or ctx is canceled,
// waiting durations.AgentRegistrationRetry between attempts.
// NOTE(review): the parameter name `config` shadows the imported
// internal/config package inside this function (unused here, so
// harmless) — consider renaming.
func Register(ctx context.Context, namespace, clusterID string, config *rest.Config) (*AgentInfo, error) {
	for {
		cfg, err := tryRegister(ctx, namespace, clusterID, config)
		if err == nil {
			return cfg, nil
		}
		logrus.Errorf("Failed to register agent: %v", err)
		// Back off before retrying, but give up as soon as ctx ends.
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(durations.AgentRegistrationRetry):
		}
	}
}
// tryRegister makes sure the secret cattle-fleet-system/fleet-agent is
// populated and the contained kubeconfig is working
// tryRegister returns agent info from the existing fleet-agent secret
// when its kubeconfig still works; otherwise it (re-)registers via the
// fleet-agent-bootstrap secret and builds a fresh fleet-agent secret.
func tryRegister(ctx context.Context, namespace, clusterID string, cfg *rest.Config) (*AgentInfo, error) {
	cfg = rest.CopyConfig(cfg)
	// disable the rate limiter
	cfg.RateLimiter = ratelimit.None
	k8s, err := core.NewFactoryFromConfig(cfg)
	if err != nil {
		return nil, err
	}
	secret, err := k8s.Core().V1().Secret().Get(namespace, CredName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		logrus.Warn("Cannot find fleet-agent secret, running registration")
		// fallback to local cattle-fleet-system/fleet-agent-bootstrap
		secret, err = runRegistration(ctx, k8s.Core().V1(), namespace, clusterID)
		if err != nil {
			return nil, fmt.Errorf("registration failed: %w", err)
		}
	} else if err != nil {
		return nil, err
	} else if err := testClientConfig(secret.Data[Kubeconfig]); err != nil {
		// skip testClientConfig check if previous error, or IsNotFound fallback succeeded
		logrus.Errorf("Current credential failed, failing back to reregistering: %v", err)
		secret, err = runRegistration(ctx, k8s.Core().V1(), namespace, clusterID)
		if err != nil {
			return nil, fmt.Errorf("re-registration failed: %w", err)
		}
	}
	clientConfig, err := clientcmd.NewClientConfigFromBytes(secret.Data[Kubeconfig])
	if err != nil {
		return nil, err
	}
	// delete the fleet-agent-bootstrap cred; best-effort, error ignored.
	_ = k8s.Core().V1().Secret().Delete(namespace, config.AgentBootstrapConfigName, nil)
	return &AgentInfo{
		ClusterNamespace: string(secret.Data[ClusterNamespace]),
		ClusterName:      string(secret.Data[ClusterName]),
		ClientConfig: clientConfig,
	}, nil
}
// runRegistration reads the cattle-fleet-system/fleet-agent-bootstrap and
// waits for the registration secret to appear on the management cluster to
// create a new fleet-agent secret
// runRegistration loads the local fleet-agent-bootstrap secret and uses
// it to drive a full registration via createAgentSecret.
func runRegistration(ctx context.Context, k8s corecontrollers.Interface, namespace, clusterID string) (*corev1.Secret, error) {
	secret, err := k8s.Secret().Get(namespace, config.AgentBootstrapConfigName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("looking up secret %s/%s: %w", namespace, config.AgentBootstrapConfigName, err)
	}
	return createAgentSecret(ctx, clusterID, k8s, secret)
}
// createAgentSecret uses the provided fleet-agent-bootstrap token to build a
// kubeconfig and create a ClusterRegistration on the management cluster.
// Then it waits up to 30 minutes for the registration secret
// "c-clientID-clientRandom" to appear in the systemRegistrationNamespace on
// the management cluster.
// Finally uses the client from the config (service account: fleet-agent), to
// update the "fleet-agent" secret with a new kubeconfig from the registration
// secret. The new kubeconfig can then be used to query bundledeployments.
func createAgentSecret(ctx context.Context, clusterID string, k8s corecontrollers.Interface, secret *corev1.Secret) (*corev1.Secret, error) {
	clientConfig := createClientConfigFromSecret(secret)
	ns, _, err := clientConfig.Namespace()
	if err != nil {
		return nil, err
	}
	kc, err := clientConfig.ClientConfig()
	if err != nil {
		return nil, err
	}
	cfg, err := config.Lookup(ctx, secret.Namespace, config.AgentConfigName, k8s.ConfigMap())
	if err != nil {
		return nil, fmt.Errorf("failed to look up client config %s/%s: %w", secret.Namespace, config.AgentConfigName, err)
	}
	fleetK8s, err := kubernetes.NewForConfig(kc)
	if err != nil {
		return nil, err
	}
	fc, err := fleetcontrollers.NewFactoryFromConfig(kc)
	if err != nil {
		return nil, err
	}
	token, err := randomtoken.Generate()
	if err != nil {
		return nil, err
	}
	// Prefer a configured client ID; otherwise fall back to the
	// kube-system namespace UID as a stable cluster identity.
	if cfg.ClientID != "" {
		clusterID = cfg.ClientID
	} else if clusterID == "" {
		kubeSystem, err := k8s.Namespace().Get("kube-system", metav1.GetOptions{})
		if err != nil {
			return nil, fmt.Errorf("cannot retrieve our kubeSystem.UID: %w", err)
		}
		clusterID = string(kubeSystem.UID)
	}
	// Guard against a nil Labels map from the looked-up config: writing
	// to a nil map panics.
	if cfg.Labels == nil {
		cfg.Labels = map[string]string{}
	}
	// add the name of the pod that created the registration for debugging
	cfg.Labels["fleet.cattle.io/created-by-agent-pod"] = os.Getenv("HOSTNAME")
	logrus.Infof("Creating clusterregistration with id '%s' for new token", clusterID)
	request, err := fc.Fleet().V1alpha1().ClusterRegistration().Create(&fleet.ClusterRegistration{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "request-",
			Namespace:    ns,
		},
		Spec: fleet.ClusterRegistrationSpec{
			ClientID:      clusterID,
			ClientRandom:  token,
			ClusterLabels: cfg.Labels,
		},
	})
	if err != nil {
		return nil, fmt.Errorf("cannot create clusterregistration on management cluster for cluster id '%s': %w", clusterID, err)
	}
	secretName := registration.SecretName(request.Spec.ClientID, request.Spec.ClientRandom)
	secretNamespace := string(values(secret.Data)["systemRegistrationNamespace"])
	// Poll for the registration secret until it appears, ctx ends, or
	// the overall timeout elapses.
	timeout := time.After(durations.CreateClusterSecretTimeout)
	for {
		select {
		case <-timeout:
			return nil, fmt.Errorf("timeout waiting for registration secret '%s/%s' on management cluster", secretNamespace, secretName)
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(durations.ClusterSecretRetry):
		}
		newSecret, err := fleetK8s.CoreV1().Secrets(secretNamespace).Get(ctx, secretName, metav1.GetOptions{})
		if err != nil {
			logrus.Infof("Waiting for secret '%s/%s' on management cluster for request '%s/%s': %v", secretNamespace, secretName, request.Namespace, request.Name, err)
			continue
		}
		newToken := newSecret.Data[Token]
		clusterNamespace := newSecret.Data[ClusterNamespace]
		clusterName := newSecret.Data[ClusterName]
		deploymentNamespace := newSecret.Data[DeploymentNamespace]
		newKubeconfig, err := updateClientConfig(clientConfig, string(newToken), string(deploymentNamespace))
		if err != nil {
			return nil, err
		}
		// Verify the new credentials actually work before persisting them.
		if err := testClientConfig(newKubeconfig); err != nil {
			return nil, fmt.Errorf("new client config cannot list bundledeployments on management cluster: %w", err)
		}
		// fleet-agent secret
		agentSecret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      CredName,
				Namespace: secret.Namespace,
			},
			Data: map[string][]byte{
				Kubeconfig:          newKubeconfig,
				DeploymentNamespace: deploymentNamespace,
				ClusterNamespace:    clusterNamespace,
				ClusterName:         clusterName,
			},
		}
		// Replace any stale secret from an earlier registration.
		secret, err := k8s.Secret().Create(agentSecret)
		if apierrors.IsAlreadyExists(err) {
			if err = k8s.Secret().Delete(agentSecret.Namespace, agentSecret.Name, &metav1.DeleteOptions{}); err != nil {
				return nil, err
			}
			secret, err = k8s.Secret().Create(agentSecret)
		}
		if err != nil {
			err = fmt.Errorf("failed to create 'fleet-agent' secret: %w", err)
		}
		return secret, err
	}
}
// values returns the secret data as-is unless the "values" key holds a
// non-empty YAML document; in that case the document is parsed and only
// its string-valued fields are returned (non-strings are dropped). On a
// parse error the original data is returned unchanged.
func values(data map[string][]byte) map[string][]byte {
	raw := data[Values]
	if len(raw) == 0 {
		return data
	}
	parsed := map[string]interface{}{}
	if err := yaml.Unmarshal(raw, &parsed); err != nil {
		return data
	}
	out := make(map[string][]byte, len(parsed))
	for key, val := range parsed {
		if str, ok := val.(string); ok {
			out[key] = []byte(str)
		}
	}
	return out
}
// createClientConfigFromSecret builds a client config for the
// management cluster from the bootstrap secret's apiServerURL,
// apiServerCA, token, and cluster namespace.
func createClientConfigFromSecret(secret *corev1.Secret) clientcmd.ClientConfig {
	data := values(secret.Data)
	apiServerURL := string(data[APIServerURL])
	apiServerCA := data[APIServerCA]
	namespace := string(data[ClusterNamespace])
	token := string(data[Token])
	// Probe the API server with a plain client; if it is reachable
	// without the custom CA bundle, drop the bundle.
	// NOTE(review): this uses the default client with no timeout —
	// consider a client with a short Timeout.
	if resp, err := http.Get(apiServerURL); err == nil {
		// Close the body so the connection is not leaked (the original
		// discarded the response without closing it).
		resp.Body.Close()
		apiServerCA = nil
	}
	cfg := clientcmdapi.Config{
		Clusters: map[string]*clientcmdapi.Cluster{
			"cluster": {
				Server:                   apiServerURL,
				CertificateAuthorityData: apiServerCA,
			},
		},
		AuthInfos: map[string]*clientcmdapi.AuthInfo{
			"user": {
				Token: token,
			},
		},
		Contexts: map[string]*clientcmdapi.Context{
			"default": {
				Cluster:   "cluster",
				AuthInfo:  "user",
				Namespace: namespace,
			},
		},
		CurrentContext: "default",
	}
	return clientcmd.NewDefaultClientConfig(cfg, &clientcmd.ConfigOverrides{})
}
// testClientConfig verifies the given kubeconfig bytes are usable by
// listing bundledeployments in the config's default namespace.
func testClientConfig(cfg []byte) error {
	cc, err := clientcmd.NewClientConfigFromBytes(cfg)
	if err != nil {
		return err
	}
	ns, _, err := cc.Namespace()
	if err != nil {
		return err
	}
	// Renamed from `rest`, which shadowed the imported
	// k8s.io/client-go/rest package.
	restCfg, err := cc.ClientConfig()
	if err != nil {
		return err
	}
	fc, err := fleetcontrollers.NewFactoryFromConfig(restCfg)
	if err != nil {
		return err
	}
	_, err = fc.Fleet().V1alpha1().BundleDeployment().List(ns, metav1.ListOptions{})
	return err
}
// updateClientConfig serializes cc with every auth-info's token and
// every context's namespace replaced by the given values.
func updateClientConfig(cc clientcmd.ClientConfig, token, ns string) ([]byte, error) {
	raw, err := cc.RawConfig()
	if err != nil {
		return nil, err
	}
	// Map values are pointers, so mutating them updates raw in place.
	for _, auth := range raw.AuthInfos {
		auth.Token = token
	}
	for _, kctx := range raw.Contexts {
		kctx.Namespace = ns
	}
	return clientcmd.Write(raw)
}
|
package script
import (
"strings"
)
// Directions lists the built-in movement nouns as adjacent opposite
// pairs: each even index pairs with the following odd index.
var Directions = []string{"north", "south", "east", "west", "up", "down"}

// FIX: move these into a standard rules extension package?
// _makeOpposites builds the bidirectional opposite lookup from the
// paired entries of Directions.
func _makeOpposites() map[string]string {
	op := make(map[string]string, len(Directions))
	for i := 1; i < len(Directions); i += 2 {
		a, b := Directions[i-1], Directions[i]
		op[a] = b
		op[b] = a
	}
	return op
}

var opposites = _makeOpposites()
// Going begins a statement connecting one room to another via a
// movement direction. Direction: a noun of "direction" type, such as
// north, east, or south.
func Going(direction string) GoingFragment {
	from := GoesFromFragment{origin: NewOrigin(2), fromDir: direction}
	return GoingFragment{from}
}
// Through routes the directional movement through an explicit
// departure door.
func (goingFrom GoingFragment) Through(door string) GoesFromFragment {
	frag := goingFrom.GoesFromFragment
	frag.fromDoor = door
	return frag
}
// Through begins a statement connecting one room to another via a
// door. Door: the exit from a room.
func Through(door string) GoesFromFragment {
	return GoesFromFragment{
		origin:   NewOrigin(2),
		fromDoor: door,
	}
}
// ConnectsTo establishes a two-way connection between the room From()
// and the passed destination.
func (goesFrom GoesFromFragment) ConnectsTo(room string) GoesToFragment {
	to := GoesToFragment{from: goesFrom, toRoom: room}
	to.twoWay = true
	return to
}
// ArrivesAt establishes a one-way connection between the room From()
// and the passed destination.
func (goesFrom GoesFromFragment) ArrivesAt(room string) GoesToFragment {
	return GoesToFragment{
		from:   goesFrom,
		toRoom: room,
	}
}
// Door names an optional door to arrive at in the destination room.
func (goesTo GoesToFragment) Door(door string) IFragment {
	updated := goesTo
	updated.toDoor = door
	return updated
}
// GoingFragment is the builder state produced by Going(); it adds the
// Through() refinement on top of GoesFromFragment.
type GoingFragment struct {
	GoesFromFragment
}
// GoesFromFragment captures the departure side of a connection: the
// travel direction and/or departure door. origin records the call
// site via NewOrigin — presumably for error reporting; confirm.
type GoesFromFragment struct {
	origin            Origin
	fromDir, fromDoor string
}
// GoesToFragment completes the connection with a destination room, an
// optional arrival door, and whether travel works in both directions.
type GoesToFragment struct {
	from           GoesFromFragment
	toRoom, toDoor string
	twoWay         bool
}
//
// implements IFragment for use in The()
//
// MakeStatement materializes the connection: it creates both rooms and
// doors, links departure and arrival doors via "destination", and maps
// room+direction onto the departure door (both ways when twoWay).
func (goesTo GoesToFragment) MakeStatement(b SubjectBlock) (err error) {
	from := newFromSite(b.subject, goesTo.from.fromDoor, goesTo.from.fromDir)
	to := newToSite(goesTo.toRoom, goesTo.toDoor, goesTo.from.fromDir)
	// A Room (contains) Doors
	if e := from.makeSite(b); e != nil {
		err = e
	} else if e := to.makeSite(b); e != nil {
		err = e
	}
	// A departure door (has a matching) arrival door
	if err == nil {
		if _, e := b.The(from.door.str, Has("destination", to.door.str)); e != nil {
			err = e
		} else if goesTo.twoWay {
			_, err = b.The(to.door.str, Has("destination", from.door.str))
		}
	}
	// A Room+Travel Direction (has a matching) departure door
	// ( if you do not have a departure door, one will be created for you. )
	if err == nil {
		dir := xDir{goesTo.from.fromDir}
		if dir.isSpecified() {
			if _, e := dir.makeDir(b); e != nil {
				err = e
			} else if _, e := b.The(from.room.str, Has(dir.via(), from.door.str)); e != nil {
				err = e
			} else if goesTo.twoWay {
				_, err = b.The(to.room.str, Has(dir.revVia(), to.door.str))
				// FIX? REMOVED dynamic opposite lookup
				// needs more thought as to how new directions could be added
				// perhaps some sort of "dependency injection" where we can add evaluations
				// -- dynamic compiler generators -- as hooks after ( dependent on ) sets of other instances, classes, etc. so those hooks can use model reflection to generate new, non-conflicting, model data -- this is already similar to the idea of onion skins of visual content, hardpoint hooks, etc.
				//_, err = b.The(to.room.str, Has(dir.revRev(), from.door.str))
			}
		}
	}
	return err
}
// newFromSite builds the departure side of a connection. When no door
// was named, one called "<room>-departure-<dir>" is generated (and
// later flagged as scenery by makeSite).
func newFromSite(room, door, dir string) xSite {
	generated := len(door) == 0
	if generated {
		door = strings.Join([]string{room, "departure", dir}, "-")
	}
	return xSite{xRoom{room}, xDoor{door, generated}}
}

// newToSite builds the arrival side of a connection. When no door was
// named, one called "<room>-arrival-<dir>" is generated (and later
// flagged as scenery by makeSite).
func newToSite(room, door, dir string) xSite {
	generated := len(door) == 0
	if generated {
		door = strings.Join([]string{room, "arrival", dir}, "-")
	}
	return xSite{xRoom{room}, xDoor{door, generated}}
}
// xSite pairs a room with one of its doors (departure or arrival).
type xSite struct {
	room xRoom
	door xDoor
}
// makeSite declares the room and the door (contained in the room); a
// generated door is additionally marked as scenery.
func (x xSite) makeSite(b SubjectBlock) (err error) {
	if _, e := b.The("room", Called(x.room.str), Exists()); e != nil {
		err = e
	} else if _, e = b.The("door", Called(x.door.str), In(x.room.str), Exists()); e != nil {
		err = e
	} else if x.door.gen {
		_, err = b.Our(x.door.str, Is("scenery"))
	}
	return err
}
// xRoom names a room.
type xRoom struct {
	str string
}
// xDoor names a door; gen is true when the name was auto-generated
// rather than supplied by the author.
type xDoor struct {
	str string
	gen bool
}
// xDir names a travel direction; the empty string means unspecified.
type xDir struct {
	str string
}
// isSpecified reports whether a direction was actually given.
func (x xDir) isSpecified() bool {
	return len(x.str) > 0
}
// makeDir declares the direction noun.
func (x xDir) makeDir(b SubjectBlock) (int, error) {
	return b.The("direction", Called(x.str), Exists())
}
// via is the property naming the departure door for this direction.
func (x xDir) via() string {
	return x.str + "-via"
}
// opposite looks up the reverse direction from the package table.
func (x xDir) opposite() string {
	return opposites[x.str]
}
// revVia is the via property for the opposite direction, used for the
// return leg of two-way connections.
func (x xDir) revVia() string {
	return x.opposite() + "-via"
}
// FIX? REMOVED dynamic opposite lookup ( see comment in MakeStatement )
// func (x xDir) revVia() string {
// return x.str + "-rev-via"
// }
|
package tc
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"strconv"
"strings"
"t3x9/ast"
"t3x9/lex"
)
// bpw is the number of bytes per machine word in the target VM.
const bpw = 4
// Opcode table for the tcvm. Each entry is a hex opcode, optionally
// followed by ",<mode>" naming its operand kind; entries beginning
// with "," are pseudo-ops that emit no opcode byte. See gen() for how
// the modes (b/w/W/a/m/>/</r) are assembled.
const (
	opush = "00"
	oclear = "01"
	oldval = "02,w"
	oldaddr = "03,a"
	oldlref = "04,w"
	oldglob = "05,a"
	oldlocl = "06,w"
	ostglob = "07,a"
	ostlocl = "08,w"
	ostindr = "09"
	ostindb = "0a"
	oincglob = "0b,a"
	oinclocl = "0c,w"
	oalloc = "0d,w"
	odealloc = "0e,w"
	oloclvec = "0f"
	oglobvec = "10,a"
	oindex = "11"
	oderef = "12"
	oindxb = "13"
	odrefb = "14"
	omark = ",m"
	oresolv = ",r"
	ocall = "17,W"
	ojumpfwd = "18,>"
	ojumpback = "19,<"
	ojmpfalse = "1a,>"
	ojmptrue = "1b,>"
	ofor = "1c,>"
	ofordown = "1d,>"
	oenter = "1e"
	oexit = "1f"
	ohalt = "20,w"
	oneg = "21"
	oinv = "22"
	olognot = "23"
	oadd = "24"
	osub = "25"
	omul = "26"
	odiv = "27"
	omod = "28"
	oand = "29"
	oor = "2a"
	oxor = "2b"
	oshl = "2c"
	oshr = "2d"
	oeq = "2e"
	oneq = "2f"
	olt = "30"
	ogt = "31"
	ole = "32"
	oge = "33"
	oword = ",W"
	// Built-in runtime service routines, installed by builtin().
	tcomp = "3500"
	tcopy = "3501"
	tfill = "3502"
	tscan = "3503"
	tcreate = "3504"
	topen = "3505"
	tclose = "3506"
	tread = "3507"
	twrite = "3508"
	trename = "3509"
	tremove = "350a"
)
// scope is one level of the symbol table; lookups walk the parent
// chain outward.
type scope struct {
	*ast.Scope
	parent *scope
	symbol map[string]*symbol
}
// symbol pairs an AST symbol with its resolved value: a constant's
// value, a function's text address, or a variable's data address or
// frame offset.
type symbol struct {
	*ast.Symbol
	value int
}
// reloc records an address inside a segment ('t' for text, 'd' for
// data) that must be shifted once the final text length is known.
type reloc struct {
	addr int
	seg  int
}
// CG is the T3X9 code generator targeting the tcvm.
type CG struct {
	text []byte // emitted code segment
	data []byte // emitted data segment
	rel []reloc // pending relocations, applied by relocate()
	stack []int // compile-time stack of jump/mark positions
	lp int // current local frame pointer offset (negative going down)
	loaded bool // whether the accumulator currently holds a value
	gsp *scope // global scope
	csp *scope // current (innermost) scope
	leaves []int // unresolved forward jumps for LEAVE
	loops []int // unresolved forward jumps for LOOP (FOR bodies)
	loop0 int // loop-top address for WHILE, 0 inside FOR
	defs map[*ast.Symbol]ast.Decl
}
// NewCG returns a zero-initialized code generator.
func NewCG() *CG {
	return &CG{}
}
// Compile generates code for the whole program and writes a tcvm
// executable (header, text segment, data segment) to w.
func (c *CG) Compile(w io.Writer, p *ast.Prog) error {
	c.program(p)
	// c.pad is defined elsewhere in this package; presumably it pads
	// the text segment to a 4-byte boundary — confirm.
	c.text = c.pad(c.text, 4)
	c.relocate()
	b := bufio.NewWriter(w)
	c.writehdr(b)
	b.Write(c.text)
	b.Write(c.data)
	// Flush surfaces any buffered write error from the steps above.
	return b.Flush()
}
// relocate shifts every recorded address by the final text length,
// since the data segment is loaded immediately after the text segment.
func (c *CG) relocate() {
	dist := len(c.text)
	for _, r := range c.rel {
		switch r.seg {
		case 't':
			a := c.tfetch(r.addr) + dist
			c.tpatch(r.addr, a)
		case 'd':
			a := c.dfetch(r.addr) + dist
			c.dpatch(r.addr, a)
		default:
			panic(fmt.Errorf("unknown segment %c", r.seg))
		}
	}
}
// builtin installs a runtime service routine under name: a jump over
// the routine's body, the routine opcode, then resolution of the jump.
func (c *CG) builtin(name, code string) {
	c.gen(ojumpfwd, 0)
	c.add(&ast.Symbol{Ident: name, Type: ast.FUNC}, len(c.text))
	c.gen(code, 0)
	c.gen(oresolv, 0)
}
// writehdr emits the executable header: shebang, magic, and the text
// and data segment lengths. Write errors are ignored here; they
// surface via the bufio Flush in Compile.
func (c *CG) writehdr(w io.Writer) {
	fmt.Fprintf(w, "#! /u/bin/tcvm\n")
	fmt.Fprintf(w, "T3X9")
	binary.Write(w, binary.LittleEndian, uint32(len(c.text)))
	binary.Write(w, binary.LittleEndian, uint32(len(c.data)))
}
// spill pushes the accumulator if it holds a value, otherwise just
// marks it as loaded.
func (c *CG) spill() {
	if c.loaded {
		c.gen(opush, 0)
	} else {
		c.loaded = true
	}
}
// clear marks the accumulator as empty.
func (c *CG) clear() {
	c.loaded = false
}
// swap exchanges the two topmost compile-time stack entries.
func (c *CG) swap() {
	sp := len(c.stack)
	c.stack[sp-1], c.stack[sp-2] = c.stack[sp-2], c.stack[sp-1]
}
// tos returns the top of the compile-time stack without popping.
func (c *CG) tos() int {
	return c.stack[len(c.stack)-1]
}
// push pushes v onto the compile-time stack.
func (c *CG) push(v int) {
	c.stack = append(c.stack, v)
}
// pop removes and returns the top of the compile-time stack.
func (c *CG) pop() int {
	l := len(c.stack) - 1
	v := c.stack[l]
	c.stack = c.stack[:l]
	return v
}
// tag records a relocation for the word most recently emitted into the
// given segment ('t' or 'd').
func (c *CG) tag(seg int) {
	addr := len(c.data) - bpw
	if seg == 't' {
		addr = len(c.text) - bpw
	}
	c.rel = append(c.rel, reloc{addr, seg})
}
// tpatch overwrites the word at text address a with x.
func (c *CG) tpatch(a, x int) {
	binary.LittleEndian.PutUint32(c.text[a:], uint32(x))
}
// dpatch overwrites the word at data address a with x.
func (c *CG) dpatch(a, x int) {
	binary.LittleEndian.PutUint32(c.data[a:], uint32(x))
}
// store emits a store into y: global when u is the global scope,
// otherwise local (frame-relative).
func (c *CG) store(u *scope, y *symbol) {
	if u == c.gsp {
		c.gen(ostglob, y.value)
	} else {
		c.gen(ostlocl, y.value)
	}
}
// load emits a load of y, global or local depending on its scope.
func (c *CG) load(u *scope, y *symbol) {
	if u == c.gsp {
		c.gen(oldglob, y.value)
	} else {
		c.gen(oldlocl, y.value)
	}
}
// tfetch reads the word at text address a.
func (c *CG) tfetch(a int) int {
	return int(binary.LittleEndian.Uint32(c.text[a:]))
}
// dfetch reads the word at data address a.
func (c *CG) dfetch(a int) int {
	return int(binary.LittleEndian.Uint32(c.data[a:]))
}
// resolvefwd patches a chain of forward call references: each slot
// holds the address of the previous one, 0 terminating the chain.
func (c *CG) resolvefwd(loc, fn int) {
	for loc != 0 {
		nloc := c.tfetch(loc)
		c.tpatch(loc, fn-loc-bpw)
		loc = nloc
	}
}
// emit appends one byte to the text segment.
func (c *CG) emit(x uint8) {
	c.text = append(c.text, x)
}
// emitw appends a little-endian word to the text segment.
func (c *CG) emitw(x int) {
	c.emit(uint8(x))
	c.emit(uint8(x >> 8))
	c.emit(uint8(x >> 16))
	c.emit(uint8(x >> 24))
}
// datab appends one byte to the data segment.
func (c *CG) datab(x uint8) {
	c.data = append(c.data, x)
}
// dataw appends a little-endian word to the data segment.
func (c *CG) dataw(x int) {
	c.datab(uint8(x))
	c.datab(uint8(x >> 8))
	c.datab(uint8(x >> 16))
	c.datab(uint8(x >> 24))
}
// mkstring copies a NUL-terminated, word-padded string into the data
// segment and returns its address. Surrounding double quotes, when
// present, are stripped first.
func (c *CG) mkstring(s string) int {
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		s = s[1 : len(s)-1]
	}
	addr := len(c.data)
	for _, b := range []byte(s) {
		c.datab(b)
	}
	c.datab(0)
	for len(c.data)%bpw != 0 {
		c.datab(0)
	}
	return addr
}
// mkbytevec copies a packed byte vector into the data segment, padded
// to a word boundary, and returns its address. Values above 255 cannot
// be packed and abort compilation.
func (c *CG) mkbytevec(b *ast.ByteVecExpr) int {
	a := len(c.data)
	for _, v := range b.Data {
		if v > 255 {
			panic(fmt.Errorf("packed byte value too large: %v", v))
		}
		c.datab(uint8(v))
	}
	for len(c.data)%bpw != 0 {
		c.datab(0)
	}
	return a
}
// mktable emits a table literal into the data segment and returns its
// address. For each member, af records how its slot is finalized:
// 0 = immediate value, 1 = address needing a data relocation,
// >1 = text address of an ostglob operand to patch (dynamic member).
func (c *CG) mktable(t *ast.TableExpr) int {
	var tbl, af []int
	for _, m := range t.Member {
		switch m := m.(type) {
		case *ast.BasicLit:
			switch m.Type {
			case lex.STRING:
				tbl = append(tbl, c.mkstring(m.Value))
				af = append(af, 1)
			default:
				panic(fmt.Errorf("unknown table type %s", m.Type))
			}
		case *ast.ConstExpr:
			tbl = append(tbl, m.Value)
			af = append(af, 0)
		case *ast.ParenExpr:
			// Dynamic member: evaluate at run time and store into the
			// table slot; remember where the ostglob operand sits so it
			// can be patched with the slot's address below.
			c.expr(m.X, true)
			c.gen(ostglob, 0)
			tbl = append(tbl, 0)
			af = append(af, len(c.text)-bpw)
		case *ast.TableExpr:
			tbl = append(tbl, c.mktable(m))
			af = append(af, 1)
		case *ast.ByteVecExpr:
			tbl = append(tbl, c.mkbytevec(m))
			af = append(af, 1)
		default:
			panic(fmt.Errorf("unknown table type %T", m))
		}
	}
	loc := len(c.data)
	for i := range tbl {
		c.dataw(tbl[i])
		if af[i] == 1 {
			c.tag('d')
		} else if af[i] > 1 {
			// Address of the word just emitted; was a magic 4, now bpw
			// for consistency with the rest of the generator.
			c.tpatch(af[i], len(c.data)-bpw)
		}
	}
	return loc
}
// gen assembles one instruction. s is "<hexop>[,<mode>]", or ",<mode>"
// for pseudo-ops that emit no opcode byte; v is the operand, if any.
// Modes: b/w/W operand byte/word, a address word with relocation,
// m mark position, > forward reference, < backward jump, r resolve.
func (c *CG) gen(s string, v int) {
	var op uint16
	var mode rune
	var x16, nop bool
	if strings.HasPrefix(s, ",") {
		// Pseudo-op: mode only, no opcode byte.
		fmt.Sscanf(s, ",%c", &mode)
		nop = true
	} else if strings.IndexRune(s, ',') > 0 {
		fmt.Sscanf(s, "%x,%c", &op, &mode)
	} else {
		fmt.Sscanf(s, "%x", &op)
	}
	if op&0xff00 != 0 {
		// Two-byte opcode: swap so the high byte is emitted second.
		op = (op>>8)&0xff | (op<<8)&0xff00
		x16 = true
	}
	if -0x80 <= v && v <= 0x7f {
		// Small operand: downgrade word mode to byte mode and set the
		// short-operand flag in the opcode.
		// NOTE(review): the 0x80 flag is also set for opcodes that take
		// no operand (v defaults to 0) — confirm the VM ignores it there.
		if mode == 'w' {
			mode = 'b'
		}
		op |= 0x80
	}
	if !nop {
		c.emit(uint8(op))
		if x16 {
			c.emit(uint8(op >> 8))
		}
	}
	switch mode {
	case 'b':
		c.emit(uint8(v))
	case 'w', 'W':
		c.emitw(v)
	case 'a':
		// Address operand: record a text-segment relocation.
		c.emitw(v)
		c.tag('t')
	case 'm':
		// Mark the current position for a later backward jump.
		c.push(len(c.text))
	case '>':
		// Forward reference: placeholder word, patched by a later 'r'.
		c.push(len(c.text))
		c.emitw(0)
	case '<':
		// Backward jump: displacement to the marked position.
		c.emitw(c.pop() - len(c.text) - bpw)
	case 'r':
		// Resolve the most recent forward reference to land here.
		x := c.pop()
		c.tpatch(x, len(c.text)-x-bpw)
	case 0:
	default:
		panic(fmt.Errorf("unsupported op %q", s))
	}
}
// newscope wraps the AST scope u in a symbol-table level chained to p.
func (c *CG) newscope(p *scope, u *ast.Scope) *scope {
	v := &scope{
		Scope: u,
		parent: p,
		symbol: make(map[string]*symbol),
	}
	return v
}
// add binds s to value v in the current scope (names are
// case-insensitive). If the name already exists in the current scope,
// the existing entry is returned unchanged — used for forward DECLs.
func (c *CG) add(s *ast.Symbol, v int) (*scope, *symbol) {
	name := strings.ToLower(s.Ident)
	u, y := c.lookup(name)
	if u == c.csp && y != nil {
		return u, y
	}
	y = &symbol{Symbol: s, value: v}
	c.csp.symbol[name] = y
	return c.csp, y
}
// lookup searches for name from the current scope outward, returning
// the scope it was found in, or (nil, nil) when undeclared.
func (c *CG) lookup(name string) (*scope, *symbol) {
	name = strings.ToLower(name)
	u := c.csp
	for u != nil {
		y := u.symbol[name]
		if y != nil {
			return u, y
		}
		u = u.parent
	}
	return nil, nil
}
// program compiles the whole program: install the runtime built-ins,
// emit all global declarations, then the main compound statement
// bracketed by oenter/ohalt.
func (c *CG) program(p *ast.Prog) {
	c.gsp = c.newscope(nil, p.Scope)
	c.csp = c.gsp
	c.defs = p.Defs
	c.builtin("t.memcomp", tcomp)
	c.builtin("t.memcopy", tcopy)
	c.builtin("t.memfill", tfill)
	c.builtin("t.memscan", tscan)
	c.builtin("t.create", tcreate)
	c.builtin("t.open", topen)
	c.builtin("t.close", tclose)
	c.builtin("t.read", tread)
	c.builtin("t.write", twrite)
	c.builtin("t.rename", trename)
	c.builtin("t.remove", tremove)
	c.declaration(c.csp)
	c.gen(oenter, 0)
	c.compound(p.Stmt)
	c.gen(ohalt, 0)
}
// declaration emits code/data for every symbol declared in scope u:
// constants bind their value, forward DECLs start a fixup chain at 0,
// functions get bodies, variables get storage.
func (c *CG) declaration(u *scope) {
	for _, s := range u.Symbol {
		switch {
		case s.Type&ast.CONST != 0:
			c.add(s, s.Value.(*ast.ConstExpr).Value)
		case s.Type&ast.DECL != 0:
			c.add(s, 0)
		case s.Type&ast.FUNC != 0:
			c.funcdecl(s)
		case s.Type&ast.VAR != 0:
			c.vardecl(s)
		}
	}
}
// funcdecl compiles a function body: jump over it, bind its entry
// address, bind the arguments at their frame offsets, resolve any
// forward-call chain, then emit enter/body/clear/exit.
func (c *CG) funcdecl(s *ast.Symbol) {
	f, ok := c.defs[s].(*ast.FuncDecl)
	if !ok {
		return
	}
	c.gen(ojumpfwd, 0)
	_, y := c.add(s, len(c.text))
	c.csp = c.newscope(c.csp, f.Scope)
	laddr := 2 * bpw
	for _, a := range f.Arg {
		// Arguments sit above the saved frame; the constant 12 is
		// presumably the fixed frame header size — confirm against the
		// tcvm calling convention.
		c.add(a, 12+len(f.Arg)*bpw-laddr)
		laddr += bpw
	}
	if y.Type&ast.DECL != 0 {
		// The symbol was forward-declared: patch the call chain rooted
		// at y.value, then rebind to the real entry address.
		c.resolvefwd(y.value, len(c.text))
		y.Symbol = s
		y.value = len(c.text)
	}
	c.gen(oenter, 0)
	c.stmt(f.Body)
	c.gen(oclear, 0)
	c.gen(oexit, 0)
	c.gen(oresolv, 0)
	c.csp = c.csp.parent
}
// vardecl allocates storage for a variable: globals go in the data
// segment (vectors additionally get a run-time oglobvec fixup), locals
// grow the frame downward via oalloc, vectors adding a header word.
func (c *CG) vardecl(s *ast.Symbol) {
	u, y := c.add(s, c.lp)
	size := 1
	switch {
	case y.Type&ast.BYTE != 0:
		size = (y.Len + bpw - 1) / bpw
	case y.Type&ast.VECT != 0:
		size = y.Len
	}
	if u == c.gsp {
		y.value = len(c.data)
		if y.Type&ast.VECT != 0 {
			c.gen(oalloc, size*bpw)
			c.gen(oglobvec, len(c.data))
		}
		c.dataw(0)
	} else {
		c.gen(oalloc, size*bpw)
		c.lp -= size * bpw
		if y.Type&ast.VECT != 0 {
			c.gen(oloclvec, 0)
			c.lp -= bpw
		}
		y.value = c.lp
	}
}
// compound compiles a block: open a scope, emit its declarations and
// statements, then free any locals the block allocated.
func (c *CG) compound(s *ast.CompoundStmt) {
	c.csp = c.newscope(c.csp, s.Scope)
	lp := c.lp
	c.declaration(c.csp)
	for _, s := range s.Stmt {
		c.stmt(s)
	}
	if lp != c.lp {
		c.gen(odealloc, lp-c.lp)
	}
	c.lp = lp
	c.csp = c.csp.parent
}
// stmt dispatches one statement to its code generator; unknown
// statement kinds abort compilation.
func (c *CG) stmt(s ast.Stmt) {
	switch s := s.(type) {
	case *ast.CompoundStmt:
		c.compound(s)
	case *ast.AssignStmt:
		c.assignstmt(s)
	case *ast.ExprStmt:
		// Only call expressions are valid as statements.
		switch e := s.X.(type) {
		case *ast.CallExpr:
			c.callstmt(e)
		default:
			panic(fmt.Errorf("unsupported statement %T", s))
		}
	case *ast.IfStmt:
		c.ifstmt(s)
	case *ast.WhileStmt:
		c.whilestmt(s)
	case *ast.ForStmt:
		c.forstmt(s)
	case *ast.BranchStmt:
		switch s.Tok {
		case lex.LEAVE:
			c.leavestmt(s)
		case lex.LOOP:
			c.loopstmt(s)
		}
	case *ast.ReturnStmt:
		switch s.Tok {
		case lex.HALT:
			c.haltstmt(s)
		case lex.RETURN:
			c.returnstmt(s)
		default:
			panic(fmt.Errorf("unsupported statement %T", s))
		}
	case *ast.EmptyStmt:
	default:
		panic(fmt.Errorf("unsupported statement %T", s))
	}
}
// assignstmt compiles an assignment: when the LHS resolves to a plain
// symbol it stores directly; otherwise it stores indirectly through
// the computed address (byte or word sized).
func (c *CG) assignstmt(s *ast.AssignStmt) {
	c.clear()
	u, y, b := c.index(s.Lhs)
	c.expr(s.Rhs, false)
	if y == nil {
		if b {
			c.gen(ostindb, 0)
		} else {
			c.gen(ostindr, 0)
		}
	} else {
		c.store(u, y)
	}
}
// callstmt compiles a call used as a statement.
func (c *CG) callstmt(s *ast.CallExpr) {
	c.clear()
	c.index(s.Fun)
	c.fncall(s)
}
// leavestmt emits a forward jump out of the enclosing loop; the jump
// is resolved when the loop ends.
func (c *CG) leavestmt(s *ast.BranchStmt) {
	c.gen(ojumpfwd, 0)
	c.leaves = append(c.leaves, c.pop())
}
// loopstmt jumps back to the top of a WHILE (loop0 set), or records a
// forward jump to a FOR loop's increment code (loop0 == 0).
func (c *CG) loopstmt(s *ast.BranchStmt) {
	if c.loop0 > 0 {
		c.push(c.loop0)
		c.gen(ojumpback, 0)
	} else {
		c.gen(ojumpfwd, 0)
		c.loops = append(c.loops, c.pop())
	}
}
// ifstmt compiles IF/ELSE with a conditional forward jump over the
// body and, when present, an unconditional jump over the ELSE part.
func (c *CG) ifstmt(s *ast.IfStmt) {
	c.expr(s.Cond, true)
	c.gen(ojmpfalse, 0)
	c.stmt(s.Body)
	if s.Else != nil {
		c.gen(ojumpfwd, 0)
		c.swap()
		c.gen(oresolv, 0)
		c.stmt(s.Else)
	}
	c.gen(oresolv, 0)
}
// whilestmt compiles WHILE: mark the loop top, test, body, jump back,
// then resolve the exit and any LEAVE jumps recorded inside the body.
func (c *CG) whilestmt(s *ast.WhileStmt) {
	lp0 := c.loop0
	lvp := len(c.leaves)
	c.gen(omark, 0)
	c.loop0 = c.tos()
	c.expr(s.Cond, true)
	c.gen(ojmpfalse, 0)
	c.stmt(s.Body)
	c.swap()
	c.gen(ojumpback, 0)
	c.gen(oresolv, 0)
	for len(c.leaves) > lvp {
		c.push(c.leaves[len(c.leaves)-1])
		c.gen(oresolv, 0)
		c.leaves = c.leaves[:len(c.leaves)-1]
	}
	c.loop0 = lp0
}
// forstmt generates code for a counted for loop: initialize the loop
// variable, test it against the limit (ofor/ofordown depending on the step
// sign), run the body, backpatch LOOP jumps to the increment, increment by
// the step, jump back, and finally backpatch the exit and LEAVE jumps.
func (c *CG) forstmt(s *ast.ForStmt) {
	llp := len(c.loops)  // LOOP fixups recorded before this loop are not ours
	lvp := len(c.leaves) // likewise for LEAVE fixups
	lp0 := c.loop0
	c.loop0 = 0 // signal loopstmt that LOOP must be backpatched, not jumped
	u, y := c.lookup(s.Name.Ident)
	c.expr(s.Init, true)
	c.store(u, y) // loop variable := initial value
	c.gen(omark, 0)
	c.load(u, y)
	c.expr(s.Cond, false)
	// The step defaults to 1; a negative step counts downward.
	step := 1
	if s.Post != nil {
		step = s.Post.Value
	}
	if step < 0 {
		c.gen(ofordown, 0)
	} else {
		c.gen(ofor, 0)
	}
	c.stmt(s.Body)
	// LOOP statements inside the body jump here, to the increment code.
	for len(c.loops) > llp {
		c.push(c.loops[len(c.loops)-1])
		c.gen(oresolv, 0)
		c.loops = c.loops[:len(c.loops)-1]
	}
	// Increment the loop variable in place (global vs. local variants).
	if u == c.gsp {
		c.gen(oincglob, y.value)
	} else {
		c.gen(oinclocl, y.value)
	}
	c.gen(oword, step)
	c.swap()
	c.gen(ojumpback, 0)
	c.gen(oresolv, 0) // loop exit lands here
	for len(c.leaves) > lvp {
		c.push(c.leaves[len(c.leaves)-1])
		c.gen(oresolv, 0)
		c.leaves = c.leaves[:len(c.leaves)-1]
	}
	c.loop0 = lp0
}
// haltstmt emits a halt instruction carrying the program's exit value.
// NOTE(review): the type assertion assumes the parser guarantees HALT's
// operand is a constant expression; a non-constant operand would panic here.
func (c *CG) haltstmt(s *ast.ReturnStmt) {
	c.gen(ohalt, s.Value.(*ast.ConstExpr).Value)
}
// returnstmt generates a function return: evaluate the return value (or
// emit oclear when there is none), deallocate the local frame if one was
// allocated, and emit the exit instruction.
func (c *CG) returnstmt(s *ast.ReturnStmt) {
	if s.Value == nil {
		c.gen(oclear, 0)
	} else {
		c.expr(s.Value, true)
	}
	if c.lp != 0 {
		// Release the locals before leaving the function.
		c.gen(odealloc, -c.lp)
	}
	c.gen(oexit, 0)
}
// expr generates code for an expression; when clr is true the accumulator
// state is cleared first so the result starts a fresh computation.
func (c *CG) expr(e ast.Expr, clr bool) {
	if clr {
		c.clear()
	}
	c.sexpr(e)
}
// factor generates code for a primary expression: literals, symbols,
// parenthesized expressions, calls, and table/bytevec constructors.
func (c *CG) factor(e ast.Expr) {
	switch e := e.(type) {
	case *ast.BasicLit:
		switch e.Type {
		case lex.INTEGER:
			str := e.Value
			// A leading '%' presumably marks a negative literal from the
			// lexer; rewrite it into a '-' sign for ParseInt. TODO confirm.
			if strings.HasPrefix(str, "%") {
				str = "-" + str[1:]
			}
			// NOTE(review): a parse error is silently ignored and yields 0.
			val, _ := strconv.ParseInt(str, 0, 64)
			c.spill()
			c.gen(oldval, int(val))
		case lex.STRING:
			// String literals are interned in the data segment; load their
			// address.
			c.spill()
			c.gen(oldaddr, c.mkstring(e.Value))
		}
	case *ast.Symbol:
		c.address(e.Ident, 0, false)
	case *ast.ParenExpr:
		c.expr(e.X, false)
	case *ast.CallExpr:
		c.fncall(e)
	case *ast.TableExpr:
		c.spill()
		c.gen(oldaddr, c.mktable(e))
	case *ast.ByteVecExpr:
		c.spill()
		c.gen(oldaddr, c.mkbytevec(e))
	default:
		panic(fmt.Errorf("unsupported expression %T", e))
	}
}
// fncall generates a function call: evaluate the arguments, emit the call,
// and pop the argument space afterwards. For a forward-declared callee
// (DECL set) the call sites form a linked backpatch chain threaded through
// y.value; defined callees get a relative offset.
func (c *CG) fncall(e *ast.CallExpr) {
	_, y := c.lookup(e.Name.Ident)
	for _, a := range e.Arg {
		c.expr(a, false)
	}
	if c.loaded {
		c.spill()
	}
	if y.Type&ast.DECL != 0 {
		// Not yet defined: emit the previous chain head as the operand and
		// make this call site the new head of the backpatch chain.
		c.gen(ocall, y.value)
		y.value = len(c.text) - bpw
	} else {
		// Defined: emit a pc-relative offset. The constant 5 is presumably
		// the encoded size of the call instruction — TODO confirm.
		c.gen(ocall, y.value-len(c.text)-5)
	}
	if len(e.Arg) > 0 {
		c.gen(odealloc, len(e.Arg)*bpw)
	}
	c.loaded = true
}
// lexpr generates short-circuit code for a logical binary expression: the
// left operand is evaluated, op (ojmpfalse for AND, ojmptrue for OR) emits a
// conditional skip, and the right operand is evaluated only if reached.
func (c *CG) lexpr(op string, e *ast.BinaryExpr) {
	c.sexpr(e.X)
	c.gen(op, 0)
	c.clear()
	c.sexpr(e.Y)
}
// index resolves a possibly-nested index expression used as an lvalue or
// address. It unwinds the IndexExpr chain to the base symbol, then walks it
// outside-in, emitting indexing (and dereferencing) code. On return, y is
// nil when indexing occurred (the element address is left on the stack) and
// bp reports whether the final access is byte-sized.
func (c *CG) index(e ast.Expr) (u *scope, y *symbol, bp bool) {
	// Collect the chain: innermost IndexExpr first, base expression last.
	var p []ast.Expr
loop:
	for {
		p = append(p, e)
		switch t := e.(type) {
		case *ast.IndexExpr:
			e = t.X
		default:
			break loop
		}
	}
	var lv int
	// Walk from the base symbol outwards through each subscript.
	for i := len(p) - 1; i >= 0; i-- {
		switch t := p[i].(type) {
		case *ast.Symbol:
			lv = t.Value.(int) // lvalue level of the base symbol
			u, y = c.address(t.Ident, lv, len(p) > 1)
		case *ast.IndexExpr:
			if t.Byte {
				// Byte-wise subscript: index then (for rvalues) dereference
				// a single byte.
				c.factor(t.Index)
				c.gen(oindxb, 0)
				if lv == 0 {
					c.gen(odrefb, 0)
				}
				bp = true
				break
			}
			// Word-wise subscript.
			c.expr(t.Index, false)
			c.gen(oindex, 0)
			// Dereference intermediate levels always, the final level only
			// for rvalue access.
			if i > 0 || lv == 0 {
				c.gen(oderef, 0)
			}
		default:
			panic(fmt.Errorf("unsupported index expression: %T", t))
		}
	}
	if len(p) > 1 {
		// Indexed access: the caller must store/load indirectly.
		y = nil
	}
	return
}
// cond generates code for a conditional (ternary) expression using the same
// jump/backpatch scheme as ifstmt: X selects between Y and Z.
func (c *CG) cond(e *ast.CondExpr) {
	c.sexpr(e.X)
	c.gen(ojmpfalse, 0) // taken when X is false, lands at Z
	c.expr(e.Y, true)
	c.gen(ojumpfwd, 0) // skip Z after evaluating Y
	c.swap()
	c.gen(oresolv, 0)
	c.expr(e.Z, true)
	c.gen(oresolv, 0)
}
// sexpr generates code for a sub-expression: index and conditional
// expressions, short-circuit logical operators, binary and unary operators,
// and (via factor) primaries. Unknown operators panic, signalling a
// parser/codegen mismatch.
//
// Fix: the default case of the unary-operator switch previously reported
// "unsupported binary op"; it now correctly says "unsupported unary op".
func (c *CG) sexpr(e ast.Expr) {
	switch e := e.(type) {
	case *ast.IndexExpr:
		c.index(e)
	case *ast.CondExpr:
		c.cond(e)
	case *ast.LogicalExpr:
		// Resolve the pending short-circuit jumps accumulated at this
		// nesting depth.
		c.sexpr(e.X)
		for i := 0; i < e.Depth; i++ {
			c.gen(oresolv, 0)
		}
	case *ast.BinaryExpr:
		// Logical conjunction/disjunction use short-circuit evaluation.
		if e.Op == lex.CONJ {
			c.lexpr(ojmpfalse, e)
			break
		} else if e.Op == lex.DISJ {
			c.lexpr(ojmptrue, e)
			break
		}
		c.sexpr(e.X)
		c.sexpr(e.Y)
		switch e.Op {
		case lex.ADD:
			c.gen(oadd, 0)
		case lex.SUB:
			c.gen(osub, 0)
		case lex.MUL:
			c.gen(omul, 0)
		case lex.DIV:
			c.gen(odiv, 0)
		case lex.MOD:
			c.gen(omod, 0)
		case lex.LT:
			c.gen(olt, 0)
		case lex.LE:
			c.gen(ole, 0)
		case lex.GT:
			c.gen(ogt, 0)
		case lex.GE:
			c.gen(oge, 0)
		case lex.NEQ:
			c.gen(oneq, 0)
		case lex.EQ:
			c.gen(oeq, 0)
		case lex.AND:
			c.gen(oand, 0)
		case lex.OR:
			c.gen(oor, 0)
		case lex.XOR:
			c.gen(oxor, 0)
		case lex.SHL:
			c.gen(oshl, 0)
		case lex.SHR:
			c.gen(oshr, 0)
		default:
			panic(fmt.Errorf("unsupported binary op %v", e.Op))
		}
	case *ast.UnaryExpr:
		if e.Op == lex.ADDROF {
			// Address-of: for indexed operands the address is already on
			// the stack; otherwise load the symbol's address directly
			// (global vs. local variants).
			u, y, _ := c.index(e.X)
			if y == nil {
				// nop
			} else if u == c.gsp {
				c.spill()
				c.gen(oldaddr, y.value)
			} else {
				c.spill()
				c.gen(oldlref, y.value)
			}
			break
		}
		c.sexpr(e.X)
		switch e.Op {
		case lex.SUB:
			c.gen(oneg, 0)
		case lex.INV:
			c.gen(oinv, 0)
		case lex.LNOT:
			c.gen(olognot, 0)
		default:
			panic(fmt.Errorf("unsupported unary op %v", e.Op))
		}
	default:
		c.factor(e)
	}
}
// address resolves a symbol and, for constants or rvalue/indexed uses,
// loads its value. lv is the lvalue level (0 means rvalue context) and idx
// reports whether the symbol is about to be indexed.
func (c *CG) address(name string, lv int, idx bool) (u *scope, y *symbol) {
	u, y = c.lookup(name)
	if y.Type&ast.CONST != 0 {
		// Constants are folded into an immediate load.
		c.spill()
		c.gen(oldval, y.value)
	} else if lv == 0 || idx {
		// Rvalue use, or base of an index expression: load the value.
		c.spill()
		c.load(u, y)
	}
	return
}
// pad appends zero bytes to b so its length becomes a multiple of the
// alignment a (assumed to be a power of two).
//
// NOTE(review): (len(b)+a) &^ (a-1) always adds a full alignment block when
// len(b) is already aligned; the conventional round-up is
// (len(b)+a-1) &^ (a-1). Confirm the extra padding is intentional.
func (c *CG) pad(b []byte, a int) []byte {
	n := (len(b) + a) &^ (a - 1)
	return append(b, make([]byte, n-len(b))...)
}
|
package array
import (
"github.com/project-flogo/core/data"
"github.com/project-flogo/core/data/expression/function"
"github.com/project-flogo/core/support/log"
"reflect"
)
// appendFunc implements the "append" expression function: it appends a
// value (or every element of a slice) to an existing array value.
type appendFunc struct {
}
// init registers the append function with the expression-function registry
// at package load time.
func init() {
	function.Register(&appendFunc{})
}
// Name returns the function's name as used in expressions.
func (a *appendFunc) Name() string {
	return "append"
}
// Sig declares the function's signature: two parameters of any type (the
// target array and the value to append), not variadic.
func (appendFunc) Sig() (paramTypes []data.Type, isVariadic bool) {
	return []data.Type{data.TypeAny, data.TypeAny}, false
}
// Eval appends params[1] to the array params[0] and returns the resulting
// array. A nil item returns the array unchanged; a nil array yields a new
// single-element slice of the item's type. When the item is itself a slice,
// its elements are appended individually (a flattening append).
//
// NOTE(review): reflect.Append panics if the item's type does not match the
// slice's element type; callers presumably guarantee compatible types.
// A non-slice, non-nil params[0] is silently returned unchanged.
func (appendFunc) Eval(params ...interface{}) (interface{}, error) {
	items := params[0]
	item := params[1]
	log.RootLogger().Debugf("Start array appendFunc function with parameters %+v and %+v", items, item)
	if item == nil {
		//Do nothing
		return items, nil
	}
	if items == nil {
		// No existing array: build a fresh one-element slice of the item's
		// dynamic type.
		newitems := reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf(item)), 1, 1)
		newitems.Index(0).Set(reflect.ValueOf(item))
		return newitems.Interface(), nil
	}
	arrV := reflect.ValueOf(items)
	if arrV.Kind() == reflect.Slice {
		item := reflect.ValueOf(item)
		if item.Kind() == reflect.Slice {
			// Appending a slice appends each of its elements.
			for i := 0; i < item.Len(); i++ {
				arrV = reflect.Append(arrV, item.Index(i))
			}
		} else {
			arrV = reflect.Append(arrV, item)
		}
	}
	log.RootLogger().Debugf("array append function done, final array %+v", arrV.Interface())
	return arrV.Interface(), nil
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package optimizers
import (
"testing"
"planners"
"github.com/stretchr/testify/assert"
)
// TestOptimizePredicatePushDown verifies that the default optimizers push a
// filter predicate down into the scan node: after optimization, the scan
// plan's Filter must equal the original top-level filter plan.
func TestOptimizePredicatePushDown(t *testing.T) {
	plan := planners.NewMapPlan(
		planners.NewScanPlan("tables", "system"),
		planners.NewProjectPlan(
			planners.NewMapPlan(
				planners.NewVariablePlan("name"),
			),
		),
		planners.NewFilterPlan(
			planners.NewBinaryExpressionPlan(
				"=",
				planners.NewVariablePlan("name"),
				planners.NewConstantPlan("db2"),
			),
		),
	)
	plan = Optimize(plan, DefaultOptimizers).(*planners.MapPlan)
	// The filter (sub-plan 2) should now also hang off the scan (sub-plan 0).
	expect := plan.SubPlans[2]
	actual := plan.SubPlans[0].(*planners.ScanPlan).Filter
	assert.Equal(t, expect, actual)
}
|
package troubleshoot
import (
"context"
"net/http"
"testing"
dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
"github.com/Dynatrace/dynatrace-operator/src/dtclient"
"github.com/Dynatrace/dynatrace-operator/src/kubeobjects/address"
"github.com/Dynatrace/dynatrace-operator/src/scheme"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
// Shared fixtures for the troubleshoot tests in this file.
const (
	testRegistry = "testing.dev.dynatracelabs.com"
	testApiUrl = "https://" + testRegistry + "/api" // well-formed API URL
	testOtherApiUrl = "https://" + testRegistry + "/otherapi" // deliberately invalid suffix
	testDynatraceSecret = testDynakube
	testOtherDynatraceSecret = "otherDynatraceSecret"
	testApiToken = "apiTokenValue"
	testPaasToken = "passTokenValue"
	testSecretName = "customSecret"
	testCustomPullSecretToken = "secretTokenValue"
)
// errorClient wraps client.Client and fails every List call, used to
// exercise error paths independent of Kubernetes state.
type errorClient struct {
	client.Client
}

// List always returns an error to simulate an API-server failure.
func (errorClt *errorClient) List(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error {
	return errors.New("fake error")
}
// TestDynakubeCRD covers checkCRD: a client without the Dynakube CRD must
// report the CRD as missing, and an unrelated List failure must surface as
// a listing error instead.
func TestDynakubeCRD(t *testing.T) {
	t.Run("crd does not exist", func(t *testing.T) {
		clt := fake.NewClientBuilder().Build() // no scheme -> CRD unknown
		troubleshootCtx := troubleshootContext{apiReader: clt, namespaceName: testNamespace}
		assert.ErrorContains(t, checkCRD(&troubleshootCtx), "CRD for Dynakube missing")
	})
	t.Run("unrelated error", func(t *testing.T) {
		troubleshootCtx := troubleshootContext{apiReader: &errorClient{}, namespaceName: testNamespace}
		assert.ErrorContains(t, checkCRD(&troubleshootCtx), "could not list Dynakube")
	})
}
// TestDynakube covers getSelectedDynakube: the selected Dynakube must be
// found when it exists in the configured namespace, and lookups for a
// missing name or a wrong namespace must fail.
func TestDynakube(t *testing.T) {
	t.Run("dynakube exists", func(t *testing.T) {
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				testNewDynakubeBuilder(testNamespace, testDynakube).build(),
				testBuildNamespace(testNamespace),
			).
			Build()
		troubleshootCtx := troubleshootContext{
			apiReader:     clt,
			namespaceName: testNamespace,
			dynakube:      *testNewDynakubeBuilder(testNamespace, testDynakube).build(),
		}
		assert.NoErrorf(t, getSelectedDynakube(&troubleshootCtx), "no dynakube found")
	})
	t.Run("dynakube does not exist", func(t *testing.T) {
		// Cluster contains testDynakube, but the context selects a
		// non-existent name.
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				testNewDynakubeBuilder(testNamespace, testDynakube).build(),
				testBuildNamespace(testNamespace),
			).
			Build()
		troubleshootCtx := troubleshootContext{
			apiReader:     clt,
			namespaceName: testNamespace,
			dynakube:      *testNewDynakubeBuilder(testNamespace, "doesnotexist").build(),
		}
		assert.Errorf(t, getSelectedDynakube(&troubleshootCtx), "dynakube found")
	})
	t.Run("invalid namespace selected", func(t *testing.T) {
		// The Dynakube lives in testNamespace but the context looks in
		// testOtherNamespace.
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				testNewDynakubeBuilder(testNamespace, testDynakube).build(),
				testBuildNamespace(testNamespace),
			).
			Build()
		troubleshootCtx := troubleshootContext{apiReader: clt, namespaceName: testOtherNamespace}
		assert.Errorf(t, getSelectedDynakube(&troubleshootCtx), "dynakube found")
	})
}
// TestApiUrl covers checkApiUrlSyntax: a URL ending in "/api" is accepted,
// any other suffix is rejected.
func TestApiUrl(t *testing.T) {
	t.Run("valid ApiUrl", func(t *testing.T) {
		troubleshootCtx := troubleshootContext{namespaceName: testNamespace, dynakube: *testNewDynakubeBuilder(testNamespace, testDynakube).withApiUrl(testApiUrl).build()}
		assert.NoErrorf(t, checkApiUrlSyntax(&troubleshootCtx), "invalid ApiUrl")
	})
	t.Run("invalid ApiUrl", func(t *testing.T) {
		troubleshootCtx := troubleshootContext{namespaceName: testNamespace, dynakube: *testNewDynakubeBuilder(testNamespace, testDynakube).withApiUrl(testOtherApiUrl).build()}
		assert.Errorf(t, checkApiUrlSyntax(&troubleshootCtx), "valid ApiUrl")
	})
}
// TestDynatraceSecret covers the Dynatrace API secret checks: the secret
// referenced by the Dynakube must exist and must contain an "apiToken" key.
func TestDynatraceSecret(t *testing.T) {
	t.Run("Dynatrace secret exists", func(t *testing.T) {
		dynakube := testNewDynakubeBuilder(testNamespace, testDynakube).withTokens(testDynatraceSecret).build()
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				dynakube,
				testBuildNamespace(testNamespace),
				testNewSecretBuilder(testNamespace, testDynatraceSecret).build(),
			).
			Build()
		troubleshootCtx := troubleshootContext{
			apiReader:     clt,
			namespaceName: testNamespace,
			dynakube:      *dynakube,
		}
		// NOTE(review): this asserts getSelectedDynakube, not a secret
		// check; confirm that is the intended function under test here.
		assert.NoErrorf(t, getSelectedDynakube(&troubleshootCtx), "Dynatrace secret not found")
	})
	t.Run("Dynatrace secret does not exist", func(t *testing.T) {
		// Only a differently-named secret exists, so the token check fails.
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				testNewDynakubeBuilder(testNamespace, testDynakube).withTokens(testDynatraceSecret).build(),
				testBuildNamespace(testNamespace),
				testNewSecretBuilder(testNamespace, testOtherDynatraceSecret).build(),
			).
			Build()
		troubleshootCtx := troubleshootContext{
			apiReader:     clt,
			namespaceName: testNamespace,
			dynakube:      *testNewDynakubeBuilder(testNamespace, testDynakube).build(),
		}
		assert.Errorf(t, checkIfDynatraceApiSecretHasApiToken(&troubleshootCtx), "Dynatrace secret found")
	})
	t.Run("Dynatrace secret has apiToken token", func(t *testing.T) {
		dynakube := testNewDynakubeBuilder(testNamespace, testDynakube).withTokens(testDynatraceSecret).build()
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				dynakube,
				testBuildNamespace(testNamespace),
				testNewSecretBuilder(testNamespace, testDynatraceSecret).dataAppend("apiToken", testApiToken).dataAppend("paasToken", testPaasToken).build(),
			).
			Build()
		troubleshootCtx := troubleshootContext{
			apiReader:     clt,
			namespaceName: testNamespace,
			dynakube:      *dynakube,
		}
		assert.NoErrorf(t, checkIfDynatraceApiSecretHasApiToken(&troubleshootCtx), "Dynatrace secret does not have required tokens")
	})
	t.Run("Dynatrace secret - apiToken is missing", func(t *testing.T) {
		// Secret exists but only carries paasToken, so the check must fail.
		dynakube := testNewDynakubeBuilder(testNamespace, testDynakube).withTokens(testDynatraceSecret).build()
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				dynakube,
				testBuildNamespace(testNamespace),
				testNewSecretBuilder(testNamespace, testDynatraceSecret).dataAppend("paasToken", testPaasToken).build(),
			).
			Build()
		troubleshootCtx := troubleshootContext{
			apiReader:     clt,
			namespaceName: testNamespace,
			dynakube:      *dynakube,
		}
		assert.Errorf(t, checkIfDynatraceApiSecretHasApiToken(&troubleshootCtx), "Dynatrace secret does not have apiToken")
	})
}
// TestPullSecret covers the custom pull-secret checks: existence of the
// secret referenced by the Dynakube, and presence of the required
// ".dockerconfigjson" key.
func TestPullSecret(t *testing.T) {
	t.Run("custom pull secret exists", func(t *testing.T) {
		dynakube := testNewDynakubeBuilder(testNamespace, testDynakube).withCustomPullSecret(testSecretName).build()
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				dynakube,
				testBuildNamespace(testNamespace),
				testNewSecretBuilder(testNamespace, testSecretName).build(),
			).
			Build()
		troubleshootCtx := troubleshootContext{apiReader: clt, namespaceName: testNamespace, dynakube: *dynakube}
		assert.NoErrorf(t, checkPullSecretExists(&troubleshootCtx), "custom pull secret not found")
	})
	t.Run("custom pull secret does not exist", func(t *testing.T) {
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				testNewDynakubeBuilder(testNamespace, testDynakube).withCustomPullSecret(testSecretName).build(),
				testBuildNamespace(testNamespace),
			).
			Build()
		troubleshootCtx := troubleshootContext{apiReader: clt, namespaceName: testNamespace}
		assert.Errorf(t, checkPullSecretExists(&troubleshootCtx), "custom pull secret found")
	})
	t.Run("custom pull secret has required tokens", func(t *testing.T) {
		troubleshootCtx := troubleshootContext{namespaceName: testNamespace, pullSecret: *testNewSecretBuilder(testNamespace, testSecretName).dataAppend(".dockerconfigjson", testCustomPullSecretToken).build()}
		assert.NoErrorf(t, checkPullSecretHasRequiredTokens(&troubleshootCtx), "custom pull secret does not have required tokens")
	})
	t.Run("custom pull secret does not have required tokens", func(t *testing.T) {
		troubleshootCtx := troubleshootContext{namespaceName: testNamespace, pullSecret: *testNewSecretBuilder(testNamespace, testSecretName).build()}
		assert.Errorf(t, checkPullSecretHasRequiredTokens(&troubleshootCtx), "custom pull secret has required tokens")
	})
}
// TestProxySecret covers applyProxySettings: resolving the proxy secret
// referenced by the Dynakube and validating its required keys.
func TestProxySecret(t *testing.T) {
	t.Run("proxy secret exists", func(t *testing.T) {
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				testNewDynakubeBuilder(testNamespace, testDynakube).withProxySecret(testSecretName).build(),
				testBuildNamespace(testNamespace),
				testNewSecretBuilder(testNamespace, testSecretName).build(),
			).
			Build()
		// NOTE(review): the context omits the dynakube field here, unlike
		// the other subtests; confirm applyProxySettings tolerates that.
		troubleshootCtx := troubleshootContext{apiReader: clt, namespaceName: testNamespace}
		assert.NoErrorf(t, applyProxySettings(&troubleshootCtx), "proxy secret not found")
	})
	t.Run("proxy secret does not exist", func(t *testing.T) {
		dynakube := testNewDynakubeBuilder(testNamespace, testDynakube).withProxySecret(testSecretName).build()
		clt := fake.NewClientBuilder().
			WithScheme(scheme.Scheme).
			WithObjects(
				dynakube,
				testBuildNamespace(testNamespace),
			).
			Build()
		troubleshootCtx := troubleshootContext{
			apiReader: clt, namespaceName: testNamespace, dynakube: *dynakube,
		}
		assert.Errorf(t, applyProxySettings(&troubleshootCtx), "proxy secret found, should not exist")
	})
	t.Run("proxy secret has required tokens", func(t *testing.T) {
		// A pre-resolved proxy secret with the expected key succeeds.
		proxySecret := *testNewSecretBuilder(testNamespace, testSecretName).
			dataAppend(dtclient.CustomProxySecretKey, testCustomPullSecretToken).
			build()
		troubleshootCtx := troubleshootContext{
			namespaceName: testNamespace,
			proxySecret:   &proxySecret,
			httpClient:    &http.Client{},
		}
		assert.NoErrorf(t, applyProxySettings(&troubleshootCtx), "proxy secret does not have required tokens")
	})
	t.Run("proxy secret does not have required tokens", func(t *testing.T) {
		secret := *testNewSecretBuilder(testNamespace, testSecretName).build()
		dynakube := *testNewDynakubeBuilder(testNamespace, testDynakube).withProxySecret(testSecretName).build()
		clt := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(
			&dynakube,
			&secret).
			Build()
		troubleshootCtx := troubleshootContext{
			namespaceName: testNamespace,
			proxySecret:   &secret,
			dynakube:      dynakube,
			apiReader:     clt}
		assert.Errorf(t, applyProxySettings(&troubleshootCtx), "proxy secret has required tokens")
	})
}
// testDynaKubeBuilder is a chainable test helper for assembling DynaKube
// fixtures.
type testDynaKubeBuilder struct {
	dynakube *dynatracev1beta1.DynaKube
}
// testNewDynakubeBuilder creates a builder for a DynaKube with the given
// namespace and name.
func testNewDynakubeBuilder(namespace string, dynakube string) *testDynaKubeBuilder {
	dk := &dynatracev1beta1.DynaKube{}
	dk.ObjectMeta = metav1.ObjectMeta{
		Namespace: namespace,
		Name:      dynakube,
	}
	return &testDynaKubeBuilder{dynakube: dk}
}
// withApiUrl sets the DynaKube's API URL and returns the builder.
func (builder *testDynaKubeBuilder) withApiUrl(apiUrl string) *testDynaKubeBuilder {
	builder.dynakube.Spec.APIURL = apiUrl
	return builder
}
// withTokens sets the name of the tokens secret and returns the builder.
func (builder *testDynaKubeBuilder) withTokens(secretName string) *testDynaKubeBuilder {
	builder.dynakube.Spec.Tokens = secretName
	return builder
}
// withCustomPullSecret sets the custom pull-secret name and returns the
// builder.
func (builder *testDynaKubeBuilder) withCustomPullSecret(secretName string) *testDynaKubeBuilder {
	builder.dynakube.Spec.CustomPullSecret = secretName
	return builder
}
// withProxy configures an inline proxy URL and returns the builder.
func (builder *testDynaKubeBuilder) withProxy(proxyURL string) *testDynaKubeBuilder {
	builder.dynakube.Spec.Proxy = &dynatracev1beta1.DynaKubeProxy{
		Value: proxyURL,
	}
	return builder
}
// withProxySecret configures the proxy to come from the named secret and
// returns the builder.
func (builder *testDynaKubeBuilder) withProxySecret(secretName string) *testDynaKubeBuilder {
	builder.dynakube.Spec.Proxy = &dynatracev1beta1.DynaKubeProxy{
		ValueFrom: secretName,
	}
	return builder
}
// withActiveGateCapability appends the given capability to the ActiveGate
// spec and returns the builder.
func (builder *testDynaKubeBuilder) withActiveGateCapability(capability dynatracev1beta1.CapabilityDisplayName) *testDynaKubeBuilder {
	// append handles a nil slice, so the previous explicit make() on first
	// use was redundant.
	builder.dynakube.Spec.ActiveGate.Capabilities = append(builder.dynakube.Spec.ActiveGate.Capabilities, capability)
	return builder
}
// withActiveGateCustomImage sets a custom ActiveGate image and returns the
// builder.
func (builder *testDynaKubeBuilder) withActiveGateCustomImage(image string) *testDynaKubeBuilder {
	builder.dynakube.Spec.ActiveGate.Image = image
	return builder
}
// withCloudNativeFullStack enables cloud-native full-stack monitoring with
// an empty host-inject spec and returns the builder.
func (builder *testDynaKubeBuilder) withCloudNativeFullStack() *testDynaKubeBuilder {
	builder.dynakube.Spec.OneAgent.CloudNativeFullStack = &dynatracev1beta1.CloudNativeFullStackSpec{
		HostInjectSpec: dynatracev1beta1.HostInjectSpec{},
	}
	return builder
}
// withClassicFullStack enables classic full-stack monitoring and returns
// the builder.
func (builder *testDynaKubeBuilder) withClassicFullStack() *testDynaKubeBuilder {
	builder.dynakube.Spec.OneAgent.ClassicFullStack = &dynatracev1beta1.HostInjectSpec{}
	return builder
}
// withHostMonitoring enables host monitoring and returns the builder.
func (builder *testDynaKubeBuilder) withHostMonitoring() *testDynaKubeBuilder {
	builder.dynakube.Spec.OneAgent.HostMonitoring = &dynatracev1beta1.HostInjectSpec{}
	return builder
}
// withClassicFullStackCustomImage sets the classic full-stack image,
// creating the spec on first use, and returns the builder.
func (builder *testDynaKubeBuilder) withClassicFullStackCustomImage(image string) *testDynaKubeBuilder {
	if builder.dynakube.Spec.OneAgent.ClassicFullStack == nil {
		builder.dynakube.Spec.OneAgent.ClassicFullStack = &dynatracev1beta1.HostInjectSpec{}
	}
	builder.dynakube.Spec.OneAgent.ClassicFullStack.Image = image
	return builder
}
// withCloudNativeFullStackCustomImage sets the cloud-native full-stack
// image, creating the spec on first use, and returns the builder.
func (builder *testDynaKubeBuilder) withCloudNativeFullStackCustomImage(image string) *testDynaKubeBuilder {
	if builder.dynakube.Spec.OneAgent.CloudNativeFullStack == nil {
		builder.dynakube.Spec.OneAgent.CloudNativeFullStack = &dynatracev1beta1.CloudNativeFullStackSpec{}
	}
	// Image lives on the embedded HostInjectSpec.
	builder.dynakube.Spec.OneAgent.CloudNativeFullStack.Image = image
	return builder
}
// withHostMonitoringCustomImage sets the host-monitoring image, creating
// the spec on first use, and returns the builder.
func (builder *testDynaKubeBuilder) withHostMonitoringCustomImage(image string) *testDynaKubeBuilder {
	if builder.dynakube.Spec.OneAgent.HostMonitoring == nil {
		builder.dynakube.Spec.OneAgent.HostMonitoring = &dynatracev1beta1.HostInjectSpec{}
	}
	builder.dynakube.Spec.OneAgent.HostMonitoring.Image = image
	return builder
}
// withCloudNativeCodeModulesImage sets the code-modules image on the
// cloud-native full-stack spec, creating it on first use, and returns the
// builder.
func (builder *testDynaKubeBuilder) withCloudNativeCodeModulesImage(image string) *testDynaKubeBuilder {
	if builder.dynakube.Spec.OneAgent.CloudNativeFullStack == nil {
		builder.dynakube.Spec.OneAgent.CloudNativeFullStack = &dynatracev1beta1.CloudNativeFullStackSpec{}
	}
	// CodeModulesImage lives on the embedded AppInjectionSpec;
	// InitResources keeps its zero value, matching the original literal.
	builder.dynakube.Spec.OneAgent.CloudNativeFullStack.CodeModulesImage = image
	return builder
}
// withApplicationMonitoringCodeModulesImage sets the code-modules image on
// the application-monitoring spec (creating it on first use), enables the
// CSI driver, and returns the builder.
func (builder *testDynaKubeBuilder) withApplicationMonitoringCodeModulesImage(image string) *testDynaKubeBuilder {
	if builder.dynakube.Spec.OneAgent.ApplicationMonitoring == nil {
		builder.dynakube.Spec.OneAgent.ApplicationMonitoring = &dynatracev1beta1.ApplicationMonitoringSpec{}
	}
	builder.dynakube.Spec.OneAgent.ApplicationMonitoring.CodeModulesImage = image
	builder.dynakube.Spec.OneAgent.ApplicationMonitoring.UseCSIDriver = address.Of(true)
	return builder
}
// withClassicFullStackImageVersion sets the classic full-stack image
// version, creating the spec on first use, and returns the builder.
func (builder *testDynaKubeBuilder) withClassicFullStackImageVersion(version string) *testDynaKubeBuilder {
	if builder.dynakube.Spec.OneAgent.ClassicFullStack == nil {
		builder.dynakube.Spec.OneAgent.ClassicFullStack = &dynatracev1beta1.HostInjectSpec{}
	}
	builder.dynakube.Spec.OneAgent.ClassicFullStack.Version = version
	return builder
}
// withCloudNativeFullStackImageVersion sets the cloud-native full-stack
// image version, creating the spec on first use, and returns the builder.
func (builder *testDynaKubeBuilder) withCloudNativeFullStackImageVersion(version string) *testDynaKubeBuilder {
	if builder.dynakube.Spec.OneAgent.CloudNativeFullStack == nil {
		builder.dynakube.Spec.OneAgent.CloudNativeFullStack = &dynatracev1beta1.CloudNativeFullStackSpec{}
	}
	// Version lives on the embedded HostInjectSpec.
	builder.dynakube.Spec.OneAgent.CloudNativeFullStack.Version = version
	return builder
}
// withHostMonitoringImageVersion sets the host-monitoring image version,
// creating the spec on first use, and returns the builder.
func (builder *testDynaKubeBuilder) withHostMonitoringImageVersion(version string) *testDynaKubeBuilder {
	if builder.dynakube.Spec.OneAgent.HostMonitoring == nil {
		builder.dynakube.Spec.OneAgent.HostMonitoring = &dynatracev1beta1.HostInjectSpec{}
	}
	builder.dynakube.Spec.OneAgent.HostMonitoring.Version = version
	return builder
}
// build returns the assembled DynaKube fixture.
func (builder *testDynaKubeBuilder) build() *dynatracev1beta1.DynaKube {
	return builder.dynakube
}
// testSecretBuilder is a chainable test helper for assembling Secret
// fixtures.
type testSecretBuilder struct {
	secret *corev1.Secret
}
// testNewSecretBuilder creates a builder for a Secret with the given
// namespace and name.
func testNewSecretBuilder(namespace string, name string) *testSecretBuilder {
	return &testSecretBuilder{
		secret: &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: namespace,
				Name:      name,
			},
		},
	}
}
// dataAppend stores value under key in the secret's Data map (creating the
// map on first use) and returns the builder.
func (builder *testSecretBuilder) dataAppend(key string, value string) *testSecretBuilder {
	// The assignment was previously duplicated in both branches; only the
	// map initialization needs to be conditional.
	if builder.secret.Data == nil {
		builder.secret.Data = make(map[string][]byte)
	}
	builder.secret.Data[key] = []byte(value)
	return builder
}
// build returns the assembled Secret fixture.
func (builder *testSecretBuilder) build() *corev1.Secret {
	return builder.secret
}
// testBuildNamespace returns a Namespace fixture with the given name and
// the shared test UID (testUID is declared elsewhere in this package).
func testBuildNamespace(namespace string) *corev1.Namespace {
	return &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: namespace,
			UID:  testUID,
		},
	}
}
|
// Copyright 2021 Comcast Cable Communications Management, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package block_test
import (
"context"
"encoding/json"
"github.com/xmidt-org/ears/pkg/event"
"github.com/xmidt-org/ears/pkg/filter/block"
"github.com/xmidt-org/ears/pkg/tenant"
"testing"
)
// TestFilterPassBasic verifies that the block filter drops every event: a
// simple JSON event fed through the filter must yield zero output events.
func TestFilterPassBasic(t *testing.T) {
	ctx := context.Background()
	f, err := block.NewFilter(tenant.Id{AppId: "myapp", OrgId: "myorg"}, "block", "mymblock", block.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("block test failed: %s\n", err.Error())
	}
	// Build a minimal event payload.
	eventStr := `{ "foo": "bar"}`
	var obj interface{}
	err = json.Unmarshal([]byte(eventStr), &obj)
	if err != nil {
		t.Fatalf("block test failed: %s\n", err.Error())
	}
	e, err := event.New(ctx, obj, event.FailOnNack(t))
	if err != nil {
		t.Fatalf("block test failed: %s\n", err.Error())
	}
	// A block filter must swallow the event entirely.
	evts := f.Filter(e)
	if len(evts) != 0 {
		t.Fatalf("wrong number of blocked events: %d\n", len(evts))
	}
}
|
package controller
import (
"encoding/json"
"gm/method"
"io/ioutil"
"net/http"
"shared/utility/errors"
"shared/utility/glog"
"shared/utility/httputil"
"shared/utility/safe"
"sync"
)
// Dispatcher routes incoming HTTP requests to per-method PathRouters and
// runs registered request filters before dispatching.
type Dispatcher struct {
	f       sync.Map // HTTP method -> *PathRouter
	filters []HttpRequestFilter
}
// ServeHTTP runs the registered filters, dispatches the request to the
// router registered for its HTTP method, and writes the handler's result
// as JSON. Filter failures answer 511, unknown methods 404, handler or
// marshalling errors 500.
func (d *Dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	defer safe.Recover()
	glog.Debugf("req :%+v", r)
	//接入规范 响应头 Content-Type 必须 是: application/json
	// (Per the integration spec, the Content-Type header must be
	// application/json.)
	w.Header().Set("content-type", "application/json")
	err := d.Filter(r)
	if err != nil {
		glog.Errorf("ServeHTTP Filter err:%v", err)
		// NOTE(review): 511 Network Authentication Required is an unusual
		// status for a generic filter failure — confirm it is intentional.
		w.WriteHeader(http.StatusNetworkAuthenticationRequired)
		return
	}
	path := r.URL.Path[1:] // strip the leading '/'
	moduleRouter, ok := d.Dispatch(r.Method)
	if !ok {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	var content *httputil.HttpContent
	switch r.Method {
	case http.MethodGet:
		query := r.URL.Query()
		glog.Debugf("req get path:%s ,param :%v", path, query)
		content = moduleRouter.HandleGet(r.Context(), path, query)
	case http.MethodPost:
		// NOTE(review): ioutil.ReadAll is deprecated since Go 1.16;
		// io.ReadAll is the drop-in replacement.
		bytes, err := ioutil.ReadAll(r.Body)
		if err != nil {
			glog.Errorf("ServeHTTP Parse body err:%v", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		glog.Debugf("req post path:%s,body :%s", path, bytes)
		content = moduleRouter.HandlePost(r.Context(), path, bytes)
	default:
		content = httputil.NewHttpContent(nil, errors.NewCode(404, "404"))
	}
	// Any non-zero application error code is mapped to HTTP 500.
	if content.ErrorCode != 0 {
		w.WriteHeader(http.StatusInternalServerError)
	} else {
		w.WriteHeader(http.StatusOK)
	}
	marshal, err := json.Marshal(content)
	if err != nil {
		// The status line is already sent at this point; this WriteHeader
		// is too late to take effect.
		glog.Errorf("ServeHTTP json Marshal error: %+v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	_, err = w.Write(marshal)
	if err != nil {
		glog.Errorf("ServeHTTP Write Err:%v", err)
	}
}
// NewDispatcher returns an empty, ready-to-use Dispatcher.
func NewDispatcher() *Dispatcher {
	// The zero value of sync.Map is ready for use, so a bare composite
	// literal is equivalent to initializing the field explicitly.
	return &Dispatcher{}
}
// RegisterRouter registers a router under its HTTP method, replacing any
// previous registration for that method.
func (d *Dispatcher) RegisterRouter(router *PathRouter) {
	d.f.Store(router.GetMethod(), router)
}
// AddFilter appends a request filter; filters run in registration order.
func (d *Dispatcher) AddFilter(filter HttpRequestFilter) {
	d.filters = append(d.filters, filter)
}
// Filter runs every registered filter against the request in order and
// returns the first error encountered (wrapped with a stack trace), or nil
// when all filters pass.
func (d *Dispatcher) Filter(r *http.Request) error {
	for _, hf := range d.filters {
		if err := hf.filter(r); err != nil {
			return errors.WrapTrace(err)
		}
	}
	return nil
}
// RegisterRouters builds and registers the GET and POST routers. On
// construction failure it logs fatally and returns the error.
// NOTE(review): if glog.Fatalf exits the process (as stdlib log.Fatalf
// does), the subsequent return is unreachable — confirm the semantics of
// this project's glog.
func (d *Dispatcher) RegisterRouters() error {
	get, err := NewPathRouter(method.NewHttpGetHandler())
	if err != nil {
		glog.Fatalf("NewPathRouter Method GET error: %v", err)
		return err
	}
	d.RegisterRouter(get)
	post, err := NewPathRouter(method.NewHttpPostHandler())
	if err != nil {
		glog.Fatalf("NewPathRouter Method POST error: %v", err)
		return err
	}
	d.RegisterRouter(post)
	return nil
}
// Dispatch looks up the router registered for the given HTTP method. The
// boolean result reports whether a router was found.
func (d *Dispatcher) Dispatch(method string) (*PathRouter, bool) {
	if v, ok := d.f.Load(method); ok {
		return v.(*PathRouter), true
	}
	return nil, false
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package datavalues
import (
"strconv"
"unsafe"
"base/docs"
"base/errors"
)
// ValueFloat is the float64-backed implementation of IDataValue.
type ValueFloat float64
// MakeFloat wraps a float64 in a new ValueFloat data value.
func MakeFloat(v float64) IDataValue {
	val := ValueFloat(v)
	return &val
}
// ZeroFloat returns a new float data value holding zero.
func ZeroFloat() IDataValue {
	var zero ValueFloat
	return &zero
}
// Size returns the in-memory size of the underlying float value in bytes.
//
// Fix: unsafe.Sizeof(v) measured the size of the *pointer*, not the
// float64 it points to; the two only coincide on 64-bit platforms.
// Dereferencing measures the value itself.
func (v *ValueFloat) Size() uintptr {
	return unsafe.Sizeof(*v)
}
// String renders the value in upper-case scientific notation ('E' format)
// with the minimal number of digits needed for exact round-tripping.
func (v *ValueFloat) String() string {
	return strconv.FormatFloat(float64(*v), 'E', -1, 64)
}
// Type reports the concrete data type of this value.
func (v *ValueFloat) Type() Type {
	return TypeFloat
}
// Family reports the type family this value belongs to.
func (v *ValueFloat) Family() Family {
	return FamilyFloat
}
// AsFloat returns the value as a plain float64.
func (v *ValueFloat) AsFloat() float64 {
	return float64(*v)
}
// Compare orders this value against another float value, returning 1, -1,
// or 0 for greater, less, and equal respectively. Comparing against a
// non-float value is an error.
func (v *ValueFloat) Compare(other IDataValue) (Comparison, error) {
	if other.Type() != TypeFloat {
		return 0, errors.Errorf("type mismatch between values")
	}
	lhs := float64(*v)
	rhs := AsFloat(other)
	if lhs > rhs {
		return 1, nil
	}
	if lhs < rhs {
		return -1, nil
	}
	return 0, nil
}
// Document returns the human-readable documentation for this type.
func (v *ValueFloat) Document() docs.Documentation {
	return docs.Text("Float")
}
// AsFloat extracts the float64 from a data value, returning 0.0 when the
// value is not a *ValueFloat.
func AsFloat(v IDataValue) float64 {
	t, ok := v.(*ValueFloat)
	if !ok {
		return 0.0
	}
	return float64(*t)
}
|
package main
import (
"bufio"
"flag"
"fmt"
"log"
"os"
"github.com/ipkg/blockchain"
)
// init parses command-line flags and configures log output with timestamps
// and short file names before main runs.
func init() {
	flag.Parse()
	log.SetFlags(log.Lshortfile | log.LstdFlags)
}
// main starts a single-node blockchain: it generates a keypair, runs the
// chain, logs transactions and blocks as they become available, and queues
// a new transaction for every line typed on stdin. Runs forever.
func main() {
	kp := blockchain.GenerateNewKeypair()
	chain := blockchain.NewBlockchain(kp, nil)
	go chain.Run()
	// Log every transaction and block the chain announces.
	go func() {
		for {
			select {
			case tx := <-chain.TransactionAvailable():
				log.Printf("tx=%x", tx.Hash())
			case blk := <-chain.BlockAvailable():
				log.Printf("block=%x", blk.Hash())
			}
		}
	}()
	fmt.Println("Type something and hit enter")
	for {
		// Each typed line becomes the payload of a new transaction.
		str := <-readStdin()
		tx := buildTx(kp, []byte(str))
		chain.QueueTransaction(tx)
		log.Printf("Submitted tx=%s", tx.String())
	}
}
// buildTx creates a transaction with the given payload, performs the
// proof-of-work to compute its nonce, and signs it with the keypair.
func buildTx(kp *blockchain.Keypair, payload []byte) *blockchain.Transaction {
	tx := blockchain.NewTransaction(kp.Public, nil, payload)
	tx.Header.Nonce = tx.GenerateNonce(blockchain.TRANSACTION_POW)
	tx.Signature = tx.Sign(kp)
	return tx
}
// readStdin returns a channel that delivers at most one line read from
// stdin.
//
// NOTE(review): a fresh bufio.Scanner is created per call, so input
// buffered by a previous scanner can be lost between calls; and if Scan
// returns false (EOF/error) nothing is ever sent, leaking the goroutine's
// receiver. Consider a single long-lived scanner goroutine instead.
func readStdin() chan string {
	cb := make(chan string)
	sc := bufio.NewScanner(os.Stdin)
	go func() {
		if sc.Scan() {
			cb <- sc.Text()
		}
	}()
	return cb
}
|
package iterators_test
import (
"testing"
"github.com/adamluzsi/frameless/ports/iterators"
"github.com/adamluzsi/testcase/assert"
)
// Compile-time assertion that Slice satisfies the Iterator interface.
var _ iterators.Iterator[string] = iterators.Slice([]string{"A", "B", "C"})
// TestNewSlice_SliceGiven_SliceIterableAndValuesReturnedWithDecode verifies
// that a slice iterator yields each element in order, then reports
// exhaustion with no error.
func TestNewSlice_SliceGiven_SliceIterableAndValuesReturnedWithDecode(t *testing.T) {
	t.Parallel()
	i := iterators.Slice([]int{42, 4, 2})
	assert.Must(t).True(i.Next())
	assert.Must(t).Equal(42, i.Value())
	assert.Must(t).True(i.Next())
	assert.Must(t).Equal(4, i.Value())
	assert.Must(t).True(i.Next())
	assert.Must(t).Equal(2, i.Value())
	// After the last element Next reports false and no error is pending.
	assert.Must(t).False(i.Next())
	assert.Must(t).Nil(i.Err())
}
// TestNewSlice_ClosedCalledMultipleTimes_NoErrorReturned verifies that
// Close on a slice iterator is idempotent: repeated calls keep succeeding.
func TestNewSlice_ClosedCalledMultipleTimes_NoErrorReturned(t *testing.T) {
	t.Parallel()
	i := iterators.Slice([]int{42})
	for index := 0; index < 42; index++ {
		assert.Must(t).Nil(i.Close())
	}
}
|
package main
import (
"fmt"
"os"
"sync"
)
// Package-level synchronization state: wg serializes the startup tasks in
// onLoad, commandQueue serializes command processing in the main loop, and
// isShutdown flags that shutdown has been requested.
var wg sync.WaitGroup
var commandQueue sync.WaitGroup
var isShutdown bool
// ANSI color format strings for console output; each expects one string
// argument and appends a newline.
const (
	InfoColor = "\033[1;34m%s\033[0m\n"
	NoticeColor = "\033[1;36m%s\033[0m\n"
	WarningColor = "\033[1;33m%s\033[0m\n"
	ErrorColor = "\033[1;31m%s\033[0m\n"
	DebugColor = "\033[0;36m%s\033[0m\n"
)
// main prints a startup banner, runs the one-time initialization, then
// processes console commands forever, one at a time.
func main() {
	fmt.Printf(DebugColor, "=================================================")
	fmt.Printf(DebugColor, "Loading application by aytronn")
	fmt.Printf(DebugColor, "Command \"help\" for see all command")
	fmt.Printf(DebugColor, "=================================================")
	onLoad()
	for {
		// NOTE(review): spawning a goroutine and immediately waiting on it
		// is equivalent to calling commandManager() directly.
		commandQueue.Add(1)
		go commandManager()
		commandQueue.Wait()
	}
}
// onLoad performs startup initialization: loads the config and the beta
// user list (each in its own goroutine, awaited sequentially), connects to
// the "translations" Mongo collection, and registers the console commands.
func onLoad() {
	// NOTE(review): Add(1)+go+Wait runs each task synchronously; a direct
	// call would behave the same.
	wg.Add(1)
	go getConfig()
	wg.Wait()
	wg.Add(1)
	go getBetaUser()
	wg.Wait()
	getMongo("translations")
	registerCommand(Command{
		name:    "search",
		Execute: searchCommand,
		usage:   "search <pseudo> : allow to check if is in beta whitelist"})
	registerCommand(Command{
		name:    "reload",
		Execute: reloadCommand,
		usage:   "reload : Allow to reload"})
}
// shutdown flags the application as shutting down and exits immediately.
// NOTE(review): os.Exit runs no deferred functions, and the isShutdown
// flag set just before exiting can hardly be observed — confirm intent.
func shutdown() {
	isShutdown = true
	os.Exit(0)
}
|
package ino
import (
"math"
"github.com/hajimehoshi/ebiten"
"github.com/hajimehoshi/go-inovation/ino/internal/audio"
"github.com/hajimehoshi/go-inovation/ino/internal/draw"
"github.com/hajimehoshi/go-inovation/ino/internal/field"
"github.com/hajimehoshi/go-inovation/ino/internal/fieldtype"
"github.com/hajimehoshi/go-inovation/ino/internal/input"
)
// PlayerState enumerates the phases of the player's lifecycle.
type PlayerState int

const (
	PLAYERSTATE_START   PlayerState = iota // intro pause before control begins
	PLAYERSTATE_NORMAL                     // regular gameplay
	PLAYERSTATE_ITEMGET                    // item pickup pause
	PLAYERSTATE_MUTEKI                     // temporary invincibility after damage
	PLAYERSTATE_DEAD                       // player has died
)
// Gameplay tuning constants (units presumably pixels per frame for speeds
// and frames for timers — confirm against the game loop).
const (
	PLAYER_SPEED = 2.0
	PLAYER_GRD_ACCRATIO = 0.04 // acceleration ratio on the ground
	PLAYER_AIR_ACCRATIO = 0.01 // acceleration ratio in the air
	PLAYER_JUMP = -4.0 // negative = upward
	PLAYER_GRAVITY = 0.2
	PLAYER_FALL_SPEEDMAX = 4.0
	WAIT_TIMER_INTERVAL = 10
	LIFE_RATIO = 400 // life points per lifeMax unit
	MUTEKI_INTERVAL = 50 // invincibility duration
	START_WAIT_INTERVAL = 50
	SCROLLPANEL_SPEED = 2.0
	LUNKER_JUMP_DAMAGE1 = 40.0
	LUNKER_JUMP_DAMAGE2 = 96.0
)
type Player struct {
life int
jumpCnt int
timer int
position PositionF
speed PositionF
direction int
jumpedPoint PositionF
state PlayerState
itemGet fieldtype.FieldType
waitTimer int
gameData *GameData // TODO(hajimehoshi): Remove this?
view *View
field *field.Field
}
// NewPlayer creates a player on a freshly-built field, placed at the
// field's start point with a full life gauge, and starts the stage BGM.
func NewPlayer(gameData *GameData) *Player {
	f := field.New(field_data)
	sx, sy := f.GetStartPoint()
	start := PositionF{X: float64(sx), Y: float64(sy)}
	audio.PlayBGM(audio.BGM0)
	p := &Player{
		gameData:    gameData,
		field:       f,
		life:        gameData.lifeMax * LIFE_RATIO,
		position:    start,
		jumpedPoint: start,
		view:        NewView(start),
	}
	return p
}
func (p *Player) onWall() bool {
if p.toFieldOfsY() > field.CHAR_SIZE/4 {
return false
}
if p.field.IsRidable(p.toFieldX(), p.toFieldY()+1) && p.toFieldOfsX() < field.CHAR_SIZE*7/8 {
return true
}
if p.field.IsRidable(p.toFieldX()+1, p.toFieldY()+1) && p.toFieldOfsX() > field.CHAR_SIZE/8 {
return true
}
return false
}
// isFallable reports whether the player may drop through the floor they
// are standing on: they must be on a wall, with no solid tile under
// either horizontally-overlapped column.
func (p *Player) isFallable() bool {
	if !p.onWall() {
		return false
	}
	x, below := p.toFieldX(), p.toFieldY()+1
	ofs := p.toFieldOfsX()
	if p.field.IsWall(x, below) && ofs < field.CHAR_SIZE*7/8 {
		return false
	}
	return !(p.field.IsWall(x+1, below) && ofs > field.CHAR_SIZE/8)
}
// isUpperWallBoth reports whether both tiles in the row the player's
// head occupies — columns x and x+1 — are walls. Only meaningful when
// the player overlaps that row by at least half a tile.
func (p *Player) isUpperWallBoth() bool {
	if p.toFieldOfsY() < field.CHAR_SIZE/2 {
		return false
	}
	x, y := p.toFieldX(), p.toFieldY()
	return p.field.IsWall(x, y) && p.field.IsWall(x+1, y)
}
// isUpperWall reports whether either tile in the row the player's head
// occupies is a wall the player horizontally overlaps.
func (p *Player) isUpperWall() bool {
	if p.toFieldOfsY() < field.CHAR_SIZE/2 {
		return false
	}
	x, y := p.toFieldX(), p.toFieldY()
	ofs := p.toFieldOfsX()
	return (p.field.IsWall(x, y) && ofs < field.CHAR_SIZE*7/8) ||
		(p.field.IsWall(x+1, y) && ofs > field.CHAR_SIZE/8)
}
// isLeftWall reports whether a wall tile blocks the player's left side,
// checking the player's own row and — when the player overlaps the row
// below by more than an eighth of a tile — that row too.
func (p *Player) isLeftWall() bool {
	x, y := p.toFieldX(), p.toFieldY()
	if p.field.IsWall(x, y) {
		return true
	}
	return p.field.IsWall(x, y+1) && p.toFieldOfsY() > field.CHAR_SIZE/8
}
// isRightWall reports whether a wall tile blocks the player's right
// side; mirror image of isLeftWall using column x+1.
func (p *Player) isRightWall() bool {
	x, y := p.toFieldX()+1, p.toFieldY()
	if p.field.IsWall(x, y) {
		return true
	}
	return p.field.IsWall(x, y+1) && p.toFieldOfsY() > field.CHAR_SIZE/8
}
// normalizeToRight snaps p.position.X back to the left edge of the
// player's current tile column and stops horizontal movement; used when
// a collision occurs while moving right.
func (p *Player) normalizeToRight() {
	col := p.toFieldX()
	p.position.X = float64(col * field.CHAR_SIZE)
	p.speed.X = 0
}
// normalizeToLeft snaps p.position.X to the left edge of the next tile
// column and stops horizontal movement; used when a collision occurs
// while moving left.
func (p *Player) normalizeToLeft() {
	col := p.toFieldX() + 1
	p.position.X = float64(col * field.CHAR_SIZE)
	p.speed.X = 0
}
// normalizeToUpper pushes the player back down below the row it bumped
// into and cancels any remaining upward velocity; used on ceiling hits.
func (p *Player) normalizeToUpper() {
	p.position.Y = float64(field.CHAR_SIZE * (p.toFieldY() + 1))
	if p.speed.Y < 0 {
		p.speed.Y = 0
	}
}
// toFieldX converts the player's pixel X position to a tile column index.
func (p *Player) toFieldX() int {
	return int(p.position.X) / field.CHAR_SIZE
}

// toFieldY converts the player's pixel Y position to a tile row index.
func (p *Player) toFieldY() int {
	return int(p.position.Y) / field.CHAR_SIZE
}

// toFieldOfsX returns the player's horizontal pixel offset within its tile.
func (p *Player) toFieldOfsX() int {
	return int(p.position.X) % field.CHAR_SIZE
}

// toFieldOfsY returns the player's vertical pixel offset within its tile.
func (p *Player) toFieldOfsY() int {
	return int(p.position.Y) % field.CHAR_SIZE
}
// Update advances the player one frame: updates the field, runs the
// state machine, and returns the game-state transition the frame
// requested (ending, return-to-title, or none).
func (p *Player) Update() GameStateMsg {
	msg := GAMESTATE_MSG_NONE
	p.field.Update()
	switch p.state {
	case PLAYERSTATE_START:
		// Hold the player for START_WAIT_INTERVAL frames before play begins.
		p.waitTimer++
		if p.waitTimer > START_WAIT_INTERVAL {
			p.state = PLAYERSTATE_NORMAL
		}
	case PLAYERSTATE_NORMAL:
		p.moveByInput()
		p.moveNormal()
		// Regenerate life slowly; chime each time a whole heart
		// (LIFE_RATIO units) is recovered.
		if p.life < p.gameData.lifeMax*LIFE_RATIO {
			o_life := p.life
			p.life++
			if (p.life / LIFE_RATIO) != (o_life / LIFE_RATIO) {
				audio.PlaySE(audio.SE_HEAL)
			}
		}
	case PLAYERSTATE_ITEMGET:
		p.moveItemGet()
		// moveItemGet may have returned us to NORMAL; if all clear items
		// are now collected, request the ending sequence.
		if p.state != PLAYERSTATE_ITEMGET {
			if p.gameData.IsGameClear() {
				msg = GAMESTATE_MSG_REQ_ENDING
			}
		}
	case PLAYERSTATE_MUTEKI:
		// Invincibility frames: still controllable; revert after the interval.
		p.moveByInput()
		p.moveNormal()
		p.waitTimer++
		if p.waitTimer > MUTEKI_INTERVAL {
			p.state = PLAYERSTATE_NORMAL
		}
	case PLAYERSTATE_DEAD:
		p.moveNormal()
		audio.PauseBGM()
		// Brief delay so a held key cannot skip the death screen instantly.
		if input.Current().IsActionKeyPressed() && p.waitTimer > 15 {
			msg = GAMESTATE_MSG_REQ_TITLE
		}
	}
	// Dropping under one full heart kills the player regardless of state.
	if p.life < LIFE_RATIO {
		if p.state != PLAYERSTATE_DEAD {
			p.waitTimer = 0
		}
		p.state = PLAYERSTATE_DEAD
		p.direction = 0
		p.waitTimer++
	}
	return msg
}
// moveNormal applies one frame of physics: gravity and motion, collision
// detection/response against the tile map, lunker-mode fall damage, and
// floor-dependent horizontal acceleration; finally it updates the camera.
func (p *Player) moveNormal() {
	p.timer++
	p.gameData.Update()
	// Movement & falling. The terminal-speed clamp happens after the
	// position update, so it takes effect from the next frame on.
	p.speed.Y += PLAYER_GRAVITY
	p.position.X += p.speed.X
	p.position.Y += p.speed.Y
	if p.speed.Y > PLAYER_FALL_SPEEDMAX {
		p.speed.Y = PLAYER_FALL_SPEEDMAX
	}
	if p.state == PLAYERSTATE_NORMAL {
		p.checkCollision()
	}
	// Collision ("atari") checks against surrounding tiles.
	hitLeft := false
	hitRight := false
	hitUpper := false
	if p.onWall() && p.speed.Y >= 0 {
		// Landing. In lunker mode, falling too far below the last
		// jump/landing point hurts — or, past the second threshold, kills.
		if p.gameData.lunkerMode {
			if p.position.Y-p.jumpedPoint.Y > LUNKER_JUMP_DAMAGE1 {
				p.state = PLAYERSTATE_MUTEKI
				p.waitTimer = 0
				p.life -= LIFE_RATIO
				audio.PlaySE(audio.SE_DAMAGE)
			}
			if p.position.Y-p.jumpedPoint.Y > LUNKER_JUMP_DAMAGE2 {
				p.state = PLAYERSTATE_MUTEKI
				p.waitTimer = 0
				p.life -= LIFE_RATIO * 99
				audio.PlaySE(audio.SE_DAMAGE)
			}
		}
		// Snap onto the floor and reset the jump counter — unless the
		// player is holding action+down over a fall-through floor.
		if !input.Current().IsActionKeyPressed() || !input.Current().IsDirectionKeyPressed(input.DirectionDown) || !p.isFallable() {
			if p.speed.Y > 0 {
				p.speed.Y = 0
			}
			p.position.Y = float64(field.CHAR_SIZE * p.toFieldY())
			p.jumpCnt = 0
		}
		p.jumpedPoint = p.position
	}
	if p.isLeftWall() && p.speed.X < 0 {
		hitLeft = true
	}
	if p.isRightWall() && p.speed.X > 0 {
		hitRight = true
	}
	if p.isUpperWall() && p.speed.Y <= 0 {
		hitUpper = true
	}
	// Single-axis hits resolve directly.
	if hitUpper && !hitLeft && !hitRight {
		p.normalizeToUpper()
	}
	if !hitUpper && hitLeft {
		p.normalizeToLeft()
	}
	if !hitUpper && hitRight {
		p.normalizeToRight()
	}
	// Corner cases: ceiling plus a side wall. Resolve along the axis with
	// the deeper overlap unless the whole upper row is solid.
	if hitUpper && hitRight {
		if p.isUpperWallBoth() {
			p.normalizeToUpper()
		} else {
			if p.toFieldOfsX() > p.toFieldOfsY() {
				p.normalizeToRight()
			} else {
				p.normalizeToUpper()
			}
		}
	}
	if hitUpper && hitLeft {
		if p.isUpperWallBoth() {
			p.normalizeToUpper()
		} else {
			if field.CHAR_SIZE-p.toFieldOfsX() > p.toFieldOfsY() {
				p.normalizeToLeft()
			} else {
				p.normalizeToUpper()
			}
		}
	}
	// Floor special effects: conveyor panels bias the target speed, slip
	// floors give no ground acceleration, air control is weaker than ground.
	switch p.getOnField() {
	case fieldtype.FIELD_SCROLL_L:
		p.speed.X = p.speed.X*(1.0-PLAYER_GRD_ACCRATIO) + float64(p.direction*PLAYER_SPEED-SCROLLPANEL_SPEED)*PLAYER_GRD_ACCRATIO
	case fieldtype.FIELD_SCROLL_R:
		p.speed.X = p.speed.X*(1.0-PLAYER_GRD_ACCRATIO) + float64(p.direction*PLAYER_SPEED+SCROLLPANEL_SPEED)*PLAYER_GRD_ACCRATIO
	case fieldtype.FIELD_SLIP:
		// Do nothing
	case fieldtype.FIELD_NONE:
		p.speed.X = p.speed.X*(1.0-PLAYER_AIR_ACCRATIO) + float64(p.direction*PLAYER_SPEED)*PLAYER_AIR_ACCRATIO
	default:
		p.speed.X = p.speed.X*(1.0-PLAYER_GRD_ACCRATIO) + float64(p.direction*PLAYER_SPEED)*PLAYER_GRD_ACCRATIO
	}
	p.view.Update(p.position, p.speed)
}
// moveItemGet runs the ITEMGET state: the popup must be on screen for
// WAIT_TIMER_INTERVAL frames before a fresh action-key press dismisses
// it, returning to normal play and resuming the BGM.
func (p *Player) moveItemGet() {
	if p.waitTimer < WAIT_TIMER_INTERVAL {
		p.waitTimer++
		return
	}
	if !input.Current().IsActionKeyJustPressed() {
		return
	}
	p.state = PLAYERSTATE_NORMAL
	audio.ResumeBGM(audio.BGM0)
}
// moveByInput applies the current input: left/right set the facing
// direction, and a fresh action-key press starts a jump when the player
// is grounded or still has air jumps left (and is not holding down,
// which is reserved for dropping through floors).
func (p *Player) moveByInput() {
	if input.Current().IsDirectionKeyPressed(input.DirectionLeft) {
		p.direction = -1
	}
	if input.Current().IsDirectionKeyPressed(input.DirectionRight) {
		p.direction = 1
	}
	if input.Current().IsActionKeyJustPressed() {
		if ((p.gameData.jumpMax > p.jumpCnt) || p.onWall()) && !input.Current().IsDirectionKeyPressed(input.DirectionDown) {
			p.speed.Y = PLAYER_JUMP // jump
			if !p.onWall() {
				// Only air jumps consume the counter; ground jumps are free.
				p.jumpCnt++
			}
			// NOTE(review): this only triggers when |speed.X| < 0.1, giving a
			// nearly-stationary player a tiny extra push in its current drift
			// direction — presumably to break ties in the physics; confirm intent.
			if math.Abs(p.speed.X) < 0.1 {
				if p.speed.X < 0 {
					p.speed.X -= 0.02
				}
				if p.speed.X > 0 {
					p.speed.X += 0.02
				}
			}
			audio.PlaySE(audio.SE_JUMP)
			p.jumpedPoint = p.position
		}
	}
}
// checkCollision scans the 2x2 block of field cells the player overlaps
// for items and spikes and applies their effects. At most one trigger is
// handled per frame: the method returns on the first hit.
func (p *Player) checkCollision() {
	for xx := 0; xx < 2; xx++ {
		for yy := 0; yy < 2; yy++ {
			// Item pickup (transitions to PLAYERSTATE_ITEMGET).
			if p.field.IsItem(p.toFieldX()+xx, p.toFieldY()+yy) {
				// Hidden items require extra conditions to be collectable.
				if !p.field.IsItemGettable(p.toFieldX()+xx, p.toFieldY()+yy, p.gameData) {
					continue
				}
				p.state = PLAYERSTATE_ITEMGET
				// Apply the item's effect.
				p.itemGet = p.field.GetField(p.toFieldX()+xx, p.toFieldY()+yy)
				switch p.field.GetField(p.toFieldX()+xx, p.toFieldY()+yy) {
				case fieldtype.FIELD_ITEM_POWERUP:
					p.gameData.jumpMax++ // one more air jump
				case fieldtype.FIELD_ITEM_LIFE:
					p.gameData.lifeMax++ // extra heart plus full heal
					p.life = p.gameData.lifeMax * LIFE_RATIO
				default:
					p.gameData.itemGetFlags[p.itemGet] = true
				}
				p.field.EraseField(p.toFieldX()+xx, p.toFieldY()+yy)
				p.waitTimer = 0
				audio.PauseBGM()
				// Clear-condition items and powerups get the fancier jingle.
				if IsItemForClear(p.itemGet) || p.itemGet == fieldtype.FIELD_ITEM_POWERUP {
					audio.PlaySE(audio.SE_ITEMGET)
				} else {
					audio.PlaySE(audio.SE_ITEMGET2)
				}
				return
			}
			// Spikes: take one heart of damage and bounce.
			if p.field.IsSpike(p.toFieldX()+xx, p.toFieldY()+yy) {
				p.state = PLAYERSTATE_MUTEKI
				p.waitTimer = 0
				p.life -= LIFE_RATIO
				p.speed.Y = PLAYER_JUMP
				p.jumpCnt = -1 // damage bounce grants an extra jump
				audio.PlaySE(audio.SE_DAMAGE)
				return
			}
		}
	}
}
// getOnField returns the type of the floor tile the player stands on,
// or FIELD_NONE when airborne. Of the two columns the player overlaps,
// the one nearer the player's horizontal offset is preferred; if that
// tile is not ridable, the other column's tile is reported instead.
func (p *Player) getOnField() fieldtype.FieldType {
	if !p.onWall() {
		return fieldtype.FIELD_NONE
	}
	x, below := p.toFieldX(), p.toFieldY()+1
	primary, secondary := x, x+1
	if p.toFieldOfsX() >= field.CHAR_SIZE/2 {
		primary, secondary = x+1, x
	}
	if p.field.IsRidable(primary, below) {
		return p.field.GetField(primary, below)
	}
	return p.field.GetField(secondary, below)
}
// drawPlayer draws the player sprite at its screen position. The sprite
// sheet row depends on state (dead / facing left / facing right) and on
// lunker mode, which uses an alternate set of rows; the column animates
// from p.timer.
func (p *Player) drawPlayer(screen *ebiten.Image, game *Game) {
	v := p.view.ToScreenPosition(p.position)
	vx, vy := int(v.X), int(v.Y)
	if p.state == PLAYERSTATE_DEAD { // dead
		anime := (p.timer / 6) % 4
		if game.gameData.lunkerMode {
			draw.Draw(screen, "ino", vx, vy, field.CHAR_SIZE*(2+anime), 128+field.CHAR_SIZE*2, field.CHAR_SIZE, field.CHAR_SIZE)
			return
		}
		draw.Draw(screen, "ino", vx, vy, field.CHAR_SIZE*(2+anime), 128, field.CHAR_SIZE, field.CHAR_SIZE)
		return
	}
	// While invincible, blink by skipping the draw for half of each
	// 10-frame cycle.
	if p.state != PLAYERSTATE_MUTEKI || p.timer%10 < 5 {
		anime := (p.timer / 6) % 2
		if !p.onWall() {
			anime = 0 // no walk animation while airborne
		}
		if p.direction < 0 {
			if game.gameData.lunkerMode {
				draw.Draw(screen, "ino", vx, vy, field.CHAR_SIZE*anime, 128+field.CHAR_SIZE*2, field.CHAR_SIZE, field.CHAR_SIZE)
				return
			}
			draw.Draw(screen, "ino", vx, vy, field.CHAR_SIZE*anime, 128, field.CHAR_SIZE, field.CHAR_SIZE)
			return
		}
		if game.gameData.lunkerMode {
			draw.Draw(screen, "ino", vx, vy, field.CHAR_SIZE*anime, 128+field.CHAR_SIZE*3, field.CHAR_SIZE, field.CHAR_SIZE)
			return
		}
		draw.Draw(screen, "ino", vx, vy, field.CHAR_SIZE*anime, 128+field.CHAR_SIZE, field.CHAR_SIZE, field.CHAR_SIZE)
		return
	}
}
// drawLife renders one heart per lifeMax: a filled heart for each full
// LIFE_RATIO of remaining life, an empty heart otherwise. When life is
// under two hearts (and max is above one) the gauge blinks as a warning.
func (p *Player) drawLife(screen *ebiten.Image, game *Game) {
	for t := 0; t < game.gameData.lifeMax; t++ {
		if p.life < LIFE_RATIO*2 && p.timer%10 < 5 && game.gameData.lifeMax > 1 {
			continue
		}
		if p.life >= (t+1)*LIFE_RATIO {
			draw.Draw(screen, "ino",
				field.CHAR_SIZE*t, 0, field.CHAR_SIZE*3, 128+field.CHAR_SIZE*1, field.CHAR_SIZE, field.CHAR_SIZE)
			continue
		}
		draw.Draw(screen, "ino",
			field.CHAR_SIZE*t, 0, field.CHAR_SIZE*4, 128+field.CHAR_SIZE*1, field.CHAR_SIZE, field.CHAR_SIZE)
	}
}

// drawItems renders the item status row in the top-right corner: a
// "missing" mark for uncollected items, a dedicated graphic for
// clear-condition items, and a generic "owned" mark for the rest.
func (p *Player) drawItems(screen *ebiten.Image, game *Game) {
	for t := fieldtype.FIELD_ITEM_FUJI; t < fieldtype.FIELD_ITEM_MAX; t++ {
		if !game.gameData.itemGetFlags[t] {
			draw.Draw(screen, "ino",
				draw.ScreenWidth-field.CHAR_SIZE/4*(int(fieldtype.FIELD_ITEM_MAX)-2-int(t)), 0, // not owned
				field.CHAR_SIZE*5, 128+field.CHAR_SIZE, field.CHAR_SIZE/4, field.CHAR_SIZE/2)
			continue
		}
		// Clear-condition items use their own graphic, indexed by their
		// position in clearFlagItems.
		if IsItemForClear(t) {
			for i, c := range clearFlagItems {
				if c != t {
					continue
				}
				draw.Draw(screen, "ino",
					draw.ScreenWidth-field.CHAR_SIZE/4*(int(fieldtype.FIELD_ITEM_MAX)-2-int(t)), 0,
					field.CHAR_SIZE*5+field.CHAR_SIZE/4*(i+2), 128+field.CHAR_SIZE, field.CHAR_SIZE/4, field.CHAR_SIZE/2)
			}
			continue
		}
		draw.Draw(screen, "ino",
			draw.ScreenWidth-field.CHAR_SIZE/4*(int(fieldtype.FIELD_ITEM_MAX)-2-int(t)), 0, // owned
			field.CHAR_SIZE*5+field.CHAR_SIZE/4, 128+field.CHAR_SIZE, field.CHAR_SIZE/4, field.CHAR_SIZE/2)
	}
}

// drawMessage draws the state-dependent overlay: the item-get popup
// (slides in with a t-squared ease during the first WAIT_TIMER_INTERVAL
// frames), the start-of-game message, or the death message.
func (p *Player) drawMessage(screen *ebiten.Image, game *Game) {
	switch p.state {
	case PLAYERSTATE_ITEMGET:
		t := WAIT_TIMER_INTERVAL - p.waitTimer
		draw.DrawItemMessage(screen, p.itemGet, (draw.ScreenHeight-96)/2+24-t*t, game.lang)
		draw.DrawItemFrame(screen, (draw.ScreenWidth-32)/2, (draw.ScreenHeight-96)/2-t*t-24)
		// Item icon: sprite-sheet cell computed from the item's index past
		// FIELD_ITEM_BORDER, 16 cells per row.
		it := int(p.itemGet) - (int(fieldtype.FIELD_ITEM_BORDER) + 1)
		draw.Draw(screen, "ino", (draw.ScreenWidth-16)/2, (draw.ScreenHeight-96)/2-int(t)*int(t)-16,
			(it%16)*field.CHAR_SIZE, (it/16+4)*field.CHAR_SIZE, field.CHAR_SIZE, field.CHAR_SIZE)
	case PLAYERSTATE_START:
		key := "msg_" + game.lang.String()
		draw.Draw(screen, key, (draw.ScreenWidth-256)/2, 64+(draw.ScreenHeight-240)/2, 0, 96, 256, 32)
	case PLAYERSTATE_DEAD:
		key := "msg_" + game.lang.String()
		draw.Draw(screen, key, (draw.ScreenWidth-256)/2, 64+(draw.ScreenHeight-240)/2, 0, 128, 256, 32)
	}
}

// Draw renders the full scene: field, player sprite, life gauge, item
// row, and any overlay message, in that order.
func (p *Player) Draw(screen *ebiten.Image, game *Game) {
	po := p.view.GetPosition()
	p.field.Draw(screen, game.gameData, int(po.X), int(po.Y))
	p.drawPlayer(screen, game)
	p.drawLife(screen, game)
	p.drawItems(screen, game)
	p.drawMessage(screen, game)
}
|
package roman
import (
"testing"
)
// func Test_D(t *testing.T) {
// got := ToInt("MDCXCV")
// t.Fatal(got)
// }
// Test_Examples checks ToInt against a table of Roman numerals covering
// every subtractive pair (IV, IX, XL, XC, CD, CM) and assorted
// combinations up to the maximum standard value 3999.
func Test_Examples(t *testing.T) {
	examples := []struct {
		want  int
		roman string
	}{
		{1, "I"},
		{2, "II"},
		{3, "III"},
		{4, "IV"},
		{5, "V"},
		{6, "VI"},
		{7, "VII"},
		{8, "VIII"},
		{9, "IX"},
		{10, "X"},
		{14, "XIV"},
		{19, "XIX"},
		{30, "XXX"},
		{34, "XXXIV"},
		{38, "XXXVIII"},
		{39, "XXXIX"},
		{40, "XL"},
		{44, "XLIV"},
		{49, "XLIX"},
		{50, "L"},
		{54, "LIV"},
		{59, "LIX"},
		{60, "LX"},
		{61, "LXI"},
		{64, "LXIV"},
		{69, "LXIX"},
		{89, "LXXXIX"},
		{90, "XC"},
		{100, "C"},
		{104, "CIV"},
		{109, "CIX"},
		{114, "CXIV"},
		{119, "CXIX"},
		{149, "CXLIX"},
		{150, "CL"},
		{189, "CLXXXIX"},
		{199, "CXCIX"},
		{399, "CCCXCIX"},
		{400, "CD"},
		{406, "CDVI"},
		{490, "CDXC"},
		{500, "D"},
		{900, "CM"},
		{1000, "M"},
		{1695, "MDCXCV"},
		{2000, "MM"},
		{2059, "MMLIX"},
		// Fixed: 3999 in standard form is MMMCMXCIX. The previous entry,
		// "MMMDCDXCIX", wrote 900 as the invalid pair "DCD".
		{3999, "MMMCMXCIX"},
	}
	for i, e := range examples {
		got := ToInt(e.roman)
		if e.want != got {
			t.Fatalf(
				"\n%v example '%v' -> want:%v got:%v\n",
				i, e.roman, e.want, got,
			)
		}
	}
}
|
package worldlight
import (
"github.com/go-gl/mathgl/mgl32"
)
/* Assuming this is 8bits
enum emittype_t
{
emit_surface, // 90 degree spotlight
emit_point, // simple point light source
emit_spotlight, // spotlight with penumbra
emit_skylight, // directional light with no falloff (surface must trace to SKY texture)
emit_quakelight, // linear falloff, non-lambertian
emit_skyambient, // spherical light source with no falloff (surface must trace to SKY texture)
};
*/
// EmitType identifies the kind of light emitter; it mirrors the engine's
// emittype_t enum (see the C declaration above) and is a uint8 to match
// that layout.
type EmitType uint8

// Emitter kinds, in the same order and with the same values as emittype_t.
const (
	EMIT_SURFACE    EmitType = iota // 90 degree spotlight
	EMIT_POINT                      // simple point light source
	EMIT_SPOTLIGHT                  // spotlight with penumbra
	EMIT_SKYLIGHT                   // directional light with no falloff (surface must trace to SKY texture)
	EMIT_QUAKELIGHT                 // linear falloff, non-lambertian
	EMIT_SKYAMBIENT                 // spherical light source with no falloff (surface must trace to SKY texture)
)
// WorldLight describes a single static light source. Field order and
// sizes appear chosen to match an engine/on-disk layout — do not reorder.
type WorldLight struct {
	Origin    mgl32.Vec3
	Intensity mgl32.Vec3
	Normal    mgl32.Vec3
	Cluster   int32
	// Type is uint8-sized; the explicit 3-byte pad below keeps the
	// following int32 fields 4-byte aligned, matching the C layout.
	Type                 EmitType
	_                    [3]byte
	Style                int32
	Stopdot              float32
	Stopdot2             float32
	Exponent             float32
	Radius               float32
	ConstantAttenuation  float32
	LinearAttenuation    float32
	QuadraticAttenuation float32
	Flags                int32
	TexInfo              int32
	Owner                int32
}
|
package handler
import (
"context"
"github.com/zzsds/micro-sms-service/service"
"github.com/jinzhu/gorm"
"github.com/micro/go-micro/v2/errors"
"github.com/zzsds/micro-sms-service/models"
"github.com/zzsds/micro-sms-service/modules/provider"
"github.com/zzsds/micro-sms-service/proto/sms"
)
// Sms is the RPC handler for the SMS service.
type Sms struct {
	Repo         *service.SendRepo     // send-record repository (not set by NewSms)
	SmsProvider  provider.Driver       // outbound SMS provider driver (not set by NewSms)
	templateRepo *service.TemplateRepo // template lookup repository
}

// NewSms returns a handler with only the template repository initialized.
// NOTE(review): Repo and SmsProvider are left nil here — confirm callers
// assign them before use.
func NewSms() *Sms {
	return &Sms{templateRepo: service.NewTemplateRepo()}
}
// SendList is not implemented yet; it returns nil without filling rsp.
func (e *Sms) SendList(ctx context.Context, req *sms.SendPage, rsp *sms.SendPage) error {
	return nil
}

// TemplateList is not implemented yet; it returns nil without filling rsp.
func (e *Sms) TemplateList(ctx context.Context, req *sms.TemplatePage, rsp *sms.TemplatePage) error {
	return nil
}
// Template ...
func (e *Sms) Template(ctx context.Context, req *sms.SmsStruct, rsp *sms.SmsStruct) (err error) {
result, err := e.templateRepo.FirstModelTemplate(models.Template{
Model: gorm.Model{ID: uint(req.Id)},
Provider: req.Provider,
Mode: req.Mode,
BizType: req.BizType,
})
if err != nil {
return errors.BadRequest("go.micro.srv.sms", err.Error())
}
rsp = e.DownloadObj(result)
return nil
}
// List is not implemented yet; it returns nil without filling rsp.
func (e *Sms) List(ctx context.Context, req *sms.ListRequest, rsp *sms.ListResponse) error {
	return nil
}

// Create is not implemented yet; it returns nil without filling rsp.
func (e *Sms) Create(ctx context.Context, in *sms.CreateTempRequest, rsp *sms.CreateTempResponse) error {
	return nil
}
// DownloadObj converts a template database model into its RPC
// representation. Note: only Id, Provider, Sign, Content and Mode are
// copied — BizType is not carried over.
func (e *Sms) DownloadObj(m *models.Template) *sms.SmsStruct {
	return &sms.SmsStruct{
		Id:       int64(m.ID),
		Provider: m.Provider,
		Sign:     m.Sign,
		Content:  m.Content,
		Mode:     m.Mode,
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.