text stringlengths 11 4.05M |
|---|
package lambda
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"io/ioutil"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/luraproject/lura/v2/config"
"github.com/luraproject/lura/v2/core"
"github.com/luraproject/lura/v2/logging"
"github.com/luraproject/lura/v2/proxy"
)
const (
	// Namespace is the extra-config key that enables this component for a backend.
	Namespace = "github.com/devopsfaith/krakend-lambda"
)

var (
	// errBadStatusCode is returned when the Invoke API status is not 200.
	errBadStatusCode = errors.New("aws lambda: bad status code")
	// errNoConfig signals that the backend defines no lambda extra config.
	errNoConfig = errors.New("aws lambda: no extra config defined")
	// errBadConfig signals that the extra config is not a JSON object.
	errBadConfig = errors.New("aws lambda: unable to parse the defined extra config")
	// clientContext is a base64-encoded KrakenD identification blob; it is
	// currently unused (the ClientContext field of the invoke input is
	// commented out).
	clientContext = base64.StdEncoding.EncodeToString([]byte(`{"client":"KrakenD", "version":"` + core.KrakendVersion + `"}`))
)
// Invoker is the subset of the AWS lambda client used by this component.
// It is an interface so alternative implementations can be supplied via
// BackendFactoryWithInvoker (e.g. for testing).
type Invoker interface {
	InvokeWithContext(aws.Context, *lambda.InvokeInput, ...request.Option) (*lambda.InvokeOutput, error)
}
// BackendFactory returns bf decorated with the lambda component, using the
// default AWS-session-based invoker factory.
func BackendFactory(logger logging.Logger, bf proxy.BackendFactory) proxy.BackendFactory {
	return BackendFactoryWithInvoker(logger, bf, invokerFactory)
}
// invokerFactory builds the default lambda Invoker. When no AWS config is
// present in the options, the session is created from the environment.
func invokerFactory(o *Options) Invoker {
	// session.New is deprecated: it silently discards configuration errors.
	// session.Must(session.NewSession(...)) panics on an invalid
	// configuration instead, surfacing the problem at startup.
	if o.Config == nil {
		return lambda.New(session.Must(session.NewSession()))
	}
	return lambda.New(session.Must(session.NewSession(o.Config)))
}
// BackendFactoryWithInvoker returns a proxy.BackendFactory that routes the
// request to an AWS Lambda function built by invokerFactory. If the backend
// has no (or an invalid) lambda extra config, the wrapped factory bf is used
// unchanged.
func BackendFactoryWithInvoker(logger logging.Logger, bf proxy.BackendFactory, invokerFactory func(*Options) Invoker) proxy.BackendFactory {
	return func(remote *config.Backend) proxy.Proxy {
		logPrefix := "[BACKEND: " + remote.URLPattern + "][Lambda]"
		ecfg, err := getOptions(remote)
		if err != nil {
			// errNoConfig only means the component is not enabled for this
			// backend; anything else is a real config problem worth logging.
			if err != errNoConfig {
				logger.Error(logPrefix, err)
			}
			return bf(remote)
		}
		i := invokerFactory(ecfg)
		ef := proxy.NewEntityFormatter(remote)
		logger.Debug(logPrefix, "Component enabled")
		return func(ctx context.Context, r *proxy.Request) (*proxy.Response, error) {
			// Build the invocation payload (from params or body depending on
			// the backend method — see getOptions).
			payload, err := ecfg.PayloadExtractor(r)
			if err != nil {
				return nil, err
			}
			input := &lambda.InvokeInput{
				// ClientContext: aws.String(clientContext),
				FunctionName:   aws.String(ecfg.FunctionExtractor(r)),
				InvocationType: aws.String("RequestResponse"),
				LogType:        aws.String("Tail"),
				Payload:        payload,
				// Qualifier: aws.String("1"),
			}
			result, err := i.InvokeWithContext(ctx, input)
			if err != nil {
				return nil, err
			}
			// Only a 200 from the Invoke API is accepted. Note this is the
			// API-level status, not the function's own response status.
			if result.StatusCode == nil || *result.StatusCode != 200 {
				return nil, errBadStatusCode
			}
			// The lambda response payload is expected to be a JSON object.
			data := map[string]interface{}{}
			if err := json.Unmarshal(result.Payload, &data); err != nil {
				return nil, err
			}
			response := ef.Format(proxy.Response{
				Metadata: proxy.Metadata{
					StatusCode: int(*result.StatusCode),
					Headers:    map[string][]string{},
				},
				Data:       data,
				IsComplete: true,
			})
			// Surface the executed lambda version to the client as a header.
			if result.ExecutedVersion != nil {
				response.Metadata.Headers["X-Amz-Executed-Version"] = []string{*result.ExecutedVersion}
			}
			return &response, nil
		}
	}
}
// getOptions parses the lambda extra config of the backend definition.
// It returns errNoConfig when the namespace is absent and errBadConfig when
// the namespace holds something that is not a JSON object.
func getOptions(remote *config.Backend) (*Options, error) {
	v, ok := remote.ExtraConfig[Namespace]
	if !ok {
		return nil, errNoConfig
	}
	ecfg, ok := v.(map[string]interface{})
	if !ok {
		return nil, errBadConfig
	}
	// The function name is either fixed ("function_name") or taken from a
	// request param ("function_param_name", defaulting to "function").
	var funcExtractor functionExtractor
	funcName, ok := ecfg["function_name"].(string)
	if ok {
		funcExtractor = func(_ *proxy.Request) string {
			return funcName
		}
	} else {
		funcParamName, ok := ecfg["function_param_name"].(string)
		if !ok {
			funcParamName = "function"
		}
		funcExtractor = func(r *proxy.Request) string {
			return r.Params[funcParamName]
		}
	}
	cfg := &Options{
		FunctionExtractor: funcExtractor,
	}
	// GET requests carry no body, so the payload is built from the params.
	if remote.Method == "GET" {
		cfg.PayloadExtractor = fromParams
	} else {
		cfg.PayloadExtractor = fromBody
	}
	region, ok := ecfg["region"].(string)
	if !ok {
		// Without a region no custom AWS config is built at all.
		return cfg, nil
	}
	cfg.Config = &aws.Config{
		Region: aws.String(region),
	}
	if endpoint, ok := ecfg["endpoint"].(string); ok {
		cfg.Config.WithEndpoint(endpoint)
	}
	// BUG FIX: extra config decoded from JSON stores numbers as float64, so
	// asserting only int here silently ignored "max_retries" in most
	// deployments. Accept both representations.
	switch retries := ecfg["max_retries"].(type) {
	case int:
		cfg.Config.WithMaxRetries(retries)
	case float64:
		cfg.Config.WithMaxRetries(int(retries))
	}
	return cfg, nil
}
// Options holds the per-backend settings resolved from the extra config.
type Options struct {
	PayloadExtractor  payloadExtractor  // builds the lambda payload from the request
	FunctionExtractor functionExtractor // resolves the lambda function name for the request
	Config            *aws.Config       // optional AWS config (region/endpoint/retries); nil means defaults
}

// functionExtractor resolves the lambda function name for a request.
type functionExtractor func(*proxy.Request) string

// payloadExtractor builds the lambda invocation payload for a request.
type payloadExtractor func(*proxy.Request) ([]byte, error)
// fromParams builds the lambda payload by JSON-encoding the request params,
// with every param name lowercased.
func fromParams(r *proxy.Request) ([]byte, error) {
	lowered := make(map[string]string, len(r.Params))
	for name, value := range r.Params {
		lowered[strings.ToLower(name)] = value
	}
	var out bytes.Buffer
	err := json.NewEncoder(&out).Encode(lowered)
	return out.Bytes(), err
}
// fromBody builds the lambda payload by reading the whole request body.
func fromBody(r *proxy.Request) ([]byte, error) {
	body, err := ioutil.ReadAll(r.Body)
	return body, err
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package terraform provides definitions of Terraform resources in JSON format.
// This intentionally does not define all fields in the plan JSON.
// https://www.terraform.io/docs/internals/json-format.html#plan-representation
package terraform
import (
"encoding/json"
"strings"
)
// https://www.terraform.io/docs/internals/json-format.html#plan-representation
//
// plan mirrors the top-level structure of `terraform show -json <planfile>`.
type plan struct {
	Variables       map[string]variable `json:"variables"`
	PlannedValues   values              `json:"planned_values"`
	ResourceChanges []ResourceChange    `json:"resource_changes"`
	Configuration   Configuration       `json:"configuration"`
}

// variable is a single input-variable value from the plan.
type variable struct {
	Value interface{} `json:"value"`
}
// https://www.terraform.io/docs/internals/json-format.html#values-representation
type values struct {
	// RootModules holds the root module's resources and child modules.
	// (Note the JSON key is singular: "root_module".)
	RootModules struct {
		Resources    []Resource    `json:"resources"`
		ChildModules []childModule `json:"child_modules"`
	} `json:"root_module"`
}

// childModule is a (possibly nested) module together with its resources.
type childModule struct {
	Address      string        `json:"address"`
	Resources    []Resource    `json:"resources"`
	ChildModules []childModule `json:"child_modules"`
}
// Resource represent single Terraform resource definition.
type Resource struct {
	Name    string                 `json:"name"`
	Address string                 `json:"address"`
	Kind    string                 `json:"type"`
	Mode    string                 `json:"mode"` // "managed" for resources, or "data" for data resources
	Values  map[string]interface{} `json:"values"`
}
// ResourceChange represents a Terraform resource change from a Terraform plan.
// See "resource_changes" at https://www.terraform.io/docs/internals/json-format.html#plan-representation
type ResourceChange struct {
	Address       string `json:"address"`
	ModuleAddress string `json:"module_address"`
	Mode          string `json:"mode"` // "managed" for resources, or "data" for data resources
	Kind          string `json:"type"`
	Name          string `json:"name"`
	Change        Change `json:"change"`
}

// Change represents the "Change" element of a Terraform resource change from a Terraform plan.
// https://www.terraform.io/docs/internals/json-format.html#change-representation
type Change struct {
	// Actions lists the planned operations, e.g. combinations of
	// create/update/delete, in order.
	Actions []string `json:"actions"`
	// These are "value-representation", not "values-representation" and the keys are resource-specific.
	Before       map[string]interface{} `json:"before"`
	After        map[string]interface{} `json:"after"`
	AfterUnknown map[string]interface{} `json:"after_unknown"` // Undocumented :( See https://github.com/terraform-providers/terraform-provider-aws/issues/11823
}
// Configuration represents part of the configuration block of a plan.
// https://www.terraform.io/docs/internals/json-format.html#configuration-representation
type Configuration struct {
	ProviderConfig map[string]ProviderConfig `json:"provider_config"`
	RootModule     struct {
		// Note: This is not the same schema as the planned value resource above.
		Resources []struct {
			Address string `json:"address"`
			Kind    string `json:"type"`
			Name    string `json:"name"`
			// ProviderConfigKey indexes into Configuration.ProviderConfig.
			ProviderConfigKey string      `json:"provider_config_key"`
			Expressions       expressions `json:"expressions"`
		} `json:"resources"`
	} `json:"root_module"`
}

// ProviderConfig represents a single provider configuration from the Configuration block of a Terraform plan.
type ProviderConfig struct {
	Name              string      `json:"name"`
	VersionConstraint string      `json:"version_constraint,omitempty"`
	Alias             string      `json:"alias,omitempty"`
	Expressions       expressions `json:"expressions"`
}

// expressions maps attribute names to raw expression representations
// (typically maps holding "constant_value" or "references").
type expressions map[string]interface{}
// ReadPlanChanges unmarshals data into a plan and returns its array of
// ResourceChange. When actions is non-empty, only changes whose action list
// exactly matches actions (same order and length) are returned.
func ReadPlanChanges(data []byte, actions []string) ([]ResourceChange, error) {
	parsed := new(plan)
	if err := json.Unmarshal(data, parsed); err != nil {
		return nil, err
	}
	var changes []ResourceChange
	for _, change := range parsed.ResourceChanges {
		if len(actions) != 0 && !slicesEqual(change.Change.Actions, actions) {
			continue
		}
		changes = append(changes, change)
	}
	return changes, nil
}
// slicesEqual reports whether a and b contain the same elements in the same
// order (two nil/empty slices compare equal).
func slicesEqual(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
// ReadProviderConfigValues returns the values from the expressions block from the provider config for the resource with the given kind and name.
// Variable references are resolved, as are constant_value.
func ReadProviderConfigValues(data []byte, kind, name string) (map[string]interface{}, error) {
	parsed := new(plan)
	if err := json.Unmarshal(data, parsed); err != nil {
		return nil, err
	}
	resolved := make(map[string]interface{})
	cfg, found := resourceProviderConfig(kind, name, parsed)
	if !found {
		return resolved, nil
	}
	for key, raw := range cfg.Expressions {
		// Within a provider config we expect expressions to be maps, but
		// that's not guaranteed, so non-map values are skipped.
		if expr, isMap := raw.(map[string]interface{}); isMap {
			resolved[key] = resolveExpression(expr, parsed)
		}
	}
	return resolved, nil
}
// resourceProviderConfig finds the provider config for the resource with the
// given kind and name. It reports false when no matching resource (or no
// provider config under that resource's provider_config_key) exists.
func resourceProviderConfig(kind string, name string, p *plan) (ProviderConfig, bool) {
	for _, r := range p.Configuration.RootModule.Resources {
		if r.Kind != kind || r.Name != name {
			continue
		}
		// Look up the provider config via the resource's key; if the key is
		// absent, keep scanning the remaining resources.
		if cfg, found := p.Configuration.ProviderConfig[r.ProviderConfigKey]; found {
			return cfg, true
		}
	}
	return ProviderConfig{}, false
}
// resolveExpression resolves a single expression map: a "constant_value" is
// returned as-is, and "references" entries are resolved via resolveReference
// (the first one that resolves wins). If nothing resolves, the raw expression
// map is returned unchanged.
func resolveExpression(expr map[string]interface{}, plan *plan) interface{} {
	if expr == nil {
		return nil
	}
	if cv, ok := expr["constant_value"]; ok {
		return cv
	}
	if refs, ok := expr["references"]; ok {
		switch stringRefs := refs.(type) {
		case []interface{}:
			for _, ref := range stringRefs {
				// BUG FIX: the previous unchecked ref.(string) assertion
				// panicked on a malformed plan containing non-string entries.
				s, isString := ref.(string)
				if !isString {
					continue
				}
				if resolved := resolveReference(s, plan); resolved != nil {
					// Take the first one. Not sure if this is 100% correct.
					return resolved
				}
			}
		}
	}
	// Can't resolve, return expression value as-is.
	return expr
}
// resolveReference resolves expressions within "references" blocks of a Terraform plan to their specific values.
// At the moment, it only handles "var.XXX", for references to variables.
// The docs say not to parse the strings, but I don't see anywhere in the plan where these are directly used as keys, so I'm parsing them as strings.
// https://www.terraform.io/docs/configuration/expressions.html#references-to-named-values
func resolveReference(expr string, plan *plan) interface{} {
	exprParts := strings.Split(expr, ".")
	// BUG FIX: guard against a bare "var" reference, which previously
	// indexed exprParts[1] out of range.
	if len(exprParts) >= 2 && exprParts[0] == "var" {
		// Variable reference: look the name up in the plan's variables.
		varName := exprParts[1]
		if v, ok := plan.Variables[varName]; ok {
			return v.Value
		}
	}
	// Unsupported reference form (module outputs, resources, ...).
	return nil
}
|
package lv2_rectangular
// gcd returns the greatest common divisor of min and max using the Euclidean
// algorithm. max must be non-zero (a zero divisor would panic).
func gcd(min, max int) int {
	for {
		rem := min % max
		if rem == 0 {
			return max
		}
		min, max = max, rem
	}
}
// minAndMax returns its two arguments ordered as (smaller, larger).
func minAndMax(a, b int) (int, int) {
	if a > b {
		return b, a
	}
	return a, b
}
// solution returns the number of cells of a w×h grid that the corner-to-corner
// diagonal does not cross: total cells minus the crossed count
// (w/g + h/g - 1) * g, where g = gcd(w, h).
func solution(w int, h int) int64 {
	lo, hi := minAndMax(w, h)
	g := gcd(lo, hi)
	crossed := (hi/g + lo/g - 1) * g
	return int64(w*h - crossed)
}
|
package cashlessdevice
import (
"testing"
)
// Test_validateCrc drives validateCrc with table-driven cases: a frame whose
// checksum matches (presumably the last byte is the CRC of the preceding
// bytes — confirm against validateCrc), a corrupted frame, and the
// degenerate empty / single-byte inputs.
func Test_validateCrc(t *testing.T) {
	type args struct {
		c []byte
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "test true",
			args: args{
				c: []byte{0x03, 0x00, 0x96, 0xF0, 0xF0, 0xF0, 0xF0, 0x00, 0x59},
			},
			want: true,
		},
		{
			// same frame as above with the first byte altered, so the
			// checksum no longer matches
			name: "test false",
			args: args{
				c: []byte{0x01, 0x00, 0x96, 0xF0, 0xF0, 0xF0, 0xF0, 0x00, 0x59},
			},
			want: false,
		},
		{
			name: "test empty",
			args: args{
				c: []byte{},
			},
			want: false,
		},
		{
			name: "test 1 byte",
			args: args{
				c: []byte{0xFF},
			},
			want: false,
		},
		{
			name: "test 2 byte",
			args: args{
				c: []byte{0x06, 0x06},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := validateCrc(tt.args.c); got != tt.want {
				t.Errorf("validateCrc() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_calculateCrc checks calculateCrc against known byte sequences: a
// single byte (its own checksum) and a longer frame with a known result.
func Test_calculateCrc(t *testing.T) {
	type args struct {
		c []byte
	}
	tests := []struct {
		name string
		args args
		want byte
	}{
		{
			name: "test 1 byte",
			args: args{
				c: []byte{0x06},
			},
			want: 0x06,
		},
		{
			// BUG FIX: this case was also named "test 1 byte", making
			// failure reports ambiguous; renamed to describe its input.
			name: "test 8 bytes",
			args: args{
				c: []byte{0x03, 0x00, 0x96, 0xF0, 0xF0, 0xF0, 0xF0, 0x00},
			},
			want: 0x59,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := calculateCrc(tt.args.c); got != tt.want {
				t.Errorf("calculateCrc() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package mysql
import (
"fmt"
"strings"
)
// Index describes a table index loaded from a YAML schema definition.
type Index struct {
	Name   string `yaml:"name,omitempty"`   // index name
	Type   string `yaml:"type,omitempty"`   // index type (e.g. unique); uppercased by Complete
	Fields string `yaml:"fields,omitempty"` // comma-separated list of column names
	Extend string `yaml:"-"`                // not loaded from YAML
}
// Hash returns a fingerprint of the index definition; currently a stub that
// always returns the empty string.
func (p *Index) Hash() string {
	return ""
}
// Complete normalizes the index definition by uppercasing the index type.
// (The redundant trailing bare return has been dropped.)
func (p *Index) Complete() {
	p.Type = strings.ToUpper(p.Type)
}
// ValidateIndex checks that every comma-separated key in p.Fields refers to
// an existing field of the given table. An empty Fields value is accepted.
func (p *Index) ValidateIndex(table *Entity) error {
	if p.Fields == "" {
		return nil
	}
	for _, key := range strings.Split(p.Fields, ",") {
		found := false
		for _, field := range table.Field {
			if field.Name == key {
				found = true
				break
			}
		}
		if !found {
			return fmt.Errorf(`index key "%s" not exists`, key)
		}
	}
	return nil
}
// IsUnique reports whether this index is a unique index. Type is uppercased
// before comparing, so the check also works before Complete has run
// (assuming IndexTypeUnique is an uppercase constant — confirm).
func (p *Index) IsUnique() bool {
	return strings.ToUpper(p.Type) == IndexTypeUnique
}
|
package awssqs
import (
"log"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
)
// shared empty results, returned instead of nil slices on error/early-exit
var emptyOpList = make([]OpStatus, 0)
var emptyMessageList = make([]Message, 0)

// this is our interface implementation
type awsSqsImpl struct {
	config AwsSqsConfig // validated service configuration
	svc    *sqs.SQS     // underlying AWS SQS client
}
// factory for our SQS interface
func newAwsSqs(config AwsSqsConfig) (AWS_SQS, error) {
	// a message bucket is required (used for oversize-message payloads)
	if len(config.MessageBucketName) == 0 {
		return nil, ErrMissingConfiguration
	}
	// the session is created from the environment/default credential chain
	sess, err := session.NewSession()
	if err != nil {
		return nil, err
	}
	return &awsSqsImpl{config: config, svc: sqs.New(sess)}, nil
}
// QueueHandle get a queue handle (URL) when provided a queue name
func (awsi *awsSqsImpl) QueueHandle(queueName string) (QueueHandle, error) {
	in := sqs.GetQueueUrlInput{QueueName: aws.String(queueName)}
	out, err := awsi.svc.GetQueueUrl(&in)
	if err != nil {
		// map the AWS "queue does not exist" error onto our sentinel
		if strings.HasPrefix(err.Error(), sqs.ErrCodeQueueDoesNotExist) {
			return "", ErrBadQueueName
		}
		return "", err
	}
	return QueueHandle(*out.QueueUrl), nil
}
// GetMessagesAvailable get the number of messages available in the specified queue
func (awsi *awsSqsImpl) GetMessagesAvailable(queueName string) (uint, error) {
	// get the queue handle
	queue, err := awsi.QueueHandle(queueName)
	if err != nil {
		return 0, err
	}
	// and get the necessary attribute
	q := string(queue)
	attr := "ApproximateNumberOfMessages"
	res, err := awsi.svc.GetQueueAttributes(&sqs.GetQueueAttributesInput{
		QueueUrl: &q,
		AttributeNames: []*string{
			&attr,
		},
	})
	if err != nil {
		return 0, err
	}
	// BUG FIX: previously the attribute was dereferenced unconditionally
	// (nil-pointer panic when absent) and the Atoi error was discarded,
	// silently reporting malformed counts as zero.
	value, ok := res.Attributes[attr]
	if !ok || value == nil {
		return 0, nil
	}
	count, err := strconv.Atoi(*value)
	if err != nil {
		return 0, err
	}
	return uint(count), nil
}
// BatchMessageGet get a batch of messages from the specified queue. Will return on receipt of any messages
// without waiting and will wait no longer than the wait time if no messages are received.
func (awsi *awsSqsImpl) BatchMessageGet(queue QueueHandle, maxMessages uint, waitTime time.Duration) ([]Message, error) {
	// ensure the block size is not too large
	if maxMessages > MAX_SQS_BLOCK_COUNT {
		return emptyMessageList, ErrBlockCountTooLarge
	}
	// ensure the wait time is not too large
	if waitTime.Seconds() > float64(MAX_SQS_WAIT_TIME) {
		return emptyMessageList, ErrWaitTooLarge
	}
	q := string(queue)
	// time the round-trip so slow receives can be reported below
	start := time.Now()
	result, err := awsi.svc.ReceiveMessage(&sqs.ReceiveMessageInput{
		AttributeNames: []*string{
			aws.String(sqs.QueueAttributeNameAll),
		},
		MessageAttributeNames: []*string{
			aws.String(sqs.QueueAttributeNameAll),
		},
		QueueUrl:            &q,
		MaxNumberOfMessages: aws.Int64(int64(maxMessages)),
		WaitTimeSeconds:     aws.Int64(int64(waitTime.Seconds())),
	})
	elapsed := int64(time.Since(start) / time.Millisecond)
	if err != nil {
		// map the AWS "queue does not exist" error onto our sentinel
		if strings.HasPrefix(err.Error(), sqs.ErrCodeQueueDoesNotExist) {
			return emptyMessageList, ErrBadQueueHandle
		}
		return emptyMessageList, err
	}
	// if we did not get any messages
	sz := len(result.Messages)
	if sz == 0 {
		return emptyMessageList, nil
	}
	// we want to warn if the receive took a long time (and yielded messages)
	warnIfSlow(elapsed, "ReceiveMessage")
	// build the response message set from the returned AWS structures
	messages := make([]Message, 0, sz)
	var returnErr error
	wasError := false
	for _, m := range result.Messages {
		// make a new message and append to the list
		// NOTE(review): the message is appended before the error check, so
		// MakeMessage is presumably expected to return a usable (possibly
		// incomplete) message even on error — confirm; a nil message here
		// would panic on the dereference below.
		m, err := MakeMessage(*m)
		messages = append(messages, *m)
		if err != nil {
			// sometimes we have incomplete messages so capture that info here...
			// incomplete messages are marked as such so can be handled elsewhere
			wasError = true
			returnErr = err
		}
	}
	// if one (or more) error occurred, return it with the list of messages
	// (only the last error seen is preserved)
	if wasError == true {
		return messages, returnErr
	}
	return messages, nil
}
// BatchMessagePut put a batch of messages to the specified queue.
// in the event of one or more failure, the operation status array will indicate which
// messages were processed successfully and which were not.
func (awsi *awsSqsImpl) BatchMessagePut(queue QueueHandle, messages []Message) ([]OpStatus, error) {
	// early exit if no messages provided
	sz := len(messages)
	if sz == 0 {
		return emptyOpList, nil
	}
	// ensure the block size is not too large
	if uint(sz) > MAX_SQS_BLOCK_COUNT {
		return emptyOpList, ErrBlockCountTooLarge
	}
	// our operation status array
	ops := make([]OpStatus, sz)
	// initialize the operation status array to all successful and convert any
	// oversize messages (use index access to the array because this updates the messages)
	for ix := range messages {
		ops[ix] = true
		sz := messages[ix].Size()
		if sz > MAX_SQS_MESSAGE_SIZE {
			// payload too large for SQS: move it to the S3 message bucket
			err := messages[ix].ConvertToOversizeMessage(awsi.config.MessageBucketName)
			if err != nil {
				log.Printf("WARNING: failed converting oversize message, ignoring further processing for it")
				ops[ix] = false
			}
		}
	}
	// calculate the total block size and the number of messages that are larger than the maximum message size
	var totalSize uint = 0
	for ix := range messages {
		totalSize += messages[ix].Size()
	}
	// if the total block size is too large then we can split the block in half and handle each one individually
	if totalSize > MAX_SQS_BLOCK_SIZE {
		half := sz / 2
		if half == 0 {
			// an insane situation, bomb out
			// NOTE(review): a single message exceeding the block size
			// terminates the whole process here — confirm this is intended.
			log.Fatalf("ERROR: cannot split block further, aborting")
		}
		log.Printf("INFO: blocksize too large, splitting at %d", half)
		// recurse on each half; the status arrays are concatenated in order
		op1, err1 := awsi.BatchMessagePut(queue, messages[0:half])
		op2, err2 := awsi.BatchMessagePut(queue, messages[half:])
		op1 = append(op1, op2...)
		if err1 != nil {
			return op1, err1
		} else {
			return op1, err2
		}
	}
	q := string(queue)
	batch := make([]*sqs.SendMessageBatchRequestEntry, 0, sz)
	// make a batch of messages that we successfully processed so far
	for ix, m := range messages {
		if ops[ix] == true {
			// the batch entry id carries the original index so failures can
			// be mapped back onto ops below
			batch = append(batch, constructSend(m, ix))
		}
	}
	start := time.Now()
	response, err := awsi.svc.SendMessageBatch(&sqs.SendMessageBatchInput{
		Entries:  batch,
		QueueUrl: &q,
	})
	elapsed := int64(time.Since(start) / time.Millisecond)
	// we want to warn if the receive took a long time
	warnIfSlow(elapsed, "SendMessageBatch")
	if err != nil {
		if strings.HasPrefix(err.Error(), sqs.ErrCodeQueueDoesNotExist) {
			return emptyOpList, ErrBadQueueHandle
		}
		return emptyOpList, err
	}
	// mark each failed entry (the entry id is the original message index)
	for _, f := range response.Failed {
		log.Printf("WARNING: ID %s send not successful (%s)", *f.Id, *f.Message)
		id, converr := strconv.Atoi(*f.Id)
		if converr == nil && id < sz {
			ops[id] = false
		}
	}
	// if any of the operation statuses are failures, return an error indicating so
	for _, b := range ops {
		if b == false {
			return ops, ErrOneOrMoreOperationsUnsuccessful
		}
	}
	return ops, nil
}
// BatchMessageDelete mark a batch of messages from the specified queue as suitable for delete. This mechanism
// prevents messages from being reprocessed.
func (awsi *awsSqsImpl) BatchMessageDelete(queue QueueHandle, messages []Message) ([]OpStatus, error) {
	// early exit if no messages provided
	var sz = uint(len(messages))
	if sz == 0 {
		return emptyOpList, nil
	}
	// ensure the block size is not too large
	if sz > MAX_SQS_BLOCK_COUNT {
		return emptyOpList, ErrBlockCountTooLarge
	}
	q := string(queue)
	batch := make([]*sqs.DeleteMessageBatchRequestEntry, 0, sz)
	ops := make([]OpStatus, sz)
	// the SQS delete loop, initially, assume everything works
	for ix, m := range messages {
		ops[ix] = true
		// the batch entry id carries the message index so results can be
		// mapped back onto ops below
		batch = append(batch, constructDelete(m.GetReceiptHandle(), ix))
	}
	start := time.Now()
	response, err := awsi.svc.DeleteMessageBatch(&sqs.DeleteMessageBatchInput{
		Entries:  batch,
		QueueUrl: &q,
	})
	elapsed := int64(time.Since(start) / time.Millisecond)
	// we want to warn if the receive took a long time
	warnIfSlow(elapsed, "DeleteMessageBatch")
	if err != nil {
		// map the AWS "queue does not exist" error onto our sentinel
		if strings.HasPrefix(err.Error(), sqs.ErrCodeQueueDoesNotExist) {
			return emptyOpList, ErrBadQueueHandle
		}
		return emptyOpList, err
	}
	// mark each failed delete in the status array
	for _, f := range response.Failed {
		log.Printf("WARNING: ID %s delete not successful (%s)", *f.Id, *f.Message)
		id, converr := strconv.Atoi(*f.Id)
		if converr == nil && uint(id) < sz {
			ops[id] = false
		} else {
			log.Printf("WARNING: suspect ID %s in delete response", *f.Id)
		}
	}
	// we have now deleted the messages from SQS, delete any oversize payloads from S3
	for _, f := range response.Successful {
		id, converr := strconv.Atoi(*f.Id)
		if converr == nil && uint(id) < sz {
			if messages[id].IsOversize() == true {
				deleteError := messages[id].DeleteOversizeMessage()
				if deleteError != nil {
					log.Printf("WARNING: failed deleting oversize message")
					ops[id] = false
				}
			}
		} else {
			log.Printf("WARNING: suspect ID %s in delete response", *f.Id)
		}
	}
	// if any of the operation statuses are failures, return an error indicating so
	for _, b := range ops {
		if b == false {
			return ops, ErrOneOrMoreOperationsUnsuccessful
		}
	}
	return ops, nil
}
// MessagePutRetry retry a batched put after one or more of the operations fails.
// retry the specified amount of times and return an error of after retrying one or messages
// has still not been sent successfully.
func (awsi *awsSqsImpl) MessagePutRetry(queue QueueHandle, messages []Message, opStatus []OpStatus, retries uint) error {
	// if we made it here then there is still operations outstanding and we have run out of attempts.
	// just return an error
	if retries == 0 {
		log.Printf("ERROR: out of retries, giving up")
		return ErrOneOrMoreOperationsUnsuccessful
	}
	// create the retry batch
	// (only the messages whose previous put failed are retried)
	retryBatch := make([]Message, 0)
	for ix, op := range opStatus {
		if op == false {
			retryBatch = append(retryBatch, messages[ix])
		}
	}
	// make sure there are items to retry... if not return success
	sz := len(retryBatch)
	if sz == 0 {
		return nil
	}
	// sleep for a while
	// (fixed 100ms back-off between attempts)
	time.Sleep(100 * time.Millisecond)
	log.Printf("INFO: retrying %d item(s)... (%d remaining tries)", sz, retries)
	opStatusRetry, err := awsi.BatchMessagePut(queue, retryBatch)
	// if success then we are done
	if err == nil {
		return nil
	}
	// if not success, anything other than an error we can retry is fatal so give up
	if err != ErrOneOrMoreOperationsUnsuccessful {
		return err
	}
	// try again and reduce the retries count
	return awsi.MessagePutRetry(queue, retryBatch, opStatusRetry, retries-1)
}
//
// end of file
//
|
package config
// Config holds the service settings. The split_words/required struct tags
// suggest it is populated from environment variables by an envconfig-style
// loader — confirm against the code that loads it.
type Config struct {
	DbConnString string `split_words:"true" required:"true"` // database connection string
	Port         int    `split_words:"true" required:"true"` // listen port
	MigratesDir  string `split_words:"true" required:"true"` // directory containing migration files
}
|
package tumblr
// Activity is a single activity entry decoded from the API's JSON payload.
type Activity struct {
	Id     string `json:"id"`     // activity identifier
	Action string `json:"action"` // action performed (semantics defined by the API)
	Blog   Blog   `json:"blog"`   // blog the activity relates to
}
|
package arithmetic
import "testing"
// TestMaxI64 checks MaxI64: with no arguments it returns the LOWER_BOUND_I64
// sentinel; otherwise it returns the largest of one, two or three arguments.
func TestMaxI64(t *testing.T) {
	if MaxI64() != LOWER_BOUND_I64 {
		t.FailNow()
	}
	if MaxI64(1024) != 1024 {
		t.FailNow()
	}
	if MaxI64(3, 5) != 5 {
		t.FailNow()
	}
	if MaxI64(1, 3, 5) != 5 {
		t.FailNow()
	}
}

// TestMinI64 checks MinI64: with no arguments it returns the UPPER_BOUND_I64
// sentinel; otherwise it returns the smallest of one, two or three arguments.
func TestMinI64(t *testing.T) {
	if MinI64() != UPPER_BOUND_I64 {
		t.FailNow()
	}
	if MinI64(1024) != 1024 {
		t.FailNow()
	}
	if MinI64(3, 5) != 3 {
		t.FailNow()
	}
	if MinI64(1, 3, 5) != 1 {
		t.FailNow()
	}
}
// TestMaxI checks the int variant MaxI: with no arguments it returns the
// LOWER_BOUND_I sentinel; otherwise the largest of its arguments.
func TestMaxI(t *testing.T) {
	if MaxI() != LOWER_BOUND_I {
		t.FailNow()
	}
	if MaxI(1024) != 1024 {
		t.FailNow()
	}
	if MaxI(3, 5) != 5 {
		t.FailNow()
	}
	if MaxI(1, 3, 5) != 5 {
		t.FailNow()
	}
}

// TestMinI checks the int variant MinI: with no arguments it returns the
// UPPER_BOUND_I sentinel; otherwise the smallest of its arguments.
func TestMinI(t *testing.T) {
	if MinI() != UPPER_BOUND_I {
		t.FailNow()
	}
	if MinI(1024) != 1024 {
		t.FailNow()
	}
	if MinI(3, 5) != 3 {
		t.FailNow()
	}
	if MinI(1, 3, 5) != 1 {
		t.FailNow()
	}
}
|
package system
import (
"yj-app/app/controller/system/config"
"yj-app/app/service/middleware/auth"
"yj-app/app/yjgframe/router"
)
// init registers the routes of the system-config admin module.
func init() {
	// config routes, registered under the admin group with auth middleware
	g1 := router.New("admin", "/system/config", auth.Auth)
	g1.GET("/", "system:config:view", config.List)
	g1.POST("/list", "system:config:list", config.ListAjax)
	g1.GET("/add", "system:config:add", config.Add)
	g1.POST("/add", "system:config:add", config.AddSave)
	g1.POST("/remove", "system:config:remove", config.Remove)
	g1.GET("/edit", "system:config:edit", config.Edit)
	g1.POST("/edit", "system:config:edit", config.EditSave)
	g1.POST("/export", "system:config:export", config.Export)
	g1.POST("/checkConfigKeyUniqueAll", "system:config:view", config.CheckConfigKeyUniqueAll)
	g1.POST("/checkConfigKeyUnique", "system:config:view", config.CheckConfigKeyUnique)
}
|
package cli
import (
"errors"
"fmt"
"strings"
)
// commandType identifies which cli command an option maps to.
type commandType uint

const (
	// None is the zero value: no command has been parsed yet.
	None commandType = iota
	Help
	AddResource
	RemoveResource
	Info
	Exit
)
/*
cliCommand provides the default structure of a cli option.

Fields:

	ct      - the type of the command
	short   - (optional) single-character short name of the option
	long    - long name of the option
	expArgs - expected input argument format
	next    - nested commands (not used in this file — confirm elsewhere)
	gotArgs - list of parsed args

The syntax for expected args:

	%s - single string value
	%t - true or false
	%d - decimal int
*/
type cliCommand struct {
	ct      commandType
	short   uint8
	long    string
	expArgs string
	next    []cliCommand
	// forbidden to declare on creation
	gotArgs []interface{}
}
// newCliCommandRegistry returns the static table of supported cli options.
func newCliCommandRegistry() []cliCommand {
	return []cliCommand{
		{ct: Help, long: "help", short: 'h'},
		{ct: AddResource, long: "add", expArgs: "%s"},
		{ct: RemoveResource, long: "remove", expArgs: "%s"},
		{ct: Info, long: "info", short: 'i'},
		{ct: Exit, long: "exit"}}
}

// cliCommands is the registry consulted by ParseCommand.
var cliCommands = newCliCommandRegistry()
// ParseCommand matches word against the command registry: a two-character
// word ("-x") is looked up by its short name, anything longer is treated as
// a long option with its leading two characters ("--") stripped. On a match
// the command's type, expected args and long name are copied into c.
func (c *cliCommand) ParseCommand(word string) error {
	// BUG FIX: a word shorter than two characters previously fell through
	// to the default case (for len 0) and panicked on word[2:].
	if len(word) < 2 {
		return errors.New("can't parse empty option")
	}
	if len(word) == 2 {
		// short option: "-x"
		for _, cmd := range cliCommands {
			if cmd.short != 0 && word[1] == cmd.short {
				c.ct, c.expArgs, c.long = cmd.ct, cmd.expArgs, cmd.long
				return nil
			}
		}
		return errors.New("there is no such option")
	}
	// long option: strip the leading "--"
	trmWord := word[2:]
	for _, cmd := range cliCommands {
		if cmd.long != "" && trmWord == cmd.long {
			c.ct, c.expArgs, c.long = cmd.ct, cmd.expArgs, cmd.long
			return nil
		}
	}
	return errors.New("there is no such option")
}
/*
ParseArg adds the next argument (if possible) to the cliCommand gotArgs slice.
*/
func (c *cliCommand) ParseArg(word string) error {
	// arguments are only valid after an option has been parsed
	if c.ct == None {
		return errors.New("there isn't an option for this argument")
	}
	if strings.Contains(c.expArgs, "...") {
		// todo: implement support of multiple args
		return errors.New("sorry, multiple arguments are not implemented yet")
	}
	// at most one argument is currently supported per command
	if len(c.gotArgs) != 0 {
		return errors.New("more than one argument was sent")
	}
	switch c.expArgs {
	case "%s":
		c.gotArgs = append(c.gotArgs, word)
	case "%t":
		var s bool
		// BUG FIX: Fscanf requires a pointer; passing the value itself made
		// every boolean argument fail to parse.
		_, err := fmt.Fscanf(strings.NewReader(word), "%t", &s)
		if err != nil {
			return err
		}
		c.gotArgs = append(c.gotArgs, s)
	case "%d":
		var s int
		// BUG FIX: same pointer fix as the %t case above.
		_, err := fmt.Fscanf(strings.NewReader(word), "%d", &s)
		if err != nil {
			return err
		}
		c.gotArgs = append(c.gotArgs, s)
	case "":
		return errors.New("argument wasn't expected for this command")
	}
	return nil
}
// parseCommandList turns a tokenized command line into a list of parsed
// commands. Words starting with '-' begin a new command; all other words are
// arguments of the most recent command.
func parseCommandList(args []string) ([]*cliCommand, error) {
	parsed := make([]*cliCommand, 0, 3)
	cmd := new(cliCommand)
	for _, word := range args {
		// BUG FIX: empty tokens (e.g. from doubled spaces) previously
		// panicked on word[0].
		if word == "" {
			continue
		}
		var err error
		if word[0] == '-' {
			// BUG FIX: previously the same *cliCommand was appended for
			// every flag, so the result contained N aliases of the last
			// command parsed. Allocate a fresh command per flag instead.
			cmd = new(cliCommand)
			if err = cmd.ParseCommand(word); err == nil {
				parsed = append(parsed, cmd)
			}
		} else {
			err = cmd.ParseArg(word)
		}
		if err != nil {
			return nil, err
		}
	}
	return parsed, nil
}
// parseCommandLine splits the raw input line on single spaces and parses the
// resulting words into commands.
func parseCommandLine(s string) ([]*cliCommand, error) {
	words := strings.Split(s, " ")
	return parseCommandList(words)
}
|
package sqlite
import (
"fmt"
)
// SQLiteCount returns the number of rows in the given table.
//
// SECURITY NOTE: tbl_name is interpolated directly into the SQL text
// (placeholders cannot bind identifiers), so callers must only pass trusted
// table names — never user input.
func SQLiteCount(db *SQLiteDB, tbl_name string) (int64, error) {
	conn, err := db.GetConn(true)
	if err != nil {
		return 0, err
	}
	row := conn.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s", tbl_name))
	if row == nil {
		return 0, fmt.Errorf("QueryRow return failed")
	}
	var cnt int64
	if err := row.Scan(&cnt); err != nil {
		return 0, err
	}
	return cnt, nil
}
|
package main
/*
#cgo pkg-config: gstreamer-1.0 gstreamer-app-1.0
#include <stdio.h>
#include <gst/gst.h>
void cb_proxy_padadd(GstElement* v, GstPad *v2,gpointer v3);
*/
import "C"
import (
"fmt"
"strings"
"unsafe"
//"github.com/notedit/gst"
"github.com/tomberek/gst"
)
//export cb_proxy_padadd
func cb_proxy_padadd(v *C.GstElement, v2 *C.GstPad, v3 unsafe.Pointer) {
	// Recreate a gst.Pad without having access to private .pad field
	// TODO: allow construction of gst.Pad from C.GstPad and remove unsafe
	// NOTE(review): this relies on the gst binding's Pad layout starting
	// with the C pad pointer — confirm against the binding's source.
	pad := (*gst.Pad)(unsafe.Pointer(&v2))
	element := (*gst.Element)(v3)
	fmt.Printf("[ USR ] enter cb_proxy_padadd\n")
	fmt.Printf("[ USR ] element: %+v\n", element)
	// only pads carrying audio caps are linked into the convert element
	capstr := pad.GetCurrentCaps().ToString()
	if strings.HasPrefix(capstr, "audio") {
		sinkpad := convert.GetStaticPad("sink")
		pad.Link(sinkpad)
	}
}
// convert is the audioconvert element; it is package-level so the C
// pad-added callback (cb_proxy_padadd) can link decoder pads into it.
var convert *gst.Element

// main builds a uridecodebin -> audioconvert -> autoaudiosink pipeline,
// starts playback of a remote webm file, and pumps the bus until an
// end-of-stream message arrives.
func main() {
	pipeline, err := gst.PipelineNew("test-pipeline")
	if err != nil {
		panic(err)
	}
	// element-creation errors are discarded here — TODO confirm this is
	// acceptable (a nil element would panic on Add below)
	source, _ := gst.ElementFactoryMake("uridecodebin", "source")
	convert, _ = gst.ElementFactoryMake("audioconvert", "convert")
	sink, _ := gst.ElementFactoryMake("autoaudiosink", "sink")
	pipeline.Add(source)
	pipeline.Add(convert)
	pipeline.Add(sink)
	// convert -> sink is linked statically; source -> convert is linked in
	// the pad-added callback once uridecodebin exposes its pads
	convert.Link(sink)
	source.SetObject("uri", "http://dl5.webmfiles.org/big-buck-bunny_trailer.webm")
	source.SetCallback("pad-added", C.cb_proxy_padadd)
	pipeline.SetState(gst.StatePlaying)
	bus := pipeline.GetBus()
	for {
		// block until an error or end-of-stream message arrives
		message := bus.Pull(gst.MessageError | gst.MessageEos)
		fmt.Println("message:", message.GetName())
		fmt.Printf("message: %+v\n", message.GetType())
		fmt.Printf("message: %s\n", message.GetStructure().ToString())
		if message.GetType() == gst.MessageEos {
			break
		}
	}
}
|
package main
import (
"KServer/manage"
"KServer/manage/config"
"KServer/server/discovery/services"
"KServer/server/utils"
"KServer/server/utils/msg"
"fmt"
)
// main wires up and starts the service-discovery server: it enables the
// Redis/Kafka/Mongo subsystems on the manager, opens the connections,
// registers the discovery message handler and starts the server loop.
func main() {
	// select which services the manager should enable
	conf := config.NewManageConfig()
	conf.Server.Head = msg.ServiceDiscoveryTopic
	conf.DB.Redis = true
	conf.Message.Kafka = true
	conf.DB.Mongo = true
	m := manage.NewManage(conf)
	// initialize the redis master and slave connection pools
	redisConfig := config.NewRedisConfig(utils.RedisConFile)
	redis := m.DB().Redis()
	if !redis.StartMasterPool(redisConfig.GetMasterAddr(), redisConfig.Master.PassWord, redisConfig.Master.MaxIdle, redisConfig.Master.MaxActive) ||
		!redis.StartSlavePool(redisConfig.GetSlaveAddr(), redisConfig.Slave.PassWord, redisConfig.Slave.MaxIdle, redisConfig.Slave.MaxActive) {
		fmt.Println("Redis 开启失败")
		return
	}
	// initialize the kafka producer
	kafkaConfig := config.NewKafkaConfig(utils.KafkaConFile)
	kafka := m.Message().Kafka()
	err := kafka.Send().Open([]string{kafkaConfig.GetAddr()})
	if err != nil {
		fmt.Println("Kafka Send 开启失败")
		return
	}
	// start the mongo database connection
	m.DB().Mongo().Start()
	// register the discovery handler and start consuming the topic
	s := services.NewServiceDiscovery(m)
	kafka.AddRouter(msg.ServiceDiscoveryTopic, msg.ServiceDiscoveryID, s.ServiceHandle)
	kafka.StartListen([]string{kafkaConfig.GetAddr()}, msg.ServiceDiscoveryTopic, utils.NewOffset)
	m.Server().Start()
}
|
/*
Given an array of ints, return True if 6 appears as either the first or last element in the array. The array will be length 1 or more.
*/
package main
import (
"fmt"
)
// first_last6 reports whether 6 appears as either the first or the
// last element of ints. An empty (or nil) slice yields false.
func first_last6(ints []int) bool {
	if len(ints) == 0 {
		return false
	}
	return ints[0] == 6 || ints[len(ints)-1] == 6
}
// main runs four fixed checks against first_last6 and prints "OK" only
// when every expectation holds, "NOT OK" otherwise.
func main() {
	cases := []struct {
		in   []int
		want bool
	}{
		{[]int{6, 6}, true},
		{[]int{6}, true},
		{[]int{5, 6, 1}, false},
		{[]int{6, 1, 1, 6}, true},
	}
	passed := 0
	for _, c := range cases {
		if first_last6(c.in) == c.want {
			passed++
		}
	}
	if passed == len(cases) {
		fmt.Println("OK")
	} else {
		fmt.Println("NOT OK")
	}
}
|
// Copyright (c) 2020 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package k8s
import (
"encoding/base64"
"fmt"
"io"
"os"
"github.com/pkg/errors"
"github.com/vladimirvivien/gexe"
)
// FetchWorkloadConfig...
// FetchWorkloadConfig retrieves the kubeconfig of a workload cluster by
// reading the `<clusterName>-kubeconfig` secret from the management
// cluster via kubectl, base64-decoding the secret value, and streaming
// it into a temporary file. It returns the path of that file.
//
// NOTE(review): clusterName/clusterNamespace/mgmtKubeConfigPath are
// interpolated into a shell command line — callers must not pass
// untrusted input here; confirm all call sites use trusted values.
func FetchWorkloadConfig(clusterName, clusterNamespace, mgmtKubeConfigPath string) (string, error) {
	var filePath string
	cmdStr := fmt.Sprintf(`kubectl get secrets/%s-kubeconfig --template '{{.data.value}}' --namespace=%s --kubeconfig %s`, clusterName, clusterNamespace, mgmtKubeConfigPath)
	p := gexe.StartProc(cmdStr)
	// Check for a process start/run failure before reading its output.
	if p.Err() != nil {
		return filePath, fmt.Errorf("kubectl get secrets failed: %s: %s", p.Err(), p.Result())
	}
	f, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("%s-workload-config", clusterName))
	if err != nil {
		return filePath, errors.Wrap(err, "Cannot create temporary file")
	}
	filePath = f.Name()
	defer f.Close()
	// The secret's value is base64-encoded; decode while copying so the
	// whole kubeconfig is never buffered in memory.
	base64Dec := base64.NewDecoder(base64.StdEncoding, p.Out())
	if _, err := io.Copy(f, base64Dec); err != nil {
		return filePath, errors.Wrap(err, "error decoding workload kubeconfig")
	}
	return filePath, nil
}
|
package middleware
import (
"context"
"encoding/json"
"github.com/joshia/automated-api-test-service/testapp/config/apperror"
"github.com/joshia/automated-api-test-service/testapp/lib/message"
"github.com/joshia/automated-api-test-service/testapp/lib/uuid"
"log"
"net/http"
)
// InjectRequestId returns a copy of r whose context carries a freshly
// generated request id under the "requestId" key.
//
// NOTE(review): go vet flags plain string context keys (collision
// risk); switching to an unexported key type would require updating
// every reader of "requestId" — confirm call sites before changing.
func InjectRequestId(r *http.Request) *http.Request {
	ctx := r.Context()
	ctx = context.WithValue(ctx, "requestId", uuid.NewRequestId())
	r = r.WithContext(ctx)
	return r
}
// RewriteErrorResponse decodes a raw V1Error JSON payload b and
// re-encodes it in the standard error-response envelope produced by
// message.SetErrorResponse.
//
// NOTE(review): log.Fatalf terminates the whole process on a malformed
// payload, and the logged constant refers to configuration-file
// decoding rather than response decoding — both look unintentional,
// but callers may rely on the current fail-fast behavior; confirm
// before softening to an error return.
func RewriteErrorResponse(b []byte) []byte {
	errv := &apperror.V1Error{}
	err := json.Unmarshal(b, errv)
	if err != nil {
		log.Fatalf(apperror.ErrFailedToDecodeConfigurationFile, err)
	}
	errRes := message.SetErrorResponse(errv)
	res, err := json.Marshal(errRes)
	if err != nil {
		log.Fatalf(apperror.ErrFailedToDecodeConfigurationFile, err)
	}
	return res
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import klog "k8s.io/klog/v2"
const klogMaxLevel = 5
// Klog satisfies the Verboser interface.
// Klog satisfies the Verboser interface by pre-building one klogLogger
// per verbosity level (0 through klogMaxLevel), so V is a plain index.
type Klog struct {
	levels []*klogLogger
}
// NewKlog creates a new Klog wrapped in the Verboser interface. It
// eagerly builds a logger for every verbosity level from 0 through
// klogMaxLevel so that V never has to allocate.
func NewKlog() Verboser {
	levels := make([]*klogLogger, 0, klogMaxLevel+1)
	// Loop to klogMaxLevel (previously a hard-coded 5) so the bound
	// stays in sync with the capacity above and the clamp in V.
	for level := 0; level <= klogMaxLevel; level++ {
		v := klog.V(klog.Level(level))
		levels = append(levels, &klogLogger{v})
	}
	return &Klog{levels}
}
// V returns a Logger for the provided level, clamping any level above
// klogMaxLevel to the most verbose pre-built logger.
//
// NOTE(review): a negative level would index out of range — this
// assumes Level is non-negative; confirm at the Level definition.
func (l *Klog) V(level Level) Logger {
	if level > klogMaxLevel {
		return l.levels[klogMaxLevel]
	}
	return l.levels[level]
}
// klogLogger satisfies the Logger interface by delegating to a
// pre-bound klog verbosity handle.
type klogLogger struct {
	v klog.Verbose
}

// Enabled returns whether logging at this logger's level is enabled.
func (l *klogLogger) Enabled() bool {
	return l.v.Enabled()
}

// Log logs a printf-style message at this logger's verbosity level.
func (l *klogLogger) Log(format string, args ...interface{}) {
	l.v.Infof(format, args...)
}
|
package crawler
import (
"context"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
logging "github.com/ipfs/go-log"
//lint:ignore SA1019 TODO migrate away from gogo pb
"github.com/libp2p/go-msgio/protoio"
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
kbucket "github.com/libp2p/go-libp2p-kbucket"
)
var (
logger = logging.Logger("dht-crawler")
_ Crawler = (*DefaultCrawler)(nil)
)
type (
	// Crawler connects to hosts in the DHT to track routing tables of peers.
	Crawler interface {
		// Run crawls the DHT starting from the startingPeers, and calls either handleSuccess or handleFail depending on whether a peer was successfully contacted or not.
		Run(ctx context.Context, startingPeers []*peer.AddrInfo, handleSuccess HandleQueryResult, handleFail HandleQueryFail)
	}
	// DefaultCrawler provides a default implementation of Crawler.
	DefaultCrawler struct {
		parallelism          int                   // number of concurrent worker goroutines used by Run
		connectTimeout       time.Duration         // per-peer dial timeout (see queryPeer)
		host                 host.Host             // local libp2p host used for dialing and its peerstore
		dhtRPC               *pb.ProtocolMessenger // DHT wire-protocol client
		dialAddressExtendDur time.Duration         // TTL used when adding discovered addresses to the peerstore
	}
)
// NewDefaultCrawler creates a new DefaultCrawler, applying the package
// defaults first and then the caller's options on top. It fails if the
// defaults, any option, or the protocol-messenger construction fails.
func NewDefaultCrawler(host host.Host, opts ...Option) (*DefaultCrawler, error) {
	o := new(options)
	if err := defaults(o); err != nil {
		return nil, err
	}
	for _, opt := range opts {
		if err := opt(o); err != nil {
			return nil, err
		}
	}
	// The messenger speaks the DHT wire protocol over streams opened by
	// our messageSender, bounded by the configured per-message timeout.
	pm, err := pb.NewProtocolMessenger(&messageSender{h: host, protocols: o.protocols, timeout: o.perMsgTimeout})
	if err != nil {
		return nil, err
	}
	return &DefaultCrawler{
		parallelism:          o.parallelism,
		connectTimeout:       o.connectTimeout,
		host:                 host,
		dhtRPC:               pm,
		dialAddressExtendDur: o.dialAddressExtendDur,
	}, nil
}
// messageSender handles sending wire protocol messages to a given peer,
// opening a fresh stream per call.
type messageSender struct {
	h         host.Host
	protocols []protocol.ID // protocols negotiated when opening a stream
	timeout   time.Duration // read timeout applied per request (see SendRequest)
}
// SendRequest sends a peer a message over a fresh stream and waits for
// its response, bounded by ms.timeout.
//
// The stream is always released: closed on success, reset on a write or
// read failure. (Previously a failed WriteMsg returned before any
// close/reset was registered, leaking the stream.)
func (ms *messageSender) SendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	s, err := ms.h.NewStream(ctx, p, ms.protocols...)
	if err != nil {
		return nil, err
	}
	w := protoio.NewDelimitedWriter(s)
	if err := w.WriteMsg(pmes); err != nil {
		// Fix: reset the stream instead of leaking it on write failure.
		_ = s.Reset()
		return nil, err
	}
	r := protoio.NewDelimitedReader(s, network.MessageSizeMax)
	tctx, cancel := context.WithTimeout(ctx, ms.timeout)
	defer cancel()
	defer func() { _ = s.Close() }()
	msg := new(pb.Message)
	if err := ctxReadMsg(tctx, r, msg); err != nil {
		// Reset also unblocks the reader goroutine left by ctxReadMsg.
		_ = s.Reset()
		return nil, err
	}
	return msg, nil
}
// ctxReadMsg reads one message from rc into mes, honoring ctx
// cancellation. The blocking read runs in a helper goroutine; if ctx
// finishes first, that goroutine remains blocked in ReadMsg until the
// caller resets the underlying stream (SendRequest does so on error),
// at which point it exits.
func ctxReadMsg(ctx context.Context, rc protoio.ReadCloser, mes *pb.Message) error {
	// Buffered so the reader goroutine never blocks on send even if the
	// ctx.Done branch wins the select below.
	errc := make(chan error, 1)
	go func(r protoio.ReadCloser) {
		defer close(errc)
		err := r.ReadMsg(mes)
		errc <- err
	}(rc)
	select {
	case err := <-errc:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
// SendMessage sends a peer a message without waiting on a response.
// A fresh stream is opened for the write and closed when done.
func (ms *messageSender) SendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error {
	stream, err := ms.h.NewStream(ctx, p, ms.protocols...)
	if err != nil {
		return err
	}
	defer func() { _ = stream.Close() }()
	return protoio.NewDelimitedWriter(stream).WriteMsg(pmes)
}
// HandleQueryResult is a callback on successful peer query; rtPeers are
// the routing-table entries collected from the queried peer.
type HandleQueryResult func(p peer.ID, rtPeers []*peer.AddrInfo)

// HandleQueryFail is a callback on failed peer query, carrying the
// error that ended the query.
type HandleQueryFail func(p peer.ID, err error)
// Run crawls dht peers from an initial seed of `startingPeers`.
//
// A fixed pool of c.parallelism workers consumes peer IDs from the jobs
// channel; the main loop feeds jobs and folds results back into the
// frontier (toDial) until nothing is left to dial and no query is in
// flight. handleSuccess/handleFail (either may be nil) run on the main
// goroutine, so they need no synchronization of their own.
func (c *DefaultCrawler) Run(ctx context.Context, startingPeers []*peer.AddrInfo, handleSuccess HandleQueryResult, handleFail HandleQueryFail) {
	jobs := make(chan peer.ID, 1)
	results := make(chan *queryResult, 1)

	// Start worker goroutines
	var wg sync.WaitGroup
	wg.Add(c.parallelism)
	for i := 0; i < c.parallelism; i++ {
		go func() {
			defer wg.Done()
			for p := range jobs {
				res := c.queryPeer(ctx, p)
				results <- res
			}
		}()
	}
	// Closing jobs (then waiting) shuts the workers down when Run ends.
	defer wg.Wait()
	defer close(jobs)

	var toDial []*peer.AddrInfo
	peersSeen := make(map[peer.ID]struct{})

	// Seed the frontier, merging any caller-provided addresses into the
	// peerstore; peers with no known addresses cannot be dialed.
	numSkipped := 0
	for _, ai := range startingPeers {
		extendAddrs := c.host.Peerstore().Addrs(ai.ID)
		if len(ai.Addrs) > 0 {
			extendAddrs = append(extendAddrs, ai.Addrs...)
			c.host.Peerstore().AddAddrs(ai.ID, extendAddrs, c.dialAddressExtendDur)
		}
		if len(extendAddrs) == 0 {
			numSkipped++
			continue
		}
		toDial = append(toDial, ai)
		peersSeen[ai.ID] = struct{}{}
	}

	if numSkipped > 0 {
		logger.Infof("%d starting peers were skipped due to lack of addresses. Starting crawl with %d peers", numSkipped, len(toDial))
	}

	numQueried := 0
	outstanding := 0

	for len(toDial) > 0 || outstanding > 0 {
		// jobCh stays nil when there is nothing to dial, so the send
		// case below can never be selected and only results are drained.
		var jobCh chan peer.ID
		var nextPeerID peer.ID
		if len(toDial) > 0 {
			jobCh = jobs
			nextPeerID = toDial[0].ID
		}

		select {
		case res := <-results:
			if len(res.data) > 0 {
				logger.Debugf("peer %v had %d peers", res.peer, len(res.data))
				rtPeers := make([]*peer.AddrInfo, 0, len(res.data))
				for p, ai := range res.data {
					c.host.Peerstore().AddAddrs(p, ai.Addrs, c.dialAddressExtendDur)
					// Each newly discovered peer joins the frontier
					// exactly once.
					if _, ok := peersSeen[p]; !ok {
						peersSeen[p] = struct{}{}
						toDial = append(toDial, ai)
					}
					rtPeers = append(rtPeers, ai)
				}
				if handleSuccess != nil {
					handleSuccess(res.peer, rtPeers)
				}
			} else if handleFail != nil {
				handleFail(res.peer, res.err)
			}
			outstanding--
		case jobCh <- nextPeerID:
			outstanding++
			numQueried++
			toDial = toDial[1:]
			logger.Debugf("starting %d out of %d", numQueried, len(peersSeen))
		}
	}
}
// queryResult is the outcome of querying one peer: the routing-table
// entries it reported (keyed by peer ID), or the error that ended the
// query (data is nil in that case).
type queryResult struct {
	peer peer.ID
	data map[peer.ID]*peer.AddrInfo
	err  error
}
// queryPeer connects to nextPeer and asks it for its closest peers at
// every common prefix length (cpl) from 0 through 15, accumulating the
// unique AddrInfos it reports. On any failure the result carries the
// error with nil data, so a partial crawl is never mistaken for a
// complete routing table.
func (c *DefaultCrawler) queryPeer(ctx context.Context, nextPeer peer.ID) *queryResult {
	// The throwaway routing table is used only to generate random peer
	// IDs at a given cpl relative to nextPeer.
	tmpRT, err := kbucket.NewRoutingTable(20, kbucket.ConvertPeerID(nextPeer), time.Hour, c.host.Peerstore(), time.Hour, nil)
	if err != nil {
		logger.Errorf("error creating rt for peer %v : %v", nextPeer, err)
		return &queryResult{nextPeer, nil, err}
	}

	connCtx, cancel := context.WithTimeout(ctx, c.connectTimeout)
	defer cancel()
	err = c.host.Connect(connCtx, peer.AddrInfo{ID: nextPeer})
	if err != nil {
		logger.Debugf("could not connect to peer %v: %v", nextPeer, err)
		return &queryResult{nextPeer, nil, err}
	}

	localPeers := make(map[peer.ID]*peer.AddrInfo)
	var retErr error
	for cpl := 0; cpl <= 15; cpl++ {
		// NOTE(review): panicking here treats GenRandPeerID failure as
		// impossible for cpl <= 15 — confirm that invariant upstream.
		generatePeer, err := tmpRT.GenRandPeerID(uint(cpl))
		if err != nil {
			panic(err)
		}
		peers, err := c.dhtRPC.GetClosestPeers(ctx, nextPeer, generatePeer)
		if err != nil {
			logger.Debugf("error finding data on peer %v with cpl %d : %v", nextPeer, cpl, err)
			retErr = err
			break
		}
		for _, ai := range peers {
			// Deduplicate; the first AddrInfo seen for a peer wins.
			if _, ok := localPeers[ai.ID]; !ok {
				localPeers[ai.ID] = ai
			}
		}
	}

	if retErr != nil {
		return &queryResult{nextPeer, nil, retErr}
	}
	return &queryResult{nextPeer, localPeers, retErr}
}
|
package main
import (
"fmt"
"log"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
ethereum "github.com/ethereum/go-ethereum"
hello "github.com/sanguohot/medichain/contracts/hello"
"context"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/accounts/abi"
)
// main connects to a geth node, fetches every log emitted by the Hello
// contract since block 0, decodes each onSaySomethingElse event, prints
// its topics, and finally prints the event signature hash.
func main() {
	client, err := ethclient.Dial("http://10.6.250.55:8545")
	if err != nil {
		log.Fatal(err)
		return
	}
	// Other known deployment addresses, kept for reference:
	// 0xb349Eba018bFA9d89Da90829629D39668F6653A2
	// 0xca21167a870cf8b9618d259af454c6d00b30b028
	// 0xB818715eb048286A608B5E9851877AD7A30a41A0
	contractAddress := common.HexToAddress("0xfcd14ED03E6D94CA127d557a1883Dd042a81ea11")
	// Filter all logs for this contract starting from block 0; no upper
	// block bound is set.
	query := ethereum.FilterQuery{
		FromBlock: big.NewInt(0),
		ToBlock:   nil,
		Addresses: []common.Address{
			contractAddress,
		},
	}
	logs, err := client.FilterLogs(context.Background(), query)
	if err != nil {
		log.Fatal("client.FilterLogs", err)
		return
	}
	contractAbi, err := abi.JSON(strings.NewReader(string(hello.HelloABI)))
	if err != nil {
		log.Fatal("abi.JSON", err)
		return
	}
	fmt.Println("logs count ===>", len(logs))
	for _, vLog := range logs {
		var event struct {
			NewSaying string
		}
		// Arg 1 is the struct the event arguments decode into.
		// Arg 2 is the event name, not a contract function name.
		// Arg 3 is the raw event data.
		err := contractAbi.Unpack(&event, "onSaySomethingElse", vLog.Data)
		if err != nil {
			log.Fatal("contractAbi.Unpack", err)
			return
		}
		fmt.Println("event ===>", event)
		// Collect up to four topics as hex strings for printing.
		var topics [4]string
		for i := range vLog.Topics {
			topics[i] = vLog.Topics[i].Hex()
		}
		fmt.Println("topics ===>", topics) // 0xe79e73da417710ae99aa2088575580a60415d359acfad9cdd3382d59c80281d4
	}
	eventSignature := []byte("onSaySomethingElse(string)")
	hash := crypto.Keccak256Hash(eventSignature)
	fmt.Println("eventSignature hash ===>", hash.Hex())
}
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package temptable_test
import (
"testing"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
// TestSelectTemporaryTableUnionView checks that a view defined over a
// normal table keeps reading the normal table even after a temporary
// table of the same name shadows it for direct queries, including when
// both appear in one UNION ALL.
// Regression test for https://github.com/pingcap/tidb/issues/42563.
func TestSelectTemporaryTableUnionView(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("use test")
	tk.MustExec("create table t(a int)")
	tk.MustExec("insert into t values(1)")
	tk.MustExec("create view tv as select a from t")
	// From here on, direct reads of t hit the temporary table.
	tk.MustExec("create temporary table t(a int)")
	tk.MustExec("insert into t values(2)")
	tk.MustQuery("select * from tv").Check(testkit.Rows("1"))
	tk.MustQuery("select * from t").Check(testkit.Rows("2"))
	tk.MustQuery("select * from (select a from t union all select a from tv) t1 order by a").Check(testkit.Rows("1", "2"))
}
|
package formaterror
import "strings"
var errorMessages = make(map[string]string)
// FormatError translates a low-level uniqueness-violation error string
// into a map of user-facing messages keyed by error kind. When neither
// "isbn" nor "title" appears in errString, a generic
// "Incorrect_details" entry is returned.
//
// Fixes over the previous revision:
//   - results are built in a fresh local map; the old package-level map
//     accumulated entries across calls and was unsafe for concurrent use
//   - the title branch now writes "Taken_title" (it previously
//     overwrote "Taken_isbn" by copy-paste mistake)
//   - the unreachable trailing `return nil` is gone
func FormatError(errString string) map[string]string {
	errorMessages := make(map[string]string)
	if strings.Contains(errString, "isbn") {
		errorMessages["Taken_isbn"] = "There is already a book with that isbn, remember it is unique"
	}
	if strings.Contains(errString, "title") {
		errorMessages["Taken_title"] = "There is already a book with that title"
	}
	if len(errorMessages) == 0 {
		errorMessages["Incorrect_details"] = "Incorrect Details"
	}
	return errorMessages
}
|
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package version
import (
"encoding/json"
"fmt"
)
// Command is the option for plugin to print the version.
const Command = "version"

// Build metadata for this binary; set elsewhere (zero values by default).
var (
	// Version is the version number of the repository.
	Version string
	// GitShortHash is the short hash of the Git HEAD.
	GitShortHash string
	// BuildTime is the build time stamp.
	BuildTime string
)

// versionInfo is the JSON shape emitted by String.
type versionInfo struct {
	Version      string `json:"version"`
	GitShortHash string `json:"gitShortHash"`
	Built        string `json:"built"`
}

// String returns a JSON version string from the versionInfo type.
func String() (string, error) {
	info := versionInfo{
		Version:      Version,
		GitShortHash: GitShortHash,
		Built:        BuildTime,
	}
	raw, err := json.Marshal(info)
	if err != nil {
		return "", fmt.Errorf("version: failed to marshal version info: %v: %v", info, err)
	}
	return string(raw), nil
}
|
package letter
import "sync"
var mutex = &sync.Mutex{}
// Frequency counts how often each rune occurs in s.
func Frequency(s string) map[rune]int {
	counts := make(map[rune]int)
	for _, r := range s {
		counts[r]++
	}
	return counts
}
// Concurrent folds the rune counts of s into the shared map *m, taking
// the package-level mutex around each increment, then signals
// completion by sending one byte on c.
func Concurrent(s string, m *map[rune]int, c chan byte) {
	counts := *m
	for _, r := range s {
		mutex.Lock()
		counts[r]++
		mutex.Unlock()
	}
	c <- 1
}
// ConcurrentFrequency counts rune frequencies across all strings in s,
// processing each string in its own goroutine.
//
// Each goroutine builds a private count map that is merged on the main
// goroutine, so no locking is needed. The previous version shared one
// map among all goroutines and took a mutex for every single rune,
// serializing the work it meant to parallelize.
func ConcurrentFrequency(s []string) map[rune]int {
	results := make(chan map[rune]int)
	for _, v := range s {
		// Pass v explicitly so each goroutine counts its own string.
		go func(str string) {
			counts := make(map[rune]int)
			for _, r := range str {
				counts[r]++
			}
			results <- counts
		}(v)
	}
	m := map[rune]int{}
	for range s {
		for r, n := range <-results {
			m[r] += n
		}
	}
	return m
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docgen
import (
"encoding/json"
"log"
"strings"
"github.com/oam-dev/kubevela/pkg/utils"
)
// Language is used to define the language of the generated docs.
type Language string

var (
	// En is english, the default language
	En = I18n{lang: LangEn}
	// Zh is Chinese
	Zh = I18n{lang: LangZh}
)

const (
	// LangEn is english, the default language
	LangEn Language = "English"
	// LangZh is Chinese
	LangZh Language = "Chinese"
)

// I18n will automatically get translated data for its configured
// language, falling back to English (see Get).
type I18n struct {
	lang Language
}
// LoadI18nData will load i18n data for the package from a local path or
// remote location. Entries are merged into the global i18nDoc table
// under lowercased keys; unreadable or malformed data is logged and
// ignored so the built-in table keeps working.
func LoadI18nData(path string) {
	log.Printf("loading i18n data from %s", path)
	data, err := utils.ReadRemoteOrLocalPath(path, false)
	if err != nil {
		log.Println("ignore using the i18n data", err)
		return
	}
	// Expected shape: {"key": {"English": "...", "Chinese": "..."}}.
	var dat = map[string]map[Language]string{}
	err = json.Unmarshal(data, &dat)
	if err != nil {
		log.Println("ignore using the i18n data", err)
		return
	}
	for k, v := range dat {
		// The key itself doubles as the English text when absent.
		if _, ok := v[LangEn]; !ok {
			v[LangEn] = k
		}
		k = strings.ToLower(k)
		ed, ok := i18nDoc[k]
		if !ok {
			ed = map[Language]string{}
		}
		// Merge non-empty, trimmed translations over existing entries.
		for sk, sv := range v {
			sv = strings.TrimSpace(sv)
			if sv == "" {
				continue
			}
			ed[sk] = sv
		}
		i18nDoc[k] = ed
	}
}
// Language returns the language used in this i18n instance, defaulting
// to English for a nil or zero-value receiver.
func (i *I18n) Language() Language {
	if i != nil && i.lang != "" {
		return i.lang
	}
	return En.Language()
}
// trans looks str up in the i18n dictionary. The second result reports
// whether the key exists at all; when it exists but has no entry for
// the instance language, the original string is returned with ok=true.
func (i *I18n) trans(str string) (string, bool) {
	entry, found := i18nDoc[str]
	if !found {
		return str, false
	}
	if translated := entry[i.lang]; translated != "" {
		return translated, true
	}
	return str, true
}
// Get translate for the string. It tries progressively normalized
// variants of str against the dictionary — as given, space-trimmed,
// without a trailing "." or "。", and finally lowercased — returning
// the first hit. When nothing matches, the suffix-trimmed form (raw)
// is returned rather than the lowercased one.
func (i *I18n) Get(str string) string {
	if i == nil || i.lang == "" {
		return En.Get(str)
	}
	if data, ok := i.trans(str); ok {
		return data
	}
	str = strings.TrimSpace(str)
	if data, ok := i.trans(str); ok {
		return data
	}
	str = strings.TrimSuffix(str, ".")
	if data, ok := i.trans(str); ok {
		return data
	}
	str = strings.TrimSuffix(str, "。")
	if data, ok := i.trans(str); ok {
		return data
	}
	// raw preserves the display form before the more aggressive
	// lowercase lookup below.
	raw := str
	str = strings.TrimSpace(str)
	if data, ok := i.trans(str); ok {
		return data
	}
	str = strings.ToLower(str)
	if data, ok := i.trans(str); ok {
		return data
	}
	return raw
}
// i18nDoc holds all the words and phrases for internationalization in
// cli and docs. Keys added at runtime by LoadI18nData are lowercased;
// the built-in keys below are matched through Get's fallback chain.
var i18nDoc = map[string]map[Language]string{
	".": {
		LangZh: "。",
		LangEn: ".",
	},
	"Description": {
		LangZh: "描述",
		LangEn: "Description",
	},
	"Scope": {
		LangZh: "适用范围",
		LangEn: "Scope",
	},
	"Examples": {
		LangZh: "示例",
		LangEn: "Examples",
	},
	"Specification": {
		LangZh: "参数说明",
		LangEn: "Specification",
	},
	"AlibabaCloud": {
		LangZh: "阿里云",
		LangEn: "Alibaba Cloud",
	},
	"AWS": {
		LangZh: "AWS",
		LangEn: "AWS",
	},
	"Azure": {
		LangZh: "Azure",
		LangEn: "Azure",
	},
	"Name": {
		LangZh: "名称",
		LangEn: "Name",
	},
	"Type": {
		LangZh: "类型",
		LangEn: "Type",
	},
	"Required": {
		LangZh: "是否必须",
		LangEn: "Required",
	},
	"Default": {
		LangZh: "默认值",
		LangEn: "Default",
	},
	"Apply To Component Types": {
		LangZh: "适用于组件类型",
		LangEn: "Apply To Component Types",
	},
}
|
package main
// main exercises reverseBits with the classic sample input; the result
// is intentionally discarded.
func main() {
	sample := 43261596
	reverseBits(uint32(sample))
}
// reverseBits returns num with the order of its 32 bits reversed
// (LeetCode 190). Each of the 32 iterations shifts the accumulator
// left by one and appends the current lowest bit of num, so the bit
// consumed first ends up in the highest position.
func reverseBits(num uint32) uint32 {
	var reversed uint32
	for bit := 0; bit < 32; bit++ {
		reversed = (reversed << 1) | (num & 1)
		num >>= 1
	}
	return reversed
}
//【190】颠倒给定的32位无符号整数的二进制位
//按位&1操作就能得到当前位
|
package main
import (
"fmt"
)
// https://leetcode-cn.com/problems/daily-temperatures/
// 739. Daily Temperatures
//------------------------------------------------------------------------------

// dailyTemperatures returns, for each day, the number of days to wait
// until a warmer temperature (0 when no warmer day follows).
// Delegates to solution 0 below.
func dailyTemperatures(T []int) []int {
	return dailyTemperatures0(T)
}
//------------------------------------------------------------------------------
// Solution 1: monotonic (decreasing) stack.
//
// The stack holds indices of days still waiting for a warmer day; their
// temperatures are non-increasing from bottom to top. When day `day` is
// warmer than the day on top of the stack, that top day's answer is
// day - top; pop and repeat. Days never popped keep the default 0.
//
// Complexity: O(N) time, O(N) space.
func dailyTemperatures1(T []int) []int {
	n := len(T)
	res := make([]int, n)
	stack := make([]int, 0, n)
	for day, temp := range T {
		for len(stack) > 0 && T[stack[len(stack)-1]] < temp {
			prev := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			res[prev] = day - prev
		}
		stack = append(stack, day)
	}
	return res
}
//------------------------------------------------------------------------------
// Solution 0: value-indexed backward scan.
//
// Walking the days from last to first, next[t] remembers the index of
// the nearest later day whose temperature is exactly t (0 meaning "not
// seen"). Day i's answer is the closest among next[T[i]+1 .. 100].
//
// Complexity: O(N*max(T)) time, O(max(T)) space.
func dailyTemperatures0(T []int) []int {
	const maxTemp = 101
	next := make([]int, maxTemp)
	res := make([]int, len(T))
	for i := len(T) - 1; i >= 0; i-- {
		for t := T[i] + 1; t < maxTemp; t++ {
			if next[t] > 0 && (res[i] == 0 || next[t]-i < res[i]) {
				res[i] = next[t] - i
			}
		}
		next[T[i]] = i
	}
	return res
}
// main runs every registered case through both solutions and prints the
// results for manual comparison.
func main() {
	cases := [][]int{
		{73, 74, 75, 71, 69, 72, 76, 73},
	}
	realCase := cases[0:]
	for i, c := range realCase {
		fmt.Println("## case", i)
		// solve with both implementations
		fmt.Println(dailyTemperatures(c))
		fmt.Println(dailyTemperatures1(c))
	}
}
|
package chainvalidate
import (
"errors"
"math/big"
"strings"
"sync"
"time"
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/certutil"
"github.com/cpusoft/goutil/conf"
"github.com/cpusoft/goutil/convert"
"github.com/cpusoft/goutil/hashutil"
"github.com/cpusoft/goutil/jsonutil"
"github.com/cpusoft/goutil/osutil"
model "rpstir2-model"
)
// getChainMfts loads all MFT chain records from the database, enriches
// each with its file hashes and its previous MFT, and registers them on
// chains. Any database error aborts the load early (MFTs added so far
// remain on chains).
func getChainMfts(chains *Chains, wg *sync.WaitGroup) {
	defer wg.Done()
	start := time.Now()
	belogs.Debug("getChainMfts(): start:")

	chainMftSqls, err := getChainMftSqlsDb()
	if err != nil {
		belogs.Error("getChainMfts(): getChainMftSqlsDb:", err)
		return
	}
	belogs.Debug("getChainMfts(): getChainMftSqlsDb, len(chainMftSqls):", len(chainMftSqls))

	for i := range chainMftSqls {
		chainMft := chainMftSqls[i].ToChainMft()
		// Attach the file hash list recorded for this MFT.
		chainMft.ChainFileHashs, err = GetChainFileHashsDb(chainMft.Id)
		if err != nil {
			belogs.Error("getChainMfts(): GetChainFileHashsDb fail:", chainMft.Id, err)
			return
		}
		belogs.Debug("getChainMfts():i:", i, " chainMft.ChainFileHashs:", chainMft.ChainFileHashs)
		// Attach the previous MFT, if any.
		chainMft.PreviousMft, err = GetPreviousMftDb(chainMft.Id)
		belogs.Debug("getChainMfts():i:", i, " previousMft:", i, chainMft.PreviousMft)
		if err != nil {
			belogs.Error("getChainMfts(): GetPreviousMftDb fail:", chainMft.Id, err)
			return
		}
		belogs.Debug("getChainMfts():i:", i, " chainMft.PreviousMft:", chainMft.PreviousMft) //shaodebug
		chains.MftIds = append(chains.MftIds, chainMftSqls[i].Id)
		chains.AddMft(&chainMft)
	}
	belogs.Debug("getChainMfts(): end len(chainMftSqls):", len(chainMftSqls), ", len(chains.MftIds):", len(chains.MftIds), " time(s):", time.Since(start))
	return
}
// validateMfts validates every collected MFT concurrently, bounding the
// number of in-flight validations with a buffered channel sized by the
// chain::chainConcurrentCount configuration value.
func validateMfts(chains *Chains, wg *sync.WaitGroup) {
	defer wg.Done()
	start := time.Now()

	mftIds := chains.MftIds
	belogs.Debug("validateMfts(): start: len(mftIds):", len(mftIds))

	var mftWg sync.WaitGroup
	// The buffered channel acts as a counting semaphore: a send here
	// blocks when the configured concurrency is reached; validateMft
	// receives from it when done.
	chainMftCh := make(chan int, conf.Int("chain::chainConcurrentCount"))
	for _, mftId := range mftIds {
		mftWg.Add(1)
		chainMftCh <- 1
		go validateMft(chains, mftId, &mftWg, chainMftCh)
	}
	mftWg.Wait()
	close(chainMftCh)
	belogs.Info("validateMfts(): end, len(mftIds):", len(mftIds), " time(s):", time.Since(start))
}
func validateMft(chains *Chains, mftId uint64, wg *sync.WaitGroup, chainMftCh chan int) {
defer func() {
wg.Done()
<-chainMftCh
}()
start := time.Now()
chainMft, err := chains.GetMftById(mftId)
if err != nil {
belogs.Error("validateMft(): GetMftById fail:", mftId, err)
return
}
// set parent cer
chainMft.ParentChainCerAlones, err = getMftParentChainCers(chains, mftId)
if err != nil {
belogs.Error("validateMft(): getMftParentChainCers fail:", mftId, err)
chainMft.StateModel.JudgeState()
chains.UpdateFileTypeIdToMft(&chainMft)
return
}
belogs.Debug("validateMft(): chainMft.ParentChainCer: mftId, len(chainMft.ParentChainCerAlones):", mftId, len(chainMft.ParentChainCerAlones))
// exists parent cer
if len(chainMft.ParentChainCerAlones) > 0 {
// get one parent
parentCer := osutil.JoinPathFile(chainMft.ParentChainCerAlones[0].FilePath, chainMft.ParentChainCerAlones[0].FileName)
mft := osutil.JoinPathFile(chainMft.FilePath, chainMft.FileName)
belogs.Debug("validateMft():parentCer:", parentCer, " mft:", mft)
// openssl verify mft
result, err := certutil.VerifyEeCertByX509(parentCer, mft, chainMft.EeCertStart, chainMft.EeCertEnd)
belogs.Debug("validateMft():VerifyEeCertByX509 result:", result, err)
if result != "ok" {
desc := ""
if err != nil {
desc = err.Error()
belogs.Debug("validateMft():verify mft by parent cer fail, fail, mftId:", chainMft.Id, err)
}
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "Fail to be verified by its issuing certificate",
Detail: desc + ", parent cer file is " + chainMft.ParentChainCerAlones[0].FileName + ", mft file is " + chainMft.FileName}
// if subject doesnot match ,will just set warning
if strings.Contains(desc, "issuer name does not match subject from issuing certificate") {
chainMft.StateModel.AddWarning(&stateMsg)
} else {
chainMft.StateModel.AddError(&stateMsg)
}
}
} else {
belogs.Debug("validateMft():mft file has not found parent cer, fail, chainMft.Id,mftId:", chainMft.Id, mftId)
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "Its issuing certificate no longer exists",
Detail: ""}
chainMft.StateModel.AddError(&stateMsg)
}
// check files in filehash should exist
noExistFiles := make([]string, 0)
sha256ErrorFiles := make([]string, 0)
for _, fh := range chainMft.ChainFileHashs {
f := osutil.JoinPathFile(fh.Path, fh.File)
exist, err := osutil.IsExists(f)
belogs.Debug("validateMft():IsExists f:", f, exist, err)
if !exist || err != nil {
belogs.Debug("validateMft():IsExists f fail:", f, exist, err)
noExistFiles = append(noExistFiles, fh.File)
continue
}
sha256, err := hashutil.Sha256File(f)
belogs.Debug("validateMft():Sha256File:", f, " calc hash:"+sha256, " fh.Hash:"+fh.Hash)
if sha256 != fh.Hash || err != nil {
belogs.Debug("validateMft():Sha256File fail, mftfile is ", chainMft.FilePath+chainMft.FileName,
" err fil is "+f,
" calc sha256:"+sha256, " saved sha256:"+fh.Hash, err)
sha256ErrorFiles = append(sha256ErrorFiles, f)
continue
}
}
if len(noExistFiles) > 0 {
belogs.Debug("validateMft():verify mft file fail, mftId:", chainMft.Id,
" noExistFiles:", jsonutil.MarshalJson(noExistFiles))
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "File on filelist no longer exists",
Detail: "object(s) is(are) not in publication point but listed on mft, the(these) object(s) is(are) " +
strings.Join(noExistFiles, ", ")}
chainMft.StateModel.AddError(&stateMsg)
}
if len(sha256ErrorFiles) > 0 {
belogs.Debug("validateMft():verify mft file hash fail, mftId:", chainMft.Id,
" sha256ErrorFiles:", jsonutil.MarshalJson(sha256ErrorFiles))
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The sha256 value of the file is not equal to the value on the filelist",
Detail: "object(s) in publication point and mft has(have) different hashvalues, the(these) object(s) is(are) " +
strings.Join(sha256ErrorFiles, ", ")}
chainMft.StateModel.AddError(&stateMsg)
}
belogs.Debug("validateMft():after check ChainFileHashs, stateModel:", chainMft.Id, jsonutil.MarshalJson(chainMft.StateModel))
noExistFiles = make([]string, 0)
// check all the file(cer/crl/roa) which have same aki ,should all in filehash
sameAkiCerRoaAsaCrlFiles, sameAkiCrls, sameAkiChainMfts, err := getSameAkiCerRoaCrlFilesChainMfts(chains, mftId)
belogs.Debug("validateMft():getSameAkiCerRoaCrlFilesChainMfts, mftId:", chainMft.Id, " sameAkiCerRoaAsaCrlFiles:", sameAkiCerRoaAsaCrlFiles,
" sameAkiCrls:", sameAkiCrls, " sameAkiChainMfts:", sameAkiChainMfts, err)
if err != nil {
belogs.Debug("validateMft():getSameAkiCerRoaCrlFilesChainMfts fail, aki:", chainMft.Aki)
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "Fail to get CER/ROA/CRL/MFT under specific AKI",
Detail: err.Error()}
chainMft.StateModel.AddError(&stateMsg)
} else {
if len(sameAkiCerRoaAsaCrlFiles) == 0 {
belogs.Debug("validateMft():getSameAkiCerRoaCrlFilesChainMfts len(akiFiles)==0, aki:", chainMft.Aki)
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "Fail to get CER/ROA/CRL/MFT under specific AKI",
Detail: "the aki is " + chainMft.Aki}
chainMft.StateModel.AddError(&stateMsg)
}
for _, sameAkiCerRoaCrlFile := range sameAkiCerRoaAsaCrlFiles {
found := false
for _, fileHash := range chainMft.ChainFileHashs {
if strings.ToLower(sameAkiCerRoaCrlFile) == strings.ToLower(fileHash.File) {
found = true
break
}
}
if !found {
belogs.Debug("validateMft():the same aki file ", sameAkiCerRoaCrlFile, " is not exist in filehashs of mft ")
noExistFiles = append(noExistFiles, sameAkiCerRoaCrlFile)
}
}
if len(noExistFiles) > 0 {
belogs.Debug("validateMft():the same aki " + chainMft.Aki + " files " + jsonutil.MarshalJson(noExistFiles) + " is not exists in filehashs of mft")
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The CER, ROA and CRL of these same AKI are not on the filelist of MFT of same AKI",
Detail: "object(s) is(are) in publication point but not listed on mft, the(these) object(s) is(are) " +
jsonutil.MarshalJson(noExistFiles)}
chainMft.StateModel.AddError(&stateMsg)
}
// mft's thisUpdate/nextUpdate are equal to clr's thisUpdate/nextUpdate
if len(sameAkiCrls) == 0 {
belogs.Debug("validateMft():getSameAkiCerRoaCrlFilesChainMfts len(sameAkiCrls)==0, aki:", chainMft.Aki)
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "Fail to get CRL under specific AKI",
Detail: "The aki of MFT is " + chainMft.Aki}
chainMft.StateModel.AddError(&stateMsg)
}
for i := range sameAkiCrls {
if !chainMft.ThisUpdate.Equal(sameAkiCrls[i].ThisUpdate) {
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The thisUpdate of CRL is different from thisUpdate of MFT which has the same AKI",
Detail: "The thisUpdate of CRL is " + convert.ToString(sameAkiCrls[i].ThisUpdate) +
", and the thisUpdate of MFT is " + convert.ToString(chainMft.ThisUpdate) +
", and the CLR file is " + sameAkiCrls[i].FilePath + " " + sameAkiCrls[i].FileName}
chainMft.StateModel.AddWarning(&stateMsg)
}
if !chainMft.NextUpdate.Equal(sameAkiCrls[i].NextUpdate) {
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The nextUpdate of CRL is different from nextUpdate of MFT which has same AKI",
Detail: "The NextUpdate of CRL is " + convert.ToString(sameAkiCrls[i].NextUpdate) +
", and the NextUpdate of MFT is " + convert.ToString(chainMft.ThisUpdate) +
", and the CLR file is " + sameAkiCrls[i].FilePath + " " + sameAkiCrls[i].FileName}
chainMft.StateModel.AddWarning(&stateMsg)
}
}
}
belogs.Debug("validateMft():after check akiFiles, stateModel:", chainMft.Id, jsonutil.MarshalJson(chainMft.StateModel))
// check same aki mft files, compare mftnumber
// mft files have only one
belogs.Debug("validateMft():GetSameAkiMftFiles aki:", chainMft.Aki,
" self is ", chainMft.FileName,
" chainMfts:", jsonutil.MarshalJson(sameAkiChainMfts))
if len(sameAkiChainMfts) == 1 {
// filename shoud be equal
if sameAkiChainMfts[0].FileName != chainMft.FileName {
belogs.Debug("validateMft():same mft files is not self, aki:", sameAkiChainMfts[0].FileName, chainMft.FileName, chainMft.Aki)
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "Fail to get Manifest under specific AKI",
Detail: "aki is " + chainMft.Aki + " fileName is " + chainMft.FileName + " same aki file is " + sameAkiChainMfts[0].FileName}
chainMft.StateModel.AddError(&stateMsg)
}
} else if len(sameAkiChainMfts) == 0 {
belogs.Debug("validateMft():same mft files is zero, aki:", chainMft.Aki)
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "Fail to get Manifest under specific AKI",
Detail: "aki is " + chainMft.Aki + ", fileName should be " + chainMft.FileName}
chainMft.StateModel.AddError(&stateMsg)
} else {
belogs.Debug("validateMft():more than one same aki mft files, ",
chainMft.Aki, chainMft.FileName, chainMft.MftNumber, " sameAkiChainMfts: ", jsonutil.MarshalJson(sameAkiChainMfts))
// smaller/older are more ahead
smallerFiles := make([]ChainMft, 0)
biggerFiles := make([]ChainMft, 0)
for i, sameAkiChainMft := range sameAkiChainMfts {
// using filename and mftnumber to found self ( may have same filename )
if sameAkiChainMft.FileName == chainMft.FileName && sameAkiChainMft.MftNumber == chainMft.MftNumber {
if i > 0 && i < len(sameAkiChainMfts) {
smallerFiles = sameAkiChainMfts[:i]
}
if i+1 < len(sameAkiChainMfts) {
biggerFiles = sameAkiChainMfts[i+1:]
}
belogs.Debug("validateMft():same aki have mft files are smaller or bigger: self: i, aki, mftNumber:",
i, chainMft.Aki, chainMft.MftNumber,
", mftFiles are ", jsonutil.MarshalJson(sameAkiChainMfts),
", smallerFiles are ", jsonutil.MarshalJson(smallerFiles),
", biggerFiles files are ", jsonutil.MarshalJson(biggerFiles))
if len(biggerFiles) == 0 {
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "There are multiple CRLs under a specific AKI, and this CRL has the largest CRL Number",
Detail: "the smaller files are " + jsonutil.MarshalJson(smallerFiles) +
", the bigge files are " + jsonutil.MarshalJson(biggerFiles)}
//chainMft.StateModel.AddWarning(&stateMsg)
belogs.Debug("validateMft():len(biggerFiles) == 0, all same aki mft files are smaller, so it is just warning, ",
chainMft.Aki, chainMft.FileName, chainMft.MftNumber, " sameAkiChainMfts: ", jsonutil.MarshalJson(sameAkiChainMfts), stateMsg)
} else {
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "There are multiple Manifests under a specific AKI, and this Manifest has not the largest Manifest Number",
Detail: "the smaller files are " + jsonutil.MarshalJson(smallerFiles) +
", the bigge files are " + jsonutil.MarshalJson(biggerFiles)}
chainMft.StateModel.AddError(&stateMsg)
belogs.Debug("validateMft():len(biggerFiles) > 0, some same aki mft files are bigger, so it is error, ",
chainMft.Aki, chainMft.FileName, chainMft.MftNumber, " sameAkiChainMfts: ", jsonutil.MarshalJson(sameAkiChainMfts),
" bigger files:", jsonutil.MarshalJson(biggerFiles), stateMsg)
}
break
}
}
}
if len(chainMft.ChainSnInCrlRevoked.CrlFileName) > 0 {
belogs.Debug("validateMft(): mft ee file is founded in crl's revoked cer list:",
chainMft.Id, jsonutil.MarshalJson(chainMft.ChainSnInCrlRevoked.CrlFileName))
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The EE of this Manifest is found on the revocation list of CRL",
Detail: chainMft.FileName + " is in " + chainMft.ChainSnInCrlRevoked.CrlFileName + " revoked cer list, " +
" and revoked time is " + convert.Time2StringZone(chainMft.ChainSnInCrlRevoked.RevocationTime)}
chainMft.StateModel.AddError(&stateMsg)
}
belogs.Debug("validateMft(): check previous mft, mftId:", chainMft.Id, " chainMft.PreviousMft:", chainMft.PreviousMft) //shaodebug
if chainMft.PreviousMft.Found {
// compare prev Number and cur NUmber
prevMftNumber, okPrev := new(big.Int).SetString(chainMft.PreviousMft.MftNumber, 16)
curMftNumber, ok := new(big.Int).SetString(chainMft.MftNumber, 16)
// shaodebug
belogs.Debug("validateMft(): found previous mft, mftId:", chainMft.Id,
" prevMftNumber:", prevMftNumber, " okPrev:", okPrev, " curMftNumber:", curMftNumber, " ok:", ok)
// should be hex
if !ok || !okPrev {
belogs.Info("validateMft(): !ok || !okPrev mftId:", chainMft.Id) //shaodebug
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The Number of this Manifest or the previous Number is not a Hexadecimal number",
Detail: "The Number of this Manifest is " + chainMft.MftNumber + ", and the previouse Number is " + chainMft.PreviousMft.MftNumber}
chainMft.StateModel.AddError(&stateMsg)
} else {
comp := curMftNumber.Cmp(prevMftNumber)
belogs.Debug("validateMft(): comp, prevMftNumber:", prevMftNumber, " curMftNumber:", curMftNumber, " comp:", comp) //shaodebug
if comp < 0 {
// if cur < prev, then error
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The Number of this Manifest is less than the previous Number",
Detail: "The Number of this Manifest is " + curMftNumber.String() + ", and the previouse Number is " + prevMftNumber.String()}
chainMft.StateModel.AddError(&stateMsg)
} else if comp == 0 {
// if cur == prev, then warning
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The Number of this Manifest is equal to the previous Number",
Detail: "The Number of this Manifest is " + curMftNumber.String() + ", and the previouse Number is " + prevMftNumber.String()}
chainMft.StateModel.AddWarning(&stateMsg)
} else {
// cur > prev
// if cur - prev == 1 ,then ok, else warning
one := big.NewInt(1)
sub := big.NewInt(0).Sub(curMftNumber, prevMftNumber)
belogs.Debug("validateMft(): comp, one:", one, " sub:", sub) //shaodebug
// just bigger 1, ok
if sub.Cmp(one) != 0 {
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The Number of this Manifest is not exactly 1 larger than the previous Number",
Detail: "The Number of this Manifest is " + curMftNumber.String() + ", and the previouse Number is " + prevMftNumber.String()}
chainMft.StateModel.AddWarning(&stateMsg)
}
}
}
belogs.Debug("validateMft(): prevMftNumber and curMftNumber, mftId:", chainMft.Id, " chainMft.StateModel:", jsonutil.MarshalJson(chainMft.StateModel)) //shaodebug
// compare prev thisUpdate/nextUpdate and cur thisUpdate/nextUpdate
if !chainMft.ThisUpdate.After(chainMft.PreviousMft.ThisUpdate) {
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The ThisUpdate of this Manifest is is later than the previous ThisUpdate",
Detail: "The ThisUpdate of this Manifest is " + chainMft.ThisUpdate.String() + ", and the previouse ThisUpdate is " + chainMft.PreviousMft.ThisUpdate.String()}
chainMft.StateModel.AddError(&stateMsg)
}
if !chainMft.NextUpdate.After(chainMft.PreviousMft.NextUpdate) {
stateMsg := model.StateMsg{Stage: "chainvalidate",
Fail: "The NextUpdate of this Manifest is is later than the previous NextUpdate",
Detail: "The NextUpdate of this Manifest is " + chainMft.NextUpdate.String() + ", and the previouse NextUpdate is " + chainMft.PreviousMft.NextUpdate.String()}
chainMft.StateModel.AddError(&stateMsg)
}
belogs.Debug("validateMft(): ThisUpdate and NextUpdate, mftId:", chainMft.Id, " chainMft.StateModel:", jsonutil.MarshalJson(chainMft.StateModel)) //shaodebug
}
chainMft.StateModel.JudgeState()
belogs.Debug("validateMft(): stateModel:", chainMft.StateModel)
if chainMft.StateModel.State != "valid" {
belogs.Debug("validateMft(): stateModel have errors or warnings, mftId :", mftId, " stateModel:", jsonutil.MarshalJson(chainMft.StateModel))
}
chains.UpdateFileTypeIdToMft(&chainMft)
belogs.Debug("validateMft():end UpdateFileTypeIdToMft mftId:", mftId, " time(s):", time.Since(start))
}
// getMftParentChainCers collects the parent certificate chain for the given MFT:
// first the MFT's direct parent certificate, then that certificate's own ancestors.
// When the MFT has no parent certificate, a nil slice and nil error are returned.
func getMftParentChainCers(chains *Chains, mftId uint64) (chainCerAlones []ChainCerAlone, err error) {
	parent, err := getMftParentChainCer(chains, mftId)
	if err != nil {
		belogs.Error("getMftParentChainCers(): getMftParentChainCer, mftId:", mftId, err)
		return nil, err
	}
	belogs.Debug("getMftParentChainCers(): mftId:", mftId, " parentChainCerAlone.Id:", parent.Id)
	// an Id of zero means no parent certificate exists; that is not an error
	if parent.Id == 0 {
		belogs.Debug("getMftParentChainCers(): parentChainCer is not found , mftId :", mftId)
		return chainCerAlones, nil
	}
	ancestors, err := GetCerParentChainCers(chains, parent.Id)
	if err != nil {
		belogs.Error("getMftParentChainCers(): GetCerParentChainCers, mftId:", mftId, " parentChainCerAlone.Id:", parent.Id, err)
		return nil, err
	}
	// direct parent first, then its ancestors, root last
	chainCerAlones = make([]ChainCerAlone, 0, 1+len(ancestors))
	chainCerAlones = append(chainCerAlones, parent)
	chainCerAlones = append(chainCerAlones, ancestors...)
	belogs.Debug("getMftParentChainCers():mftId, len(chainCerAlones):", mftId, len(chainCerAlones))
	return chainCerAlones, nil
}
// getMftParentChainCer looks up the direct parent certificate of the given MFT by
// matching the MFT's AKI against the SKI index of known certificates.
// Not finding a parent is not an error: the zero-value chainCerAlone is returned.
func getMftParentChainCer(chains *Chains, mftId uint64) (chainCerAlone ChainCerAlone, err error) {
	chainMft, err := chains.GetMftById(mftId)
	if err != nil {
		belogs.Error("getMftParentChainCer(): GetMft, mftId:", mftId, err)
		return chainCerAlone, err
	}
	belogs.Debug("getMftParentChainCer(): mftId:", mftId, " chainMft:", chainMft)
	// the MFT's AKI equals the parent certificate's SKI; an empty AKI is a hard error
	if len(chainMft.Aki) == 0 {
		belogs.Error("getMftParentChainCer(): chainMft.Aki is empty, fail:", mftId)
		return chainCerAlone, errors.New("mft's aki is empty")
	}
	parentCerSki := chainMft.Aki
	fileTypeId, ok := chains.SkiToFileTypeId[parentCerSki]
	belogs.Debug("getMftParentChainCer(): mftId, parentCerSki,fileTypeId, ok:", mftId, parentCerSki, fileTypeId, ok)
	if !ok {
		// not found parent ,is not error
		belogs.Debug("getMftParentChainCer(): not found mft's parent cer:", mftId)
		return chainCerAlone, nil
	}
	parentChainCer, err := chains.GetCerByFileTypeId(fileTypeId)
	belogs.Debug("getMftParentChainCer(): GetCerByFileTypeId, mftId, fileTypeId, parentChainCer.Id:", mftId, fileTypeId, parentChainCer.Id)
	if err != nil {
		belogs.Error("getMftParentChainCer(): GetCerByFileTypeId, mftId,fileTypeId, fail:", mftId, fileTypeId, err)
		return chainCerAlone, err
	}
	return *NewChainCerAlone(&parentChainCer), nil
}
// getSameAkiCerRoaCrlFilesChainMfts gathers, for the MFT identified by mftId, all
// sibling files sharing the MFT's AKI: the cer/crl/roa/asa file names, the update
// times of the CRLs, and any other MFTs under the same AKI.
//
// Fixes: removed a redundant re-check of the map's "ok" flag inside the loop (it is
// invariant there), corrected the "asa" debug log which labeled the value as
// "chainRoa.FileName" while printing chainAsa.FileName, and guarded against
// fileTypeIds shorter than the 3-character type prefix (previously a panic).
func getSameAkiCerRoaCrlFilesChainMfts(chains *Chains, mftId uint64) (sameAkiCerRoaAsaCrlFiles []string, sameAkiCrls []SameAkiCrl,
	sameAkiChainMfts []ChainMft, err error) {
	chainMft, err := chains.GetMftById(mftId)
	if err != nil {
		belogs.Error("getSameAkiCerRoaCrlFilesChainMfts():GetMftById, mftId:", mftId, err)
		return nil, nil, nil, err
	}
	sameAkiCerRoaAsaCrlFiles = make([]string, 0)
	sameAkiCrls = make([]SameAkiCrl, 0)
	sameAkiChainMfts = make([]ChainMft, 0)
	//get mft's aki --> cer/roa/crl/
	aki := chainMft.Aki
	fileTypeIds, ok := chains.AkiToFileTypeIds[aki]
	belogs.Debug("getSameAkiCerRoaCrlFilesChainMfts(): mftId, fileTypeIds, ok:", mftId, fileTypeIds, ok)
	if !ok {
		return
	}
	for _, fileTypeId := range fileTypeIds.FileTypeIds {
		belogs.Debug("getSameAkiCerRoaCrlFilesChainMfts(): mftId, fileTypeId:", mftId, fileTypeId)
		// the first three characters encode the file type; skip malformed ids
		if len(fileTypeId) < 3 {
			continue
		}
		fileType := string(fileTypeId[:3])
		switch fileType {
		case "cer":
			chainCer, err := chains.GetCerByFileTypeId(fileTypeId)
			if err != nil {
				belogs.Error("getSameAkiCerRoaCrlFilesChainMfts(): GetCerByFileTypeId, mftId,fileTypeId,err:", mftId, fileTypeId, err)
				return nil, nil, nil, err
			}
			sameAkiCerRoaAsaCrlFiles = append(sameAkiCerRoaAsaCrlFiles, chainCer.FileName)
			belogs.Debug("getSameAkiCerRoaCrlFilesChainMfts(): mftId, chainCer.FileName, ok:", mftId, chainCer.FileName, ok)
		case "crl":
			chainCrl, err := chains.GetCrlByFileTypeId(fileTypeId)
			if err != nil {
				belogs.Error("getSameAkiCerRoaCrlFilesChainMfts(): GetCrlByFileTypeId, mftId,fileTypeId,err:", mftId, fileTypeId, err)
				return nil, nil, nil, err
			}
			sameAkiCerRoaAsaCrlFiles = append(sameAkiCerRoaAsaCrlFiles, chainCrl.FileName)
			// keep the CRL's update times so the caller can compare them with the MFT's
			sameAkiCrl := SameAkiCrl{Found: true,
				FilePath:   chainCrl.FilePath,
				FileName:   chainCrl.FileName,
				ThisUpdate: chainCrl.ThisUpdate,
				NextUpdate: chainCrl.NextUpdate}
			sameAkiCrls = append(sameAkiCrls, sameAkiCrl)
			belogs.Debug("getSameAkiCerRoaCrlFilesChainMfts(): mftId, chainCrl.FileName, ok:", mftId, chainCrl.FileName, ok, " sameAkiCrl:", sameAkiCrl)
		case "roa":
			chainRoa, err := chains.GetRoaByFileTypeId(fileTypeId)
			if err != nil {
				belogs.Error("getSameAkiCerRoaCrlFilesChainMfts(): GetRoaByFileTypeId, mftId,fileTypeId,err:", mftId, fileTypeId, err)
				return nil, nil, nil, err
			}
			sameAkiCerRoaAsaCrlFiles = append(sameAkiCerRoaAsaCrlFiles, chainRoa.FileName)
			belogs.Debug("getSameAkiCerRoaCrlFilesChainMfts(): mftId, chainRoa.FileName, ok:", mftId, chainRoa.FileName, ok)
		case "asa":
			chainAsa, err := chains.GetAsaByFileTypeId(fileTypeId)
			if err != nil {
				belogs.Error("getSameAkiCerRoaCrlFilesChainMfts(): GetAsaByFileTypeId, mftId,fileTypeId,err:", mftId, fileTypeId, err)
				return nil, nil, nil, err
			}
			sameAkiCerRoaAsaCrlFiles = append(sameAkiCerRoaAsaCrlFiles, chainAsa.FileName)
			belogs.Debug("getSameAkiCerRoaCrlFilesChainMfts(): mftId, chainAsa.FileName, ok:", mftId, chainAsa.FileName, ok)
		case "mft":
			chainMft, err := chains.GetMftByFileTypeId(fileTypeId)
			if err != nil {
				belogs.Error("getSameAkiCerRoaCrlFilesChainMfts(): GetMftByFileTypeId, mftId,fileTypeId,err:", mftId, fileTypeId, err)
				return nil, nil, nil, err
			}
			sameAkiChainMfts = append(sameAkiChainMfts, chainMft)
			belogs.Debug("getSameAkiCerRoaCrlFilesChainMfts(): mftId, chainMft.Id, ok:", mftId, chainMft.Id, ok)
		}
	}
	return
}
// updateChainByMft propagates each invalid MFT's state to every file
// (cer/crl/roa/asa) that shares that MFT's AKI. invalidMftEffect selects the
// severity applied to the affected files: "warning" adds a warning,
// "invalid" adds an error; any other value leaves the files untouched.
//
// Fix: the GetMftById error log previously said "validateMft()", hiding the
// real caller. Also replaced strings.Replace(..., -1) with the equivalent,
// clearer strings.ReplaceAll.
func updateChainByMft(chains *Chains, invalidMftEffect string) (err error) {
	start := time.Now()
	mftIds := chains.MftIds
	belogs.Info("updateChainByMft(): start: len(mftIds):", len(mftIds))
	rsyncDestPath := conf.VariableString("rsync::destPath") + "/"
	rrdpDestPath := conf.VariableString("rrdp::destPath") + "/"
	// walk all MFTs and act only on the invalid ones
	for _, mftId := range mftIds {
		chainMft, err := chains.GetMftById(mftId)
		if err != nil {
			belogs.Error("updateChainByMft(): GetMftById fail:", mftId, err)
			return err
		}
		if chainMft.StateModel.State != "invalid" {
			continue
		}
		belogs.Debug("updateChainByMft(): found invalid mft, mftId:", mftId,
			chainMft.FilePath, chainMft.FileName, jsonutil.MarshalJson(chainMft.StateModel))
		fileTypeIds, ok := chains.AkiToFileTypeIds[chainMft.Aki]
		belogs.Debug("updateChainByMft(): mftId, fileTypeIds, ok:", mftId, fileTypeIds, ok)
		if !ok {
			continue
		}
		// derive the publication point name by stripping the local rsync/rrdp
		// destination directories from the file path
		publicPointName := chainMft.FilePath
		publicPointName = strings.ReplaceAll(publicPointName, rsyncDestPath, "")
		publicPointName = strings.ReplaceAll(publicPointName, rrdpDestPath, "")
		stateMsg := model.StateMsg{Stage: "chainvalidate",
			Fail: "Manifest which has same AKI of this file is invalid or missing",
			Detail: `No manifest(invalid or missing) is available for ` + publicPointName + ` , and AKI is (` + chainMft.Aki + `), thus there may have been undetected deletions or replay substitutions from the publication point`}
		belogs.Debug("updateChainByMft(): mftId, publicPointName, stateMsg:", mftId, publicPointName,
			jsonutil.MarshalJson(stateMsg))
		// attach the state message to every sibling file under the same AKI
		for _, fileTypeId := range fileTypeIds.FileTypeIds {
			belogs.Debug("updateChainByMft(): mftId, fileTypeId:", mftId, fileTypeId)
			fileType := string(fileTypeId[:3])
			switch fileType {
			case "cer":
				chainCer, err := chains.GetCerByFileTypeId(fileTypeId)
				if err != nil {
					belogs.Error("updateChainByMft(): GetCerByFileTypeId, mftId,fileTypeId,err:", mftId, fileTypeId, err)
					return err
				}
				if invalidMftEffect == "warning" {
					chainCer.StateModel.AddWarning(&stateMsg)
				} else if invalidMftEffect == "invalid" {
					chainCer.StateModel.AddError(&stateMsg)
				}
				chains.UpdateFileTypeIdToCer(&chainCer)
				belogs.Debug("updateChainByMft(): mftId:", mftId, " chainMft:", chainMft.FilePath, chainMft.FileName,
					" chainCer:", chainCer.FilePath, chainCer.FileName, jsonutil.MarshalJson(chainCer.StateModel))
			case "crl":
				chainCrl, err := chains.GetCrlByFileTypeId(fileTypeId)
				if err != nil {
					belogs.Error("updateChainByMft(): GetCrlByFileTypeId, mftId,fileTypeId,err:", mftId, fileTypeId, err)
					return err
				}
				if invalidMftEffect == "warning" {
					chainCrl.StateModel.AddWarning(&stateMsg)
				} else if invalidMftEffect == "invalid" {
					chainCrl.StateModel.AddError(&stateMsg)
				}
				chains.UpdateFileTypeIdToCrl(&chainCrl)
				belogs.Debug("updateChainByMft(): mftId:", mftId, " chainMft:", chainMft.FilePath, chainMft.FileName,
					" chainCrl:", chainCrl.FilePath, chainCrl.FileName, jsonutil.MarshalJson(chainCrl.StateModel))
			case "roa":
				chainRoa, err := chains.GetRoaByFileTypeId(fileTypeId)
				if err != nil {
					belogs.Error("updateChainByMft(): GetRoaByFileTypeId, mftId,fileTypeId,err:", mftId, fileTypeId, err)
					return err
				}
				if invalidMftEffect == "warning" {
					chainRoa.StateModel.AddWarning(&stateMsg)
				} else if invalidMftEffect == "invalid" {
					chainRoa.StateModel.AddError(&stateMsg)
				}
				chains.UpdateFileTypeIdToRoa(&chainRoa)
				belogs.Debug("updateChainByMft(): mftId:", mftId, " chainMft:", chainMft.FilePath, chainMft.FileName,
					" chainRoa:", chainRoa.FilePath, chainRoa.FileName, jsonutil.MarshalJson(chainRoa.StateModel))
			case "asa":
				chainAsa, err := chains.GetAsaByFileTypeId(fileTypeId)
				if err != nil {
					belogs.Error("updateChainByMft(): GetAsaByFileTypeId, mftId,fileTypeId,err:", mftId, fileTypeId, err)
					return err
				}
				if invalidMftEffect == "warning" {
					chainAsa.StateModel.AddWarning(&stateMsg)
				} else if invalidMftEffect == "invalid" {
					chainAsa.StateModel.AddError(&stateMsg)
				}
				chains.UpdateFileTypeIdToAsa(&chainAsa)
				belogs.Debug("updateChainByMft(): mftId:", mftId, " chainMft:", chainMft.FilePath, chainMft.FileName,
					" chainAsa:", chainAsa.FilePath, chainAsa.FileName, jsonutil.MarshalJson(chainAsa.StateModel))
			default:
				// mft and unknown types need no state propagation here
			}
		}
	}
	belogs.Info("updateChainByMft(): end: len(mftIds):", len(mftIds), " time(s):", time.Since(start))
	return nil
}
|
package tasks
import (
"testing"
_ "github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/assert"
)
// TestRun exercises DBTask.run against an in-memory sqlite connection,
// both without parameters and with bound named parameters.
func TestRun(t *testing.T) {
	assert := assert.New(t)
	conf := &Config{
		DBMaxRows: 10,
		DBConnections: map[string]DBConnection{
			"sqlite": {
				Driver:     "sqlite3",
				DataSource: ":memory:",
			},
		},
	}
	testCases := []struct {
		sql  OptionalStringArray
		data map[string]interface{}
	}{
		{
			sql:  OptionalStringArray{"select 1", "select 2", "select 3"},
			data: nil,
		},
		{
			sql:  OptionalStringArray{"select :foo"},
			data: map[string]interface{}{"foo": "bar"},
		},
		{
			sql:  OptionalStringArray{"select :foo, :bar", "select :bar", "select :foo"},
			data: map[string]interface{}{"foo": "sss", "bar": 123},
		},
	}
	for idx, tc := range testCases {
		task := &DBTask{
			Connection: "sqlite",
			SQL:        tc.sql,
		}
		// TODO: look at result object
		_, err := task.run(tc.data, conf)
		assert.NoError(err, "case %d", idx)
	}
}
|
/*
Bhallaladeva was an evil king who ruled the kingdom of Maahishmati.
He wanted to erect a 100ft golden statue of himself and he looted gold from several places for this.
He even looted his own people, by using the following unfair strategy:
There are N houses in Maahishmati, and the ith house has Ai gold plates. Each gold plate costs exactly 1 Nimbda, which is the unit of currency in the kingdom of Maahishmati.
Bhallaladeva would choose an integer K, and loots all the houses in several steps. In each step:
He would choose a house i which hasn't been looted yet, pay the owner exactly Ai Nimbdas, and take away all the gold plates in that house (Hence, he also ends up looting this house).
He would now choose at most K houses which haven't been looted yet and take away all the gold plates from these houses without paying a single Nimbda (yes, he takes all of them for free).
He repeats the above steps until all the N houses have been looted. Your task is to devise a strategy for Bhallaladeva to loot the houses in some order, so that the number of Nimbdas he has to pay is minimized.
You'll also be given multiple values of K (Q of them to be precise), and you need to find the minimum number of nimbdas for each of these values.
Input
The first line of input consists of a single integer N denoting the number of houses in Maahishmati.
The second line of input consists of N space separated integers denoting A1, A2, ..., AN, where Ai denotes the number of gold plates in the ith house.
The third line of input consists of a single integer Q denoting the number of values of K to follow. Each of the following Q lines consist of a single integer, where the value on the ith line denotes the value of K for the ith query.
Output
Output exactly Q integers on separate lines, where the output on the ith line denotes the answer for the ith value of K.
Constraints
1 ≤ N ≤ 10^5
1 ≤ Q ≤ 10^5
0 ≤ K ≤ N-1
1 ≤ Ai ≤ 10^4
*/
package main
import (
"fmt"
"reflect"
"sort"
)
// main runs the sample case from the problem statement:
// houses {3,2,1,4} with K queries {0,2} expect payments {10,3}.
func main() {
	test([]int{3, 2, 1, 4}, []int{0, 2}, []int{10, 3})
}
// assert panics when the given condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test runs nimbdas on a and k, prints the result, and panics when it
// differs from the expected slice r.
func test(a, k, r []int) {
	got := nimbdas(a, k)
	fmt.Println(got)
	assert(reflect.DeepEqual(got, r))
}
// nimbdas answers each query k[i]: the minimum total payment needed to loot all
// houses when each paid house lets up to k[i] further houses be taken for free.
// Since one payment covers k[i]+1 houses, exactly ceil(n/(k[i]+1)) houses must
// be paid for, and the optimal choice is the cheapest ones.
func nimbdas(a, k []int) []int {
	n := len(a)
	// prefix sums over the sorted plate counts
	prefix := make([]int, n)
	copy(prefix, a)
	sort.Ints(prefix)
	for i := 1; i < n; i++ {
		prefix[i] += prefix[i-1]
	}
	out := make([]int, len(k))
	for i, kv := range k {
		// number of houses that must be paid for: ceil(n/(kv+1))
		paid := (n + kv) / (kv + 1)
		out[i] = prefix[paid-1]
	}
	return out
}
|
package checksum
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/require"
)
// input pairs a whitespace-separated spreadsheet with its expected checksum.
var input = []struct {
	spreadsheet string
	checksum    int
}{
	{
		spreadsheet: "5 1 9 5\n7 5 3\n2 4 6 8",
		checksum:    18,
	},
}
// TestChecksum verifies Checksum against the table-driven cases in input.
func TestChecksum(t *testing.T) {
	assert := require.New(t)
	for _, tc := range input {
		assert.Equal(tc.checksum, Checksum(tc.spreadsheet))
	}
}
// TestSolveChecksum runs Checksum over the puzzle input file and pins the
// known answer for this repository's input.txt.
// Fix: the ReadFile error was silently discarded, so a missing input file
// surfaced as a confusing checksum mismatch instead of a clear failure.
func TestSolveChecksum(t *testing.T) {
	assert := require.New(t)
	dat, err := ioutil.ReadFile("./input.txt")
	assert.NoError(err)
	sum := Checksum(string(dat))
	assert.Equal(45972, sum)
}
// edvinput pairs a whitespace-separated spreadsheet with its expected
// evenly-divisible-values checksum.
var edvinput = []struct {
	spreadsheet string
	checksum    int
}{
	{
		spreadsheet: "5 9 2 8\n9 4 7 3\n3 8 6 5",
		checksum:    9,
	},
}
// TestEvenlyDivisibleValues verifies EvenlyDivisibleValues against the
// table-driven cases in edvinput.
func TestEvenlyDivisibleValues(t *testing.T) {
	assert := require.New(t)
	for _, tc := range edvinput {
		assert.Equal(tc.checksum, EvenlyDivisibleValues(tc.spreadsheet))
	}
}
// TestSolveEvenlyDivisibleValues runs EvenlyDivisibleValues over the puzzle
// input file and pins the known answer for this repository's input.txt.
// Fix: the ReadFile error was silently discarded, so a missing input file
// surfaced as a confusing checksum mismatch instead of a clear failure.
func TestSolveEvenlyDivisibleValues(t *testing.T) {
	assert := require.New(t)
	dat, err := ioutil.ReadFile("./input.txt")
	assert.NoError(err)
	sum := EvenlyDivisibleValues(string(dat))
	assert.Equal(326, sum)
}
|
package engine
import (
"github.com/gabrielEscame/go-engine/physics"
"github.com/veandco/go-sdl2/sdl"
)
// Entity is any game object the engine updates and draws each frame.
type Entity interface {
	// Update advances the entity using the current Input state and a float64
	// that is presumably the elapsed frame time — confirm with the game loop.
	Update(*Input, float64)
	// Draw renders the entity onto the given SDL surface.
	Draw(*sdl.Surface)
}
// CollidableEntity is implemented by objects that participate in collision
// detection via an axis-aligned square shape.
type CollidableEntity interface {
	// GetShape exposes the square shape used for collision tests.
	GetShape() *physics.SquareShape
	// OnCollisionEnter receives collision details; presumably invoked when a
	// collision begins — confirm against the engine's collision loop.
	OnCollisionEnter(*CollisionInfo)
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package api
import (
"fmt"
"strconv"
"strings"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/aks-engine/pkg/api/common"
)
func (cs *ContainerService) setKubeletConfig(isUpgrade bool) {
o := cs.Properties.OrchestratorProfile
staticLinuxKubeletConfig := map[string]string{
"--address": "0.0.0.0",
"--allow-privileged": "true",
"--anonymous-auth": "false",
"--authorization-mode": "Webhook",
"--client-ca-file": "/etc/kubernetes/certs/ca.crt",
"--pod-manifest-path": "/etc/kubernetes/manifests",
"--cluster-dns": o.KubernetesConfig.DNSServiceIP,
"--cgroups-per-qos": "true",
"--kubeconfig": "/var/lib/kubelet/kubeconfig",
"--keep-terminated-pod-volumes": "false",
"--tls-cert-file": "/etc/kubernetes/certs/kubeletserver.crt",
"--tls-private-key-file": "/etc/kubernetes/certs/kubeletserver.key",
"--v": "2",
"--volume-plugin-dir": "/etc/kubernetes/volumeplugins",
}
for key := range staticLinuxKubeletConfig {
switch key {
case "--anonymous-auth", "--client-ca-file":
if !to.Bool(o.KubernetesConfig.EnableSecureKubelet) { // Don't add if EnableSecureKubelet is disabled
delete(staticLinuxKubeletConfig, key)
}
}
}
// Start with copy of Linux config
staticWindowsKubeletConfig := make(map[string]string)
for key, val := range staticLinuxKubeletConfig {
switch key {
case "--pod-manifest-path", "--tls-cert-file", "--tls-private-key-file": // Don't add Linux-specific config
staticWindowsKubeletConfig[key] = ""
case "--anonymous-auth":
if !to.Bool(o.KubernetesConfig.EnableSecureKubelet) { // Don't add if EnableSecureKubelet is disabled
staticWindowsKubeletConfig[key] = ""
} else {
staticWindowsKubeletConfig[key] = val
}
case "--client-ca-file":
if !to.Bool(o.KubernetesConfig.EnableSecureKubelet) { // Don't add if EnableSecureKubelet is disabled
staticWindowsKubeletConfig[key] = ""
} else {
staticWindowsKubeletConfig[key] = "c:\\k\\ca.crt"
}
default:
staticWindowsKubeletConfig[key] = val
}
}
// Add Windows-specific overrides
// Eventually paths should not be hardcoded here. They should be relative to $global:KubeDir in the PowerShell script
staticWindowsKubeletConfig["--azure-container-registry-config"] = "c:\\k\\azure.json"
staticWindowsKubeletConfig["--pod-infra-container-image"] = "kubletwin/pause"
staticWindowsKubeletConfig["--kubeconfig"] = "c:\\k\\config"
staticWindowsKubeletConfig["--cloud-config"] = "c:\\k\\azure.json"
staticWindowsKubeletConfig["--cgroups-per-qos"] = "false"
staticWindowsKubeletConfig["--enforce-node-allocatable"] = "\"\"\"\""
staticWindowsKubeletConfig["--system-reserved"] = "memory=2Gi"
staticWindowsKubeletConfig["--hairpin-mode"] = "promiscuous-bridge"
staticWindowsKubeletConfig["--image-pull-progress-deadline"] = "20m"
staticWindowsKubeletConfig["--resolv-conf"] = "\"\"\"\""
staticWindowsKubeletConfig["--eviction-hard"] = "\"\"\"\""
nodeStatusUpdateFrequency := GetK8sComponentsByVersionMap(o.KubernetesConfig)[o.OrchestratorVersion]["nodestatusfreq"]
if cs.Properties.IsAzureStackCloud() {
nodeStatusUpdateFrequency = DefaultAzureStackKubernetesNodeStatusUpdateFrequency
}
// Default Kubelet config
defaultKubeletConfig := map[string]string{
"--cluster-domain": "cluster.local",
"--network-plugin": "cni",
"--pod-infra-container-image": o.KubernetesConfig.MCRKubernetesImageBase + GetK8sComponentsByVersionMap(o.KubernetesConfig)[o.OrchestratorVersion][common.PauseComponentName],
"--max-pods": strconv.Itoa(DefaultKubernetesMaxPods),
"--eviction-hard": DefaultKubernetesHardEvictionThreshold,
"--node-status-update-frequency": nodeStatusUpdateFrequency,
"--image-gc-high-threshold": strconv.Itoa(DefaultKubernetesGCHighThreshold),
"--image-gc-low-threshold": strconv.Itoa(DefaultKubernetesGCLowThreshold),
"--non-masquerade-cidr": DefaultNonMasqueradeCIDR,
"--cloud-provider": "azure",
"--cloud-config": "/etc/kubernetes/azure.json",
"--azure-container-registry-config": "/etc/kubernetes/azure.json",
"--event-qps": DefaultKubeletEventQPS,
"--cadvisor-port": DefaultKubeletCadvisorPort,
"--pod-max-pids": strconv.Itoa(DefaultKubeletPodMaxPIDs),
"--image-pull-progress-deadline": "30m",
"--enforce-node-allocatable": "pods",
"--streaming-connection-idle-timeout": "4h",
"--tls-cipher-suites": TLSStrongCipherSuitesKubelet,
"--healthz-port": DefaultKubeletHealthzPort,
}
// Set --non-masquerade-cidr if ip-masq-agent is disabled on AKS or
// explicitly disabled in kubernetes config
if cs.Properties.IsIPMasqAgentDisabled() {
defaultKubeletConfig["--non-masquerade-cidr"] = cs.Properties.OrchestratorProfile.KubernetesConfig.ClusterSubnet
}
// Apply Azure CNI-specific --max-pods value
if o.KubernetesConfig.NetworkPlugin == NetworkPluginAzure {
defaultKubeletConfig["--max-pods"] = strconv.Itoa(DefaultKubernetesMaxPodsVNETIntegrated)
}
minVersionRotateCerts := "1.11.9"
if common.IsKubernetesVersionGe(o.OrchestratorVersion, minVersionRotateCerts) {
defaultKubeletConfig["--rotate-certificates"] = "true"
}
if common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.16.0") {
// for enabling metrics-server v0.3.0+
defaultKubeletConfig["--authentication-token-webhook"] = "true"
defaultKubeletConfig["--read-only-port"] = "0" // we only have metrics-server v0.3 support in 1.16.0 and above
}
if o.KubernetesConfig.NeedsContainerd() {
defaultKubeletConfig["--container-runtime"] = "remote"
defaultKubeletConfig["--runtime-request-timeout"] = "15m"
defaultKubeletConfig["--container-runtime-endpoint"] = "unix:///run/containerd/containerd.sock"
}
// If no user-configurable kubelet config values exists, use the defaults
setMissingKubeletValues(o.KubernetesConfig, defaultKubeletConfig)
if isUpgrade {
// if upgrade, force default "--pod-infra-container-image" value
o.KubernetesConfig.KubeletConfig["--pod-infra-container-image"] = defaultKubeletConfig["--pod-infra-container-image"]
}
addDefaultFeatureGates(o.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, minVersionRotateCerts, "RotateKubeletServerCertificate=true")
addDefaultFeatureGates(o.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "1.20.0-rc.0", "ExecProbeTimeout=true")
// Override default cloud-provider?
if to.Bool(o.KubernetesConfig.UseCloudControllerManager) {
staticLinuxKubeletConfig["--cloud-provider"] = "external"
}
// Override default --network-plugin?
if o.KubernetesConfig.NetworkPlugin == NetworkPluginKubenet {
if o.KubernetesConfig.NetworkPolicy != NetworkPolicyCalico {
o.KubernetesConfig.KubeletConfig["--network-plugin"] = NetworkPluginKubenet
}
}
// We don't support user-configurable values for the following,
// so any of the value assignments below will override user-provided values
for key, val := range staticLinuxKubeletConfig {
o.KubernetesConfig.KubeletConfig[key] = val
}
if isUpgrade && common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.14.0") {
hasSupportPodPidsLimitFeatureGate := strings.Contains(o.KubernetesConfig.KubeletConfig["--feature-gates"], "SupportPodPidsLimit=true")
podMaxPids, err := strconv.Atoi(o.KubernetesConfig.KubeletConfig["--pod-max-pids"])
if err != nil {
o.KubernetesConfig.KubeletConfig["--pod-max-pids"] = strconv.Itoa(-1)
} else {
// If we don't have an explicit SupportPodPidsLimit=true, disable --pod-max-pids by setting to -1
// To prevent older clusters from inheriting SupportPodPidsLimit=true implicitly starting w/ 1.14.0
if !hasSupportPodPidsLimitFeatureGate || podMaxPids <= 0 {
o.KubernetesConfig.KubeletConfig["--pod-max-pids"] = strconv.Itoa(-1)
}
}
}
removeKubeletFlags(o.KubernetesConfig.KubeletConfig, o.OrchestratorVersion)
invalidFeatureGates := []string{}
// Remove --feature-gate VolumeSnapshotDataSource starting with 1.22
if common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.22.0-alpha.1") {
invalidFeatureGates = append(invalidFeatureGates, "VolumeSnapshotDataSource")
}
removeInvalidFeatureGates(o.KubernetesConfig.KubeletConfig, invalidFeatureGates)
// Master-specific kubelet config changes go here
if cs.Properties.MasterProfile != nil {
if cs.Properties.MasterProfile.KubernetesConfig == nil {
cs.Properties.MasterProfile.KubernetesConfig = &KubernetesConfig{}
}
if cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig == nil {
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig = make(map[string]string)
}
if isUpgrade {
// if upgrade, force default "--pod-infra-container-image" value
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--pod-infra-container-image"] = o.KubernetesConfig.KubeletConfig["--pod-infra-container-image"]
}
//Ensure cloud-provider setting
if to.Bool(o.KubernetesConfig.UseCloudControllerManager) {
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--cloud-provider"] = "external"
}
setMissingKubeletValues(cs.Properties.MasterProfile.KubernetesConfig, o.KubernetesConfig.KubeletConfig)
addDefaultFeatureGates(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion, "", "")
if isUpgrade && common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.14.0") {
hasSupportPodPidsLimitFeatureGate := strings.Contains(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--feature-gates"], "SupportPodPidsLimit=true")
podMaxPids, err := strconv.Atoi(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--pod-max-pids"])
if err != nil {
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--pod-max-pids"] = strconv.Itoa(-1)
} else {
// If we don't have an explicit SupportPodPidsLimit=true, disable --pod-max-pids by setting to -1
// To prevent older clusters from inheriting SupportPodPidsLimit=true implicitly starting w/ 1.14.0
if !hasSupportPodPidsLimitFeatureGate || podMaxPids <= 0 {
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--pod-max-pids"] = strconv.Itoa(-1)
}
}
}
// "--protect-kernel-defaults" is only true for VHD based VMs since the base Ubuntu distros don't have a /etc/sysctl.d/60-CIS.conf file.
if cs.Properties.MasterProfile.IsVHDDistro() {
if _, ok := cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--protect-kernel-defaults"]; !ok {
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--protect-kernel-defaults"] = "true"
}
}
// Override the --resolv-conf kubelet config value for Ubuntu 18.04 after the distro value is set.
if cs.Properties.MasterProfile.IsUbuntu1804() || cs.Properties.MasterProfile.IsUbuntu2004() {
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--resolv-conf"] = "/run/systemd/resolve/resolv.conf"
}
removeKubeletFlags(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion)
invalidFeatureGates := []string{}
// Remove --feature-gate VolumeSnapshotDataSource starting with 1.22
if common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.22.0-alpha.1") {
invalidFeatureGates = append(invalidFeatureGates, "VolumeSnapshotDataSource")
}
removeInvalidFeatureGates(cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig, invalidFeatureGates)
if cs.Properties.AnyAgentIsLinux() {
if val, ok := cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--register-with-taints"]; !ok {
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--register-with-taints"] = common.MasterNodeTaint
} else {
if !strings.Contains(val, common.MasterNodeTaint) {
cs.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--register-with-taints"] += fmt.Sprintf(",%s", common.MasterNodeTaint)
}
}
}
}
// Agent-specific kubelet config changes go here
for _, profile := range cs.Properties.AgentPoolProfiles {
if profile.KubernetesConfig == nil {
profile.KubernetesConfig = &KubernetesConfig{}
}
if profile.KubernetesConfig.KubeletConfig == nil {
profile.KubernetesConfig.KubeletConfig = make(map[string]string)
}
if isUpgrade {
// if upgrade, force default "--pod-infra-container-image" value
profile.KubernetesConfig.KubeletConfig["--pod-infra-container-image"] = o.KubernetesConfig.KubeletConfig["--pod-infra-container-image"]
}
if profile.IsWindows() {
for key, val := range staticWindowsKubeletConfig {
profile.KubernetesConfig.KubeletConfig[key] = val
}
} else {
for key, val := range staticLinuxKubeletConfig {
profile.KubernetesConfig.KubeletConfig[key] = val
}
}
setMissingKubeletValues(profile.KubernetesConfig, o.KubernetesConfig.KubeletConfig)
if isUpgrade && common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.14.0") {
hasSupportPodPidsLimitFeatureGate := strings.Contains(profile.KubernetesConfig.KubeletConfig["--feature-gates"], "SupportPodPidsLimit=true")
podMaxPids, err := strconv.Atoi(profile.KubernetesConfig.KubeletConfig["--pod-max-pids"])
if err != nil {
profile.KubernetesConfig.KubeletConfig["--pod-max-pids"] = strconv.Itoa(-1)
} else {
// If we don't have an explicit SupportPodPidsLimit=true, disable --pod-max-pids by setting to -1
// To prevent older clusters from inheriting SupportPodPidsLimit=true implicitly starting w/ 1.14.0
if !hasSupportPodPidsLimitFeatureGate || podMaxPids <= 0 {
profile.KubernetesConfig.KubeletConfig["--pod-max-pids"] = strconv.Itoa(-1)
}
}
}
// "--protect-kernel-defaults" is only true for VHD based VMs since the base Ubuntu distros don't have a /etc/sysctl.d/60-CIS.conf file.
if profile.IsVHDDistro() {
if _, ok := profile.KubernetesConfig.KubeletConfig["--protect-kernel-defaults"]; !ok {
profile.KubernetesConfig.KubeletConfig["--protect-kernel-defaults"] = "true"
}
}
// Override the --resolv-conf kubelet config value for Ubuntu 18.04 after the distro value is set.
if profile.IsUbuntu1804() || profile.IsUbuntu2004() {
profile.KubernetesConfig.KubeletConfig["--resolv-conf"] = "/run/systemd/resolve/resolv.conf"
}
removeKubeletFlags(profile.KubernetesConfig.KubeletConfig, o.OrchestratorVersion)
if cs.Properties.OrchestratorProfile.KubernetesConfig.IsAddonEnabled(common.AADPodIdentityAddonName) && !profile.IsWindows() {
if val, ok := profile.KubernetesConfig.KubeletConfig["--register-with-taints"]; !ok {
profile.KubernetesConfig.KubeletConfig["--register-with-taints"] = fmt.Sprintf("%s=true:NoSchedule", common.AADPodIdentityTaintKey)
} else {
if !strings.Contains(val, common.AADPodIdentityTaintKey) {
profile.KubernetesConfig.KubeletConfig["--register-with-taints"] += fmt.Sprintf(",%s=true:NoSchedule", common.AADPodIdentityTaintKey)
}
}
}
}
}
// removeKubeletFlags strips kubelet flags from k that are not valid for the
// Kubernetes version string v.
func removeKubeletFlags(k map[string]string, v string) {
	// "--pod-max-pids" is only supported from v1.10 onward.
	if !common.IsKubernetesVersionGe(v, "1.10.0") {
		delete(k, "--pod-max-pids")
	}
	// "--cadvisor-port" was removed in v1.12.
	if common.IsKubernetesVersionGe(v, "1.12.0") {
		delete(k, "--cadvisor-port")
	}
	// "--allow-privileged" was removed in v1.15.
	if common.IsKubernetesVersionGe(v, "1.15.0-beta.1") {
		delete(k, "--allow-privileged")
	}
	// Dockershim-related flags were removed along with dockershim in v1.24.
	if common.IsKubernetesVersionGe(v, "1.24.0-alpha") {
		dockershimFlags := []string{
			"--cni-conf-dir",
			"--cni-bin-dir",
			"--cni-cache-dir",
			"--docker-endpoint",
			"--experimental-dockershim-root-directory",
			"--image-pull-progress-deadline",
			"--network-plugin",
			"--network-plugin-mtu",
			"--non-masquerade-cidr",
		}
		for _, flag := range dockershimFlags {
			delete(k, flag)
		}
	}
}
// setMissingKubeletValues fills p.KubeletConfig with entries from the
// defaults map d for every key the user has not set explicitly. When p has
// no kubelet config at all, d is adopted directly (note: the map is aliased,
// not copied — matching the original behavior).
func setMissingKubeletValues(p *KubernetesConfig, d map[string]string) {
	if p.KubeletConfig == nil {
		p.KubeletConfig = d
		return
	}
	for key, defaultValue := range d {
		if _, userSet := p.KubeletConfig[key]; !userSet {
			p.KubeletConfig[key] = defaultValue
		}
	}
}
|
package xsuportal
import (
"encoding/base64"
"fmt"
"sync/atomic"
"time"
"github.com/SherClockHolmes/webpush-go"
"github.com/golang/protobuf/proto"
"github.com/jmoiron/sqlx"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/isucon/isucon10-final/webapp/golang/proto/xsuportal/resources"
)
// Notifier sends Web Push notifications to contestants.
type Notifier struct {
}

// options holds the VAPID credentials used to sign Web Push requests.
// NOTE(review): the key pair is hard-coded — acceptable for a contest
// environment, but it should come from configuration in production.
var options = webpush.Options{
Subscriber: "xsuportal@example.com",
VAPIDPrivateKey: "8Hhzlr3izBRZ0RWKXraDpk42blfsZbUnVmy1NyniZKk",
VAPIDPublicKey: "BC7mQPMOgmwiJYTQyswmsRHLzpGVhd07HYSXtRT9EDgIf-0QMWOzYpGRGdelgT8MmOPxqtjtv4eSexJxJX8oZKc",
}

// VAPIDKey returns the Web Push VAPID options. Callers treat a nil return
// as "push disabled"; this implementation always returns non-nil.
func (n *Notifier) VAPIDKey() *webpush.Options {
return &options
}

// notifiableContestant is one row of the contestant/push_subscription join
// used to address a push notification.
type notifiableContestant struct {
ID string `db:"id"`
TeamID int64 `db:"team_id"`
Endpoint string `db:"endpoint"`
P256dh string `db:"p256dh"`
Auth string `db:"auth"`
}

// countNotify is an atomically incremented in-process counter used to
// assign notification IDs (not persisted; resets on restart).
var countNotify int64 = 0
// NotifyClarificationAnswered pushes a clarification notification: to every
// contestant with a push subscription when the clarification is disclosed,
// otherwise only to the owning team's contestants.
func (n *Notifier) NotifyClarificationAnswered(db sqlx.Ext, c *Clarification, updated bool) error {
var contestants []notifiableContestant
if c.Disclosed.Valid && c.Disclosed.Bool {
err := sqlx.Select(
db,
&contestants,
"SELECT c.id AS `id`, c.team_id AS `team_id`, s.endpoint AS `endpoint`, s.p256dh AS `p256dh`, s.auth AS `auth` FROM `contestants` AS c JOIN `push_subscriptions` AS s ON c.id = s.contestant_id WHERE `team_id` IS NOT NULL",
)
if err != nil {
return fmt.Errorf("select all contestants: %w", err)
}
} else {
// NOTE(review): this branch selects only id/team_id, so the push
// subscription fields (endpoint/p256dh/auth) remain empty and
// notifyProto will send to an empty endpoint — confirm intended.
err := sqlx.Select(
db,
&contestants,
"SELECT `id`, `team_id` FROM `contestants` WHERE `team_id` = ?",
c.TeamID,
)
if err != nil {
return fmt.Errorf("select contestants(team_id=%v): %w", c.TeamID, err)
}
}
for _, contestant := range contestants {
notificationPB := &resources.Notification{
Content: &resources.Notification_ContentClarification{
ContentClarification: &resources.Notification_ClarificationMessage{
ClarificationId: c.ID,
Owned: c.TeamID == contestant.TeamID,
Updated: updated,
},
},
}
// DB persistence of the notification is deliberately bypassed; the
// original call is kept for reference.
//notification, err := n.notify(db, notificationPB, contestant.ID)
//if err != nil {
//	return fmt.Errorf("notify: %w", err)
//}
if n.VAPIDKey() != nil {
// In-process sequential ID; notifyProto's error is ignored
// (best-effort push).
notificationPB.Id = atomic.AddInt64(&countNotify, 1)
notificationPB.CreatedAt = timestamppb.New(time.Now())
// TODO: Web Push IIKANJI NI SHITE
n.notifyProto(contestant, notificationPB)
}
}
return nil
}
// NotifyBenchmarkJobFinished pushes a "benchmark job finished" notification
// to every push-subscribed contestant on the job's team.
func (n *Notifier) NotifyBenchmarkJobFinished(db sqlx.Ext, job *BenchmarkJob) error {
var contestants []notifiableContestant
err := sqlx.Select(
db,
&contestants,
"SELECT c.id AS `id`, c.team_id AS `team_id`, s.endpoint AS `endpoint`, s.p256dh AS `p256dh`, s.auth AS `auth` FROM `contestants` AS c JOIN `push_subscriptions` AS s ON c.id = s.contestant_id WHERE `team_id` = ?",
job.TeamID,
)
if err != nil {
return fmt.Errorf("select contestants(team_id=%v): %w", job.TeamID, err)
}
for _, contestant := range contestants {
notificationPB := &resources.Notification{
Content: &resources.Notification_ContentBenchmarkJob{
ContentBenchmarkJob: &resources.Notification_BenchmarkJobMessage{
BenchmarkJobId: job.ID,
},
},
}
// DB persistence deliberately bypassed; kept for reference.
//notification, err := n.notify(db, notificationPB, contestant.ID)
//if err != nil {
//	return fmt.Errorf("notify: %w", err)
//}
if n.VAPIDKey() != nil {
// In-process sequential ID; push errors are ignored (best effort).
notificationPB.Id = atomic.AddInt64(&countNotify, 1)
notificationPB.CreatedAt = timestamppb.New(time.Now())
// TODO: Web Push IIKANJI NI SHITE
n.notifyProto(contestant, notificationPB)
}
}
return nil
}
// notifyProto marshals m, base64-encodes the bytes, and sends the result as
// a Web Push notification to the contestant's subscription endpoint.
// It returns the first error from marshaling or the push request.
func (n *Notifier) notifyProto(c notifiableContestant, m proto.Message) error {
	// BUG FIX: the marshal error was previously discarded with `_`,
	// silently pushing an empty payload on failure.
	res, err := proto.Marshal(m)
	if err != nil {
		return err
	}
	encRes := base64.StdEncoding.EncodeToString(res)
	var s webpush.Subscription
	s.Endpoint = c.Endpoint
	s.Keys.P256dh = c.P256dh
	s.Keys.Auth = c.Auth
	resp, err := webpush.SendNotification([]byte(encRes), &s, n.VAPIDKey())
	if err != nil {
		return err
	}
	// Close the body so the underlying connection can be reused.
	defer resp.Body.Close()
	return nil
}
// notify is a stub left over from the original implementation, which stored
// the notification in the database before pushing. It currently does nothing
// and always returns (nil, nil); callers have been switched to the in-memory
// countNotify counter instead.
func (n *Notifier) notify(db sqlx.Ext, notificationPB *resources.Notification, contestantID string) (*Notification, error) {
return nil, nil
}
|
package utils
import (
"github.com/astaxie/beego"
"github.com/zwczou/jpush"
)
// Default JPush credentials; overridden in init() by the "jiguang" section
// of the beego app config when present.
// NOTE(review): shipping credentials in source is a security risk — prefer
// requiring them from configuration.
var (
appKey = "9a11d6ce355150887087d0ca"
secret = "af4025100bbfc437e3df1726"
)
// init overrides the default JPush credentials from the beego app config
// keys "jiguang::appKey" and "jiguang::secret" when they are non-empty.
func init() {
	if v := beego.AppConfig.String("jiguang" + "::appKey"); v != "" {
		appKey = v
	}
	if v := beego.AppConfig.String("jiguang" + "::secret"); v != "" {
		secret = v
	}
}
// JiGuangSendAll pushes a notification and an in-app message to all users on
// all platforms via JPush.
//
// alertTitle/alertContent populate the per-platform notification;
// title/content populate the message payload (title doubles as the iOS
// sound, matching the original behavior).
func JiGuangSendAll(alertTitle, alertContent, title, content string) {
	client := jpush.NewJpushClient(appKey, secret)
	payload := &jpush.Payload{
		Platform: jpush.NewPlatform().All(),
		Audience: jpush.NewAudience().All(),
		Notification: &jpush.Notification{
			Alert: "后台推送",
			Android: &jpush.AndroidNotification{
				Alert: alertContent, // alert body
				Title: alertTitle,   // alert title
			},
			Ios: &jpush.IosNotification{
				Alert: alertContent,
				Sound: title,
			},
			WinPhone: &jpush.WinPhoneNotification{
				Alert: alertContent,
				Title: alertTitle,
			},
		},
		Options: &jpush.Options{
			TimeLive:       60, // seconds the push stays deliverable
			ApnsProduction: false,
		},
		Message: &jpush.Message{
			Title:   title,
			Content: content,
		},
	}
	// BUG FIX: the error was previously swallowed via a `msgId = msgId`
	// self-assignment; log failures so they are visible.
	if _, err := client.Push(payload); err != nil {
		beego.Error("jpush send-all failed:", err)
	}
	// TODO: scheduled pushes could use client.ScheduleCreate.
}
// JiGuangSendByAddr pushes a notification and an in-app message to a single
// device, addressed by its JPush registration ID.
func JiGuangSendByAddr(addr, alertTitle, alertContent, title, content string) {
	client := jpush.NewJpushClient(appKey, secret)
	// Target exactly one registration ID.
	audience := jpush.NewAudience()
	audience.SetRegistrationId(addr)
	payload := &jpush.Payload{
		Platform: jpush.NewPlatform().All(),
		Audience: audience,
		Notification: &jpush.Notification{
			Alert: "后台推送",
			Android: &jpush.AndroidNotification{
				Alert: alertContent, // alert body
				Title: alertTitle,   // alert title
			},
			Ios: &jpush.IosNotification{
				Alert: alertContent,
				Sound: title,
			},
			WinPhone: &jpush.WinPhoneNotification{
				Alert: alertContent,
				Title: alertTitle,
			},
		},
		Options: &jpush.Options{
			TimeLive:       60,
			ApnsProduction: false,
		},
		Message: &jpush.Message{
			Title:   title,
			Content: content,
		},
	}
	// BUG FIX: the error was previously swallowed via a `msgId = msgId`
	// self-assignment; log failures so they are visible.
	if _, err := client.Push(payload); err != nil {
		beego.Error("jpush send-by-addr failed:", err)
	}
}
//极光推送多用户
func JiGuangSendByAddrs(addrs []string, alertTitle, alertContent, title, content string) {
//1初始化客户端
client := jpush.NewJpushClient(appKey, secret)
//2获取推送唯一标识符cid
//cidList, err = client.PushCid(1, "push")
audience := jpush.NewAudience()
audience.SetRegistrationId(addrs...)
//推送消息
payload := &jpush.Payload{
Platform: jpush.NewPlatform().All(),
Audience: audience,
Notification: &jpush.Notification{
Alert: "后台推送",
//提醒
Android: &jpush.AndroidNotification{
Alert: alertContent, //提醒内容
Title: alertTitle, //提醒标题
},
Ios: &jpush.IosNotification{
Alert: alertContent,
Sound: title,
},
WinPhone: &jpush.WinPhoneNotification{
Alert: alertContent,
Title: alertTitle,
},
},
Options: &jpush.Options{
TimeLive: 60,
ApnsProduction: false,
},
//内容
Message: &jpush.Message{
Title: title,
Content: content,
},
}
msgId, err := client.Push(payload)
// msgId, err = client.PushValidate(payload)
if err != nil {
//异常 pass
} else {
msgId = msgId
}
//4创建计划任务
//client.ScheduleCreate
} |
package main
import (
"encoding/json"
"net/http"
docker "github.com/docker/docker/client"
"github.com/ubclaunchpad/inertia/common"
)
// statusHandler reports the deployment status as JSON: the Inertia version
// plus the currently active project containers. With no active deployment it
// returns a status containing only the version and an empty container list.
func statusHandler(w http.ResponseWriter, r *http.Request) {
	respond := func(payload interface{}) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(payload)
	}

	// No deployment yet: report version with an empty container list.
	if deployment == nil {
		respond(&common.DeploymentStatus{
			InertiaVersion: Version,
			Containers:     make([]string, 0),
		})
		return
	}

	cli, err := docker.NewEnvClient()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer cli.Close()

	status, err := deployment.GetStatus(cli)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	status.InertiaVersion = Version
	respond(status)
}
|
package datastore
import (
"database/sql"
"strings"
"github.com/go-kit/kit/log"
// we mask the actual driver for now
_ "github.com/lib/pq"
"github.com/RicardoCampos/goauth/oauth2"
)
// pgClientRepository is a Postgres-backed implementation of
// oauth2.ClientRepository.
type pgClientRepository struct {
db *sql.DB
logger log.Logger
}
// NewPostgresClientRepository creates a new client repository backed by
// Postgres, verifying connectivity with a ping before returning.
func NewPostgresClientRepository(dataSourceName string, logger log.Logger) (oauth2.ClientRepository, error) {
	db, err := sql.Open("postgres", dataSourceName)
	if err != nil {
		return nil, err
	}
	// sql.Open does not dial; confirm the database is actually reachable.
	if pingErr := db.Ping(); pingErr != nil {
		return nil, pingErr
	}
	return pgClientRepository{db: db, logger: logger}, nil
}
// AddClient inserts the client into public.clients. Failures are logged but
// not returned, matching the interface.
func (r pgClientRepository) AddClient(client oauth2.Client) {
	stmt, err := r.db.Prepare("INSERT INTO public.clients(\"clientId\", \"clientSecret\", \"accessTokenLifetime\", \"tokenType\", \"allowedScopes\" ) VALUES($1,$2,$3,$4,$5)")
	if err != nil {
		r.logger.Log("msg", "Unable to prepare client insert statement", err)
		return
	}
	// BUG FIX: the prepared statement was never closed (resource leak).
	defer stmt.Close()
	// BUG FIX: the exec failure previously logged the (nil) prepare error
	// instead of the exec error.
	if _, err := stmt.Exec(client.ClientID(), client.ClientSecret(), client.AccessTokenLifetime(), client.TokenType(), strings.Join(client.AllowedScopes(), " ")); err != nil {
		r.logger.Log("msg", "Unable to execute client insert statement", err)
	}
}
// GetClients loads every client from public.clients, keyed by client ID.
// It returns nil if the statement cannot be prepared or executed; rows that
// fail to scan or to construct a client are logged and skipped.
func (r pgClientRepository) GetClients() map[string]oauth2.Client {
	stmt, err := r.db.Prepare("SELECT \"clientId\", \"clientSecret\", \"accessTokenLifetime\", \"tokenType\", \"allowedScopes\" FROM public.clients")
	if err != nil {
		// BUG FIX: stmt.Close was previously deferred before this check,
		// panicking on a nil statement when Prepare failed.
		r.logger.Log("msg", "Unable to prepare client get statement", err)
		return nil
	}
	defer stmt.Close()
	rows, err := stmt.Query()
	if err != nil {
		r.logger.Log("msg", "Unable to execute client get statement", err)
		return nil
	}
	defer rows.Close()
	clients := make(map[string]oauth2.Client)
	for rows.Next() {
		var (
			dbClientID          string
			clientSecret        string
			accessTokenLifetime int64
			tokenType           string
			allowedScopes       string
		)
		if err := rows.Scan(&dbClientID, &clientSecret, &accessTokenLifetime, &tokenType, &allowedScopes); err != nil {
			r.logger.Log("msg", "Unable to scan client get statement", err)
			// BUG FIX: previously a zero-valued client was still stored.
			continue
		}
		client, err := oauth2.NewClient(dbClientID, clientSecret, tokenType, accessTokenLifetime, strings.Fields(allowedScopes))
		if err != nil {
			r.logger.Log("msg", "Unable to create client from rows", err)
			continue
		}
		clients[dbClientID] = client
	}
	if err = rows.Err(); err != nil {
		r.logger.Log("msg", "Unknown error getting clients", err)
	}
	return clients
}
// GetClient looks up a single client by ID. The second return value reports
// whether the client was found and could be constructed.
func (r pgClientRepository) GetClient(clientID string) (oauth2.Client, bool) {
	stmt, err := r.db.Prepare("SELECT \"clientId\", \"clientSecret\", \"accessTokenLifetime\", \"tokenType\", \"allowedScopes\" FROM public.clients WHERE \"clientId\" = $1 LIMIT 1;")
	if err != nil {
		r.logger.Log("msg", "Could not compile the client query.", err)
		return nil, false
	}
	defer stmt.Close()

	var (
		dbClientID          string
		clientSecret        string
		accessTokenLifetime int64
		tokenType           string
		allowedScopes       string
	)
	if err = stmt.QueryRow(clientID).Scan(&dbClientID, &clientSecret, &accessTokenLifetime, &tokenType, &allowedScopes); err != nil {
		r.logger.Log("msg", "Unable to find a matching client.", err)
		return nil, false
	}

	client, err := oauth2.NewClient(dbClientID, clientSecret, tokenType, accessTokenLifetime, strings.Fields(allowedScopes))
	if err != nil {
		r.logger.Log("msg", "Failed to create a client from the row retrieved", err)
		return nil, false
	}
	return client, true
}
|
package response
// TextResponse is a WeChat-style text reply message.
type TextResponse struct {
Response
Content string `xml:"Content"`
}

// NewTextResponse builds a text reply addressed to toUser with the given
// body text.
// NOTE(review): CreateTime and FromUserName are hard-coded — these look like
// fixture values; confirm before production use.
func NewTextResponse(text string, toUser string) TextResponse {
res := TextResponse{}
res.Response = NewResponse("text")
res.CreateTime = 11155566
res.FromUserName = "gh_fba62a0ffce7"
res.ToUserName = toUser
res.Content = text
return res
}

// TextResponseToString serializes the response via ResponseToString.
func (t TextResponse) TextResponseToString() (string, error) {
res, err := ResponseToString(t)
return res, err
}
|
package options
//import (
// "os"
// "testing"
// "time"
//
// . "github.com/onsi/gomega"
//)
//
//func TestHandleRabbitEnvars_relay(t *testing.T) {
//
// g := NewGomegaWithT(t)
//
// envars := map[string]string{
// "PLUMBER_DEBUG": "true",
// "PLUMBER_RELAY_TOKEN": "8EDB98ED-0D85-4CFD-BE24-8B1E00A9F7C3",
// "PLUMBER_RELAY_GRPC_ADDRESS": "localhost:9000",
// "PLUMBER_RELAY_GRPC_DISABLE_TLS": "true",
// "PLUMBER_RELAY_GRPC_TIMEOUT": "4s",
// "PLUMBER_RELAY_NUM_WORKERS": "10",
// "PLUMBER_RELAY_RABBIT_ADDRESS": "amqp://testing.tld:6379",
// "PLUMBER_RELAY_RABBIT_EXCHANGE": "testex",
// "PLUMBER_RELAY_RABBIT_ROUTING_KEY": "testqueue",
// "PLUMBER_RELAY_RABBIT_QUEUE": "testqueue",
// "PLUMBER_RELAY_RABBIT_QUEUE_DURABLE": "true",
// "PLUMBER_RELAY_RABBIT_QUEUE_AUTO_DELETE": "false",
// "PLUMBER_RELAY_RABBIT_QUEUE_EXCLUSIVE": "false",
// "PLUMBER_RELAY_RABBIT_AUTOACK": "false",
// "PLUMBER_RELAY_RABBIT_QUEUE_DECLARE": "false",
// "PLUMBER_RELAY_CONSUMER_TAG": "plumber_123",
// }
//
// for k, v := range envars {
// os.Setenv(k, v)
// }
//
// defer func() {
// // Unset all so we don't interfere with other tests
// for k, _ := range envars {
// os.Unsetenv(k)
// }
// }()
//
// cmd, opts, err := New([]string{"relay", "rabbit"})
//
// g.Expect(err).ToNot(HaveOccurred())
// g.Expect(cmd).To(Equal("relay rabbit"))
// g.Expect(opts.Relay.Type).To(Equal("rabbit"))
// g.Expect(opts.Relay.GRPCDisableTLS).To(BeTrue())
// g.Expect(opts.Relay.GRPCTimeout).To(Equal(time.Second * 4))
// g.Expect(opts.Relay.Token).To(Equal("8EDB98ED-0D85-4CFD-BE24-8B1E00A9F7C3"))
// g.Expect(opts.Relay.GRPCAddress).To(Equal("localhost:9000"))
// g.Expect(opts.Relay.NumWorkers).To(Equal(10))
// g.Expect(opts.Rabbit.Exchange).To(Equal("testex"))
// g.Expect(opts.Rabbit.Address).To(Equal("amqp://testing.tld:6379"))
// g.Expect(opts.Rabbit.RoutingKey).To(Equal("testqueue"))
// g.Expect(opts.Rabbit.ReadQueueDurable).To(BeTrue())
// g.Expect(opts.Rabbit.ReadQueueAutoDelete).To(BeFalse())
// g.Expect(opts.Rabbit.ReadQueueExclusive).To(BeFalse())
// g.Expect(opts.Rabbit.ReadQueueDeclare).To(BeFalse())
// g.Expect(opts.Rabbit.ReadAutoAck).To(BeFalse())
// g.Expect(opts.Rabbit.ReadConsumerTag).To(Equal("plumber_123"))
//
//}
//
//func TestHandleAWSSQSEnvars_relay(t *testing.T) {
//
// g := NewGomegaWithT(t)
//
// envars := map[string]string{
// "PLUMBER_DEBUG": "true",
// "PLUMBER_RELAY_TOKEN": "8EDB98ED-0D85-4CFD-BE24-8B1E00A9F7C3",
// "PLUMBER_RELAY_GRPC_ADDRESS": "localhost:9000",
// "PLUMBER_RELAY_GRPC_DISABLE_TLS": "true",
// "PLUMBER_RELAY_GRPC_TIMEOUT": "4s",
// "PLUMBER_RELAY_NUM_WORKERS": "10",
// "PLUMBER_RELAY_SQS_QUEUE_NAME": "plumber_test",
// "PLUMBER_RELAY_SQS_REMOTE_ACCOUNT_ID": "1234",
// "PLUMBER_RELAY_SQS_MAX_NUM_MESSAGES": "2",
// "PLUMBER_RELAY_SQS_RECEIVE_REQUEST_ATTEMPT_ID": "plumber_receiver",
// "PLUMBER_RELAY_SQS_AUTO_DELETE": "true",
// "PLUMBER_RELAY_SQS_WAIT_TIME_SECONDS": "6",
// "PLUMBER_RELAY_CONSUMER_TAG": "plumber_123",
// }
//
// for k, v := range envars {
// os.Setenv(k, v)
// }
//
// defer func() {
// // Unset all so we don't interfere with other tests
// for k, _ := range envars {
// os.Unsetenv(k)
// }
// }()
//
// cmd, opts, err := New([]string{"relay", "aws-sqs"})
//
// g.Expect(err).ToNot(HaveOccurred())
// g.Expect(cmd).To(Equal("relay aws-sqs"))
// g.Expect(opts.Debug).To(BeTrue())
// g.Expect(opts.Relay.Type).To(Equal("aws-sqs"))
// g.Expect(opts.Relay.Token).To(Equal("8EDB98ED-0D85-4CFD-BE24-8B1E00A9F7C3"))
// g.Expect(opts.Relay.GRPCAddress).To(Equal("localhost:9000"))
// g.Expect(opts.Relay.GRPCDisableTLS).To(BeTrue())
// g.Expect(opts.Relay.GRPCTimeout).To(Equal(time.Second * 4))
// g.Expect(opts.Relay.NumWorkers).To(Equal(10))
// g.Expect(opts.AWSSQS.QueueName).To(Equal("plumber_test"))
// g.Expect(opts.AWSSQS.RemoteAccountID).To(Equal("1234"))
// g.Expect(opts.AWSSQS.RelayMaxNumMessages).To(Equal(int64(2)))
// g.Expect(opts.AWSSQS.RelayAutoDelete).To(BeTrue())
// g.Expect(opts.AWSSQS.RemoteAccountID).To(Equal("1234"))
// g.Expect(opts.AWSSQS.RelayWaitTimeSeconds).To(Equal(int64(6)))
// g.Expect(opts.AWSSQS.RelayReceiveRequestAttemptId).To(Equal("plumber_receiver"))
//}
|
package app
import (
"context"
pb "github.com/chenzhe84/BaiCloud/metadata-service/proto/app"
)
// Handler implements the metadata-service RPC endpoints, delegating all
// persistence to an IRepository.
type Handler struct {
	Repo IRepository
}

// SaveApp persists app; res reports success or carries the error message.
func (h *Handler) SaveApp(cxt context.Context, app *pb.App, res *pb.Response) error {
	err := h.Repo.SaveApp(app)
	if err != nil {
		res.Result = false
		res.Message = err.Error()
		return nil
	}
	res.Result = true
	return nil
}

// GetApp loads the app by its ID into res.
func (h *Handler) GetApp(cxt context.Context, app *pb.App, res *pb.AppResponse) error {
	found, err := h.Repo.GetAppById(app.Id)
	if err != nil {
		res.Result = false
		res.Message = err.Error()
		return nil
	}
	res.Result = true
	res.App = found
	return nil
}

// SaveModule persists module; res reports success or the error message.
func (h *Handler) SaveModule(cxt context.Context, module *pb.Module, res *pb.Response) error {
	err := h.Repo.SaveModule(module)
	if err != nil {
		res.Result = false
		res.Message = err.Error()
		return nil
	}
	res.Result = true
	return nil
}

// GetModule loads the module by its ID into res.
func (h *Handler) GetModule(cxt context.Context, module *pb.Module, res *pb.ModuleResponse) error {
	found, err := h.Repo.GetModuleById(module.Id)
	if err != nil {
		res.Result = false
		res.Message = err.Error()
		return nil
	}
	res.Result = true
	res.Module = found
	return nil
}

// SaveForm persists form; res reports success or the error message.
func (h *Handler) SaveForm(cxt context.Context, form *pb.Form, res *pb.Response) error {
	err := h.Repo.SaveForm(form)
	if err != nil {
		res.Result = false
		res.Message = err.Error()
		return nil
	}
	res.Result = true
	return nil
}

// GetForm loads the form by its ID into res.
func (h *Handler) GetForm(cxt context.Context, form *pb.Form, res *pb.FormResponse) error {
	found, err := h.Repo.GetFormById(form.Id)
	if err != nil {
		res.Result = false
		res.Message = err.Error()
		return nil
	}
	res.Result = true
	res.Form = found
	return nil
}
|
// client.go
package main
import (
"encoding/binary"
"fmt"
"log"
"math/rand"
"net"
"time"
)
// client dials the server at addr, performs a name handshake, then either
// exchanges 2*max_requests random numbers (hangie == false) or sleeps
// forever to exercise the server's handling of stuck clients (hangie ==
// true).
func client(addr, name string, hangie bool) {
	conn, err := net.Dial(network, addr+port)
	if err != nil {
		fmt.Println("My name is", name, "I couldn't join the server. I am leaving :(")
		return
	}
	// BUG FIX: Close was previously deferred at the very end of the
	// function — after a potentially infinite loop in the hangie branch —
	// so the connection could never be released there. Register it as soon
	// as the dial succeeds.
	defer conn.Close()

	buffer := make([]byte, buffer_size)
	// Handshake: send our name, receive the server's client count.
	copy(buffer, []byte(name))
	if _, err = write_deadline(conn, waiting_time, buffer); err != nil {
		log.Println("Failed to send my name to server")
	}
	if _, err = read_deadline(conn, waiting_time, buffer); err != nil {
		log.Println("Failed to receive number of clients")
	} else {
		num_clients, n := binary.Uvarint(buffer)
		if n > 0 {
			fmt.Println("My name is", name, num_clients, "clients were served including me")
		}
	}

	if !hangie { // good client: send a batch of requests, then read replies
		for j := 0; j < 2*max_requests; j++ {
			buffer = make([]byte, buffer_size)
			number := uint64(rand.Uint32() % 10000)
			fmt.Println("My name is", name, "I am sending number", number, "on attempt", j)
			binary.PutUvarint(buffer, number)
			if _, err = write_deadline(conn, waiting_time, buffer); err != nil {
				log.Println("Failed to write to server")
				continue //we hope to recover in the future
			}
		}
		for j := 0; j < 2*max_requests; j++ {
			if _, err = read_deadline(conn, waiting_time, buffer); err != nil {
				log.Println("This is", name, "Failed to read from server on", j, "attempt")
				continue //we hope to recover in the future
			}
			number, n := binary.Uvarint(buffer)
			if n < 1 {
				fmt.Println("My name is", name, "I failed to get a sensible answer from server on attempt", j, "!")
			} else {
				fmt.Println("My name is", name, "I have got the number", number, "on attempt", j)
			}
		}
	} else { //terrible client, deserving to be dropped
		for {
			time.Sleep(time.Second)
		}
	}
}
|
package main
import (
"github.com/gin-gonic/gin"
"github.com/joho/godotenv"
"healthy-api/router"
"log"
)
func main() {
r := gin.Default()
err := godotenv.Load()
if err != nil {
log.Fatal("Error loading .env file")
}
// 血壓紀錄
//blood := r.Group("/blood")
//router.BloodRouter(blood)
// 設備
device := r.Group("/device")
router.DeviceRouter(device)
r.Run()
} |
package domain
import (
commonDto "github.com/bearname/videohost/internal/common/dto"
"github.com/bearname/videohost/internal/common/util"
"github.com/bearname/videohost/internal/user/app/dto"
)
// AuthService defines the authentication operations of the user service:
// account creation, login, and token validation/refresh.
type AuthService interface {
// CreateUser registers a new account and returns its initial token.
CreateUser(newUserDto dto.SignupUserDto) (util.Token, error)
// Login authenticates the given credentials and returns a token.
Login(loginUserDto dto.LoginUserDto) (util.Token, error)
// ValidateToken parses the Authorization header value and returns the
// user it identifies.
ValidateToken(authorizationHeader string) (commonDto.UserDto, error)
// RefreshToken exchanges a refresh token for a new token.
RefreshToken(refreshTokenDto dto.RefreshTokenDto) (util.Token, error)
}
|
package main
import (
"digicert"
"errors"
"log"
"os"
)
// c is the shared DigiCert API client, lazily initialized by checkEnv from
// the DC_KEY environment variable.
var c *digicert.Client

// main runs each API demo section in sequence.
func main() {
container()
certificate()
domain()
orders()
organization()
request()
user()
}

// checkEnv lazily constructs the DigiCert client from DC_KEY and verifies an
// API key is present. It exits the process (log.Fatal) if the client cannot
// be created, and returns an error when the key is empty.
func checkEnv() error {
if c == nil {
var err error
c, err = digicert.New(os.Getenv("DC_KEY"))
if err != nil {
log.Fatal(err)
}
}
if c.AuthKey == "" {
return errors.New("API key not defined")
}
return nil
}
// export DC_KEY="apikeys"
|
package messages
// ReadAckRequest acknowledges that a message has been read.
// NOTE(review): field semantics inferred from names only — confirm against
// the producer of these payloads.
type ReadAckRequest struct {
RId int64 `json:"r_id"` // request id — presumably; TODO confirm
UserId string `json:"user_id"` // acking user
RemoteId string `json:"remote_id"` // peer in a direct conversation
GroupId string `json:"group_id"` // group conversation, if any
MsgId int64 `json:"msg_id"` // message being acknowledged
Type MessageType `json:"type"`
}
|
package main
import (
"context"
"flag"
"log"
"os"
"os/signal"
"runtime/pprof"
"github.com/azenk/audio/stream"
"github.com/golang/glog"
"github.com/spf13/viper"
)
// main generates a stereo test signal — one sine wave per channel, with
// parameters read from a "signals" config file — and streams it to the
// default audio device until the generators finish or the process is
// interrupted.
func main() {
	// Defaults: 1 kHz, full amplitude, zero phase on both channels.
	cfgFile := viper.New()
	cfgFile.SetDefault("left.frequency", 1000)
	cfgFile.SetDefault("left.amplitude", 1)
	cfgFile.SetDefault("left.phase", 0)
	cfgFile.SetDefault("right.frequency", 1000)
	cfgFile.SetDefault("right.amplitude", 1)
	cfgFile.SetDefault("right.phase", 0)
	cfgFile.AddConfigPath(".")
	cfgFile.SetConfigName("signals")
	// Read error deliberately ignored: a missing config file falls back to
	// the defaults above — TODO confirm that is intended.
	cfgFile.ReadInConfig()
	flag.Parse()
	// Optional CPU profiling, enabled by the "cpuprofile" config key.
	// NOTE(review): this defer only runs on a normal return from main.
	if cpuProfileFile := cfgFile.GetString("cpuprofile"); cpuProfileFile != "" {
		f, err := os.Create(cpuProfileFile)
		if err != nil {
			log.Fatal("could not create CPU profile: ", err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal("could not start CPU profile: ", err)
		}
		defer pprof.StopCPUProfile()
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Open the default stereo output device.
	d, err := stream.OpenDefaultDevice(ctx, &stream.Configuration{Channels: 2})
	if err != nil {
		panic(err)
	}
	glog.Infof("Opened card with config: %s", d.Config())
	streamCh := d.Stream()
	// Ctrl-C cancels the signal generators (not the device context), which
	// lets the pipeline drain before exit.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	signalsCtx, signalsCancel := context.WithCancel(context.Background())
	defer signalsCancel()
	// One sine generator per channel, producing SampleRate/100 samples per
	// chunk, configured from the "left.*" / "right.*" keys.
	left := stream.Sinewave(signalsCtx,
		d.Config().SampleRate()/100,
		cfgFile.GetFloat64("left.amplitude"),
		cfgFile.GetFloat64("left.frequency"),
		cfgFile.GetFloat64("left.phase"),
		d.Config().SampleRate())
	right := stream.Sinewave(signalsCtx,
		d.Config().SampleRate()/100,
		cfgFile.GetFloat64("right.amplitude"),
		cfgFile.GetFloat64("right.frequency"),
		cfgFile.GetFloat64("right.phase"),
		d.Config().SampleRate())
	// Interleave left/right into a single stereo sample stream.
	lrMerged := stream.MergeChannels(signalsCtx, d.Config().SampleRate()/100, left, right)
	glog.Info("Stream started")
	var streamClosed bool
	// Pump merged samples into the device; on interrupt, cancel the
	// generators so lrMerged closes, then close streamCh exactly once and
	// wait for the device's Done channel to close before returning.
	for {
		select {
		case samples, more := <-lrMerged:
			if more {
				streamCh <- samples
			} else if !streamClosed {
				glog.Infof("No more samples from generators, closing stream channel")
				close(streamCh)
				streamClosed = true
			}
		case <-c:
			glog.Info("Got interrupt")
			signalsCancel()
		case err, more := <-d.Done():
			if err != nil {
				glog.Infof("Error streaming data: %v", err)
			}
			if !more {
				glog.Info("Exiting")
				return
			}
		}
	}
}
|
package main
import (
"fmt"
"sort"
)
// bucketShift returns 1<<b, optimized for code generation.
// (This is the expression used by the Go runtime's map implementation.)
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	// 4<<(^uintptr(0)>>63)*8 - 1 evaluates to 63 on 64-bit targets and 31
	// on 32-bit targets, so the shift count is always in range for uintptr.
	return uintptr(1) << (b & (4<<(^uintptr(0)>>63)*8 - 1))
}
// main is a scratch program exercising shift/map experiments and the
// findMaxK helper; output order is the observable behavior.
func main() {
	fmt.Println(11 >> 56) // constant shift: prints 0
	m := make(map[string]int, 9000)
	fmt.Println(m)
	fmt.Println(bucketShift(8)) // 1<<8 = 256
	fmt.Println(102500 & (1024 - 1)) // modulo-by-power-of-two via mask
	fmt.Println(findMaxK([]int{
		-1, 2, 3, -3,
	}))
}
// findMaxK returns the largest k > 0 such that both k and -k occur in nums,
// or -1 if no such k exists.
func findMaxK(nums []int) int {
	seen := make(map[int]struct{}, len(nums))
	for _, n := range nums {
		seen[n] = struct{}{}
	}
	best := -1
	for _, n := range nums {
		if n > best {
			if _, ok := seen[-n]; ok {
				best = n
			}
		}
	}
	return best
}
// findMaxK2 is an alternative to findMaxK: it sorts nums in place by
// descending absolute value (negatives first on ties) and returns the first
// value whose negation was already seen, or -1 if none exists.
// NOTE: mutates nums (sorts it).
func findMaxK2(nums []int) int {
	abs := func(v int) int {
		if v < 0 {
			return -v
		}
		return v
	}
	sort.Slice(nums, func(i, j int) bool {
		if abs(nums[i]) == abs(nums[j]) {
			return nums[i] < 0
		}
		return abs(nums[i]) > abs(nums[j])
	})
	seen := make(map[int]bool)
	for _, v := range nums {
		if seen[-v] {
			return v
		}
		seen[v] = true
	}
	return -1
}
|
package product
import (
"MI/pkg/logger"
service "MI/service/product"
"github.com/gin-gonic/gin"
"strconv"
)
func Product(c *gin.Context){
Cid := c.Query("cid")
Page := c.DefaultQuery("page", "1")
PageSize := c.DefaultQuery("pageSize", "7")
Is_recursion := c.Query("is_recursion")
cid, _ := strconv.Atoi(Cid)
page,_ := strconv.Atoi(Page)
pageSize, _ := strconv.Atoi(PageSize)
is_recursion,_:= strconv.ParseBool(Is_recursion)
service.GetProductByCid(c,page,pageSize,cid,is_recursion)
}
func GetProductDetail(c *gin.Context){
pid := c.Query("pid")
id, err := strconv.Atoi(pid)
if err != nil {
logger.Logger.Info(err)
}
service.GetProductByPid(c,id)
}
func GetProductBySearch(c *gin.Context){
search := c.Query("search")
Page := c.DefaultQuery("page", "1")
PageSize := c.DefaultQuery("pageSize", "20")
page,_ := strconv.Atoi(Page)
pageSize, _ := strconv.Atoi(PageSize)
service.Search(c,search,page,pageSize)
} |
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dsmr4p1
const (
	// poly is the reversed (LSB-first) representation of the CRC-16
	// polynomial x^16 + x^15 + x^2 + 1.
	poly = 0xA001
)

// Table is a 256-word table representing the polynomial for efficient processing.
type Table struct {
	entries  [256]uint16
	reversed bool
}

// crcTable is the shared lookup table for the DSMR telegram checksum.
var crcTable = makeTable(poly)

// makeTable builds the 256-entry lookup table for the given reversed
// polynomial.
func makeTable(poly uint16) *Table {
	t := &Table{reversed: false}
	for i := range t.entries {
		crc := uint16(i)
		for bit := 0; bit < 8; bit++ {
			lsbSet := crc&1 == 1
			crc >>= 1
			if lsbSet {
				crc ^= poly
			}
		}
		t.entries[i] = crc
	}
	return t
}

// calcChecksum returns the CRC-16 (ARC variant: reversed poly 0xA001,
// zero initial value) of data.
func calcChecksum(data []byte) uint16 {
	var crc uint16
	for _, b := range data {
		crc = crcTable.entries[byte(crc)^b] ^ (crc >> 8)
	}
	return crc
}
|
package main
import (
"os/exec"
"log"
"fmt"
"bytes"
)
// main runs `dir -lah`, capturing stdout and stderr separately, and prints
// both; it aborts with a fatal log if the command fails to run.
func main() {
	cmd := exec.Command("dir", "-lah")
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("cmd.run() failed with %s\n", err)
	}
	fmt.Printf("out:\n%s\n err:\n%s\n", stdout.String(), stderr.String())
}
|
package model
import (
"database/sql"
"github.com/fberrez/forum/datastore"
"time"
)
// User is a forum account row (table forum_user); the db tags are the
// column names used by sqlx.
type User struct {
Id int `json:"id" db:"user_id"`
Pseudo string `json:"pseudo" db:"user_pseudo"`
Password string `json:"password" db:"user_password"`
Email string `json:"email" db:"user_email"`
Date time.Time `json:"date" db:"user_date"`
Date_lastConnec time.Time `json:"date_lastConnec" db:"user_date_lastConnection"`
Group int `json:"group" db:"user_groupId"`
Karma float64 `json:"karma" db:"user_karma"`
Ip string `json:"ip" db:"user_ip"`
}
// GetUserByPseudo fetches the single user whose pseudo matches exactly.
func GetUserByPseudo(pseudo string) (User, error) {
	var user User
	var err error
	switch datastore.ReadConfig().Type {
	case datastore.TypeMySQL:
		err = datastore.SQL.Get(&user, "SELECT * FROM forum_user WHERE user_pseudo = ? LIMIT 1", pseudo)
	}
	return user, err
}

// GetUserByEmail fetches the single user whose email matches exactly.
func GetUserByEmail(email string) (User, error) {
	var user User
	var err error
	switch datastore.ReadConfig().Type {
	case datastore.TypeMySQL:
		err = datastore.SQL.Get(&user, "SELECT * FROM forum_user WHERE user_email = ? LIMIT 1", email)
	}
	return user, err
}

// CreateUser inserts a new user row with an initial karma of 5.
func CreateUser(newUser User) error {
	var err error
	switch datastore.ReadConfig().Type {
	case datastore.TypeMySQL:
		_, err = datastore.SQL.Exec("INSERT INTO forum_user (user_pseudo, user_password, user_email, user_karma, user_ip) VALUES(?, ?, ?, 5, ?)", newUser.Pseudo, newUser.Password, newUser.Email, newUser.Ip)
	}
	return err
}
// EditUser updates an existing user row identified by newUser.Id.
func EditUser(newUser User) (sql.Result, error) {
	var result sql.Result
	var err error
	switch datastore.ReadConfig().Type {
	case datastore.TypeMySQL:
		// BUG FIX: the statement previously contained the SQL typo
		// `user_groupId:=:group` (invalid `:=`), and its named parameters
		// (:pseudo, :id, ...) did not match the struct's `db` tags
		// (user_pseudo, user_id, ...), which is how sqlx NamedExec binds
		// struct fields. Both are corrected here.
		result, err = datastore.SQL.NamedExec("UPDATE forum_user SET user_pseudo=:user_pseudo, user_password=:user_password, user_email=:user_email, user_karma=:user_karma, user_ip=:user_ip, user_groupId=:user_groupId, user_date_lastConnection=:user_date_lastConnection WHERE user_id=:user_id", newUser)
	}
	return result, err
}
|
package main_test
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// content returns the fixed set of sample lines used as grep targets in the
// end-to-end test.
func content() []string {
	lines := []string{
		"grand theft wumps",
		"replublics of haskell",
		"a sunset is a sunset because it's crimson, beautiful, and I want it to be crimson",
		"hold fast that which is good",
		"domains of interest to people",
		"snowflake",
		"strict or lazy",
		"ehekatl of luck",
		"crime of using a side effect",
		"you must not request the world to stagnate",
	}
	return lines
}
// TestMain performs end to end test.
// NOTE(review): the name TestMain is special in Go testing (the harness entry
// point takes *testing.M); this is an ordinary test taking *testing.T —
// consider renaming to avoid confusion.
func TestMain(t *testing.T) {
	g, err := newGrepper()
	fatalOnError(t, err)
	defer g.close()
	// prepare grep targets
	target := strings.Join(content(), "\n")
	fatalOnError(t, g.createFile("testmain0", target))
	fatalOnError(t, g.copyFile("testmain1", "testmain0"))
	// test runs the freshly built binary with args and compares its output
	// lines against want, order-insensitively (both sides are sorted).
	test := func(t *testing.T, args, want []string) {
		cmd := exec.Command(g.command, args...)
		stdout, err := cmd.StdoutPipe()
		fatalOnError(t, err)
		fatalOnError(t, cmd.Start())
		gotBytes, err := io.ReadAll(stdout)
		fatalOnError(t, err)
		fatalOnError(t, cmd.Wait())
		got := strings.Split(strings.TrimSuffix(string(gotBytes), "\n"), "\n")
		assert.Equal(t, len(want), len(got))
		sort.Strings(want)
		sort.Strings(got)
		for i, w := range want {
			g := got[i]
			assert.Equal(t, w, g)
		}
	}
	// Two files: matches are expected prefixed with "path:" for each file.
	t.Run("files", func(t *testing.T) {
		wantContent := []string{
			"grand theft wumps",
			"snowflake",
		}
		filenames := []string{
			g.filePath("testmain0"),
			g.filePath("testmain1"),
		}
		want := []string{}
		for _, c := range wantContent {
			for _, p := range filenames {
				want = append(want, fmt.Sprintf("%s:%s", p, c))
			}
		}
		args := []string{`snowflake|wumps`}
		args = append(args, filenames...)
		test(t, args, want)
	})
	// Single file: matches are printed bare, without the "path:" prefix.
	t.Run("file", func(t *testing.T) {
		want := []string{
			"grand theft wumps",
			"snowflake",
		}
		args := []string{
			`snowflake|wumps`,
			g.filePath("testmain0"),
		}
		test(t, args, want)
	})
	// No file arguments: input is read from stdin.
	t.Run("stdin", func(t *testing.T) {
		want := []string{
			"grand theft wumps",
			"snowflake",
		}
		cmd := exec.Command(g.command, `snowflake|wumps`)
		stdin, err := cmd.StdinPipe()
		fatalOnError(t, err)
		stdout, err := cmd.StdoutPipe()
		fatalOnError(t, err)
		fatalOnError(t, cmd.Start())
		// Feed the target text concurrently; closing stdin signals EOF.
		go func() {
			defer stdin.Close()
			_, _ = io.WriteString(stdin, target)
		}()
		gotBytes, err := io.ReadAll(stdout)
		fatalOnError(t, err)
		fatalOnError(t, cmd.Wait())
		got := strings.Split(strings.TrimSuffix(string(gotBytes), "\n"), "\n")
		assert.Equal(t, len(want), len(got))
		sort.Strings(want)
		sort.Strings(got)
		for i, w := range want {
			g := got[i]
			assert.Equal(t, w, g)
		}
	})
}
// grepper owns a temporary working directory containing a freshly built
// gogrep binary plus the test fixture files; close() removes everything.
type grepper struct {
	workDir string // temporary directory
	command string // gogrep binary path
}
// newGrepper creates a temporary directory, compiles gogrep into it, and
// returns a handle for driving the binary in tests.
func newGrepper() (*grepper, error) {
	dir, err := os.MkdirTemp("", "gogrep")
	if err != nil {
		return nil, err
	}
	bin := filepath.Join(dir, "gogrep")
	if err := run("go", "build", "-o", bin); err != nil {
		return nil, err
	}
	g := &grepper{workDir: dir, command: bin}
	return g, nil
}
// close removes the grepper's temporary working directory (binary and fixtures).
func (s *grepper) close() { os.RemoveAll(s.workDir) }
// filePath resolves name inside the grepper's working directory.
func (s *grepper) filePath(name string) string { return filepath.Join(s.workDir, name) }
// copyFile duplicates one fixture file to another, both inside the working directory.
func (s *grepper) copyFile(to, from string) error { return copyFile(s.filePath(to), s.filePath(from)) }
// createFile writes content to a new file named name inside the working
// directory, creating or truncating it.
func (s *grepper) createFile(name string, content string) error {
	f, err := os.Create(s.filePath(name))
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.WriteString(content)
	return err
}
func copyFile(to, from string) error {
toFile, err := os.Create(to)
if err != nil {
return err
}
defer toFile.Close()
fromFile, err := os.Open(from)
if err != nil {
return err
}
defer fromFile.Close()
_, err = io.Copy(toFile, fromFile)
return err
}
// run executes name with arg in the current directory, forwarding the
// child's stdout/stderr to the test process's own streams.
func run(name string, arg ...string) error {
	cmd := exec.Command(name, arg...)
	cmd.Dir = "."
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	return cmd.Run()
}
// fatalOnError aborts the calling test immediately when err is non-nil.
func fatalOnError(t *testing.T, err error) {
	t.Helper()
	if err == nil {
		return
	}
	t.Fatal(err)
}
|
package planner
import (
"context"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
)
// Scheduler takes a plan and executes it.
type Scheduler struct {
	// stepCounter keeps track of the number of steps executed by the scheduler.
	// It is used for debug and logged out at the end of every execution.
	stepCounter int
	// logger is an instance of the zap.Logger
	logger *zap.Logger
}
// NewScheduler creates a new scheduler with a no-op logger; use WithLogger
// to attach a real one.
func NewScheduler() *Scheduler {
	s := &Scheduler{logger: zap.NewNop()}
	return s
}
// WithLogger allows you to pass a logger from the outside.
// It replaces the scheduler's current logger (a no-op logger by default).
func (s *Scheduler) WithLogger(logger *zap.Logger) {
	s.logger = logger
}
// Execute runs plan p to completion: it repeatedly asks the plan for the
// next batch of steps and reacts to them until the plan returns no steps.
// Every log line carries a fresh execution_id for correlation.
func (s *Scheduler) Execute(ctx context.Context, p Plan) error {
	executionID := uuid.New().String()
	logger := s.logger.With(zap.String("execution_id", executionID))
	startedAt := time.Now()
	if lp, ok := p.(Loggable); ok {
		lp.WithLogger(logger)
	}
	logger.Info("Started execution plan " + p.Name())
	s.stepCounter = 0
	for {
		steps, err := p.Create(ctx)
		if err != nil {
			logger.Error(err.Error())
			return err
		}
		if len(steps) == 0 {
			break
		}
		if err := s.react(ctx, steps, logger); err != nil {
			logger.Error(err.Error(), zap.String("execution_time", time.Since(startedAt).String()), zap.Int("step_executed", s.stepCounter))
			return err
		}
	}
	logger.Info("Plan executed without errors.", zap.String("execution_time", time.Since(startedAt).String()), zap.Int("step_executed", s.stepCounter))
	return nil
}
// react is a recursive function that goes over all the steps and the one
// returned by previous steps until the plan does not return anymore steps
func (s *Scheduler) react(ctx context.Context, steps []Procedure, logger *zap.Logger) error {
	var innerSteps []Procedure
	for _, step := range steps {
		var err error
		// Count every step, including ones that later fail or are skipped
		// by cancellation.
		s.stepCounter = s.stepCounter + 1
		if loggableS, ok := step.(Loggable); ok {
			loggableS.WithLogger(logger)
		}
		// Check for cancellation before each step; a cancelled context stops
		// the whole recursion.
		select {
		case <-ctx.Done():
			logger.Error("Step not executed.", zap.String("step", step.Name()), zap.Error(ctx.Err()))
			return ctx.Err()
		default:
			innerSteps, err = step.Do(ctx)
			if err != nil {
				logger.Error("Step failed.", zap.String("step", step.Name()), zap.Error(err))
				return err
			}
		}
		// Depth-first: recurse into follow-up steps before moving to the
		// next sibling.
		if len(innerSteps) > 0 {
			if err := s.react(ctx, innerSteps, logger); err != nil {
				return err
			}
		}
	}
	return nil
}
|
package app
import (
"main/src/gvabe/bo/user"
)
const (
	// TableApp is the storage table name for App business objects.
	TableApp = "exter_app"
)
// AppDao defines API to access App storage.
type AppDao interface {
	// Delete removes the specified business object from storage.
	Delete(bo *App) (bool, error)
	// Create persists a new business object to storage.
	Create(bo *App) (bool, error)
	// Get retrieves a business object from storage.
	Get(id string) (*App, error)
	// // getN retrieves N business objects from storage.
	// getN(fromOffset, maxNumRows int) ([]*App, error)
	//
	// // getAll retrieves all available business objects from storage.
	// getAll() ([]*App, error)
	// GetUserApps retrieves all apps belonging to a specific user.
	GetUserApps(u *user.User) ([]*App, error)
	// Update modifies an existing business object.
	Update(bo *App) (bool, error)
}
|
package _300_Longest_Increasing_Subsequence
import "testing"
// TestLengthOfLIS checks lengthOfLIS on a fixed input whose longest
// increasing subsequence is 2,3,7,19,101 — length 5.
func TestLengthOfLIS(t *testing.T) {
	if ret := lengthOfLIS([]int{10, 9, 2, 5, 3, 7, 19, 101, 18}); ret != 5 {
		// BUG FIX: the message said "should be 4" while the assertion
		// (correctly) expects 5.
		t.Errorf("should be 5, wrong length with %d", ret)
	}
}
// TestFindPos checks findPos's position search on small sorted slices.
func TestFindPos(t *testing.T) {
	if pos := findPos([]int{1, 2, 3}, 1); pos != 0 {
		t.Errorf("wrong pos with %d", pos)
	}
	if pos := findPos([]int{1, 2, 3}, 2); pos != 1 {
		t.Errorf("wrong pos with %d", pos)
	}
	if pos := findPos([]int{1, 2, 4}, 3); pos != 2 {
		t.Errorf("wrong pos with %d", pos)
	}
}
|
package main
//TODO write a check to to see how many things the line includes. Should be x number. If not send email to update code.
//TODO set download all to execute at 2 am every night.
//TODO send me an email with the error output.
//TODO make monday - sunday a map
//TODO make a map for seasons courses
//TODO more efficient search
//TODO send me an email with the error output.
import (
"log"
"github.com/ZacharyJacobCollins/Scheduler/services"
"github.com/ZacharyJacobCollins/Scheduler/models"
"fmt"
"strings"
)
// Per-semester course lists, populated by services.LoadSemesters in main.
var fall []models.Course
var summer []models.Course
var winter []models.Course
// main loads the semester course lists and starts the interactive prompt.
func main() {
	//services.DownloadFiles()
	fall, summer, winter = services.LoadSemesters()
	// Robustness fix: the debug print indexed fall[100] unconditionally,
	// which panics whenever fewer than 101 fall courses are loaded.
	if len(fall) > 100 {
		log.Print(fall[100].Category)
	}
	prompt()
}
// prompt reads a professor name from stdin and prints every matching fall
// course via find.
func prompt() {
	fmt.Print("Enter a professor you're looking for to see their location: ")
	var professor string
	fmt.Scan(&professor)
	//fmt.Print("Enter the class number of the course you're looking for: ")
	//var classNumber string
	//fmt.Scan(&classNumber)
	find(professor, fall)
}
// find prints every course in set whose (whitespace-trimmed) professor
// field equals professor.
func find(professor string, set []models.Course) {
	for _, course := range set {
		if strings.TrimSpace(course.Professor) != professor {
			continue
		}
		fmt.Println("Here you are! ", course)
	}
}
// check logs e when it is non-nil; execution continues either way.
func check(e error) {
	if e == nil {
		return
	}
	log.Print(e)
}
|
package split
import (
"strings"
"testing"
)
// TestSplitSimple checks basic word splitting with punctuation stripped.
func TestSplitSimple(t *testing.T) {
	os(t, "Hello, world!", "Hello / world")
}
// TestSplitTwitter checks that @mentions and #hashtags survive splitting intact.
func TestSplitTwitter(t *testing.T) {
	os(t, "Contact @foo.", "Contact / @foo")
	os(t, "Tweet with #CoolHashtag!", "Tweet / with / #CoolHashtag")
}
// TestSplitEmail checks that email addresses are kept as single tokens.
func TestSplitEmail(t *testing.T) {
	os(t, "Contact someone@example.com.", "Contact / someone@example.com")
}
// TestSplitURL checks that URLs are kept as single tokens.
func TestSplitURL(t *testing.T) {
	os(t, "Contact http://example.com/.", "Contact / http://example.com/")
}
// TestSplitPhone checks that hyphenated phone numbers are kept as single tokens.
func TestSplitPhone(t *testing.T) {
	os(t, "Contact 555-55-55.", "Contact / 555-55-55")
}
// os runs SplitWords on input and fails the test when the " / "-joined
// result differs from expected. (The helper's name shadows the standard os
// package; the sibling tests depend on it, so it is kept.)
func os(t *testing.T, input string, expected string) {
	got := strings.Join(SplitWords(input), " / ")
	if got != expected {
		t.Errorf("Split(%#v) != %#v", got, expected)
	}
}
|
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/odpf/optimus/models"
"github.com/odpf/optimus/run"
"github.com/odpf/optimus/utils"
"github.com/odpf/salt/log"
cli "github.com/spf13/cobra"
)
var (
templateEngine = run.NewGoEngine()
)
// renderCommand builds the top-level "render" CLI command. The template
// subcommand is attached only when a job spec repository is available.
func renderCommand(l log.Logger, host string, jobSpecRepo JobSpecRepository) *cli.Command {
	cmd := &cli.Command{
		Use:   "render",
		Short: "convert raw representation of specification to consumables",
	}
	if jobSpecRepo == nil {
		return cmd
	}
	cmd.AddCommand(renderTemplateCommand(l, jobSpecRepo))
	return cmd
}
// renderTemplateCommand builds the "render template" subcommand, which
// renders a job's templated assets into ./render/<job-name> using the
// current time as the assumed execution time.
func renderTemplateCommand(l log.Logger, jobSpecRepo JobSpecRepository) *cli.Command {
	cmd := &cli.Command{
		Use:     "template",
		Short:   "render templates for a job to current 'render' directory",
		Example: "optimus render template",
	}
	cmd.RunE = func(c *cli.Command, args []string) error {
		var err error
		var jobName string
		if len(args) == 0 {
			// doing it locally for now, ideally using optimus service will give
			// more accurate results
			jobName, err = selectJobSurvey(jobSpecRepo)
			if err != nil {
				return err
			}
		} else {
			jobName = args[0]
		}
		// BUG FIX: the GetByName error was discarded, so a missing job would
		// proceed with a zero JobSpec and render into "render/".
		jobSpec, err := jobSpecRepo.GetByName(jobName)
		if err != nil {
			return err
		}
		// create temporary directory
		renderedPath := filepath.Join(".", "render", jobSpec.Name)
		_ = os.MkdirAll(renderedPath, 0770)
		l.Info(fmt.Sprintf("rendering assets in %s", renderedPath))
		now := time.Now()
		l.Info(fmt.Sprintf("assuming execution time as current time of %s", now.Format(models.InstanceScheduledAtTimeLayout)))
		templates, err := run.DumpAssets(jobSpec, now, templateEngine, true)
		if err != nil {
			return err
		}
		writeToFileFn := utils.WriteStringToFileIndexed()
		for name, content := range templates {
			if err := writeToFileFn(filepath.Join(renderedPath, name), content, l.Writer()); err != nil {
				return err
			}
		}
		l.Info(coloredSuccess("render complete"))
		return nil
	}
	return cmd
}
|
package main
import (
"testing"
)
// TestDay02Part1 runs the day-2 solver against the Advent of Code 2019
// example programs via the shared runDayTests harness — presumably `want`
// is compared to the solver's result for `input`; confirm harness semantics.
func TestDay02Part1(t *testing.T) {
	runDayTests(t, 2, []dayTest{
		{
			input: `1,9,10,3,2,3,11,0,99,30,40,50`,
			want:  int64(3500),
		},
		{
			input: `1,0,0,0,99`,
			want:  int64(2),
		},
		{
			input: `2,3,0,3,99`,
			want:  int64(2),
		},
		{
			input: `2,4,4,5,99,0`,
			want:  int64(2),
		},
		{
			input: `1,1,1,4,99,5,6,0,99`,
			want:  int64(30),
		},
	})
}
|
package main
import (
"bufio"
"flag"
"fmt"
"os"
"strings"
)
var (
	// intFlag is the number of trailing lines to print, set via -int or -i.
	intFlag int
)
// main parses flags, opens the sample column file, and prints its last
// intFlag lines.
func main() {
	register()
	// BUG FIX: the os.Open error was discarded; a missing file then surfaced
	// later as a nil-pointer panic inside tail.
	col1, err := os.Open("./chap-02/col1.txt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer col1.Close()
	fmt.Println(strings.Join(tail(col1, intFlag), "\n"))
}
// register wires the -int and -i flags (both default 10, same target) and
// parses the command line.
func register() {
	for _, name := range []string{"int", "i"} {
		flag.IntVar(&intFlag, name, 10, "help message for \"i\" option (default 10)")
	}
	flag.Parse()
}
func tail(f *os.File, n int) []string {
var ret []string
sc := bufio.NewScanner(f)
for i := 0; sc.Scan(); i++ {
ret = append(ret, sc.Text())
}
max := len(ret)
return ret[max-n:]
}
|
package saucecloud
import (
"context"
"testing"
"time"
"github.com/jarcoal/httpmock"
"github.com/saucelabs/saucectl/internal/config"
"github.com/saucelabs/saucectl/internal/espresso"
"github.com/saucelabs/saucectl/internal/job"
"github.com/saucelabs/saucectl/internal/mocks"
"github.com/stretchr/testify/assert"
)
// TestEspresso_GetSuiteNames checks that suite names are joined with ", ".
func TestEspresso_GetSuiteNames(t *testing.T) {
	runner := &EspressoRunner{
		Project: espresso.Project{
			Suites: []espresso.Suite{
				{Name: "suite1"},
				{Name: "suite2"},
				{Name: "suite3"},
			},
		},
	}
	assert.Equal(t, "suite1, suite2, suite3", runner.getSuiteNames())
}
// TestEspressoRunner_CalculateJobCount verifies the expected job count per
// suite: one job per emulator/platform-version combination, multiplied by
// NumShards when sharding is configured.
func TestEspressoRunner_CalculateJobCount(t *testing.T) {
	tests := []struct {
		name   string
		suites []espresso.Suite
		wants  int
	}{
		{
			// 2 versions + 1 version = 3 combinations.
			name: "should multiply emulator combinations",
			suites: []espresso.Suite{
				{
					Name: "valid espresso project",
					Emulators: []config.Emulator{
						{
							Name:             "Android GoogleApi Emulator",
							PlatformVersions: []string{"11.0", "10.0"},
						},
						{
							Name:             "Android Emulator",
							PlatformVersions: []string{"11.0"},
						},
					},
				},
			},
			wants: 3,
		},
		{
			// 2 suites x 3 combinations x 3 shards = 18 jobs.
			name:  "should multiply jobs by NumShards if defined",
			wants: 18,
			suites: []espresso.Suite{
				{
					Name: "first suite",
					TestOptions: espresso.TestOptions{
						NumShards: 3,
					},
					Emulators: []config.Emulator{
						{
							Name:             "Android GoogleApi Emulator",
							PlatformVersions: []string{"11.0", "10.0"},
						},
						{
							Name:             "Android Emulator",
							PlatformVersions: []string{"11.0"},
						},
					},
				},
				{
					Name: "second suite",
					TestOptions: espresso.TestOptions{
						NumShards: 3,
					},
					Emulators: []config.Emulator{
						{
							Name:             "Android GoogleApi Emulator",
							PlatformVersions: []string{"11.0", "10.0"},
						},
						{
							Name:             "Android Emulator",
							PlatformVersions: []string{"11.0"},
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		runner := &EspressoRunner{
			Project: espresso.Project{
				Espresso: espresso.Espresso{
					App:     "/path/to/app.apk",
					TestApp: "/path/to/testApp.apk",
				},
				Suites: tt.suites,
			},
		}
		assert.Equal(t, runner.calculateJobsCount(runner.Project.Suites), tt.wants)
	}
}
// TestEspressoRunner_RunProject drives a full RunProject pass with every
// cloud dependency faked, and asserts a zero exit count plus propagation of
// the suite's device orientation into the job start options.
func TestEspressoRunner_RunProject(t *testing.T) {
	// httpmock blocks stray real HTTP calls for the duration of the test.
	httpmock.Activate()
	defer func() {
		httpmock.DeactivateAndReset()
	}()
	// Fake JobStarter
	var startOpts job.StartOptions
	starter := mocks.FakeJobStarter{
		StartJobFn: func(ctx context.Context, opts job.StartOptions) (jobID string, isRDC bool, err error) {
			// Capture the options so the assertions below can inspect them.
			startOpts = opts
			return "fake-job-id", false, nil
		},
	}
	reader := mocks.FakeJobReader{
		PollJobFn: func(ctx context.Context, id string, interval time.Duration) (job.Job, error) {
			return job.Job{ID: id, Passed: true}, nil
		},
		GetJobAssetFileNamesFn: func(ctx context.Context, jobID string) ([]string, error) {
			return []string{"file1", "file2"}, nil
		},
		GetJobAssetFileContentFn: func(ctx context.Context, jobID, fileName string) ([]byte, error) {
			return []byte("file content"), nil
		},
	}
	writer := mocks.FakeJobWriter{
		UploadAssetFn: func(jobID string, fileName string, contentType string, content []byte) error {
			return nil
		},
	}
	ccyReader := mocks.CCYReader{ReadAllowedCCYfn: func(ctx context.Context) (int, error) {
		return 1, nil
	}}
	uploader := &mocks.FakeProjectUploader{
		UploadSuccess: true,
	}
	downloader := mocks.FakeArifactDownloader{
		DownloadArtifactFn: func(jobID string) {},
	}
	runner := &EspressoRunner{
		CloudRunner: CloudRunner{
			JobStarter:         &starter,
			JobReader:          &reader,
			JobWriter:          &writer,
			CCYReader:          ccyReader,
			ProjectUploader:    uploader,
			ArtifactDownloader: &downloader,
		},
		Project: espresso.Project{
			Espresso: espresso.Espresso{
				App:     "/path/to/app.apk",
				TestApp: "/path/to/testApp.apk",
			},
			Suites: []espresso.Suite{
				{
					Name: "my espresso project",
					Emulators: []config.Emulator{
						{
							Name:             "Android GoogleApi Emulator",
							Orientation:      "landscape",
							PlatformVersions: []string{"11.0"},
						},
					},
				},
			},
			Sauce: config.SauceConfig{
				Concurrency: 1,
			},
		},
	}
	cnt, err := runner.RunProject()
	assert.Nil(t, err)
	assert.Equal(t, cnt, 0)
	assert.Equal(t, "landscape", startOpts.DeviceOrientation)
}
|
package jsonresult
import "github.com/incognitochain/incognito-chain/metadata"
// PortalCustodianWithdrawRequest wraps the status of a custodian withdraw
// request for JSON RPC responses.
type PortalCustodianWithdrawRequest struct {
	CustodianWithdrawRequest metadata.CustodianWithdrawRequestStatus `json:"CustodianWithdraw"`
}
package ansible
import (
"os"
"strings"
)
// Inventory accumulates an Ansible inventory section: a role header, the
// per-instance variable maps, and the rendered text built by stringBuilder.
type Inventory struct {
	Role, Output string
	// Variables maps instance ID -> variable name -> value.
	Variables map[string]map[string]interface{}
}
// NewInventory creates an inventory whose output is seeded with the
// "[role]" section header.
func NewInventory(role string) *Inventory {
	return &Inventory{
		Role:      role,
		Output:    "[" + role + "]" + "\n",
		Variables: map[string]map[string]interface{}{},
	}
}
// AddVirtualMachine registers the VM under id with an empty variable set;
// AddVariable fills it in afterwards.
func (inventory *Inventory) AddVirtualMachine(id string) {
	inventory.Variables[id] = map[string]interface{}{}
}
// AddVariable sets one variable for a previously registered instance.
// NOTE(review): the instance must have been added via AddVirtualMachine
// first — assigning into a nil inner map panics; confirm callers guarantee this.
func (inventory *Inventory) AddVariable(instanceID, variable string, value interface{}) {
	inventory.Variables[instanceID][variable] = value
}
// Export renders the inventory and appends it to the ./hosts file,
// creating the file if it does not exist.
func (inventory *Inventory) Export() error {
	file, err := os.OpenFile("hosts", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer file.Close()
	inventory.stringBuilder()
	if _, err := file.WriteString(inventory.Output); err != nil {
		// BUG FIX: previously returned nil here, silently swallowing
		// write failures.
		return err
	}
	return nil
}
// stringBuilder appends one "<instance> var=value ..." line per registered
// instance to Output, mapping the public_ip/user variables to their
// ansible_host/ansible_user equivalents.
// NOTE(review): both branches of the instanceID check emit the ID (one adds
// a trailing space), and values are assumed to be strings (value.(string)
// panics otherwise) — confirm intended behavior. Map iteration order makes
// the variable order nondeterministic.
func (inventory *Inventory) stringBuilder() {
	for instanceID, variables := range inventory.Variables {
		if instanceID == "" {
			inventory.Output += instanceID
		} else {
			inventory.Output += instanceID + " "
		}
		for variable, value := range variables {
			// Translate the generic names to Ansible's connection variables.
			switch variable {
			case "public_ip":
				variable = "ansible_host"
			case "user":
				variable = "ansible_user"
			}
			inventory.Output += variable + "=" + strings.TrimSuffix(value.(string), "\n") + " "
			// In an [all:vars] section each variable goes on its own line.
			if inventory.Role == "all:vars" {
				inventory.Output += "\n"
			}
		}
		inventory.Output += "\n"
	}
	inventory.Output += "\n"
}
package Problem0467
// findSubstringInWraproundString counts the distinct non-empty substrings of
// p that appear in the infinite wraparound string ...xyzabc... .
// dp[c] holds the longest run of consecutive wraparound characters ending at
// letter c; each such run of length L contributes exactly L distinct
// substrings ending at c, so the answer is the sum over dp.
func findSubstringInWraproundString(p string) int {
	var dp [26]int
	run := 0
	for i := 0; i < len(p); i++ {
		// Extend the run when p[i] follows p[i-1] in the wraparound order
		// ('z' is followed by 'a'); otherwise restart at length 1.
		if i > 0 && (p[i-1]+1 == p[i] || p[i-1] == p[i]+25) {
			run++
		} else {
			run = 1
		}
		idx := p[i] - 'a'
		if run > dp[idx] {
			dp[idx] = run
		}
	}
	total := 0
	for _, v := range dp {
		total += v
	}
	return total
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
package database
import (
"database/sql"
"fmt"
"os"
)
// DB is the shared connection handle, populated by ConnectDB.
var DB *sql.DB

// ConnectDB opens the database described by DATABASE_URL using the pgx
// driver and stores the handle in DB. On error it prints to stderr and
// exits the process.
// NOTE(review): sql.Open only validates its arguments — it does not dial
// the server, so connection problems may surface only on first use;
// consider adding a db.Ping here (confirm).
func ConnectDB() {
	db, err := sql.Open("pgx", os.Getenv("DATABASE_URL"))
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
		os.Exit(1)
	}
	DB = db
}
|
/*
* @lc app=leetcode.cn id=219 lang=golang
*
* [219] 存在重复元素 II
*/
// @lc code=start
package main
import "fmt"
// main exercises containsNearbyDuplicate on the three LeetCode 219 examples
// and prints each input with its result.
func main() {
	inputs := [][]int{
		{1, 2, 3, 1},
		{1, 0, 1, 1},
		{1, 2, 3, 1, 2, 3},
	}
	ks := []int{3, 1, 2}
	for i, nums := range inputs {
		fmt.Printf("%v, %t\n", nums, containsNearbyDuplicate(nums, ks[i]))
	}
}
// containsNearbyDuplicate reports whether nums contains two equal values
// whose indices are at most k apart. It tracks the most recent index of
// each value in a single pass.
func containsNearbyDuplicate(nums []int, k int) bool {
	lastSeen := make(map[int]int, len(nums))
	for idx, v := range nums {
		if prev, ok := lastSeen[v]; ok && idx-prev <= k {
			return true
		}
		lastSeen[v] = idx
	}
	return false
}
// Min returns the smaller of a and b (a when they are equal).
func Min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// @lc code=end
|
package qpeerset
import (
"testing"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/test"
kb "github.com/libp2p/go-libp2p-kbucket"
"github.com/stretchr/testify/require"
)
// TestQPeerSet exercises the query peerset: ordering of GetClosestNInStates,
// state transitions, duplicate adds, and the NumWaiting/NumHeard counters.
func TestQPeerSet(t *testing.T) {
	key := "test"
	qp := NewQueryPeerset(key)
	// -----------------Ordering between peers for the Test -----
	// KEY < peer3 < peer1 < peer4 < peer2
	// ----------------------------------------------------------
	// Generate random peers until the required XOR-distance ordering holds.
	peer2 := test.RandPeerIDFatal(t)
	var peer4 peer.ID
	for {
		peer4 = test.RandPeerIDFatal(t)
		if kb.Closer(peer4, peer2, key) {
			break
		}
	}
	var peer1 peer.ID
	for {
		peer1 = test.RandPeerIDFatal(t)
		if kb.Closer(peer1, peer4, key) {
			break
		}
	}
	var peer3 peer.ID
	for {
		peer3 = test.RandPeerIDFatal(t)
		if kb.Closer(peer3, peer1, key) {
			break
		}
	}
	oracle := test.RandPeerIDFatal(t)
	// find fails
	require.Equal(t, -1, qp.find(peer2))
	// add peer2,assert state & then another add fails
	require.True(t, qp.TryAdd(peer2, oracle))
	require.Equal(t, PeerHeard, qp.GetState(peer2))
	require.False(t, qp.TryAdd(peer2, oracle))
	require.Equal(t, 0, qp.NumWaiting())
	// add peer4
	require.True(t, qp.TryAdd(peer4, oracle))
	cl := qp.GetClosestNInStates(2, PeerHeard, PeerWaiting, PeerQueried)
	require.Equal(t, []peer.ID{peer4, peer2}, cl)
	// asking for more peers than exist returns only the available ones
	cl = qp.GetClosestNInStates(3, PeerHeard, PeerWaiting, PeerQueried)
	require.Equal(t, []peer.ID{peer4, peer2}, cl)
	cl = qp.GetClosestNInStates(1, PeerHeard, PeerWaiting, PeerQueried)
	require.Equal(t, []peer.ID{peer4}, cl)
	// mark as unreachable & try to get it
	qp.SetState(peer4, PeerUnreachable)
	cl = qp.GetClosestNInStates(1, PeerHeard, PeerWaiting, PeerQueried)
	require.Equal(t, []peer.ID{peer2}, cl)
	// add peer1
	require.True(t, qp.TryAdd(peer1, oracle))
	cl = qp.GetClosestNInStates(1, PeerHeard, PeerWaiting, PeerQueried)
	require.Equal(t, []peer.ID{peer1}, cl)
	cl = qp.GetClosestNInStates(2, PeerHeard, PeerWaiting, PeerQueried)
	require.Equal(t, []peer.ID{peer1, peer2}, cl)
	// mark as waiting and assert
	qp.SetState(peer2, PeerWaiting)
	require.Equal(t, []peer.ID{peer2}, qp.GetClosestInStates(PeerWaiting))
	require.Equal(t, []peer.ID{peer1}, qp.GetClosestInStates(PeerHeard))
	require.True(t, qp.TryAdd(peer3, oracle))
	require.Equal(t, []peer.ID{peer3, peer1}, qp.GetClosestInStates(PeerHeard))
	require.Equal(t, 2, qp.NumHeard())
}
|
package usecase
import (
"context"
"errors"
"fmt"
"sync"
"time"
"encoding/json"
"github.com/syariatifaris/shopeetax/app/resource/usecaseres"
)
var (
	//HTTPServiceType service type of http
	HTTPServiceType ServiceType = "HTTPServiceType"
	//SubsriberEventType service type of subscriber
	SubsriberEventType ServiceType = "SubscriberType"
)

//UseCase base contract implemented by every use case / subscriber.
type UseCase interface {
	Name() string
	HandleUseCase(ctx context.Context, res *usecaseres.UseCaseResource, data *UseCaseData) (interface{}, error)
	Notify(ctx context.Context, res *usecaseres.UseCaseResource, data *UseCaseData) error
	NotifyResult() interface{}
}

//ServiceType as use case service type
type ServiceType string

//HandleUseCase abstraction for use case business operation
type HandleUseCase func(ctx context.Context, res *usecaseres.UseCaseResource, data *UseCaseData) (interface{}, error)

//SubscribeData subscriber data (the marshalled payload handed to subscribers)
type SubscribeData struct {
	Data []byte
}

//UseCaseData structure carrying the request payload and subscriber wiring
//for a single use-case invocation.
type UseCaseData struct {
	// NOTE(review): the json tag "data" on ServiceType looks copy-pasted — confirm.
	ServiceType   *ServiceType `json:"data"`
	HTTPData      interface{}
	NSQBody       []byte
	Subscribers   []UseCase
	SubscribeData *SubscribeData
	Language      string
	Arguments     []string // for exec binary only
}
//Cast decodes the invocation payload into target, dispatching on the
//configured service type.
//args:
//  target: destination for the decoded data
//error:
//  non-nil when the service type is missing/unsupported or decoding fails
func (u *UseCaseData) Cast(target interface{}) error {
	if u.ServiceType == nil {
		return errors.New("service type is not defined")
	}
	switch *u.ServiceType {
	case HTTPServiceType:
		return u.castHTTPRequest(target)
	case SubsriberEventType:
		return u.castSubscribeData(target)
	}
	return errors.New("unimplemented service type for casting")
}
//castHTTPRequest decodes the HTTP request payload into target by
//round-tripping it through JSON.
//args:
//  target: cast target
//returns:
//  error: operation
func (u *UseCaseData) castHTTPRequest(target interface{}) error {
	if u.HTTPData == nil {
		return errors.New("empty http data")
	}
	raw, err := json.Marshal(u.HTTPData)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, &target)
}
//castExecRequest decodes the exec-binary arguments into target by
//round-tripping them through JSON.
//args:
//  target: cast target
//returns:
//  error: operation
func (u *UseCaseData) castExecRequest(target interface{}) error {
	if len(u.Arguments) == 0 {
		return errors.New("arguments data len is 0")
	}
	raw, err := json.Marshal(u.Arguments)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, &target)
}
//castNSQBody cast use case's bytes data to specific structure.
//The raw NSQ message body is unmarshalled directly into target.
//args:
//  target: cast target
//returns:
//  error: operation
func (u *UseCaseData) castNSQBody(target interface{}) error {
	if u.NSQBody == nil {
		return errors.New("undefined/ empty nsq body")
	}
	return json.Unmarshal(u.NSQBody, &target)
}
//castSubscribeData cast subscribe data to specific structure
//args:
//  target: cast target
//returns:
//  error: operation
func (u *UseCaseData) castSubscribeData(target interface{}) error {
	// Robustness fix: the sibling casters (castHTTPRequest, castNSQBody)
	// guard against a missing payload; dereferencing a nil SubscribeData
	// here previously paniced.
	if u.SubscribeData == nil {
		return errors.New("undefined/ empty subscribe data")
	}
	return json.Unmarshal(u.SubscribeData.Data, &target)
}
//NotifySubscribers will notify all subscriber with async operation.
//Each subscriber is notified concurrently; the call fails when the timeout
//elapses first, or reports the aggregated subscriber errors otherwise.
//args:
//  ctx: context,
//  timeout: maximum time to wait for all subscribers,
//  res: usecase resource,
//  data: data which will be passed to subscriber
//returns:
//  error: operation
func (u *UseCaseData) NotifySubscribers(ctx context.Context, timeout time.Duration, res *usecaseres.UseCaseResource, data interface{}) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
	// BUG FIX: cancel was only invoked on the timeout branch, leaking the
	// timeout context's resources on every successful run (go vet: lostcancel).
	defer cancel()
	if err := u.setSubscribeData(data); err != nil {
		return err
	}
	doneChan := make(chan bool)
	errChan := make(chan error, len(u.Subscribers))
	go func() {
		var wg sync.WaitGroup
		wg.Add(len(u.Subscribers))
		for _, s := range u.Subscribers {
			go func(s UseCase) {
				defer wg.Done()
				errChan <- s.Notify(ctx, res, u)
			}(s)
		}
		wg.Wait()
		close(errChan)
		doneChan <- true
	}()
	select {
	case <-timeoutCtx.Done():
		return errors.New("context deadline exceeded")
	case <-doneChan:
		// Collect every non-nil subscriber error into a single message.
		var msgs []string
		for e := range errChan {
			if e != nil {
				msgs = append(msgs, e.Error())
			}
		}
		if len(msgs) > 0 {
			byteData, err := json.Marshal(msgs)
			if err != nil {
				return err
			}
			return fmt.Errorf("fail to notify one of several use case %s",
				string(byteData))
		}
	}
	return nil
}
//GetNotifyResultByName will get notify result from specific subscriber.
//args:
//  ctx: context,
//  name: name of the subscriber whose result is requested
//returns:
//  result: Result from notify operation, or nil when no subscriber matches
func (u *UseCaseData) GetNotifyResultByName(ctx context.Context, name string) (result interface{}) {
	if s := u.GetSubscriberByName(name); s != nil {
		return s.NotifyResult()
	}
	return nil
}
//GetSubscriberByName gets subscriber by name.
//args:
//  name: name of subscriber use case
//returns:
//  the matching use case, or nil when none is registered under name
func (u *UseCaseData) GetSubscriberByName(name string) UseCase {
	for _, sub := range u.Subscribers {
		if sub.Name() != name {
			continue
		}
		return sub
	}
	return nil
}
//setSubscribeData marshals data into SubscribeData (the payload handed to
//subscribers) and switches the service type to subscriber mode.
//args:
//  data: data which will be set on SubscribeData
//returns:
//  error: operation
func (u *UseCaseData) setSubscribeData(data interface{}) error {
	payload, err := json.Marshal(data)
	if err != nil {
		return err
	}
	u.SubscribeData = &SubscribeData{Data: payload}
	u.ServiceType = &SubsriberEventType
	return nil
}
|
// +build !exclude_graphdriver_rbd
package daemon
import (
_ "github.com/docker/docker/daemon/graphdriver/rbd"
)
|
package middleware
import (
"io/ioutil"
"gopkg.in/go-playground/validator.v9"
"gopkg.in/yaml.v2"
)
// DatabaseConfig holds the database connection settings; every field is
// required by validation.
type DatabaseConfig struct {
	Host     string `yaml:"host" validate:"required"`
	Port     string `yaml:"port" validate:"required"`
	User     string `yaml:"user" validate:"required"`
	Password string `yaml:"password" validate:"required"`
	Name     string `yaml:"name" validate:"required"`
}

// ServerConfig holds the HTTP server bind settings; only the port is required.
type ServerConfig struct {
	Host string `yaml:"host"`
	Port string `yaml:"port" validate:"required"`
}

// Config is the root of the YAML configuration file.
type Config struct {
	Database DatabaseConfig `yaml:"database" validate:"required"`
	Server   ServerConfig   `yaml:"server" validate:"required"`
}
// ParseConfig reads the YAML file at *cfgPath and decodes it into a Config.
// It does not validate the result; call ValidateConfig separately.
func ParseConfig(cfgPath *string) (*Config, error) {
	raw, err := ioutil.ReadFile(*cfgPath)
	if err != nil {
		return nil, err
	}
	cfg := &Config{}
	if err := yaml.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// ValidateConfig checks the struct-tag validation rules on c and returns
// the validator's error, if any.
func (c *Config) ValidateConfig() error {
	return validator.New().Struct(c)
}
|
// This file was generated for SObject VisualforceAccessMetrics, API Version v43.0 at 2018-07-30 03:47:37.25685993 -0400 EDT m=+23.600351916
package sobjects
import (
"fmt"
"strings"
)
// VisualforceAccessMetrics mirrors the Salesforce SObject of the same name
// (generated for API v43.0); force tags drive (un)marshalling.
type VisualforceAccessMetrics struct {
	BaseSObject
	ApexPageId         string `force:",omitempty"`
	DailyPageViewCount int    `force:",omitempty"`
	Id                 string `force:",omitempty"`
	MetricsDate        string `force:",omitempty"`
	SystemModstamp     string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject.
func (t *VisualforceAccessMetrics) ApiName() string {
	return "VisualforceAccessMetrics"
}
// String renders a multi-line human-readable dump of the record.
// NOTE(review): t.Name is not declared on this struct; presumably it is
// promoted from the embedded BaseSObject — confirm.
func (t *VisualforceAccessMetrics) String() string {
	builder := strings.Builder{}
	builder.WriteString(fmt.Sprintf("VisualforceAccessMetrics #%s - %s\n", t.Id, t.Name))
	builder.WriteString(fmt.Sprintf("\tApexPageId: %v\n", t.ApexPageId))
	builder.WriteString(fmt.Sprintf("\tDailyPageViewCount: %v\n", t.DailyPageViewCount))
	builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id))
	builder.WriteString(fmt.Sprintf("\tMetricsDate: %v\n", t.MetricsDate))
	builder.WriteString(fmt.Sprintf("\tSystemModstamp: %v\n", t.SystemModstamp))
	return builder.String()
}
// VisualforceAccessMetricsQueryResponse is the SOQL query envelope whose
// Records field carries the matched rows.
type VisualforceAccessMetricsQueryResponse struct {
	BaseQuery
	Records []VisualforceAccessMetrics `json:"Records" force:"records"`
}
|
package controllers
import "github.com/astaxie/beego"
// TestController is a sample beego controller used to exercise routing.
type TestController struct {
	beego.Controller
}
// Get stores a sample Username data item and writes "ok" as the body.
func (c *TestController) Get() {
	c.Data["Username"] = "astaxie"
	c.Ctx.Output.Body([]byte("ok"))
}
// List writes a fixed body identifying the list handler.
func (c *TestController) List() {
	c.Ctx.Output.Body([]byte("i am list"))
}
// Params echoes the first three positional route parameters, concatenated.
func (c *TestController) Params() {
	params := c.Ctx.Input.Params()
	c.Ctx.Output.Body([]byte(params["0"] + params["1"] + params["2"]))
}
// Myext echoes the :ext route parameter (the request's file extension).
func (c *TestController) Myext() {
	c.Ctx.Output.Body([]byte(c.Ctx.Input.Param(":ext")))
}
// GetUrl writes the reverse-routed URL of this controller's Myext handler.
func (c *TestController) GetUrl() {
	c.Ctx.Output.Body([]byte(c.URLFor(".Myext")))
}
|
package mysql
import (
	"database/sql"
	"fmt"
	"strings"
	"time"

	"github.com/smilga/analyzer/api"
)
// ReportStore persists api.Report rows and their matches in MySQL.
type ReportStore struct {
	DB *sql.DB
}
// Save replaces the stored report for r's website: the previous report row
// is deleted (archived by a DB trigger) and r is inserted. A zero r.ID is
// treated as a new report — CreatedAt is stamped and the auto-generated ID
// is written back into r.
func (s *ReportStore) Save(r *api.Report) error {
	now := time.Now()
	if r.ID == 0 {
		r.CreatedAt = &now
	}
	// NOTE there is trigger that moves deleted reports to reports_archive table
	_, err := s.DB.Exec(`DELETE from reports where website_id=?`, r.WebsiteID)
	if err != nil {
		return err
	}
	// NOTE(review): a zero r.ID is inserted literally into the id column;
	// presumably the schema treats 0 as "auto-generate" — confirm.
	res, err := s.DB.Exec(`
		INSERT INTO reports
		(id, user_id, website_id, started_in, loaded_in, resource_check_in, html_check_in, total_in, created_at)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
	`, r.ID, r.UserID, r.WebsiteID, r.StartedIn, r.LoadedIn, r.ResourceCheckIn, r.HTMLCheckIn, r.TotalIn, r.CreatedAt)
	if err != nil {
		return err
	}
	if r.ID == 0 {
		id, err := res.LastInsertId()
		if err != nil {
			return err
		}
		r.ID = api.ReportID(id)
	}
	return nil
}
// ByWebsite loads the report for the given website together with its
// matches and the patterns those matches reference.
func (s *ReportStore) ByWebsite(id api.WebsiteID) (*api.Report, error) {
	r := &api.Report{}
	err := s.DB.QueryRow(`
		SELECT r.*, w.url FROM reports r
		LEFT JOIN websites w on w.id = r.website_id
		WHERE r.website_id = ?
	`, id).Scan(&r.ID, &r.UserID, &r.WebsiteID, &r.StartedIn, &r.LoadedIn, &r.ResourceCheckIn, &r.HTMLCheckIn, &r.TotalIn, &r.CreatedAt,
		&r.WebsiteURL)
	if err != nil {
		return nil, err
	}
	rows, err := s.DB.Query(`
		SELECT * FROM matches where report_id = ?
	`, r.ID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	patternIDs := []api.PatternID{}
	for rows.Next() {
		m := &api.Match{}
		err := rows.Scan(&m.ID, &m.PatternID, &m.WebsiteID, &m.ReportID, &m.Value, &m.CreatedAt)
		if err != nil {
			return nil, err
		}
		r.Matches = append(r.Matches, m)
		patternIDs = append(patternIDs, m.PatternID)
	}
	if len(patternIDs) == 0 {
		return r, nil
	}
	// BUG FIX: the IN query was built via fmt.Sprintf("... IN (%s);") with no
	// argument (producing the literal "%!s(MISSING)") and the whole ID slice
	// was passed as one parameter. Build one placeholder per ID instead.
	placeholders := make([]string, len(patternIDs))
	args := make([]interface{}, len(patternIDs))
	for i, pid := range patternIDs {
		placeholders[i] = "?"
		args[i] = pid
	}
	rows, err = s.DB.Query("SELECT * FROM patterns WHERE id IN ("+strings.Join(placeholders, ",")+");", args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	patterns := []*api.Pattern{}
	for rows.Next() {
		var p api.Pattern
		err := rows.Scan(&p.ID, &p.Type, &p.Value, &p.Description, &p.CreatedAt, &p.UpdatedAt, &p.DeletedAt)
		if err != nil {
			return nil, err
		}
		patterns = append(patterns, &p)
	}
	patternMap := make(map[api.PatternID]*api.Pattern, len(patterns))
	for _, p := range patterns {
		patternMap[p.ID] = p
	}
	for _, m := range r.Matches {
		p, ok := patternMap[m.PatternID]
		if !ok {
			// BUG FIX: previously returned the stale nil `err` here, which
			// reported success with a nil report.
			return nil, fmt.Errorf("pattern %v referenced by match %v not found", m.PatternID, m.ID)
		}
		m.Pattern = p
	}
	return r, nil
}
// NewReportStore returns a ReportStore backed by the given database handle.
func NewReportStore(DB *sql.DB) *ReportStore {
	store := &ReportStore{DB: DB}
	return store
}
|
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package test
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/pkg/errors"
"github.com/smartystreets/goconvey/convey"
)
// Load is used by tests to load mocks. It uses runtime.Caller to get the
// calling test file's directory and reads the named file from the adjacent
// "testdata" folder. Any failure panics, which is acceptable in test helpers.
func Load(name string) []byte {
	_, file, _, ok := runtime.Caller(1)
	if !ok {
		panic("could not get the caller that invoked load")
	}
	path := fmt.Sprintf("%s/testdata/%s", filepath.Dir(file), name)
	f, err := os.Open(path)
	if err != nil {
		panic(errors.Wrap(err, fmt.Sprintf("error during open '%s'", path)))
	}
	// BUG FIX: the file handle was never closed, leaking a descriptor per call.
	defer f.Close()
	content, err := ioutil.ReadAll(f)
	if err != nil {
		panic(errors.Wrap(err, fmt.Sprintf("error during read '%s'", path)))
	}
	return content
}
// CompareJSONBytes unmarshals two JSON byte slices into maps and asserts via
// goconvey that both decode cleanly and resemble each other.
func CompareJSONBytes(a, b []byte) {
	left := make(map[string]interface{})
	right := make(map[string]interface{})
	convey.So(json.Unmarshal(a, &left), convey.ShouldBeNil)
	convey.So(json.Unmarshal(b, &right), convey.ShouldBeNil)
	convey.So(left, convey.ShouldResemble, right)
}
|
package main
import (
"bytes"
"fmt"
"strings"
"unicode"
)
// HasPrefix reports whether s begins with prefix, mirroring the semantics of
// strings.HasPrefix.
func HasPrefix(s, prefix string) bool {
	// BUG FIX: the original required len(prefix) < len(s), so a string was
	// wrongly reported as not being a prefix of itself.
	return len(s) >= len(prefix) && s[0:len(prefix)] == prefix
}
// HasSuffix reports whether s ends with suffix, mirroring the semantics of
// strings.HasSuffix.
func HasSuffix(s, suffix string) bool {
	// BUG FIX: the original required len(s) > len(suffix), so a string was
	// wrongly reported as not being a suffix of itself.
	return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
}
// Join concatenates the elements of str, inserting sep between consecutive
// elements. An empty slice yields "".
func Join(str []string, sep string) string {
	switch len(str) {
	case 0:
		return ""
	case 1:
		return str[0]
	}
	var b bytes.Buffer
	b.WriteString(str[0])
	for i := 1; i < len(str); i++ {
		b.WriteString(sep)
		b.WriteString(str[i])
	}
	return b.String()
}
// main demonstrates assorted strings-package helpers by printing their results.
func main() {
	fmt.Println(strings.ContainsAny("team", "i"))
	fmt.Println(strings.ContainsAny("failure", "u & i"))
	fmt.Println(strings.ContainsAny("in failure", "s g"))
	fmt.Println(strings.ContainsAny("foo", ""))
	fmt.Println(strings.ContainsAny("",""))
	// Position of the first occurrence of a substring.
	fmt.Println(strings.Index("anziguoer", "guo"))
	// Number of occurrences of a substring.
	fmt.Println(strings.Count("aaaa22dd", "a"))
	// Splitting on whitespace.
	fmt.Printf("Fields are: %q \n", strings.Fields(" foo bar baz "))
	fmt.Println(strings.FieldsFunc(" foo bar baz ", unicode.IsSpace))
	// repeat
	fmt.Println("ba", strings.Repeat("na", 2))
	// replace
	fmt.Println(strings.Replace("oink oink oink", "oink", "moo", -1))
}
|
package core
import (
"net/http"
)
// getStatus issues a HEAD request to url and returns the HTTP status code,
// or 500 when the request itself fails.
func getStatus(url string) int {
	resp, err := http.Head(url)
	if err != nil {
		return 500
	}
	// BUG FIX: close the response body so the transport can reuse the
	// connection instead of leaking it.
	defer resp.Body.Close()
	return resp.StatusCode
}
// healthCheck probes every URL and returns a map from URL to its HTTP status.
func healthCheck(urls []string) map[string]int {
	statuses := make(map[string]int, len(urls))
	for _, u := range urls {
		statuses[u] = getStatus(u)
	}
	return statuses
}
// Check runs a health check over the configured URL list (from GetUrls) and
// returns each URL's HTTP status code.
func Check() map[string]int {
	urls := GetUrls()
	return healthCheck(urls)
}
|
package goSolution
// generateParenthesisRecursively appends to results every well-formed bracket
// string that extends currentString to exactly n characters in total.
// parValue counts the currently unmatched opening brackets.
func generateParenthesisRecursively(n int, currentIndex int, currentString string, parValue int, results *[]string) {
	if currentIndex == n {
		*results = append(*results, currentString)
		return
	}
	remaining := n - currentIndex
	// An opening bracket is legal only if the characters left after placing
	// it still suffice to close every unmatched bracket.
	if remaining-1 >= parValue+1 {
		generateParenthesisRecursively(n, currentIndex+1, currentString+"(", parValue+1, results)
	}
	// A closing bracket is legal only while something remains open.
	if parValue > 0 {
		generateParenthesisRecursively(n, currentIndex+1, currentString+")", parValue-1, results)
	}
}
// generateParenthesis returns every well-formed combination of n bracket pairs.
func generateParenthesis(n int) []string {
	results := make([]string, 0)
	generateParenthesisRecursively(2*n, 0, "", 0, &results)
	return results
}
|
package array
import "testing"
// Test exercises the dynamic array: it prints the initial length/capacity and
// then inserts 30 elements (past the initial capacity of 10), dumping the
// array after each insert to show growth behaviour.
func Test(t *testing.T) {
	a := array(10)
	println(a.len(), a.cap())
	for i := 0; i < 30; i++ {
		a.insert(i)
		a.show()
	}
}
|
package controller
import (
"fmt"
"github.com/suaas21/library-management-api/controller/authentication"
"github.com/suaas21/library-management-api/database"
"net/http"
"strconv"
"gopkg.in/macaron.v1"
)
// Register creates a new user from the request payload, optionally attaching
// an uploaded profile image, and responds with the created record.
func Register(ctx *macaron.Context, user database.User) {
	imageName, err := FileUpload(ctx)
	if err != nil {
		// don't return, because we ignore if the image is not upload
		fmt.Println("image not uploaded, because:", err.Error())
	}
	// create the user and store in database
	if imageName != "" {
		user.Image = imageName
	}
	result, err := database.CreateUser(user)
	if err != nil {
		ctx.JSON(http.StatusNotImplemented, fmt.Sprintf("the user already exist, err: %v", err.Error()))
		return
	}
	ctx.JSON(http.StatusCreated, result)
}
// UserProfile responds with the profile of the user identified by the
// ":userId" URL parameter, or 400 when the ID is invalid or unknown.
func UserProfile(ctx *macaron.Context) {
	idParam := ctx.Params(":userId")
	id, err := strconv.Atoi(idParam)
	if err != nil {
		ctx.JSON(http.StatusBadRequest, "invalid user profile")
		return
	}
	info, err := database.GetUserInfo(id)
	if err != nil || info == nil {
		ctx.JSON(http.StatusBadRequest, "invalid user profile")
		return
	}
	ctx.JSON(http.StatusOK, info)
}
// UpdateUserProfile updates the profile of the authenticated user. The caller
// identity comes from headers set by the auth middleware; only the "user"
// type may update, and the mail header pins which record is modified.
func UpdateUserProfile(ctx *macaron.Context, user database.User) {
	currentUserType := ctx.Req.Header.Get("current_user_type")
	currentUserMail := ctx.Req.Header.Get("current_user_mail")
	if currentUserType != "user" {
		ctx.JSON(http.StatusNotAcceptable, "type of user didn't match")
		return
	}
	if currentUserMail != "" {
		// Force the update onto the authenticated account, ignoring any
		// mail supplied in the request body.
		user.Mail = currentUserMail
		resultUser, err := database.UpdateUserProfileToDB(user)
		if err != nil {
			ctx.JSON(http.StatusNotImplemented, "profile updating failed")
			return
		}
		ctx.JSON(http.StatusOK, resultUser)
		return
	}
	ctx.JSON(http.StatusNotAcceptable, "mail is not valid")
	return
}
// Login authenticates the supplied credentials against the database and, on
// success, responds with a signed JWT for the user.
func Login(ctx *macaron.Context, user database.User) {
	userLoginInfo, err := database.GetUserLoginInfo(user)
	if err != nil {
		ctx.JSON(http.StatusNotFound, err.Error())
		return
	}
	if userLoginInfo == nil {
		// BUG FIX: corrected the typo "fount" in the client-facing message.
		ctx.JSON(http.StatusNotFound, "User credential not found in database")
		return
	}
	tokenString, err := authentication.GenerateJWT(userLoginInfo.Mail, userLoginInfo.UserType, userLoginInfo.Id)
	if err != nil {
		ctx.JSON(http.StatusUnauthorized, err.Error())
		return
	}
	ctx.JSON(http.StatusOK, tokenString)
}
// ChangeUserImage replaces the profile image of the user identified by the
// ":userId" URL parameter. Requires an authenticated "user" (header set by
// the auth middleware); a failed upload is tolerated and stores an empty name.
func ChangeUserImage(ctx *macaron.Context) {
	currentUserType := ctx.Req.Header.Get("current_user_type")
	if currentUserType != "user" {
		ctx.JSON(http.StatusNotAcceptable, "user is not authenticated, need bearer token to upload image")
		return
	}
	key := ctx.Params(":userId")
	userId, err := strconv.Atoi(key)
	if err != nil {
		ctx.JSON(http.StatusBadGateway, err.Error())
		return
	}
	imageName, err := FileUpload(ctx)
	//here we call the function we made to get the image and save it
	if err != nil {
		// don't return, because we ignore if the image is not upload
		fmt.Println("image not uploaded, because:", err.Error())
	}
	updatedUser, err := database.ChangeUserImageNameToDB(userId, imageName)
	if err != nil {
		ctx.JSON(http.StatusBadGateway, err.Error())
		return
	}
	ctx.JSON(http.StatusOK, updatedUser)
	return
}
|
package main
import (
"log"
"testing"
"time"
)
// TestDownload measures download speed for a fixed URL pinned to a specific
// IP and logs the result in KiB/s.
// NOTE(review): hits the network and never asserts — smoke test only.
func TestDownload(t *testing.T) {
	speed := download("https://universal.bigbuckbunny.workers.dev/Consti10/LiveVideo10ms/master/Screenshots/device2.png?xprotocol=https&xhost=raw.githubusercontent.com", "104.21.90.173", 50*time.Second)
	log.Println((speed / 1024))
}
// TestPing logs the ping result for 1.1.1.1; no assertions.
func TestPing(t *testing.T) {
	log.Println(ping("1.1.1.1", 10))
}
// TestParsePingResponsePacketLoss logs the parsed loss from a sample summary line.
func TestParsePingResponsePacketLoss(t *testing.T) {
	log.Print(parsePingResponsePacketLoss("5 packets transmitted, 4 packets received, 20.0% packet loss"))
}
// TestParsePingResponseAVG logs the parsed average RTT from a sample stats line.
func TestParsePingResponseAVG(t *testing.T) {
	log.Print(parsePingResponseAVG("round-trip min/avg/max/stddev = 164.740/165.221/165.544/0.305 ms"))
}
|
package clcv2
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"os/user"
"path"
"runtime"
"strings"
yaml "gopkg.in/yaml.v2"
"github.com/grrtrr/clcv2/utils"
"github.com/pkg/errors"
)
const (
// Name of the file to store the last bearer-token credentials
credentialsName = "credentials.json"
// Configuration file in CLC_HOME that stores the ClientConfig
configName = "client_config.yml"
)
// CLIClient specializes Client for command-line use
type CLIClient struct {
	*Client
	// Config holds the on-disk commandline configuration this client was built from.
	Config *ClientConfig
}
// ClientConfig encapsulates a commandline-client configuration file
type ClientConfig struct {
	Username string `yaml:"User"`     // CLC portal username
	Password string `yaml:"Password"` // CLC portal password (FIXME: store encrypted)
	Account  string `yaml:"Account"`  // account that was used last time
	Location string `yaml:"Location"` // data centre that was used last time
}
// String renders the configuration for debugging.
// NOTE(review): this includes the plaintext password, so the output must not
// reach logs — consider masking it.
func (c ClientConfig) String() string {
	return fmt.Sprintf("Config(%s/%s, account: %q, location: %q)",
		c.Username, c.Password, c.Account, c.Location)
}
// NewCLIClient returns an authenticated commandline client.
// This will use the default values for AccountAlias and LocationAlias.
// It will respect the following environment variables to override the defaults:
// - CLC_ACCOUNT: takes precedence over default AccountAlias
// - CLC_LOCATION: takes precedence over default LocationAlias
// - CLC_BASE_URL: overrides the API URL (for testing)
func NewCLIClient(conf *ClientConfig) (*CLIClient, error) {
	// Attempt to load existing configuration first, and reconcile with @conf.
	savedConfig, err := LoadClientConfig()
	if err != nil && conf == nil {
		return nil, errors.Errorf("failed to load saved configuration: %s", err)
	}
	if conf == nil {
		if conf = savedConfig; conf == nil {
			conf = &ClientConfig{}
		}
	} else if savedConfig != nil { // Override only parameters that were not explicitly set
		if conf.Username == "" {
			conf.Username = savedConfig.Username
		}
		if conf.Password == "" {
			conf.Password = savedConfig.Password
		}
		if conf.Account == "" {
			conf.Account = savedConfig.Account
		}
		if conf.Location == "" {
			conf.Location = savedConfig.Location
		}
	}
	// Ensure that both username and password are filled in
	conf.Username, conf.Password = utils.ResolveUserAndPass(conf.Username, conf.Password)
	client := &CLIClient{
		Client: newClient(conf.Username, conf.Password),
		Config: conf,
	}
	// Persist refreshed bearer tokens whenever the client re-authenticates.
	client.credentialsChanged = client.saveCredentials
	if Debug {
		client.Log = log.New(os.Stdout, "", log.Ltime|log.Lshortfile)
	}
	// Reuse cached credentials when present; otherwise perform a fresh login.
	if loginRes, err := client.loadCredentials(); err != nil {
		return nil, err
	} else if loginRes != nil {
		client.credentials = loginRes
	} else if err = client.login(); err != nil {
		return nil, err
	}
	// Set/override AccountAlias
	if conf.Account != "" {
		client.AccountAlias = conf.Account
	} else if account := os.Getenv("CLC_ACCOUNT"); account != "" {
		client.AccountAlias = account
	} else {
		client.AccountAlias = client.credentials.AccountAlias
	}
	// Set/override LocationAlias
	if conf.Location != "" {
		client.LocationAlias = conf.Location
	} else if location := os.Getenv("CLC_LOCATION"); location != "" {
		client.LocationAlias = location
	} else {
		client.LocationAlias = client.credentials.LocationAlias
	}
	// Set/override %baseURL (experimental).
	if envURL := os.Getenv("CLC_BASE_URL"); envURL != "" {
		url, err := url.Parse(envURL)
		if err != nil {
			return nil, err
		}
		if url.Scheme == "" {
			url.Scheme = "https"
		}
		baseURL = url.String()
	}
	return client, nil
}
// GetClcHome returns the path to the CLC configuration directory, which is the same
// as used by, and compatible with, clc-go-cli (including the CLC_HOME environment variable).
func GetClcHome() string {
if clcHome := os.Getenv("CLC_HOME"); clcHome != "" {
return clcHome
}
u, err := user.Current()
if err != nil {
log.Fatalf("failed to look up current user: %s", err)
}
if runtime.GOOS == "windows" {
return path.Join(u.HomeDir, "clc")
} else {
return path.Join(u.HomeDir, ".clc")
}
}
// LoadClientConfig attempts to load a configuration from CLC_HOME/configName
// Falls back to importing a clc-go-cli configuration when no native file exists.
func LoadClientConfig() (*ClientConfig, error) {
	var confFile = path.Join(GetClcHome(), configName)
	if _, err := os.Stat(confFile); err == nil {
		var config ClientConfig
		fd, err := os.Open(confFile)
		if err != nil {
			return nil, errors.Errorf("failed to load client config: %s", err)
		}
		defer fd.Close()
		if content, err := ioutil.ReadAll(fd); err != nil {
			return nil, errors.Errorf("failed to read %s: %s", confFile, err)
		} else if err = yaml.Unmarshal(content, &config); err != nil {
			return nil, errors.Errorf("failed to deserialize %s: %s", confFile, err)
		}
		return &config, nil
	}
	return configFromCliGo()
}
// configFromCliGo checks to see if a clc-cli-go configuration file exists.
// If yes, it will import a client configuration based on those settings.
// Returns (nil, nil) when no such file is present.
func configFromCliGo() (*ClientConfig, error) {
	var confFile = path.Join(GetClcHome(), "config.yml")
	if _, err := os.Stat(confFile); err == nil {
		var cliGoData = make(map[string]interface{})
		fd, err := os.Open(confFile)
		if err != nil {
			return nil, errors.Errorf("failed to load client config: %s", err)
		}
		defer fd.Close()
		if content, err := ioutil.ReadAll(fd); err != nil {
			return nil, errors.Errorf("failed to read %s: %s", confFile, err)
		} else if err = yaml.Unmarshal(content, cliGoData); err != nil {
			return nil, errors.Errorf("failed to deserialize %s: %s", confFile, err)
		}
		// BUG FIX: the original used unchecked type assertions, which panic
		// when a key is absent or not a string. Missing values now simply
		// import as empty strings.
		stringAt := func(key string) string {
			s, _ := cliGoData[key].(string)
			return s
		}
		return &ClientConfig{
			Username: stringAt("user"),
			Password: stringAt("password"),
			Location: stringAt("defaultdatacenter"),
		}, nil
	}
	return nil, nil
}
// SaveConfig writes the configuration data of @c to CLC_HOME/configName
func (c *CLIClient) SaveConfig() error {
	if c == nil || c.Client == nil {
		return errors.New("attempt to save configuration for nil client")
	}
	if c.Config == nil {
		return errors.New("attempt to save a nil client configuration")
	}
	enc, err := yaml.Marshal(c.Config)
	if err != nil {
		return errors.Errorf("failed to serialize client configuration: %s", err)
	}
	return writeCLCdata(configName, enc, 0644)
}
// Populate and allocate c.credentials, either by loading from file or via a fresh login.
// Save (updated) credentials if successful. Returns (nil, nil) when no usable
// cached credentials exist, signalling the caller to log in afresh.
func (c *CLIClient) loadCredentials() (*LoginRes, error) {
	var credsFile = path.Join(GetClcHome(), credentialsName)
	if _, err := os.Stat(credsFile); err == nil {
		var loginRes = new(LoginRes)
		fd, err := os.Open(credsFile)
		if err != nil {
			return nil, errors.Errorf("failed to load credentials: %s", err)
		}
		defer fd.Close()
		if err = json.NewDecoder(fd).Decode(loginRes); err != nil {
			return nil, errors.Errorf("failed to deserialize %s: %s", credsFile, err)
		}
		// IDIOM FIX: strings.EqualFold replaces ToLower(a) == ToLower(b) —
		// allocation-free and correct under Unicode case folding.
		if strings.EqualFold(loginRes.User, c.LoginReq.Username) {
			return loginRes, nil
		}
		// FIXME: if names differ, save to credsFile + loginRes.User
		/* User switch: move the original credentials file to a backup extension. */
		// Best-effort: a failed rename only means the stale file lingers.
		_ = os.Rename(credsFile, credsFile+".bak")
	} else if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return nil, nil
}
// Save credentials to CLC_HOME/$credentialsName. Return error on failure.
func (c *CLIClient) saveCredentials() error {
	if c.credentials == nil {
		// Nothing to serialize.
		return nil
	}
	enc, err := json.MarshalIndent(c.credentials, "", "\t")
	if err != nil {
		return errors.Errorf("failed to serialize bearer credentials: %s", err)
	}
	return writeCLCdata(credentialsName, append(enc, '\n'), 0600)
}
// writeCLCdata writes @data to CLC_HOME/fileName, creating CLC_HOME on demand.
func writeCLCdata(fileName string, data []byte, perm os.FileMode) error {
	var clcHome = GetClcHome()
	// MkdirAll is a no-op when the directory already exists, so the previous
	// Stat/IsNotExist check was redundant and raced with concurrent creators.
	if err := os.MkdirAll(clcHome, 0700); err != nil {
		return errors.Errorf("failed to create CLC directory %s: %s", clcHome, err)
	}
	return ioutil.WriteFile(path.Join(clcHome, fileName), data, perm)
}
|
package gui
import (
"fyne.io/fyne/v2"
"fyne.io/fyne/v2/test"
"github.com/archon/backend"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Behavioural specs for the EnterEntry widget: rendering, Enter-key clearing,
// and note creation against a fresh backend session per spec.
var _ = Describe("EnterEntry widget", func() {
	var session *backend.Session
	var entry *EnterEntry
	BeforeEach(func() {
		// A fresh session and widget per spec keeps state from leaking.
		session = backend.NewSession("Untitled Session", 0)
		entry = NewEnterEntry(session)
	})
	It("should render without crashing", func() {
		render := func() {
			test.NewWindow(entry)
		}
		Expect(render).ToNot(Panic())
	})
	It("should clear the text from the field when Enter is pressed", func() {
		test.NewWindow(entry)
		test.Type(entry, "Hello world!")
		entry.TypedKey(&fyne.KeyEvent{Name: fyne.KeyReturn})
		Expect(entry.Text).To(BeEmpty())
	})
	It("should not clear the text from the field if Enter is not pressed", func() {
		test.NewWindow(entry)
		note := "Hello world!"
		test.Type(entry, note)
		Expect(entry.Text).To(Equal(note))
	})
	It("should modify the session such that a new note exists when the user presses Enter", func() {
		test.NewWindow(entry)
		note := "Hello world!"
		test.Type(entry, note)
		entry.TypedKey(&fyne.KeyEvent{Name: fyne.KeyReturn})
		Expect(session.Notes).To(HaveLen(1))
		Expect(session.Notes[0].Content).To(Equal(note))
	})
	It("should not modify the session such that a new note exists when the user does not press Enter", func() {
		test.NewWindow(entry)
		note := "Hello world!"
		test.Type(entry, note)
		Expect(session.Notes).To(BeEmpty())
	})
})
|
/*
Package account handles account requests.
*/
package account
import (
"encoding/json"
"github.com/MerinEREN/iiPackages/api"
"github.com/MerinEREN/iiPackages/datastore/account"
"github.com/MerinEREN/iiPackages/datastore/user"
"github.com/MerinEREN/iiPackages/session"
"google.golang.org/appengine/datastore"
"google.golang.org/appengine/memcache"
"log"
"net/http"
)
// Handler returns and modifies account entities.
// Routing: the path suffix after "/accounts/" is an (optionally empty)
// encoded datastore key. DELETE removes that entity; GET with an ID fetches
// it directly; GET without an ID resolves the requesting user's own account,
// consulting memcache ("acc", then "uKey") before falling back to the
// datastore. Marshalling/memcache failures on the caching path are logged
// but deliberately non-fatal.
func Handler(s *session.Session) {
	k := new(datastore.Key)
	var err error
	ID := s.R.URL.Path[len("/accounts/"):]
	if ID == "" && s.R.Method != "GET" {
		log.Printf("Path: %s, Error: no user ID\n", s.R.URL.Path)
		http.Error(s.W, "No user ID", http.StatusBadRequest)
		return
	}
	if ID != "" {
		k, err = datastore.DecodeKey(ID)
		if err != nil {
			log.Printf("Page:%s, Error: %v\n", s.R.URL.Path, err)
			http.Error(s.W, err.Error(), http.StatusInternalServerError)
			return
		}
	}
	switch s.R.Method {
	case "PUT":
		// Handle PUT requests
	case "DELETE":
		err = datastore.Delete(s.Ctx, k)
		if err != nil {
			log.Printf("Path: %s, Error: %v\n", s.R.URL.Path, err)
			http.Error(s.W, err.Error(), http.StatusInternalServerError)
			return
		}
		s.W.WriteHeader(http.StatusNoContent)
	default:
		// Handles GET requests
		acc := new(account.Account)
		if ID == "" {
			// Fast path: the whole account may already be cached.
			item, err := memcache.Get(s.Ctx, "acc")
			if err == nil {
				err = json.Unmarshal(item.Value, acc)
				if err != nil {
					log.Printf("Page:%s, Error: %v\n",
						s.R.URL.Path, err)
					http.Error(s.W, err.Error(),
						http.StatusInternalServerError)
					return
				}
			} else {
				// Resolve the user's key (cached or via email lookup),
				// then load the parent account entity.
				ku := new(datastore.Key)
				item, err = memcache.Get(s.Ctx, "uKey")
				if err == nil {
					err = json.Unmarshal(item.Value, ku)
					if err != nil {
						log.Printf("Page:%s, Error: %v\n",
							s.R.URL.Path, err)
						http.Error(s.W, err.Error(),
							http.StatusInternalServerError)
						return
					}
					acc, err = account.Get(s.Ctx, ku.Parent())
					if err != nil {
						log.Printf("Page:%s, Error: %v\n",
							s.R.URL.Path, err)
						// ALSO LOG THIS WITH DATASTORE LOG !!!!!!!
						http.Error(s.W, err.Error(),
							http.StatusInternalServerError)
						return
					}
				} else {
					ku, err = user.GetKeyViaEmail(s)
					if err == datastore.Done {
						// IMPOSIBLE BUT !!!!!!!!!!!!!!!!!!!!!!!!!!
						log.Printf("Page:%s, Error: %v\n",
							s.R.URL.Path, err)
						// ALSO LOG THIS WITH DATASTORE LOG !!!!!!!
						http.Error(s.W, err.Error(),
							http.StatusNoContent)
						return
					} else if err != nil {
						log.Printf("Page:%s, Error: %v\n",
							s.R.URL.Path, err)
						// ALSO LOG THIS WITH DATASTORE LOG !!!!!!!
						http.Error(s.W, err.Error(),
							http.StatusInternalServerError)
						return
					} else {
						acc, err = account.Get(s.Ctx, ku.Parent())
						if err != nil {
							log.Printf("Page:%s, Error: %v\n",
								s.R.URL.Path, err)
							// ALSO LOG THIS WITH DATASTORE LOG
							http.Error(s.W, err.Error(),
								http.StatusInternalServerError)
							return
						}
					}
					// Cache the freshly resolved user key (best effort).
					bs, err := json.Marshal(ku)
					if err != nil {
						log.Printf("Page:%s, Error: %v\n",
							s.R.URL.Path, err)
					}
					item = &memcache.Item{
						Key:   "uKey",
						Value: bs,
					}
					err = memcache.Add(s.Ctx, item)
					if err != nil {
						log.Printf("Page:%s, Error: %v\n",
							s.R.URL.Path, err)
					}
				}
				// Cache the resolved account (best effort).
				bs, err := json.Marshal(acc)
				if err != nil {
					log.Printf("Page:%s, Error: %v\n",
						s.R.URL.Path, err)
				}
				item = &memcache.Item{
					Key:   "acc",
					Value: bs,
				}
				err = memcache.Add(s.Ctx, item)
				if err != nil {
					log.Printf("Page:%s, Error: %v\n",
						s.R.URL.Path, err)
				}
			}
		} else {
			acc, err = account.Get(s.Ctx, k)
			if err != nil {
				log.Printf("Path: %s, Error: %v\n", s.R.URL.Path, err)
				http.Error(s.W, err.Error(),
					http.StatusInternalServerError)
				return
			}
		}
		s.W.Header().Set("Content-Type", "application/json")
		api.WriteResponseJSON(s, acc)
	}
	/* t := &http.Transport{}
	t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
	c := &http.Client{Transport: t}
	res, err := c.Get("file:///etc/passwd")
	log.Println(res, err) */
	// To respond to request without any data
	// w.WriteHeader(StatusOK)
	// Always send corresponding header values instead of defaults !!!!
	//w.Header().Set("Content-Type", "application/json; charset=utf-8")
	// http.NotFound(w, r)
	// http.Redirect(w, r, "/MerinEREN", http.StatusFound)
}
|
// Copyright 2020. Akamai Technologies, Inc
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strings"
"time"
edgegrid "github.com/akamai/AkamaiOPEN-edgegrid-golang"
)
// doHTTPRequest signs an EdgeGrid request, sends it to config.Host and returns
// the response together with its fully-read body. Any failure prints the error
// and terminates the process (CLI-style error handling).
func doHTTPRequest(method string, url string, payload *[]byte) (*http.Response, *[]byte) {
	var err error
	client := http.Client{}
	var req *http.Request
	if payload != nil {
		req, err = http.NewRequest(method, "https://"+config.Host+url, bytes.NewBuffer(*payload))
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
	} else {
		req, err = http.NewRequest(method, "https://"+config.Host+url, nil)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
	}
	req = edgegrid.AddRequestHeader(config, req)
	req.Header.Set("x-user-agent", "cli-diagnostics")
	resp, er := client.Do(req)
	if er != nil {
		fmt.Println(er)
		os.Exit(1)
	}
	// BUG FIX: close the response body once it has been drained so the
	// transport can release/reuse the underlying connection.
	defer resp.Body.Close()
	byt, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	return resp, &byt
}
// checkEdgeServerIPorLocation classifies addr: returns (0, addr) when it is an
// IP address, (1, canonical id) when it case-insensitively matches a ghost
// location, and (2, "") otherwise.
func checkEdgeServerIPorLocation(addr string) (int, string) {
	ip := net.ParseIP(addr)
	if ip != nil {
		return 0, addr
	}
	resp, byt := doHTTPRequest("GET", "/diagnostic-tools/v2/ghost-locations/available", nil)
	if resp.StatusCode != 200 {
		printGenericErrorMsg()
		os.Exit(1)
	}
	var obj GhostLocationsList
	err := json.Unmarshal(*byt, &obj)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for _, loc := range obj.Locations {
		// IDIOM FIX: EqualFold replaces ToLower(a) == ToLower(b); it is
		// allocation-free and handles Unicode case folding correctly.
		if strings.EqualFold(loc["id"], addr) {
			return 1, loc["id"]
		}
	}
	return 2, ""
}
// validErrorString reports whether str is an acceptable error-string input.
// NOTE(review): currently a stub that accepts everything — confirm whether
// real validation is still pending or this is intentional.
func validErrorString(str string) bool {
	return true
}
// checkAbsoluteURL reports whether str parses as an absolute URL (has a scheme).
func checkAbsoluteURL(str string) bool {
	parsed, err := url.Parse(str)
	return err == nil && parsed.IsAbs()
}
// isoToDate splits an RFC3339 timestamp into weekday, month, day, year and an
// unpadded "H:M:S" clock string. Weekday and clock are computed in UTC while
// month/day/year keep the timestamp's own zone, matching existing behaviour.
// NOTE(review): a parse failure is silently ignored and yields the zero time.
func isoToDate(isoDate string) (string, string, int, int, string) {
	parsed, _ := time.Parse(time.RFC3339, isoDate)
	hour, minute, second := parsed.UTC().Clock()
	clock := fmt.Sprintf("%d:%d:%d", hour, minute, second)
	weekday := parsed.UTC().Weekday().String()
	return weekday, parsed.Month().String(), parsed.Day(), parsed.Year(), clock
}
func getReportedTime() string {
loc, _ := time.LoadLocation("UTC")
reportedTime := time.Now().In(loc)
return reportedTime.Format(time.RFC3339)
}
// getDecodedResponse rewrites the JSON-escaped forms of '<', '>' and '&'
// back into their literal characters.
func getDecodedResponse(responseJson []byte) []byte {
	replacements := [][2][]byte{
		{[]byte(`\u003c`), []byte("<")},
		{[]byte(`\u003e`), []byte(">")},
		{[]byte(`\u0026`), []byte("&")},
	}
	for _, r := range replacements {
		responseJson = bytes.Replace(responseJson, r[0], r[1], -1)
	}
	return responseJson
}
|
package main
import "fmt"
// main demonstrates cascaded range checks on a sample value.
func main() {
	i := 100
	// The if/else-if ladder expressed as an expression switch; later cases
	// are only reached when earlier ones fail, so the upper bounds are implied.
	switch {
	case i >= 120:
		fmt.Println("over 120")
	case i >= 100:
		fmt.Println("over 100 under 120")
	case i >= 50:
		fmt.Println("over 50 under 100")
	default:
		fmt.Println("under 50")
	}
}
|
package libs
import (
"context"
"crypto/sha256"
"database/sql"
"encoding/json"
"errors"
"fmt"
"log"
"math/big"
"net/http"
"github.com/gorilla/mux"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
)
// Checks if there is a CompletePurchase event indexed by the clientAddr and the hash
// Scans the balance contract's logs from the genesis block; topic[1] carries
// the purchase hash and topic[2] the buyer address. Returns (false, error)
// when no matching event exists.
func checkEventLog(ethClient ComponentConfig, clientAddr, hash string) (bool, error) {
	// Event signature hashed with Keccak-256 forms the first topic filter.
	topic := []byte("CompletePurchase(bytes32,address,address,bytes32)")
	query := ethereum.FilterQuery{
		FromBlock: big.NewInt(0),
		Addresses: []common.Address{
			common.HexToAddress(ethClient.GeneralConfig["balanceContractAddr"].(string))},
		Topics: [][]common.Hash{{crypto.Keccak256Hash(topic)}},
	}
	logs, err := ethClient.EthereumClient.FilterLogs(context.Background(), query)
	if err != nil {
		return false, err
	}
	for _, vlog := range logs {
		hashLog := vlog.Topics[1].Hex()
		fromLog := common.HexToAddress(vlog.Topics[2].Hex())
		if hashLog == hash && fromLog == common.HexToAddress(clientAddr) {
			return true, nil
		}
	}
	return false, errors.New("Did not find any event that matches the requirements")
}
// queryDB connects to the MySQL database described by configParams and returns
// the (hash, measurement) row for the given hash, marshalled as JSON.
//
// SECURITY NOTE(review): tableName is still interpolated into the statement
// because SQL cannot bind identifiers; it originates from the request URL and
// MUST be validated against a whitelist by the caller.
func queryDB(configParams map[string]interface{}, hash, tableName string) ([]byte, error) {
	type dbResponseStruct struct {
		Hash        string `json:"hash"`
		Measurement string `json:"measurement"`
	}
	var dbResponse dbResponseStruct
	// The format of the URL to the database is:
	// user:password@tcp(dbHost)/dbName
	connParams := configParams["dbUsername"].(string) + ":" +
		configParams["dbPassword"].(string) + "@tcp(" +
		configParams["dbHost"].(string) + ")/" +
		configParams["dbName"].(string)
	// Connect to the MySQL database
	db, err := sql.Open("mysql", connParams)
	if err != nil {
		return nil, err
	}
	defer db.Close()
	// SECURITY FIX: bind the hash as a query parameter instead of formatting
	// it into the SQL text, closing an injection vector.
	query := fmt.Sprintf("SELECT * FROM %s WHERE Hash=?;", tableName)
	err = db.QueryRow(query, hash).Scan(&dbResponse.Hash, &dbResponse.Measurement)
	if err != nil {
		return nil, err
	}
	// Convert the struct to JSON
	response, err := json.Marshal(dbResponse)
	if err != nil {
		return nil, err
	}
	return response, nil
}
// RetrieveMeasurement sends the purchases to customers
// 1) The body of the request must be a json string containing the following fields:
//    + clientAddr -> address of the customer who purchased the measurement
//    + hash -> hash of the measurement
//    + timestamp -> timestamp of the request
//    + signature -> signature of the previous fields
// 2) Gets the public key of the customer from the Access SC
// 3) Verifies the identity of the customer by checking the signature of the message and the
//    public key obtained in the previous point.
// 4) Verifies that there is an event in the Blockchain matching the purchase
// 5) Retrieves the measurement from the DB and sends it to the customer
func RetrieveMeasurement(ethClient ComponentConfig, body map[string]interface{}, req *http.Request) ([]byte, error) {
	// Extract the parameters from the URL
	tableName := mux.Vars(req)["table"]
	hashURL := mux.Vars(req)["hash"]
	clientAddr := body["clientAddr"].(string)
	hash := body["hash"].(string)
	// The hash indicated in the path of the URL must match the one in the body of the message
	if hashURL != hash {
		log.Println("Wrong URL")
		return nil, errors.New("Wrong URL")
	}
	// BUG FIX: corrected the "Resquest" typo in the log line.
	log.Printf("%s: Request measurement %s\n", clientAddr, hash)
	// Get the public key of the customer
	pubKey, err := ethClient.AccessCon.PubKeysKeystore(nil, common.HexToAddress(clientAddr))
	if err != nil {
		log.Println(err)
		return nil, err
	}
	// Verify the signature of the message
	log.Printf("%s: Verifying customer signature\n", clientAddr)
	msg := clientAddr + hash + fmt.Sprintf("%d", uint64(body["timestamp"].(float64)))
	msgHash := sha256.Sum256([]byte(msg))
	// BUG FIX: the original re-checked the stale err value after this call even
	// though secp256k1.VerifySignature returns only a bool; that dead branch
	// has been removed.
	isSignatureOK := secp256k1.VerifySignature(common.Hex2Bytes(pubKey), msgHash[:], common.Hex2Bytes(body["signature"].(string)))
	if !isSignatureOK {
		log.Printf("%s: Wrong signature\n", clientAddr)
		return nil, errors.New("Wrong signature")
	}
	log.Printf("%s: Signature ok\n", clientAddr)
	// Check that there is an event that matches the purchase
	_, err = checkEventLog(ethClient, "0x"+clientAddr, "0x"+hash)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	// Query the database
	response, err := queryDB(ethClient.GeneralConfig, hashURL, tableName)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	return response, nil
}
|
package web
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/lonnng/nex"
"github.com/johnull/mop-ng/internal/errutil"
"github.com/johnull/mop-ng/internal/token"
"github.com/johnull/mop-ng/internal/db"
"github.com/johnull/mop-ng/cmd/mop/web/protocol"
"github.com/johnull/mop-ng/internal/algoutil"
"github.com/johnull/mop-ng/internal/db/model"
)
const (
	// Client-facing (Chinese) agent status labels: pending review / approved.
	AgentStatusPending = "等待审核"
	AgentStatusActived = "审核通过"
)
// initAgentLevel is the level assigned to self-registered agents.
const initAgentLevel = 3
// ErrRechargeServerFailed signals that the recharge server returned an error.
var ErrRechargeServerFailed = errors.New("充值服务器返回错误")
// rechargeMessage is the payload exchanged with the recharge server.
type rechargeMessage struct {
	OrderId string `json:"order_id"`
	Quantity string `json:"quantity"`
	Note string `json:"note"`
}
// RechargeRequest is an inbound recharge request body.
type RechargeRequest struct {
	Count int64 `json:"count"`
	Uid int64 `json:"uid"`
}
// StringMessage is a generic code-plus-message response envelope.
type StringMessage struct {
	Code int `json:"code"`
	Message string `json:"message"`
}
// agentListHandler returns a paginated list of ordinary agents with their
// status labels and total recharge amounts. Requires a bearer token belonging
// to a super-admin or admin.
func agentListHandler(query *nex.Form, headers http.Header) (*protocol.AgentListResponse, error) {
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	agent, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	// Only super-admins and admins may perform this operation.
	if agent.Role != db.RoleSuperAdmin && agent.Role != db.RoleAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	offset := query.IntOrDefault("offset", 0)
	count := query.IntOrDefault("count", -1)
	account := meta.Account
	// The super-admin sees every agent; others see only agents they created.
	if agent.Account == superAdmin {
		account = ""
	}
	agents, total, err := db.QueryUserList(offset, count, account, db.RoleOrdinary)
	if err != nil {
		return nil, err
	}
	ret := make([]protocol.AgentDetail, len(agents))
	for i, agent := range agents {
		status := AgentStatusPending
		if agent.Status == db.AgentStatusActivated {
			status = AgentStatusActived
		}
		createAccount := agent.CreateAccount
		if createAccount == "" {
			createAccount = agent.ConfirmAccount
		}
		totalRecharge, err := db.TotalRecharge(agent.Account)
		if err != nil {
			return nil, err
		}
		ret[i] = protocol.AgentDetail{
			Id: agent.Id,
			Name: agent.Name,
			Account: agent.Account,
			CardCount: agent.CardCount,
			CreateAt: agent.CreateAt,
			CreateAccount: createAccount,
			Level: agent.Level,
			RechargeUrl: agent.RechargeUrl,
			Discount: agent.Discount,
			Status: status,
			Extra: agent.Extra,
			TotalRecharge: totalRecharge,
		}
	}
	return &protocol.AgentListResponse{Agents: ret, Total: total}, nil
}
// searchAgentHandler searches ordinary agents by keyword. Requires a bearer
// token belonging to a super-admin or admin.
func searchAgentHandler(query *nex.Form, headers http.Header) (*protocol.AgentListResponse, error) {
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	agent, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	// Only super-admins and admins may perform this operation.
	if agent.Role != db.RoleSuperAdmin && agent.Role != db.RoleAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	keyword := query.Get("keyword")
	agents, err := db.SearchUser(keyword, db.RoleOrdinary)
	if err != nil {
		return nil, err
	}
	ret := make([]protocol.AgentDetail, len(agents))
	for i, agent := range agents {
		ret[i] = protocol.AgentDetail{
			Id: agent.Id,
			Name: agent.Name,
			Account: agent.Account,
			CardCount: agent.CardCount,
			CreateAt: agent.CreateAt,
			RechargeUrl: agent.RechargeUrl,
			Level: agent.Level,
			Discount: agent.Discount,
			Extra: agent.Extra,
		}
	}
	return &protocol.AgentListResponse{Agents: ret, Total: int64(len(ret))}, nil
}
// createAgentHandler lets a super admin or admin create an ordinary,
// already-activated agent account.
func createAgentHandler(req *protocol.RegisterAgentRequest, headers http.Header) (*protocol.StringMessage, error) {
	var (
		name     = strings.TrimSpace(req.Name)
		account  = strings.TrimSpace(req.Account)
		password = strings.TrimSpace(req.Password)
	)
	if name == "" || account == "" || password == "" {
		return nil, errutil.ErrIllegalParameter
	}
	rawToken := strings.TrimSpace(headers.Get(authorization))
	if rawToken == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(rawToken)
	if err != nil {
		return nil, err
	}
	creator, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	// Only super admins and admins may create agents.
	if creator.Role != db.RoleSuperAdmin && creator.Role != db.RoleAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	if db.IsAccountExist(account) {
		return nil, errutil.ErrAccountExists
	}
	hash, salt := algoutil.PasswordHash(password)
	newAgent := &model.Agent{
		Name:          name,
		Account:       account,
		Password:      hash,
		Salt:          salt,
		Role:          db.RoleOrdinary,
		Status:        db.AgentStatusActivated,
		Phone:         req.Phone,
		Wechat:        req.Wechat,
		Level:         req.Level,
		Discount:      req.Discount,
		CreateAccount: meta.Account,
		RechargeUrl:   creator.RechargeUrl, // new agent inherits the creator's recharge URL
		Extra:         req.Extra,
	}
	if err := db.RegisterUser(newAgent); err != nil {
		return nil, err
	}
	return protocol.SuccessMessage, nil
}
// registerAgentHandler handles self-registration: the new agent is created in
// pending state and must later be approved (see provenAgentHandler).
func registerAgentHandler(req *protocol.RegisterAgentRequest, headers http.Header) (*protocol.StringMessage, error) {
	var (
		name     = strings.TrimSpace(req.Name)
		account  = strings.TrimSpace(req.Account)
		password = strings.TrimSpace(req.Password)
		phone    = strings.TrimSpace(req.Phone)
		wechat   = strings.TrimSpace(req.Wechat)
	)
	// All fields are mandatory for self-registration.
	if name == "" || account == "" || password == "" || phone == "" || wechat == "" {
		return nil, errutil.ErrIllegalParameter
	}
	if db.IsAccountExist(account) {
		return nil, errutil.ErrAccountExists
	}
	hash, salt := algoutil.PasswordHash(password)
	pending := &model.Agent{
		Name:          name,
		Account:       account,
		Password:      hash,
		Salt:          salt,
		Role:          db.RoleOrdinary,
		Status:        db.AgentStatusPending, // awaits admin approval
		CreateAccount: "",
		Level:         initAgentLevel,
		Phone:         phone,
		Wechat:        wechat,
		Extra:         req.Extra,
	}
	if err := db.RegisterUser(pending); err != nil {
		return nil, err
	}
	return protocol.SuccessMessage, nil
}
// resetAgentPasswordHandler sets a new password for an agent.
// Allowed for super admins, admins, or the agent changing its own password.
// The new password must be at least 8 characters long.
func resetAgentPasswordHandler(query *nex.Form, headers http.Header) (*protocol.StringMessage, error) {
	agentId := query.Int64OrDefault("id", -1)
	newPassword := query.Get("password")
	if agentId < 0 || len(newPassword) < 8 {
		return nil, errutil.ErrIllegalParameter
	}
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	agent, err := db.QueryUser(agentId)
	if err != nil {
		return nil, err
	}
	metau, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	if metau.Role != db.RoleSuperAdmin && metau.Role != db.RoleAdmin && meta.Account != agent.Account {
		return nil, errutil.ErrPermissionDenied
	}
	hash, salt := algoutil.PasswordHash(newPassword)
	agent.Password = hash
	agent.Salt = salt
	// BUG FIX: the original `if db.UpdateUser(agent); err != nil` discarded
	// UpdateUser's error and re-tested the stale nil err from the previous
	// call, so persistence failures were reported as success.
	if err := db.UpdateUser(agent); err != nil {
		return nil, err
	}
	return protocol.SuccessMessage, nil
}
// resetAgentLevelHandler changes an agent's level (valid range 0..30).
// Only super admins and admins may call this.
func resetAgentLevelHandler(query *nex.Form, headers http.Header) (*protocol.StringMessage, error) {
	agentId := query.Int64OrDefault("id", -1)
	level := query.IntOrDefault("level", -1)
	if agentId < 0 || level < 0 || level > 30 {
		return nil, errutil.ErrIllegalParameter
	}
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	agent, err := db.QueryUser(agentId)
	if err != nil {
		return nil, err
	}
	metau, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	if metau.Role != db.RoleSuperAdmin && metau.Role != db.RoleAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	agent.Level = level
	// BUG FIX: `if db.UpdateUser(agent); err != nil` discarded UpdateUser's
	// error and tested the stale nil err, hiding failed updates.
	if err := db.UpdateUser(agent); err != nil {
		return nil, err
	}
	return protocol.SuccessMessage, nil
}
// resetAgentDiscountHandler changes an agent's discount (valid range 0..100).
// Only super admins and admins may call this.
func resetAgentDiscountHandler(query *nex.Form, headers http.Header) (*protocol.StringMessage, error) {
	agentId := query.Int64OrDefault("id", -1)
	discount := query.IntOrDefault("discount", -1)
	if agentId < 0 || discount < 0 || discount > 100 {
		return nil, errutil.ErrIllegalParameter
	}
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	agent, err := db.QueryUser(agentId)
	if err != nil {
		return nil, err
	}
	metau, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	if metau.Role != db.RoleSuperAdmin && metau.Role != db.RoleAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	agent.Discount = discount
	// BUG FIX: `if db.UpdateUser(agent); err != nil` discarded UpdateUser's
	// error and tested the stale nil err, hiding failed updates.
	if err := db.UpdateUser(agent); err != nil {
		return nil, err
	}
	return protocol.SuccessMessage, nil
}
// resetAgentRechargeUrlHandler changes an agent's recharge URL.
// Only super admins and admins may call this.
func resetAgentRechargeUrlHandler(query *nex.Form, headers http.Header) (*protocol.StringMessage, error) {
	agentId := query.Int64OrDefault("id", -1)
	rechargeUrl := query.Get("url")
	if agentId < 0 || rechargeUrl == "" {
		return nil, errutil.ErrIllegalParameter
	}
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	agent, err := db.QueryUser(agentId)
	if err != nil {
		return nil, err
	}
	metau, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	if metau.Role != db.RoleSuperAdmin && metau.Role != db.RoleAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	agent.RechargeUrl = rechargeUrl
	// BUG FIX: `if db.UpdateUser(agent); err != nil` discarded UpdateUser's
	// error and tested the stale nil err, hiding failed updates.
	if err := db.UpdateUser(agent); err != nil {
		return nil, err
	}
	return protocol.SuccessMessage, nil
}
// agentRechargeUrlListHandler returns every configured recharge URL.
// Restricted to super admins and admins.
func agentRechargeUrlListHandler(query *nex.Form, headers http.Header) (*protocol.CommonResponse, error) {
	rawToken := strings.TrimSpace(headers.Get(authorization))
	if rawToken == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(rawToken)
	if err != nil {
		return nil, err
	}
	operator, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	if operator.Role != db.RoleSuperAdmin && operator.Role != db.RoleAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	urls, err := db.QueryRechargeUrlList()
	if err != nil {
		return nil, err
	}
	return &protocol.CommonResponse{Data: urls}, nil
}
// createAgentRechargeUrlHandler registers a new recharge URL for an area.
// Only the super admin account may call this.
func createAgentRechargeUrlHandler(query *nex.Form, headers http.Header) (*protocol.StringMessage, error) {
	area, rechargeUrl := query.Get("area"), query.Get("url")
	if area == "" || rechargeUrl == "" {
		return nil, errutil.ErrIllegalParameter
	}
	rawToken := strings.TrimSpace(headers.Get(authorization))
	if rawToken == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(rawToken)
	if err != nil {
		return nil, err
	}
	if meta.Account != superAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	record := &model.RechargeUrl{
		Area: area,
		Url:  rechargeUrl,
	}
	if err := db.Insert(record); err != nil {
		return nil, err
	}
	return protocol.SuccessMessage, nil
}
// deleteAgentPasswordHandler soft-deletes an agent by setting its status to
// AgentStatusDeleted. Only super admins and admins may call this.
// NOTE(review): despite the name, this deletes the agent record's status,
// not a password — confirm the route/handler name matches intent.
func deleteAgentPasswordHandler(query *nex.Form, headers http.Header) (*protocol.StringMessage, error) {
	agentId := query.Int64OrDefault("id", -1)
	if agentId < 0 {
		return nil, errutil.ErrIllegalParameter
	}
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	metau, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	// Only super admins and admins may delete agents. (The original comment
	// said "create users", which was copied from another handler.)
	if metau.Role != db.RoleSuperAdmin && metau.Role != db.RoleAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	agent, err := db.QueryUser(agentId)
	if err != nil {
		return nil, err
	}
	agent.Status = db.AgentStatusDeleted
	// BUG FIX: `if db.UpdateUser(agent); err != nil` discarded UpdateUser's
	// error and tested the stale nil err, hiding failed updates.
	if err := db.UpdateUser(agent); err != nil {
		return nil, err
	}
	return protocol.SuccessMessage, nil
}
// rechargeListHandler returns a page of recharge records.
// Admin callers pass the target agent's id; ordinary agents always see their
// own records.
func rechargeListHandler(query *nex.Form, headers http.Header) (*protocol.RechargeListResponse, error) {
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	metau, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	var agentId = query.Int64OrDefault("id", -1)
	var agent *model.Agent
	if metau.Role == db.RoleSuperAdmin || metau.Role == db.RoleAdmin {
		// Admins view another agent's records by id.
		agent, err = db.QueryUser(agentId)
		if err != nil {
			return nil, err
		}
		// NOTE(review): this restricts admins to agents they personally
		// created — it also denies the super admin for agents created by
		// others. Confirm that is intended.
		if agent.CreateAccount != meta.Account {
			return nil, errutil.ErrPermissionDenied
		}
	} else {
		// Ordinary agents can only list their own records.
		agent, err = db.QueryUserByAccount(meta.Account)
		if err != nil {
			return nil, err
		}
	}
	offset := query.IntOrDefault("offset", 0)
	// count defaults to -1 — presumably "no limit"; verify in the db layer.
	count := query.IntOrDefault("count", -1)
	list, total, err := db.QueryRechargeList(offset, count, agent.Id, metau.Role == db.RoleSuperAdmin)
	if err != nil {
		return nil, err
	}
	ret := make([]protocol.RechargeDetail, len(list))
	for i, recharge := range list {
		ret[i] = protocol.RechargeDetail{
			PlayerId:  recharge.PlayerId,
			Extra:     recharge.Extra,
			CreateAt:  recharge.CreateAt,
			CardCount: recharge.CardCount,
		}
	}
	return &protocol.RechargeListResponse{Recharges: ret, Total: total}, nil
}
// rechargeHandler transfers card credits. Two flows:
//   - uid below 100000 and the caller is a (super)admin: recharge an agent's
//     balance directly (uids below 100000 are presumably agent ids — confirm);
//   - otherwise: the calling agent recharges a player through the remote
//     recharge server, then its own balance is decremented.
func rechargeHandler(query *nex.Form, headers http.Header) (*protocol.StringMessage, error) {
	count := query.Int64OrDefault("count", -1)
	if count <= 0 {
		return nil, errutil.ErrIllegalParameter
	}
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	uid := query.Int64OrDefault("uid", -1)
	if uid < 0 {
		return nil, errutil.ErrIllegalParameter
	}
	metau, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	if uid < 100000 && (metau.Role == db.RoleSuperAdmin || metau.Role == db.RoleAdmin) {
		log.Info("管理员给代理商充值")
		// Every admin except the super admin pays out of its own balance.
		if metau.Account != superAdmin {
			if metau.CardCount < count {
				return nil, errors.New("管理员元宝不足")
			} else {
				metau.CardCount -= count
				if err := db.UpdateUser(metau); err != nil {
					return nil, err
				}
			}
		}
		agent, err := db.QueryUser(uid)
		if err != nil {
			log.Error(err)
			return nil, err
		}
		agent.CardCount += count
		if err := db.UpdateUser(agent); err != nil {
			return nil, err
		}
		recharge := &model.AdminRecharge{
			AgentId:      agent.Id,
			AgentName:    agent.Name,
			AgentAccount: agent.Account,
			Extra:        query.Get("extra"),
			CreateAt:     time.Now().Unix(),
			CardCount:    count,
			AdminId:      metau.Id,
			AdminName:    metau.Name,
			AdminAccount: metau.Account,
		}
		if err := db.Insert(recharge); err != nil {
			log.Error(err)
			return nil, err
		}
	} else {
		log.Info("代理商给玩家充值")
		agent, err := db.QueryUserByAccount(meta.Account)
		if err != nil {
			return nil, err
		}
		if agent.Role != db.RoleSuperAdmin && agent.CardCount < count {
			return nil, fmt.Errorf("你剩余房卡%d, 不能为玩家%d充值", agent.CardCount, uid)
		}
		payload := RechargeRequest{
			Count: count,
			Uid:   uid,
		}
		remote := rechargeUrl
		if agent.RechargeUrl != "" {
			remote = agent.RechargeUrl
		}
		buffer := bytes.NewBuffer(nil)
		// BUG FIX: the encode error was ignored; a payload that failed to
		// serialize would have been posted as an empty/partial body.
		if err := json.NewEncoder(buffer).Encode(payload); err != nil {
			return nil, err
		}
		log.Debugf("准备请求充值服务器:%s", remote)
		resp, err := http.Post(remote, "application/json", buffer)
		if err != nil {
			return nil, err
		}
		// BUG FIX: the response body was never closed, leaking the
		// underlying connection on every recharge.
		defer resp.Body.Close()
		respBody, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}
		log.Debugf("充值服务器返回消息: %s", string(respBody))
		respPayload := &StringMessage{}
		// BUG FIX: the decode error was ignored; a malformed response left
		// Code at its zero value and was silently treated as success.
		if err := json.NewDecoder(bytes.NewBuffer(respBody)).Decode(respPayload); err != nil {
			return nil, err
		}
		log.Debugf("responseBody: %+v", respPayload)
		if respPayload.Code != 0 {
			return nil, ErrRechargeServerFailed
		}
		// The super admin has an unlimited balance and is not decremented.
		if agent.Role != db.RoleSuperAdmin {
			agent.CardCount -= count
			if err := db.UpdateUser(agent); err != nil {
				return nil, err
			}
		}
		recharge := &model.Recharge{
			AgentId:      agent.Id,
			AgentName:    agent.Name,
			AgentAccount: agent.Account,
			PlayerId:     uid,
			Extra:        query.Get("extra"),
			CreateAt:     time.Now().Unix(),
			CardCount:    count,
		}
		if err := db.Insert(recharge); err != nil {
			return nil, err
		}
	}
	return protocol.SuccessMessage, nil
}
// provenAgentHandler approves a pending agent registration: the agent is
// activated and stamped with the approving admin's account and recharge URL.
// Only super admins and admins may approve.
func provenAgentHandler(query *nex.Form, headers http.Header) (*protocol.StringMessage, error) {
	id := query.Int64OrDefault("id", -1)
	if id <= 0 {
		return nil, errutil.ErrIllegalParameter
	}
	t := strings.TrimSpace(headers.Get(authorization))
	if t == "" {
		return nil, errutil.ErrPermissionDenied
	}
	meta, err := token.Parse(t)
	if err != nil {
		return nil, err
	}
	metau, err := db.QueryUserByAccount(meta.Account)
	if err != nil {
		return nil, err
	}
	// Idiom fix: early return instead of an else-block after a terminating if.
	if metau.Role != db.RoleSuperAdmin && metau.Role != db.RoleAdmin {
		return nil, errutil.ErrPermissionDenied
	}
	u, err := db.QueryUser(id)
	if err != nil {
		return nil, err
	}
	u.Status = db.AgentStatusActivated
	u.ConfirmAccount = metau.Account
	u.RechargeUrl = metau.RechargeUrl
	u.CreateAccount = metau.Account
	if err := db.UpdateUser(u); err != nil {
		return nil, err
	}
	return protocol.SuccessMessage, nil
}
|
package radware
import (
"fmt"
"github.com/zdnscloud/elb-controller/driver"
"github.com/zdnscloud/elb-controller/driver/radware/types"
)
// radwareConfig is the full Radware-side configuration derived from one
// driver.Service: its real servers, server group and virtual server/service.
type radwareConfig struct {
	RealServers map[string]*types.RealServer // keyed by genRealServerID
	RealServerPort *types.RealServerPort
	VsID string // virtual service id from genVsID; used as identity in diffs
	ServerGroup *types.ServerGroup
	VirtualServer *types.VirtualServer
	VirtualService *types.VirtualService
}
// updateRadwareConfig pairs the previous and desired config for one VsID so
// the caller can compute an in-place update.
type updateRadwareConfig struct {
	old radwareConfig
	new radwareConfig
}
// getRadwareConfigs builds one radwareConfig per service in the driver config.
func getRadwareConfigs(config driver.Config) []radwareConfig {
	result := make([]radwareConfig, 0, len(config.Services))
	for _, svc := range config.Services {
		result = append(result, radwareConfig{
			RealServers:    getRsmap(config, svc),
			RealServerPort: getRsport(svc),
			VsID:           genVsID(svc, config),
			ServerGroup:    getServerGroup(svc, config),
			VirtualServer:  getVirtualServer(config),
			VirtualService: getVirtualService(svc),
		})
	}
	return result
}
// getToDeleteRdConfigs returns the configs present in old whose VsID no
// longer appears in new.
func getToDeleteRdConfigs(old, new []radwareConfig) []radwareConfig {
	result := []radwareConfig{}
	for _, candidate := range old {
		stillPresent := false
		for i := range new {
			if new[i].VsID == candidate.VsID {
				stillPresent = true
				break
			}
		}
		if !stillPresent {
			result = append(result, candidate)
		}
	}
	return result
}
// getToAddRdConfigs returns the configs in new whose VsID did not exist in old.
func getToAddRdConfigs(old, new []radwareConfig) []radwareConfig {
	result := []radwareConfig{}
	for _, candidate := range new {
		alreadyKnown := false
		for i := range old {
			if old[i].VsID == candidate.VsID {
				alreadyKnown = true
				break
			}
		}
		if !alreadyKnown {
			result = append(result, candidate)
		}
	}
	return result
}
// getUpdateRdConfigs pairs configs whose VsID exists in both old and new;
// these are candidates for an in-place update.
func getUpdateRdConfigs(old, new []radwareConfig) []updateRadwareConfig {
	result := []updateRadwareConfig{}
	for _, updated := range new {
		for i := range old {
			if old[i].VsID != updated.VsID {
				continue
			}
			result = append(result, updateRadwareConfig{
				old: old[i],
				new: updated,
			})
			break
		}
	}
	return result
}
// getToDeleteRsmap returns old's real servers whose id is absent from new.
func getToDeleteRsmap(old, new radwareConfig) map[string]*types.RealServer {
	stale := map[string]*types.RealServer{}
	for id, rs := range old.RealServers {
		if _, keep := new.RealServers[id]; !keep {
			stale[id] = rs
		}
	}
	return stale
}
// getToAddRsmap returns new's real servers whose id is absent from old.
func getToAddRsmap(old, new radwareConfig) map[string]*types.RealServer {
	fresh := map[string]*types.RealServer{}
	for id, rs := range new.RealServers {
		if _, known := old.RealServers[id]; !known {
			fresh[id] = rs
		}
	}
	return fresh
}
// getRsmap builds the real-server map for a service, one entry per backend
// host, keyed by the generated real-server id.
func getRsmap(cfg driver.Config, s driver.Service) map[string]*types.RealServer {
	servers := make(map[string]*types.RealServer, len(s.BackendHosts))
	for _, host := range s.BackendHosts {
		servers[genRealServerID(host, s.BackendPort, s.Protocol, cfg)] = &types.RealServer{
			IpAddr: host,
			State:  2,
			Type:   1,
		}
	}
	return servers
}
// getRsport wraps the service's backend port in a RealServerPort.
func getRsport(s driver.Service) *types.RealServerPort {
	return &types.RealServerPort{
		RealPort: s.BackendPort,
	}
}
// genRealServerID builds a unique real-server id:
// cluster_namespace_service_nodeIP_protocol_nodePort.
func genRealServerID(nodeIP string, nodePort int32, protocol driver.Protocol, cfg driver.Config) string {
	return fmt.Sprintf("%s_%s_%s_%s_%s_%v", cfg.K8sCluster, cfg.K8sNamespace, cfg.K8sService, nodeIP, protocol, nodePort)
}
// getServerGroup maps the driver's LB method and service protocol onto a
// Radware server group (metric + health check).
func getServerGroup(s driver.Service, c driver.Config) *types.ServerGroup {
	group := &types.ServerGroup{
		Metric:   1,     // default: round robin
		HealthID: "tcp", // default: tcp healthcheck
	}
	switch c.Method {
	case driver.LBMethodHash:
		group.Metric = 4
	case driver.LBMethodLeastConnections:
		group.Metric = 2
	}
	if s.Protocol == driver.ProtocolUDP {
		group.HealthID = "udp"
	}
	return group
}
// genVsID builds the virtual service id used as diff identity:
// cluster_namespace_service_VIP_protocol_port.
func genVsID(service driver.Service, cfg driver.Config) string {
	return fmt.Sprintf("%s_%s_%s_%s_%s_%v", cfg.K8sCluster, cfg.K8sNamespace, cfg.K8sService, cfg.VIP, service.Protocol, service.Port)
}
// getVirtualServer builds the virtual server entry bound to the config's VIP.
func getVirtualServer(c driver.Config) *types.VirtualServer {
	return &types.VirtualServer{
		VirtServerIpAddress: c.VIP,
		VirtServerState: 2,
	}
}
// getVirtualService builds the virtual service mapping the external port to
// the backend port, switching the balance type for UDP services.
func getVirtualService(s driver.Service) *types.VirtualService {
	vs := &types.VirtualService{
		UDPBalance: 3, // default: tcp service type
		VirtPort:   s.Port,
		RealPort:   s.BackendPort,
		DBind:      2,
		PBind:      2,
	}
	if s.Protocol == driver.ProtocolUDP {
		vs.UDPBalance = 2 // udp service type
	}
	return vs
}
|
/*
* @lc app=leetcode id=30 lang=golang
*
* [30] Substring with Concatenation of All Words
*/
// find_in_words returns the index of the first not-yet-hit entry of words
// equal to word, or (false, -1) when none matches.
func find_in_words(words []string, word_hit []int, word string) (found bool, index int) {
	for i := range words {
		if word_hit[i] != 0 {
			continue // already consumed in this window
		}
		if words[i] == word {
			return true, i
		}
	}
	return false, -1
}
// check_all_and_once_hit reports whether every word was hit exactly once.
func check_all_and_once_hit(word_hit []int) bool {
	for i := range word_hit {
		if word_hit[i] != 1 {
			return false
		}
	}
	return true
}
// clear_word_hit zeroes the hit counters in place for the next window.
func clear_word_hit(word_hit *[]int) {
	hits := *word_hit
	for i := range hits {
		hits[i] = 0
	}
}
// findSubstring returns every index in s where a concatenation of all words
// (each used exactly once, in any order) begins. All words are assumed to
// share the same length, as in LeetCode #30.
func findSubstring(s string, words []string) []int {
	var starts []int
	if len(words) == 0 {
		return starts
	}
	wordLen := len(words[0])
	window := len(words) * wordLen
	hits := make([]int, len(words))
	// Try every alignment offset within one word length; stepping by wordLen
	// from each offset covers all positions in s.
	for offset := 0; offset < wordLen; offset++ {
		for start := offset; start < len(s); start += wordLen {
			if len(s)-start < window {
				break // remainder too short to hold all words
			}
			for k := range words {
				begin := start + k*wordLen
				end := begin + wordLen
				if end > len(s) {
					break
				}
				if ok, idx := find_in_words(words, hits, s[begin:end]); ok {
					hits[idx]++
				} else {
					break // word not in list (or over-used): window fails
				}
			}
			if check_all_and_once_hit(hits) {
				starts = append(starts, start)
			}
			clear_word_hit(&hits)
		}
	}
	return starts
}
|
// examples.go show how to implement a basic crud for one data structure with the api2go server functionality
// to play with this example server you can for example run some of the following curl requests
// Create a new user:
// `curl -X POST http://localhost:31415/v0/users -d '{"data" : [{"type" : "users" , "username" : "marvin"}]}'`
// List users:
// `curl -X GET http://localhost:31415/v0/users`
// List paginated users:
// `curl -X GET http://localhost:31415/v0/users?page[offset]=0&page[limit]=2`
// OR
// `curl -X GET http://localhost:31415/v0/users?page[number]=1&page[size]=2`
// Update:
// `curl -vX PUT http://localhost:31415/v0/users/1 -d '{ "data" : {"type" : "users", "username" : "better marvin", "id" : "1"}}'`
// Delete:
// `curl -vX DELETE http://localhost:31415/v0/users/2`
// FindMultiple (this only works if you've called create a bunch of times :)
// `curl -X GET http://localhost:31415/v0/users/3,4`
// Create a chocolate with the name sweet
// `curl -X POST http://localhost:31415/v0/chocolates -d '{"data" : [{"type" : "chocolates" , "name" : "Ritter Sport", "taste": "Very Good"}]}'`
// Link the sweet
// `curl -X POST http://localhost:31415/v0/users -d '{"data" : [{"type" : "users" , "username" : "marvin", "links": {"sweets": {"linkage": {"type": "chocolates", "id": "1"}}}}]}'`
package main
import (
"errors"
"fmt"
"sort"
"strconv"
"github.com/univedo/api2go"
"github.com/univedo/api2go/jsonapi"
)
import "net/http"
// User is a generic database user held in the in-memory userResource map.
type User struct {
	ID string
	Username string
	PasswordHash string `json:"-"` // never serialized
	Chocolates []Chocolate `json:"-"` // resolved sweets, populated from ChocolatesIDs
	ChocolatesIDs []string `json:"-"` // raw relationship ids from the request
	exists bool // NOTE(review): never read in the visible code — confirm before removing
}
// GetID returns the user's id; satisfies the jsonapi.MarshalIdentifier interface.
func (u User) GetID() string {
	return u.ID
}
// SetID stores the id on the user; satisfies the jsonapi.UnmarshalIdentifier
// interface. It never fails.
func (u *User) SetID(id string) error {
	u.ID = id
	return nil
}
// GetReferences declares the user's one relationship ("sweets" of type
// "chocolates"); satisfies the jsonapi.MarshalReferences interface.
func (u User) GetReferences() []jsonapi.Reference {
	return []jsonapi.Reference{
		{
			Type: "chocolates",
			Name: "sweets",
		},
	}
}
// GetReferencedIDs lists the ids of the user's chocolates as "sweets"
// references; satisfies the jsonapi.MarshalLinkedRelations interface.
func (u User) GetReferencedIDs() []jsonapi.ReferenceID {
	refs := make([]jsonapi.ReferenceID, 0, len(u.Chocolates))
	for i := range u.Chocolates {
		refs = append(refs, jsonapi.ReferenceID{
			ID:   u.Chocolates[i].ID,
			Type: "chocolates",
			Name: "sweets",
		})
	}
	return refs
}
// GetReferencedStructs returns the user's chocolates for side-loading;
// satisfies the jsonapi.MarshalIncludedRelations interface.
func (u User) GetReferencedStructs() []jsonapi.MarshalIdentifier {
	included := make([]jsonapi.MarshalIdentifier, 0, len(u.Chocolates))
	for _, choc := range u.Chocolates {
		included = append(included, choc)
	}
	return included
}
// SetReferencedIDs records incoming "sweets" relationship ids; satisfies the
// jsonapi.UnmarshalLinkedRelations interface. It never fails.
func (u *User) SetReferencedIDs(references []jsonapi.ReferenceID) error {
	for i := range references {
		if references[i].Name != "sweets" {
			continue
		}
		u.ChocolatesIDs = append(u.ChocolatesIDs, references[i].ID)
	}
	return nil
}
// Chocolate is the chocolate that a user consumes in order to get fat and happy.
type Chocolate struct {
	ID string
	Name string
	Taste string
}
// GetID returns the chocolate's id; satisfies the jsonapi.MarshalIdentifier interface.
func (c Chocolate) GetID() string {
	return c.ID
}
// SetID stores the id on the chocolate; satisfies the
// jsonapi.UnmarshalIdentifier interface. It never fails.
func (c *Chocolate) SetID(id string) error {
	c.ID = id
	return nil
}
// ChocolateStorage stores all of the tasty chocolate; it is injected into the
// User and Chocolate resources. In the real world this would be a database.
type ChocolateStorage struct {
	chocolates map[string]Chocolate // keyed by Chocolate.ID
	idCount int // next id to hand out in Insert
}
// GetAll returns every stored chocolate (map order, i.e. unspecified).
func (s ChocolateStorage) GetAll() []Chocolate {
	all := make([]Chocolate, 0, len(s.chocolates))
	for _, choc := range s.chocolates {
		all = append(all, choc)
	}
	return all
}
// GetOne returns the chocolate with the given id, or an error when unknown.
func (s ChocolateStorage) GetOne(id string) (Chocolate, error) {
	if choc, ok := s.chocolates[id]; ok {
		return choc, nil
	}
	return Chocolate{}, fmt.Errorf("Chocolate for id %s not found", id)
}
// Insert stores a fresh chocolate under the next sequential id and returns it.
func (s *ChocolateStorage) Insert(c Chocolate) string {
	id := strconv.Itoa(s.idCount)
	s.idCount++
	c.ID = id
	s.chocolates[id] = c
	return id
}
// Delete removes the chocolate with the given id, erroring when it is unknown.
func (s *ChocolateStorage) Delete(id string) error {
	if _, ok := s.chocolates[id]; !ok {
		return fmt.Errorf("Chocolate with id %s does not exist", id)
	}
	delete(s.chocolates, id)
	return nil
}
// Update replaces an existing chocolate, erroring when its id is unknown.
func (s *ChocolateStorage) Update(c Chocolate) error {
	if _, ok := s.chocolates[c.ID]; !ok {
		return fmt.Errorf("Chocolate with id %s does not exist", c.ID)
	}
	s.chocolates[c.ID] = c
	return nil
}
// userResource holds all users in an in-memory map and implements the
// api2go data-source interface for the "users" resource.
type userResource struct {
	chocStorage *ChocolateStorage // used to resolve "sweets" relationship ids
	users map[string]User // keyed by User.ID
	idCount int // last id handed out in Create
}
// FindAll returns every user (map order); satisfies the api2go data source
// interface.
func (s *userResource) FindAll(r api2go.Request) (interface{}, error) {
	var all []User
	for _, u := range s.users {
		all = append(all, u)
	}
	return all, nil
}
// PaginatedFindAll returns one page of users plus the total user count.
// It supports two mutually exclusive pagination styles:
//   - page[number]/page[size]  (1-based page number)
//   - page[offset]/page[limit] (0-based offset)
// Users are returned in ascending numeric-id order.
func (s *userResource) PaginatedFindAll(r api2go.Request) (interface{}, uint, error) {
	var (
		users                          []User
		number, size, offset, limit    string
		keys                           []int
	)
	// Collect all numeric ids so the page can be sliced deterministically
	// despite random map iteration order. Non-numeric ids are an error.
	for k := range s.users {
		i, err := strconv.ParseInt(k, 10, 64)
		if err != nil {
			return nil, 0, err
		}
		keys = append(keys, int(i))
	}
	sort.Ints(keys)
	numberQuery, ok := r.QueryParams["page[number]"]
	if ok {
		number = numberQuery[0]
	}
	sizeQuery, ok := r.QueryParams["page[size]"]
	if ok {
		size = sizeQuery[0]
	}
	offsetQuery, ok := r.QueryParams["page[offset]"]
	if ok {
		offset = offsetQuery[0]
	}
	limitQuery, ok := r.QueryParams["page[limit]"]
	if ok {
		limit = limitQuery[0]
	}
	if size != "" {
		// number/size style: page numbers are 1-based.
		sizeI, err := strconv.ParseUint(size, 10, 64)
		if err != nil {
			return nil, 0, err
		}
		numberI, err := strconv.ParseUint(number, 10, 64)
		if err != nil {
			return nil, 0, err
		}
		start := sizeI * (numberI - 1)
		for i := start; i < start+sizeI; i++ {
			if i >= uint64(len(s.users)) {
				break
			}
			users = append(users, s.users[strconv.FormatInt(int64(keys[i]), 10)])
		}
	} else {
		// offset/limit style. NOTE(review): if neither style is supplied,
		// ParseUint("") fails and an error is returned — confirm intended.
		limitI, err := strconv.ParseUint(limit, 10, 64)
		if err != nil {
			return nil, 0, err
		}
		offsetI, err := strconv.ParseUint(offset, 10, 64)
		if err != nil {
			return nil, 0, err
		}
		for i := offsetI; i < offsetI+limitI; i++ {
			if i >= uint64(len(s.users)) {
				break
			}
			users = append(users, s.users[strconv.FormatInt(int64(keys[i]), 10)])
		}
	}
	return users, uint(len(s.users)), nil
}
// FindOne returns the user with the given ID, or a 404 HTTP error; satisfies
// the api2go.DataSource interface.
func (s *userResource) FindOne(ID string, r api2go.Request) (interface{}, error) {
	user, ok := s.users[ID]
	if !ok {
		return nil, api2go.NewHTTPError(errors.New("Not Found"), "Not Found", http.StatusNotFound)
	}
	return user, nil
}
// FindMultiple resolves several users by id, failing on the first missing
// one; satisfies the api2go.DataSource interface.
func (s *userResource) FindMultiple(IDs []string, r api2go.Request) (interface{}, error) {
	var found []User
	for _, id := range IDs {
		obj, err := s.FindOne(id, r)
		if err != nil {
			return nil, err
		}
		if u, ok := obj.(User); ok {
			found = append(found, u)
		}
	}
	return found, nil
}
// Create stores a new user under the next sequential id, resolving its
// "sweets" relationship ids into embedded chocolates; satisfies the
// api2go.DataSource interface.
func (s *userResource) Create(obj interface{}, r api2go.Request) (string, error) {
	user, ok := obj.(User)
	if !ok {
		return "", api2go.NewHTTPError(errors.New("Invalid instance given"), "Invalid instance given", http.StatusBadRequest)
	}
	if _, exists := s.users[user.GetID()]; exists {
		return "", api2go.NewHTTPError(errors.New("User exists"), "User exists", http.StatusConflict)
	}
	s.idCount++
	newID := strconv.Itoa(s.idCount)
	user.SetID(newID)
	// Resolve relationship ids into embedded objects; unknown ids abort.
	for _, chocID := range user.ChocolatesIDs {
		choc, err := s.chocStorage.GetOne(chocID)
		if err != nil {
			return "", err
		}
		user.Chocolates = append(user.Chocolates, choc)
	}
	s.users[newID] = user
	return newID, nil
}
// Delete removes the user with the given id; satisfies the api2go.DataSource
// interface.
func (s *userResource) Delete(id string, r api2go.Request) error {
	found, err := s.FindOne(id, api2go.Request{})
	if err != nil {
		return err
	}
	if user, ok := found.(User); ok {
		delete(s.users, user.GetID())
		return nil
	}
	return errors.New("Invalid instance given")
}
// Update stores all changes on the user, re-resolving its "sweets"
// relationship ids into embedded chocolates.
func (s *userResource) Update(obj interface{}, r api2go.Request) error {
	user, ok := obj.(User)
	if !ok {
		return api2go.NewHTTPError(errors.New("Invalid instance given"), "Invalid instance given", http.StatusBadRequest)
	}
	// Resolve relationship ids into embedded objects; unknown ids abort.
	for _, chocID := range user.ChocolatesIDs {
		choc, err := s.chocStorage.GetOne(chocID)
		if err != nil {
			return err
		}
		user.Chocolates = append(user.Chocolates, choc)
	}
	s.users[user.GetID()] = user
	return nil
}
// chocolateResource adapts ChocolateStorage to the api2go data-source interface.
type chocolateResource struct {
	storage *ChocolateStorage
}
// FindAll returns every stored chocolate.
func (c *chocolateResource) FindAll(r api2go.Request) (interface{}, error) {
	return c.storage.GetAll(), nil
}
// FindOne returns the chocolate with the given ID, or the storage's error.
func (c *chocolateResource) FindOne(ID string, r api2go.Request) (interface{}, error) {
	return c.storage.GetOne(ID)
}
// FindMultiple resolves several chocolates by id, failing on the first
// missing one.
func (c *chocolateResource) FindMultiple(IDs []string, r api2go.Request) (interface{}, error) {
	var found []Chocolate
	for _, id := range IDs {
		obj, err := c.FindOne(id, r)
		if err != nil {
			return nil, err
		}
		if choc, ok := obj.(Chocolate); ok {
			found = append(found, choc)
		}
	}
	return found, nil
}
// Create inserts a new chocolate and returns its generated id.
func (c *chocolateResource) Create(obj interface{}, r api2go.Request) (string, error) {
	choc, ok := obj.(Chocolate)
	if !ok {
		return "", api2go.NewHTTPError(errors.New("Invalid instance given"), "Invalid instance given", http.StatusBadRequest)
	}
	return c.storage.Insert(choc), nil
}
// Delete removes the chocolate with the given id, passing through the
// storage's error for unknown ids.
func (c *chocolateResource) Delete(id string, r api2go.Request) error {
	return c.storage.Delete(id)
}
// Update replaces an existing chocolate, rejecting non-Chocolate payloads.
func (c *chocolateResource) Update(obj interface{}, r api2go.Request) error {
	choc, ok := obj.(Chocolate)
	if !ok {
		return api2go.NewHTTPError(errors.New("Invalid instance given"), "Invalid instance given", http.StatusBadRequest)
	}
	return c.storage.Update(choc)
}
// main wires the user and chocolate resources into an api2go API and serves
// it on :31415.
func main() {
	api := api2go.NewAPIWithBaseURL("v0", "http://localhost:31415")
	users := make(map[string]User)
	chocStorage := ChocolateStorage{chocolates: make(map[string]Chocolate), idCount: 1}
	api.AddResource(User{}, &userResource{users: users, chocStorage: &chocStorage})
	api.AddResource(Chocolate{}, &chocolateResource{storage: &chocStorage})
	fmt.Println("Listening on :31415")
	// BUG FIX: the ListenAndServe error was silently discarded; a failed bind
	// (e.g. port already in use) exited without any diagnostic.
	if err := http.ListenAndServe(":31415", api.Handler()); err != nil {
		fmt.Println("server error:", err)
	}
}
|
package bootstrap
import (
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"mobingi/ocean/pkg/constants"
)
// AllowBootstrapTokensToPostCSRs creates RBAC rules in a way that makes Node
// Bootstrap Tokens able to post CSRs.
func AllowBootstrapTokensToPostCSRs(client clientset.Interface) error {
	roleBinding := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: constants.NodeKubeletBootstrap,
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     constants.NodeBootstrapperClusterRoleName,
		},
		Subjects: []rbac.Subject{
			{
				Kind: rbac.GroupKind,
				Name: constants.NodeBootstrapTokenAuthGroup,
			},
		},
	}
	// Idiom: return the Create error directly instead of if-err/return-nil.
	_, err := client.RbacV1().ClusterRoleBindings().Create(roleBinding)
	return err
}
// AutoApproveNodeBootstrapTokens creates RBAC rules in a way that makes Node
// Bootstrap Tokens' CSRs auto-approved by the csrapprover controller.
func AutoApproveNodeBootstrapTokens(client clientset.Interface) error {
	roleBinding := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: constants.NodeAutoApproveBootstrapClusterRoleBinding,
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     constants.CSRAutoApprovalClusterRoleName,
		},
		Subjects: []rbac.Subject{
			{
				// Consistency: use the rbac.GroupKind constant (== "Group")
				// like AllowBootstrapTokensToPostCSRs does.
				Kind: rbac.GroupKind,
				Name: constants.NodeBootstrapTokenAuthGroup,
			},
		},
	}
	// Idiom: return the Create error directly instead of if-err/return-nil.
	_, err := client.RbacV1().ClusterRoleBindings().Create(roleBinding)
	return err
}
// AutoApproveNodeCertificateRotation creates RBAC rules in a way that makes
// Node certificate rotation CSRs auto-approved by the csrapprover controller.
func AutoApproveNodeCertificateRotation(client clientset.Interface) error {
	roleBinding := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: constants.NodeAutoApproveCertificateRotationClusterRoleBinding,
		},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole",
			Name:     constants.NodeSelfCSRAutoApprovalClusterRoleName,
		},
		Subjects: []rbac.Subject{
			{
				// Consistency: use the rbac.GroupKind constant (== "Group")
				// like AllowBootstrapTokensToPostCSRs does.
				Kind: rbac.GroupKind,
				Name: constants.NodesGroup,
			},
		},
	}
	// Idiom: return the Create error directly instead of if-err/return-nil.
	_, err := client.RbacV1().ClusterRoleBindings().Create(roleBinding)
	return err
}
|
package kafka_mock
import (
"github.com/anchorfree/kafka-ambassador/pkg/kafka"
k "github.com/confluentinc/confluent-kafka-go/kafka"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/mock"
)
// MockedProducer is a testify mock of the kafka producer interface; each
// method records its invocation and replays configured return values.
type MockedProducer struct {
	mock.Mock
}
// Send records the call; nothing is actually produced.
func (m *MockedProducer) Send(topic string, message []byte) {
	m.Called(topic, message)
}
// ListTopics replays the configured topic list and error.
func (m *MockedProducer) ListTopics() ([]string, error) {
	args := m.Called()
	return args.Get(0).([]string), args.Error(1)
}
// GetProducersCount replays the configured producer count.
func (m *MockedProducer) GetProducersCount() int {
	args := m.Called()
	return args.Int(0)
}
// GetActiveProducerID replays the configured active producer id.
func (m *MockedProducer) GetActiveProducerID() string {
	args := m.Called()
	return args.String(0)
}
// GetProducer replays the configured producer wrapper.
func (m *MockedProducer) GetProducer() *kafka.ProducerWrapper {
	args := m.Called()
	return args.Get(0).(*kafka.ProducerWrapper)
}
// GenerateProducerID replays the configured producer id.
func (m *MockedProducer) GenerateProducerID() uint {
	args := m.Called()
	return args.Get(0).(uint)
}
// AddActiveProducer replays the configured error. Note: the arguments are
// unnamed and deliberately not forwarded to m.Called.
func (m *MockedProducer) AddActiveProducer(kafka.ProducerI, *k.ConfigMap) error {
	args := m.Called()
	return args.Error(0)
}
// Init replays the configured error. Note: the arguments are unnamed and
// deliberately not forwarded to m.Called.
func (m *MockedProducer) Init(*k.ConfigMap, *prometheus.Registry) error {
	args := m.Called()
	return args.Error(0)
}
// ReSend records the call; it has no mocked return value.
func (m *MockedProducer) ReSend() {
	m.Called()
}
// QueueIsEmpty replays the configured boolean.
func (m *MockedProducer) QueueIsEmpty() bool {
	args := m.Called()
	return args.Bool(0)
}
// Shutdown records the call; it has no mocked return value.
func (m *MockedProducer) Shutdown() {
	m.Called()
}
|
package load_balance
import (
"fmt"
"math/rand"
"testing"
"time"
)
// TestGetHash prints hash values for a few sample keys.
// NOTE(review): this only prints — it asserts nothing, so it can never fail.
func TestGetHash(t *testing.T) {
	fmt.Println(GetHash("new"))
	fmt.Println(GetHash("new2"))
	fmt.Println(GetHash("new3") % 3)
}
// TestIPHashLB_GetServer prints which backend each of 15 random client IPs is
// hashed to. NOTE(review): asserts nothing, and the 200ms sleep per iteration
// makes the test take ~3s for no observable benefit — confirm it is needed.
func TestIPHashLB_GetServer(t *testing.T) {
	lb := &IPHashLB{
		servers: []*HServer{
			{"10.13.0.5"},
			{"10.13.0.9"},
			{"10.13.0.7"},
		}}
	N := 15
	for i := 0; i < N; i++ {
		iip := RandomIP()
		s := lb.GetServer(iip)
		fmt.Printf("%v -> %v\n", iip, s.addr)
		time.Sleep(200 * time.Millisecond)
	}
}
func RandomIP() string {
rand.Seed(time.Now().UnixNano())
ip := fmt.Sprintf("%d.%d.%d.%d", rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255))
return ip
}
|
// 服务常量文件
package main
|
package main
import (
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
var logger *zap.Logger
// init builds the process-wide production logger.
func init() {
	// BUG FIX: the construction error was discarded with `logger, _ = ...`;
	// if zap.NewProduction failed, logger stayed nil and the first log call
	// would panic with a nil pointer. Fail fast at startup instead.
	var err error
	logger, err = zap.NewProduction()
	if err != nil {
		panic(err)
	}
}
// fastHTTPHandler logs each request's URI at info level.
func fastHTTPHandler(ctx *fasthttp.RequestCtx) {
	uri := ctx.RequestURI()
	logger.Info("hello, go module", zap.ByteString("uri", uri))
}
// main serves fastHTTPHandler on :8081. ListenAndServe's error was
// previously ignored, so a failed bind exited silently with status 0.
func main() {
	if err := fasthttp.ListenAndServe(":8081", fastHTTPHandler); err != nil {
		logger.Fatal("server terminated", zap.Error(err))
	}
}
|
/**
*@Author: haoxiongxiao
*@Date: 2019/1/26
*@Description: CREATE GO FILE api_services
*/
package hotel_api_services
import (
"encoding/json"
"errors"
"reflect"
"github.com/spf13/cast"
"github.com/xhaoxiong/ShowApiSdk/normalRequest"
)
// SearchApiServices bundles the request parameters and the parsed response
// for a single hotel-search call against the ShowAPI endpoint.
type SearchApiServices struct {
	ReqParams SearchRequestParams // parameters posted to the endpoint
	Res SearchRes // populated by GetSearchDataServices
}
// SearchRequestParams holds the hotel-search query parameters; each field is
// posted as a form value keyed by its json tag.
type SearchRequestParams struct {
	KeyWord string `json:"keyWords"` // search keyword: hotel name, location, brand, etc.
	Page string `json:"page"` // page number
	CityName string `json:"cityName"` // city name
	IDate string `json:"iDate"` // check-in date, format YYYY-MM-DD (defaults to 2 days from now)
	OutDate string `json:"outDate"` // check-out date, format YYYY-MM-DD (defaults to 3 days from now)
	SortCode string `json:"sortCode"` // sort order (default 1, by recommendation): 1 recommendation desc, 2 starting price asc, 3 starting price desc, 6 renovation date
	ReturnFilter string `json:"returnFilter"` // return aggregated filter options? 0: no, 1: yes. Note: 1 degrades search performance; prefer 0
	Star string `json:"star"` // star rating: TWO, THREE, FOUR, FIVE, BUDGET (economy), CONFORT (comfort), HIGHEND, LUXURY — multiple values comma-separated
	Feature string `json:"feature"` // brand, aggregated from search results
	MinPrice string `json:"minPrice"` // minimum room price
	MaxPrice string `json:"maxPrice"` // maximum room price
	Facility string `json:"facility"` // facilities, aggregated from search results
	HotelLabels string `json:"hotellablels"` // features: 1 hot spring, 3 resort, 4 convenient shopping, 5 guesthouse/homestay, 6 youth hostel, 7 boutique hotel, 8 family-friendly (tag typo kept: upstream expects "hotellablels")
}
// SearchRes mirrors the JSON envelope returned by the ShowAPI hotel-search
// endpoint: a result code/ID/error at the top level, and the hotel list plus
// optional aggregated filters inside showapi_res_body.
type SearchRes struct {
	ShowapiResError string `json:"showapi_res_error"`
	ShowapiResID string `json:"showapi_res_id"`
	ShowapiResCode int `json:"showapi_res_code"`
	ShowapiResBody struct {
		CityName string `json:"cityName"`
		Remark string `json:"remark"`
		Data struct {
			HotelList []struct {
				EnglishName string `json:"englishName"`
				HotelID int `json:"hotelId"`
				Longitude float64 `json:"longitude"`
				Facilities []interface{} `json:"facilities"`
				Address string `json:"address"`
				Latitude float64 `json:"latitude"`
				Price int `json:"price"`
				ChineseName string `json:"chineseName"`
				Star int `json:"star"`
				Picture string `json:"picture"`
				StarName string `json:"starName"`
			} `json:"hotelList"`
			Count int `json:"count"`
			Filter []struct {
				FilterName string `json:"filterName"`
				FilterID string `json:"filterId"`
				Pros []struct {
					PoiName string `json:"poiName"`
					PoiKey string `json:"poiKey"`
					Filter []struct {
						Longitude float64 `json:"longitude"`
						Code int `json:"code"`
						HotelCount int `json:"hotelCount"`
						Name string `json:"name"`
						Heat int `json:"heat"`
						Latitude float64 `json:"latitude"`
					} `json:"filter"`
				} `json:"pros"`
			} `json:"filter"`
		} `json:"data"`
		RetCode int `json:"ret_code"`
	} `json:"showapi_res_body"`
}
// GetSearchDataServices posts the configured search parameters to the ShowAPI
// hotel-search endpoint, unmarshals the response into s.Res, and returns it.
// A non-zero showapi_res_code is surfaced as an error built from
// showapi_res_error. Fixes: the nested duplicate CanInterface() check is
// collapsed to one, and the non-idiomatic `this` receiver is renamed.
func (s *SearchApiServices) GetSearchDataServices() (res SearchRes, err error) {
	req := normalRequest.ShowapiRequest("http://route.showapi.com/1653-1", appId, appSecret)
	// Copy every request field into the form body, keyed by its json tag.
	t := reflect.TypeOf(s.ReqParams)
	v := reflect.ValueOf(s.ReqParams)
	for i := 0; i < v.NumField(); i++ {
		if !v.Field(i).CanInterface() {
			continue // skip unexported fields
		}
		key := cast.ToString(t.Field(i).Tag.Get("json"))
		val := cast.ToString(v.Field(i).Interface())
		req.AddTextPara(key, val)
	}
	body, err := req.Post()
	if err != nil {
		return res, err
	}
	if err := json.Unmarshal([]byte(body), &s.Res); err != nil {
		return res, err
	}
	if s.Res.ShowapiResCode != 0 {
		return s.Res, errors.New(s.Res.ShowapiResError)
	}
	return s.Res, nil
}
// ApiSearch is a convenience wrapper that runs a hotel search with the given
// parameters and returns the parsed result.
func ApiSearch(req SearchRequestParams) (res SearchRes, err error) {
	svc := SearchApiServices{ReqParams: req, Res: res}
	return svc.GetSearchDataServices()
}
|
package main
import (
"flag"
"image"
"log"
"math"
"math/rand"
"os"
"strconv"
"github.com/jcorbin/anansi"
)
/* Ported from [antirez's LOLWUT](http://antirez.com/news/123)
*
* Creates output like:
*
* ⠀⡤⠤⠤⠤⠤⠤⠤⠤⠤⡤⠤⠤⠤⠤⠤⠤⠤⠤⡤⠤⠤⠤⠤⠤⠤⠤⠤⡄⠀
* ⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀
* ⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀
* ⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀
* ⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀
* ⠀⡏⠉⠉⠉⠉⠉⠉⠉⠉⡏⠉⠉⠉⠉⠉⠉⠉⠉⡏⠉⠉⠉⠉⠉⠉⠉⠉⡇⠀
* ⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀
* ⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀
* ⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀
* ⠀⡷⠦⠤⢤⣤⣤⠤⠤⠤⡧⠤⠤⠤⠤⠤⠤⠤⢤⣧⣤⠤⠤⠤⠤⠴⠶⢶⠇⠀
* ⢸⠀⠀⠀⠀⠀⠀⠉⠉⠒⡇⠀⠀⠀⠀⠀⠀⠀⢸⡇⠀⠀⠀⠀⠀⠀⠀⠘⡄⠀
* ⡇⠀⠀⠀⠀⠀⠀⠀⠀⢸⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀
* ⠁⠀⠀⠀⠀⠀⠀⠀⠀⡇⡇⠀⠀⠀⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠀⢇⠀
* ⡀⠀⠀⠀⠀⣀⡠⠔⢶⠁⡇⣀⣀⠤⠤⠔⠒⠊⠹⣷⡰⠢⠤⣀⡀⠀⢀⣀⣸⠀
* ⢈⡩⠵⠒⠛⠤⠤⣀⡎⡾⡉⠉⠉⠉⠉⠉⠉⠉⠉⢿⠓⠒⠉⠉⠉⠉⠓⠢⠤⣀
* ⡅⠀⠀⠀⠀⠀⠀⠀⠀⠘⣇⠀⠀⠀⠀⠀⠀⠀⠀⣾⠀⠀⠀⠀⠀⠀⠀⠀⠀⡎
* ⠸⡀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡀⠀⠀⠀⠀⠀⠀⡸⠀⡇⢀⢄⡀⠀⠀⠀⠀⡸⠀
* ⠀⠱⡀⠀⠀⠀⠀⠀⠀⠀⣀⡷⠀⠀⠀⠀⠀⢠⣃⡠⡼⠊⠀⠈⠢⢄⠀⢠⠃⠀
* ⠀⠀⢣⠀⠀⣀⣠⠴⡊⠉⠀⠸⢤⠶⢖⡉⠉⠁⢈⠝⠓⠢⠤⣀⡀⠀⠑⡮⡀⠀
* ⠀⠀⣀⠷⠛⠉⠀⠀⠱⡀⠀⢀⠎⠀⠀⠈⠑⡢⢎⡀⠀⠀⠀⠀⠈⠉⠚⠀⠈⡕
* ⠒⠉⠀⠀⠀⠀⠀⠀⠀⠱⣀⠎⠀⠀⠀⠀⠐⠥⡀⠈⢱⠂⠀⠀⠀⠀⠀⢠⠊⠀
* ⢣⠀⠀⠀⠀⠀⠀⠀⠀⢀⠿⡀⠀⠀⠀⠀⠀⠀⠈⢲⢇⠀⠀⠀⠀⠀⡔⠁⠀⠀
* ⠀⢣⠀⠀⠀⠀⠀⠀⠀⠚⢤⣱⠀⠀⠀⠀⠀⠀⢠⠃⠀⠉⠢⣀⢠⠊⠀⠀⠀⠀
* ⠀⠀⠃⠀⠀⠀⠀⠀⠐⠊⠁⠀⠉⠒⠀⠀⠀⠀⠃⠀⠀⠀⠀⠀⠁⠀⠀⠀⠀⠀
*/
// sd is the package-level demo instance driven by main.
var sd schotterDemo
// main renders one schotter image to stdout, or runs interactive mode with
// -i. Positional args: cols, squares-per-row, squares-per-col.
func main() {
	interactive := flag.Bool("i", false, "interactive mode")
	flag.Parse()
	var (
		// parameter = parseArg(arg, "name", default, min)
		cols = parseArg(flag.Arg(0), "cols", 60, 1)
		squaresPerRow = parseArg(flag.Arg(1), "squares-per-row", 8, 1)
		squaresPerCol = parseArg(flag.Arg(2), "squares-per-col", 12, 1)
	)
	if *interactive {
		runInteractive() // TODO pass squaresPerRow / squaresPerCol ?
		return
	}
	sd.setup(cols, squaresPerRow, squaresPerCol)
	sd.draw()
	// Emit the bitmap as text, then a trailing newline; report either failure.
	_, err := anansi.WriteBitmap(os.Stdout, sd.canvas)
	if err == nil {
		_, err = os.Stdout.WriteString("\n")
	}
	if err != nil {
		log.Fatalln(err)
	}
}
// schotterDemoConfig carries the layout parameters for one rendering.
type schotterDemoConfig struct {
	squaresPerRow int // number of squares across
	squaresPerCol int // number of squares down
	squareSide int // side length of one grid cell, in bitmap units
	padding int // blank border around the grid (0 or 2, set in setup)
	seed int64 // PRNG seed; draw() resets its rand source from this
	angleOffset float64 // base rotation applied to every square (radians)
}
// schotterDemo couples a configuration with its rendering state.
type schotterDemo struct {
	schotterDemoConfig
	canvas anansi.Bitmap // render target, sized by setup
	rand *rand.Rand // deterministic stream, created by draw from seed
}
// setup derives the demo configuration from the command-line arguments and
// resizes the canvas to fit the resulting grid.
func (sd *schotterDemo) setup(cols, squaresPerRow, squaresPerCol int) {
	sd.squaresPerRow = squaresPerRow
	sd.squaresPerCol = squaresPerCol
	// The bitmap is twice as wide as the requested column count.
	width := cols * 2
	sd.padding = 0
	if width > 4 {
		sd.padding = 2
	}
	// Fit squaresPerRow squares into the padded width.
	sd.squareSide = int(float64(width-sd.padding*2) / float64(sd.squaresPerRow))
	height := sd.squareSide*sd.squaresPerCol + sd.padding*2
	sd.canvas.Resize(image.Pt(width, height))
}
// draw a computer graphic art piece generated by Georg Nees in the 60s. It
// explores the relationship between chaos and order.
//
// Squares sit on a regular grid; rows further down receive random rotation
// and translation whose magnitude grows with the row index y.
func (sd *schotterDemo) draw() {
	// Fresh deterministic stream so repeated draws with the same seed match.
	sd.rand = rand.New(rand.NewSource(sd.seed))
	for y := 0; y < sd.squaresPerCol; y++ {
		for x := 0; x < sd.squaresPerRow; x++ {
			// Center of this square in canvas coordinates.
			sx := x*sd.squareSide + sd.squareSide/2 + sd.padding
			sy := y*sd.squareSide + sd.squareSide/2 + sd.padding
			// Rotate and translate randomly as we go down to lower rows.
			angle := sd.angleOffset
			if y > 0 {
				// r1..r3 scale with y so disorder increases toward the bottom.
				// NOTE: the draw order of these rand calls fixes the output;
				// do not reorder.
				r1 := sd.rand.Float64() / float64(sd.squaresPerCol) * float64(y)
				r2 := sd.rand.Float64() / float64(sd.squaresPerCol) * float64(y)
				r3 := sd.rand.Float64() / float64(sd.squaresPerCol) * float64(y)
				if sd.rand.Intn(2) == 1 {
					r1 = -r1
				}
				if sd.rand.Intn(2) == 1 {
					r2 = -r2
				}
				if sd.rand.Intn(2) == 1 {
					r3 = -r3
				}
				angle = sd.angleOffset + r1
				sx += int(r2 * float64(sd.squareSide) / 3)
				sy += int(r3 * float64(sd.squareSide) / 3)
			}
			drawSquare(sd.canvas, image.Pt(sx, sy), sd.squareSide, angle)
		}
	}
}
// drawSquare draws a square centered at `at`, with the given side length and
// rotation angle (radians).
//
// The four corners come from the parametric circle x,y = sin(k),cos(k):
// starting at k = PI/4 + angle and stepping k by PI/2 yields the corners of a
// rotated square inscribed in the unit circle, which we then scale and
// translate. This avoids any general 2D-transform machinery.
func drawSquare(canvas anansi.Bitmap, at image.Point, size int, angle float64) {
	// A square inscribed in a unit circle has side sqrt(2), so divide the
	// requested side by sqrt(2) to get the circumradius scaling factor.
	radius := math.Round(float64(size) / math.Sqrt2)
	k := math.Pi/4 + angle
	prev := rotPt(k, radius).Add(at)
	for corner := 0; corner < 4; corner++ {
		k += math.Pi / 2
		next := rotPt(k, radius).Add(at)
		drawLine(canvas, prev, next, true)
		prev = next
	}
}
// rotPt returns the integer point at the given angle (radians) on a circle
// of the given radius: x from sin, y from cos, both rounded.
func rotPt(angle, scale float64) image.Point {
	x := math.Round(math.Sin(angle) * scale)
	y := math.Round(math.Cos(angle) * scale)
	return image.Pt(int(x), int(y))
}
// drawLine draws a bitmap line starting at the from point, through the to
// point using the Bresenham algorithm.
func drawLine(canvas anansi.Bitmap, from, to image.Point, val bool) {
	dx := abs(to.X - from.X)
	dy := abs(to.Y - from.Y)
	// Step direction along each axis.
	sx, sy := 1, 1
	if from.X >= to.X {
		sx = -1
	}
	if from.Y >= to.Y {
		sy = -1
	}
	// err accumulates the deviation from the ideal line; its sign decides
	// which axis to step next.
	err := dx - dy
	canvas.Set(from, val)
	for from != to {
		e2 := err * 2
		if e2 > -dy {
			err -= dy
			from.X += sx
		}
		if e2 < dx {
			err += dx
			from.Y += sy
		}
		canvas.Set(from, val)
	}
}
// abs returns the absolute value of n.
func abs(n int) int {
	if n >= 0 {
		return n
	}
	return -n
}
// parseArg interprets arg as an integer command-line parameter: an empty
// string yields def, an unparsable string is fatal, and values below min are
// clamped up to min.
func parseArg(arg, name string, def, min int) int {
	if arg == "" {
		return def
	}
	value, err := strconv.Atoi(arg)
	if err != nil {
		log.Fatalf("invalid %s argument %q: %v", name, arg, err)
	}
	if value < min {
		value = min
	}
	return value
}
|
package main
import (
"github.com/gin-gonic/gin"
"net/http"
)
func main() {
router := gin.Default()
// 简单的路由组: v1
v1 := router.Group("/v1")
{
v1.GET("/login", func(c *gin.Context) {
c.JSON(http.StatusOK,gin.H{"m":"login1"})
})
v1.GET("/submit", func(c *gin.Context) {
c.JSON(http.StatusOK,gin.H{"m":"submit1"})
})
v1.GET("/read", func(c *gin.Context) {
c.JSON(http.StatusOK,gin.H{"m":"read1"})
})
}
// 简单的路由组: v2
v2 := router.Group("/v2")
{
v2.GET("/login", func(c *gin.Context) {
c.JSON(http.StatusOK,gin.H{"m":"login2"})
})
v2.GET("/submit", func(c *gin.Context) {
c.JSON(http.StatusOK,gin.H{"m":"submit2"})
})
v2.GET("/read", func(c *gin.Context) {
c.JSON(http.StatusOK,gin.H{"m":"read2"})
})
}
router.Run(":8080")
} |
package filestore_test
import (
"context"
"testing"
"github.com/direktiv/direktiv/pkg/refactor/database"
"github.com/direktiv/direktiv/pkg/refactor/filestore"
"github.com/direktiv/direktiv/pkg/refactor/filestore/filestoresql"
"github.com/google/uuid"
)
// assertFileStoreCorrectRootCreation creates a root under namespace id and
// fails the test unless the returned root is non-nil and carries that id.
func assertFileStoreCorrectRootCreation(t *testing.T, fs filestore.FileStore, id uuid.UUID) {
	t.Helper()
	root, err := fs.CreateRoot(context.Background(), uuid.New(), id, "test")
	switch {
	case err != nil:
		t.Errorf("unexpected CreateRoot() error: %v", err)
	case root == nil:
		t.Errorf("unexpected nil root CreateRoot()")
	case root.NamespaceID != id:
		t.Errorf("unexpected root.NamespaceID, got: >%s<, want: >%s<", root.NamespaceID, id)
	}
}
// assertFileStoreHasRoot fails the test unless GetAllRoots returns exactly
// the given namespace IDs, in order.
func assertFileStoreHasRoot(t *testing.T, fs filestore.FileStore, ids ...uuid.UUID) {
	t.Helper()
	all, err := fs.GetAllRoots(context.Background())
	if err != nil {
		t.Errorf("unexpected GetAllRoots() error: %v", err)
		return
	}
	if len(all) != len(ids) {
		t.Errorf("unexpected GetAllRoots() length, got: %d, want: %d", len(all), len(ids))
		return
	}
	for i, want := range ids {
		if got := all[i].NamespaceID; got != want {
			t.Errorf("unexpected all[%d].ID , got: >%s<, want: >%s<", i, got, want)
			return
		}
	}
}
// assertFileStoreCorrectRootDeletion deletes each root by ID, reporting any
// deletion error (it does not stop at the first failure).
func assertFileStoreCorrectRootDeletion(t *testing.T, fs filestore.FileStore, ids ...uuid.UUID) {
	t.Helper()
	for _, id := range ids {
		if err := fs.ForRootID(id).Delete(context.Background()); err != nil {
			t.Errorf("unexpected Delete() error: %v", err)
		}
	}
}
// Test_sqlFileStore_CreateRoot verifies roots can be created for several
// distinct namespace IDs. Fixes: "unepxected" typo in the failure message,
// and the three subtests now have distinct names instead of all "validCase".
func Test_sqlFileStore_CreateRoot(t *testing.T) {
	db, err := database.NewMockGorm()
	if err != nil {
		t.Fatalf("unexpected NewMockGorm() error = %v", err)
	}
	fs := filestoresql.NewSQLFileStore(db)
	tests := []struct {
		name string
		id   uuid.UUID
	}{
		{"validCase1", uuid.New()},
		{"validCase2", uuid.New()},
		{"validCase3", uuid.New()},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assertFileStoreCorrectRootCreation(t, fs, tt.id)
		})
	}
}
// Test_sqlFileStore_ListingAfterCreate exercises root listing and deletion:
// roots must appear in GetAllRoots after creation and vanish after Delete.
// Fixes: panic() calls replaced with t.Fatalf (panics abort the whole test
// binary and lose context), and the "unepxected" typo is corrected.
func Test_sqlFileStore_ListingAfterCreate(t *testing.T) {
	db, err := database.NewMockGorm()
	if err != nil {
		t.Fatalf("unexpected NewMockGorm() error = %v", err)
	}
	fs := filestoresql.NewSQLFileStore(db)
	myRoot1 := uuid.New()
	myRoot2 := uuid.New()
	myRoot3 := uuid.New()
	// assert correct empty list.
	assertFileStoreHasRoot(t, fs)
	// create two roots:
	assertFileStoreCorrectRootCreation(t, fs, myRoot1)
	assertFileStoreCorrectRootCreation(t, fs, myRoot2)
	// assert existence.
	assertFileStoreHasRoot(t, fs, myRoot1, myRoot2)
	// add a third one:
	assertFileStoreCorrectRootCreation(t, fs, myRoot3)
	// assert existence:
	assertFileStoreHasRoot(t, fs, myRoot1, myRoot2, myRoot3)
	roots, err := fs.GetAllRootsForNamespace(context.Background(), myRoot2)
	if err != nil {
		t.Fatalf("unexpected GetAllRootsForNamespace() error: %v", err)
	}
	if len(roots) != 1 {
		t.Fatalf("unexpected GetAllRootsForNamespace() length, got: %d, want: 1", len(roots))
	}
	// delete one:
	assertFileStoreCorrectRootDeletion(t, fs, roots[0].ID)
	// assert correct list:
	assertFileStoreHasRoot(t, fs, myRoot1, myRoot3)
	roots, err = fs.GetAllRoots(context.Background())
	if err != nil {
		t.Fatalf("unexpected GetAllRoots() error: %v", err)
	}
	if len(roots) != 2 {
		t.Fatalf("unexpected GetAllRoots() length, got: %d, want: 2", len(roots))
	}
	// delete all:
	assertFileStoreCorrectRootDeletion(t, fs, roots[0].ID, roots[1].ID)
	// assert correct empty list.
	assertFileStoreHasRoot(t, fs)
}
// TestSha256CalculateChecksum pins the checksum of a known input; the
// expected value implies the function returns the hex-encoded SHA-256 digest.
func TestSha256CalculateChecksum(t *testing.T) {
	const want = "539a374ff43dce2e894fd4061aa545e6f7f5972d40ee9a1676901fb92125ffee"
	got := string(filestore.Sha256CalculateChecksum([]byte("some_string")))
	if got != want {
		t.Errorf("unexpected Sha256CalculateChecksum() result, got: %s, want: %s", got, want)
	}
}
|
/*
* Copyright © 2021-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adabas
import (
"github.com/SoftwareAG/adabas-go-api/adatypes"
)
// UpdateLOBRecord writes one segment of a LOB: data is stored into `field`
// of the existing record at `isn`, starting at `offset` bytes into the LOB.
func (request *StoreRequest) UpdateLOBRecord(isn adatypes.Isn, field string, offset uint64, data []byte) (err error) {
	debug := adatypes.Central.IsDebugLevel()
	if debug {
		adatypes.Central.Log.Debugf("Store LOB record initiated ...")
	}
	// Ensure the session is open before touching the record.
	err = request.Open()
	if err != nil {
		return
	}
	// Restrict the store operation to the LOB field being updated.
	err = request.StoreFields(field)
	if err != nil {
		adatypes.Central.Log.Debugf("Store fields error ...%#v", err)
		return err
	}
	if debug {
		adatypes.Central.Log.Debugf("LOB Definition generated ...BlockSize=%d", len(data))
	}
	var record *Record
	record, err = request.CreateRecord()
	if err != nil {
		return
	}
	// Target the existing record at isn rather than inserting a new one.
	record.Isn = isn
	// offset+1: partial-value positions appear to be 1-based in this API —
	// TODO(review) confirm against SetPartialValue's contract.
	err = record.SetPartialValue(field, uint32(offset+1), data)
	if err != nil {
		adatypes.Central.Log.Debugf("Set partial value error ...%#v", err)
		return err
	}
	if debug {
		adatypes.Central.Log.Debugf("Update LOB with ...%#v", field)
	}
	// prepareRequest(false) — the flag's meaning isn't visible here; confirm
	// before changing.
	adabasRequest, prepareErr := request.prepareRequest(false)
	if prepareErr != nil {
		return prepareErr
	}
	err = request.update(adabasRequest, record)
	if debug {
		// NOTE(review): message says "reading" but this is the update path —
		// likely a copy/paste; the logged value is the update error (may be nil).
		adatypes.Central.Log.Debugf("Error reading %v", err)
	}
	return err
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.